Compare commits

...

935 Commits

Author SHA1 Message Date
Harshavardhana
2f4af09c01 fix: allow changes to readAllData to decrement activeCount() 2021-02-28 20:09:23 -08:00
Harshavardhana
37960cbc2f fix: avoid writing more content on network with O_DIRECT reads (#11659)
An io.LimitReader was missing for the 'length'
parameter on ranged requests, which would cause clients to
get truncated responses and errors.

fixes #11651
2021-02-28 15:33:03 -08:00
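
A minimal sketch of the idea behind this fix, with hypothetical names rather than MinIO's actual handler code: cap a ranged read at the requested length with io.LimitReader so the copy cannot write more bytes on the network than the range asked for.

```
package rangeread

import (
	"io"
	"os"
)

// serveRange copies exactly [offset, offset+length) to w.
func serveRange(w io.Writer, f *os.File, offset, length int64) error {
	if _, err := f.Seek(offset, io.SeekStart); err != nil {
		return err
	}
	// Without the LimitReader the copy would run to EOF and write more
	// content on the network than the client requested.
	_, err := io.Copy(w, io.LimitReader(f, length))
	return err
}
```
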
cbows
c67d1bf120 add unauthenticated lookup-bind mode to LDAP identity (#11655)
Closes #11646
2021-02-28 12:57:31 -08:00
Klaus Post
c5b3a675fa Block profiling tweaks (#11612)
The base profiles contain no valuable data; don't record them.

Reduce the block rate by 2 orders of magnitude; this should still capture data just as valuable, with less CPU strain.
2021-02-27 09:22:14 -08:00
Harshavardhana
b690304eed use faster way for siphash (#11640) 2021-02-26 16:53:06 -08:00
Harshavardhana
9171d6ef65 rename all references from crawl -> scanner (#11621) 2021-02-26 15:11:42 -08:00
Harshavardhana
6386b45c08 [feat] use rename instead of recursive deletes (#11641)
most of the delete calls today spend time in
a blocking operation where multiple calls need
to be recursively sent to delete the objects;
instead we can use a rename operation to atomically
move the objects from the namespace to `tmp/.trash`

we can schedule deletion of objects at this
location once every 15-30 minutes, and we can also add
wait times between each delete operation.

this allows us to make deletes faster as well as
less chatty on the drives; each server locally runs
a goroutine which cleans this up regularly.
2021-02-26 09:52:27 -08:00
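
A rough sketch of the rename-instead-of-delete pattern described above, with hypothetical paths and names; MinIO's real implementation lives in its storage layer:

```
package trashdelete

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// deleteObject atomically moves the object out of the namespace; rename
// is cheap compared to a recursive delete on the request path.
func deleteObject(objPath, trashDir string) error {
	name := fmt.Sprintf("%d", time.Now().UnixNano())
	return os.Rename(objPath, filepath.Join(trashDir, name))
}

// cleanupTrash runs as a background goroutine on each server and empties
// the trash on a schedule, keeping drives less chatty.
func cleanupTrash(trashDir string, every time.Duration) {
	for range time.Tick(every) {
		_ = os.RemoveAll(trashDir)
		_ = os.MkdirAll(trashDir, 0o755)
	}
}
```
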
Andreas Auernhammer
1f659204a2 remove GetObject from ObjectLayer interface (#11635)
This commit removes the `GetObject` method
from the `ObjectLayer` interface.

The `GetObject` method is no longer used by
the HTTP handlers implementing the high-level
S3 semantics. Instead, they use the `GetObjectNInfo`
method, which returns both an object handle and
the object metadata.

Therefore, it is no longer necessary that a concrete
`ObjectLayer` implements `GetObject`.
2021-02-26 09:52:02 -08:00
Harshavardhana
f9f6fd0421 fix: service account permissions generated from LDAP user (#11637)
service accounts generated from an LDAP parent user
did not inherit the correct permissions; this PR fixes
that fully.
2021-02-25 13:49:59 -08:00
Klaus Post
85620dfe93 use bucket in path in distribution hash (#11634)
Use bucket in erasure distribution hash.

For the rare cases where objects with the same names are uploaded to many buckets.
2021-02-25 10:11:31 -08:00
Harshavardhana
a8e4f64ff3 Revert "fix: remove persistence layer for metacache store in memory (#11538)"
This reverts commit b23659927c.
2021-02-24 22:24:51 -08:00
Krishnan Parthasarathi
ca5c6e3160 fix: translate empty versionID string to null version where appropriate (#11629)
We store the null version as an empty string. We should translate it to the
null version for buckets with versioning suspended too.
2021-02-24 18:39:10 -08:00
Harshavardhana
b23659927c fix: remove persistence layer for metacache store in memory (#11538)
store the cache in memory instead of on disk to avoid large
write amplification for list-heavy workloads, and let it
auto-expire.
2021-02-24 15:51:41 -08:00
Minio Trusted
b912e9ab41 Update yaml files to latest version RELEASE.2021-02-24T18-44-45Z 2021-02-24 19:08:36 +00:00
Andreas Auernhammer
c1a49be639 use crypto/sha256 for FIPS 140-2 compliance (#11623)
This commit replaces the usage of
github.com/minio/sha256-simd with crypto/sha256
of the standard library in all non-performance
critical paths.

This is necessary for FIPS 140-2 compliance, which
requires that all cryptographic primitives are implemented
by a FIPS-validated module.

Go can use the Google FIPS module. The boringcrypto
branch of the Go standard library uses the BoringSSL
FIPS module to implement cryptographic primitives like AES
and SHA256.

We only keep github.com/minio/sha256-simd when computing
the content-SHA256 of an object. Therefore, this commit
relies on a build tag `fips`.

When MinIO is compiled without the `fips` flag it will
use github.com/minio/sha256-simd. When MinIO is compiled
with the fips flag (go build --tags "fips") then MinIO
uses crypto/sha256 to compute the content-SHA256.
2021-02-24 09:00:15 -08:00
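
A rough sketch of how such a build-tag switch can be laid out; the file names, the package name, and the `//go:build` directive (this commit predates that syntax and would have used `// +build fips`) are illustrative, not MinIO's actual files:

```
// sha256_fips.go - compiled with: go build -tags fips
//go:build fips

package hashutil

import (
	"crypto/sha256" // standard library; FIPS-validated in boringcrypto builds
	"hash"
)

func NewSHA256() hash.Hash { return sha256.New() }
```

```
// sha256_nofips.go - the default build
//go:build !fips

package hashutil

import (
	"hash"

	"github.com/minio/sha256-simd" // SIMD-accelerated implementation
)

func NewSHA256() hash.Hash { return sha256.New() }
```
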
Klaus Post
03172b89e2 Ensure cache has finished deserializing (#11620)
Make sure that response has been fully deserialized before returning.
2021-02-24 02:59:49 -08:00
Harshavardhana
b517c791e9 [feat]: use DSYNC for xl.meta writes and NOATIME for reads (#11615)
We are better off using O_DSYNC instead of O_SYNC,
since we are only ever interested in the data being
persisted to disk, not the associated filesystem metadata.

For reads we ask customers to mount with noatime, but
we can also proactively use the O_NOATIME flag to avoid
atime updates upon reads.
2021-02-24 00:14:16 -08:00
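
A Linux-only sketch of these flag choices using golang.org/x/sys/unix (paths and helper names are illustrative): O_DSYNC persists file data on every write without forcing metadata flushes, and O_NOATIME suppresses access-time updates on reads (it requires file ownership or CAP_FOWNER).

```
package openflags

import (
	"os"

	"golang.org/x/sys/unix"
)

// openForWrite persists data on every write, but unlike O_SYNC does not
// also flush filesystem metadata such as timestamps.
func openForWrite(path string) (*os.File, error) {
	return os.OpenFile(path, os.O_CREATE|os.O_WRONLY|unix.O_DSYNC, 0o644)
}

// openForRead avoids dirtying the inode with access-time updates.
func openForRead(path string) (*os.File, error) {
	return os.OpenFile(path, os.O_RDONLY|unix.O_NOATIME, 0o644)
}
```
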
Petr Tichý
14aef52004 remove Content-MD5 on Range requests (#11611)
This removes the Content-MD5 response header on Range requests in Azure
Gateway mode. The partial content MD5 doesn't match the full object MD5
in metadata.
2021-02-23 19:32:56 -08:00
Harshavardhana
67b20125e4 fix: do not ignore CREDITS file 2021-02-23 12:34:59 -08:00
Andreas Auernhammer
d4b822d697 pkg/etag: add new package for S3 ETag handling (#11577)
This commit adds a new package `etag` for dealing
with S3 ETags.

Even though the ETag is often viewed as the MD5 checksum of
an object, handling S3 ETags correctly is a surprisingly
complex task. While it is true that the ETag corresponds
to the MD5 for the most basic S3 API operations, there are
many exceptions in the case of multipart uploads or encryption.

Worse, some S3 clients expect very specific behavior when
it comes to ETags. For example, some clients expect that the
ETag is a double-quoted string and fail otherwise.
Non-AWS-compliant ETag handling has been a source of many bugs
in the past.

Therefore, this commit adds a dedicated `etag` package that provides
functionality for parsing, generating and converting S3 ETags.
Further, this commit removes the ETag computation from the `hash`
package. Instead, the `hash` package (i.e. `hash.Reader`) should
focus only on computing and verifying the content-sha256.

One core feature of this commit is to provide a mechanism to
communicate a computed ETag from a low-level `io.Reader` to
a high-level `io.Reader`.

This problem occurs when an S3 server receives a request and
has to compute the ETag of the content. However, the server
may also wrap the initial body with several other `io.Reader`,
e.g. when encrypting or compressing the content:
```
   reader := Encrypt(Compress(ETag(content)))
```
In such a case, the ETag should be accessible by the high-level
`io.Reader`.

The `etag` package provides a mechanism to wrap `io.Reader`
implementations such that the `ETag` can be accessed by a type-check.
This technique is applied to the PUT, COPY and Upload handlers.
2021-02-23 12:31:53 -08:00
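
A much-simplified sketch of the wrap-and-type-check mechanism described above; the real API is MinIO's pkg/etag, and the names below are illustrative:

```
package etagsketch

import (
	"crypto/md5"
	"hash"
	"io"
)

// Tagger is the type-check target: any reader in the chain that can
// produce the ETag of the underlying content implements it.
type Tagger interface{ ETag() []byte }

// Reader computes an MD5-based ETag of everything read through it.
type Reader struct {
	io.Reader
	md5 hash.Hash
}

func NewReader(r io.Reader) *Reader {
	h := md5.New()
	return &Reader{Reader: io.TeeReader(r, h), md5: h}
}

func (r *Reader) ETag() []byte { return r.md5.Sum(nil) }

// ETagOf retrieves a computed ETag via a type assertion; in the real
// package, outer layers (encryption, compression) forward this check
// to the reader they wrap.
func ETagOf(r io.Reader) ([]byte, bool) {
	if t, ok := r.(Tagger); ok {
		return t.ETag(), true
	}
	return nil, false
}
```
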
Minio Trusted
1b63291ee2 Update yaml files to latest version RELEASE.2021-02-23T20-05-01Z 2021-02-23 20:28:30 +00:00
Harshavardhana
aa7244a9a4 fix: make sure to convert the error properly in HealBucket() (#11610)
server startup code expects the object layer to convert
errors into proper types, so that when servers are coming
up and quorum is not available, the servers wait on each
other.
2021-02-23 09:23:11 -08:00
Harshavardhana
2a79ea0332 isServerResolvable: it is sufficient to check the server is reachable (#11609)
using isServerResolvable for expiration can lead to chicken-
and-egg problems: a lock might unknowingly expire while a
server is booting up, causing locks to be perpetually expired.
2021-02-22 16:29:53 -08:00
Ritesh H Shukla
6e5c61d917 Skip printing error if empty for reporting bandwidth (#11606) 2021-02-22 13:41:40 -08:00
Aditya Manthramurthy
02e7de6367 LDAP config: fix substitution variables (#11586)
- In the username search filter and username format variables we support %s as a
placeholder for the username.

- In the group search filter we support %s for the username and %d for the full DN
of the username.
2021-02-22 13:20:36 -08:00
Harshavardhana
cec12f4c76 update sha256-simd to v1.0.0 (#11607) 2021-02-22 13:19:53 -08:00
Harshavardhana
da676ac298 remove network calls for getLocalDisks (#11603) 2021-02-22 13:19:44 -08:00
Harshavardhana
18ec933085 fix: for containers use root-disk detection cleverly (#11593)
the current root-disk implementation had issues where root
disk partitions getting modified might race and produce
incorrect results; to avoid this, let's rely on the DeviceID
again and match against it instead.

In the case of containers, `/data` is one extra entity that
needs to be verified as a root disk: because of how the
'overlay' filesystem works, 'overlay' presents a completely
different 'device' id. Using `/data` as another fallback
entity helps because our containers declare a 'VOLUME'
parameter that gives containers an automatic virtual `/data`
pointing at the container root path, which can be either at
`/` or `/var/lib/` (on a different partition)
2021-02-22 10:32:21 -08:00
Harshavardhana
c31d2c3fdc fix: CrawlAndGetDataUsage close pipe() before using a new one (#11600)
also make sure that errors during deserialization close
the reader with the right error type, so that the Write() end
actually sees the final error; this avoids a waitGroup and
the associated waiting.
2021-02-22 10:04:32 -08:00
Harshavardhana
8778828a03 fix: read metadata in O_DIRECT if configured and supported (#11594)
reduce page-cache pressure by moving the entire
read phase of our operations to O_DIRECT. Primarily this
is going to be very useful for chatty metadata operations
such as listing, scanner, ilm and healing, avoiding filling
up the page cache upon repeated runs.
2021-02-22 01:36:17 -08:00
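
A minimal sketch of an O_DIRECT read, assuming github.com/ncw/directio (the library referenced elsewhere in this log); O_DIRECT requires aligned buffers, which the library provides:

```
package directread

import (
	"io"
	"os"

	"github.com/ncw/directio"
)

// readAllDirect reads a (small) metadata file while bypassing the page
// cache entirely, so repeated scanner/listing runs don't pollute it.
func readAllDirect(path string) ([]byte, error) {
	f, err := directio.OpenFile(path, os.O_RDONLY, 0)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var out []byte
	buf := directio.AlignedBlock(directio.BlockSize) // alignment required by O_DIRECT
	for {
		n, err := io.ReadFull(f, buf)
		out = append(out, buf[:n]...)
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			return out, nil // short read at end of file
		}
		if err != nil {
			return nil, err
		}
	}
}
```
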
Sarasa Kisaragi
48b212dd8e Fix HDFS wrong filepath if subpath provided (#11574) 2021-02-20 15:32:18 -08:00
Harshavardhana
be7de911c4 fix: update minio-go to fix an issue with S3 gateway (#11591)
since we have changed our default envs to MINIO_ROOT_USER and
MINIO_ROOT_PASSWORD, which were not supported by the minio-go
credentials package, update minio-go to v7.0.10 for this
support. This also addresses a few bugs where users
had to specify AWS_ACCESS_KEY_ID as well to authenticate
with their S3 backend even if they only used MINIO_ROOT_USER.
2021-02-20 11:10:21 -08:00
Harshavardhana
8cad407e0b fix: Bring support for symlink on regular files on NAS (#11383)
fixes #11203
2021-02-20 00:30:12 -08:00
Poorna Krishnamoorthy
85d2187c20 fix: ETag mismatch for large upload in replica (#11587) 2021-02-20 00:22:17 -08:00
Anis Elleuch
98d3f94996 metrics: Add the number of requests in the waiting queue (#11580)
We can use this metric to check whether there are too many S3 clients in the
queue, which could explain why some of those S3 clients are timing out.

```
minio_s3_requests_waiting_total{server="127.0.0.1:9000"} 9981
```

If max_requests is 10000 then there is a strong possibility that clients
are timing out because of the queue deadline.
2021-02-20 00:21:55 -08:00
mailsmail
173284903b fix incorrect http range in SelectObjectContentHandler (#11585) 2021-02-19 17:55:28 -08:00
WangYuMu
c70240b893 fix incorrect values in sizing guide (#11583) 2021-02-19 10:05:04 -08:00
Harshavardhana
8ba2136e06 Update yaml files to latest version RELEASE.2021-02-19T04-38-02Z 2021-02-18 21:02:25 -08:00
Poorna Krishnamoorthy
2dce5d9442 fix: delete marker permanent delete replication (#11581) 2021-02-18 16:35:37 -08:00
Anis Elleuch
f28b063091 heal: Use healDeleteDangling global const in self healing (#11579)
A small fix, use healDeleteDangling constant instead of 'true' in the
self-healing code.
2021-02-18 15:16:20 -08:00
Klaus Post
90abea5b7a check if kafka producer is connected (#11578)
fixes #11576
2021-02-18 11:14:27 -08:00
Klaus Post
c5b2a8441b fix: faster healing when disk is replaced. (#11520) 2021-02-18 11:06:54 -08:00
Harshavardhana
0f5ca83418 move CI to go1.15 and go1.16 (#11570) 2021-02-18 09:42:32 -08:00
Klaus Post
8a6b13c239 Avoid synchronizing usage writes (#11560)
If the periodic `case <-t.C:` save gets held up for a long time it will end up
synchronizing all disk writes for saving the caches.

We add jitter to per-set writes so they don't sync up, and we don't hold a
lock for the write, since it isn't needed anyway.

If an outage prevents writes for a long while we also add individual 
waits for each disk in case there was a queue.

Furthermore limit the number of buffers kept to 2GiB, since this could get 
huge in large clusters. This will not act as a hard limit but should be enough 
for normal operation.
2021-02-18 00:38:37 -08:00
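
A sketch of the jitter idea with assumed names and durations: each set delays its first save by a random fraction of the interval, so periodic writes across sets do not line up and synchronize disk writes.

```
package jittersave

import (
	"math/rand"
	"time"
)

// saveLoop periodically persists a usage cache; the random initial sleep
// spreads the per-set save points across the interval.
func saveLoop(save func(), interval time.Duration, stop <-chan struct{}) {
	time.Sleep(time.Duration(rand.Int63n(int64(interval)))) // jitter
	t := time.NewTimer(interval)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			save()
			t.Reset(interval)
		case <-stop:
			return
		}
	}
}
```
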
Poorna Krishnamoorthy
8e8a792d9d Allow delete marker replication from replica (#11566)
in the case of active-active replication.

This PR also has the following changes:

- add docs on replication design
- fix corner case of completing versioned delete on a delete marker
  when the target is down and `mc rm --vid` is performed repeatedly. Instead
  the version should still be retained in the `PENDING|FAILED` state until
  replication sync completes.
- remove `s3:Replication:OperationCompletedReplication` and
  `s3:Replication:OperationFailedReplication` from the ObjectCreated
  event types
2021-02-18 00:33:51 -08:00
Harshavardhana
95e0acbb26 fix: allow accountInfo with creds with parentUsers (#11568) 2021-02-17 20:57:17 -08:00
Poorna Krishnamoorthy
55037e6e54 lifecycle:Fix args passed to determine expiry header (#11567) 2021-02-17 19:25:19 -08:00
Harshavardhana
289e1d8b2a fix: reduce crawler memory usage by orders of magnitude (#11556)
currently the crawler waits for an entire readdir call to
return before it processes usage, lifecycle, replication
and healing; instead we should pass the applicator all
the way down to avoid building a special stack for all
the contents of a single directory.

This allows for

- no need to remember the entire list of entries per directory
  before applying the required functions
- no need to wait for entire readdir() call to finish before
  applying the required functions
2021-02-17 15:34:42 -08:00
Anis Elleuch
e07918abe3 lifecycle: Fix expiration header in some cases (#11565)
The expiration header was not correctly computed in the case of noncurrent
versions.

The behavior is fixed in this commit.
2021-02-17 14:51:29 -08:00
Harshavardhana
ffea6fcf09 fix: rename crawler as scanner in config (#11549) 2021-02-17 12:04:11 -08:00
Klaus Post
11b2220696 Don't autoheal if disks are healing (#11558)
Don't spawn automatic healing ops if a disk is healing.
2021-02-17 10:18:12 -08:00
Harshavardhana
aa8450a2a1 fix: parallelize getPoolIdx() for object lookup (#11547) 2021-02-16 19:36:15 -08:00
Harshavardhana
87cce344f6 default to common conditions if conditions not present (#11546)
fixes #11544
2021-02-16 11:56:45 -08:00
Harshavardhana
7d4a2d2b68 fix: multiple pool reads parallelize when possible (#11537) 2021-02-16 02:43:47 -08:00
Minio Trusted
cfc8b92dff Update yaml files to latest version RELEASE.2021-02-14T04-01-33Z 2021-02-14 04:25:52 +00:00
Anis Elleuch
c4e12dc846 fix: in MultiDelete API return MalformedXML upon empty input (#11532)
To follow S3 spec
2021-02-13 09:48:25 -08:00
Harshavardhana
a94a9c37fa fix: support IAM policy handling for wildcard actions (#11530)
This PR fixes

- allow `s3:versionid` as a valid conditional for
  Get, Put, Tags, Object Locking APIs
- allow additional headers missing for object APIs
- allow wildcard based action matching
2021-02-12 23:05:09 -08:00
Harshavardhana
79b6a43467 fix: avoid timed value for network calls (#11531)
additionally simplify timedValue to use an RWMutex
to avoid concurrent calls to DiskInfo() getting
serialized; this has an effect on all calls that
use GetDiskInfo() on the same disks.

Such as getOnlineDisks, getOnlineDisksWithoutHealing
2021-02-12 18:17:52 -08:00
Shireesh Anjal
928de04f7a fix: osinfos incomplete in case of warnings (#11505)
The function used for getting host information
(host.SensorsTemperaturesWithContext) returns warnings in some cases.

Returning with error in such cases means we miss out on the other useful
information already fetched (os info).

If the OS info has been successfully fetched, it should always be
included in the output irrespective of whether the other data (CPU
sensors, users) could be fetched or not.
2021-02-12 17:57:57 -08:00
Poorna Krishnamoorthy
93fd248b52 fix: save ModTime properly in disk cache (#11522)
fix #11414
2021-02-11 19:25:47 -08:00
Harshavardhana
2a7b123895 turn off http2 for TLS setups for now (#11523)
due to lots of issues with x/net/http2, as
well as the bundled h2_bundle.go in the Go
runtime, HTTP/2 should be avoided for now.

https://github.com/golang/go/issues/23559
https://github.com/golang/go/issues/42534
https://github.com/golang/go/issues/43989
https://github.com/golang/go/issues/33425
https://github.com/golang/go/issues/29246

With such a collection of issues present, it
makes sense to remove HTTP/2 support for now
2021-02-11 15:53:04 -08:00
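
One well-known way to disable HTTP/2 on a Go TLS server (a sketch of the general technique, not necessarily how MinIO did it): net/http skips the automatic h2 upgrade when TLSNextProto is a non-nil empty map.

```
package noh2

import (
	"crypto/tls"
	"net/http"
)

// newServer returns an HTTP server that will not negotiate "h2" over ALPN,
// so TLS clients fall back to HTTP/1.1.
func newServer(addr string, h http.Handler) *http.Server {
	return &http.Server{
		Addr:         addr,
		Handler:      h,
		TLSNextProto: map[string]func(*http.Server, *tls.Conn, http.Handler){},
	}
}
```
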
Harshavardhana
b3c56b53fb fix: metacache should only rename entries during cleanup (#11503)
To avoid large delays in metacache cleanup, use rename
instead of recursive delete calls; renames are cheaper.
Move the content to minioMetaTmpBucket and then clean up
this folder once in 24hrs instead.

If the new cache can replace an existing one, we should
let it replace it, since that is currently being saved anyway;
this avoids a pile-up of thousands of metacache entries for
the same listing calls that are not necessary to be stored
on disk.
2021-02-11 10:22:03 -08:00
Minio Trusted
0ef3e359d8 Update yaml files to latest version RELEASE.2021-02-11T08-23-43Z 2021-02-11 08:47:10 +00:00
Poorna Krishnamoorthy
f24d8127ab fix: DeleteMultipleObjectsHandler to process deleted objects correctly (#11515)
DeleteMarkerVersionID, which is returned by the lower layer, should
not be used in the key to look up the ObjectToDelete map
2021-02-10 23:41:41 -08:00
Harshavardhana
7875d472bc avoid notification for non-existent delete objects (#11514)
Skip notifications for objects that might have had
an error during deletion; this also avoids unnecessary
replication attempts on such objects.

Refactor some places to make sure that we have responded
to the client before we

- notify
- schedule for replication
- apply lifecycle etc.
2021-02-10 22:00:42 -08:00
Harshavardhana
711adb9652 remove ipv6 fallbackdelay leave it as default 2021-02-10 17:35:09 -08:00
Poorna Krishnamoorthy
e6b4ea7618 More fixes for delete marker replication (#11504)
continuation of PR#11491 for multiple server pools and
bi-directional replication.

Moving proxying for GET/HEAD to handler level rather than
server pool layer as this was also causing incorrect proxying 
of HEAD.

Also fixing metadata update on CopyObject - minio-go was not passing
source version ID in X-Amz-Copy-Source header
2021-02-10 17:25:04 -08:00
Aditya Manthramurthy
466e95bb59 Return group DN instead of group name in LDAP STS (#11501)
- Additionally, check if the user or their groups has a policy attached during
the STS call.

- Remove the group name attribute configuration value.
2021-02-10 16:52:49 -08:00
Harshavardhana
881f98e511 fix: use getPoolIdx in DeleteObjects() (#11513)
filter out the relevant objects for each pool to
avoid calling further delete operations on
subsequent pools where some of these objects
might not exist.

This is mainly useful to avoid issues
during bi-directional bucket replication.
2021-02-10 14:25:43 -08:00
Harshavardhana
cbf4bb62e0 fix: getPoolIdx decouple from top level options (#11512)
top-level options shouldn't be passed down for
GetObjectInfo() while verifying the objects in
different pools; this is to make sure that
we always get the value from the pool where
the object exists.
2021-02-10 11:45:02 -08:00
Anis Elleuch
682482459d Change the default object content-type to binary/octet-stream (#11508) 2021-02-10 08:56:37 -08:00
Krishnan Parthasarathi
b87fae0049 Simplify PutObjReader for plain-text reader usage (#11470)
This change moves away from a unified constructor for plaintext and encrypted
usage. NewPutObjReader is simplified for the plain-text reader use. For
encrypted reader use, WithEncryption should be called on an initialized PutObjReader.

Plaintext:
func NewPutObjReader(rawReader *hash.Reader) *PutObjReader

The hash.Reader is used to provide payload size and md5sum to the downstream
consumers. This is different from the previous version in that there is no need
to pass nil values for unused parameters.

Encrypted:
func WithEncryption(encReader *hash.Reader,
key *crypto.ObjectKey) (*PutObjReader, error)

This method sets up the encrypted reader along with the key to seal the md5sum
produced by the plain-text reader (already set up when NewPutObjReader was
called).

Usage:
```
  pReader := NewPutObjReader(rawReader)
  // ... other object handler code goes here

  // Prepare the encrypted hashed reader
  pReader, err = pReader.WithEncryption(encReader, objEncKey)

```
2021-02-10 08:52:50 -08:00
Anis Elleuch
b8b44c879f lifecycle: Remove a single delete marker with noncurrent expiry rule (#11444)
NoncurrentVersionExpiry can remove single delete markers according to
S3 spec:

```
The NoncurrentVersionExpiration action in the same Lifecycle
configuration removes noncurrent objects 30 days after they become
noncurrent. Thus, in this example, all object versions are permanently
removed 90 days after object creation. You will have expired object
delete markers, but Amazon S3 detects and removes the expired object
delete markers for you.
```
2021-02-10 08:51:34 -08:00
Harshavardhana
f53d1de87f fix: missing data on multiple columns reading parquet (#11499)
fixes #11413
2021-02-10 08:49:48 -08:00
Shireesh Anjal
5a18d437ce fix: drive hw info incomplete when smartinfo fails (#11509)
Collection of SMART information doesn't work in certain scenarios e.g.
in a container based setup. In such cases, instead of returning an error
(without any data), we should only set the error on the smartinfo
struct, so that other important drive hw info like device, mountpoint,
etc. is retained in the output.
2021-02-10 08:48:14 -08:00
Poorna Krishnamoorthy
93eb549a83 fix: duplicate delete marker attempts in bi-directional replication (#11491) 2021-02-09 15:11:43 -08:00
Harshavardhana
fe3c39b583 use the new errgroup API wherever applicable (#11466)
start using the new errgroup concurrency control
API introduced in #11457
2021-02-09 12:08:25 -08:00
Harshavardhana
84d400487f fix: accountInfo API to cater for federated setups (#11484)
when MinIO is deployed in a federated setup, use etcd 
based listing of buckets to provide appropriate filtering 
of buckets per user.
2021-02-09 09:53:07 -08:00
Shireesh Anjal
3afa499885 fix: empty buckets/objects nodes in new setup (#11493) 2021-02-09 09:52:38 -08:00
Shireesh Anjal
13d015cf93 Fix make target hotfix (#11492)
The code inside the `hotfix` target was overriding the values set at the
beginning of the Makefile, affecting other make targets as well.

For example, running `TAG=mytag make docker` also ends up tagging
the docker image as a hotfix instead of `mytag`.

Using the `eval` function inside the `hotfix` target fixes this.
2021-02-09 09:29:43 -08:00
Krishna Srinivas
876b79b8d8 read-health check endpoint returns success if cluster can serve read requests (#11310) 2021-02-09 01:00:44 -08:00
Ritesh H Shukla
3d74efa6b1 fix: copy object for encrypted objects (#11490) 2021-02-08 19:58:17 -08:00
Harshavardhana
68d299e719 fix: case-insensitive lookups for metadata (#11489)
continuation of #11487, with more changes
2021-02-08 18:12:28 -08:00
Poorna Krishnamoorthy
f9c5636c2d fix: lookup metadata case-insensitively (#11487)
while setting replication options
2021-02-08 16:19:05 -08:00
Klaus Post
9b10118d34 Metacache add abs entry limit (#11483)
Add an absolute limit to the number of metacaches for a bucket.

Delete excess caches if they haven't been handed out in an hour.
2021-02-08 11:36:16 -08:00
Harshavardhana
0e3211f4ad fix: server upgrades should have more descriptive error messages (#11476)
during a rolling upgrade, provide a more descriptive error
message and discourage rolling upgrades in such situations,
allowing users to take action.

additionally also rename `slashpath -> pathutil` to avoid
a slightly mis-pronounced usage of the `path` package.
2021-02-08 10:15:12 -08:00
Harshavardhana
2e4d9124ad honor region specified for remote targets (#11480)
fixes #11472
2021-02-08 08:54:27 -08:00
Harshavardhana
6fef4c21b9 fix: align atomic variables for 32bit arch (#11475)
fixes #11474
2021-02-08 08:51:12 -08:00
Poorna Krishnamoorthy
8e1bbd989a replication:alloc UserDefined map before use (#11478) 2021-02-07 22:01:10 -08:00
Sarasa Kisaragi
152d7cd95b HDFS support keytab (#11473) 2021-02-07 17:29:47 -08:00
Harshavardhana
74080bf108 update CREDITS file 2021-02-07 00:37:12 -08:00
Minio Trusted
647a209c73 Update yaml files to latest version RELEASE.2021-02-07T01-31-02Z 2021-02-07 01:53:27 +00:00
Harshavardhana
0d057c777a remove restriction for multi pool distribution algo 2021-02-06 16:19:05 -08:00
Anis Elleuch
275f7a63e8 lc: Apply DeleteAction correctly to objects (#11471)
When lifecycle decides to Delete an object, and not a version in a
versioned bucket, the code should create a delete marker and not
remove the scanned version.

This commit fixes the issue.
2021-02-06 16:10:33 -08:00
Shireesh Anjal
97fe57bba9 Remove Connections from SysProcess struct (#11373)
The connections info of the processes takes up a huge amount of space,
and is not important for adding any useful health checks. Removing it
will significantly reduce the size of the subnet health report.
2021-02-05 21:32:28 -08:00
Harshavardhana
88c1bb0720 fix: improper ticker usage in goroutines (#11468)
- lock maintenance loop was incorrectly sleeping
  as well as using its ticker badly, leading to
  extra expiration routines getting triggered
  that could flood the network.

- multipart upload cleanup should be based on a
  timer instead of a ticker, to ensure that long-
  running jobs don't get triggered twice (see the
  sketch after this entry).

- make sure to get the right lockers for the object name
2021-02-05 19:23:48 -08:00
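
To illustrate the timer-vs-ticker point above, a minimal sketch (names are hypothetical, not MinIO's code): a ticker keeps firing while a long job runs, so the next tick can trigger the job again immediately; a timer reset only after the work completes cannot.

```
package cleanuploop

import "time"

// cleanupLoop schedules job() with a timer that is reset only after the
// job finishes, so a run longer than the interval can never overlap or
// double-trigger the next run - unlike a ticker, which keeps firing.
func cleanupLoop(job func(), interval time.Duration, stop <-chan struct{}) {
	t := time.NewTimer(interval)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			job()             // may run longer than the interval
			t.Reset(interval) // next run scheduled only after completion
		case <-stop:
			return
		}
	}
}
```
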
Harshavardhana
1fdafaf72f fix: listing for directory object when delimiter is present (#11463)
When you have a hierarchy of prefixes with directory objects,
our current master would list directory objects as prefixes
when a delimiter is present; this is inconsistent with AWS S3

```
aws s3api list-objects --endpoint-url http://localhost:9000 \
    --profile minio --bucket testbucket-v --prefix new/ --delimiter /
{
    "CommonPrefixes": [
        {
            "Prefix": "new/"
        },
        {
            "Prefix": "new/new/"
        }
    ]
}
```

Instead this PR fixes this to behave like AWS S3

```
aws s3api list-objects --endpoint-url http://localhost:9000 \
      --profile minio --bucket testbucket-v --prefix new/ --delimiter /
{
    "Contents": [
        {
            "Key": "new/",
            "LastModified": "2021-02-05T06:27:42.660Z",
            "ETag": "\"d41d8cd98f00b204e9800998ecf8427e\"",
            "Size": 0,
            "StorageClass": "STANDARD",
            "Owner": {
                "DisplayName": "",
                "ID": "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4"
            }
        }
    ],
    "CommonPrefixes": [
        {
            "Prefix": "new/new/"
        }
    ]
}
```
2021-02-05 16:24:40 -08:00
Ritesh H Shukla
5fe4bb6b36 Reduce redundant crawler logging (#11448) 2021-02-05 15:51:11 -08:00
Harshavardhana
99b733d44c fix: deletion of delete marker regression (#11465)
fixes #11440
fixes #11451
fixes #11454
2021-02-05 15:06:23 -08:00
Klaus Post
b4ac05523b Add parallel bucket healing during startup (#11457)
Replaces #11449

Does concurrent healing but limits concurrency to 50 buckets.

Aborts on first error.

`errgroup.Group` is extended to facilitate this in a generic way.
2021-02-05 13:04:26 -08:00
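
The commit extends MinIO's internal errgroup package; a comparable sketch using the standard golang.org/x/sync/errgroup plus a semaphore channel (the heal callback and function names are assumptions for illustration):

```
package healsketch

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// healBuckets heals all buckets concurrently, capping concurrency at 50
// and aborting the remaining work after the first error.
func healBuckets(ctx context.Context, buckets []string, heal func(context.Context, string) error) error {
	g, ctx := errgroup.WithContext(ctx)
	sem := make(chan struct{}, 50) // at most 50 buckets healing at once
	for _, bucket := range buckets {
		bucket := bucket // capture loop variable (pre-Go 1.22 semantics)
		g.Go(func() error {
			sem <- struct{}{}
			defer func() { <-sem }()
			if err := ctx.Err(); err != nil { // bail out fast after an error
				return err
			}
			return heal(ctx, bucket)
		})
	}
	return g.Wait()
}
```
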
Anis Elleuch
c7eacba41c health-info: Add tags to errors (#11412)
We use multiple libraries in health info, but the returned error does
not indicate exactly which library call is failing; hence, add named
tags to returned errors wherever applicable.
2021-02-05 12:37:15 -08:00
Anis Elleuch
1887c25279 xl: Fix feeding NumVersions & SuccessorModTime to lifecycle (#11462)
After a recent refactor where lifecycle started to rely on ObjectInfo to
make decisions, it turned out there are some issues calculating
SuccessorModTime and NumVersions, hence the lifecycle was not working as
expected on a versioned bucket in some cases.

This commit fixes the behavior.
2021-02-05 11:59:08 -08:00
Harshavardhana
c9b0f595b9 support directory objects in listing in certain scenarios (#11452)
When a directory object is presented as the `prefix`
param, our implementation tends to list only the objects
under the `prefix`, not the `prefix` itself. To mimic
AWS S3's flat-key behavior, this PR ensures that if the
`prefix` is a directory object, it is automatically
considered part of the eventual listing result.

fixes #11370
2021-02-05 10:12:25 -08:00
Harshavardhana
8bb580abfc fix: use getObjectNInfo to avoid bytes.Buffer usage (#11428)
a few places were still using the legacy GetObject() call,
which was mainly designed for the client response writer;
use GetObjectNInfo() for internal calls instead.
2021-02-05 09:57:30 -08:00
Harshavardhana
af9cb5f5f2 remove deprecated StandardSCData 2021-02-05 01:34:23 -08:00
Harshavardhana
9497dfd804 docs: add deprecation notice for federation 2021-02-04 17:18:37 -08:00
Harshavardhana
da55a05587 fix aggressive expiration detection (#11446)
for some flaky networks this may be too fast a value;
choose a defensive value, and let this be addressed
properly in a new refactor of dsync with renewal logic.

Also enable a faster fallback delay to cater for misconfigured
IPv6 servers

refer
 - https://golang.org/pkg/net/#Dialer
 - https://tools.ietf.org/html/rfc6555
2021-02-04 16:56:40 -08:00
Harshavardhana
3fc4d6f620 update dependencies for relevant projects (#11445)
- minio-go -> v7.0.8
- ldap/v3 -> v3.2.4
- reedsolomon -> v1.9.11
- sio-go -> v0.3.1
- msgp -> v1.1.5
- simdjson-go, md5-simd, highwayhash
2021-02-04 13:49:52 -08:00
Ritesh H Shukla
67a8f37df0 fix: disk usage capacity metric reporting (#11435) 2021-02-04 12:26:58 -08:00
Anis Elleuch
075c429021 lifcycle: Add more validation to the config (#11382) 2021-02-04 11:26:02 -08:00
ArthurMa
df0c678167 fix: ldap config parsing issue for UserDNSearchFilter (#11437) 2021-02-04 11:07:29 -08:00
Harshavardhana
f108873c48 fix: replication metadata comparison and other fixes (#11410)
- using miniogo.ObjectInfo.UserMetadata is not correct
- using UserTags from Map->String() can change order
- ContentType comparison needs to be removed.
- Compare both lowercase and uppercase key names.
- do not silently error out constructing PutObjectOptions
  if tag parsing fails
- avoid notification for empty object info, failed operations
  should rely on valid objInfo for notification in all
  situations
- optimize copyObject implementation, also introduce a new 
  replication event
- clone ObjectInfo() before scheduling for replication
- add additional headers for comparison
- remove strings.EqualFold comparison avoid unexpected bugs
- fix pool based proxying with multiple pools
- compare only specific metadata

Co-authored-by: Poorna Krishnamoorthy <poornas@users.noreply.github.com>
2021-02-03 20:41:33 -08:00
Andreas Auernhammer
871b450dbd crypto: add support for decrypting SSE-KMS metadata (#11415)
This commit refactors the SSE implementation and add
S3-compatible SSE-KMS context handling.

SSE-KMS differs from SSE-S3 in two main aspects:
 1. The client can request a particular key and
    specify a KMS context as part of the request.
 2. The ETag of an SSE-KMS encrypted object is not
    the MD5 sum of the object content.

This commit only focuses on the 1st aspect.

A client can send an optional SSE context when using
SSE-KMS. This context is remembered by the S3 server
such that the client does not have to specify the
context again (during multipart PUT / GET / HEAD ...).
The crypto context also includes the bucket/object
name to prevent renaming objects at the backend.

Now, AWS S3 behaves as follows:
 - If the user does not provide a SSE-KMS context
   it does not store one - resp. does not include
   the SSE-KMS context header in the response (e.g. HEAD).
 - If the user specifies a SSE-KMS context without
   the bucket/object name then AWS stores the exact
   context the client provided but adds the bucket/object
   name internally. The response contains the KMS context
   without the bucket/object name.
 - If the user specifies a SSE-KMS context with
   the bucket/object name then AWS again stores the exact
   context provided by the client. The response contains
   the KMS context with the bucket/object name.

This commit implements this behavior w.r.t. SSE-KMS.
However, as of now, no such object can be created since
the server rejects SSE-KMS encryption requests.

This commit is one stepping stone for SSE-KMS support.

Co-authored-by: Harshavardhana <harsha@minio.io>
2021-02-03 15:19:08 -08:00
Harshavardhana
f71e192343 avoid listing an empty dir without __XLDIR__ (#11427)
```
minio server /tmp/disk{1...4}
mc mb myminio/testbucket/
mkdir -p /tmp/disk{1..4}/testbucket/test-prefix/
```

This would end up being listed in the current
master; this PR fixes the situation.

If a directory is a leaf dir we should avoid listing it,
since it cannot be deleted anymore with the
DeleteObject and DeleteObjects() API calls,
because we natively support directories now.

Avoid listing it and let healing purge this folder
eventually in the background.
2021-02-03 14:06:54 -08:00
Anis Elleuch
b3f81e75f6 xl: Make it clear when to create delete marker for a non existant object (#11423) 2021-02-03 10:33:43 -08:00
Klaus Post
a71e0483c9 Fix nil disks in getOnlineDisksWithHealing (#11419)
A disk that is skipped for being nil was still being returned.
2021-02-02 17:04:37 -08:00
Bahram Aghaei
f2d49ec21a Update ldap.md: add a link to ldap.go (#11409) 2021-02-02 15:47:04 -08:00
Klaus Post
4a9d9c8585 Update colinmarc/hdfs (#11417)
Updates needed dependency as well.

Fixes #11416
2021-02-02 15:37:30 -08:00
Harshavardhana
c885777ac6 Add support for TCP_QUICKACK (#11369)
TCP_QUICKACK is a setting that allows TCP endpoints
to acknowledge the receipt of data instantly in situations
where they would normally wait to see if more data
would be arriving.

https://assets.extrahop.com/whitepapers/TCP-Optimization-Guide-by-ExtraHop.pdf
2021-02-02 09:44:18 -08:00
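
A Linux-only sketch of the option, set through a net.ListenConfig control hook with golang.org/x/sys/unix; applying it on the listening socket is shown for brevity, and a real server may need to re-apply it on each accepted connection:

```
package quickack

import (
	"context"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

// listen opens a TCP listener with TCP_QUICKACK enabled, so the endpoint
// ACKs received data immediately instead of delaying the ACK.
func listen(addr string) (net.Listener, error) {
	lc := net.ListenConfig{
		Control: func(network, address string, c syscall.RawConn) error {
			var serr error
			err := c.Control(func(fd uintptr) {
				serr = unix.SetsockoptInt(int(fd), unix.IPPROTO_TCP, unix.TCP_QUICKACK, 1)
			})
			if err != nil {
				return err
			}
			return serr
		},
	}
	return lc.Listen(context.Background(), "tcp", addr)
}
```
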
Poorna Krishnamoorthy
fe3aca70c3 Make number of replication workers configurable. (#11379)
The MINIO_API_REPLICATION_WORKERS env var and
`mc admin config set api` allow the number of replication
workers to be configured. Defaults to half the number
of CPUs available.

Co-authored-by: Poorna Krishnamoorthy <poorna@minio.io>
2021-02-02 16:45:06 +05:30
Ritesh H Shukla
c4848f9b4f Add process start time to cluster metrics. (#11405) 2021-02-01 23:02:18 -08:00
Andreas Auernhammer
838d4dafbd gateway: don't use encrypted ETags for If-Match (#11400)
This commit fixes a bug in the S3 gateway that causes
GET requests to fail when the object is encrypted by the
gateway itself.

The gateway was not able to GET the object since it always
specified an `If-Match` pre-condition checking that the object
ETag matches an expected ETag - even for encrypted ETags.

The problem is that an encrypted ETag will never match the ETag
computed by the backend causing the `If-Match` pre-condition
to fail.

This commit fixes this by not sending an `If-Match` header when
the ETag is encrypted. This is acceptable because:
  1. A gateway-encrypted object consists of two objects at the backend
     and there is no way to provide a concurrency-safe implementation
     of two consecutive S3 GETs in the deployment model of the S3
     gateway.
     Ref: S3 gateways are self-contained and isolated - and there may
          be multiple instances at the same time (no lock across
          instances).
  2. Even if the data object changes (concurrent PUT) while gateway
     A has downloaded the metadata object (but not issued the GET to
     the data object => data race) then we don't return invalid data
     to the client since the decryption (of the currently uploaded data)
     will fail - given the metadata of the previous object.
2021-02-01 23:02:08 -08:00
swartz-k
8c663f93f7 fix: typo in chinese docs (#11401) 2021-02-01 18:42:58 -08:00
Minio Trusted
b4cb7edf85 Update yaml files to latest version RELEASE.2021-02-01T22-56-52Z 2021-02-01 23:28:23 +00:00
Anis Elleuch
e96fdcd5ec tagging: Add event notif for PUT object tagging (#11366)
An optimization to avoid a double call during PutObject tagging
2021-02-01 13:52:51 -08:00
Anis Elleuch
6ef678663e xl: Create a delete-marker when no other version exists (#11362)
Currently, it is not possible to create a delete-marker when xl.meta
does not exist (no version has been created for that object yet). This is a
problem for replication and mc mirroring with versioning enabled.

This change also follows the S3 specification.
2021-02-01 13:23:50 -08:00
Harshavardhana
f737a027cf fix: regression introduced in federated listing buckets
the regression was introduced in
6cd255d516; fix it properly.
2021-02-01 12:06:58 -08:00
Ravind Kumar
547efecd82 Reverting TOC due to gluegun incompatibility. Revert this commit once we migrate to new docs site (#11402) 2021-02-01 11:10:07 -08:00
Anis Elleuch
65aa2bc614 ilm: Remove object in HEAD/GET if having an applicable ILM rule (#11296)
Remove an object on the fly if there is a lifecycle rule with delete
expiry action for the corresponding object.
2021-02-01 09:52:11 -08:00
Daniel Jakots
de4421d6a3 fix: build on OpenBSD (#11384)
github.com/ncw/directio doesn't support OpenBSD, but OpenBSD has
syscall.Fsync. (It also has fdatasync:
https://man.openbsd.org/fdatasync
but apparently Golang can't call it).
2021-02-01 09:48:49 -08:00
swartz-k
6c3467e300 fix: docs typo in README_zh_CN (#11375) 2021-01-31 00:11:01 -08:00
Andreas Auernhammer
33554651e9 crypto: deprecate native Hashicorp Vault support (#11352)
This commit deprecates the native Hashicorp Vault
support and removes the legacy Vault documentation.

The native Hashicorp Vault documentation is marked as
outdated and deprecated for over a year now. We give
another 6 months before we start removing Hashicorp Vault
support and show a deprecation warning when a MinIO server
starts with a native Vault configuration.
2021-01-29 17:55:37 -08:00
Minio Trusted
451d9057f3 Update yaml files to latest version RELEASE.2021-01-30T00-20-58Z 2021-01-30 00:45:11 +00:00
Poorna Krishnamoorthy
c82aef0a56 fix ObjectInfo returned by CopyObject (#11377)
erasure CopyObject was returning old metadata
2021-01-29 14:49:18 -08:00
Harshavardhana
1e53bf2789 fix: allow expansion with newer constraints for older setups (#11372)
currently we had a restriction where older setups would
need to follow the previous style of expansion with the same
"stripe" count; we can relax that so that newer pools can be
added to older setups under the newer constraint of a
common parity ratio.
2021-01-29 11:40:55 -08:00
Harshavardhana
d48ff93ba3 add hotfix+commit-id+docker builds 2021-01-28 15:54:41 -08:00
Ritesh H Shukla
c8489a8f0c fix: log notification errors only once (#11350) 2021-01-28 13:40:31 -08:00
Klaus Post
2680772d4b Don't mark remotes online when shutting down (#11368)
Shutting down will mark remotes online when the shutdown has 
started since the context is canceled.

For example:

```
API: SYSTEM()
Time: 16:21:31 CET 01/28/2021
DeploymentID: 313b0065-c5a1-4aa3-9233-07223e77a730
Error: Storage resources are insufficient for the write operation .minio.sys/tmp/ced455c4-3d27-4bdd-95fc-b4707a179b8a/fd934ef3-8fc8-4330-abc1-f039fbbb9700/part.1 (cmd.InsufficientWriteQuorum)
       1: d:\minio\minio\cmd\data-usage.go:56:cmd.storeDataUsageInBackend()
Exiting on signal: INTERRUPT
Client http://127.0.0.1:9002/minio/lock/v5 online
Client http://127.0.0.1:9002/minio/storage/data/distxl/s2/d3/v24 online
Client http://127.0.0.1:9002/minio/storage/data/distxl/s2/d2/v24 online
Client http://127.0.0.1:9002/minio/storage/data/distxl/s2/d1/v24 online
Client http://127.0.0.1:9002/minio/peer/v12 online
Client http://127.0.0.1:9002/minio/storage/data/distxl/s2/d4/v24 online
```

Use a fresh context for health checks.
2021-01-28 13:38:12 -08:00
Harshavardhana
567f7bdd05 fix: verify overlapping domains when > 1 2021-01-28 13:08:53 -08:00
Harshavardhana
6cd255d516 fix: allow updated domain names in federation (#11365)
additionally also disallow overlapping domain names
2021-01-28 11:44:48 -08:00
Aditya Manthramurthy
e79829b5b3 Bind to lookup user after user auth to lookup ldap groups (#11357) 2021-01-27 17:31:21 -08:00
Poorna Krishnamoorthy
fd3f02637a fix: replication regression due to proxying requests (#11356)
A regression was introduced in PR #11165 due to incorrect
proxying for 2-way replication, even when the object was not
yet replicated.

Additionally, fix metadata comparisons when
deciding to do full replication vs metadata copy.

fixes #11340
2021-01-27 11:22:34 -08:00
Harshavardhana
e019f21bda fix: trigger heal if one of the parts are not found (#11358)
Previously we added a heal trigger when bit-rot checks
failed; now extend that to also heal when parts
are not found. This healing only gets triggered
if we can successfully decode the object, i.e. read
quorum is still satisfied for the object.
2021-01-27 10:21:14 -08:00
Anis Elleuch
e9ac7b0fb7 heal: Remove empty directories (#11354)
Since the introduction of __XLDIR__, an empty directory no longer has a
meaning in erasure mode. Make healing remove it wherever it
is found.
2021-01-27 02:19:28 -08:00
Harshavardhana
1debd722b5 rename last remaining Zone->Pool 2021-01-26 20:47:42 -08:00
massintha azamoum
e7f6051f19 Send bucket name to peers when bucket notification is enabled (#11351) 2021-01-26 13:48:28 -08:00
Harshavardhana
6717295e18 fix: rename audit log docs and datastructure 2021-01-26 13:39:55 -08:00
Anis Elleuch
00cff1aac5 audit: per object send pool number, set number and servers per operation (#11233) 2021-01-26 13:21:51 -08:00
Harshavardhana
9722531817 fix: purge LDAP deprecated keys 2021-01-26 09:53:29 -08:00
Harshavardhana
5c6bfae4c7 fix: load credentials from etcd directly when possible (#11339)
under large deployments loading credentials might be
time-consuming; while this is okay and we will not
respond quickly to `mc admin user list` like queries,
it is possible to support `mc admin user info`

just like how we handle authentication, by fetching
the user directly from the persistent store.

additionally support service accounts properly,
reloaded from etcd during watch() - this was missing

This PR is also a halfway remedy for #11305
2021-01-25 20:01:49 -08:00
Aditya Manthramurthy
5f51ef0b40 Add LDAP Lookup-Bind mode (#11318)
This change allows the MinIO server to be configured with a special (read-only)
LDAP account to perform user DN lookups.

The following configuration parameters are added (along with corresponding
environment variables) to LDAP identity configuration (under `identity_ldap`):

- lookup_bind_dn / MINIO_IDENTITY_LDAP_LOOKUP_BIND_DN
- lookup_bind_password / MINIO_IDENTITY_LDAP_LOOKUP_BIND_PASSWORD
- user_dn_search_base_dn / MINIO_IDENTITY_LDAP_USER_DN_SEARCH_BASE_DN
- user_dn_search_filter / MINIO_IDENTITY_LDAP_USER_DN_SEARCH_FILTER

This lookup-bind account is a service account that is used to lookup the user's
DN from their username provided in the STS API. When configured, searching for
the user DN is enabled and configuration of the base DN and filter for search is
required. In this "lookup-bind" mode, the username format is not checked and must
not be specified. This feature is to support Active Directory setups where the
DN cannot be simply derived from the username.

When the lookup-bind is not configured, the old behavior is enabled: the minio
server performs LDAP lookups as the LDAP user making the STS API request and the
username format is checked and configuring it is required.
2021-01-25 14:26:10 -08:00
Harshavardhana
7e266293e6 fix: notify bucket replication after replication/ilm (#11343) 2021-01-25 14:04:41 -08:00
Harshavardhana
eb6871ecd9 fix: LoginSTS should be an inline implementation (#11337)
STS tokens can be obtained by using local APIs
once the remote JWT token is presented; the current
code was not validating the incoming token in the
first place and was incorrectly making a network
operation using that token.

For the most part this always works without issues,
but under adversarial scenarios it allows a client
to hand-craft a request that can reach internal
services without authentication.

This kind of proxying should be avoided before
validating the incoming token.
2021-01-25 10:15:03 -08:00
Harshavardhana
9cdd981ce7 fix: expire locks only on participating lockers (#11335)
additionally also add a new ForceUnlock API, to
allow forcibly unlocking locks if possible.
2021-01-25 10:01:27 -08:00
Anis Elleuch
bd8020aba8 heal: Decode object name in healing result (#11348)
The user can see the __XLDIR__ prefix in mc admin heal output when the command
heals an empty object with a trailing slash. This commit decodes the
name of the object before sending it back to the upper level.
2021-01-25 09:53:37 -08:00
Harshavardhana
09bc49bd51 fix: healBucket across sets should capture results properly (#11341)
healing `.minio.sys/config` returned incorrect quorum errors
across sets, as did healing of the buckets.
2021-01-25 09:45:09 -08:00
Harshavardhana
82f0471d1b honor maxWait heal config when maxIO hits (#11338) 2021-01-25 07:53:12 -08:00
Ritesh H Shukla
0bf2d84f96 update new metrics url docs (#11342) 2021-01-25 01:03:07 -08:00
Harshavardhana
6a95f412c9 avoid double CORS headers in federation (#11334)
CORS proxying adds duplicate headers: one set
by the receiving server, one by the proxied
server. Remove them before proxying
when the 'Origin' header is found.
2021-01-23 18:27:23 -08:00
Ritesh H Shukla
7575c24037 Add open FD and FD limit to cluster metrics (#11328) 2021-01-22 18:30:16 -08:00
Harshavardhana
43f973c4cf fix: check for O_DIRECT support for reads and writes (#11331)
In case the user enables O_DIRECT for reads and the backend
does not support it, we proceed to turn it off instead
and print a warning. This validation avoids any unexpected
downtimes that users may incur.
2021-01-22 15:38:21 -08:00
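
A sketch of such a startup probe, with hypothetical names: try opening a scratch file with O_DIRECT, and if the kernel or filesystem rejects it (for example EINVAL on tmpfs), fall back to buffered I/O and print a warning.

```
package odirectprobe

import (
	"errors"
	"log"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

// supportsODirect reports whether the backend at dir accepts O_DIRECT;
// callers turn O_DIRECT off and warn when it returns false.
func supportsODirect(dir string) bool {
	probe := filepath.Join(dir, ".odirect-probe")
	f, err := os.OpenFile(probe, os.O_CREATE|os.O_WRONLY|unix.O_DIRECT, 0o644)
	if err != nil {
		if errors.Is(err, unix.EINVAL) { // filesystem does not support O_DIRECT
			log.Printf("turning off O_DIRECT: not supported on %s", dir)
		}
		return false
	}
	f.Close()
	os.Remove(probe)
	return true
}
```
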
Harshavardhana
1b453728a3 initialize forwarder after init() to avoid crashes (#11330)
The DNSCache dialer is a global value initialized in
init(), whereas Go evaluates `var =` declarations before
init(); also, we don't need to keep proxy routers as
global entities - register the forwarder as
necessary to avoid crashes.
2021-01-22 15:37:41 -08:00
Harshavardhana
a6c146bd00 validate storage class across pools when setting config (#11320)
```
mc admin config set alias/ storage_class standard=EC:3
```

should only succeed if the parity ratio is valid for all
server pools; if not, we should fail proactively.

This PR also brings other changes, now that
we need to cater for variadic drive counts per pool.

Bonus fixes also various bugs reproduced with

- GetObjectWithPartNumber()
- CopyObjectPartWithOffsets()
- CopyObjectWithMetadata()
- PutObjectPart,PutObject with truncated streams
2021-01-22 12:09:24 -08:00
Harshavardhana
a35cbb3ff3 update healthcheck tests for new prometheus endpoint (#11333) 2021-01-22 11:04:52 -08:00
Harshavardhana
c080f04e66 fix: prometheus metrics link typo update to latest 2021-01-22 01:53:23 -08:00
Klaus Post
2167ba0111 Feed correct part number to sio (#11326)
When offsets were specified we relied on the first part number to be correct.

Recalculate based on offset.
2021-01-21 08:43:03 -08:00
Klaus Post
4e6d717f39 Compress profiling data (#11313)
Trace data can be rather large and compresses fine.

Compress profile data in zip files:

```
277.895.314 before.profiles.zip
152.800.318 after.profiles.zip
```
2021-01-20 15:49:53 -08:00
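
A small sketch of bundling profile dumps into such an archive: archive/zip compresses each entry with Deflate by default, which suits pprof and trace data well (names are illustrative).

```
package profzip

import (
	"archive/zip"
	"io"
	"os"
)

// zipProfiles writes each named profile into dst as a Deflate-compressed
// zip entry.
func zipProfiles(dst string, profiles map[string]io.Reader) error {
	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()

	zw := zip.NewWriter(out)
	for name, r := range profiles {
		w, err := zw.Create(name) // Deflate is the default method
		if err != nil {
			return err
		}
		if _, err := io.Copy(w, r); err != nil {
			return err
		}
	}
	return zw.Close()
}
```
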
Poorna Krishnamoorthy
845e251fa9 fix: crash in notificationsys when peers online is 0 (#11307)
Check if the number of peers online > 0 before using peerClient
2021-01-20 13:13:05 -08:00
Harshavardhana
d1a8f0b786 fix possible crashes on deleteMarker replication (#11308)
A delete marker can have `metaSys` set to nil, which
can lead to crashes after the delete marker has
been healed.

Additionally, also fix the isObjectDangling check
for transitioned objects: those that do not have parts
should be treated similarly to delete markers.
2021-01-20 13:12:12 -08:00
Klaus Post
dac19d7272 Clarify root disk error (#11314)
Make it clearer what the problem is and how to resolve it.
2021-01-20 13:11:42 -08:00
Harshavardhana
7624c8b9bb fix: honor storage class uniformity for multiple pools (#11309) 2021-01-20 01:41:18 -08:00
Klaus Post
19fb1086b2 select: Fix leak on compressed files (#11302)
Properly close gzip reader when done reading

fixes #11300
2021-01-19 17:51:46 -08:00
Harshavardhana
a5e23a40ff fix: allow delayed etcd updates to have fallbacks (#11151)
fixes #11149
2021-01-19 10:05:41 -08:00
Harshavardhana
1ad2b7b699 fix: add stricter validation for erasure server pools (#11299)
During expansion we need to validate if

- new deployment is expanded with newer constraints
- existing deployment is expanded with older constraints
- multiple server pools rejected if they have different
  deploymentID and distribution algo
2021-01-19 10:01:31 -08:00
Harshavardhana
b5049d541f fix: reduce an extra readdir() attempted on non-legacy setups (#11301)
to verify moved content and preserve legacy content,
we have a way to detect the objects through readdir();
this path is not necessary for the most common cases on
newer setups, so avoid readdir() to save multiple system
calls.

also fix the CheckFile behavior for the most common
use case, i.e. without the legacy format.
2021-01-19 10:01:06 -08:00
Harshavardhana
e0055609bb fix: crawler to skip healing the drives in a set being healed (#11274)
If an erasure set had a drive replacement recently, we don't
need to attempt healing on another drive with in the same erasure
set - this would ensure we do not double heal the same content
and also prioritizes usage for such an erasure set to be calculated
sooner.
2021-01-19 02:40:52 -08:00
Klaus Post
e8ce348da1 crypto: Escape JSON text (#10794)
Escape the JSON keys+values from the context.

We do not add the HTML escapes, since that is an extra escape level not mandatory for JSON.
2021-01-19 01:39:04 -08:00
Harshavardhana
6bfa162342 fix go mod tidy, remove unexpected deps 2021-01-18 20:38:23 -08:00
Ritesh H Shukla
b4add82bb6 Updated Prometheus metrics (#11141)
* Add metrics for nodes online and offline
* Add cluster capacity metrics
* Introduce v2 metrics
2021-01-18 20:35:38 -08:00
Harshavardhana
3bda8f755c update gjson dependency 2021-01-18 20:16:18 -08:00
Harshavardhana
3ca6330661 fix: optimize parentDirIsObject by moving isObject to storage layer (#11291)
For objects with `N` prefix depth, this PR reduces `N` such network
operations by converting `CheckFile` into a single bulk operation.

Reduction in chattiness here allows disks to be utilized more
cleanly, while maintaining the same functionality; one
extra volume-check stat() call is also removed.

Update tests to test multiple sets scenario
2021-01-18 12:25:22 -08:00
Klaus Post
3d9000d5b5 Upgrade simdjson to v0.2.0 with 30-50% faster parsing (#11295) 2021-01-18 10:29:50 -08:00
Aditya Manthramurthy
3163a660aa Fix support for multiple LDAP user formats (#11276)
Fixes support for using multiple base DNs for user search in the LDAP directory
allowing users from different subtrees in the LDAP hierarchy to request
credentials.

- The username in the produced credentials is now the full DN of the LDAP user
to disambiguate users in different base DNs.
2021-01-17 21:54:32 -08:00
Harshavardhana
0dadfd1b3d fix: do not compute usage for not found lifecycle operations (#11288)
Currently we would proceed to apply incorrect lifecycle policies
for non-existent objects; this PR handles them appropriately.
2021-01-17 13:58:41 -08:00
Harshavardhana
98f76008c7 fix: bucket lifecycle again to remove Days parameter 2021-01-17 01:50:56 -08:00
Harshavardhana
8da0b7cf03 fix: lifecycle documentation for DeleteMarker 2021-01-17 01:37:25 -08:00
Harshavardhana
4315f93421 fix: make sure parentDirIsObject is used at set level (#11280)
parentDirIsObject was not using a set-level understanding
to check for parent objects; without this, parent objects
can actually reside on a separate set and
conflict.
2021-01-17 01:11:48 -08:00
Harshavardhana
ddb5d7043a fix: standard storage class is allowed to be '0' 2021-01-16 17:32:25 -08:00
Harshavardhana
f903cae6ff Support variable server pools (#11256)
Current implementation requires server pools to have
same erasure stripe sizes, to facilitate same SLA
and expectations.

This PR allows server pools to be variadic, i.e. they
do not have to have the same erasure stripe sizes - instead
they should meet the SLA for parity ratio.

If the parity ratio cannot be guaranteed by the new
server pool, the deployment is rejected i.e server
pool expansion is not allowed.
2021-01-16 12:08:02 -08:00
Minio Trusted
40d59c1961 Update yaml files to latest version RELEASE.2021-01-16T02-19-44Z 2021-01-16 02:43:53 +00:00
Poorna Krishnamoorthy
7090bcc8e0 fix: doc links and delete replication permissions enforcement (#11285) 2021-01-15 15:22:55 -08:00
Harshavardhana
c222bde14b fix: use common logging implementation for DNSCache (#11284) 2021-01-15 14:04:56 -08:00
Harshavardhana
4e06a72632 fix broken URL to k8s operator 2021-01-15 11:55:36 -08:00
Ravind Kumar
16040bc544 Update README for cleaner navigation, improved quickstart experience (#11277) 2021-01-15 11:54:05 -08:00
Harshavardhana
cc2d887e0e fix: whitespace and formatting in replication docs 2021-01-14 22:58:53 -08:00
Poorna Krishnamoorthy
c1b4b24236 Update replication docs (#11279) 2021-01-15 10:22:57 +05:30
Poorna Krishnamoorthy
feaf8dfb9a Fix replication status reported on completion (#11273)
Fixes: #11272
2021-01-13 11:52:28 -08:00
Harshavardhana
628ef081d1 fix: preserve cache calculated previously while moving from v2 to v3 (#11269)
This ensures that all the prometheus monitoring and usage
trackers avoid triggering configured alerts; although we cannot
support v1 to v2 here, we can support v2 to v3.
2021-01-13 09:58:08 -08:00
Harshavardhana
44dff36ff7 listing with prefix prefixed with '/' should be ignored (#11268)
fixes #11265
2021-01-13 09:44:11 -08:00
Poorna Krishnamoorthy
b97d53b29c fix remote target healthcheck (#11267) 2021-01-12 20:48:04 -08:00
Aditya Manthramurthy
00af9881b0 LDAP doc fix: remove repeated paragraph and add emphasis (#11266) 2021-01-12 15:44:31 -08:00
Kanagaraj M
e09196d626 browser: fix file uploads with '#' in the name (#11261)
When the file name or the prefix contains `#`,
whatever comes after it is ignored.

This is fixed by encoding the object path using
encodeURIComponent.

Fixes #11112
2021-01-12 12:47:46 -08:00
Harshavardhana
1a5775e2e8 enable small and large file optimization (#11260)
- for large objects we found that a 1MiB block is optimal
  for reads and writes respectively.
- for small objects we found that a 128KiB block is optimal
  for reads and writes respectively.
2021-01-12 10:20:39 -08:00
Anis Elleuch
e2579b1f5a azure: Use default upload parameters to avoid consuming too much memory (#11251)
A lot of memory is consumed when uploading small files in parallel, use
the default upload parameters and add MINIO_AZURE_UPLOAD_CONCURRENCY for
users to tweak.
2021-01-11 22:48:09 -08:00
Poorna Krishnamoorthy
7824e19d20 Allow synchronous replication if enabled. (#11165)
Synchronous replication can be enabled by setting the --sync
flag while adding a remote replication target.

This PR also adds proxying on GET/HEAD to another node in a
active-active replication setup in the event of a 404 on the current node.
2021-01-11 22:36:51 -08:00
Harshavardhana
317305d5f9 fix: regression in adding new replication targets (#11257) 2021-01-11 09:08:42 -08:00
Harshavardhana
e4e117faab fix: enable xl.json to xl.meta only if legacy drive is found (#11255)
another optimization: renameLegacyMetadata() never needs
to validate the bucket with os.Stat() again, removing
one extra syscall.
2021-01-11 02:27:04 -08:00
George Tsatsis
e8176fe978 Use -new during OpenSSL certificate generation (#11199)
As per https://stackoverflow.com/a/3758443/8156177, OpenSSL expects a certificate via STDIN. 
`-new` will allow a new certificate to be generated instead.
2021-01-11 02:24:50 -08:00
Harshavardhana
828602d672 fix: nancy github URL has changed 2021-01-10 13:37:05 -08:00
Minio Trusted
d9224fbc65 Update yaml files to latest version RELEASE.2021-01-08T21-18-21Z 2021-01-08 21:37:35 +00:00
Klaus Post
51dad1d130 Fix missing GetObjectNInfo Closure (#11243)
Review for missing Close of the value returned from `GetObjectNInfo`.

This was often obscured by readers that auto-unlock when reaching EOF.
2021-01-08 10:12:26 -08:00
Harshavardhana
4593b146be fix: print errors only when metacache status has errors (#11248) 2021-01-08 16:52:19 +05:30
Harshavardhana
f21d650ed4 fix: readData in bulk call using messagepack byte wrappers (#11228)
This PR refactors the way we use buffers for O_DIRECT and
to re-use those buffers for messagepack reader writer.

After some extensive benchmarking we found that not all objects
have this benefit; only objects smaller than 64KiB see
this benefit overall.

Benefits are seen for almost all objects from

1KiB - 32KiB

Beyond this no objects see benefit from the bulk call approach,
as the latency of bytes sent over the wire versus streaming
content directly from disk cancel each other out, with no
remarkable benefit.

All other optimizations include reuse of msgp.Reader,
msgp.Writer using sync.Pool's for all internode calls.
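
A minimal sketch of the reuse pattern with github.com/tinylib/msgp/msgp;
the pool variable and helper names are illustrative, not the actual
identifiers in the codebase:

```go
package internode

import (
	"io"
	"sync"

	"github.com/tinylib/msgp/msgp"
)

var readMsgpReaderPool sync.Pool // holds *msgp.Reader values

// newMsgpReader returns a pooled reader pointed at r, avoiding a fresh
// buffer allocation for every internode call.
func newMsgpReader(r io.Reader) *msgp.Reader {
	if p := readMsgpReaderPool.Get(); p != nil {
		mr := p.(*msgp.Reader)
		mr.Reset(r)
		return mr
	}
	return msgp.NewReader(r)
}

// recycleMsgpReader returns the reader to the pool once the call completes.
func recycleMsgpReader(mr *msgp.Reader) {
	readMsgpReaderPool.Put(mr)
}
```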
2021-01-07 19:27:31 -08:00
Harshavardhana
a4f6705874 expire stale locks when owner is down (#11247)
fixes #11246
2021-01-07 19:16:18 -08:00
Poorna Krishnamoorthy
b35b537e3f Pass versionID to checkReplicateDelete in web handler (#11244) 2021-01-07 15:28:27 -08:00
Harshavardhana
5c52d5ffc7 fix: treat errVolumeNotFound as EOF error in listPathRaw (#11238) 2021-01-07 09:52:53 -08:00
Harshavardhana
f0808bb2e5 fix: getObject fd leaks in transition and replication code (#11237) 2021-01-06 16:13:10 -08:00
Harshavardhana
a6dee21092 initialize IAM store before Init() to avoid any crash (#11236) 2021-01-06 13:40:20 -08:00
Anis Elleuch
6f781c5e7a heal: Reduce whitespace ticker to 5 seconds (#11234)
A 30-second whitespace interval is too long for some setups, which time
out when there is no read activity for a short period; reduce the subnet
health whitespace ticker to 5 seconds, since it has no cost at all.
2021-01-06 13:29:50 -08:00
Harshavardhana
f8ca859790 fix: server/gateway banner formatting (#11230) 2021-01-06 10:38:07 -08:00
Kanagaraj M
b78521cd69 update license verifier to use updated keys (#11197) 2021-01-06 10:17:05 -08:00
Harshavardhana
76e2713ffe fix: use buffers only when necessary for io.Copy() (#11229)
Use separate sync.Pools for writes/reads.

Avoid passing buffers to io.CopyBuffer():
if the writer or reader implement io.WriterTo or io.ReaderFrom
respectively, then it is useless for sync.Pool to allocate
buffers on its own, since those will be completely ignored
by the io.CopyBuffer Go implementation.

Improve this wherever we see it to be optimal.

This allows us to be more efficient on memory usage.
```
// copyBuffer is the actual implementation of Copy and CopyBuffer.
// if buf is nil, one is allocated.
func copyBuffer(dst Writer, src Reader, buf []byte) (written int64, err error) {
	// If the reader has a WriteTo method, use it to do the copy.
	// Avoids an allocation and a copy.
	if wt, ok := src.(WriterTo); ok {
		return wt.WriteTo(dst)
	}
	// Similarly, if the writer has a ReadFrom method, use it to do the copy.
	if rt, ok := dst.(ReaderFrom); ok {
		return rt.ReadFrom(src)
	}
```

From readahead package
```
// WriteTo writes data to w until there's no more data to write or when an error occurs.
// The return value n is the number of bytes written.
// Any error encountered during the write is also returned.
func (a *reader) WriteTo(w io.Writer) (n int64, err error) {
	if a.err != nil {
		return 0, a.err
	}
	n = 0
	for {
		err = a.fill()
		if err != nil {
			return n, err
		}
		n2, err := w.Write(a.cur.buffer())
		a.cur.inc(n2)
		n += int64(n2)
		if err != nil {
			return n, err
		}
```
2021-01-06 09:36:55 -08:00
Harshavardhana
b5d291ea88 fix: rename remaining zone -> pool (#11231) 2021-01-06 09:35:47 -08:00
Klaus Post
eb9172eecb Allow Compression + encryption (#11103) 2021-01-05 20:08:35 -08:00
Klaus Post
97a4c120e9 Add Optimization as a type of change (#11221)
* Add Optimization as a type of change. 
* Make the checkmark affirmative instead of disaffirming
2021-01-05 14:53:52 -08:00
Poorna Krishnamoorthy
64bddf47d8 Pass deletemarker correctly to replicate opts (#11227)
fixes: #11180
2021-01-05 14:12:37 -08:00
Harshavardhana
4ed45ce543 fix: healing buckets during pool expansion (#11224)
fixes #11209
2021-01-05 13:24:22 -08:00
Klaus Post
ad511b0eb8 tests: Fix occasional data race (#11223)
CI tests could trigger a data race.

Servers are generally not expected to reinitialize, so tests could trigger data races when reinitializing and async operations are running.

We add the option to safely reset global vars instead of overwriting.

Fixes races like:

```
WARNING: DATA RACE
Read at 0x00000477ab18 by goroutine 1159:
  github.com/minio/minio/cmd.FileInfo.ToObjectInfo()
      /home/runner/work/minio/minio/cmd/erasure-metadata.go:105 +0x16d
  github.com/minio/minio/cmd.erasureObjects.putObject()
      /home/runner/work/minio/minio/cmd/erasure-object.go:748 +0x13f8
  github.com/minio/minio/cmd.(*erasureObjects).listPath.func3.2()
      /home/runner/work/minio/minio/cmd/metacache-set.go:682 +0x7d3
  github.com/minio/minio/cmd.newMetacacheBlockWriter.func1.2()
      /home/runner/work/minio/minio/cmd/metacache-stream.go:777 +0x1c4
  github.com/minio/minio/cmd.newMetacacheBlockWriter.func1()
      /home/runner/work/minio/minio/cmd/metacache-stream.go:806 +0x614

Previous write at 0x00000477ab18 by goroutine 1269:
  [failed to restore the stack]

Goroutine 1159 (running) created at:
  github.com/minio/minio/cmd.newMetacacheBlockWriter()
      /home/runner/work/minio/minio/cmd/metacache-stream.go:760 +0x112
  github.com/minio/minio/cmd.(*erasureObjects).listPath.func3()
      /home/runner/work/minio/minio/cmd/metacache-set.go:672 +0xe22

Goroutine 1269 (running) created at:
  testing.(*T).Run()
      /opt/hostedtoolcache/go/1.14.13/x64/src/testing/testing.go:1095 +0x537
  testing.runTests.func1()
      /opt/hostedtoolcache/go/1.14.13/x64/src/testing/testing.go:1339 +0xa6
  testing.tRunner()
      /opt/hostedtoolcache/go/1.14.13/x64/src/testing/testing.go:1050 +0x1eb
  testing.runTests()
      /opt/hostedtoolcache/go/1.14.13/x64/src/testing/testing.go:1337 +0x594
  testing.(*M).Run()
      /opt/hostedtoolcache/go/1.14.13/x64/src/testing/testing.go:1252 +0x2ff
  github.com/minio/minio/cmd.TestMain()
      /home/runner/work/minio/minio/cmd/test-utils_test.go:120 +0x44e
  main.main()
      _testmain.go:1408 +0x223
==================
==================
WARNING: DATA RACE
Read at 0x00000477aae8 by goroutine 1159:
  github.com/minio/minio/cmd.(*BucketVersioningSys).Enabled()
      /home/runner/work/minio/minio/cmd/bucket-versioning.go:26 +0x52
  github.com/minio/minio/cmd.FileInfo.ToObjectInfo()
      /home/runner/work/minio/minio/cmd/erasure-metadata.go:105 +0x197
  github.com/minio/minio/cmd.erasureObjects.putObject()
      /home/runner/work/minio/minio/cmd/erasure-object.go:748 +0x13f8
  github.com/minio/minio/cmd.(*erasureObjects).listPath.func3.2()
      /home/runner/work/minio/minio/cmd/metacache-set.go:682 +0x7d3
  github.com/minio/minio/cmd.newMetacacheBlockWriter.func1.2()
      /home/runner/work/minio/minio/cmd/metacache-stream.go:777 +0x1c4
  github.com/minio/minio/cmd.newMetacacheBlockWriter.func1()
      /home/runner/work/minio/minio/cmd/metacache-stream.go:806 +0x614

Previous write at 0x00000477aae8 by goroutine 1269:
  [failed to restore the stack]

Goroutine 1159 (running) created at:
  github.com/minio/minio/cmd.newMetacacheBlockWriter()
      /home/runner/work/minio/minio/cmd/metacache-stream.go:760 +0x112
  github.com/minio/minio/cmd.(*erasureObjects).listPath.func3()
      /home/runner/work/minio/minio/cmd/metacache-set.go:672 +0xe22

Goroutine 1269 (running) created at:
  testing.(*T).Run()
      /opt/hostedtoolcache/go/1.14.13/x64/src/testing/testing.go:1095 +0x537
  testing.runTests.func1()
      /opt/hostedtoolcache/go/1.14.13/x64/src/testing/testing.go:1339 +0xa6
  testing.tRunner()
      /opt/hostedtoolcache/go/1.14.13/x64/src/testing/testing.go:1050 +0x1eb
  testing.runTests()
      /opt/hostedtoolcache/go/1.14.13/x64/src/testing/testing.go:1337 +0x594
  testing.(*M).Run()
      /opt/hostedtoolcache/go/1.14.13/x64/src/testing/testing.go:1252 +0x2ff
  github.com/minio/minio/cmd.TestMain()
      /home/runner/work/minio/minio/cmd/test-utils_test.go:120 +0x44e
  main.main()
      _testmain.go:1408 +0x223
==================
```
2021-01-05 10:45:26 -08:00
Harshavardhana
cb0eaeaad8 feat: migrate to ROOT_USER/PASSWORD from ACCESS/SECRET_KEY (#11185) 2021-01-05 10:22:57 -08:00
Minio Trusted
f3f0041ad0 Update yaml files to latest version RELEASE.2021-01-05T05-22-38Z 2021-01-05 05:42:54 +00:00
Harshavardhana
d0027c3c41 do not use large buffers if not necessary (#11220)
without this change, there is a performance
regression for small object GETs; this change
restores the overall speed to pre-'59d363'
commit levels.
2021-01-04 18:51:52 -08:00
Anis Elleuch
cb7fc99368 handlers: Avoid initializing a struct in each handler call (#11217) 2021-01-04 09:54:22 -08:00
Harshavardhana
a4383051d9 remove/deprecate crawler disable environment (#11214)
with changes present to automatically throttle the crawler
at runtime, there is no need to have an environment
value to disable crawling. crawling is a fundamental
piece for healing, lifecycle and many other features;
there is no good reason anyone would need to disable
it on a production system.

* Apply suggestions from code review
2021-01-04 09:43:31 -08:00
Harshavardhana
e7ae49f9c9 fix: calculate prometheus disks_offline/disks_total correctly (#11215)
fixes #11196
2021-01-04 09:42:09 -08:00
Anis Elleuch
153d4be032 tracing: NumSubscribers() to use atomic instead of mutex (#11219)
globalSubscribers.NumSubscribers() is heavily used in S3 requests and it
uses a mutex; use an atomic load instead since it is faster (a sketch of
the pattern follows).

Co-authored-by: Anis Elleuch <anis@min.io>
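
A minimal sketch of the mutex-to-atomic change; the PubSub type and field
names here are illustrative, not the actual cmd/pubsub types:

```go
package pubsub

import "sync/atomic"

type PubSub struct {
	numSubscribers int32 // maintained atomically by subscribe/unsubscribe
}

func (ps *PubSub) subscribe()   { atomic.AddInt32(&ps.numSubscribers, 1) }
func (ps *PubSub) unsubscribe() { atomic.AddInt32(&ps.numSubscribers, -1) }

// NumSubscribers sits on the S3 request hot path; an atomic load avoids
// acquiring a mutex just to read a single integer.
func (ps *PubSub) NumSubscribers() int32 {
	return atomic.LoadInt32(&ps.numSubscribers)
}
```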
2021-01-04 09:40:30 -08:00
Anis Elleuch
dfd99b6d8f handlers: Little bit more optimizations (#11211) 2021-01-04 00:01:06 -08:00
Harshavardhana
c4b1d394d6 erasure: avoid io.Copy in hotpaths to reduce allocation (#11213) 2021-01-03 16:27:34 -08:00
Harshavardhana
c4131c2798 feat: Small object optimization read data in single bulk call (#11207) 2021-01-03 11:27:57 -08:00
Anis Elleuch
c9d502e6fa parentDirIsObject() to return quickly with nonexistent parent (#11204)
Rewrite the parentIsObject() function. Currently if a client uploads
a/b/c/d, we always check whether c, b, a are actual objects or not.

The new code checks in the reverse order and quits quickly if
a segment doesn't exist.

So if 'a' in 'a/b/c' does not exist in the first place, it returns
false quickly; a sketch follows.
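
A minimal sketch of the early-exit walk; the probe callback is a stand-in
for the actual erasure-layer checks:

```go
package erasure

import "strings"

// parentDirIsObject reports whether any parent of object (e.g. "a/b/c/d")
// is stored as an object. It probes shallowest-first: if "a" does not exist
// at all, no deeper prefix can exist either, so it returns false quickly.
func parentDirIsObject(object string, probe func(path string) (exists, isObject bool)) bool {
	parts := strings.Split(object, "/")
	prefix := ""
	for _, p := range parts[:len(parts)-1] {
		if prefix != "" {
			prefix += "/"
		}
		prefix += p
		exists, isObject := probe(prefix)
		if !exists {
			return false // nothing at this prefix, deeper prefixes cannot exist
		}
		if isObject {
			return true // a parent is an actual object, overlapping names
		}
	}
	return false
}
```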
2021-01-02 12:01:29 -08:00
Anis Elleuch
677e80c0f8 xl: Remove check-dir in ReadVersion (#11200)
The only purpose of the check-dir flag in
ReadVersion is to return 404 when
an object has xl.meta but no data.

This causes an extra call to the disk,
which can be penalizing on a busy system
where disks receive many concurrent accesses.
2021-01-02 10:35:57 -08:00
Harshavardhana
aa85af4d1a fix: missing CopyObjectPart maxClients reorder 2021-01-01 23:07:37 -08:00
Anis Elleuch
ae731d232f trace: Reorder http/trace maxClients wrapping for correct tracing (#11202)
mc admin trace does not show the correct handler name in the output: it
prints `maxClients' for all handlers. The reason is the wrong
order of handler wrapping.
2021-01-01 23:06:07 -08:00
Anis Elleuch
a317d220ed xl-storage: Do not stat bucket assuming the object exists (#11201)
In HEAD/GET, only STAT the bucket if the 
object does not exist to return the correct
error response.
2021-01-01 09:44:36 -08:00
Harshavardhana
3e1221a01c fix: log once updating dataUsageCache versions (#11190)
also reduce usage of *bytes.Buffer for
reading `usage-cache.bin`
2020-12-31 09:45:09 -08:00
Baptiste Mille-Mathias
c1f6ca6697 Fix caddy project url (#11198) 2020-12-31 09:44:07 -08:00
Ritesh H Shukla
36fc2f98ed fix: admin trace throttled requests (#11192) 2020-12-30 21:04:55 -08:00
Ritesh H Shukla
556524c715 Reduce logging when peer is offline (#11184) 2020-12-30 14:38:54 -08:00
Harshavardhana
428f288379 update release Dockerfile string 2020-12-30 08:50:43 -08:00
0xflotus
cde801282d chore: enabled syntax highlighting in docs (#11182) 2020-12-29 17:38:28 -08:00
Ravind Kumar
6cf0008469 fix: docs typo in object lock docs (#11181) 2020-12-29 16:14:10 -08:00
Minio Trusted
7b0330a98c Update yaml files to latest version RELEASE.2020-12-29T23-29-29Z 2020-12-29 23:46:24 +00:00
Harshavardhana
cc457f1798 fix: enhance logging in crawler use console.Debug instead of logger.Info (#11179) 2020-12-29 01:57:28 -08:00
Harshavardhana
ca0d31b09a fix: re-arrange handlers to handle requests on /minio (#11177)
fixes #11175
2020-12-28 17:10:33 -08:00
Harshavardhana
445a9bd827 fix: heal optimizations in crawler to avoid multiple healing attempts (#11173)
Fixes two problems

- Double healing when bitrot is enabled; instead, attempt the heal
  once in applyActions() before lifecycle is applied.

- If applyActions() is successful and getSize() returns a proper
  value, then the object is accounted for and should be removed
  from the oldCache namespace map to avoid double heal attempts.
2020-12-28 10:31:00 -08:00
Harshavardhana
d8d25a308f fix: use HealObject for cleaning up dangling objects (#11171)
main reason is that HealObjects starts a recursive listing
for each object; this can take a really long time on
large namespaces. Instead, avoid the recursive listing and
just perform HealObject() at the prefix.

deletes already handle purging dangling content; we
don't need to achieve this by doing a recursive listing,
which in turn can delay crawling significantly.
2020-12-27 15:42:20 -08:00
Harshavardhana
c19e6ce773 avoid a crash in crawler when lifecycle is not initialized (#11170)
Bonus: for static buffers use bytes.NewReader instead of
bytes.NewBuffer, for a more reader-friendly implementation (see the sketch below)
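
A minimal sketch of the swap; the payload here is illustrative:

```go
package main

import (
	"bytes"
	"io"
	"os"
)

func main() {
	staticData := []byte("usage-cache contents")
	// bytes.NewReader is a read-only view that also implements io.Seeker,
	// io.ReaderAt and io.WriterTo; bytes.NewBuffer carries write machinery
	// that static buffers never need.
	r := bytes.NewReader(staticData)
	io.Copy(os.Stdout, r)
}
```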
2020-12-26 22:58:06 -08:00
Minio Trusted
d3c853a3be Update yaml files to latest version RELEASE.2020-12-26T01-35-54Z 2020-12-26 01:53:30 +00:00
Harshavardhana
59d3639396 fix: inherit heal opts globally, including bitrot settings (#11166)
Bonus: re-use ReadFileStream's internal io.Copy buffers; fixes
lots of chatty allocations when reading metacache readers
under many sustained concurrent listing operations

```
   17.30GB  1.27% 84.80%    35.26GB  2.58%  io.copyBuffer
```
2020-12-24 23:04:03 -08:00
Harshavardhana
027e17468a fix: discarding results do not attempt in-memory metacache writer (#11163)
Optimizations include

- do not write the metacache block if the size of the
  block is '0' and it is the first block - where listing
  is attempted for a transient prefix; this helps to
  avoid creating lots of empty metacache entries for
  `minioMetaBucket`

- avoid the entire initialization sequence of cacheCh,
  metacacheBlockWriter if we are simply going to skip
  them when discardResults is set to true.

- No need to hold write locks while writing metacache
  blocks - each block is unique, per bucket, per prefix,
  and is also written by a single node.
2020-12-24 15:02:02 -08:00
Harshavardhana
45ea161f8d webUI: change listing to 1000 keys from browser UI (#11159)
gateway implementations do not handle maxKeys being
`-1` properly, unlike the MinIO implementation; handle it
by setting an appropriate value (see the sketch below).

fixes #11158
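
A minimal sketch of the clamp; the helper name is illustrative, while the
1000-key page size comes from the commit title:

```go
package browser

func normalizeMaxKeys(maxKeys int) int {
	const listObjectsLimit = 1000
	if maxKeys <= 0 {
		// gateways do not handle -1 ("no limit") properly,
		// so substitute an explicit page size.
		return listObjectsLimit
	}
	return maxKeys
}
```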
2020-12-23 19:58:15 -08:00
Poorna Krishnamoorthy
7b8a456f68 Update lifecycle README docs (#11160)
Removing reference to transition feature in docs as this
feature is being revamped to provide better extensibility
across different cloud targets.
2020-12-23 19:56:55 -08:00
Harshavardhana
b43906f6ee fix: docs typos and keywords 2020-12-23 11:59:20 -08:00
Harshavardhana
6a66f142d4 fix: strict quorum in list should list on all drives (#11157)
the current implementation was incorrect; it in fact
consulted only a read-quorum number of disks. That
value is only meant for picking read-quorum good entries
from all online disks.

This PR fixes this behavior properly.
2020-12-23 09:26:40 -08:00
Harshavardhana
5982965839 fix: re-use bytes.Buffer using sync.Pool (#11156) 2020-12-22 23:22:37 -08:00
Minio Trusted
bfb92a27b7 Update yaml files to latest version RELEASE.2020-12-23T02-24-12Z 2020-12-23 02:43:25 +00:00
Harshavardhana
8565cefe4e fix: allow HTTP2.0 to be always configured 2020-12-22 16:32:58 -08:00
Andreas Auernhammer
8cdf2106b0 refactor cmd/crypto code for SSE handling and parsing (#11045)
This commit refactors the code in `cmd/crypto`
and separates SSE-S3, SSE-C and SSE-KMS.

This commit should not cause any behavior change
except for:
  - `IsRequested(http.Header)`

which now returns the requested type {SSE-C, SSE-S3,
SSE-KMS} and does not consider SSE-C copy headers.

However, SSE-C copy headers alone are anyway not valid.
2020-12-22 09:19:32 -08:00
Harshavardhana
35fafb837b fix: issues with handling delete markers in metacache (#11150)
Additional cases handled

- address situations where healing is not
  triggered on failed writes and deletes.

- consider an object to exist during listing when
  its metadata can be successfully decoded.
2020-12-22 09:16:43 -08:00
Harshavardhana
274bbad5cb fix: select always online peers for remote listing (#11153)
always find the right set of online peers for remote listing;
this may have an effect on listing if a server is down. We
should do this to avoid always performing transient operations
on a bucket->peerClient that is permanently down or down for a
long period.
2020-12-22 09:16:07 -08:00
Harshavardhana
5c451d1690 update x/net/http2 to address few bugs (#11144)
additionally also configure http2 healthcheck
values to quickly detect unstable connections
and let them time out.

also use a single transport for proxying requests
2020-12-21 21:42:38 -08:00
Poorna Krishnamoorthy
c987313431 Encrypt remote target if kms is configured (#11034)
Co-authored-by: Poorna Krishnamoorthy <poorna@minio.io>
2020-12-21 16:21:33 -08:00
Anis Elleuch
2ecaab55a6 admin: ServerInfo returns info without object layer initialized (#11142) 2020-12-21 09:35:19 -08:00
Harshavardhana
3e792ae2a2 fix: change defaults for DNS cache dialer (#11145) 2020-12-21 09:33:29 -08:00
Yingrong Zhao
6df6ac0f34 fix testMultipartUploadFailure to properly cleanup (#11137) 2020-12-21 08:23:27 -08:00
Harshavardhana
4cc500a041 normalize users with double // in accessKeys (#11143)
Bonus fix: use a constant-time compare for secret keys in web-handlers.go:SetAuth() (a sketch follows)
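
A minimal sketch of the constant-time comparison using the standard
crypto/subtle package; the helper name is illustrative:

```go
package auth

import "crypto/subtle"

// secretKeysEqual compares two secret keys without leaking timing
// information about how many leading bytes match.
func secretKeysEqual(a, b string) bool {
	return subtle.ConstantTimeCompare([]byte(a), []byte(b)) == 1
}
```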
2020-12-20 10:09:51 -08:00
Harshavardhana
d8e28830cf fix: allow STS creds for admin accounts to add users (#11138)
Allow rotating creds with privileges to add users

fixes https://github.com/minio/console/issues/529
2020-12-19 13:24:21 -08:00
Harshavardhana
3e16ec457a fix: support user/groups with '/' character (#11127)
NOTE: user/groups with `//` shall be normalized to `/`

fixes #11126
2020-12-19 09:36:37 -08:00
Harshavardhana
e5d378931d fix: delimiter based listing was broken without marker (#11136)
with a missing NextMarker in delimiter-based listing,
top-level prefixes beyond 4500 or the max-keys value
wouldn't be sent back for the client to ask for the next
batch.

reproduced at a customer deployment, create prefixes
as shown below

```
for year in $(seq 2017 2020)
do
    for month in {01..12}
    do for day in {01..31}
       do
           mc -q cp file myminio/testbucket/dir/day_id=$year-$month-$day/;
       done
    done
done
```

Then perform

```
aws s3api --profile minio --endpoint-url http://localhost:9000 list-objects \
    --bucket testbucket --prefix dir/ --delimiter / --max-keys 1000
```

You will see a missing NextMarker; this disallows listing beyond the requested
max-keys and also disallows more than 4500 (maxKeyObjectList) prefixes being listed,
because the client wouldn't know the NextMarker available.

This PR addresses this situation properly by making the implementation
more spec-compatible, i.e. NextMarker can in fact be either an object or a prefix
with delimiter, depending on the input operation.

This issue was introduced after the list caching changes and has been present
for a while.
2020-12-19 09:36:04 -08:00
Harshavardhana
6128304f6e fix: regression introduced in aws-sdk-go tests
the regression was introduced by cce5d7152a,
which incorrectly implemented the tests and fixed a wrong requirement;
CompleteMultipart should never succeed in these tests.
2020-12-18 21:14:27 -08:00
Anis Elleuch
e63a10e505 Profiling does not require the object layer to be initialized (#11133) 2020-12-18 11:51:15 -08:00
Anis Elleuch
5434088c51 replication: Ensure to always use nano precision source modtime (#11135) 2020-12-18 11:37:28 -08:00
Harshavardhana
a773cf48d8 fix: overlapping object and prefix rejected (#11130)
fixes #11129
2020-12-18 08:51:09 -08:00
Minio Trusted
386dd56856 Update yaml files to latest version RELEASE.2020-12-18T03-27-42Z 2020-12-18 03:45:13 +00:00
Harshavardhana
f714840da7 add _MINIO_SERVER_DEBUG env for enabling debug messages (#11128) 2020-12-17 16:52:47 -08:00
Harshavardhana
7c9ef76f66 fix: timer deadlock on expired timers (#11124)
issue was introduced in #11106 the following
pattern

<-t.C // timer fired

if !t.Stop() {
   <-t.C // timer hangs
}

Seems to hang at the last `t.C` line, this
issue happens because a fired timer cannot be
Stopped() anymore and t.Stop() returns `false`
leading to confusing state of usage.

Refactor the code such that use timers appropriately
with exact requirements in place.
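
A minimal sketch of the safe reuse pattern with the standard time package;
runEvery and its arguments are illustrative, not the actual refactor:

```go
package timers

import (
	"context"
	"time"
)

// runEvery demonstrates the fix: once <-t.C has fired, the channel is
// already drained, so calling t.Stop() (which now returns false) and then
// reading t.C again would hang forever. Simply Reset after the receive.
func runEvery(ctx context.Context, d time.Duration, work func()) {
	t := time.NewTimer(d)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			work()
			t.Reset(d) // safe: the receive above already drained the channel
		case <-ctx.Done():
			return
		}
	}
}
```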
2020-12-17 12:35:02 -08:00
Anis Elleuch
cffdb01279 azure/s3 gateways: Pass ETag during GET call to avoid data corruption (#11024)
Both the Azure & S3 gateways ask for object information before returning
the object's stream; however, the object content/length could be
modified in the meantime, which means a corrupted object can be returned.

Use the ETag to ensure that the object was not modified during the GET call
2020-12-17 09:11:14 -08:00
Harshavardhana
970ddb424b feat: treat /var/run/secrets/ on k8s as system cert directory (#11123)
consider `/var/run/secrets/kubernetes.io/serviceaccount`
as system cert directory for container platform.
2020-12-16 18:24:12 -08:00
Harshavardhana
b390a2a0b9 fix: reuser timers in erasure set hotpaths (#11106)
reuse timers in

 - connectDisks() monitoring
 - healMRFRoutine() channel timeouts
2020-12-16 14:33:05 -08:00
Yingrong Zhao
cce5d7152a fix testListMultipartUpload in aws-sdk-go (#11122)
Currently, the test doesn't complete or abort the multipart upload
after testing the list operation. It caused the `cleanup()` to fail since
the object cannot be deleted, which causes the bucket deletion to
fail.

This PR adds a `CompleteMultipartUpload` call to commit the object so
the cleanup can successfully delete the object and bucket.
2020-12-16 13:24:10 -08:00
Harshavardhana
90158f1e33 fix: avoid logging for Heal APIs in FS mode (#11121)
fixes #11120
2020-12-16 09:46:13 -08:00
Minio Trusted
26624552be Update yaml files to latest version RELEASE.2020-12-16T05-05-17Z 2020-12-16 05:23:19 +00:00
Harshavardhana
c606c76323 fix: prioritized latest buckets for crawler to finish the scans faster (#11115)
the crawler should only ListBuckets once, not for each serverPool;
buckets are the same across all pools and sets, and ListBuckets
always returns a unified view. Once ListBuckets returns,
sort the result by creation time to scan the latest buckets earlier,
under the assumption that the latest buckets hold less
content than older buckets, allowing them to be scanned faster
and providing a view closer to the latest state (see the sketch below).
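
A minimal sketch of the newest-first ordering; BucketInfo here is a
stand-in for the actual type:

```go
package crawler

import (
	"sort"
	"time"
)

type BucketInfo struct {
	Name    string
	Created time.Time
}

// sortNewestFirst orders buckets so the most recently created ones, which
// are assumed to hold less content, are scanned before the older ones.
func sortNewestFirst(buckets []BucketInfo) {
	sort.Slice(buckets, func(i, j int) bool {
		return buckets[i].Created.After(buckets[j].Created)
	})
}
```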
2020-12-15 17:34:54 -08:00
Harshavardhana
d674263eb7 fix: remove inode free as part of SameDisk check (#11107) 2020-12-15 11:39:38 -08:00
Klaus Post
e7d3b49a20 metacache: Make very small requests transient (#11109) 2020-12-15 11:25:36 -08:00
Harshavardhana
5df61ab96b fix: remove gorilla/rpc/ deps fully after our fork (#11108) 2020-12-15 11:18:06 -08:00
Poorna Krishnamoorthy
3456b03b12 Ignore ObjectNotFound errors in delete api while enforcing locking (#11114)
AWS does not report this or version not found as errors in the response.
2020-12-15 11:15:49 -08:00
Klaus Post
f6fb27e8f0 Don't copy interesting ids, clean up logging (#11102)
When searching the caches, don't copy the ids; instead, inline the loop.

```
Benchmark_bucketMetacache_findCache-32    	   19200	     63490 ns/op	    8303 B/op	       5 allocs/op
Benchmark_bucketMetacache_findCache-32    	   20338	     58609 ns/op	     111 B/op	       4 allocs/op
```

Add a reasonable, but still simplistic, benchmark.

Bonus - make nicer zero-alloc logging
2020-12-14 13:13:33 -08:00
Harshavardhana
8368ab76aa fix: remove the requirement for healing buckets in ListBucketsHeal (#11098)
With the new refactor of bucket healing, healing a bucket happens
automatically, including its metadata; there is no need to
redundantly heal buckets in ListBucketsHeal as well, so remove
it.
2020-12-14 12:07:07 -08:00
Harshavardhana
3e83643320 lifecycle improvements and additional debug logging (#11096)
Bonus change fix browser assets
2020-12-13 12:05:54 -08:00
Harshavardhana
2eb52ca5f4 fix: heal bucket metadata right before healing bucket (#11097)
an optimization mainly to avoid listing the entire
`.minio.sys/buckets/.minio.sys` directory; this
can get really huge and gets in the way of startup
routines. Contents inside `.minio.sys/buckets/.minio.sys`
are rather transient and do not need to be healed.
2020-12-13 11:57:08 -08:00
Harshavardhana
705e196b6c upgrade event notification dependencies (#11095) 2020-12-12 20:31:28 -08:00
Andreas Auernhammer
7b5223d83d add vulnerability report policy (#11084) 2020-12-12 17:38:37 -08:00
Anis Elleuch
f164085227 xl: Always set root disk to true in test environment (#11094)
Test environments (go test or manual testing) should always consider
the passed disks to be root disks and should not rely on the disk.IsRootDisk()
function. The reason is that the latter can return a false negative
when called on a busy system. However, returning a false negative will
only occur in a testing environment and not in production, so we can
accept this trade-off for now.
2020-12-12 16:10:07 -08:00
Minio Trusted
31bf6f0c25 Update yaml files to latest version RELEASE.2020-12-12T08-39-07Z 2020-12-12 08:56:30 +00:00
Harshavardhana
48191dd748 return NoSuchVersion if invalid version-id is specified (#11091) 2020-12-11 20:44:08 -08:00
Anis Elleuch
c4f29d24da metacache: Ask all disks when drive count is 4 (#11087) 2020-12-11 17:54:31 -08:00
Harshavardhana
db7890660e fix: a crash when disk is nil, safe access on erasureDisks (#11089)
fixes #11088
2020-12-11 16:58:36 -08:00
Poorna Krishnamoorthy
9adc33efbb Return version-id header in DeleteObject response (#11090)
even when the object version is non-existent,

to make this consistent with AWS behavior.

Co-authored-by: Poorna Krishnamoorthy <poorna@minio.io>
2020-12-11 16:58:15 -08:00
Poorna Krishnamoorthy
8f65aba04b ignore NoSuchVersion error in DeleteObjects API (#11086)
Currently, the error response reports NoSuchVersion
for a non-existent version-id, whereas AWS ignores it.
2020-12-11 12:39:09 -08:00
Harshavardhana
3a0082f0f1 fix: TTFB prometheus metrics calculation (#11082)
until now the metric reported the entire call
duration instead of the time to first byte; this PR fixes it
2020-12-10 23:02:25 -08:00
Harshavardhana
14792cdbc6 docs: fix the metrics formatting (#11081) 2020-12-10 18:15:47 -08:00
Harshavardhana
4939987eb8 update deps to latest for some vulnerable deps (#11080)
- github.com/miekg/dns
- update elasticsearch library deps
  to circumvent some aws-sdk-go deps
2020-12-10 13:23:06 -08:00
Klaus Post
4bca62a0bd crawler: Stream bucket usage cache data (#11068)
Stream bucket caches to storage and through RPC calls.
2020-12-10 13:03:22 -08:00
Klaus Post
82e2be4239 metacache: Speed up cleanup operation (#11078)
Perform cleanup operations on copied data. Avoids read-locking
data while determining which caches to keep.

Also, reduce the log(N*N) operation to log(N*M), where M is the number
of caches with the same root or below, when checking potential replacements.
2020-12-10 12:30:28 -08:00
Harshavardhana
4550ac6fff fix: refactor locks to apply them uniquely per node (#11052)
This refactor is done for a few reasons below

- to avoid deadlocks in scenarios when the number
  of nodes is smaller than the actual erasure stripe
  count, wherein N participating local lockers
  can lead to deadlocks across systems.

- avoids expiry routines running 1000s of separate
  network operations and routes per disk, whereas
  each of them is still accessing one single
  local entity.

- it is ideal to have one globalLockServer
  per instance.

- In a 32-node deployment, however, each server
  group is still concentrated towards the
  same set of lockers that participate during
  the write/read phase; unlike the previous minio/dsync
  implementation, this potentially avoids sending
  32 requests - instead we still send at most
  one request per unique node participating in a
  write/read phase.

- reduces overall chattiness on smaller setups.
2020-12-10 07:28:37 -08:00
Harshavardhana
97856bfebf fix: grafana double counting for bucket usage, histogram and objects (#11070) 2020-12-09 20:30:37 -08:00
Harshavardhana
a60a0e52bb ubi-minimal doesn't support arm32, remove from build manifest 2020-12-09 18:56:11 -08:00
Minio Trusted
83a67a1d21 Update yaml files to latest version RELEASE.2020-12-10T01-54-29Z 2020-12-10 02:12:01 +00:00
Harshavardhana
12391ec4ba update nats.io/jwt dependency (#11066)
primarily for a vulnerability report https://nvd.nist.gov/vuln/detail/CVE-2020-26521
2020-12-09 14:30:35 -08:00
Klaus Post
e65ed2e44f listcache: Add path index (#11063)
Add a root path index.

```
Before:
Benchmark_bucketMetacache_findCache-32    	   10000	    730737 ns/op

With excluded prints:
Benchmark_bucketMetacache_findCache-32    	   10000	    207100 ns/op

With the root path:
Benchmark_bucketMetacache_findCache-32    	  705765	      1943 ns/op
```

Benchmark used (not linear):

```Go
func Benchmark_bucketMetacache_findCache(b *testing.B) {
	bm := newBucketMetacache("", false)
	for i := 0; i < b.N; i++ {
		bm.findCache(listPathOptions{
			ID:           mustGetUUID(),
			Bucket:       "",
			BaseDir:      "prefix/" + mustGetUUID(),
			Prefix:       "",
			FilterPrefix: "",
			Marker:       "",
			Limit:        0,
			AskDisks:     0,
			Recursive:    false,
			Separator:    slashSeparator,
			Create:       true,
			CurrentCycle: 0,
			OldestCycle:  0,
		})
	}
}
```

Replaces #11058
2020-12-09 08:37:43 -08:00
Anis Elleuch
d90044b847 federation: Redirect Lifecycle PUT request by bucket name (#11062)
The bucket forwarder handler considers MakeBucket to always be local, but
it mistakes a PUT bucket lifecycle request for a MakeBucket call.

Fix the check for the MakeBucket call by ensuring that the query is empty
in the PUT URL.
2020-12-09 07:25:26 -08:00
Harshavardhana
d8c1f93de6 reject mixed drive situations with drives on root disks (#11057)
till now we used to match the inode number of the root
drive and the drive path minio would use; if they matched
we knew that it's a root disk.

this may not be true in all situations, such as running
inside a container environment where the container might
be mounted from a different partition altogether; root
disk detection might then fail.
2020-12-09 00:27:02 -08:00
Nitish Tiwari
54d243cd98 fix: grafana dashboard calculating online nodes (#11041)
Also use a generic name instead of diff names per revision
2020-12-09 00:26:42 -08:00
Harshavardhana
d74e4642e3 avoid updating nsswitch.conf for redhat UBI images (#11056) 2020-12-08 14:28:12 -08:00
Anis Elleuch
a51488cbaa s3: Fix reading GET with partNumber specified (#11032)
partNumber was miscalculating the start and end of parts when the partNumber
query is specified in the GET request. This commit fixes it and also
fixes the Content-Range header in that case (a sketch of the offset computation follows).
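
A minimal sketch of mapping a 1-based partNumber to absolute byte offsets,
as such a GET must do; the helper and example part sizes are illustrative:

```go
package main

import "fmt"

// partRange returns the inclusive start and end offsets of part n.
func partRange(partSizes []int64, n int) (start, end int64) {
	for i := 0; i < n-1; i++ {
		start += partSizes[i]
	}
	end = start + partSizes[n-1] - 1
	return start, end
}

func main() {
	sizes := []int64{5 << 20, 5 << 20, 3 << 20} // 5MiB, 5MiB, 3MiB parts
	var total int64
	for _, s := range sizes {
		total += s
	}
	start, end := partRange(sizes, 2)
	fmt.Printf("Content-Range: bytes %d-%d/%d\n", start, end, total)
}
```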
2020-12-08 13:12:42 -08:00
Ritesh H Shukla
04848dfa1c Add documentation for bucket replication related metrics (#11055) 2020-12-08 12:48:10 -08:00
Nitish Tiwari
78d18d8fc8 Remove alpine based image in favour or RedHat UBI (#11006) 2020-12-08 11:14:06 -08:00
Harshavardhana
dc819afa44 fix: auto update crawler meta version
PR 038bcd9079 introduced
version '3'; we need to make sure that we do not
print an unexpected error, and instead log a message to
indicate we will auto-update the version.
2020-12-08 10:40:51 -08:00
Harshavardhana
4a564336fe Revert "Add metrics for nodes online and offline (#11050)"
This reverts commit f60bbdf86b.
2020-12-08 09:23:35 -08:00
Anis Elleuch
6b7ced80fe make: Add hotfix target to generate hotfix binaries (#11053)
hotfix target will fetch the release tag prior to the latest commit and create a binary
with the same release tag plus '.hotfix' suffix

e.g.   RELEASE.2020-12-03T05-49-24Z.hotfix
2020-12-08 08:12:13 -08:00
Ritesh H Shukla
f60bbdf86b Add metrics for nodes online and offline (#11050) 2020-12-08 01:06:27 -08:00
Harshavardhana
8c79f87f02 add dynamic config docs (#11048)
Co-authored-by: Eco <41090896+eco-minio@users.noreply.github.com>
2020-12-07 19:02:20 -08:00
Poorna Krishnamoorthy
f3beb1236a Add cache usage, total capacity to prometheus metrics (#11026) 2020-12-07 16:35:11 -08:00
Poorna Krishnamoorthy
934bed47fa Add transition event notification (#11047)
This is a MinIO specific extension to allow monitoring of transition events.
2020-12-07 13:53:28 -08:00
Ritesh H Shukla
038bcd9079 Add replication capacity metrics support in crawler (#10786) 2020-12-07 13:47:48 -08:00
Justin Page
6d70f6a4ac fix: README with missing word (#11035)
Include "to" for clarity.
2020-12-07 11:11:58 -08:00
Harshavardhana
ce93b2681b fix: re-use er.getDisks() properly in certain calls (#11043) 2020-12-07 10:04:07 -08:00
Harshavardhana
8d036ed6d8 fix: allow sub-admin to modify password for other users (#11039)
fixes #11037
2020-12-06 20:36:34 -08:00
Harshavardhana
9c53cc1b83 fix: heal multiple buckets in bulk (#11029)
makes server startup orders of magnitude
faster with a large number of buckets
2020-12-05 13:00:44 -08:00
Harshavardhana
3514e89eb3 support envs as well for new crawler sub-system (#11033) 2020-12-04 21:54:24 -08:00
Nitish Tiwari
6ff12f5f01 Add the dashboard json file (#11028)
This will allow users to contribute to the dashboard as needed.
2020-12-04 16:27:41 -08:00
Harshavardhana
ee2a436a5b fix: release locks if the client timedout (#11030)
in situations where the client had indeed timed out, there
was a potential to falsely think that the lock was still
active.
2020-12-04 11:33:56 -08:00
Klaus Post
a896125490 Add crawler delay config + dynamic config values (#11018) 2020-12-04 09:32:35 -08:00
Harshavardhana
e083471ec4 use argon2 with sync.Pool for better memory management (#11019) 2020-12-03 19:23:19 -08:00
Nitish Tiwari
de9b64834e fix: update grafana dashboard docs (#11023)
Refer to the official Grafana dashboard
2020-12-03 15:56:15 -08:00
Harshavardhana
919441d9c4 update gocredits with new updated dependencies 2020-12-03 11:39:10 -08:00
Harshavardhana
80d31113e5 fix: etcd import paths again depend on v3.4.14 release (#11020)
Due to botched upstream renames of project repositories
and the incomplete migration to go.mod support, our current
dependency version in `go.mod` had bugs, i.e. it was
using commits from the master branch which didn't have
the required fixes present in the release-3.4 branches,

which leads to some rare bugs.

https://github.com/etcd-io/etcd/pull/11477 provides
a workaround for now and we should migrate to this.

release-3.5 eventually claims to fix all of this
properly; until then we cannot use the /v3 import.
2020-12-03 11:35:18 -08:00
Ritesh H Shukla
7e2b79984e Stream bucket bandwidth measurements (#11014) 2020-12-03 11:34:42 -08:00
Minio Trusted
d54cf77356 Update yaml files to latest version RELEASE.2020-12-03T05-49-24Z 2020-12-03 06:05:56 +00:00
Harshavardhana
951b6b203b skip metacache entries healing to speed up startup 2020-12-02 21:30:54 -08:00
Harshavardhana
44e23b7f4f fix: startup being slow - wait only if IOCount > 0 2020-12-02 21:06:17 -08:00
Harshavardhana
c22a387695 fix: building docker images 2020-12-02 17:34:42 -08:00
Minio Trusted
1ab4d6a6aa Update yaml files to latest version RELEASE.2020-12-03T00-03-10Z 2020-12-03 00:24:16 +00:00
Harshavardhana
96c0ce1f0c add support for tuning healing to make healing more aggressive (#11003)
supports `mc admin config set <alias> heal sleep=100ms` to
enable more aggressive healing at certain times.

also optimize some areas that were doing more checks than
necessary when bitrotscan was enabled, and avoid double sleeps
to make healing more predictable.

fixes #10497
2020-12-02 11:12:00 -08:00
Anis Elleuch
fe11e9047d deprecate CommonName from TLS docs (#11017)
CommonName is not supported anymore in Go 1.15

fix the TLS documentation to use subjAltNames
2020-12-02 10:18:39 -08:00
Harshavardhana
ce0e17b62b fix: load certs on windows from registry (#11016)
fixes #11007
2020-12-02 02:23:51 -08:00
ebozduman
303be1866d Adds "x-amz-usr-agent" and "x-id" params to be used in authentication of presignedURL (#10792) 2020-12-02 02:02:49 -08:00
Harshavardhana
a6113b2315 update minio-go version v7.0.6 (#11012) 2020-12-01 19:15:38 -08:00
Sudarshan (Sid)
3ca046b408 Added set keyword in the command set to enable encryption on buckets (#11010) 2020-12-01 16:00:49 -08:00
Harshavardhana
4ec45753e6 rename server sets to server pools 2020-12-01 13:50:33 -08:00
Klaus Post
e6ea5c2703 crawler: Missing folder heal check per set (#10876) 2020-12-01 12:07:39 -08:00
Harshavardhana
790833f3b2 Revert "Support variable server sets (#10314)"
This reverts commit aabf053d2f.
2020-12-01 12:02:29 -08:00
Klaus Post
02aecb2fc1 select: Check if CSV is valid utf8 (#10991)
Check if the first block of data is valid UTF-8 (a sketch follows).

Fixes #10970
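
A minimal sketch of such a first-block check, tolerating a multi-byte rune
split at the block boundary; the helper name is illustrative, not the
actual s3select code:

```go
package selectcsv

import "unicode/utf8"

// validUTF8Block reports whether b looks like valid UTF-8, allowing for an
// incomplete trailing rune (up to utf8.UTFMax-1 bytes) at the block edge.
func validUTF8Block(b []byte) bool {
	for i := 0; i < utf8.UTFMax; i++ {
		if utf8.Valid(b) {
			return true
		}
		if len(b) == 0 {
			return false
		}
		b = b[:len(b)-1] // drop one byte of a possibly split trailing rune
	}
	return false
}
```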
2020-12-01 09:09:06 -08:00
Harshavardhana
7cbca43eb1 fix: allow admins to create users (#11005)
PR #10978 introduced a regression; the root
credential should be allowed to create users
2020-11-30 21:53:23 -08:00
Poorna Krishnamoorthy
2f564437ae Disallow writeback caching with cache_after (#11002)
fixes #10974
2020-11-30 20:53:27 -08:00
Harshavardhana
ae4ded7fd1 fix: s3select tests with new minio-py SDK (#10995) 2020-11-29 13:05:37 -08:00
Harshavardhana
bdd094bc39 fix: avoid sending errors on missing objects on locked buckets (#10994)
make sure multi-object delete returns errors that are AWS S3 compatible
2020-11-28 21:15:45 -08:00
Harshavardhana
e6fa410778 fix: allow accountInfo, addUser and getUserInfo implicit (#10978)
- accountInfo API that returns information about
  the user, access to buckets and the size per bucket
- addUser - a user is allowed to change their own secretKey
- getUserInfo - returns user info if the incoming
  user is the same user requesting their own information
2020-11-27 17:23:57 -08:00
Harshavardhana
350c5ff8f8 fix: update RPM spec using rpmbuild (#10979)
To test this PR

> rpmbuild --undefine=_disable_source_fetch -ba minio.spec
2020-11-27 13:36:02 -08:00
Klaus Post
f139a19238 Upgrade compress and pgzip package (#10992)
Should provide faster decompression for s3 select and other places where it is used.
2020-11-27 10:10:15 -08:00
Harshavardhana
e90efd73a2 add docker ubi buildx script 2020-11-26 21:41:05 -08:00
Harshavardhana
81c907b4bf fix: docker buildx support for multiplatform build (#10983) 2020-11-26 09:47:30 -08:00
Nitish Tiwari
ab49471f33 Add Dockerfile based on Red Hat UBI (#10958)
See https://connect.redhat.com/zones/containers/container-certification-policy-guide
for details

Co-authored-by: Harshavardhana <harsha@minio.io>
2020-11-26 15:52:22 +05:30
Harshavardhana
aabf053d2f Support variable server sets (#10314) 2020-11-25 16:28:47 -08:00
Minio Trusted
f839bb5a0a Update yaml files to latest version RELEASE.2020-11-25T22-36-25Z 2020-11-25 22:53:02 +00:00
Anis Elleuch
91130e884b Avoid sending errors in gob in storage requests (#10977) 2020-11-25 12:42:48 -08:00
Poorna Krishnamoorthy
2ff655a745 Refactor replication, ILM handling in DELETE API (#10945) 2020-11-25 11:24:50 -08:00
Klaus Post
0422eda6a2 metacache: Always close block writer (#10973)
In some cases a writer could be left behind unclosed, leaking compression blocks.

Always close, and set compression concurrency to 2, which should be fine to keep up.
2020-11-25 09:37:30 -08:00
Harshavardhana
31e6f60847 fix: improve error handling in metacache (#10965) 2020-11-25 01:11:22 -08:00
Poorna Krishnamoorthy
7742238495 fix: marshaling stack overflow in noncurrentversion lifecycle config (#10971) 2020-11-24 20:43:11 -08:00
Poorna Krishnamoorthy
3ad41fe89d Add admin API to edit remote bucket target credentials (#10848) 2020-11-24 19:09:05 -08:00
Harshavardhana
f96ed3769f run go mod tidy 2020-11-24 12:04:15 -08:00
Klaus Post
a75fafdbe2 Remove msgp workaround (#10964)
The error in `github.com/philhofer/fwd` was quickly fixed through 
https://github.com/philhofer/fwd/pull/22 - update the dependency 
and remove the workaround.
2020-11-24 11:58:10 -08:00
Klaus Post
a58b7874ef Temporary workaround for msgp skipping (#10960)
Due to https://github.com/philhofer/fwd/issues/20 when skipping a metadata entry that is >2048 bytes and the buffer is full (2048 bytes) the skip will fail with `io.ErrNoProgress`.

Enlarge the buffer so we temporarily make this much more unlikely.

If it still happens we will have to rewrite the skips to reads.

Fixes #10959
2020-11-23 18:51:59 -08:00
Harshavardhana
6990de9c94 fix: dangling object delete shall return object doesn't exist (#10961)
a dangling object, when deleted, means the object doesn't exist
anymore, so we should return appropriate errors; this
allows crawler heal to ensure that it removes the tracker
for dangling objects.
2020-11-23 18:50:53 -08:00
Anis Elleuch
75a8e81f8f azure: Specify different Azure storage in the shell env (#10943)
AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_KEY are used in 
azure CLI to specify the azure blob storage access & secret keys. With this commit, 
it is possible to set them if you want the gateway's own credentials to be
different from the Azure blob credentials.

Co-authored-by: Harshavardhana <harsha@minio.io>
2020-11-23 16:45:56 -08:00
Harshavardhana
519c0077a9 fix: do not return an error for successfully deleted dangling objects (#10938)
dangling objects, when removed via `mc admin heal -r` or crawler
auto heal, would incorrectly return an error - this can interfere
with usage calculation, as the entry size for this would be
returned as `0`; instead, upon success, use the resultant
object size to calculate the final size for the object
and avoid reporting this in the log messages.

Also do not set ObjectSize in healResultItem to '-1';
this has an effect on crawler metrics, calculating 1 byte
less for objects which seem to be missing their `xl.meta`
2020-11-23 09:12:17 -08:00
Harshavardhana
734d07a532 fix: all hosts local and port same should be local erasure setup (#10951)
this is needed to avoid initializing notification peers
that can lead to races in many sub-systems

fixes #10950
2020-11-23 09:07:50 -08:00
Harshavardhana
df93102235 fix: unwrapping issues with os.Is* functions (#10949)
eliminates 3 stat calls, reducing the
overall startup time significantly.
2020-11-23 08:36:49 -08:00
Poorna Krishnamoorthy
39f3d5493b Show Delete replication status header (#10946)
The X-Minio-Replication-Delete-Status header shows the
status of the replication of a permanent delete of a version.

All GETs are disallowed and return 405 on this object version.

In the case of replicating delete markers,
X-Minio-Replication-DeleteMarker-Status shows the status
of replication, and similarly returns 405.

Additionally, this PR adds reporting of delete marker event completion
and updates documentation
2020-11-21 23:48:50 -08:00
Shireesh Anjal
14a7ae8586 Remove platform specific structure definitions (#10935)
Instead of having less/more fields inside a structure depending on the
platform (non-linux/linux), it would be better to have the same standard
definition in all platforms, and certain fields of the structure to be
populated or left unpopulated depending on the platform.
2020-11-21 09:41:33 -08:00
Klaus Post
692ff41ef7 Unwrap network errors (#10934)
Alternative to #10927

Instead of having an upstream fix, unwrap errors when checking network errors.

`errors.As` will also work when the destination is an interface, as checked by the tests (a sketch follows).
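
A minimal sketch of the unwrapping classification with the standard errors
and net packages; the wrapping in main is illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"net"
)

// isTimeout walks the error chain so that errors wrapped with %w are still
// recognized as net.Error timeouts; the destination here is an interface.
func isTimeout(err error) bool {
	var nerr net.Error
	if errors.As(err, &nerr) {
		return nerr.Timeout()
	}
	return false
}

func main() {
	base := &net.DNSError{Err: "i/o timeout", IsTimeout: true}
	wrapped := fmt.Errorf("lookup failed: %w", base)
	fmt.Println(isTimeout(wrapped)) // true
}
```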
2020-11-20 22:55:35 -08:00
Harshavardhana
86409fa93d add audit/admin trace support for browser requests (#10947)
To support this functionality we had to fork
the gorilla/rpc package with relevant changes
2020-11-20 22:52:17 -08:00
Shireesh Anjal
7bc47a14cc Rename OBD to Health (#10842)
Also, remove thread stats and openfds from the health report
as we already have process stats and numfds
2020-11-20 12:52:53 -08:00
Dominik Lessel
4a31b31ca6 feat(docker): add a CI/CD Dockerfile, which starts the server right away (#10933) 2020-11-20 11:27:43 -08:00
Harshavardhana
9263be8cca docs: fix missing event types in notifications (#10944) 2020-11-20 11:27:27 -08:00
Harshavardhana
73e308079a fix: handle errors appropriately as they are wrapped (#10917) 2020-11-20 10:43:07 -08:00
Poorna Krishnamoorthy
08b24620c0 Display storage-class of transitioned object in HEAD 2020-11-20 09:17:31 -08:00
Harshavardhana
95675b0c9a fix: do not crash PutObjectTags when node is down (#10940)
fixes #10939
2020-11-20 09:10:48 -08:00
Poorna Krishnamoorthy
251c1ef6da Add support for replication of object tags, retention metadata (#10880) 2020-11-19 18:56:09 -08:00
Poorna Krishnamoorthy
0fa430c1da validate service type of target in replication/ilm transition config (#10928) 2020-11-19 18:47:33 -08:00
Poorna Krishnamoorthy
f60b6eb82e fix validation for deletemarker replication on object locked bucket (#10892) 2020-11-19 18:47:19 -08:00
Poorna Krishnamoorthy
1ebf6f146a Add support for ILM transition (#10565)
This PR adds transition support for ILM
to transition data to another MinIO target
represented by a storage class ARN. Subsequent
GET or HEAD for that object will be streamed from
the transition tier. If PostRestoreObject API is
invoked, the transitioned object can be restored for
duration specified to the source cluster.
2020-11-19 18:47:17 -08:00
Harshavardhana
8f7fe0405e fix: delete marker replication should support directories (#10878)
allow directories to be replicated as well, along with
their delete markers, in replication.

Bonus fix: preserve bloom filter updates for directories.
2020-11-19 18:47:12 -08:00
Harshavardhana
9a34fd5c4a Revert "Revert "Add delete marker replication support (#10396)""
This reverts commit 267d7bf0a9.
2020-11-19 18:43:58 -08:00
Minio Trusted
b9e3a8b5ac Update yaml files to latest version RELEASE.2020-11-19T23-48-16Z 2020-11-20 00:06:03 +00:00
Harshavardhana
f794fe79e3 fix: network shutdown was not handle properly (#10927)
fixes a regression introduced in #10859: due
to the error returned by rest.Client being typed,
i.e. *rest.NetworkError, the IsNetworkHostDown function
didn't work as expected to detect network issues.

This in turn aggravated situations where nodes
are disconnected, leading to performance loss.
2020-11-19 13:53:49 -08:00
Harshavardhana
0f9e125cf3 fix: check for gateway backend online without http request (#10924)
fixes #10921
2020-11-19 10:38:02 -08:00
Harshavardhana
d778d9493f remove MinIO release tag as part of HTTP Server string (#10929) 2020-11-19 09:16:02 -08:00
Harshavardhana
70d2c2ccc9 skip files that are not erasure objects or directories (#10926)
without this change WalkDir reports errors while
trying to read `format.json/xl.meta` which is a
replicated file
2020-11-19 09:15:09 -08:00
Harshavardhana
9dea7020f0 allow prefix filtering for WalkDir to be optional (#10923) 2020-11-18 12:03:16 -08:00
Klaus Post
990d074f7d metacache: Allow prefix filtering (#10920)
Do listings with prefix filter when bloom filter is dirty.

This will forward the prefix filter to the lister which will make it 
only scan the folders/objects with the specified prefix.

If we have a clean bloom filter we try to build a more generally 
useful cache so in that case, we will list all objects/folders.
2020-11-18 10:44:18 -08:00
Klaus Post
e413f05397 Save listing error async (#10922)
Since the RPC call may have to time out, save the error state
asynchronously so as not to hold up the listing's return.

Fixes #10919
2020-11-18 10:28:22 -08:00
Harshavardhana
d1b1fee080 fix: save healing tracker right before healing (#10915)
this change guards against a situation where the user
accidentally deleted the healing tracker, or drives
were replaced again, within the 10-second window.
2020-11-18 09:34:46 -08:00
Harshavardhana
9738d605e4 increase readdir per block memory to facilitate faster WalkDir (#10908) 2020-11-18 09:21:02 -08:00
Harshavardhana
7ff8128f15 update jstream to latest release v1.0.1 (#10909) 2020-11-17 09:13:28 -08:00
Klaus Post
10099357b6 listcache: Wrap returned errors (#10882)
To give an indication of where they happen
2020-11-17 09:11:59 -08:00
Harshavardhana
80b8ce89a4 remove context deadline from Delete calls (#10901) 2020-11-17 09:09:45 -08:00
Matthias Petermann
0745736e28 Use hw.physmem64 instead of hw.physmem for NetBSD in pkg/sys/getHwPhysmem (#10907)
"hw.physmem" only reports reasonable results on 32 bit systems and is
kept to not break binary compatibility. On other systems, it reports
"-1". "hw.phymem64" reports reasonable results on all systems.

MinIO getHwPhysmem method currently uses the deprecated 
"hw.physmem" key which results in a parse error (due to the -1).

This pull request asks for changing this. The change was tested
successfully on NetBSD/amd64 9.1.
2020-11-16 20:53:47 -08:00
Poorna Krishnamoorthy
0b766288ef fix: send replication completed event notification (#10902) 2020-11-15 22:16:41 -08:00
Rafael Bodill
598ca0569c fix: global in-place update boolean check (#10900) 2020-11-15 13:34:12 -08:00
Poorna Krishnamoorthy
d295ce5708 Fix disk cache usage percent for prometheus (#10898)
Fixes: #10895

Co-authored-by: Poorna Krishnamoorthy <poorna@minio.io>
2020-11-14 19:18:00 -08:00
Klaus Post
b5a3d79bce listobjectversions: Add shortcut for Veeam blocks (#10893)
Add shortcut for `APN/1.0 Veeam/1.0 Backup/10.0`

It requests unique blocks with a specific prefix. We skip 
scanning the parent directory for more objects matching the prefix.
2020-11-13 16:58:20 -08:00
Harshavardhana
17a5ff51ff fix: move context timeout closer to network for Delete calls (#10897)
allowing disconnects to be limited to the drives
themselves instead of disconnecting all drives.
2020-11-13 16:56:45 -08:00
Minio Trusted
0784a0c33a Update yaml files to latest version RELEASE.2020-11-13T20-10-18Z 2020-11-13 20:27:01 +00:00
Minio Trusted
3595cb1267 Update yaml files to latest version RELEASE.2020-11-12T22-33-34Z 2020-11-12 22:51:11 +00:00
Harshavardhana
0bcb1b679d fix: disallow update if dates are same (#10890)
fixes #10889
2020-11-12 14:18:59 -08:00
Klaus Post
a3017c724e Sort directory objects correctly (#10886)
Decode dir objects when listing and sort them correctly.
2020-11-12 13:09:34 -08:00
Omar Alvarez
07859ef48b docs: clarify notifications support for gateways (#10729)
Co-authored-by: Harshavardhana <harsha@minio.io>
2020-11-12 12:19:04 -08:00
Harshavardhana
267d7bf0a9 Revert "Add delete marker replication support (#10396)"
This reverts commit 50c10a5087.

PR is moved to origin/dev branch
2020-11-12 11:43:14 -08:00
cksac
be83dfc52a fix: HDFS list bucket when subpath is provided (#10884) 2020-11-12 11:26:51 -08:00
Harshavardhana
ca88ca753c ignore typed errors correctly in list cache layer (#10879)
bonus: write bucket metadata cache with enough quorum

Possible fix for #10868
2020-11-12 09:28:56 -08:00
Klaus Post
f86d3538f6 Allow deeper sleep (#10883)
Allow each crawler operation to sleep up to 10 seconds on very heavily loaded systems.

This will of course lower the minimum crawler speed, but should be more effective at stopping.
2020-11-12 09:17:56 -08:00
Klaus Post
1c3590078d Skip 0 byte stream writes (#10875)
Don't send a packet when receiving 0 bytes or there is an error recorded
2020-11-11 18:07:40 -08:00
Harshavardhana
aa158228f9 fix: simplify healing metadata objects per set (#10867) 2020-11-11 10:58:16 -08:00
Klaus Post
8747834c69 DeletedObjects: Return objects on lock failure (#10874)
Return objects when locking fails.

<details>
<summary>Panic</summary>

```
: 2020/11/10 04:15:55 http: panic serving 10.10.62.153:44858: runtime error: index out of range [0] with length 0
: goroutine 363537270 [running]:
: net/http.(*conn).serve.func1(0xc019232780)
:         net/http/server.go:1801 +0x147
: panic(0x1cadd60, 0xc001719260)
:         runtime/panic.go:975 +0x47a
: github.com/minio/minio/cmd.criticalErrorHandler.ServeHTTP.func1(0xc0121d1200, 0x210cda0, 0xc0141940e0)
:         github.com/minio/minio/cmd/generic-handlers.go:781 +0x1a8
: panic(0x1cadd60, 0xc001719260)
:         runtime/panic.go:969 +0x1b9
: github.com/minio/minio/cmd.objectAPIHandlers.DeleteMultipleObjectsHandler(0x1e71ce8, 0x1e71cc8, 0x2108420, 0xc0192328c0, 0xc0121d1400)
:         github.com/minio/minio/cmd/bucket-handlers.go:465 +0x2490
: net/http.HandlerFunc.ServeHTTP(...)
:         net/http/server.go:2042
: github.com/minio/minio/cmd.httpTraceAll.func1(0x2108420, 0xc0192328c0, 0xc0121d1400)
:         github.com/minio/minio/cmd/handler-utils.go:353 +0x158
: net/http.HandlerFunc.ServeHTTP(...)
:         net/http/server.go:2042
: github.com/minio/minio/cmd.collectAPIStats.func1(0x2108420, 0xc019232820, 0xc0121d1400)
:         github.com/minio/minio/cmd/handler-utils.go:380 +0xed
: net/http.HandlerFunc.ServeHTTP(...)
:         net/http/server.go:2042
: github.com/minio/minio/cmd.maxClients.func1(0x2108420, 0xc019232820, 0xc0121d1400)
:         github.com/minio/minio/cmd/handler-api.go:132 +0x33b
: net/http.HandlerFunc.ServeHTTP(0xc00271d590, 0x2108420, 0xc019232820, 0xc0121d1400)
:         net/http/server.go:2042 +0x44
: github.com/minio/minio/cmd.redirectHandler.ServeHTTP(0x20e2180, 0xc00271d590, 0x2108420, 0xc019232820, 0xc0121d1400)
:         github.com/minio/minio/cmd/generic-handlers.go:192 +0x156
: github.com/minio/minio/cmd.customHeaderHandler.ServeHTTP(0x20e1060, 0xc0141a22b0, 0x21083e0, 0xc01814d2e0, 0xc0121d1400)
:         github.com/minio/minio/cmd/generic-handlers.go:751 +0x162
: github.com/minio/minio/cmd.securityHeaderHandler.ServeHTTP(0x20e0fc0, 0xc0141a22c0, 0x21083e0, 0xc01814d2e0, 0xc0121d1400)
:         github.com/minio/minio/cmd/generic-handlers.go:766 +0x1d6
: github.com/minio/minio/cmd.bucketForwardingHandler.ServeHTTP(0xc0121c7a40, 0x20e1120, 0xc0141a22d0, 0x21083e0, 0xc01814d2e0, 0xc0121d1400)
:         github.com/minio/minio/cmd/generic-handlers.go:624 +0xbf
: github.com/minio/minio/cmd.requestValidityHandler.ServeHTTP(0x20e0f20, 0xc01814d280, 0x21083e0, 0xc01814d2e0, 0xc0121d1400)
:         github.com/minio/minio/cmd/generic-handlers.go:608 +0x42a
: github.com/minio/minio/cmd.httpStatsHandler.ServeHTTP(0x20e10c0, 0xc0141a2300, 0x210cda0, 0xc0141940e0, 0xc0121d1400)
:         github.com/minio/minio/cmd/generic-handlers.go:536 +0xe4
: github.com/minio/minio/cmd.requestSizeLimitHandler.ServeHTTP(0x20e0fe0, 0xc0141a2310, 0x50004000000, 0x210cda0, 0xc0141940e0, 0xc0121d1400)
:         github.com/minio/minio/cmd/generic-handlers.go:68 +0xd4
: github.com/minio/minio/cmd.requestHeaderSizeLimitHandler.ServeHTTP(0x20e10a0, 0xc01814d2a0, 0x210cda0, 0xc0141940e0, 0xc0121d1400)
:         github.com/minio/minio/cmd/generic-handlers.go:93 +0x1b7
: github.com/minio/minio/cmd.crossDomainPolicy.ServeHTTP(0x20e1080, 0xc0141a2320, 0x210cda0, 0xc0141940e0, 0xc0121d1400)
:         github.com/minio/minio/cmd/crossdomain-xml-handler.go:51 +0x82
: github.com/minio/minio/cmd.browserRedirectHandler.ServeHTTP(0x20e0fa0, 0xc0141a2330, 0x210cda0, 0xc0141940e0, 0xc0121d1400)
:         github.com/minio/minio/cmd/generic-handlers.go:276 +0x68
: github.com/minio/minio/cmd.minioReservedBucketHandler.ServeHTTP(0x20e0f00, 0xc0141a2340, 0x210cda0, 0xc0141940e0, 0xc0121d1400)
:         github.com/minio/minio/cmd/generic-handlers.go:344 +0xb8
: github.com/minio/minio/cmd.cacheControlHandler.ServeHTTP(0x20e1020, 0xc0141a2350, 0x210cda0, 0xc0141940e0, 0xc0121d1400)
:         github.com/minio/minio/cmd/generic-handlers.go:303 +0x1ce
: github.com/minio/minio/cmd.timeValidityHandler.ServeHTTP(0x20e0f40, 0xc0141a2360, 0x210cda0, 0xc0141940e0, 0xc0121d1400)
:         github.com/minio/minio/cmd/generic-handlers.go:414 +0x3ca
: github.com/minio/minio/cmd.resourceHandler.ServeHTTP(0x20e1160, 0xc0141a2370, 0x210cda0, 0xc0141940e0, 0xc0121d1400)
:         github.com/minio/minio/cmd/generic-handlers.go:516 +0xab
: github.com/minio/minio/cmd.authHandler.ServeHTTP(0x20e1100, 0xc0141a2380, 0x210cda0, 0xc0141940e0, 0xc0121d1400)
:         github.com/minio/minio/cmd/auth-handler.go:502 +0x2e7
: github.com/minio/minio/cmd.sseTLSHandler.ServeHTTP(0x20e0ee0, 0xc0141a2390, 0x210cda0, 0xc0141940e0, 0xc0121d1400)
:         github.com/minio/minio/cmd/generic-handlers.go:802 +0x79
: github.com/minio/minio/cmd.reservedMetadataHandler.ServeHTTP(0x20e1140, 0xc0141a23a0, 0x210cda0, 0xc0141940e0, 0xc0121d1400)
:         github.com/minio/minio/cmd/generic-handlers.go:139 +0x1b7
: github.com/gorilla/mux.(*Router).ServeHTTP(0xc00073fb00, 0x210cda0, 0xc0141940e0, 0xc0121d1200)
:         github.com/gorilla/mux@v1.8.0/mux.go:210 +0xd3
: github.com/rs/cors.(*Cors).Handler.func1(0x210cda0, 0xc0141940e0, 0xc0121d1200)
:         github.com/rs/cors@v1.7.0/cors.go:219 +0x1b9
: net/http.HandlerFunc.ServeHTTP(0xc0009aece0, 0x210cda0, 0xc0141940e0, 0xc0121d1200)
:         net/http/server.go:2042 +0x44
: github.com/minio/minio/cmd.criticalErrorHandler.ServeHTTP(0x20e2180, 0xc0009aece0, 0x210cda0, 0xc0141940e0, 0xc0121d1200)
:         github.com/minio/minio/cmd/generic-handlers.go:784 +0x85
: github.com/minio/minio/cmd/http.(*Server).Start.func1(0x210cda0, 0xc0141940e0, 0xc0121d1200)
:         github.com/minio/minio/cmd/http/server.go:101 +0x258
: net/http.HandlerFunc.ServeHTTP(0xc000dc4080, 0x210cda0, 0xc0141940e0, 0xc0121d1200)
:         net/http/server.go:2042 +0x44
: net/http.serverHandler.ServeHTTP(0xc000764c60, 0x210cda0, 0xc0141940e0, 0xc0121d1200)
:         net/http/server.go:2843 +0xa3
: net/http.(*conn).serve(0xc019232780, 0x2114720, 0xc03381f6c0)
:         net/http/server.go:1925 +0x8ad
: created by net/http.(*Server).Serve
:         net/http/server.go:2969 +0x36c
```
</details>
2020-11-11 09:14:32 -08:00
Anton Melser
2c1e37197b fix: bad example json for policy in replication docs (#10869) 2020-11-10 17:49:49 -08:00
Poorna Krishnamoorthy
50c10a5087 Add delete marker replication support (#10396)
Delete marker replication is implemented for V2
configuration specified in AWS spec (though AWS
allows it only in the V1 configuration).

This PR also brings in a MinIO only extension of
replicating permanent deletes, i.e. deletes specifying
version id are replicated to target cluster.
2020-11-10 15:24:14 -08:00
Minio Trusted
9f4ad873bc Update yaml files to latest version RELEASE.2020-11-10T21-02-24Z 2020-11-10 21:18:35 +00:00
Steven Reitsma
4683a623dc fix: negative STS IAM token TTL value (#10866) 2020-11-10 12:24:01 -08:00
Klaus Post
06899210a7 Reduce health check output (#10859)
This will make the health check clients 'silent'.
Use `IsNetworkOrHostDown` to determine if the network is ok so it mimics the functionality in the actual client.
2020-11-10 09:28:23 -08:00
Harshavardhana
cbdab62c1e fix: heal user/metadata right away upon server startup (#10863)
this is needed such that we make sure to heal the
users, policies and bucket metadata right away, as
we do listing based on the list cache which only lists
'3' sufficiently good drives; to avoid possibly
losing access to these users upon upgrade, make
sure to heal them.
2020-11-10 09:02:06 -08:00
Harshavardhana
8df6112204 fix: avoid divide by zero error single node distributed setup (#10862) 2020-11-09 20:40:39 -08:00
Anis Elleuch
8e8ddf7233 doc: Add definition of 1KB and 1MB in prometheus (#10857) 2020-11-09 10:05:01 -08:00
Klaus Post
311ab43d4c Upgrade msgp to official release (#10858)
Code identical, but official release.
2020-11-09 07:52:24 -08:00
Harshavardhana
97692bc772 re-route requests if IAM is not initialized (#10850) 2020-11-07 21:03:06 -08:00
Minio Trusted
21016265e5 Update yaml files to latest version RELEASE.2020-11-06T23-17-07Z 2020-11-06 23:34:19 +00:00
Steven Reitsma
54120107ce fix: infinite loop in cleanupStaleUploads of encrypted MPUs (#10845)
fixes #10588
2020-11-06 11:53:42 -08:00
Klaus Post
9bf5990ea9 metadata: Invalidate cache if unreadable and not updating (#10844)
If a scanning server shuts down unexpectedly we may have "successful" caches that are incomplete on a set.

In this case mark the cache with an error so it will no longer be handed out.
2020-11-06 08:54:09 -08:00
Steven Reitsma
74f7cf24ae fix: s3 gateway SSE pagination (#10840)
Fixes #10838
2020-11-05 15:04:03 -08:00
Harshavardhana
fb28aa847b fix: add missing deleted key element in multiObjectDelete (#10839)
fixes #10832
2020-11-05 12:47:46 -08:00
Klaus Post
0724205f35 metacache: Add option for life extension (#10837)
Add `MINIO_API_EXTEND_LIST_CACHE_LIFE` that will extend 
the life of generated caches for a while.

This changes caches to remain valid until no updates have been 
received for the specified time plus a fixed margin.

This also changes the caches from being invalidated when the *first* 
set finishes until the *last* set has finished plus the specified time 
has passed.
2020-11-05 11:49:56 -08:00
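As a minimal sketch of the expiry rule described in this commit (helper and parameter names here are hypothetical, not the actual metacache code):

```go
package main

import (
	"fmt"
	"time"
)

// cacheExpired illustrates the rule above: a cache only expires once
// no updates have been received for the extended life plus a fixed margin.
func cacheExpired(lastUpdate time.Time, extendedLife, margin time.Duration) bool {
	return time.Since(lastUpdate) > extendedLife+margin
}

func main() {
	last := time.Now().Add(-20 * time.Minute)
	fmt.Println(cacheExpired(last, 15*time.Minute, time.Minute)) // true
}
```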
Harshavardhana
b72cac4cf3 fix: dangling objects on actual namespace (#10822) 2020-11-05 11:48:55 -08:00
Klaus Post
47d715f642 Update msgp to main branch (#10835)
The main branch has error unwrapping to clean up logs on canceled contexts, etc.

Asked for a new release: https://github.com/tinylib/msgp/issues/282
2020-11-05 08:13:03 -08:00
Klaus Post
bd77f29fc4 Don't replace caches that are receiving updates (#10834)
Keep caches while they are receiving updates.
Move update code to separate function.
2020-11-05 07:34:08 -08:00
Klaus Post
d1e1205036 metacache: Always close the s2 writer (#10836)
The s2 writer could be leaked if there was an error.

Make sure it is always closed.
2020-11-05 07:30:14 -08:00
Harshavardhana
71753e21e0 add missing TTL for STS credentials on etcd (#10828) 2020-11-04 13:06:05 -08:00
Harshavardhana
fde3299bf3 re-use optimized readdir for isDirEmpty() (#10829)
reduces effective memory usage by an order
of magnitude and also increases performance for
small objects
2020-11-04 13:05:21 -08:00
Harshavardhana
1a1f00fa15 fix: use internode data for DisksInfo, VolsInfo in message pack (#10821)
Similar to #10775 for fewer memory allocations, since we use
getOnlineDisks() extensively for listing we should optimize it
further.

Additionally, remove all unused walkers from the storage layer
2020-11-04 10:10:54 -08:00
Bill Thorp
4a1efabda4 Context based AccessKey passing (#10615)
A new field called AccessKey is added to the ReqInfo struct and populated.
Because ReqInfo is added to the context, this allows the AccessKey to be
accessed from 3rd-party code, such as a custom ObjectLayer.

Co-authored-by: Harshavardhana <harsha@minio.io>
Co-authored-by: Kaloyan Raev <kaloyan@storj.io>
2020-11-04 09:13:34 -08:00
Klaus Post
3b88a646ec Add remote online/offline information (#10825)
Log information about remote clients being marked offline.

This will help to identify root causes of failures.
2020-11-04 08:27:32 -08:00
Klaus Post
2294e53a0b Don't retain context in locker (#10515)
Use the context for internal timeouts, but disconnect it from outgoing 
calls so we always receive the results and cancel it remotely.
2020-11-04 08:25:42 -08:00
Klaus Post
f0819cce75 Keep transient lists while they are updating (#10826)
On extremely long running listings keep the transient list 15 minutes after last update instead of using start time.

Also don't do overlap checks on transient lists.
2020-11-04 08:01:33 -08:00
Klaus Post
1e11b4629f Add remote Diskinfo caching (#10824)
Add 1 second remote disk info cache.

Should decrease need for remote calls a great deal due to how actively it is used now.
2020-11-04 08:00:18 -08:00
Harshavardhana
5c72a34fa8 fix: honor delimiter as per AWS S3 spec (#10823) 2020-11-04 07:56:58 -08:00
Klaus Post
b9277c8030 metacache: Add trashcan (#10820)
Add trashcan that keeps recently updated lists after bucket deletion.
All caches were deleted once a bucket was deleted, so caches still running would report errors. Now they are canceled.
Fix `.minio.sys` not being transient.
2020-11-03 12:47:52 -08:00
Harshavardhana
8c76e1353e initialize IAM after etcd has initialized (#10819) 2020-11-03 12:12:30 -08:00
Harshavardhana
ad382799b1 use list cache for Walk() with webUI and quota (#10814)
bring list cache optimizations for web UI
object listing, also FIFO quota enforcement
through list cache as well.
2020-11-03 08:53:48 -08:00
Harshavardhana
68de5a6f6a fix: IAM store fallback to list users and policies from disk (#10787)
Bonus fixes: remove the retry package as it is harder to get
right, and remove context management such that we don't have
to rely on it anymore; instead use a simple Jitter retry.
2020-11-02 17:52:13 -08:00
Harshavardhana
4ea31da889 fix: move list quorum ENV to config (#10804) 2020-11-02 17:21:56 -08:00
Klaus Post
0a796505c1 metacache: Check only one disk for updates (#10809)
Check only one disk for updates.

This will reduce IO while waiting for lists to finish.
2020-11-02 17:20:27 -08:00
Klaus Post
37749f4623 Optimize FileInfo(Version) transfer (#10775)
File Info decoding, in particular, is showing up as a major 
allocator and time consumer for internode data transfers

Switch to message pack for cross-server transfers:

```
MSGP:

Size: 945 bytes

BenchmarkEncodeFileInfoMsgp-32    	 1558444	       866 ns/op	   1.16 MB/s	       0 B/op	       0 allocs/op
BenchmarkDecodeFileInfoMsgp-32    	  479968	      2487 ns/op	   0.40 MB/s	     848 B/op	      18 allocs/op

GOB:

Size: 1409 bytes

BenchmarkEncodeFileInfoGOB-32    	  333339	      3237 ns/op	   0.31 MB/s	     576 B/op	      19 allocs/op
BenchmarkDecodeFileInfoGOB-32    	   20869	     57837 ns/op	   0.02 MB/s	   16439 B/op	     428 allocs/op
```
2020-11-02 17:07:52 -08:00
Klaus Post
86e0d272f3 Reduce WriteAll allocs (#10810)
WriteAll saw 127GB allocs in a 5 minute timeframe for 4MiB buffers 
used by `io.CopyBuffer` even if they are pooled.

Since all writers appear to write byte buffers, just send those 
instead and write directly. The files are opened through the `os` 
package so they have no special properties anyway.

This removes the alloc and copy for each operation.

REST sends content length so a precise alloc can be made.
2020-11-02 16:14:31 -08:00
Harshavardhana
8527f22df1 optimize request URL encoding for internode (#10811)
this reduces allocations by an order of magnitude

Also, revert "erasure: delete dangling objects automatically (#10765)";
it affects list caching and should be investigated.
2020-11-02 15:15:12 -08:00
Anis Elleuch
b456292295 erasure: delete dangling objects automatically (#10765) 2020-11-02 10:49:30 -08:00
Poorna Krishnamoorthy
03fdbc3ec2 Add async caching commit option in diskcache (#10742)
Add a store-and-forward option for single-part
uploads when async mode is enabled with the env
MINIO_CACHE_COMMIT=writeback 

It defaults to `writethrough` if unspecified.
2020-11-02 10:00:45 -08:00
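A minimal sketch of how the mode selection could be read, assuming only the env name from the commit above; the surrounding code is illustrative:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// MINIO_CACHE_COMMIT selects async (writeback) or sync (writethrough)
	// commit of single-part uploads; unset falls back to writethrough.
	mode := os.Getenv("MINIO_CACHE_COMMIT")
	if mode == "" {
		mode = "writethrough"
	}
	fmt.Println("cache commit mode:", mode)
}
```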
Harshavardhana
4c773f7068 re-use remote transports in Peer,Storage,Locker clients (#10788)
use one transport for internode communication
2020-11-02 07:43:11 -08:00
Venkata K Sadineni
d8e07f2c41 go mod update golang.org/x/text to address a CVE (#10805) 2020-11-01 19:42:48 -08:00
Harshavardhana
5412d730c1 simplify monitoring doesn't need to be canceled (#10803)
connect disks monitoring doesn't need to be canceled
upon drive replacement, since we only need to replace
the newly replaced drive.
2020-10-31 14:10:12 -07:00
Klaus Post
fe9f23e632 Recreate bucket metacache if corrupted (#10800)
If bucket metadata cannot be read, clean up the existing cache and create a new one.
2020-10-31 10:26:16 -07:00
Klaus Post
422898d9b3 Clean up metadata cache when deleting bucket (#10802)
Metadata caches were left behind when deleting a bucket.
2020-10-31 09:46:18 -07:00
Harshavardhana
b686bb9c83 fix: replaced drive properly by healing the entire drive (#10799)
Bonus fixes: we do not need to reload the format anymore
as the replaced drive is healed locally; we only need
to ensure that drive heal reloads the drive properly.

We preserve the UUID of the original order, this means
that the replacement in `format.json` doesn't mean that
the drive needs to be reloaded into memory anymore.

fixes #10791
2020-10-31 01:34:48 -07:00
Harshavardhana
5e5cdc581d remove unnecessary logging and move to log once (#10798)
the current master logs way too much when a node
is down, instead log once and move on.
2020-10-30 14:55:50 -07:00
Harshavardhana
02cfa774be allow requests to be proxied when server is booting up (#10790)
when the server is booting up there is a possibility
that users might see '503' because the object layer is
not yet initialized; in that case the request is proxied
to the first neighboring peer which is online.
2020-10-30 12:20:28 -07:00
Krishna Srinivas
3a2f89b3c0 fix: add support for O_DIRECT reads for erasure backends (#10718) 2020-10-30 11:04:29 -07:00
Klaus Post
6135f072d2 Fix invalidated metacaches (#10784)
* Fix caches having EOF marked as a failure.
* Simplify cache updates.
* Provide context for checkMetacacheState failures.
* Log 499 when the client disconnects.
2020-10-30 09:33:16 -07:00
kannappanr
7331659d3d obd: Remove unused log constants (#10778) 2020-10-29 13:00:30 -07:00
Klaus Post
e63a44b734 rest client: Expect context timeouts for locks (#10782)
Add option for rest clients to not mark a remote offline for context timeouts.

This can be used if context timeouts are expected on the call.
2020-10-29 09:52:11 -07:00
Klaus Post
6b14c4ab1e Optimize decryptObjectInfo (#10726)
`decryptObjectInfo` is a significant bottleneck when listing objects.

Reduce the allocations for a significant speedup.

https://github.com/minio/sio/pull/40

```
λ benchcmp before.txt after.txt
benchmark                          old ns/op     new ns/op     delta
Benchmark_decryptObjectInfo-32     24260928      808656        -96.67%

benchmark                          old MB/s     new MB/s     speedup
Benchmark_decryptObjectInfo-32     0.04         1.24         31.00x

benchmark                          old allocs     new allocs     delta
Benchmark_decryptObjectInfo-32     75112          48996          -34.77%

benchmark                          old bytes     new bytes     delta
Benchmark_decryptObjectInfo-32     287694772     4228076       -98.53%
```
2020-10-29 09:34:20 -07:00
Harshavardhana
4bf90ca67f fix: handle a crash when AskDisks is set to -1 (#10777) 2020-10-29 09:25:43 -07:00
Harshavardhana
e0655e24f2 fix: A possible crash when fi.Erasure.Distribution is empty (#10779) 2020-10-28 19:24:01 -07:00
Klaus Post
bfc36aed89 Add update retry limit and compare error by string instead (#10776) 2020-10-28 13:19:53 -07:00
Kaloyan Raev
be7f67268d fix: Do not cleanup range files in cache SaveMetadata when total hits are false (#10728) 2020-10-28 09:23:17 -07:00
Klaus Post
a982baff27 ListObjects Metadata Caching (#10648)
Design: https://gist.github.com/klauspost/025c09b48ed4a1293c917cecfabdf21c

Gist of improvements:

* Cross-server caching and listing will use the same data across servers and requests.
* Lists can be arbitrarily resumed at a constant speed.
* Metadata for all files scanned is stored for streaming retrieval.
* The existing bloom filters controlled by the crawler is used for validating caches.
* Concurrent requests for the same data (or parts of it) will not spawn additional walkers.
* Listing a subdirectory of an existing recursive cache will use the cache.
* All listing operations are fully streamable so the number of objects in a bucket no 
  longer dictates the amount of memory.
* Listings can be handled by any server within the cluster.
* Caches are cleaned up when out of date or superseded by a more recent one.
2020-10-28 09:18:35 -07:00
Minio Trusted
51222cc664 Update yaml files to latest version RELEASE.2020-10-28T08-16-50Z 2020-10-28 08:33:22 +00:00
Krishna Srinivas
f53c5a020e fix: heal object shards with ec.index and ec.distribution mismatches (#10773)
Co-authored-by: Harshavardhana <harsha@minio.io>
2020-10-28 00:10:20 -07:00
Harshavardhana
5b30bbda92 fix: add more protection distribution to match EcIndex (#10772)
allows for stricter validation in picking up the right
set of disks for reconstruction.
2020-10-28 00:09:15 -07:00
Shireesh Anjal
858e2a43df Remove logging info from OBDInfoHandler (#10727)
A lot of logging data is counterproductive. A better implementation with
precise useful log data can be introduced later.
2020-10-27 17:41:48 -07:00
Kaloyan Raev
df9894e275 avoid caching http ranges in background goroutine (#10724) 2020-10-26 23:04:48 -07:00
Minio Trusted
ca77ee1c0e Update yaml files to latest version RELEASE.2020-10-27T04-03-55Z 2020-10-27 04:20:10 +00:00
Krishna Srinivas
592f2f23a3 fix: heal rejects objects with disk re-ordering issue (#10766) 2020-10-26 18:48:47 -07:00
Krishna Srinivas
c49a80db41 fix: use meta.Erasure.Index for GetObject() to reconstruct object (#10764) 2020-10-26 16:19:42 -07:00
Poorna Krishnamoorthy
46275c6547 cache: rename function declarations (#10763) 2020-10-26 15:41:24 -07:00
Poorna Krishnamoorthy
0994ed9783 cache: fix call in GetObjectNInfo (#10762)
Fixes: #10751
2020-10-26 12:30:40 -07:00
Anis Elleuch
eb95353cb1 fix: Get/HeadObject return 404 on non quorum objects (#10753) 2020-10-26 10:30:46 -07:00
Harshavardhana
029758cb20 fix: retain the previous UUID for newly replaced drives (#10759)
only newly replaced drives get the new `format.json`;
this avoids disks reloading their in-memory reference
format and ensures that drives come online without
such a reload.

keeping the reference format intact means UUIDs
never change once they are formatted.
2020-10-26 10:29:29 -07:00
Harshavardhana
649035677f fix: arm64 Dockerfile with missing \ for multiple ENVs
fixes #10758
2020-10-25 13:52:55 -07:00
Harshavardhana
646d6917ed turn-off checking for updates completely if MINIO_UPDATE=off (#10752) 2020-10-24 22:39:44 -07:00
Harshavardhana
d9db7f3308 expire lockers if lockers are offline (#10749)
lockers currently might leave stale locks behind,
waiting in unknown ways for downed lockers.

the locker check interval is high enough to safely
clean up stale locks.
2020-10-24 13:23:16 -07:00
Harshavardhana
6a8c62f9fd make sure to preserve UUID from reference format (#10748)
reference format should be source of truth
for inconsistent drives which reconnect,
add them back to their original position

remove automatic fix for existing offline
disk uuids
2020-10-24 13:23:08 -07:00
Harshavardhana
4442382c16 update STS examples to use latest v7 APIs 2020-10-24 11:16:59 -07:00
Anis Elleuch
00124c56d9 erasure: Commit data before xl.meta in RenameData() (#10734)
This will reduce the chance to have updated xl.meta without data.
2020-10-23 21:54:58 -07:00
Anis Elleuch
2c32c2149e tests: Avoid running TestNSRace in short test mode (#10735) 2020-10-23 21:23:12 -07:00
Harshavardhana
734f258878 fix: slow down auto healing more aggressively (#10730)
Bonus fixes

- logging improvements to ensure that we don't use
  `go logger.LogIf` to avoid runtime.Caller missing
  the function name. log where necessary.
- remove unused code at erasure sets
2020-10-22 13:36:24 -07:00
Anis Elleuch
0e0c53bba4 tests: Lower expectation in addr selection in rand cache dialer (#10739)
Test TestDialContextWithDNSCacheRand was failing sometimes because it depends
on a random selection of addresses when testing random DNS resolution from cache.

Lower the addr selection expectation to 10%
2020-10-22 09:35:32 -07:00
Poorna Krishnamoorthy
5cc23ae052 validate if iam store is initialized (#10719)
Fixes panic - regression from d6d770c1b1
2020-10-20 21:28:24 -07:00
Anis Elleuch
6fd088f448 docs: Update ILM doc with versioning features (#10714) 2020-10-20 09:23:27 -07:00
Harshavardhana
d6d770c1b1 initialize object layer right after config has loaded 2020-10-19 22:04:59 -07:00
Harshavardhana
b07df5cae1 initialize IAM as soon as object layer is initialized (#10700)
Allow requests to come in for users as soon as the object
layer and config are initialized; this allows users
to be authenticated sooner and to succeed automatically
on servers which are yet to fully initialize.
2020-10-19 09:54:40 -07:00
Harshavardhana
c107728676 fix: s3 gateway DNS cache initialization (#10706)
fixes #10705
2020-10-19 01:34:23 -07:00
Minio Trusted
ba5215561f Update yaml files to latest version RELEASE.2020-10-18T21-54-12Z 2020-10-18 22:15:30 +00:00
Márk Sági-Kazár
4eb45c9a0f fix: dex getting started guide URL (#10701) 2020-10-17 16:47:28 -07:00
Harshavardhana
187129a907 fix comment in bucket bandwidth package 2020-10-17 00:38:54 -07:00
Anis Elleuch
284a2b9021 ilm: Send delete marker creation event when appropriate (#10696)
Before this commit, the crawler ILM would always send an object delete event
notification, though this is wrong.
2020-10-16 21:22:12 -07:00
Ritesh H Shukla
0b53e30ecb Clean up monitor on delete bucket (#10698) 2020-10-16 17:59:31 -07:00
Harshavardhana
bd2131ba34 add DNS cache support to avoid DNS flooding (#10693)
Go stdlib resolver doesn't support caching DNS
resolutions, since we compile with CGO disabled
we are more prone to DNS flooding for all network
calls to resolve for DNS from the DNS server.

Under various containerized environments such as
VMWare this becomes a problem because there are
no DNS caches available and we may end up overloading
the kube-dns resolver under concurrent I/O.

To circumvent this issue implement a DNSCache resolver
which resolves DNS and caches them for around 10secs
with every 3sec invalidation attempted.
2020-10-16 14:49:05 -07:00
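A rough sketch of such a TTL-based resolver cache; this is not the actual MinIO implementation, just an illustration of the idea (entries served from cache for ~10s, re-resolved afterwards):

```go
package main

import (
	"context"
	"fmt"
	"net"
	"sync"
	"time"
)

// dnsCache is an illustrative TTL cache for host lookups, loosely
// mirroring the idea above: cache resolutions briefly to avoid
// flooding the resolver under concurrent I/O.
type dnsCache struct {
	mu      sync.Mutex
	ttl     time.Duration
	entries map[string]dnsEntry
}

type dnsEntry struct {
	addrs   []string
	expires time.Time
}

func (c *dnsCache) lookup(ctx context.Context, host string) ([]string, error) {
	c.mu.Lock()
	e, ok := c.entries[host]
	c.mu.Unlock()
	if ok && time.Now().Before(e.expires) {
		return e.addrs, nil // served from cache
	}
	addrs, err := net.DefaultResolver.LookupHost(ctx, host)
	if err != nil {
		return nil, err
	}
	c.mu.Lock()
	c.entries[host] = dnsEntry{addrs: addrs, expires: time.Now().Add(c.ttl)}
	c.mu.Unlock()
	return addrs, nil
}

func main() {
	c := &dnsCache{ttl: 10 * time.Second, entries: map[string]dnsEntry{}}
	addrs, err := c.lookup(context.Background(), "localhost")
	fmt.Println(addrs, err)
}
```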
Ritesh H Shukla
73a41a725a Always close response body (#10697) 2020-10-16 12:40:36 -07:00
ebozduman
1aec168c84 fix: azure gateway should reject bucket names with "." (#10635) 2020-10-16 09:30:18 -07:00
Klaus Post
21a549a83b fix: keep MRF channel open to avoid random CI crash (#10686)
There doesn't seem to be any benefit to closing the channel, so just keep 
it open and let it die with the server.
2020-10-16 09:08:51 -07:00
Ritesh H Shukla
8a16a1a1a9 fix: misc fixes for bandwidth reporting and monitoring (#10683)
* Set peer for fetch bandwidth
* Fix the limit for bandwidth that is reported.
* Reduce CPU burn from bandwidth management.
2020-10-16 09:07:50 -07:00
Harshavardhana
ad726b49b4 rename zones to serverSets to avoid terminology conflict (#10679)
we are bringing in availability zones, we should avoid
zones as per server expansion concept.
2020-10-15 14:28:50 -07:00
Anis Elleuch
db2241066b heal: Enable removing dangling delete markers (#10688) 2020-10-15 13:06:40 -07:00
Harshavardhana
f1cc16e788 fix: background heal rely on getOnlineDisks() (#10687) 2020-10-15 13:06:23 -07:00
Klaus Post
3820a905e0 in getOnlineDisks wait for disks to be populated (#10685) 2020-10-15 06:37:10 -07:00
Harshavardhana
2042d4873c rename crawler config option to heal (#10678) 2020-10-14 13:51:51 -07:00
Harshavardhana
f9be783f3e fix: allow crawler to crawl on disks without usage constraints (#10677)
additionally, change the resolution of the usage-wise
return of disks, which allows small byte-level differences
to be masked.
2020-10-14 12:12:10 -07:00
Harshavardhana
23773bb32b update NTP package for accurate time resolution fixes (#10670) 2020-10-14 12:29:20 +05:30
Harshavardhana
2fd7545b6c fix: move to go1.15 (#10660) 2020-10-13 18:33:18 -07:00
Harshavardhana
71b97fd3ac fix: connect disks pre-emptively during startup (#10669)
connect disks pre-emptively upon startup, to ensure
enough disks are connected at startup rather than waiting
for them.

we need to do this to avoid long wait times for server to
be online when we have servers come up in rolling upgrade
fashion
2020-10-13 18:28:42 -07:00
Klaus Post
03991c5d41 crawler: Remove waitForLowActiveIO (#10667)
Only use dynamic delays for the crawler. Even though the max wait was 1 second the number 
of waits could severely impact crawler speed.

Instead of relying on a global metric, we use the stateless local delays to keep the crawler 
running at a speed more adjusted to current conditions.

The only case we keep it is before bitrot checks when enabled.
2020-10-13 13:45:08 -07:00
Harshavardhana
9c042a503b remove deprecate readiness from healthcheck docs (#10659) 2020-10-12 18:56:03 -07:00
飞雪无情
614060764d fix: use the correct Action type for policy.Args and iampolicy.Args (#10650) 2020-10-12 15:18:22 -07:00
Minio Trusted
4a678ad70f Update yaml files to latest version RELEASE.2020-10-12T21-53-21Z 2020-10-12 22:15:06 +00:00
Harshavardhana
a3ba8188d7 fix: allow locker to be niladic 2020-10-12 14:23:44 -07:00
Harshavardhana
2760fc86af Bump default idleConnsPerHost to control conns in time_wait (#10653)
This PR fixes a hang which occurs quite commonly at higher concurrency
through the following changes:

- allowing fewer connections in time_wait allows faster socket opens
- lower idle connection timeout to ensure that we let kernel
  reclaim the time_wait connections quickly
- increase somaxconn to 4096 instead of 2048 to allow larger tcp
  syn backlogs.

fixes #10413
2020-10-12 14:19:46 -07:00
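For illustration, the kind of transport tuning this refers to might look like the following; the concrete values are assumptions, not the exact settings from this PR:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Illustrative transport tuning along the lines described above:
	// more idle conns per host to reduce sockets stuck in time_wait,
	// and a short idle timeout so the kernel reclaims them quickly.
	tr := &http.Transport{
		MaxIdleConnsPerHost: 1024,
		IdleConnTimeout:     15 * time.Second,
	}
	client := &http.Client{Transport: tr}
	fmt.Printf("%T configured\n", client.Transport)
}
```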
P R
abb14aeec1 Header row separator for console Table (#10651) 2020-10-12 11:32:43 -07:00
Ritesh H Shukla
8ceb2a93fd fix: peer replication bandwidth monitoring in distributed setup (#10652) 2020-10-12 09:04:55 -07:00
Ritesh H Shukla
c2f16ee846 Add basic bandwidth monitoring for replication. (#10501)
This change tracks bandwidth for a bucket and object

- [x] Add Admin API
- [x] Add Peer API
- [x] Add BW throttling
- [x] Admin APIs to set replication limit
- [x] Admin APIs for fetch bandwidth
2020-10-09 20:36:00 -07:00
Minio Trusted
071c004f8b Update yaml files to latest version RELEASE.2020-10-09T22-55-05Z 2020-10-09 23:16:30 +00:00
Harshavardhana
6484453fc6 optionally allow strict quorum listing (#10649)
```
export MINIO_API_LIST_STRICT_QUORUM=on
```

would enable listing in quorum if necessary
2020-10-09 15:40:46 -07:00
Harshavardhana
a0d0645128 remove safeMode behavior in startup (#10645)
In almost all scenarios MinIO is now
mostly ready for all sub-systems
independently; safe-mode is not useful
anymore and does not serve its original
intended purpose.

allow the server to be fully functional
even with a partially configured config;
this is to favor availability of actual
I/O over manually fixing the server.

In k8s like environments it will never make
sense to take pod into safe-mode state,
because there is no real access to perform
any remote operation on them.
2020-10-09 09:59:52 -07:00
miraculli
1738eb24b1 fix: caching doc README.md missing high watermark (#10646) 2020-10-09 08:32:51 -07:00
Harshavardhana
253194e491 do not hold write locks - if objects don't exist (#10644) 2020-10-08 17:47:21 -07:00
Harshavardhana
736e58dd68 fix: handle concurrent lockers with multiple optimizations (#10640)
- select lockers which are non-local and online to have
  affinity towards remote servers for lock contention

- optimize lock retry interval to avoid sending too many
  messages during lock contention, reduces average CPU
  usage as well

- if bucket is not set, when deleteObject fails make sure
  setPutObjHeaders() honors lifecycle only if bucket name
  is set.

- fix top locks to list out always the oldest lockers always,
  avoid getting bogged down into map's unordered nature.
2020-10-08 12:32:32 -07:00
Poorna Krishnamoorthy
907a171edd Generalize error messages for remote targets (#10638)
This is to allow remote targets to be generalized
for replication/ILM transition

Also adding a field in BucketTarget to identify
a remote target with a label.
2020-10-08 10:54:11 -07:00
Andreas Auernhammer
ed6d2a100f logger: avoid writing audit log response header twice (#10642)
This commit fixes a misuse of the `http.ResponseWriter.WriteHeader`.
A caller should **either** call `WriteHeader` exactly once **or**
write to the response writer and causing an implicit 200 OK.

Writing the response headers more than once causes a `http: superfluous
response.WriteHeader call` log message. This commit fixes this
by preventing a 2nd `WriteHeader` call being forwarded to the underlying
`ResponseWriter`.

Updates #10587
2020-10-08 09:29:10 -07:00
Harshavardhana
effe131090 fix: allow read unlocks to be defensive about split brains (#10637) 2020-10-07 09:15:01 -07:00
Poorna Krishnamoorthy
01498a3e34 fix: add docs for new event types in notification (#10636) 2020-10-06 13:33:23 -07:00
Harshavardhana
18063bf25c fix: cleanup old directory handling code (#10633)
we don't need them anymore, remove legacy code.
2020-10-06 12:03:57 -07:00
Ravind Kumar
57f0176759 Update KES table to include additional supported KMS providers (#10631) 2020-10-06 11:09:43 -07:00
Poorna Krishnamoorthy
dbbed6f7f0 update minio-go dependency (#10634) 2020-10-06 08:37:09 -07:00
Poorna Krishnamoorthy
7fbfdceba3 Fix replication slowness (#10632)
- Increase channel buffer length
- Avoid blocking wait on replicaCh
2020-10-05 14:45:42 -07:00
Mark Clarkson
9dda9fb903 fix: https healthcheck mint test (#10622) 2020-10-05 08:21:41 -07:00
Shireesh Anjal
f1418a50f0 add NVMe drive info [model num, serial num, drive temp. etc.] (#10613)
* add NVMe drive info [model num, serial num, drive temp. etc.]
* Ignore fuse partitions
* Add the nvme logic only for linux
* Move smart/nvme structs to a separate file

Co-authored-by: wlan0 <sidharthamn@gmail.com>
2020-10-04 10:18:46 -07:00
Minio Trusted
017954e7ea Update yaml files to latest version RELEASE.2020-10-03T02-19-42Z 2020-10-03 02:36:54 +00:00
Harshavardhana
806625cbff turn-off go mod 2020-10-02 18:57:01 -07:00
Krishna Srinivas
045e30f2c1 Set LastModified time from source for bucket replication (#10627) 2020-10-02 18:32:22 -07:00
Harshavardhana
c6a9a94f94 fix: optimize ServerInfo() handler to avoid reading config (#10626)
fixes #10620
2020-10-02 16:19:44 -07:00
Harshavardhana
8e7c00f3d4 add missing request-id from DeleteObject events (#10623)
fixes #10621
2020-10-02 13:36:13 -07:00
Anis Elleuch
d1ed1da8c6 build: check-gen should exit with some changes in generated code (#10590) 2020-10-02 11:10:39 -07:00
Harshavardhana
23e8390997 fix: Allow Walk to honor load balanced drives (#10610) 2020-10-01 20:24:34 -07:00
Anis Elleuch
71403be912 fix: consider partNumber in GET/HEAD requests (#10618) 2020-10-01 15:41:12 -07:00
Harshavardhana
f28d02b7f2 fix: simplify obd how we calculate transferred bytes (#10617) 2020-10-01 14:34:51 -07:00
Harshavardhana
e0cb814f3f fail if port is not accessible (#10616)
throw proper error when port is not accessible
for the regular user, this is possibly a regression.

```
ERROR Unable to start the server: Insufficient permissions to use specified port
   > Please ensure MinIO binary has 'cap_net_bind_service=+ep' permissions
   HINT:
     Use 'sudo setcap cap_net_bind_service=+ep /path/to/minio' to provide sufficient permissions
```
2020-10-01 13:23:31 -07:00
Harshavardhana
98a08e1644 fix: protect updating latencies/throughput slices in obd (#10611)
Additionally close the transferChan upon function exit.
2020-10-01 09:50:08 -07:00
Klaus Post
3047121255 dataupdate: Bump to force rescan (#10609)
After #10594 let's invalidate the bloom filters to force the next cycles to go through all data.

There is a small chance that the linked PR could have caused missing bloom filter data.

This will invalidate the current bloom filters and make the crawler go through everything.
2020-09-30 16:10:40 -07:00
Ritesh H Shukla
5a7f92481e fix: client errors for DNS service creation errors (#10584) 2020-09-30 14:09:41 -07:00
Anis Elleuch
0d45c38782 List v1/versions routes based on source IP if found (#10603)
Routing using on source IP if found. This should distribute
the listing load for V1 and versioning on multiple nodes
evenly between different clients.

If source IP is not found from the http request header, then falls back
to bucket name instead.
2020-09-30 13:38:27 -07:00
Poorna Krishnamoorthy
56d1b227cf Handle changes to versioning config for replication (#10598)
Disallow versioning suspension on a bucket with
pre-existing replication configuration

If versioning is suspended on the target, replication
should fail.
2020-09-30 13:36:37 -07:00
Harshavardhana
061fa0635c remove ignoring incorrect CVE (#10597) 2020-09-30 13:10:24 -07:00
Shireesh Anjal
6e138f955e Fix a couple of typos in json config (#10605)
Vault.Encrypt: encryp -> encrypt
SysOBDProcess.Uids: uidsomitempty -> uids,omitempty
2020-09-30 13:08:11 -07:00
Lenin Alevski
bea87a5a20 fix: reading multiple TLS certificates when deployed in K8S (#10601)
Ignore all regular files, CAs directory and any 
directory that starts with `..` inside the
`.minio/certs` folder
2020-09-30 08:21:30 -07:00
Harshavardhana
2b4eb87d77 pick disks which are common maximally used (#10600)
further optimization to ensure that good disks
are always used for listing; other than for healing,
we only use disks that are maximally used.
2020-09-29 22:54:02 -07:00
sadegh
799758e54f fix: dropzone height to fill all screen height (#10547) 2020-09-29 22:27:41 -07:00
Harshavardhana
1f9abbee4d make sure to release locks upon timeout (#10596)
fixes #10418
2020-09-29 15:18:34 -07:00
Klaus Post
fdf0ae9167 exit data update tracker only upon context completion (#10594)
The data update tracker saver would exit if data wasn't updated for between cycles.
2020-09-29 13:23:53 -07:00
Harshavardhana
00eb6f6bc9 cache DiskInfo at storage layer for performance (#10586)
`mc admin info` on busy setups will not move HDD
heads unnecessarily for repeated calls, providing
better responsiveness for the call overall.

Bonus change: allow listTolerancePerSet to be N-1
for good entries, to avoid skipping entries when
for some reason one of the disks went offline.
2020-09-29 09:54:41 -07:00
Harshavardhana
66174692a2 add '.healing.bin' for tracking currently healing disk (#10573)
add a hint on the disk to allow tracking a fresh disk
being healed, to allow for restartable heals, and also
use this as a way to track and remove disks.

There are more pending changes where we should move
all the disk formatting logic to backend drives, this
PR doesn't deal with this refactor instead makes it
easier to track healing in the future.
2020-09-28 19:39:32 -07:00
Harshavardhana
849fcf0127 block unlocks if there are quorum failures (#10582)
fixes #10418
2020-09-28 15:39:52 -07:00
飞雪无情
209680e89f Remove redundant http.HandlerFunc type conversion. (#10576) 2020-09-28 13:33:49 -07:00
Anis Elleuch
e0c04a2da0 Makefile: Check for any non committed auto-generated code (#10579)
Always check if the auto-generated code is still compatible with the
existing written code to avoid a possible forgetting or sometimes a non
intentional change.
2020-09-28 13:33:34 -07:00
飞雪无情
27d9bd04e5 Handling unhandled errors in the InfoCannedPolicy method. (#10575) 2020-09-27 10:24:04 -07:00
Minio Trusted
511424a287 Update yaml files to latest version RELEASE.2020-09-26T03-44-56Z 2020-09-26 04:02:42 +00:00
Harshavardhana
bebcf4f004 unlock() only if locking was successful 2020-09-25 19:36:47 -07:00
Harshavardhana
eafa775952 fix: add lock ownership to expire locks (#10571)
- Add owner information for expiry, locking, unlocking a resource
- TopLocks returns now locks in quorum by default, provides
  a way to capture stale locks as well with `?stale=true`
- Simplify the quorum handling for locks to avoid from storage
  class, because there were challenges to make it consistent
  across all situations.
- And other tiny simplifications to reset locks.
2020-09-25 19:21:52 -07:00
Harshavardhana
66b4a862e0 fix: network failure err check should ignore context canceled errors (#10567)
context canceled errors bubbling up from the network
layer have the potential to be misconstrued as network
errors, prematurely taking a server offline and triggering
a health check routine; avoid this potential occurrence.
2020-09-25 14:35:47 -07:00
Anis Elleuch
9603489dd3 federation: Honor range with UploadObjectPart to a different cluster (#10570)
Use gr & length instead of srcInfo.Reader & srcInfo.Size because 
they don't honor range header
2020-09-25 12:06:42 -07:00
Anis Elleuch
b302c8a5f4 heal: Fix periodic healing cleanup (#10569)
isEnded() was incorrectly calculating if the current healing sequence is
ended or not. h.currentStatus.Items could be empty if healing is very
slow and mc admin heal consumed all items.
2020-09-25 10:29:00 -07:00
飞雪无情
4de88e87bb os.SEEK_SET is deprecated,use io.SeekStart. (#10563) 2020-09-25 03:12:25 -07:00
Praveen raj Mani
b880796aef Set the maximum open connections limit in PG and MySQL target configs (#10558)
As bulk/recursive deletes require multiple connections to be open at once,
the default open connections limit will be reached, which results in the following error:

```FATAL:  sorry, too many clients already```

By setting the open connections to a reasonable value - `2` - we ensure that the max open connections
will not be exhausted and stay within bounds.

The queries are simple inserts/updates/deletes, which are operational and sufficient with
a maximum open connection limit of 2.

Fixes #10553

Allow user configuration for MaxOpenConnections
2020-09-24 22:20:30 -07:00
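With Go's database/sql this cap is a one-liner; a hedged sketch (the driver choice and DSN are hypothetical, only `SetMaxOpenConns(2)` comes from the commit above):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // hypothetical driver choice for the PG target
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost/minio_events?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	// Cap open connections so bulk deletes cannot exhaust the
	// database server's client limit; 2 is the default mentioned
	// above, and the commit makes it user-configurable.
	db.SetMaxOpenConns(2)
}
```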
Harshavardhana
37a5d5d7a0 reduce timeouts between servers for faster disconnects (#10562) 2020-09-24 20:10:07 -07:00
Harshavardhana
3cac262dd1 report heal drives properly, also from global state (#10561)
It is possible the heal drives are not reported from
the maintenance check because the background heal
state simply relied on the `format.json` for capturing
unformatted drives. It is possible that drives might
be still healing - make sure that applications which
rely on cluster health check respond back this detail.
2020-09-24 15:36:47 -07:00
poornas
e6ab4db6b8 Fix minimum replication workers started (#10560)
This PR also fixes GetReplicationConfiguration permission
in web-handlers.go to use bucket as resource
2020-09-24 12:25:41 -07:00
Harshavardhana
ca989eb0b3 avoid ListBuckets returning quorum errors when node is down (#10555)
Also, revamp the way ListBuckets work make few portions
of the healing logic parallel

- walk objects for healing disks in parallel
- collect the list of buckets in parallel across drives
- provide consistent view for listBuckets()
2020-09-24 09:53:38 -07:00
飞雪无情
d778d034e7 Remove redundant mgmtQueryKey type. (#10557)
Remove redundant type conversion.
2020-09-24 08:40:21 -07:00
Espen Finnesand
df08fd1f03 Update Kubernetes Helm Readme.md (#10559)
The link to the Minio Helm Chart repository was wrong.
2020-09-24 08:38:46 -07:00
Minio Trusted
ac82f416a4 Update yaml files to latest version RELEASE.2020-09-23T19-18-30Z 2020-09-23 19:38:14 +00:00
Harshavardhana
f7f9517b6a fix: host extraction without port 2020-09-23 12:10:14 -07:00
Harshavardhana
90cff10e2b avoid crash if disks are not initialized 2020-09-23 12:00:29 -07:00
Harshavardhana
81caf35926 fix: reduce healthcheck interval for storage rest client (#10544) 2020-09-23 10:43:42 -07:00
poornas
5726cef3ca validate bucket exists in ListRemoteTargets api (#10552) 2020-09-23 10:37:54 -07:00
飞雪无情
5fdf47b118 Add Chinese documentation for distributed design (#10549) 2020-09-23 09:26:47 -07:00
Harshavardhana
8b74a72b21 fix: rename READY deadline to CLUSTER deadline ENV (#10535) 2020-09-23 09:14:33 -07:00
Klaus Post
eec69d6796 Fix stale context for bucket retrieval (#10551)
The provided context gets captured by the closure making all subsequent calls fail.
2020-09-23 08:30:31 -07:00
Harshavardhana
0537a21b79 avoid concurrenct use of rand.NewSource (#10543) 2020-09-22 15:34:27 -07:00
poornas
4c54ed8748 Close replica channel only once (#10542)
Also enforce s3:GetReplicationConfiguration permission check as a
bucket level resource.
2020-09-22 12:47:24 -07:00
poornas
a4006e23a0 Update replication docs to clarify permissions (#10536)
Co-authored-by: Klaus Post <klauspost@gmail.com>
2020-09-22 11:58:04 -07:00
Shireesh Anjal
b17dc81540 Change "disks" node to "drives" in OBD output (#10540) 2020-09-22 11:53:19 -07:00
Minio Trusted
d73c4f09f3 Update yaml files to latest version RELEASE.2020-09-21T22-31-59Z 2020-09-21 22:58:40 +00:00
Anis Elleuch
4c81201f95 fix: healing delete marker on versioned buckets (#10530)
Healing was not working correctly in the distributed mode because
errFileVersionNotFound was not properly converted in storage rest
client.

Besides, fixing the healing delete marker is not working as expected.
2020-09-21 15:16:16 -07:00
Harshavardhana
cd8d511d3d move versionsOrder struct to xl-storage-utils 2020-09-21 14:24:42 -07:00
Kaan Kabalak
899a2fa1c7 re-order right hand side dropdown options (#10534) 2020-09-21 13:59:42 -07:00
Harshavardhana
17e17da00d add parallel workers to perform replication in parallel (#10525)
set the concurrency for replication be to runtime.NumCPU()/2
2020-09-21 13:43:29 -07:00
Harshavardhana
a5da9120f3 fix: [fs] an error upon rwPool.Write() just attempt rwPool.Create() (#10533)
On some NFS clients it looks like errno is incorrectly set,
which leads to incorrect errors being thrown upwards.
2020-09-21 12:54:23 -07:00
poornas
aa12d75d75 fix crawler to detect lifecycle on bucket even if filter nil (#10532) 2020-09-21 11:41:07 -07:00
飞雪无情
dd4a2d7419 update distributed zh_CN document. (#10531) 2020-09-21 10:54:56 -07:00
Harshavardhana
6fcbdd5607 remove unused putObjectDir code (#10528) 2020-09-21 09:41:39 -07:00
Harshavardhana
3831cc9e3b fix: [fs] CompleteMultipart use trie structure for partMatch (#10522)
performance improves by around 100x or more

```
go test -v -run NONE -bench BenchmarkGetPartFile
goos: linux
goarch: amd64
pkg: github.com/minio/minio/cmd
BenchmarkGetPartFileWithTrie
BenchmarkGetPartFileWithTrie-4          1000000000               0.140 ns/op           0 B/op          0 allocs/op
PASS
ok      github.com/minio/minio/cmd      1.737s
```

fixes #10520
2020-09-21 01:18:13 -07:00
Krishna Srinivas
230fc0d186 Support for "directory" objects (#10499) 2020-09-19 08:39:41 -07:00
Harshavardhana
7f9498f43f fix: ignore faulty drives and continue (#10511)
drives might return different types of errors
handle them individually, and for some errors
just log an error and continue
2020-09-18 12:09:05 -07:00
Harshavardhana
1cf322b7d4 change leader locker only for crawler (#10509) 2020-09-18 11:15:54 -07:00
Derek Bender
3168e93730 fix typo in healthcheck README.md (#10518) 2020-09-18 09:52:37 -07:00
Klaus Post
0b1c824618 Fix incorrect request start time (#10516)
Log request start time BEFORE starting processing the request
2020-09-18 09:30:52 -07:00
Klaus Post
c851e022b7 Tweaks to dynamic locks (#10508)
* Fix cases where minimum timeout > default timeout.
* Add defensive code for too small/negative timeouts.
* Never set timeout below the maximum value of a request.
* Protect against (unlikely) int64 wraps.
* Decrease timeout slower.
* Don't re-lock before copying.
2020-09-18 09:18:18 -07:00
Aleksey Pogibelev
6f45e303f5 fix tls flag (#10487) 2020-09-18 15:35:54 +05:30
Klaus Post
5ad032826a Add a reasonable default if unable to get total RAM (#10506)
Though unlikely we shouldn't skip initializing the API if we cannot get RAM.

Add 16GiB as a default and log the error.
2020-09-18 02:03:02 -07:00
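A minimal sketch of that fallback, with a hypothetical getTotalRAM helper standing in for the real platform query:

```go
package main

import (
	"fmt"
	"log"
)

// getTotalRAM is a hypothetical stand-in for whatever platform call
// reports total memory; here it always fails to show the fallback.
func getTotalRAM() (uint64, error) {
	return 0, fmt.Errorf("unable to read total RAM")
}

func main() {
	mem, err := getTotalRAM()
	if err != nil {
		// Fall back to 16GiB and log the error, per the commit above.
		log.Println("warn:", err)
		mem = 16 << 30
	}
	fmt.Println("assumed RAM bytes:", mem)
}
```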
Harshavardhana
84bf4624a4 fix: make sure to preserve metadata during overwrite in FS mode (#10512)
This bug was introduced in 14f0047295
almost 3yrs ago, as a side effect of removing stale `fs.json`,
but we in fact end up removing an existing good `fs.json` for an
existing object, leading to some form of a data loss.

fixes #10496
2020-09-18 00:16:16 -07:00
飞雪无情
dff37aa33d update disk caching zh_CN document. (#10472)
add disk caching DESIGN zh_CN document.
2020-09-18 00:07:48 -07:00
saurabh29789
d12831eb07 Add support for searching objects (#10424) 2020-09-17 23:01:37 -07:00
Harshavardhana
4a36cd7035 fix: improve performance ListObjectParts in FS mode (#10510)
from 20s for 10000 parts to less than 1sec

Without the patch
```
~ time aws --endpoint-url=http://localhost:9000 --profile minio s3api \
       list-parts --bucket testbucket --key test \
       --upload-id c1cd1f50-ea9a-4824-881c-63b5de95315a

real    0m20.394s
user    0m0.589s
sys     0m0.174s
```

With the patch
```
~ time aws --endpoint-url=http://localhost:9000 --profile minio s3api \
       list-parts --bucket testbucket --key test \
       --upload-id c1cd1f50-ea9a-4824-881c-63b5de95315a

real    0m0.891s
user    0m0.624s
sys     0m0.182s
```

fixes #10503
2020-09-17 18:51:16 -07:00
poornas
00555c747e Strip standard ports off remote target url (#10498) 2020-09-17 11:09:50 -07:00
Klaus Post
03490c811b Fix obd goroutine leak (#10504)
The goroutine collecting transfer stats never exits. Add the missing channel close.
2020-09-17 10:10:20 -07:00
Minio Trusted
48d2c03250 Update yaml files to latest version RELEASE.2020-09-17T04-49-20Z 2020-09-17 05:07:58 +00:00
Harshavardhana
ed78854cea fix: list across all drives to avoid stale disks 2020-09-16 21:17:10 -07:00
Harshavardhana
e60834838f fix: background disk heal, to reload format consistently (#10502)
It was observed in VMware vsphere environment during a
pod replacement, `mc admin info` might report incorrect
offline nodes for the replaced drive. This issue eventually
goes away but requires quite a lot of time for all servers
to be in sync.

This PR fixes this behavior properly.
2020-09-16 21:14:35 -07:00
Harshavardhana
d616d8a857 serialize replication and feed it through task model (#10500)
this allows for eventually controlling the concurrency
of replication and overall control of throughput
2020-09-16 16:04:55 -07:00
Anis Elleuch
24cab7f9df ilm: Remove a 'null' version if not latest (#10494)
If the ILM document requires removing noncurrent versions,
the server should be able to remove 'null' versions as well.
'null' versions are created when versioning is not enabled 
or suspended.
2020-09-16 10:21:50 -07:00
Minio Trusted
b2536476c9 Update yaml files to latest version RELEASE.2020-09-16T04-22-35Z 2020-09-16 04:41:00 +00:00
Harshavardhana
02c1a08a5b fix: make sure to lock CopyObject for in-place updates (#10492) 2020-09-15 20:44:48 -07:00
Ritesh H Shukla
5c47ce456e Run replication in the background (#10491) 2020-09-15 18:44:58 -07:00
Anis Elleuch
8ea55f9dba obd: Add console log to OBD output (#10372) 2020-09-15 18:02:54 -07:00
poornas
80e3dce631 azure: update content-md5 to metadata after upload (#10482)
Fixes #10453
2020-09-15 16:31:47 -07:00
Harshavardhana
80fab03b63 fix: S3 gateway doesn't support full passthrough for encryption (#10484)
The entire encryption layer is dependent on the fact that
KMS should be configured for S3 encryption to work properly
and we only support passing the headers as is to the backend
for encryption only if KMS is configured.

Make sure that this predictability is maintained, currently
the code was allowing encryption to go through and fail
at later to indicate that KMS was not configured. We should
simply reply "NotImplemented" if KMS is not configured, this
allows clients to simply proceed with their tests.
2020-09-15 13:57:15 -07:00
Harshavardhana
730d2dc7be fix: allow CopyObject/PutObjecTags on pre-existing content (#10485)
fixes #10475
2020-09-15 09:18:41 -07:00
Harshavardhana
0ee9678190 fix: add missing delete marker created filter (#10481) 2020-09-14 21:32:52 -07:00
Klaus Post
34859c6d4b Preallocate (safe) slices when we know the size (#10459) 2020-09-14 20:44:18 -07:00
Klaus Post
b1c99e88ac reduce CPU usage upto 50% in readdir (#10466) 2020-09-14 17:19:54 -07:00
Harshavardhana
0104af6bcc delayed locks until we have started reading the body (#10474)
This is to ensure that Go contexts work properly; after some
interesting experiments I found that Go net/http doesn't
cancel the context when Body is non-zero and hasn't been
read till EOF.

The following gist explains this: it can lead to a pile-up
of goroutines on the server which will never be canceled
and will die at a much later point in time, which can
simply overwhelm the server.

https://gist.github.com/harshavardhana/c51dcfd055780eaeb71db54f9c589150

To avoid this, refactor the locking such that we take locks after we
have started reading from the body, and only take locks when needed.

Also, remove contextReader as it's not useful and doesn't work as
expected: the context is not canceled until the body reaches EOF,
so there is no point in wrapping it with a context and putting a
`select {` on it, which can unnecessarily increase the CPU overhead.

We will still use the context to cancel the lockers etc.
Additional simplification in the locker code to avoid timers,
as re-using them is a complicated ordeal; avoid them in
the hot path. Since locking is very common this may avoid
lots of allocations.
2020-09-14 15:57:13 -07:00
Andreas Auernhammer
224daee391 fix nats TLS unit tests (#10476)
This commit fixes the nats TLS tests by generating new certificates
(root CA, server and client) - each valid for 10y. The new certificates
don't have a common name (deprecated by X.509) but SANs instead.

Since Go 1.15 the Go `crypto/x509` package rejects certificates that
only have a common name and no SAN. See: https://golang.org/doc/go1.15#commonname
2020-09-14 13:19:46 -07:00
Harshavardhana
34ea1d2167 fix: return correct error code for MetadataTooLarge (#10470)
fixes #10469
2020-09-13 21:26:35 -07:00
Harshavardhana
9d95937018 update KMS docs indicating deprecation of AUTO_ENCRYPTION env 2020-09-13 16:23:28 -07:00
Derek Bender
74a7889a3e Fix typo in README.md (#10471) 2020-09-13 09:02:10 -07:00
Klaus Post
fa01e640f5 Continous healing: add optional bitrot check (#10417) 2020-09-12 00:08:12 -07:00
Harshavardhana
f355374962 add support for configurable remote transport deadline (#10447)
configurable remote transport timeouts for some special cases
where this value needs to be bumped to a higher value when
transferring large data between federated instances.
2020-09-11 23:03:08 -07:00
Harshavardhana
bda0fe3150 fix: allow LDAP identity to support form body POST (#10468)
similar to other STS APIs
2020-09-11 23:02:32 -07:00
Harshavardhana
b70995dd60 Revert "ilm: Remove null version if not latest with proper config (#10467)"
This reverts commit 4b6264da7d.
2020-09-11 18:15:49 -07:00
Anis Elleuch
4b6264da7d ilm: Remove null version if not latest with proper config (#10467) 2020-09-11 14:20:09 -07:00
Harshavardhana
48919de301 fix: for defer'ed deleteObject use internal context (#10463) 2020-09-11 06:39:19 -07:00
Minio Trusted
eb3ded420e Update yaml files to latest version RELEASE.2020-09-10T22-02-45Z 2020-09-10 22:22:28 +00:00
Harshavardhana
eb2934f0c1 simplify webhook DNS further generalize for gateway (#10448)
continuation of the changes from eaaf05a7cc
this further simplifies it and enables it for gateway deployments as well
2020-09-10 14:19:32 -07:00
Klaus Post
b7438fe4e6 Copy metadata before spawning goroutine + prealloc maps (#10458)
In `(*cacheObjects).GetObjectNInfo` copy the metadata before spawning a goroutine.

Clean up a few map[string]string copies as well, reducing allocs and simplifying the code.

Fixes #10426
2020-09-10 11:37:22 -07:00
Anis Elleuch
ce6cef6855 erasure: Call Walk() from all disks (#10445)
It does not make sense to call Walk() on only N/2 disks and then
require N/2 quorum; just keep it at N/2+1.

The commit fixes this behavior.
2020-09-10 09:27:52 -07:00
飞雪无情
a966ccd17d Support nginx LB with docker-compose example (#10434) 2020-09-10 09:19:17 -07:00
Klaus Post
493c714663 Remove erasureSets and erasureObjects from ObjectLayer (#10442) 2020-09-10 09:18:19 -07:00
Harshavardhana
e959c5d71c fix: server panic in FS mode (#10455)
fixes #10454
2020-09-10 09:16:26 -07:00
Harshavardhana
4a2928eb49 generate missing object delete bucket notifications (#10449)
fixes #10381
2020-09-09 18:23:08 -07:00
Anis Elleuch
af88772a78 lifecycle: NoncurrentVersionExpiration considers noncurrent version age (#10444)
From https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions

```
When specifying the number of days in the NoncurrentVersionTransition
and NoncurrentVersionExpiration actions in a Lifecycle configuration,
note the following:

It is the number of days from when the version of the object becomes
noncurrent (that is, when the object is overwritten or deleted), that
Amazon S3 will perform the action on the specified object or objects.

Amazon S3 calculates the time by adding the number of days specified in
the rule to the time when the new successor version of the object is
created and rounding the resulting time to the next day midnight UTC.
For example, in your bucket, suppose that you have a current version of
an object that was created at 1/1/2014 10:30 AM UTC. If the new version
of the object that replaces the current version is created at 1/15/2014
10:30 AM UTC, and you specify 3 days in a transition rule, the
transition date of the object is calculated as 1/19/2014 00:00 UTC.
```
2020-09-09 18:11:24 -07:00
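The quoted rule translates into a small date computation; a sketch reproducing the AWS example above (function name is illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// expiryDate illustrates the AWS rule quoted above: add the configured
// number of days to the time the successor version was created, then
// round up to the next midnight UTC.
func expiryDate(successorCreated time.Time, days int) time.Time {
	t := successorCreated.UTC().AddDate(0, 0, days)
	midnight := time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC)
	if t.After(midnight) {
		midnight = midnight.AddDate(0, 0, 1)
	}
	return midnight
}

func main() {
	created := time.Date(2014, 1, 15, 10, 30, 0, 0, time.UTC)
	fmt.Println(expiryDate(created, 3)) // 2014-01-19 00:00:00 +0000 UTC
}
```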
Li Yi
1dce6918c2 fix sysctl.sh quotes which are incompatible with sysctl (#10446) 2020-09-09 17:29:23 -07:00
Harshavardhana
9109148474 add support for new UA values for update an check (#10451) 2020-09-09 17:21:39 -07:00
Nitish Tiwari
eaaf05a7cc Add Kubernetes operator webook server as DNS target (#10404)
This PR adds a DNS target that ensures to update an entry
into Kubernetes operator when a bucket is created or deleted.

See minio/operator#264 for details.

Co-authored-by: Harshavardhana <harsha@minio.io>
2020-09-09 12:20:49 -07:00
Harshavardhana
52e21bc853 update docs to indicate go1.14 is minimum 2020-09-09 11:27:48 -07:00
Klaus Post
16e1a25bc0 go.mod: Require Go 1.14 (#10441)
MinIO requires Go 1.14, so bump mod version.
2020-09-09 11:23:54 -07:00
Harshavardhana
958661cbb5 skip subdomain from bucket DNS which starts with minio.domain (#10390)
extend host matcher to reject the host match
2020-09-09 09:57:37 -07:00
Minio Trusted
6019628f7d Update yaml files to latest version RELEASE.2020-09-08T23-05-18Z 2020-09-08 23:25:40 +00:00
Klaus Post
0987069e37 select: Fix integer conversion overflow (#10437)
Do not convert float value to integer if it will over/underflow.

The comparison cannot be `<=` since rounding may overflow it.

Fixes #10436
2020-09-08 15:56:11 -07:00
Harshavardhana
6a0372be6c cleanup tmpDir any older entries automatically just like multipart (#10439)
also consider multipart uploads, temporary files in `.minio.sys/tmp`
as stale beyond 24hrs and clean them up automatically
2020-09-08 15:55:40 -07:00
Harshavardhana
c13afd56e8 Remove MaxConnsPerHost settings to avoid potential hangs (#10438)
MaxConnsPerHost can potentially hang a call without any
way to timeout; we do not need this setting for our proxy
and gateway implementations, instead the IdleConn settings
are good enough.

Also ensure to use NewRequestWithContext and make sure to
take the disks offline only for network errors.

Fixes #10304
2020-09-08 14:22:04 -07:00
Harshavardhana
96997d2b21 allow ctrl+c to be consistent at early startup (#10435)
fixes #10431
2020-09-08 09:10:55 -07:00
Klaus Post
86a3319d41 Ignore config values from unknown subsystems (#10432) 2020-09-08 08:57:04 -07:00
飞雪无情
a694ba93d9 update kernel tuning zh_CN document (#10433) 2020-09-07 19:21:52 -07:00
Harshavardhana
9f60e84ce1 always copy UserDefined metadata map (#10427)
fixes #10426
2020-09-07 09:25:28 -07:00
Minio Trusted
a9aaea0d67 Update yaml files to latest version RELEASE.2020-09-05T07-14-49Z 2020-09-05 07:32:10 +00:00
Harshavardhana
572b1721b2 set max API requests automatically based on RAM (#10421) 2020-09-04 19:37:37 -07:00
Harshavardhana
b0e1d4ce78 re-attach offline drive after new drive replacement (#10416)
inconsistent drive healing when one of the drives is offline
while a new drive was replaced, this change is to ensure
that we can add the offline drive back into the mix by
healing it again.
2020-09-04 17:09:02 -07:00
Harshavardhana
eb19c8af40 Bump response header timeout for proxying list request (#10420) 2020-09-04 16:07:40 -07:00
飞雪无情
746f1585eb add debugging zh_CN document. (#10401) 2020-09-04 13:03:46 -07:00
Klaus Post
2d58a8d861 Add storage layer contexts (#10321)
Add context to all (non-trivial) calls to the storage layer. 

Contexts are propagated through the REST client.

- `context.TODO()` is left in place for the places where it needs to be added to the caller.
- `endWalkCh` could probably be removed from the walkers, but no changes so far.

The "dangerous" part is that now a caller disconnecting *will* propagate down,  so a 
"delete" operation will now be interrupted. In some cases we might want to disconnect 
this functionality so the operation completes if it has started, leaving the system in a cleaner state.
2020-09-04 09:45:06 -07:00
poornas
0037951b6e improve error message when remote target missing (#10412) 2020-09-04 08:48:38 -07:00
Andreas Auernhammer
fbd1c5f51a certs: refactor cert manager to support multiple certificates (#10207)
This commit refactors the certificate management implementation
in the `certs` package such that multiple certificates can be
specified at the same time. Therefore, the following layout of
the `certs/` directory is expected:
```
certs/
 │
 ├─ public.crt
 ├─ private.key
 ├─ CAs/          // CAs directory is ignored
 │   │
 │    ...
 │
 ├─ example.com/
 │   │
 │   ├─ public.crt
 │   └─ private.key
 └─ foobar.org/
     │
     ├─ public.crt
     └─ private.key
   ...
```

However, directory names like `example.com` are just for human
readability/organization and don't have any meaning w.r.t whether
a particular certificate is served or not. This decision is made based
on the SNI sent by the client and the SAN of the certificate.

***

The `Manager` will pick a certificate based on the client trying
to establish a TLS connection. In particular, it looks at the client
hello (i.e. SNI) to determine which host the client tries to access.
If the manager can find a certificate that matches the SNI it
returns this certificate to the client.

However, the client may choose to not send an SNI or may try to access
a server directly via IP (`https://<ip>:<port>`). In this case, we
cannot use the SNI to determine which certificate to serve. However,
we also should not pick "the first" certificate that would be accepted
by the client (based on cryptographic parameters - like a signature algorithm)
because it may be an internal certificate that contains internal hostnames. 
We would disclose internal infrastructure details by doing so.

Therefore, the `Manager` returns the "default" certificate when the
client does not specify an SNI. The default certificate is the top-level
`public.crt` - i.e. `certs/public.crt`.

This approach has some consequences:
 - It's the operator's responsibility to ensure that the top-level
   `public.crt` does not disclose any information (i.e. hostnames)
   that are not publicly visible. However, this was the case in the
   past already.
 - Any other `public.crt` - except for the top-level one - must not
   contain any IP SAN. The reason for this restriction is that the
   Manager cannot match an SNI to an IP because the SNI is the server host
   name. The entire purpose of SNI is to indicate which host the client
   tries to connect to when multiple hosts run on the same IP. So, a
   client will not set the SNI to an IP.
   If we allowed IP SANs in a lower-level `public.crt`, a user would
   expect that it is possible to connect to MinIO directly via IP address
   and that the MinIO server would pick "the right" certificate. However,
   the MinIO server cannot determine which certificate to serve, and
   therefore always picks the "default" one. This may lead to all sorts
   of confusing errors like:
   "It works if I use `https://instance.minio.local` but not when I use
   `https://10.0.2.1`."

These consequences/limitations should be pointed out / explained in our
docs in an appropriate way. However, the support for multiple
certificates should not have any impact on how deployments with a single
certificate function today.
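
A minimal sketch of the selection logic with `crypto/tls` (not the actual `certs.Manager`; a simple name map stands in for real SAN matching, and the paths follow the layout above):

```
package main

import (
	"crypto/tls"
	"log"
	"net/http"
)

func main() {
	defaultCert, err := tls.LoadX509KeyPair("certs/public.crt", "certs/private.key")
	if err != nil {
		log.Fatal(err)
	}
	example, err := tls.LoadX509KeyPair("certs/example.com/public.crt", "certs/example.com/private.key")
	if err != nil {
		log.Fatal(err)
	}
	byName := map[string]*tls.Certificate{"example.com": &example}

	cfg := &tls.Config{
		GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
			if c, ok := byName[hello.ServerName]; ok {
				return c, nil // SNI matched a configured certificate
			}
			// No SNI (e.g. direct IP access) or no match: serve the default.
			return &defaultCert, nil
		},
	}
	log.Fatal((&http.Server{Addr: ":9000", TLSConfig: cfg}).ListenAndServeTLS("", ""))
}
```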

Co-authored-by: Harshavardhana <harsha@minio.io>
2020-09-03 23:33:37 -07:00
Harshavardhana
1c6781757c add missing ListBucketVersions from policy actions (#10414) 2020-09-03 18:25:06 -07:00
Harshavardhana
b4e3956e69 update KES docs to talk about 'mc encrypt' command (#10400)
add a deprecation notice for KMS_AUTO_ENCRYPTION
2020-09-03 12:43:45 -07:00
Krishna Srinivas
c51229493b azure docs remove default chunk size example (#10406) 2020-09-03 01:31:56 -07:00
Rushan
631d55aa22 Update License link in Readme.md (#10409) 2020-09-03 01:30:53 -07:00
Harshavardhana
8a291e1dc0 Cluster healthcheck improvements (#10408)
- do not fail the healthcheck if heal status
  was not obtained from one of the nodes,
  if many nodes fail then report this as a
  catastrophic error.
- add "x-minio-write-quorum" value to match
  the write tolerance supported by server.
- admin info now states if a drive is healing
  where madmin.Disk.Healing is set to true
  and madmin.Disk.State is "ok"
2020-09-02 22:54:56 -07:00
Klaus Post
650dccfa9e cache: Only start at high watermark (#10403)
Currently, cache purges are triggered as soon as the low watermark is exceeded.
To reduce IO this should only be done when reaching the high watermark.
This simplifies checks and routes all GC decisions through
`dcache.diskSpaceAvailable(size)`. While a comment claims that
`dcache.triggerGC <- struct{}{}` was non-blocking, I don't see how
that was possible. Instead, we give the queue channel a buffer of 1
and use channel semantics to avoid blocking when a GC has
already been requested.

`bytesToClear` now takes the high watermark into account so it will
not request any bytes to be cleared until that is reached.
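
A minimal sketch of the non-blocking trigger described above: a channel with a buffer of 1 coalesces GC requests without ever blocking the requester:

```
package main

import "fmt"

type diskCache struct {
	triggerGC chan struct{} // buffer of 1
}

// queueGC requests a GC run without blocking; if a GC is already
// pending (the buffer slot is taken), the request is coalesced.
func (c *diskCache) queueGC() {
	select {
	case c.triggerGC <- struct{}{}:
	default: // a GC has already been requested
	}
}

func main() {
	c := &diskCache{triggerGC: make(chan struct{}, 1)}
	c.queueGC()
	c.queueGC() // coalesced with the first request
	fmt.Println("pending GC requests:", len(c.triggerGC))
}
```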
2020-09-02 17:48:44 -07:00
Minio Trusted
d08b4b147d Update yaml files to latest version RELEASE.2020-09-02T18-19-50Z 2020-09-02 18:40:57 +00:00
Andreas Auernhammer
9a703befe6 crypto: reduce retry delay when retrying KES requests (#10394)
This commit reduces the retry delay when retrying a request
to a KES server by:
 - reducing the max. jitter delay from 3s to 1.5s
 - skipping the random delay when there are more KES endpoints
   available.

If there are more KES endpoints we can directly retry the request
by sending it to the next endpoint - as pointed out by @krishnasrinivas
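
A sketch of the retry policy under assumed names (the `call` hook is hypothetical, not the actual KES client): move to the next endpoint immediately, and only sleep a jittered delay capped at 1.5s after a full round of failures:

```
package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// retry tries every endpoint in turn with no delay in between; only
// after a full round of failures does it back off with random jitter.
func retry(endpoints []string, rounds int, call func(string) error) error {
	err := errors.New("no endpoints configured")
	for r := 0; r < rounds; r++ {
		for _, ep := range endpoints {
			if err = call(ep); err == nil {
				return nil
			}
			// More endpoints may remain: move on immediately.
		}
		// Every endpoint failed this round: jittered delay, max 1.5s.
		time.Sleep(time.Duration(rand.Int63n(int64(1500 * time.Millisecond))))
	}
	return err
}

func main() {
	err := retry([]string{"https://kes-1:7373", "https://kes-2:7373"}, 2,
		func(ep string) error { return errors.New("unreachable: " + ep) })
	fmt.Println(err)
}
```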
2020-09-02 11:04:10 -07:00
Klaus Post
9a1615768d Fix flaky TestXLStorageVerifyFile (#10398)
`TestXLStorageVerifyFile` would fail 1 in 256 times if the first random character was 'a'.

Instead, write 256 bytes, which has a 1 in 256^256 probability of colliding.
2020-09-02 09:42:24 -07:00
Harshavardhana
37da0c647e fix: delete marker compatibility behavior for suspended bucket (#10395)
- delete-marker should be created on a suspended bucket as `null`
- delete-marker should delete any pre-existing `null` versioned
  object and create an entry `null`
2020-09-02 00:19:03 -07:00
Harshavardhana
2acb530ccd update rulesguard with new rules (#10392)
Co-authored-by: Nitish Tiwari <nitish@minio.io>
Co-authored-by: Praveen raj Mani <praveen@minio.io>
2020-09-01 16:58:13 -07:00
Klaus Post
3e1fb17b70 heal: Check for truncated files (#10399)
When checking parts we already do a stat for each part.

Since we have the on-disk size, check that it is at least what we expect.

When checking metadata, check whether the metadata is 0 bytes.
2020-09-01 12:06:45 -07:00
Klaus Post
a89d6b8e3d Fix common Windows failure (#10397)
The `getNonLoopBackIP` may grab an IP from an interface that
doesn't allow binding (on Windows), so this test consistently fails.

We exclude that specific error.
2020-09-01 10:11:15 -07:00
Klaus Post
1c085f7d1a Fix crash on Windows when crawling (#10385)
* readDirN: Check if file is directory

`syscall.FindNextFile` crashes if the handle is a file.

`errFileNotFound` matches 'unix' functionality: d19b434ffc/cmd/os-readdir_unix.go (L106)

Fixes #10384
2020-09-01 09:33:16 -07:00
Harshavardhana
4b6585d249 support 'ldap:user' variable replacement properly (#10391)
also update `ldap.go` examples with latest
minio-go changes

Fixes #10367
2020-09-01 12:26:22 +05:30
Harshavardhana
9ffad7fceb discard empty endpoint in crypto kes
introduced in 18725679c4
2020-08-31 19:35:43 -07:00
Andreas Auernhammer
18725679c4 crypto: allow multiple KES endpoints (#10383)
This commit addresses a maintenance / automation problem when MinIO-KES
is deployed on bare-metal. In orchestrated environments the orchestrator (K8S)
will make sure that `n` KES servers (IPs) are available via the same DNS
name. There it is sufficient to provide just one endpoint.
2020-08-31 18:10:52 -07:00
Anis Elleuch
ba8a8ad818 ListObjectsV1 requests unnecessarily fail with offline nodes (#10386)
ListObjectsV1 requests are actually redirected to a specific node, 
depending on the bucket name. The purpose of this behavior was
to optimize listing.

However, the current code sends a Bad Gateway error if the
target node is offline, which is bad behavior because it means
that the list request will fail, although this is unnecessary since
we can still use the current node to list as well (the default behavior
without the proxying optimization).

Currently, mint fails when there is one offline node; after
this PR, mint will always succeed.
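
A sketch of the fallback with hypothetical helpers (`proxyListToNode`, `listLocally`): proxy the listing to the bucket's designated node, but list locally instead of returning 502 when that node is unreachable:

```
package main

import (
	"errors"
	"fmt"
)

var errNodeOffline = errors.New("node offline")

func proxyListToNode(node, bucket string) ([]string, error) {
	return nil, errNodeOffline // stand-in: the designated node is down
}

func listLocally(bucket string) ([]string, error) {
	return []string{"a.txt", "b.txt"}, nil // stand-in for local listing
}

func listObjectsV1(node, bucket string) ([]string, error) {
	entries, err := proxyListToNode(node, bucket)
	if err == nil {
		return entries, nil
	}
	// Designated node unreachable: fall back to the default,
	// un-proxied listing path rather than failing with 502.
	return listLocally(bucket)
}

func main() {
	fmt.Println(listObjectsV1("node2:9000", "testbucket"))
}
```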
2020-08-31 12:37:31 -07:00
Harshavardhana
102ad60dee simplify removing temporary files (#10389) 2020-08-31 12:35:40 -07:00
飞雪无情
cb61e50b51 since its an internal RPC API we do not need to document it. (#10382) 2020-08-31 10:20:07 -07:00
Gaige B Paulsen
859ef52886 update for smartos build (solaris too) (#10378) 2020-08-31 10:19:25 -07:00
Harshavardhana
f04a1f220c update stale to ignore 'do not close' label 2020-08-30 20:45:03 -07:00
unlimitedbits
cd380251b3 Support custom paths for secret files in docker-entrypoint.sh (#10344) 2020-08-28 14:04:29 -07:00
Eco
92cd1eed45 Clarify zone example (#10374) 2020-08-28 14:03:29 -07:00
Harshavardhana
db32a24cb6 update kernel tuning docs to indicate sane values 2020-08-28 12:07:55 -07:00
飞雪无情
2d96940826 fix: adminTrace show any errors when server is shutdown. (#10370) 2020-08-28 10:04:54 -07:00
Harshavardhana
e730da1438 fix: refresh JWKS public keys upon failure (#10368)
fixes #10359
2020-08-28 08:15:12 -07:00
Anis Elleuch
46ee8659b4 fix write quorum calculation for bucket operations (#10364)
When the number of disks is odd, the calculation of quorum 
for bucket operations was not correct; fix it.
2020-08-27 12:55:32 -07:00
poornas
73a6b4ea11 fix typo in replication docs (#10366) 2020-08-27 12:54:23 -07:00
Minio Trusted
c1b88c17cc Update yaml files to latest version RELEASE.2020-08-27T05-16-20Z 2020-08-27 05:36:01 +00:00
Harshavardhana
a359e36e35 tolerate listing with only readQuorum disks (#10357)
We can reduce this further in the future, but this is a good
value to keep around. With the advent of continuous healing,
we can be assured that namespace will eventually be
consistent so we are okay to avoid the necessity to
a list across all drives on all sets.

Bonus: parallel Pop()'s seem to have the potential to
wait too long on large drive setups and cause more slowness
instead of gaining any performance; remove it for now.

Also, implement load-balanced replies for local disks,
ensuring that local disks have an affinity for

- cleanupStaleMultipartUploads()
2020-08-26 19:29:35 -07:00
Jorge Israel Peña
0a2e6d58a5 hdfs gateway handle listing single files (#10362) 2020-08-26 16:03:53 -07:00
Harshavardhana
7e80afdd7f remove nancy ignore list, as upstream issue is fixed 2020-08-26 15:45:42 -07:00
Klaus Post
1b119557c2 getDisksInfo: Attribute failed disks to correct endpoint (#10360)
If DiskInfo calls failed, the information returned was used anyway, 
resulting in no endpoint being set.

This would make the drive be attributed to the local system since 
`disk.Endpoint == disk.DrivePath` in that case.

Instead, if the call fails record the endpoint and the error only.
2020-08-26 10:11:26 -07:00
Harshavardhana
7778fef6bb update continuous heal metrics appropriately for scanned items (#10352)
Bonus: make sure to ignore objectNotFound and versionNotFound
errors properly at all layers, since HealObjects() returns
an objectNotFound error if the bucket or prefix is empty.
2020-08-26 08:53:33 -07:00
飞雪无情
ea1803417f Use constants for gateway names to avoid bugs caused by spelling. (#10355) 2020-08-26 08:52:46 -07:00
飞雪无情
ea5094e842 Add missing go mod libraries for mint. (#10347)
The aws-sdk-go, healthcheck, and security modules in mint
lack required dependency libraries; add them in go.mod now
2020-08-25 17:51:12 -07:00
Minio Trusted
5a974fb10c Update yaml files to latest version RELEASE.2020-08-26T00-00-49Z 2020-08-26 00:19:30 +00:00
Anis Elleuch
9acdeab73d lifecycle: Accept document without expiration (#10348) 2020-08-25 12:38:59 -07:00
Harshavardhana
d19b434ffc fix: bring back delayed leaf detection in listing (#10346) 2020-08-25 12:26:48 -07:00
Klaus Post
17a1eda702 Disregard healing disks in crawling (#10349)
When crawling never use a disk we know is healing.

Most of the change involves keeping track of the original endpoint on xlStorage
and this also fixes DiskInfo.Endpoint never being populated.

Heal master will print `data-crawl: Disk "http://localhost:9001/data/mindev/data2/xl1" is 
Healing, skipping` once on a cycle (no more often than every 5m).
2020-08-25 10:55:15 -07:00
Minio Trusted
7d50a0cfea Update yaml files to latest version RELEASE.2020-08-25T00-21-20Z 2020-08-25 00:44:39 +00:00
Harshavardhana
ceff7bcca5 fix: ruleguard gopath link 2020-08-24 16:25:00 -07:00
Daniel Valdivia
7d1734d033 indicate through HTTP header cluster healing in progress (#10342) 2020-08-24 15:20:50 -07:00
Harshavardhana
03ec6adfd0 fix: KES http2.0 communication support (#10341) 2020-08-24 14:37:53 -07:00
Harshavardhana
309b10f201 keep crawler cycle at 5 minutes 2020-08-24 14:05:16 -07:00
飞雪无情
2a8e40f19f add chroot zh_CN document (#10337) 2020-08-24 13:58:19 -07:00
KevinSmile
5f7bd2b1da fix: lifecycle-expiration validation bug (#10327) 2020-08-24 13:56:50 -07:00
Klaus Post
c097ce9c32 continuous healing based on crawler (#10103)
Design: https://gist.github.com/klauspost/792fe25c315caf1dd15c8e79df124914
2020-08-24 13:47:01 -07:00
Harshavardhana
caad314faa add ruleguard support, fix all the reported issues (#10335) 2020-08-24 12:11:20 -07:00
Klaus Post
bc2ebe0021 Only enforce quota on success (#10339)
We should only enforce quotas if no error has been returned.

firstErr is safe to access since all goroutines have exited at this point.

If `firstErr` hasn't been set by something else return the context error if cancelled.
2020-08-24 10:15:46 -07:00
飞雪无情
21e8440423 add compression zh_CN document (#10330) 2020-08-23 22:07:36 -07:00
Harshavardhana
11aa393ba7 Allow region errors to be dynamic (#10323)
remove other FIXMEs as we are not planning to fix these, 
instead we will add dynamism on a case-by-case basis.

fixes #10250
2020-08-23 22:06:22 -07:00
Praveen raj Mani
d0c910a6f3 Support https and basic-auth for elasticsearch notification target (#10332) 2020-08-23 09:43:48 -07:00
Harshavardhana
81c90ae430 move to gorilla mux 1.8.0 (#10328) 2020-08-22 23:14:02 -07:00
kannappanr
d15a5ad4cc S3 Gateway: Check for encryption headers properly (#10309) 2020-08-22 11:41:49 -07:00
飞雪无情
0ff246653b CN bucket policy document that is no longer needed can be removed, (#10310)
keeping it consistent with the English document.
2020-08-22 11:05:21 -07:00
飞雪无情
113bcbdb78 update minio limit zh_CN document (#10320) 2020-08-22 11:04:31 -07:00
Harshavardhana
95411228db add missing cleanupStaleMultipartUploads (#10325)
fixes #10319
2020-08-21 21:39:54 -07:00
ebozduman
23774353b7 get_object() returns NoSuchKey error when object is a prefix (#10315) 2020-08-21 13:08:01 -07:00
Tobias Nygren
052b5262ff use statvfs(2) for disk.GetInfo on NetBSD (#10257) 2020-08-20 20:13:06 -07:00
poornas
a2a5ec93d3 fix: use global context for filling cache in the background (#10308) 2020-08-20 14:23:24 -07:00
飞雪无情
331c517a5b translate bucket versioning design document to zh_CN (#10300) 2020-08-20 13:18:51 -07:00
Harshavardhana
27a774cbe9 fix: FS mode should reject putBucketVersioning (#10307) 2020-08-20 13:18:06 -07:00
Klaus Post
8e6787a302 Fix TestDataUpdateTracker hanging (#10302)
Keep dataUpdateTracker while goroutine is starting.

This will ensure the object is updated once `start` returns

Tested with

```
λ go test -cpu=1,2,4,8 -test.run TestDataUpdateTracker -count=1000
PASS
ok      github.com/minio/minio/cmd      8.913s
```

Fixes #10295
2020-08-20 13:17:42 -07:00
Harshavardhana
59352d0ac2 load all blocking metadata in background (#10298)
most of this metadata already has fallbacks
and there is no good reason to load it
in a blocking fashion
2020-08-20 10:38:53 -07:00
Harshavardhana
75d44b3bae add disk for more context in bitrot errors (#10296) 2020-08-20 09:41:15 -07:00
Klaus Post
95ae6c4b49 Fix missing unlock in *healSequence.hasEnded() (#10305)
The background healing sequence would always hang when this function is called.
2020-08-20 08:48:09 -07:00
Harshavardhana
98ca770f81 mint fixes for go1.14 (#10294)
add go.mod to ensure that mint can be
built with go1.14.x
2020-08-19 20:07:37 -07:00
Krishnan Parthasarathi
ccd967e3be Add ExpiresAt to LicenseInfo (#10293) 2020-08-19 19:21:04 -07:00
KevinSmile
0ebb73ee2e use const instead of literals (#10292) 2020-08-19 16:43:52 -07:00
Harshavardhana
c8b84a0e9e Add nancy vulnerability scanner (#10289) 2020-08-19 14:25:21 -07:00
Ritesh H Shukla
3acb5cff45 Update code comment (#10287) 2020-08-19 14:24:58 -07:00
Harshavardhana
ab801ad3d4 build edge containers with go1.14 (#10279) 2020-08-19 12:08:11 +05:30
Harshavardhana
74116204ce handle fresh setup with mixed drives (#10273)
in fresh drive setups, when one of the drives is
a root drive, we should ignore such a root
drive and not proceed to format it.

This PR handles this properly by marking
the disks which are root disks, and they are
taken offline.
2020-08-18 14:37:26 -07:00
飞雪无情
2eb5f934d8 add bucket versioning zh_CN document (#10281) 2020-08-18 13:10:26 -07:00
Minio Trusted
b43d376a87 Update yaml files to latest version RELEASE.2020-08-18T19-41-00Z 2020-08-18 20:04:22 +00:00
Harshavardhana
e4a44f6224 fix: commonPrefixes behavior in ListObjectVersions (#10286)
```
$ aws s3api --profile minio --endpoint-url http://localhost:9003 \
    list-object-versions --bucket testbucket \
    --delimiter / --prefix Veeam/Archive/

{
    "CommonPrefixes": [
        {
            "Prefix": "Veeam/Archive/003/"
        }
    ]
}
```

Also add coverage tests similar to ListObjects to
catch errors in the future; skip these tests in FS mode
2020-08-18 12:19:44 -07:00
poornas
0272973175 Fix regression in web ui for retention (#10285)
Fixes: #10283 regression from PR #9259
2020-08-18 12:09:42 -07:00
Klaus Post
adca28801d feat: disable Parquet by default (breaking change) (#9920)
I have built a fuzz test and it crashes heavily in seconds and will OOM shortly after.
It seems like supporting Parquet is basically a completely open way to crash the 
server if you can upload a file and run s3 select on it.

Until Parquet is more hardened it is DISABLED by default since hostile 
crafted input can easily crash the server.

If you are in a controlled environment where it is safe to assume no hostile
content can be uploaded to your cluster you can safely enable Parquet.

To enable Parquet set the environment variable `MINIO_API_SELECT_PARQUET=on`
while starting the MinIO server.

Furthermore, we guard Parquet with recover functions.
2020-08-18 10:23:28 -07:00
Harshavardhana
d2a3f92452 fix: health handler for lockers (#10280) 2020-08-18 07:27:41 -07:00
Harshavardhana
ede86845e5 docs: Add policy variables for resource and conditions (#10278)
Bonus fix adds LDAP policy variable and clarifies the
usage of policy variables for temporary credentials.

fixes #10197
2020-08-17 17:39:55 -07:00
Harshavardhana
e57c742674 use single dynamic timeout for most locked API/heal ops (#10275)
newDynamicTimeout should be allocated once; in case
of temporary locks in config and IAM we should
have allocated the timeout once before the `for` loop

This PR doesn't fix any issue as such, but provides
enough dynamism for the timeout as per expectation.
2020-08-17 11:29:58 -07:00
Klaus Post
bb5976d727 healbucket: Send object version ID (#10263)
Based on our previous conversations I assume we should send the version
 id when healing an object.

Maybe we should even list object versions and heal all?
2020-08-17 08:25:44 -07:00
Minio Trusted
670724184c Update yaml files to latest version RELEASE.2020-08-16T18-39-38Z 2020-08-16 18:56:41 +00:00
Harshavardhana
f7c1a59de1 add validation logs for configured Logger/Audit HTTP targets (#10274)
extra logs in case of misconfiguration of audit/logger targets
2020-08-16 10:25:00 -07:00
飞雪无情
01a2ccc52f add bucket retention zh_CN document (#10264) 2020-08-14 22:12:03 -07:00
Anis Elleuch
51ba1dac49 listing: Fix result when prefix is an object with a slash (#10267)
In non-recursive mode, issuing a list request where the prefix
is an existing object with a trailing slash and the delimiter is a slash will
return entries in the object directory (data dir IDs)

```
$ aws s3api --profile minioadmin --endpoint-url http://localhost:9000 \
        list-objects-v2 --bucket testbucket --prefix code_of_conduct.md/ --delimiter '/'
{
    "CommonPrefixes": [
        {
            "Prefix":
"code_of_conduct.md/ec750fe0-ea7e-4b87-bbec-1e32407e5e47/"
        }
    ]
}
```

This commit adds a fast exit track in Walk() in this specific case.
2020-08-14 20:13:24 -07:00
Harshavardhana
a4463dd40f fix: storageClass shouldn't set the value upon failure (#10271) 2020-08-14 19:48:04 -07:00
Harshavardhana
83a82d818e allow lock tolerance to match storage-class drive tolerance (#10270) 2020-08-14 18:17:14 -07:00
Harshavardhana
1d1c4430b2 decrypt ETags in parallel around 500 at a time (#10261)
Listing speed-up gained: from 10secs down to 2secs
for the same 400 entries
2020-08-14 11:56:35 -07:00
Krishnan Parthasarathi
4e00b47b52 licverifier: fail verify if accountId is missing in license metadata (#10258) 2020-08-13 17:05:24 -07:00
Harshavardhana
43e6d1ce2d fix: missing proxy request by bucket for ListVersions (#10260) 2020-08-13 16:31:58 -07:00
Harshavardhana
30da442a85 rootDisk on containers can have different device Id (#10259)
use `/etc/hosts` instead of `/` to check for a common
device id; if the device is the same for `/etc/hosts`
and the --bind mount, we detect a root disk.

Bonus: enhance healthcheck logging by adding maintenance
tags to all messages.
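
A minimal sketch (Linux-specific; `/data` is an assumed mount path) of the device-id comparison described above:

```
package main

import (
	"fmt"
	"log"
	"syscall"
)

// sameDevice reports whether two paths live on the same device; when
// /etc/hosts and the bind-mounted path match, the mount is on the
// root disk.
func sameDevice(a, b string) (bool, error) {
	var sa, sb syscall.Stat_t
	if err := syscall.Stat(a, &sa); err != nil {
		return false, err
	}
	if err := syscall.Stat(b, &sb); err != nil {
		return false, err
	}
	return sa.Dev == sb.Dev, nil
}

func main() {
	rootDisk, err := sameDevice("/etc/hosts", "/data") // assumed mount
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("path is on the root disk:", rootDisk)
}
```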
2020-08-13 15:21:20 -07:00
Harshavardhana
038d91feaa fix: add public certs automatically as part of global CAs (#10256) 2020-08-13 09:46:50 -07:00
Harshavardhana
e7ba78beee use GlobalContext instead of context.Background when possible (#10254) 2020-08-13 09:16:01 -07:00
Krishnan Parthasarathi
ab43804efd licverifier: Validate JWT token expiry (#10253)
With this change the expiry is validated for the license key JWT
2020-08-12 21:31:52 -07:00
Minio Trusted
1c865dd119 Update yaml files to latest version RELEASE.2020-08-13T02-39-50Z 2020-08-13 02:57:25 +00:00
Harshavardhana
b32d0a5b60 use the correct endpoints for offline drives 2020-08-12 19:17:49 -07:00
poornas
79e21601b0 fix: web handlers to enforce replication (#10249)
This PR also preserves source ETag for replication
2020-08-12 17:32:24 -07:00
Harshavardhana
34253aa595 feat: cache env value in-case network is not reachable (#10251) 2020-08-12 16:53:15 -07:00
Harshavardhana
79ed7ce451 fs: listObjects shouldn't take FS locks while listing (#10248) 2020-08-12 15:23:14 +05:30
Harshavardhana
900eebb9a4 use jwt instead of basicAuth for webEnv (#10246) 2020-08-11 16:09:34 -07:00
飞雪无情
6914b2c99d Add bucket replication zh_CN document (#10243) 2020-08-11 11:33:01 -07:00
Harshavardhana
0dd3a08169 move the certPool loader function into pkg/certs (#10239) 2020-08-11 08:29:50 -07:00
Klaus Post
f8f290e848 security: Remove insecure custom headers (#10244)
Background: https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w

Remove these custom headers from incoming and outgoing requests.
2020-08-11 08:29:29 -07:00
Harshavardhana
9179cdfc9d update mint tests with new minio-py APIs (#10238) 2020-08-10 14:32:17 -07:00
Krishnan Parthasarathi
76b6dc0112 Add licverifier package (#10237)
license verification package implements a simple library to 
verify MinIO Subnet license keys.
2020-08-10 13:30:12 -07:00
Harshavardhana
ce303f5c7e update node for mint 14.x (#10236) 2020-08-10 11:25:40 -07:00
飞雪无情
b4b7a18497 add bucket quota zh_CN document (#10232) 2020-08-10 10:55:34 -07:00
Harshavardhana
1e2ebc9945 feat: time to bring back http2.0 support (#10230)
Bonus move our CI/CD to go1.14
2020-08-10 09:02:29 -07:00
飞雪无情
a49e3647b6 add bucket lifecycle zh_CN document (#10231) 2020-08-09 02:29:25 -07:00
Minio Trusted
954e17c3d0 Update yaml files to latest version RELEASE.2020-08-08T04-50-06Z 2020-08-07 22:32:27 -07:00
Harshavardhana
2a9819aff8 fix: refactor background heal for cluster health (#10225) 2020-08-07 19:43:06 -07:00
Ritesh H Shukla
8049184dcc fix: documentation changes in replication docs (#10209) 2020-08-07 13:30:52 -07:00
Harshavardhana
6c6137b2e7 add cluster maintenance healthcheck drive heal affinity (#10218) 2020-08-07 13:22:53 -07:00
飞雪无情
19c4f3082b update and improve bucket notifications zh_CN document (#10224) 2020-08-07 12:36:27 -07:00
Anis Elleuch
433c2831ae fix: typo in parsing non remote env variables (#10223) 2020-08-07 09:57:20 -07:00
Anis Elleuch
9138b2b503 Avoid duplicate headers when proxying S3 listing requests (#10220) 2020-08-07 04:10:16 -07:00
Minio Trusted
6d64aab420 Update yaml files to latest version RELEASE.2020-08-07T01-23-07Z 2020-08-07 01:43:14 +00:00
Harshavardhana
77509ce391 Support looking up environment remotely (#10215)
adds a feature where we can fetch the MinIO
command-line environment remotely. This is primarily
meant to add some stateless nature to the MinIO
deployment in k8s environments: the MinIO operator
would run a webhook service endpoint which can be
used to fetch any environment value in a
generalized approach.
2020-08-06 18:03:16 -07:00
poornas
adcaa6f9de fix: Change ListBucketTargets handler (#10217)
to list all targets across a tenant.
Also fixing some validations.
2020-08-06 17:10:21 -07:00
飞雪无情
ce129efa09 update and improve config,throttle zh_CN document (#10214) 2020-08-06 11:43:30 -07:00
poornas
121164db56 fix: relax some replication validations (#10210)
Also inherit storage class from source object
if replication configuration does not have a storage
class specified for destination bucket.
2020-08-05 20:01:20 -07:00
Minio Trusted
195f95196e Update yaml files to latest version RELEASE.2020-08-05T21-34-13Z 2020-08-05 21:58:17 +00:00
Harshavardhana
a20d4568a2 fix: make sure to use uniform drive count calculation (#10208)
It is possible in situations where the server was deployed
in an asymmetric configuration in the past, such as

```
minio server ~/fs{1...4}/disk{1...5}
```

This results in a setDriveCount of 10 in older releases,
but with fairly recent releases we have moved to
having server affinity, which means that the set drive
count ascertained from the above config will now be '4'.

While the object layer makes sure that we honor
`format.json`, the storageClass configuration however
was by mistake using the global value obtained
by heuristics, which leads to prematurely using
lower parity without it being requested by an
administrator.

This PR fixes this behavior.
2020-08-05 13:31:12 -07:00
Harshavardhana
e656beb915 feat: allow service accounts to be generated with OpenID STS (#10184)
Bonus: also fix a bug where we did not purge relevant
service accounts generated by rotating credentials
appropriately; service accounts should become invalid
as soon as their corresponding parent user becomes invalid.

Since service accounts themselves always carry the parent claim,
we would never reach this problem, as access gets
rejected at the IAM policy layer.
2020-08-05 13:08:40 -07:00
Harshavardhana
cd04600862 update minio-go version to latest v7.0.3 (#10201)
- Add methods to set/remove replication rules (poornas)
- fix: only SSE-C headers should be applied to destination (Harshavardhana)
- fix: avoid data race by copying the buffer (Harshavardhana)
- remove deprecated build badges (Harshavardhana)
- fix: handle readFull bug with certain readers (Harshavardhana)
- fix a typo in README.md (Julien K)
- lifecycle: Fix marshaling expiration date/days (Anis Elleuch)
- add replication-status, expiration headers (Harshavardhana)
- Return object's version id in StatObject(Anis Elleuch)
- display appropriate funcName with nested callers (Harshavardhana)
- allow KMS tests to be run in the CI/CD (Harshavardhana)
- fix: removing lifecycle properly (Harshavardhana)
- feat: Add ListenNotification API to listen for all events (Harshavardhana)
2020-08-05 08:14:36 -07:00
poornas
3acc0ebb81 fix: Change service name in Arn for replication (#10205) 2020-08-05 00:43:18 -07:00
poornas
88daaef76b Validate object lock when setting replication config. (#10200)
Check if object lock is enabled on
destination bucket while setting replication
configuration on a object lock enabled bucket.
2020-08-04 23:02:27 -07:00
Minio Trusted
1cdaced8b6 Update yaml files to latest version RELEASE.2020-08-04T23-10-51Z 2020-08-04 23:46:06 +00:00
Harshavardhana
0b8255529a fix: proxies set keep-alive timeouts to be system dependent (#10199)
Split the DialContext into one for internode and another
for all other external communications, especially
proxy forwarders, gateway transport, etc.
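
A sketch with assumed timeout values of the split: one dialer tuned for internode traffic, a separate one for external communication, so keep-alive behavior is explicit rather than system dependent:

```
package main

import (
	"net"
	"net/http"
	"time"
)

func newDialer(timeout, keepAlive time.Duration) *net.Dialer {
	return &net.Dialer{Timeout: timeout, KeepAlive: keepAlive}
}

func main() {
	// Tighter settings for internode traffic (assumed values).
	internode := &http.Transport{
		DialContext: newDialer(15*time.Second, 15*time.Second).DialContext,
	}
	// More lenient settings for proxy forwarders, gateway transports.
	external := &http.Transport{
		DialContext: newDialer(30*time.Second, 30*time.Second).DialContext,
	}
	_ = internode
	_ = external
}
```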
2020-08-04 14:55:53 -07:00
Harshavardhana
019fe69a57 fix: reduce an extra system call for writes instead fail later (#10187) 2020-08-04 12:09:41 -07:00
Harshavardhana
d90ab904e7 fix: multi-tenant deployment guide to use ellipses format (#10192) 2020-08-04 08:15:37 -07:00
Anis Elleuch
6ae30b21c9 fix ILM should not remove a protected version (#10189) 2020-08-03 23:04:40 -07:00
Harshavardhana
b16781846e allow server to start even with corrupted/faulty disks (#10175) 2020-08-03 18:17:48 -07:00
Harshavardhana
5ce82b45da add CopyObject optimization when source and destination are same (#10170)
when the source and destination are the same and versioning is enabled
on the destination bucket - we do not need to re-create the entire
object once again, which optimizes space utilization.

Cases this PR does not support:

- any pre-existing legacy object will not
  be preserved in this manner, meaning a new
  dataDir will be created.

- key-rotation and storage class changes
  of course will never re-use the dataDir
2020-08-03 16:21:10 -07:00
Harshavardhana
e99bc177c0 fix: allow FS mode situations when conflicting files exist (#10185)
conflicting files can exist on FS at
`.minio.sys/buckets/testbucket/policy.json/`; this is an
expected valid scenario for FS mode, so allow it to work,
i.e. ignore the conflict and move forward
2020-08-03 13:20:49 -07:00
Harshavardhana
b68bc75dad fix: quorum calculation mistake with reduced parity (#10186)
With reduced parity our write quorum should be the same
as the read quorum, but the code was still assuming

```
readQuorum+1
```

in all situations, which is not necessary.
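
Illustrative arithmetic only, a sketch of the rule the fix implies rather than the exact MinIO code: with reduced parity, write quorum equals read quorum, and the +1 is only needed when data and parity counts are equal:

```
package main

import "fmt"

func quorums(drives, parity int) (readQ, writeQ int) {
	data := drives - parity
	readQ = data
	writeQ = data
	if data == parity {
		writeQ = data + 1 // break the tie only in the balanced case
	}
	return
}

func main() {
	r, w := quorums(8, 2) // 8 drives with reduced parity of 2
	fmt.Println(r, w)     // 6 6 - not 6 and readQuorum+1 = 7
}
```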
2020-08-03 12:15:08 -07:00
Harshavardhana
d61eac080b fix: connection_string should override other params (#10180)
closes #9965
2020-08-03 09:16:00 -07:00
Harshavardhana
d8be9f12a2 fix: pull credits in docker from master branch (#10177) 2020-08-02 09:01:57 -07:00
飞雪无情
0cf4539fe8 fix: update and improve zh_CN documentation (#10174) 2020-08-01 11:52:46 -07:00
Harshavardhana
db6bba709a update CREDITS with minio/selfupdate project 2020-07-31 23:58:30 -07:00
Minio Trusted
2174a22835 Update yaml files to latest version RELEASE.2020-07-31T03-39-05Z 2020-07-31 03:58:04 +00:00
poornas
a8dd7b3eda Refactor replication target management. (#10154)
Generalize replication target management so
that remote targets for a bucket can be
managed with ARNs. `mc admin bucket remote`
command will be used to manage targets.
2020-07-30 19:55:22 -07:00
Harshavardhana
25a55bae6f fix: avoid buffering of server sent events by proxies (#10164) 2020-07-30 19:45:12 -07:00
Harshavardhana
fe157166ca fix: Pass context all the way down to the network call in lockers (#10161)
Context timeouts might race with each other when timeouts are low,
i.e. when two lock attempts happen very quickly on the same resource
while the servers are still trying to establish quorum.

This situation can lead to locks being held which wouldn't be unlocked,
and subsequent lock attempts would fail.

This would require a complete server restart. A potential case for this
issue to happen is when a server is booting up and we are trying
to hold a 'transaction.lock' in quick bursts with short timeouts.
2020-07-29 23:15:34 -07:00
Adam Brown
f7259adf83 Update LastUpdate timestamp before save (#10152) 2020-07-28 13:20:50 -07:00
Harshavardhana
6669560cb9 turn-off bucket usage metrics in gateway mode (#10150)
closes #10147
2020-07-28 13:04:26 -07:00
poornas
b46ab7e921 Rename replication target handler (#10142)
Rename replication target handler to a generic bucket target handler
2020-07-28 11:50:47 -07:00
Harshavardhana
27266f8a54 fix: if OPA set do not enforce policy claim (#10149) 2020-07-28 11:47:57 -07:00
poornas
1b6ba0d062 Add validation in cache for offline drives (#10146)
closes #10144
2020-07-28 10:06:52 -07:00
飞雪无情
0d0f09ee66 update and improve zh_CN readme documentation (#10145) 2020-07-28 09:08:22 -07:00
Harshavardhana
f200a7fb6a fix: speed up OBD tests avoid unnecessary memory allocation (#10141)
replace the dummy buffer with nullReader{} instead,
to avoid large memory allocations in memory-
constrained environments; this allows running
OBD tests in such environments.
2020-07-27 14:51:59 -07:00
Minio Trusted
98691f75bc Update yaml files to latest version RELEASE.2020-07-27T18-37-02Z 2020-07-27 18:57:10 +00:00
Harshavardhana
47e304d03c fix: add missing content-disposition from CORS handler (#10137) 2020-07-27 09:03:38 -07:00
Harshavardhana
9108abf204 fix: allow shareable URLs with rotating creds (#10135)
closes #8935
2020-07-27 09:02:53 -07:00
Harshavardhana
6529dcb3b5 fix: gateway Walk() implementation to list correct contents (#10131)
closes #10122
2020-07-26 22:56:05 -07:00
Harshavardhana
4b22b288a6 add license section to be about Apache 2.0
Bonus: remove the fossa reference;
it produces a lot of false positives
and is really hard to fix/manage, so removing
the reference here avoids any confusion
2020-07-26 01:00:01 -07:00
Harshavardhana
abbf6ce6cc simplify JWKS decoding in OpenID and more tests (#10119)
add tests for non-compliant Azure AD behavior
with "nonce" to fail properly and treat it as
expected behavior for non-standard JWT tokens.
2020-07-25 08:42:41 -07:00
Ritesh H Shukla
416ec316bd Add clarification for minimum drives per server (#10130) 2020-07-24 22:09:01 -07:00
Harshavardhana
5ffc733eec fix: enforce bucket quota from browser uploads (#10129) 2020-07-24 21:16:54 -07:00
Minio Trusted
8a23988711 Update yaml files to latest version RELEASE.2020-07-24T22-43-05Z 2020-07-24 23:00:32 +00:00
Harshavardhana
35212b673e add unformatted disk as part of the error list (#10128)
these errors should be ignored in the quorum
error calculation to ensure that we don't
prematurely return an unformatted disk error
as part of API calls
2020-07-24 13:16:11 -07:00
Harshavardhana
57ff9abca2 Apply quota usage cache invalidation per second (#10127)
Allow faster lookups for quota check enforcement
2020-07-24 12:24:21 -07:00
Jorge Israel Peña
4752323e1c Use hdfs.Readdir() to optimize HDFS directory listings (#10121)
Currently, listing directories on HDFS incurs a per-entry remote Stat() call
penalty, the cost of which can really blow up on directories with many
entries (+1,000) especially when considered in addition to peripheral
calls (such as validation) and the fact that minio is an intermediary to the
client (whereas other clients listed below can query HDFS directly).

Because listing directories this way is expensive, the Golang HDFS library
provides the [`Client.Open()`] function which creates a [`FileReader`] that is
able to batch multiple calls together through the [`Readdir()`] function.

This is substantially more efficient for very large directories.

In one case we were witnessing about +20 seconds to list a directory with 1,500
entries, admittedly large, but the Java hdfs ls utility as well as the HDFS
library sample ls utility were much faster.

Hadoop HDFS DFS (4.02s):

    λ ~/code/minio → use-readdir
    » time hdfs dfs -ls /directory/with/1500/entries/
    …
    hdfs dfs -ls   5.81s user 0.49s system 156% cpu 4.020 total

Golang HDFS library (0.47s):

    λ ~/code/hdfs → master
    » time ./hdfs ls -lh /directory/with/1500/entries/
    …
    ./hdfs ls -lh   0.13s user 0.14s system 56% cpu 0.478 total

mc and minio **without** optimization (16.96s):

    λ ~/code/minio → master
    » time mc ls myhdfs/directory/with/1500/entries/
    …
    ./mc ls   0.22s user 0.29s system 3% cpu 16.968 total

mc and minio **with** optimization (0.40s):

    λ ~/code/minio → use-readdir
    » time mc ls myhdfs/directory/with/1500/entries/
    …
    ./mc ls   0.13s user 0.28s system 102% cpu 0.403 total

[`Client.Open()`]: https://godoc.org/github.com/colinmarc/hdfs#Client.Open
[`FileReader`]: https://godoc.org/github.com/colinmarc/hdfs#FileReader
[`Readdir()`]: https://godoc.org/github.com/colinmarc/hdfs#FileReader.Readdir
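
A minimal sketch of the batched listing with the colinmarc/hdfs client (the namenode address and directory are assumptions, and this is not MinIO's actual gateway code):

```
package main

import (
	"fmt"
	"io"
	"log"

	"github.com/colinmarc/hdfs"
)

func main() {
	client, err := hdfs.New("namenode:8020") // assumed namenode address
	if err != nil {
		log.Fatal(err)
	}
	f, err := client.Open("/directory/with/1500/entries") // assumed path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Readdir batches many entries per remote call instead of issuing
	// a Stat() per entry, which is what made listings so expensive.
	for {
		entries, err := f.Readdir(1024)
		for _, fi := range entries {
			fmt.Println(fi.Name())
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
	}
}
```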
2020-07-24 11:31:51 -07:00
Klaus Post
11593c6cc4 Usage: Reset merged info when updating (#10126)
When merging multiple buckets, reset between each update.

This avoids merging the same usage metrics multiple times,
which resulted in duplicate data entries.
2020-07-24 11:02:10 -07:00
Harshavardhana
10025bda45 fix: add missing response headers to CORS handler (#10124) 2020-07-24 00:46:51 -07:00
Praveen raj Mani
b800541fbe fix: a typo in the NSQ notification target environment key (#10118)
fixes #10100
2020-07-23 12:19:36 -07:00
Harshavardhana
3a73f1ead5 refactor server update behavior (#10107) 2020-07-23 08:03:31 -07:00
Anis Elleuch
1340281cb8 Fix marshaling expiration field in lifecycle (#10117) 2020-07-23 08:01:25 -07:00
poornas
b9be841fd2 Add missing validation for replication API conditions (#10114) 2020-07-22 17:39:40 -07:00
Harshavardhana
73890f31af add minisign verification for container builds (#10115) 2020-07-22 17:09:31 -07:00
Anis Elleuch
456b2ef6eb Avoid healing to be stuck with many concurrent event listeners (#10111)
If there are many listeners to bucket notifications or to the trace
subsystem, healing fails to work properly since it suspends itself when
the number of concurrent connections is above a certain threshold.

These connections are also continuous and not costly (*no disk access*),
so it is okay to just ignore them in waitForLowHTTPReq().
2020-07-22 13:16:55 -07:00
Harshavardhana
ad8b53e6d4 add mips64 support for cross compilation (#10106) 2020-07-21 23:56:14 -07:00
Harshavardhana
0b5d1bc91d fix: bucket replication docs (#10104)
* fix: bucket replication docs

* Update docs/bucket/replication/README.md

Co-authored-by: kannappanr <30541348+kannappanr@users.noreply.github.com>

Co-authored-by: kannappanr <30541348+kannappanr@users.noreply.github.com>
2020-07-21 22:19:30 -07:00
poornas
c43da3005a Add support for server side bucket replication (#9882) 2020-07-21 17:49:56 -07:00
Minio Trusted
ca4c15bc63 Update yaml files to latest version RELEASE.2020-07-22T00-26-33Z 2020-07-22 00:44:03 +00:00
Harshavardhana
a880283593 Send the lower level error directly from GetDiskID() (#10095)
this is to detect situations of corrupted disk
format etc. quickly, and keep the disk online
in such scenarios so that requests fail appropriately.
2020-07-21 13:54:06 -07:00
Bruce Wang
e464a5bfbc Fix bug with fields that contain trimming spaces (#10079)
A string x might contain surrounding spaces and needs to be trimmed. For
example, in csv files, there might be extra spaces in a field that
ought to meet a query condition that contains the value without
those spaces. This applies to both the intCast and floatCast functions.
2020-07-21 12:57:09 -07:00
Harshavardhana
eb6bf454f1 fix: copyObject encryption from unencrypted object (#10102)
This is a continuation of #10085
2020-07-21 12:25:01 -07:00
Harshavardhana
ec06089eda fix: re-implement cluster healthcheck (#10101) 2020-07-20 18:31:22 -07:00
Harshavardhana
0c4be55936 fix: fix lockup in merge-walk pool (#10098)
Fixes two different types of problems

- continuation of the problem seen in FS #9992,
  which was not fixed for erasure coded deployments;
  reproduced this issue with spark and it's fixed now

- another issue was leaking walk go-routines which
  would lead to high memory usage and crash the system;
  this is simply because all the walks which were purged
  at the top limit had leaking end walkers which would
  consume memory endlessly.

closes #9966
closes #10088
2020-07-20 17:28:26 -07:00
Harshavardhana
11d21d5d1b fix: pass around the correct drives per set (#10097)
this is a precursor change before adding parity
based SLA across zones instead of same stripe size
2020-07-20 16:38:40 -07:00
findmyname666
f9648d3976 add tests lifecycle rules with empty prefix (#10093) 2020-07-20 13:12:50 -07:00
Harshavardhana
2955aae8e4 feat: Add notification support for bucketCreates and removal (#10075) 2020-07-20 12:52:49 -07:00
Harshavardhana
9fd836e51f add dnsStore interface for upcoming operator webhook (#10077) 2020-07-20 12:28:48 -07:00
Anis Elleuch
518f44908c fs: Close object fs.json before deletion (#10092)
NFS fails when deleting a file while it is already opened. The reason is
that the object fs.json meta file is opened but not closed before
removal.
2020-07-20 08:52:24 -07:00
Minio Trusted
38f60b3c1d Update yaml files to latest version RELEASE.2020-07-20T02-25-16Z 2020-07-20 02:41:52 +00:00
Harshavardhana
e2c71717f8 add different TCP timeouts for internal and incoming (#10090)
closes #10086
2020-07-19 17:16:12 -07:00
Harshavardhana
7764c542f2 allow claims to be optional in STS (#10078)
not all claims need to be present for
the JWT claim, let the policies not
exist and only apply which are present
when generating the credentials

once credentials are generated then
those policies should exist, otherwise
the request will fail.
2020-07-19 15:34:01 -07:00
findmyname666
aa6468932b make sure lifecycle rule ID is present (#10084) 2020-07-19 15:10:05 -07:00
Harshavardhana
30104cb12b docs: fix veeam document formatting 2020-07-18 18:38:12 -07:00
Harshavardhana
d53e560ce0 fix: copyObject key rotation issue (#10085)
- copyObject in-place decryption failed
  due to incorrect verification of headers
- do not decode ETag when object is encrypted
  with SSE-C, so that pre-conditions don't fail
  prematurely.
2020-07-18 17:36:32 -07:00
Anis Elleuch
44c8af66ad fs: Fix expiry regression after versioning refactor (#10083)
Do not ignore non-versioned objects in lifecycle compute 
action function.
2020-07-18 15:43:13 -07:00
Minio Trusted
68aaa5bbc3 Update yaml files to latest version RELEASE.2020-07-18T18-48-16Z 2020-07-18 19:04:01 +00:00
Harshavardhana
17747db93f fix: support healing older content (#10076)
This PR adds support for healing older
content i.e from 2yrs, 1yr. Also handles
other situations where our config was
not encrypted yet.

This PR also ensures that our Listing
is consistent and quorum friendly,
such that we don't list partial objects
2020-07-17 17:41:29 -07:00
Harshavardhana
3fe27c8411 fix: In federated setup dial all hosts to figure out online host (#10074)
In federated NAS gateway setups, multiple hosts in srvRecords
were picked at random, which could mean that if one of the
hosts was down the request could indeed fail, and if the client
retried it would succeed. Instead, allow the server to figure
out the current online host quickly, such that we can
exclude the host which is down.

At most, the attempt to look for a downed node takes
300 milliseconds; if the node is taking longer to respond
than this value we simply ignore it and move to the next node.
Total attempts equal the number of srvRecords; if no
server is online we simply fall back to the last dialed host.
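
A minimal sketch, with assumed host names, of the probing described above: a 300ms budget per dial attempt and a fallback to the last dialed host:

```
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

// findOnlineHost probes each srvRecord host with a 300ms budget and
// returns the first one that accepts a TCP connection, falling back
// to the last dialed host when none respond in time.
func findOnlineHost(hosts []string) string {
	d := &net.Dialer{}
	for _, h := range hosts {
		ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
		conn, err := d.DialContext(ctx, "tcp", h)
		cancel()
		if err == nil {
			conn.Close()
			return h
		}
	}
	return hosts[len(hosts)-1] // no host online: fall back to the last one
}

func main() {
	fmt.Println(findOnlineHost([]string{"nas1:9000", "nas2:9000"})) // assumed hosts
}
```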
2020-07-17 14:25:47 -07:00
Harshavardhana
14b1c9f8e4 fix: return Range errors after If-Matches (#10045)
closes #7292
2020-07-17 13:01:22 -07:00
Klaus Post
d84fc58cac fix: CheckParts endpoint call to correct API (#10073)
CheckParts is calling the wrong endpoint, so instead of 
checking parts, it is writing metadata.
2020-07-17 10:17:59 -07:00
Harshavardhana
187c3f62df fix: heal replaced drives properly (#10069)
healing was not working properly when drives were
replaced, due to the error check in the root disk
calculation; this PR fixes this behavior.

This PR also adds an additional fix for missing
metadata entries from .minio.sys as part of
disk healing.

Added code to ignore and print more context-
sensitive errors for better debugging.

This PR is a continuation of the fix in 7b14e9b660
2020-07-17 10:08:04 -07:00
Anis Elleuch
4a447a439a Fix lifecycle rules not applied in some cases (#10072)
HasActiveRules was not behaving as expected; this commit fixes it
and adds more unit tests.
2020-07-17 09:48:00 -07:00
Harshavardhana
4bfc50411c fix: return versionId in tagging APIs (#10068) 2020-07-16 22:38:58 -07:00
Eco
5e8392c8ef Update Veeam integration doc with immutability references (#10067) 2020-07-16 17:16:53 -07:00
Harshavardhana
d3c81a6e93 add missing available space from metrics (#10065) 2020-07-16 14:43:48 -07:00
Harshavardhana
7342b5355f fix: obtain correct location string with DNS style buckets (#10060)
closes #10054
2020-07-16 13:28:29 -07:00
鸿则
1341bf5a9e add preview width constraint (#10062)
* fix: add preview width constraint

* fix: set object item dropdown menu nowrap style
2020-07-16 10:46:27 -07:00
findmyname666
48aebf2d9d allow lifecycle rules with overlapping prefixes (#10053) 2020-07-16 07:39:41 -07:00
Harshavardhana
7b14e9b660 fix: diskInfo should check diskID only if disk is online (#10058)
closes #10057
2020-07-16 07:30:05 -07:00
Harshavardhana
07eb24b775 add absolute path for images (#10056) 2020-07-16 00:06:14 -07:00
Harshavardhana
cd849bc2ff update STS docs with new values (#10055)
Co-authored-by: Poorna <poornas@users.noreply.github.com>
2020-07-15 14:36:14 -07:00
Harshavardhana
9c66812b99 Add missing action stringer for DeleteVersionAction (#10049) 2020-07-15 17:28:49 +05:30
Harshavardhana
ec91fa55db docs: Add more STS docs with dex and python example (#10047) 2020-07-15 17:25:55 +05:30
Klaus Post
00d3cc4b69 Enforce quota checks after crawl (#10036)
Enforce bucket quotas when crawling has finished. 
This ensures that we will not do quota enforcement on old data.

Additionally, delete less if we are closer to quota than we thought.
2020-07-14 18:59:05 -07:00
Harshavardhana
14ff7f5fcf add hdfs sub-path support (#10046)
users who don't have access to the HDFS rootPath '/'
can optionally specify `minio gateway hdfs hdfs://namenode:8200/path`,
to which they have access, allowing all writes to be
performed at `/path`.

NOTE: once configured in this manner you need to make
sure the command line is correctly specified, otherwise
your data might not be visible

closes #10011
2020-07-14 15:49:10 -07:00
Minio Trusted
a97ce3c96e Update yaml files to latest version RELEASE.2020-07-14T19-14-30Z 2020-07-14 19:32:27 +00:00
Harshavardhana
cfc5681b36 fix: policy_test to use minio-go/v7 2020-07-14 10:30:00 -07:00
Harshavardhana
369a876ebe fix: handle array policies in JWT claim (#10041)
PR #10014 was not complete as it only handled
policy claims partially.
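
A sketch, with a hypothetical claim value, of accepting a policy claim that may arrive either as a single string or as an array of strings once the JWT has been decoded:

```
package main

import "fmt"

// policiesFromClaim normalizes a decoded JWT claim that may be a
// single string or an array of strings into a string slice.
func policiesFromClaim(v interface{}) []string {
	switch c := v.(type) {
	case string:
		return []string{c}
	case []interface{}: // JSON arrays decode to []interface{}
		out := make([]string, 0, len(c))
		for _, e := range c {
			if s, ok := e.(string); ok {
				out = append(out, s)
			}
		}
		return out
	}
	return nil
}

func main() {
	fmt.Println(policiesFromClaim("readwrite"))
	fmt.Println(policiesFromClaim([]interface{}{"readonly", "diagnostics"}))
}
```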
2020-07-14 10:26:47 -07:00
Anis Elleuch
778e9c864f Move dependency from minio-go v6 to v7 (#10042) 2020-07-14 09:38:05 -07:00
Harshavardhana
a2616b8227 allow turning off secure ciphers (#10038)
this PR allows legacy support for big-data
applications which run older Java versions
that do not support the secure ciphers
currently defaulted in MinIO. This option
optionally allows turning them off such
that client and server can negotiate the
best ciphers themselves.

This env is purposefully not documented,
meant as a last resort when the client
application cannot be changed easily.
2020-07-13 14:20:21 -07:00
Minio Trusted
8d8f28eae4 Update yaml files to latest version RELEASE.2020-07-13T18-09-56Z 2020-07-13 18:29:35 +00:00
Harshavardhana
e7d7d5232c fix: admin info output and improve overall performance (#10015)
- admin info node offline check is now quicker
- admin info now doesn't duplicate the code
  across doing the same checks for disks
- rely on StorageInfo to return appropriate errors
  instead of calling locally.
- diskID checks now return proper errors when
  disk not found v/s format.json missing.
- add more disk states for more clarity on the
  underlying disk errors.
2020-07-13 09:51:07 -07:00
Harshavardhana
1d65ef3201 fix: deletes on older format properly (#10029)
while we handle all situations for writes and reads
on the older format, what we didn't cater for properly
yet was delete, where we only ended up deleting
just `xl.meta`; instead we should allow all the
deletes to go through for the older format on
buckets without versioning enabled.
2020-07-13 09:01:17 -07:00
Andreas Auernhammer
8d425e3372 fix etcd module dependency (#10032)
This commit fixes a dependency resolution problem w.r.t. minio => etcd.
Building any project depending on minio (e.g. mc) currently fails with
latest master since the module replace directive is not honored for
transitive / indirect dependencies.

This commit fixes this by adding the etcd module directly instead of
using a module replace instruction.
2020-07-13 07:46:34 -07:00
Minio Trusted
3939c6f6e7 Update yaml files to latest version RELEASE.2020-07-12T19-14-17Z 2020-07-12 19:48:42 +00:00
Harshavardhana
60d91234b9 fix: avoid broken link when preview image (#10021)
closes #9589
2020-07-12 12:09:22 -07:00
Harshavardhana
37c14207d6 fix: cors handling again for not just OPTIONS request (#10025)
CORS notoriously requires specific headers to be
handled appropriately in request and response;
using the cors package as part of handlerFunc() for
the OPTIONS method lacks the necessary control this
package needs to add headers.
2020-07-12 10:56:57 -07:00
Harshavardhana
3b9fbf80ad fix: make sure to use new restClient for healthcheck (#10026)
Without instantiating a new rest client we can
have a recursive error which can lead to the
healthcheck always returning offline; this can
prematurely take the servers offline.
2020-07-11 22:19:38 -07:00
Minio Trusted
c2fdf73491 Update yaml files to latest version RELEASE.2020-07-11T21-14-23Z 2020-07-11 21:30:53 +00:00
Harshavardhana
143f9371c6 fix: loading users regression
additionally, move to the latest gorilla/mux master
to fix the DNS style bucket routing regression

resolves #10022
resolves #10023
2020-07-11 14:03:27 -07:00
Harshavardhana
3f1902face fix: cors should be available on all paths (#10020) 2020-07-11 13:49:24 -07:00
Harshavardhana
c0adb52213 sync to disk only upon successful legacy metadata rename (#10018) 2020-07-11 09:37:34 -07:00
Harshavardhana
3520e946a2 fix: versioning docs add more examples 2020-07-11 00:57:46 -07:00
Harshavardhana
f38adc1865 cleanup security overview guide 2020-07-11 00:34:56 -07:00
Harshavardhana
d5ff1c8e3b fix docs image urls to be absolute path 2020-07-11 00:27:30 -07:00
Minio Trusted
ad7417bc50 Update yaml files to latest version RELEASE.2020-07-11T06-07-16Z 2020-07-11 06:28:08 +00:00
763 changed files with 61987 additions and 24030 deletions


@@ -1,2 +1,9 @@
.git
.github
docs
default.etcd
browser
*.gz
*.tar.gz
*.bzip2
*.zip


@@ -10,9 +10,10 @@
## Types of changes
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Optimization (provides speedup with no functional changes)
- [ ] Breaking change (fix or feature that would cause existing functionality to change)
## Checklist:
- [ ] Fixes a regression (If yes, please add `commit-id` or `PR #` here)
- [ ] Documentation needed
- [ ] Unit tests needed
- [ ] Documentation updated
- [ ] Unit tests added/updated

.github/stale.yml vendored

@@ -14,6 +14,7 @@ onlyLabels: []
exemptLabels:
- "security"
- "pending discussion"
- "do not close"
# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false


@@ -1,51 +0,0 @@
name: "Code scanning - action"
on:
push:
pull_request:
schedule:
- cron: '0 19 * * 0'
jobs:
CodeQL-Build:
# CodeQL runs on ubuntu-latest and windows-latest
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v2
with:
# We must fetch at least the immediate parents so that if this is
# a pull request then we can checkout the head.
fetch-depth: 2
# If this run was triggered by a pull request event, then checkout
# the head of the pull request instead of the merge commit.
- run: git checkout HEAD^2
if: ${{ github.event_name == 'pull_request' }}
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: go, javascript
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1


@@ -4,7 +4,6 @@ on:
pull_request:
branches:
- master
- release
jobs:
build:
@@ -12,7 +11,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.13.x]
go-version: [1.15.x, 1.16.x]
os: [ubuntu-latest, windows-latest]
steps:
- uses: actions/checkout@v2
@@ -27,7 +26,6 @@ jobs:
env:
CGO_ENABLED: 0
GO111MODULE: on
MINIO_CI_CD: 1
run: |
go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
go test -v --timeout 50m ./...
@@ -36,9 +34,13 @@ jobs:
env:
CGO_ENABLED: 0
GO111MODULE: on
MINIO_CI_CD: 1
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
sudo apt-get install devscripts shellcheck
nancy_version=$(curl --retry 10 -Ls -o /dev/null -w "%{url_effective}" https://github.com/sonatype-nexus-community/nancy/releases/latest | sed "s/https:\/\/github.com\/sonatype-nexus-community\/nancy\/releases\/tag\///")
curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-${nancy_version}-linux-amd64 && chmod +x nancy
go list -m all | ./nancy sleuth
make
diff -au <(gofmt -s -d cmd) <(printf "")
diff -au <(gofmt -s -d pkg) <(printf "")


@@ -23,5 +23,11 @@ issues:
exclude:
- should have a package comment
- error strings should not be capitalized or end with punctuation or a newline
run:
skip-dirs:
- pkg/rpc
- pkg/argon2
service:
golangci-lint-version: 1.20.0 # use the fixed version to not introduce new linters unexpectedly


@@ -114,11 +114,11 @@ checksum:
signs:
-
signature: "${artifact}.asc"
signature: "${artifact}.minisig"
cmd: "sh"
args:
- '-c'
- 'gpg --quiet --detach-sign -a ${artifact}'
- 'minisign -s /media/${USER}/minio/minisign.key -qQSm ${artifact} < /media/${USER}/minio/minisign-passphrase'
artifacts: all
changelog:

.nancy-ignore Normal file

CREDITS

File diff suppressed because it is too large


@@ -1,4 +1,4 @@
FROM golang:1.13-alpine as builder
FROM golang:1.15-alpine as builder
LABEL maintainer="MinIO Inc <dev@min.io>"
@@ -11,22 +11,27 @@ RUN \
git clone https://github.com/minio/minio && cd minio && \
git checkout master && go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"
FROM alpine:3.12
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_ROOT_USER_FILE=access_key \
MINIO_ROOT_PASSWORD_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"
EXPOSE 9000
COPY --from=builder /go/bin/minio /usr/bin/minio
COPY --from=builder /go/minio/CREDITS /third_party/
COPY --from=builder /go/minio/CREDITS /licenses/CREDITS
COPY --from=builder /go/minio/LICENSE /licenses/LICENSE
COPY --from=builder /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
microdnf update --nodocs && \
microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
microdnf clean all && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]


@@ -1,29 +0,0 @@
FROM multiarch/qemu-user-static:x86_64-arm as qemu
FROM arm32v7/alpine:3.12
LABEL maintainer="MinIO Inc <dev@min.io>"
COPY --from=qemu /usr/bin/qemu-arm-static /usr/bin
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl -s -q https://dl.min.io/server/minio/release/linux-arm/minio -o /usr/bin/minio && \
curl -s -q https://raw.githubusercontent.com/minio/minio/release/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS
EXPOSE 9000
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
CMD ["minio"]


@@ -1,29 +0,0 @@
FROM multiarch/qemu-user-static:x86_64-aarch64 as qemu
FROM arm64v8/alpine:3.12
LABEL maintainer="MinIO Inc <dev@min.io>"
COPY --from=qemu /usr/bin/qemu-aarch64-static /usr/bin
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl -s -q https://dl.min.io/server/minio/release/linux-arm64/minio -o /usr/bin/minio && \
curl -s -q https://raw.githubusercontent.com/minio/minio/release/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS
EXPOSE 9000
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
CMD ["minio"]

Dockerfile.cicd Normal file

@@ -0,0 +1,42 @@
FROM golang:1.15-alpine as builder
LABEL maintainer="MinIO Inc <dev@min.io>"
ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on
RUN \
apk add --no-cache git && \
git clone https://github.com/minio/minio && cd minio && \
git checkout master && go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3
ARG TARGETARCH
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_ROOT_USER_FILE=access_key \
MINIO_ROOT_PASSWORD_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"
EXPOSE 9000
COPY --from=builder /go/bin/minio /usr/bin/minio
COPY --from=builder /go/minio/CREDITS /licenses/CREDITS
COPY --from=builder /go/minio/LICENSE /licenses/LICENSE
COPY --from=builder /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/
RUN \
microdnf update --nodocs && \
microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
microdnf clean all
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
CMD ["minio", "server", "/data"]


@@ -1,22 +1,25 @@
FROM alpine:3.12
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3
ARG TARGETARCH
LABEL maintainer="MinIO Inc <dev@min.io>"
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY minio /usr/bin/
COPY CREDITS /third_party/
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
ENV MINIO_UPDATE=off \
MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_ROOT_USER_FILE=access_key \
MINIO_ROOT_PASSWORD_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh
RUN microdnf update --nodocs
RUN microdnf install curl ca-certificates shadow-utils util-linux --nodocs
RUN microdnf clean all && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh
EXPOSE 9000


@@ -1,4 +1,4 @@
FROM ubuntu
FROM ubuntu:20.04
LABEL maintainer="MinIO Inc <dev@min.io>"


@@ -1,29 +0,0 @@
FROM multiarch/qemu-user-static:x86_64-ppc64le as qemu
FROM ppc64le/alpine:3.12
LABEL maintainer="MinIO Inc <dev@min.io>"
COPY --from=qemu /usr/bin/qemu-ppc64le-static /usr/bin
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl -s -q https://dl.min.io/server/minio/release/linux-ppc64le/minio -o /usr/bin/minio && \
curl -s -q https://raw.githubusercontent.com/minio/minio/release/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS
EXPOSE 9000
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
CMD ["minio"]


@@ -1,21 +1,41 @@
FROM alpine:3.12
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3
LABEL maintainer="MinIO Inc <dev@min.io>"
ARG TARGETARCH
LABEL name="MinIO" \
vendor="MinIO Inc <dev@min.io>" \
maintainer="MinIO Inc <dev@min.io>" \
version="RELEASE.2021-02-24T18-44-45Z" \
release="RELEASE.2021-02-24T18-44-45Z" \
summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_ROOT_USER_FILE=access_key \
MINIO_ROOT_PASSWORD_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"
COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl -s -q https://dl.min.io/server/minio/release/linux-amd64/minio -o /usr/bin/minio && \
curl -s -q https://raw.githubusercontent.com/minio/minio/release/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
microdnf update --nodocs && \
microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
microdnf install minisign --nodocs && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio -o /usr/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.sha256sum -o /usr/bin/minio.sha256sum && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.minisig -o /usr/bin/minio.minisig && \
microdnf clean all && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS
chmod +x /usr/bin/verify-minio.sh && \
/usr/bin/verify-minio.sh
EXPOSE 9000
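The hunk above downloads `minio.sha256sum` and `minio.minisig` alongside the binary and runs `verify-minio.sh` during the build. The script itself is not shown in this diff; a minimal sketch of what such a verification step might do, assuming the downloaded checksum/signature files and the `MINIO_UPDATE_MINISIGN_PUBKEY` value set above:

```sh
#!/bin/sh
set -e
cd /usr/bin
# Check the binary against the published SHA-256 checksum
# (assumes minio.sha256sum lists the hash for "minio")
sha256sum -c minio.sha256sum
# Check the minisign signature with the public key baked into the image
minisign -Vm minio -x minio.minisig -P "${MINIO_UPDATE_MINISIGN_PUBKEY}"
```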


@@ -1,29 +0,0 @@
FROM multiarch/qemu-user-static:x86_64-s390x as qemu
FROM s390x/alpine:3.12
LABEL maintainer="MinIO Inc <dev@min.io>"
COPY --from=qemu /usr/bin/qemu-s390x-static /usr/bin
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl -s -q https://dl.min.io/server/minio/release/linux-s390x/minio -o /usr/bin/minio && \
curl -s -q https://raw.githubusercontent.com/minio/minio/release/dockerscripts/docker-entrypoint.sh -o /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
curl -s -q -O https://raw.githubusercontent.com/minio/minio/release/CREDITS
EXPOSE 9000
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
CMD ["minio"]


@@ -17,11 +17,18 @@ checks:
getdeps:
@mkdir -p ${GOPATH}/bin
@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
@which ruleguard 1>/dev/null || (echo "Installing ruleguard" && go get github.com/quasilyte/go-ruleguard/cmd/ruleguard@v0.2.1)
@which msgp 1>/dev/null || (echo "Installing msgp" && go get github.com/tinylib/msgp@v1.1.3)
@which stringer 1>/dev/null || (echo "Installing stringer" && go get golang.org/x/tools/cmd/stringer)
crosscompile:
@(env bash $(PWD)/buildscripts/cross-compile.sh)
verifiers: getdeps fmt lint
verifiers: getdeps fmt lint ruleguard check-gen
check-gen:
@go generate ./... >/dev/null
@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)
fmt:
@echo "Running $@ check"
@@ -31,7 +38,11 @@ fmt:
lint:
@echo "Running $@ check"
@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=10m --config ./.golangci.yml
ruleguard:
@echo "Running $@ check"
@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go github.com/minio/minio/...
# Builds minio, runs the verifiers then runs the tests.
check: test
@@ -46,7 +57,7 @@ test-race: verifiers build
# Verify minio binary
verify:
@echo "Verifying build with race"
@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@GO111MODULE=on CGO_ENABLED=1 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-build.sh)
# Verify healing of disks with minio binary
@@ -60,9 +71,18 @@ build: checks
@echo "Building minio binary to './minio'"
@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
docker: checks
hotfix-vars:
$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#')))
$(eval TAG := "minio/minio:$(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD)")
hotfix: hotfix-vars install
docker-hotfix: hotfix checks
@echo "Building minio docker image '$(TAG)'"
@docker build -t $(TAG) . -f Dockerfile.dev
docker: build checks
@echo "Building minio docker image '$(TAG)'"
@GOOS=linux GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@docker build -t $(TAG) . -f Dockerfile.dev
# Builds minio and installs it to $GOPATH/bin.
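The new `hotfix-vars`/`docker-hotfix` targets above derive the version and image tag from the most recent git release tag plus the current commit. A usage sketch, assuming a checkout with release tags available:

```sh
# Build a hotfix binary and a docker image tagged
# minio/minio:<last-release>.hotfix.<short-sha>
make docker-hotfix
```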

README.md

@@ -5,80 +5,175 @@
MinIO is a High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
## Docker Container
### Stable
```
docker run -p 9000:9000 \
-e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
-e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
minio/minio server /data
This README provides quickstart instructions on running MinIO on baremetal hardware, including Docker-based installations. For Kubernetes environments,
use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).
# Docker Installation
Use the following commands to run a standalone MinIO server on a Docker container.
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.
## Stable
Run the following command to start the latest stable image of MinIO in a Docker container using an ephemeral data volume:
```sh
docker run -p 9000:9000 minio/minio server /data
```
### Edge
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
> NOTE: To deploy MinIO on Docker with persistent storage, you must map local persistent directories from the host OS to the container using the
`docker -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the Docker container.
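Putting the pieces together, a run with persistent storage might look like the following sketch (the host path `/mnt/data` is the example from the note above):

```sh
docker run -p 9000:9000 \
  -v /mnt/data:/data \
  minio/minio server /data
```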
## Edge
Run the following command to start the bleeding-edge image of MinIO in a Docker container using an ephemeral data volume:
```
docker run -p 9000:9000 \
-e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
-e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
minio/minio:edge server /data
docker run -p 9000:9000 minio/minio:edge server /data
```
> NOTE: Docker will not display the default keys unless you start the container with the `-it`(interactive TTY) argument. Generally, it is not recommended to use default keys with containers. Please visit MinIO Docker quickstart guide for more information [here](https://docs.min.io/docs/minio-docker-quickstart-guide)
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
> NOTE: To deploy MinIO on Docker with persistent storage, you must map local persistent directories from the host OS to the container using the
`docker -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the Docker container.
# macOS
Use the following commands to run a standalone MinIO server on macOS.
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.
## Homebrew (recommended)
Run the following command to install the latest stable MinIO package using [Homebrew](https://brew.sh/). Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
## macOS
### Homebrew (recommended)
Install minio packages using [Homebrew](https://brew.sh/)
```sh
brew install minio/stable/minio
minio server /data
```
> NOTE: If you previously installed minio using `brew install minio`, it is recommended that you reinstall it from the official `minio/stable/minio` repo instead.
```sh
brew uninstall minio
brew install minio/stable/minio
```
### Binary Download
| Platform | Architecture | URL |
| ---------- | -------- | ------ |
| Apple macOS | 64-bit Intel | https://dl.min.io/server/minio/release/darwin-amd64/minio |
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
## Binary Download
Use the following command to download and run a standalone MinIO server on macOS. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
```sh
chmod 755 minio
wget https://dl.min.io/server/minio/release/darwin-amd64/minio
chmod +x minio
./minio server /data
```
## GNU/Linux
### Binary Download
| Platform | Architecture | URL |
| ---------- | -------- | ------ |
| GNU/Linux | 64-bit Intel | https://dl.min.io/server/minio/release/linux-amd64/minio |
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
# GNU/Linux
Use the following command to run a standalone MinIO server on Linux hosts running 64-bit Intel/AMD architectures. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
```sh
wget https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
./minio server /data
```
| Platform | Architecture | URL |
| ---------- | -------- | ------ |
| GNU/Linux | ppc64le | https://dl.min.io/server/minio/release/linux-ppc64le/minio |
Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
The following table lists supported architectures. Replace the `wget` URL with the one matching the architecture of your Linux host.
| Architecture | URL |
| -------- | ------ |
| 64-bit Intel/AMD | https://dl.min.io/server/minio/release/linux-amd64/minio |
| 64-bit ARM | https://dl.min.io/server/minio/release/linux-arm64/minio |
| 64-bit PowerPC LE (ppc64le) | https://dl.min.io/server/minio/release/linux-ppc64le/minio |
| IBM Z-Series (S390X) | https://dl.min.io/server/minio/release/linux-s390x/minio |
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.
# Microsoft Windows
To run MinIO on 64-bit Windows hosts, download the MinIO executable from the following URL:
```sh
wget https://dl.min.io/server/minio/release/linux-ppc64le/minio
chmod +x minio
./minio server /data
https://dl.min.io/server/minio/release/windows-amd64/minio.exe
```
## Microsoft Windows
### Binary Download
| Platform | Architecture | URL |
| ---------- | -------- | ------ |
| Microsoft Windows | 64-bit | https://dl.min.io/server/minio/release/windows-amd64/minio.exe |
Use the following command to run a standalone MinIO server on the Windows host. Replace ``D:\`` with the path to the drive or directory in which you want MinIO to store data. You must change the terminal or PowerShell directory to the location of the ``minio.exe`` executable, *or* add the path to that directory to the system ``$PATH``:
```sh
minio.exe server D:\Photos
minio.exe server D:\
```
## FreeBSD
### Port
Install minio packages using [pkg](https://github.com/freebsd/pkg), MinIO doesn't officially build FreeBSD binaries but is maintained by FreeBSD upstream [here](https://www.freshports.org/www/minio).
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.
# FreeBSD
MinIO does not provide an official FreeBSD binary. However, FreeBSD maintains an [upstream release](https://www.freshports.org/www/minio) using [pkg](https://github.com/freebsd/pkg):
```sh
pkg install minio
@@ -87,13 +182,32 @@ sysrc minio_disks=/home/user/Photos
service minio start
```
## Install from Source
Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.13](https://golang.org/dl/#stable)
# Install from Source
Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.15](https://golang.org/dl/#stable).
```sh
GO111MODULE=on go get github.com/minio/minio
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.
MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.
# Deployment Recommendations
## Allow port access for Firewalls
By default, MinIO uses port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to the port.
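For example, on a Debian-based host with `ufw` enabled, opening the default port is a one-liner (the `firewall-cmd` and `iptables` variants follow the same pattern):

```sh
# Allow incoming traffic on MinIO's default listen port
ufw allow 9000
```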
@@ -149,20 +263,22 @@ iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
service iptables restart
```
## Pre-existing data
When deployed on a single drive, MinIO server lets clients access any pre-existing data in the data directory. For example, if MinIO is started with the command `minio server /mnt/data`, any pre-existing data in the `/mnt/data` directory would be accessible to the clients.
The above statement is also valid for all gateway backends.
# Test MinIO Connectivity
## Test using MinIO Browser
MinIO Server comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 ensure your server has started successfully.
MinIO Server comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 to ensure your server has started successfully.
![Screenshot](https://github.com/minio/minio/blob/master/docs/screenshots/minio-browser.png?raw=true)
## Test using MinIO Client `mc`
`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://docs.min.io/docs/minio-client-quickstart-guide) for further instructions.
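A minimal session, assuming a local server running with the default `minioadmin:minioadmin` root credentials:

```sh
# Register the local server under the alias "myminio"
mc alias set myminio http://127.0.0.1:9000 minioadmin minioadmin
mc mb myminio/mybucket             # create a bucket
mc cp ./file.txt myminio/mybucket  # upload an object
mc ls myminio/mybucket             # list its contents
```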
## Pre-existing data
When deployed on a single drive, MinIO server lets clients access any pre-existing data in the data directory. For example, if MinIO is started with the command `minio server /mnt/data`, any pre-existing data in the `/mnt/data` directory would be accessible to the clients.
The above statement is also valid for all gateway backends.
## Upgrading MinIO
# Upgrading MinIO
MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster. This allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend that all users use [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update) from the client. This will update all the nodes in the cluster simultaneously and restart them, as shown in the following command from the MinIO client (mc):
```
@@ -171,17 +287,17 @@ mc admin update <minio alias, e.g., myminio>
> NOTE: some releases might not allow rolling upgrades; this is always called out in the release notes, so it is generally advised to read them before upgrading. In such a situation `mc admin update` is the recommended mechanism to upgrade all servers at once.
### Important things to remember during MinIO upgrades
## Important things to remember during MinIO upgrades
- `mc admin update` will only work if the user running MinIO has write access to the parent directory where the binary is located; for example, if the current binary is at `/usr/local/bin/minio`, you would need write access to `/usr/local/bin`.
- `mc admin update` updates and restarts all servers simultaneously; applications would retry and continue their respective operations upon upgrade.
- `mc admin update` is disabled in kubernetes/container environments; container environments provide their own mechanisms for rolling out updates.
- In the case of federated setups, `mc admin update` should be run against each cluster individually. Avoid updating `mc` to any new releases until all clusters have been successfully updated.
- If using `kes` as KMS with MinIO, just replace the binary and restart `kes` more information about `kes` can be found [here](https://github.com/minio/kes/wiki)x
- If using `kes` as KMS with MinIO, just replace the binary and restart `kes`; more information about `kes` can be found [here](https://github.com/minio/kes/wiki)
- If using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
- If using etcd with MinIO for federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
## Explore Further
# Explore Further
- [MinIO Erasure Code QuickStart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
- [Use `mc` with MinIO Server](https://docs.min.io/docs/minio-client-quickstart-guide)
- [Use `aws-cli` with MinIO Server](https://docs.min.io/docs/aws-cli-with-minio)
@@ -189,8 +305,8 @@ mc admin update <minio alias, e.g., myminio>
- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/docs/golang-client-quickstart-guide)
- [The MinIO documentation website](https://docs.min.io)
## Contribute to MinIO Project
# Contribute to MinIO Project
Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)
## License
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fminio%2Fminio.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fminio%2Fminio?ref=badge_large)
# License
Use of MinIO is governed by the Apache 2.0 License found at [LICENSE](https://github.com/minio/minio/blob/master/LICENSE).


@@ -7,29 +7,33 @@ MinIO is a very lightweight service that can easily be combined with other applications
## Docker Container
### Stable
```
docker pull minio/minio
docker run -p 9000:9000 minio/minio server /data
docker run -p 9000:9000 \
-e "MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE" \
-e "MINIO_ROOT_PASSWORD=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
minio/minio server /data
```
### Edge
```
docker pull minio/minio:edge
docker run -p 9000:9000 minio/minio:edge server /data
docker run -p 9000:9000 \
-e "MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE" \
-e "MINIO_ROOT_PASSWORD=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
minio/minio:edge server /data
```
For more information on Docker deployments, see [here](https://docs.min.io/docs/minio-docker-quickstart-guide)
> NOTE: Docker will not display the default keys unless you start the container with the `-it` (interactive TTY) argument. Generally, it is not recommended to use default keys with containers. For more information on Docker deployments, see [here](https://docs.min.io/docs/minio-docker-quickstart-guide)
## macOS
### Homebrew
### Homebrew (recommended)
Install minio using [Homebrew](http://brew.sh/):
```sh
brew install minio/stable/minio
minio server /data
```
#### Note
If you previously installed minio using `brew install minio`, it is recommended that you reinstall it from the official `minio/stable/minio` repo. The homebrew version is unstable due to a golang 1.8 bug.
```
> NOTE: If you previously installed minio using `brew install minio`, you can reinstall it from the official `minio/stable/minio` repo. The homebrew version is unstable due to a golang 1.8 bug.
```sh
brew uninstall minio
brew install minio/stable/minio
```
@@ -49,6 +53,16 @@ chmod 755 minio
| ---------- | -------- | ------ |
| GNU/Linux | 64-bit Intel | https://dl.min.io/server/minio/release/linux-amd64/minio |
```sh
wget https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
./minio server /data
```
| OS | CPU Architecture | URL |
| ---------- | -------- | ------ |
| GNU/Linux | ppc64le | https://dl.min.io/server/minio/release/linux-ppc64le/minio |
```sh
wget https://dl.min.io/server/minio/release/linux-ppc64le/minio
chmod +x minio
./minio server /data
```
@@ -64,7 +78,7 @@ minio.exe server D:\Photos
## FreeBSD
### Port
Install using [pkg](https://github.com/freebsd/pkg).
Install using [pkg](https://github.com/freebsd/pkg). MinIO does not officially provide FreeBSD binaries; they are maintained upstream by FreeBSD, see [here](https://www.freshports.org/www/minio).
```sh
pkg install minio
@@ -75,14 +89,68 @@ service minio start
## Install from Source
Source installation is intended only for developers and advanced users. If you do not have a working Golang environment, please refer to [How to install Golang](https://golang.org/doc/install).
Source installation is intended only for developers and advanced users. If you do not have a working Golang environment, please refer to [How to install Golang](https://golang.org/doc/install). The minimum required Golang version is [go1.15](https://golang.org/dl/#stable).
```sh
go get -u github.com/minio/minio
GO111MODULE=on go get github.com/minio/minio
```
## Allow port access for Firewalls
By default, MinIO uses port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to it.
### ufw
For hosts with ufw enabled (Debian-based distributions), you can allow all traffic on a specific port with the `ufw` command. Use the following command to allow access to port 9000:
```sh
ufw allow 9000
```
The following command allows all incoming traffic on ports 9000-9010:
```sh
ufw allow 9000:9010/tcp
```
### firewall-cmd
For hosts with firewall-cmd enabled (CentOS), you can allow all traffic on a specific port with the `firewall-cmd` command. Use the following command to allow access to port 9000:
```sh
firewall-cmd --get-active-zones
```
This command gets the currently active zones. Now you can apply the port rule to the zone returned above. If the zone is `public`, use:
```sh
firewall-cmd --zone=public --add-port=9000/tcp --permanent
```
The `permanent` flag persists the rule across firewall start, restart, and reload. Finally, reload the firewall for the change to take effect:
```sh
firewall-cmd --reload
```
### iptables
For hosts with iptables enabled (RHEL, CentOS, etc), you can allow all traffic on a specific port with the `iptables` command. Use the following command to allow access to port 9000:
```sh
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
service iptables restart
```
The following command allows all incoming traffic on ports 9000-9010:
```sh
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
service iptables restart
```
## Test using MinIO Browser
After installation, point your web browser to [http://127.0.0.1:9000](http://127.0.0.1:9000); if it is reachable, minio was installed successfully.
MinIO Server comes with an embedded web object browser. After installation, point your web browser to [http://127.0.0.1:9000](http://127.0.0.1:9000); if it is reachable, minio was installed successfully.
![Screenshot](https://github.com/minio/minio/blob/master/docs/screenshots/minio-browser.png?raw=true)
@@ -94,6 +162,25 @@ go get -u github.com/minio/minio
The above statement is also valid for all gateway backends.
## Upgrading MinIO
MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster, which allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend that all users upgrade from the client using the [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update) command, which updates all nodes in the cluster simultaneously and restarts them, as shown below:
```
mc admin update <minio alias, e.g., myminio>
```
> NOTE: some releases may not allow rolling upgrades; this is usually called out in the release notes, so it is advised to read them before upgrading. In such cases, using the `mc admin update` mechanism to upgrade all servers at once is recommended.
### Important things to remember during MinIO upgrades
- `mc admin update` only works if the user running MinIO has write permission to the parent directory of the binary; for example, if the current binary is at `/usr/local/bin/minio`, you need write access to `/usr/local/bin`.
- `mc admin update` updates and restarts all servers simultaneously; applications will retry and continue their respective operations after the upgrade.
- `mc admin update` is not available in kubernetes/container environments; container environments provide their own update mechanisms.
- For federated deployments, run `mc admin update` against each cluster separately. Do not update `mc` to any new release until all clusters have been updated successfully.
- If `kes` is used as KMS with MinIO, just replace the binary and restart `kes`; more information about `kes` can be found [here](https://github.com/minio/kes/wiki).
- If Vault is used as KMS with MinIO, make sure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
- If MinIO is used with etcd, make sure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
## Explore Further
- [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
- [`mc` Quickstart Guide](https://docs.min.io/docs/minio-client-quickstart-guide)
@@ -103,4 +190,7 @@ go get -u github.com/minio/minio
- [The MinIO documentation website](https://docs.min.io)
## Contribute to MinIO Project
Please follow the MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md). Chinese developers are welcome to join the MinIO project.
## License
Use of MinIO is governed by the Apache 2.0 License; see [LICENSE](./LICENSE).

VULNERABILITY_REPORT.md Normal file

@@ -0,0 +1,39 @@
## Vulnerability Management Policy
This document formally describes the process of addressing and managing a
reported vulnerability that has been found in the MinIO server code base,
any directly connected ecosystem component or a direct / indirect dependency
of the code base.
### Scope
The vulnerability management policy described in this document covers the
process of investigating, assessing and resolving a vulnerability report
opened by a MinIO employee or an external third party.
Therefore, it lists pre-conditions and actions that should be performed to
resolve and fix a reported vulnerability.
### Vulnerability Management Process
The vulnerability management process requires that the vulnerability report
contains the following information:
- The project / component that contains the reported vulnerability.
- A description of the vulnerability. In particular, the type of the
reported vulnerability and how it might be exploited. Alternatively,
a well-established vulnerability identifier, e.g. CVE number, can be
used instead.
Based on the description mentioned above, a MinIO engineer or security team
member investigates:
- Whether the reported vulnerability exists.
- The conditions required for the vulnerability to be exploited.
- The steps required to fix the vulnerability.
In general, if the vulnerability exists in one of the MinIO code bases
itself - not in a code dependency - then MinIO will, if possible, fix
the vulnerability or implement reasonable countermeasures such that the
vulnerability cannot be exploited anymore.


@@ -57,22 +57,6 @@ export class BrowserDropdown extends React.Component {
const { fetchServerInfo } = this.props
fetchServerInfo()
}
fullScreen(e) {
e.preventDefault()
let el = document.documentElement
if (el.requestFullscreen) {
el.requestFullscreen()
}
if (el.mozRequestFullScreen) {
el.mozRequestFullScreen()
}
if (el.webkitRequestFullscreen) {
el.webkitRequestFullscreen()
}
if (el.msRequestFullscreen) {
el.msRequestFullscreen()
}
}
logout(e) {
e.preventDefault()
web.Logout()
@@ -87,24 +71,30 @@ export class BrowserDropdown extends React.Component {
<i className="fas fa-bars" />
</Dropdown.Toggle>
<Dropdown.Menu className="dropdown-menu-right">
<li>
<a href="" onClick={this.showChangePassword.bind(this)}>
Change Password <i className="fas fa-cog" />
</a>
{this.state.showChangePasswordModal && (
<ChangePasswordModal
serverInfo={serverInfo}
hideChangePassword={this.hideChangePassword.bind(this)}
/>
)}
</li>
<li>
<a target="_blank" href="https://docs.min.io/?ref=ob">
Documentation <i className="fas fa-book" />
</a>
</li>
<li>
<a target="_blank" href="https://github.com/minio/minio">
GitHub <i className="fab fa-github" />
</a>
</li>
<li>
<a href="" onClick={this.fullScreen}>
Fullscreen <i className="fas fa-expand" />
</a>
</li>
<li>
<a target="_blank" href="https://docs.min.io/">
Documentation <i className="fas fa-book" />
</a>
</li>
<li>
<a target="_blank" href="https://slack.min.io">
Ask for help <i className="fas fa-question-circle" />
<a target="_blank" href="https://min.io/pricing?ref=ob">
Get Support <i className="fas fa-question-circle" />
</a>
</li>
<li>
@@ -118,20 +108,9 @@ export class BrowserDropdown extends React.Component {
/>
)}
</li>
<li>
<a href="" onClick={this.showChangePassword.bind(this)}>
Change Password <i className="fas fa-cog" />
</a>
{this.state.showChangePasswordModal && (
<ChangePasswordModal
serverInfo={serverInfo}
hideChangePassword={this.hideChangePassword.bind(this)}
/>
)}
</li>
<li>
<a href="" id="logout" onClick={this.logout}>
Sign Out <i className="fas fa-sign-out-alt" />
Logout <i className="fas fa-sign-out-alt" />
</a>
</li>
</Dropdown.Menu>


@@ -15,6 +15,7 @@
*/
import React from "react"
import ObjectsSearch from "../objects/ObjectsSearch"
import Path from "../objects/Path"
import StorageInfo from "./StorageInfo"
import BrowserDropdown from "./BrowserDropdown"
@@ -27,6 +28,7 @@ export const Header = () => {
<header className="fe-header">
<Path />
{loggedIn && <StorageInfo />}
{loggedIn && <ObjectsSearch />}
<ul className="feh-actions">
{loggedIn ? (
<BrowserDropdown />


@@ -22,7 +22,8 @@ const bucketsFilterSelector = state => state.buckets.filter
export const getFilteredBuckets = createSelector(
bucketsSelector,
bucketsFilterSelector,
(buckets, filter) => buckets.filter(bucket => bucket.indexOf(filter) > -1)
(buckets, filter) => buckets.filter(
bucket => bucket.toLowerCase().indexOf(filter.toLowerCase()) > -1)
)
export const getCurrentBucket = state => state.buckets.currentBucket


@@ -18,6 +18,7 @@ import React from "react"
import { connect } from "react-redux"
import InfiniteScroll from "react-infinite-scroller"
import ObjectsList from "./ObjectsList"
import { getFilteredObjects } from "./selectors"
export class ObjectsListContainer extends React.Component {
constructor(props) {
@@ -39,22 +40,29 @@ export class ObjectsListContainer extends React.Component {
})
}
}
componentDidUpdate(prevProps) {
if (this.props.filter !== prevProps.filter) {
this.setState({
page: 1
})
}
}
loadNextPage() {
this.setState(state => {
return { page: state.page + 1 }
})
}
render() {
const { objects, listLoading } = this.props
const { filteredObjects, listLoading } = this.props
const visibleObjects = objects.slice(0, this.state.page * 100)
const visibleObjects = filteredObjects.slice(0, this.state.page * 100)
return (
<div style={{ position: "relative" }}>
<InfiniteScroll
pageStart={0}
loadMore={this.loadNextPage}
hasMore={objects.length > visibleObjects.length}
hasMore={filteredObjects.length > visibleObjects.length}
useWindow={true}
initialLoad={false}
>
@@ -70,7 +78,8 @@ const mapStateToProps = state => {
return {
currentBucket: state.buckets.currentBucket,
currentPrefix: state.objects.currentPrefix,
objects: state.objects.list,
filteredObjects: getFilteredObjects(state),
filter: state.objects.filter,
sortBy: state.objects.sortBy,
sortOrder: state.objects.sortOrder,
listLoading: state.objects.listLoading


@@ -0,0 +1,43 @@
/*
* MinIO Cloud Storage (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from "react"
import { connect } from "react-redux"
import * as actionsObjects from "./actions"
export const ObjectsSearch = ({ onChange }) => (
<div
className="input-group ig-left ig-search-dark"
style={{ display: "block" }}
>
<input
className="ig-text"
type="input"
placeholder="Search Objects..."
onChange={e => onChange(e.target.value)}
/>
<i className="ig-helpers" />
</div>
)
const mapDispatchToProps = dispatch => {
return {
onChange: filter =>
dispatch(actionsObjects.setFilter(filter))
}
}
export default connect(undefined, mapDispatchToProps)(ObjectsSearch)


@@ -46,11 +46,11 @@ class PreviewObjectModal extends React.Component {
<ModalBody>
<div className="input-group">
{this.state.url && (
<img
alt="Image broken"
src={this.state.url}
style={{ display: "block", width: "100%" }}
/>
<object data={this.state.url} style={{ display: "block", width: "100%" }}>
<h3 style={{ textAlign: "center", display: "block", width: "100%" }}>
Do not have read permissions to preview "{this.props.object.name}"
</h3>
</object>
)}
</div>
</ModalBody>


@@ -20,13 +20,13 @@ import { ObjectsListContainer } from "../ObjectsListContainer"
describe("ObjectsList", () => {
it("should render without crashing", () => {
shallow(<ObjectsListContainer objects={[]} />)
shallow(<ObjectsListContainer filteredObjects={[]} />)
})
it("should render ObjectsList with objects", () => {
const wrapper = shallow(
<ObjectsListContainer
objects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
filteredObjects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
/>
)
expect(wrapper.find("ObjectsList").length).toBe(1)
@@ -40,7 +40,7 @@ describe("ObjectsList", () => {
const wrapper = shallow(
<ObjectsListContainer
currentBucket="test1"
objects={[]}
filteredObjects={[]}
listLoading={true}
/>
)


@@ -0,0 +1,32 @@
/*
* MinIO Cloud Storage (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from "react"
import { shallow } from "enzyme"
import { ObjectsSearch } from "../ObjectsSearch"
describe("ObjectsSearch", () => {
it("should render without crashing", () => {
shallow(<ObjectsSearch />)
})
it("should call onChange with search text", () => {
const onChange = jest.fn()
const wrapper = shallow(<ObjectsSearch onChange={onChange} />)
wrapper.find("input").simulate("change", { target: { value: "test" } })
expect(onChange).toHaveBeenCalledWith("test")
})
})


@@ -23,6 +23,7 @@ describe("objects reducer", () => {
const initialState = reducer(undefined, {})
expect(initialState).toEqual({
list: [],
filter: "",
listLoading: false,
sortBy: "",
sortOrder: SORT_ORDER_ASC,


@@ -36,6 +36,7 @@ import { getServerInfo, hasServerPublicDomain } from '../browser/selectors'
export const SET_LIST = "objects/SET_LIST"
export const RESET_LIST = "objects/RESET_LIST"
export const SET_FILTER = "objects/SET_FILTER"
export const APPEND_LIST = "objects/APPEND_LIST"
export const REMOVE = "objects/REMOVE"
export const SET_SORT_BY = "objects/SET_SORT_BY"
@@ -57,6 +58,13 @@ export const resetList = () => ({
type: RESET_LIST,
})
export const setFilter = filter => {
return {
type: SET_FILTER,
filter
}
}
export const setListLoading = (listLoading) => ({
type: SET_LIST_LOADING,
listLoading,


@@ -28,6 +28,7 @@ const removeObject = (list, objectToRemove, lookup) => {
export default (
state = {
list: [],
filter: "",
listLoading: false,
sortBy: "",
sortOrder: SORT_ORDER_ASC,
@@ -53,6 +54,11 @@ export default (
...state,
list: []
}
case actionsObjects.SET_FILTER:
return {
...state,
filter: action.filter
}
case actionsObjects.SET_LIST_LOADING:
return {
...state,


@@ -21,3 +21,13 @@ export const getCurrentPrefix = state => state.objects.currentPrefix
export const getCheckedList = state => state.objects.checkedList
export const getPrefixWritable = state => state.objects.prefixWritable
const objectsSelector = state => state.objects.list
const objectsFilterSelector = state => state.objects.filter
export const getFilteredObjects = createSelector(
objectsSelector,
objectsFilterSelector,
(objects, filter) => objects.filter(
object => object.name.toLowerCase().startsWith(filter.toLowerCase()))
)


@@ -36,7 +36,7 @@ export class Dropzone extends React.Component {
// Overwrite the default styling from react-dropzone; otherwise it
// won't handle child elements correctly.
const style = {
height: "100%",
flex: "1",
borderWidth: "0",
borderStyle: "dashed",
borderColor: "#fff"


@@ -138,9 +138,10 @@ describe("Uploads actions", () => {
objects: { currentPrefix: "pre1/" }
})
store.dispatch(uploadsActions.uploadFile(file))
const objectPath = encodeURIComponent("pre1/file1")
expect(open).toHaveBeenCalledWith(
"PUT",
"https://localhost:8080/upload/test1/pre1/file1",
"https://localhost:8080/upload/test1/" + objectPath,
true
)
expect(send).toHaveBeenCalledWith(file)


@@ -94,7 +94,7 @@ export const uploadFile = file => {
_filePath = _filePath.substring(1)
}
const filePath = _filePath
const objectName = `${currentPrefix}${filePath}`
const objectName = encodeURIComponent(`${currentPrefix}${filePath}`)
const uploadUrl = `${
window.location.origin
}${minioBrowserPrefix}/upload/${currentBucket}/${objectName}`


@@ -21,7 +21,7 @@ import storage from 'local-storage-fallback'
class Web {
constructor(endpoint) {
const namespace = 'Web'
const namespace = 'web'
this.JSONrpc = new JSONrpc({
endpoint,
namespace


@@ -20,7 +20,8 @@
@media(max-width: @screen-sm-max) {
padding: 75px 0 80px;
}
display: flex;
flex-direction: column;
min-height:100vh;
overflow: auto;
}


@@ -169,6 +169,24 @@ select.form-control {
}
}
.ig-search-dark {
&:before {
font-family: @font-family-icon;
font-weight: 900;
content: '\f002';
font-size: 15px;
position: absolute;
left: 2px;
top: 8px;
color: rgba(0, 0, 0, 0.5);
}
.ig-text {
padding-left: 25px;
.placeholder(rgba(0, 0, 0, 0.5))
}
}
.ig-search {
&:before {
font-family: @font-family-icon;
@@ -270,4 +288,4 @@ select.form-control {
.set-expire-decrease {
bottom: -27px;
.rotate(-180deg);
}
}


@@ -349,6 +349,7 @@ div.fesl-row {
margin: 0;
height: 100%;
text-align: right;
white-space: nowrap;
}
.dropdown {
@@ -495,4 +496,4 @@ div.fesl-row {
.opacity(1);
right: 0;
}
}
}


@@ -2271,7 +2271,9 @@
"asap": {
"version": "2.0.6",
"resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz",
"integrity": "sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY="
"integrity": "sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY=",
"dev": true,
"optional": true
},
"asn1": {
"version": "0.2.4",
@@ -2283,19 +2285,20 @@
}
},
"asn1.js": {
"version": "4.10.1",
"resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-4.10.1.tgz",
"integrity": "sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==",
"version": "5.4.1",
"resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-5.4.1.tgz",
"integrity": "sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA==",
"requires": {
"bn.js": "^4.0.0",
"inherits": "^2.0.1",
"minimalistic-assert": "^1.0.0"
"minimalistic-assert": "^1.0.0",
"safer-buffer": "^2.1.0"
},
"dependencies": {
"bn.js": {
"version": "4.11.8",
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz",
"integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA=="
"version": "4.11.9",
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz",
"integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw=="
}
}
},
@@ -3541,9 +3544,9 @@
}
},
"base64-js": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.3.1.tgz",
"integrity": "sha512-mLQ4i2QO1ytvGWFWmcngKO//JXAQueZvwEKtjgQFM4jIK0kU+ytMfplL8j+n5mspOfjHwoAg+9yhb7BwAHm36g=="
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
"integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="
},
"batch": {
"version": "0.6.1",
@@ -3574,13 +3577,12 @@
"bluebird": {
"version": "3.7.2",
"resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz",
"integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==",
"dev": true
"integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg=="
},
"bn.js": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.1.1.tgz",
"integrity": "sha512-IUTD/REb78Z2eodka1QZyyEk66pciRcP6Sroka0aI3tG/iwIdYLrBD62RsubR7vqdt3WyX8p4jxeatzmRSphtA=="
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.1.3.tgz",
"integrity": "sha512-GkTiFpjFtUzU9CbMeJ5iazkCzGL3jrhzerzZIuqLABjbwRaFt33I9tUdSNryIptM+RxDet6OKm2WnLXzW51KsQ=="
},
"body-parser": {
"version": "1.19.0",
@@ -3754,31 +3756,24 @@
}
},
"browserify-rsa": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.0.1.tgz",
"integrity": "sha1-IeCr+vbyApzy+vsTNWenAdQTVSQ=",
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.1.0.tgz",
"integrity": "sha512-AdEER0Hkspgno2aR97SAf6vi0y0k8NuOpGnVH3O99rcA5Q6sh8QxcngtHuJ6uXwnfAXNM4Gn1Gb7/MV1+Ymbog==",
"requires": {
"bn.js": "^4.1.0",
"bn.js": "^5.0.0",
"randombytes": "^2.0.1"
},
"dependencies": {
"bn.js": {
"version": "4.11.8",
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz",
"integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA=="
}
}
},
"browserify-sign": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.0.tgz",
"integrity": "sha512-hEZC1KEeYuoHRqhGhTy6gWrpJA3ZDjFWv0DE61643ZnOXAKJb3u7yWcrU0mMc9SwAqK1n7myPGndkp0dFG7NFA==",
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.1.tgz",
"integrity": "sha512-/vrA5fguVAKKAVTNJjgSm1tRQDHUU6DbwO9IROu/0WAzC8PKhucDSh18J0RMvVeHAn5puMd+QHC2erPRNf8lmg==",
"requires": {
"bn.js": "^5.1.1",
"browserify-rsa": "^4.0.1",
"create-hash": "^1.2.0",
"create-hmac": "^1.1.7",
"elliptic": "^6.5.2",
"elliptic": "^6.5.3",
"inherits": "^2.0.4",
"parse-asn1": "^5.1.5",
"readable-stream": "^3.6.0",
@@ -4022,9 +4017,9 @@
}
},
"chokidar": {
"version": "3.4.0",
"resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.4.0.tgz",
"integrity": "sha512-aXAaho2VJtisB/1fg1+3nlLJqGOuewTzQpd/Tz0yTg2R0e4IGtshYvtjowyEumcBv2z+y4+kc75Mz7j5xJskcQ==",
"version": "3.4.3",
"resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.4.3.tgz",
"integrity": "sha512-DtM3g7juCXQxFVSNPNByEC2+NImtBuxQQvWlHunpJIS5Ocr0lG306cC7FCi7cEA0fzmybPUIl4txBIobk1gGOQ==",
"optional": true,
"requires": {
"anymatch": "~3.1.1",
@@ -4034,7 +4029,7 @@
"is-binary-path": "~2.1.0",
"is-glob": "~4.0.1",
"normalize-path": "~3.0.0",
"readdirp": "~3.4.0"
"readdirp": "~3.5.0"
},
"dependencies": {
"anymatch": {
@@ -4048,9 +4043,9 @@
}
},
"binary-extensions": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.0.0.tgz",
"integrity": "sha512-Phlt0plgpIIBOGTT/ehfFnbNlfsDEiqmzE2KRXoX1bLIlir4X/MR+zSyBEkL05ffWgnRSf/DXv+WrUAVr93/ow==",
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.1.0.tgz",
"integrity": "sha512-1Yj8h9Q+QDF5FzhMs/c9+6UntbD5MkRfRwac8DoEm9ZfUBZ7tZ55YcGVAzEe4bXsdQHEk+s9S5wsOKVdZrw0tQ==",
"optional": true
},
"braces": {
@@ -4123,9 +4118,9 @@
"optional": true
},
"readdirp": {
"version": "3.4.0",
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.4.0.tgz",
"integrity": "sha512-0xe001vZBnJEK+uKcj8qOhyAKPzIT+gStxWr3LCB0DwcXR5NZJ3IaC+yGnHCYzB/S7ov3m3EEbZI2zeNvX+hGQ==",
"version": "3.5.0",
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz",
"integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==",
"optional": true,
"requires": {
"picomatch": "^2.2.1"
@@ -4143,9 +4138,9 @@
}
},
"chownr": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.1.tgz",
"integrity": "sha512-j38EvO5+LHX84jlo6h4UzmOwi0UgW61WRyPtJz4qaadK5eY3BTS5TY/S1Stc3Uk2lIM6TPevAlULiEJwie860g=="
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz",
"integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg=="
},
"chrome-trace-event": {
"version": "1.0.2",
@@ -4713,18 +4708,18 @@
"integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac="
},
"create-ecdh": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.3.tgz",
"integrity": "sha512-GbEHQPMOswGpKXM9kCWVrremUcBmjteUaQ01T9rkKCPDXfUHX0IoP9LpHYo2NPFampa4e+/pFDc3jQdxrxQLaw==",
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.4.tgz",
"integrity": "sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A==",
"requires": {
"bn.js": "^4.1.0",
"elliptic": "^6.0.0"
"elliptic": "^6.5.3"
},
"dependencies": {
"bn.js": {
"version": "4.11.8",
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz",
"integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA=="
"version": "4.11.9",
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz",
"integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw=="
}
}
},
@@ -4925,9 +4920,9 @@
}
},
"cyclist": {
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/cyclist/-/cyclist-0.2.2.tgz",
"integrity": "sha1-GzN5LhHpFKL9bW7WRHRkRE5fpkA="
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/cyclist/-/cyclist-1.0.1.tgz",
"integrity": "sha1-WW6WmP0MgOEgOMK4LW6xs1tiJNk="
},
"dashdash": {
"version": "1.14.1",
@@ -5242,9 +5237,9 @@
},
"dependencies": {
"bn.js": {
"version": "4.11.8",
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz",
"integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA=="
"version": "4.11.9",
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz",
"integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw=="
}
}
},
@@ -5420,9 +5415,9 @@
}
},
"duplexify": {
"version": "3.6.1",
"resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.6.1.tgz",
"integrity": "sha512-vM58DwdnKmty+FSPzT14K9JXb90H+j5emaR4KYbr2KTIz00WHGbWOe5ghQTx233ZCLZtrGDALzKwcjEtSt35mA==",
"version": "3.7.1",
"resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz",
"integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==",
"requires": {
"end-of-stream": "^1.0.0",
"inherits": "^2.0.1",
@@ -5471,9 +5466,9 @@
"dev": true
},
"elliptic": {
"version": "6.5.2",
"resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.2.tgz",
"integrity": "sha512-f4x70okzZbIQl/NSRLkI/+tteV/9WqL98zx+SQ69KbXxmVrmjwsNUPn/gYJJ0sHvEak24cZgHIPegRePAtA/xw==",
"version": "6.5.3",
"resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.3.tgz",
"integrity": "sha512-IMqzv5wNQf+E6aHeIqATs0tOLeOTwj1QKbRcS3jBbYkl5oLAserA8yJTT7/VyHUYG91PRmPyeQDObKLPpeS4dw==",
"requires": {
"bn.js": "^4.4.0",
"brorand": "^1.0.1",
@@ -5485,9 +5480,9 @@
},
"dependencies": {
"bn.js": {
"version": "4.11.8",
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz",
"integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA=="
"version": "4.11.9",
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz",
"integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw=="
}
}
},
@@ -5509,14 +5504,6 @@
"integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=",
"dev": true
},
"encoding": {
"version": "0.1.12",
"resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.12.tgz",
"integrity": "sha1-U4tm8+5izRq1HsMjgp0flIDHS+s=",
"requires": {
"iconv-lite": "~0.4.13"
}
},
"end-of-stream": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.1.tgz",
@@ -5526,9 +5513,9 @@
}
},
"enhanced-resolve": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.1.1.tgz",
"integrity": "sha512-98p2zE+rL7/g/DzMHMTF4zZlCgeVdJ7yr6xzEpJRYwFYrGi9ANdn5DnJURg6RpBkyk60XYDnWIv51VfIhfNGuA==",
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.3.0.tgz",
"integrity": "sha512-3e87LvavsdxyoCfGusJnrZ5G8SLPOFeHSNpZI/ATL9a5leXo2k0w6MKnbqhdBad9qTobSfB20Ld7UmgoNbAZkQ==",
"requires": {
"graceful-fs": "^4.1.2",
"memory-fs": "^0.5.0",
@@ -5886,9 +5873,9 @@
},
"dependencies": {
"lodash": {
"version": "4.17.15",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz",
"integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==",
"version": "4.17.20",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz",
"integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA==",
"dev": true
},
"react-is": {
@@ -6162,11 +6149,18 @@
"dev": true
},
"esrecurse": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.2.1.tgz",
"integrity": "sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ==",
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
"integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
"requires": {
"estraverse": "^4.1.0"
"estraverse": "^5.2.0"
},
"dependencies": {
"estraverse": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz",
"integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ=="
}
}
},
"estraverse": {
@@ -6193,9 +6187,9 @@
"dev": true
},
"events": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/events/-/events-3.1.0.tgz",
"integrity": "sha512-Rv+u8MLHNOdMjTAFeT3nCjHn2aGlx435FP/sDHNaRhDEMwyI/aB22Kj2qIN8R0cw3z28psEQLYwxVKLsKrMgWg=="
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/events/-/events-3.2.0.tgz",
"integrity": "sha512-/46HWwbfCX2xTawVfkKLGxMifJYQBWMwY1mjywRtb4c9x8l5NP3KoJtnIOiL1hfdRkIuYhETxQlo62IF8tcnlg=="
},
"eventsource": {
"version": "1.0.7",
@@ -6722,27 +6716,6 @@
"bser": "2.1.1"
}
},
"fbjs": {
"version": "0.8.16",
"resolved": "https://registry.npmjs.org/fbjs/-/fbjs-0.8.16.tgz",
"integrity": "sha1-XmdDL1UNxBtXK/VYR7ispk5TN9s=",
"requires": {
"core-js": "^1.0.0",
"isomorphic-fetch": "^2.1.1",
"loose-envify": "^1.0.0",
"object-assign": "^4.1.0",
"promise": "^7.1.1",
"setimmediate": "^1.0.5",
"ua-parser-js": "^0.7.9"
},
"dependencies": {
"core-js": {
"version": "1.2.7",
"resolved": "https://registry.npmjs.org/core-js/-/core-js-1.2.7.tgz",
"integrity": "sha1-ZSKUwUZR2yj6k70tX/KYOk8IxjY="
}
}
},
"figgy-pudding": {
"version": "3.5.2",
"resolved": "https://registry.npmjs.org/figgy-pudding/-/figgy-pudding-3.5.2.tgz",
@@ -7013,12 +6986,12 @@
}
},
"flush-write-stream": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.0.3.tgz",
"integrity": "sha512-calZMC10u0FMUqoiunI2AiGIIUtUIvifNwkHhNupZH4cbNnW1Itkoh/Nf5HFYmDrwWPjrUxpkZT0KhuCq0jmGw==",
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.1.1.tgz",
"integrity": "sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w==",
"requires": {
"inherits": "^2.0.1",
"readable-stream": "^2.0.4"
"inherits": "^2.0.3",
"readable-stream": "^2.3.6"
}
},
"follow-redirects": {
@@ -8376,9 +8349,9 @@
}
},
"lodash": {
"version": "4.17.15",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz",
"integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==",
"version": "4.17.20",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz",
"integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA==",
"dev": true
},
"tapable": {
@@ -8651,14 +8624,6 @@
"resolved": "https://registry.npmjs.org/humanize/-/humanize-0.0.9.tgz",
"integrity": "sha1-GZT/rs3+nEQe0r2sdFK3u0yeQaQ="
},
"iconv-lite": {
"version": "0.4.23",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.23.tgz",
"integrity": "sha512-neyTUVFtahjf0mB3dZT77u+8O0QB89jFdnBkd5P1JgYPbPaia3gXXOVL2fq8VyU2gMMD7SaN7QukTB/pmXYvDA==",
"requires": {
"safer-buffer": ">= 2.1.2 < 3"
}
},
"icss-utils": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-4.1.1.tgz",
@@ -8677,9 +8642,9 @@
}
},
"ieee754": {
"version": "1.1.13",
"resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz",
"integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg=="
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz",
"integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA=="
},
"iferr": {
"version": "0.1.5",
@@ -8746,9 +8711,9 @@
"integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
},
"ini": {
"version": "1.3.5",
"resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz",
"integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==",
"version": "1.3.8",
"resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz",
"integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==",
"dev": true
},
"internal-ip": {
@@ -9013,7 +8978,8 @@
"is-stream": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz",
"integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ="
"integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=",
"dev": true
},
"is-subset": {
"version": "0.1.1",
@@ -9068,15 +9034,6 @@
"resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz",
"integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8="
},
"isomorphic-fetch": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/isomorphic-fetch/-/isomorphic-fetch-2.2.1.tgz",
"integrity": "sha1-YRrhrPFPXoH3KVB0coGf6XM1WKk=",
"requires": {
"node-fetch": "^1.0.1",
"whatwg-fetch": ">=0.10.0"
}
},
"isstream": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz",
@@ -10573,9 +10530,9 @@
}
},
"lodash": {
"version": "4.17.14",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.14.tgz",
"integrity": "sha512-mmKYbW3GLuJeX+iGP+Y7Gp1AiGHGbXHCOh/jZmrawMmsE7MS4znI3RL2FsjbqOyMayHInjOeykW7PEajUk1/xw==",
"version": "4.17.20",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz",
"integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA==",
"dev": true
},
"lodash._baseisequal": {
@@ -10682,6 +10639,14 @@
"tslib": "^1.10.0"
}
},
"lru-cache": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
"integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
"requires": {
"yallist": "^3.0.2"
}
},
"make-dir": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz",
@@ -10868,9 +10833,9 @@
},
"dependencies": {
"bn.js": {
"version": "4.11.8",
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz",
"integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA=="
"version": "4.11.9",
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz",
"integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw=="
}
}
},
@@ -10993,6 +10958,23 @@
}
}
},
"mississippi": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/mississippi/-/mississippi-3.0.0.tgz",
"integrity": "sha512-x471SsVjUtBRtcvd4BzKE9kFC+/2TeWgKCgw0bZcw1b9l2X3QX5vCWgF+KaZaYm87Ss//rHnWryupDrgLvmSkA==",
"requires": {
"concat-stream": "^1.5.0",
"duplexify": "^3.4.2",
"end-of-stream": "^1.1.0",
"flush-write-stream": "^1.0.0",
"from2": "^2.1.0",
"parallel-transform": "^1.1.0",
"pump": "^3.0.0",
"pumpify": "^1.3.3",
"stream-each": "^1.1.0",
"through2": "^2.0.0"
}
},
"mixin-deep": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz",
@@ -11165,19 +11147,10 @@
"tslib": "^1.10.0"
}
},
"node-fetch": {
"version": "1.6.3",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-1.6.3.tgz",
"integrity": "sha1-3CNO3WSJmC1Y6PDbT2lQKavNjAQ=",
"requires": {
"encoding": "^0.1.11",
"is-stream": "^1.0.1"
}
},
"node-forge": {
"version": "0.9.0",
"resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.9.0.tgz",
"integrity": "sha512-7ASaDa3pD+lJ3WvXFsxekJQelBKRpne+GOVbLbtHYdd7pFspyeuJHnWfLplGf3SwKGbfs/aYl5V/JCIaHVUKKQ==",
"version": "0.10.0",
"resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.10.0.tgz",
"integrity": "sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA==",
"dev": true
},
"node-int64": {
@@ -11768,11 +11741,11 @@
"integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw=="
},
"parallel-transform": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/parallel-transform/-/parallel-transform-1.1.0.tgz",
"integrity": "sha1-1BDwZbBdojCB/NEPKIVMKb2jOwY=",
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/parallel-transform/-/parallel-transform-1.2.0.tgz",
"integrity": "sha512-P2vSmIu38uIlvdcU7fDkyrxj33gTUy/ABO5ZUbGowxNCopBq/OoD42bP4UmMrJoPyk4Uqf0mu3mtWBhHCZD8yg==",
"requires": {
"cyclist": "~0.2.2",
"cyclist": "^1.0.1",
"inherits": "^2.0.3",
"readable-stream": "^2.1.5"
}
@@ -11788,13 +11761,12 @@
}
},
"parse-asn1": {
"version": "5.1.5",
"resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.5.tgz",
"integrity": "sha512-jkMYn1dcJqF6d5CpU689bq7w/b5ALS9ROVSpQDPrZsqqesUJii9qutvoT5ltGedNXMO2e16YUWIghG9KxaViTQ==",
"version": "5.1.6",
"resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.6.tgz",
"integrity": "sha512-RnZRo1EPU6JBnra2vGHj0yhp6ebyjBZpmUCLHWiFhxlzvBCCpAuZ7elsBp1PVAbQN0/04VD/19rfzlBSwLstMw==",
"requires": {
"asn1.js": "^4.0.0",
"asn1.js": "^5.2.0",
"browserify-aes": "^1.0.0",
"create-hash": "^1.1.0",
"evp_bytestokey": "^1.0.0",
"pbkdf2": "^3.0.3",
"safe-buffer": "^5.1.1"
@@ -11913,9 +11885,9 @@
}
},
"pbkdf2": {
"version": "3.0.17",
"resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.0.17.tgz",
"integrity": "sha512-U/il5MsrZp7mGg3mSQfn742na2T+1/vHDCG5/iTI3X9MKUuYUZVLQhyRsg06mCgDBTd57TxzgZt7P+fYfjRLtA==",
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.1.tgz",
"integrity": "sha512-4Ejy1OPxi9f2tt1rRV7Go7zmfDQ+ZectEQz3VGUQhgq62HtIRPDyG/JtnwIxs6x3uNMwo2V7q1fMvKjb+Tnpqg==",
"requires": {
"create-hash": "^1.1.2",
"create-hmac": "^1.1.4",
@@ -12218,6 +12190,8 @@
"version": "7.3.1",
"resolved": "https://registry.npmjs.org/promise/-/promise-7.3.1.tgz",
"integrity": "sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==",
"dev": true,
"optional": true,
"requires": {
"asap": "~2.0.3"
}
@@ -12238,13 +12212,23 @@
}
},
"prop-types": {
"version": "15.6.0",
"resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.6.0.tgz",
"integrity": "sha1-zq8IMCL8RrSjX2nhPvda7Q1jmFY=",
"version": "15.7.2",
"resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.7.2.tgz",
"integrity": "sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ==",
"requires": {
"fbjs": "^0.8.16",
"loose-envify": "^1.3.1",
"object-assign": "^4.1.1"
"loose-envify": "^1.4.0",
"object-assign": "^4.1.1",
"react-is": "^16.8.1"
},
"dependencies": {
"loose-envify": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
"integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
"requires": {
"js-tokens": "^3.0.0 || ^4.0.0"
}
}
}
},
"prop-types-exact": {
@@ -12330,16 +12314,16 @@
},
"dependencies": {
"bn.js": {
"version": "4.11.8",
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz",
"integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA=="
"version": "4.11.9",
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz",
"integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw=="
}
}
},
"pump": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz",
"integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==",
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
"integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
"requires": {
"end-of-stream": "^1.1.0",
"once": "^1.3.1"
@@ -12353,6 +12337,17 @@
"duplexify": "^3.6.0",
"inherits": "^2.0.3",
"pump": "^2.0.0"
},
"dependencies": {
"pump": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz",
"integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==",
"requires": {
"end-of-stream": "^1.1.0",
"once": "^1.3.1"
}
}
}
},
"punycode": {
@@ -13492,9 +13487,9 @@
},
"dependencies": {
"lodash": {
"version": "4.17.15",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz",
"integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==",
"version": "4.17.20",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz",
"integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA==",
"dev": true
}
}
@@ -13958,12 +13953,12 @@
"dev": true
},
"selfsigned": {
"version": "1.10.7",
"resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-1.10.7.tgz",
"integrity": "sha512-8M3wBCzeWIJnQfl43IKwOmC4H/RAp50S8DF60znzjW5GVqTcSe2vWclt7hmYVPkKPlHWOu5EaWOMZ2Y6W8ZXTA==",
"version": "1.10.8",
"resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-1.10.8.tgz",
"integrity": "sha512-2P4PtieJeEwVgTU9QEcwIRDQ/mXJLX8/+I3ur+Pg16nS8oNbrGxEso9NyYWy8NAmXiNl4dlAp5MwoNeCWzON4w==",
"dev": true,
"requires": {
"node-forge": "0.9.0"
"node-forge": "^0.10.0"
}
},
"semver": {
@@ -14001,10 +13996,13 @@
}
},
"serialize-javascript": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-3.0.0.tgz",
"integrity": "sha512-skZcHYw2vEX4bw90nAr2iTTsz6x2SrHEnfxgKYmZlvJYBEZrvbKtobJWlQ20zczKb3bsHHXXTYt48zBA7ni9cw==",
"dev": true
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-3.1.0.tgz",
"integrity": "sha512-JIJT1DGiWmIKhzRsG91aS6Ze4sFUrYbltlkg2onR5OrnNM02Kl/hnY/T4FN2omvyeBbQmMJv+K4cPOpGzOTFBg==",
"dev": true,
"requires": {
"randombytes": "^2.1.0"
}
},
"serializerr": {
"version": "1.0.3",
@@ -14644,9 +14642,9 @@
}
},
"stream-shift": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.0.tgz",
"integrity": "sha1-1cdSgl5TZ+eG944Y5EXqIjoVWVI="
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz",
"integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ=="
},
"strict-uri-encode": {
"version": "2.0.0",
@@ -15356,26 +15354,21 @@
}
},
"terser-webpack-plugin": {
"version": "1.4.3",
"resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-1.4.3.tgz",
"integrity": "sha512-QMxecFz/gHQwteWwSo5nTc6UaICqN1bMedC5sMtUc7y3Ha3Q8y6ZO0iCR8pq4RJC8Hjf0FEPEHZqcMB/+DFCrA==",
"version": "1.4.5",
"resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-1.4.5.tgz",
"integrity": "sha512-04Rfe496lN8EYruwi6oPQkG0vo8C+HT49X687FZnpPF0qMAIHONI6HEXYPKDOE8e5HjXTyKfqRd/agHtH0kOtw==",
"requires": {
"cacache": "^12.0.2",
"find-cache-dir": "^2.1.0",
"is-wsl": "^1.1.0",
"schema-utils": "^1.0.0",
"serialize-javascript": "^2.1.2",
"serialize-javascript": "^4.0.0",
"source-map": "^0.6.1",
"terser": "^4.1.2",
"webpack-sources": "^1.4.0",
"worker-farm": "^1.7.0"
},
"dependencies": {
"bluebird": {
"version": "3.7.2",
"resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz",
"integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg=="
},
"cacache": {
"version": "12.0.4",
"resolved": "https://registry.npmjs.org/cacache/-/cacache-12.0.4.tgz",
@@ -15443,14 +15436,6 @@
"path-exists": "^3.0.0"
}
},
"lru-cache": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
"integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
"requires": {
"yallist": "^3.0.2"
}
},
"make-dir": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz",
@@ -15460,23 +15445,6 @@
"semver": "^5.6.0"
}
},
"mississippi": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/mississippi/-/mississippi-3.0.0.tgz",
"integrity": "sha512-x471SsVjUtBRtcvd4BzKE9kFC+/2TeWgKCgw0bZcw1b9l2X3QX5vCWgF+KaZaYm87Ss//rHnWryupDrgLvmSkA==",
"requires": {
"concat-stream": "^1.5.0",
"duplexify": "^3.4.2",
"end-of-stream": "^1.1.0",
"flush-write-stream": "^1.0.0",
"from2": "^2.1.0",
"parallel-transform": "^1.1.0",
"pump": "^3.0.0",
"pumpify": "^1.3.3",
"stream-each": "^1.1.0",
"through2": "^2.0.0"
}
},
"p-limit": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
@@ -15511,24 +15479,13 @@
"find-up": "^3.0.0"
}
},
"pump": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
"integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
"requires": {
"end-of-stream": "^1.1.0",
"once": "^1.3.1"
}
},
"serialize-javascript": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-2.1.2.tgz",
"integrity": "sha512-rs9OggEUF0V4jUSecXazOYsLfu7OGK2qIn3c7IPBiffz32XniEp/TX9Xmc9LQfK2nQ2QKHvZ2oygKUGU0lG4jQ=="
},
"source-list-map": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/source-list-map/-/source-list-map-2.0.1.tgz",
"integrity": "sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw=="
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-4.0.0.tgz",
"integrity": "sha512-GaNA54380uFefWghODBWEGisLZFj00nS5ACs6yHa9nLqlLpVLO8ChDGeKRjZnV4Nh4n0Qi7nhYZD/9fCPzEqkw==",
"requires": {
"randombytes": "^2.1.0"
}
},
"source-map": {
"version": "0.6.1",
@@ -15542,15 +15499,6 @@
"requires": {
"figgy-pudding": "^3.5.1"
}
},
"webpack-sources": {
"version": "1.4.3",
"resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-1.4.3.tgz",
"integrity": "sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==",
"requires": {
"source-list-map": "^2.0.0",
"source-map": "~0.6.1"
}
}
}
},
@@ -15589,9 +15537,9 @@
"dev": true
},
"timers-browserify": {
"version": "2.0.11",
"resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.11.tgz",
"integrity": "sha512-60aV6sgJ5YEbzUdn9c8kYGIqOubPoUdqQCul3SBAsRCZ40s6Y5cMcrW4dt3/k/EsbLVJNl9n6Vz3fTc+k2GeKQ==",
"version": "2.0.12",
"resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.12.tgz",
"integrity": "sha512-9phl76Cqm6FhSX9Xe1ZUAMLtm1BLkKj2Qd5ApyWkXzsMRaA7dgr81kf4wJmQf/hAvg8EEyJxDo3du/0KlhPiKQ==",
"requires": {
"setimmediate": "^1.0.4"
}
@@ -15790,11 +15738,6 @@
"resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz",
"integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c="
},
"ua-parser-js": {
"version": "0.7.20",
"resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-0.7.20.tgz",
"integrity": "sha512-8OaIKfzL5cpx8eCMAhhvTlft8GYF8b2eQr6JkCyVdrgjcytyOmPCXrqXFcUnhonRpLlh5yxEZVohm6mzaowUOw=="
},
"uglify-js": {
"version": "3.9.3",
"resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.9.3.tgz",
@@ -16172,20 +16115,20 @@
}
},
"watchpack": {
"version": "1.7.2",
"resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.7.2.tgz",
"integrity": "sha512-ymVbbQP40MFTp+cNMvpyBpBtygHnPzPkHqoIwRRj/0B8KhqQwV8LaKjtbaxF2lK4vl8zN9wCxS46IFCU5K4W0g==",
"version": "1.7.5",
"resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.7.5.tgz",
"integrity": "sha512-9P3MWk6SrKjHsGkLT2KHXdQ/9SNkyoJbabxnKOoJepsvJjJG8uYTR3yTPxPQvNDI3w4Nz1xnE0TLHK4RIVe/MQ==",
"requires": {
"chokidar": "^3.4.0",
"chokidar": "^3.4.1",
"graceful-fs": "^4.1.2",
"neo-async": "^2.5.0",
"watchpack-chokidar2": "^2.0.0"
"watchpack-chokidar2": "^2.0.1"
}
},
"watchpack-chokidar2": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/watchpack-chokidar2/-/watchpack-chokidar2-2.0.0.tgz",
"integrity": "sha512-9TyfOyN/zLUbA288wZ8IsMZ+6cbzvsNyEzSBp6e/zkifi6xxbl8SmQ/CxQq32k8NNqrdVEVUVSEf56L4rQ/ZxA==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/watchpack-chokidar2/-/watchpack-chokidar2-2.0.1.tgz",
"integrity": "sha512-nCFfBIPKr5Sh61s4LPpy1Wtfi0HE8isJ3d2Yb5/Ppw2P2B/3eVSEBjKfN0fmHJSK14+31KwMKmcrzs2GM4P0Ww==",
"optional": true,
"requires": {
"chokidar": "^2.1.8"
@@ -16447,9 +16390,9 @@
"dev": true
},
"webpack": {
"version": "4.43.0",
"resolved": "https://registry.npmjs.org/webpack/-/webpack-4.43.0.tgz",
"integrity": "sha512-GW1LjnPipFW2Y78OOab8NJlCflB7EFskMih2AHdvjbpKMeDJqEgSx24cXXXiPS65+WSwVyxtDsJH6jGX2czy+g==",
"version": "4.44.2",
"resolved": "https://registry.npmjs.org/webpack/-/webpack-4.44.2.tgz",
"integrity": "sha512-6KJVGlCxYdISyurpQ0IPTklv+DULv05rs2hseIXer6D7KrUicRDLFb4IUM1S6LUAKypPM/nSiVSuv8jHu1m3/Q==",
"requires": {
"@webassemblyjs/ast": "1.9.0",
"@webassemblyjs/helper-module-context": "1.9.0",
@@ -16459,7 +16402,7 @@
"ajv": "^6.10.2",
"ajv-keywords": "^3.4.1",
"chrome-trace-event": "^1.0.2",
"enhanced-resolve": "^4.1.0",
"enhanced-resolve": "^4.3.0",
"eslint-scope": "^4.0.3",
"json-parse-better-errors": "^1.0.2",
"loader-runner": "^2.4.0",
@@ -16472,14 +16415,14 @@
"schema-utils": "^1.0.0",
"tapable": "^1.1.3",
"terser-webpack-plugin": "^1.4.3",
"watchpack": "^1.6.1",
"watchpack": "^1.7.4",
"webpack-sources": "^1.4.1"
},
"dependencies": {
"ajv": {
"version": "6.12.2",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.2.tgz",
"integrity": "sha512-k+V+hzjm5q/Mr8ef/1Y9goCmlsK4I6Sm74teeyGvFk1XrOsbsKLjEdrvny42CZ+a8sXbk8KWpY/bDwS+FLL2UQ==",
"version": "6.12.6",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
"integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
"requires": {
"fast-deep-equal": "^3.1.1",
"fast-json-stable-stringify": "^2.0.0",
@@ -16578,9 +16521,9 @@
}
},
"fast-deep-equal": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.1.tgz",
"integrity": "sha512-8UEa58QDLauDNfpbrX55Q9jrGHThw2ZMdOky5Gl1CDtVeJDPVrG4Jxx1N8jw2gkWaff5UUuX1KJd+9zGe2B+ZA=="
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="
},
"is-accessor-descriptor": {
"version": "0.1.6",
@@ -16661,29 +16604,10 @@
"to-regex": "^3.0.2"
}
},
"source-list-map": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/source-list-map/-/source-list-map-2.0.1.tgz",
"integrity": "sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw=="
},
"source-map": {
"version": "0.6.1",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
"integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
},
"tapable": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz",
"integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA=="
},
"webpack-sources": {
"version": "1.4.3",
"resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-1.4.3.tgz",
"integrity": "sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==",
"requires": {
"source-list-map": "^2.0.0",
"source-map": "~0.6.1"
}
}
}
},
@@ -17605,7 +17529,6 @@
"version": "1.4.3",
"resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-1.4.3.tgz",
"integrity": "sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==",
"dev": true,
"requires": {
"source-list-map": "^2.0.0",
"source-map": "~0.6.1"
@@ -17614,14 +17537,12 @@
"source-list-map": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/source-list-map/-/source-list-map-2.0.1.tgz",
"integrity": "sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw==",
"dev": true
"integrity": "sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw=="
},
"source-map": {
"version": "0.6.1",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
"integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
"dev": true
"integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
}
}
},
@@ -17660,11 +17581,6 @@
}
}
},
"whatwg-fetch": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.0.0.tgz",
"integrity": "sha512-9GSJUgz1D4MfyKU7KRqwOjXCXTqWdFNvEr7eUBYchQiVc744mqK/MzXPNR2WsPkmkOa4ywfg8C2n8h+13Bey1Q=="
},
"whatwg-mimetype": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-2.3.0.tgz",
@@ -17780,9 +17696,9 @@
"dev": true
},
"xtend": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.1.tgz",
"integrity": "sha1-pcbVMr5lbiPbgg77lDofBJmNY68="
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
"integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ=="
},
"y18n": {
"version": "4.0.0",

@@ -9,7 +9,7 @@ function _init() {
export CGO_ENABLED=0
## List of architectures and OS to test cross compilation.
SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64"
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64"
}
function _build() {

@@ -1,69 +0,0 @@
#!/bin/bash
#
# MinIO Cloud Storage, (C) 2019 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e
set -E
set -o pipefail
function start_minio_server()
{
MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 \
minio --quiet --json server /data --address 127.0.0.1:24242 > server.log 2>&1 &
server_pid=$!
sleep 10
echo "$server_pid"
}
function start_minio_gateway_s3()
{
MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 \
minio --quiet --json gateway s3 http://127.0.0.1:24242 \
--address 127.0.0.1:24240 > gateway.log 2>&1 &
gw_pid=$!
sleep 10
echo "$gw_pid"
}
function main()
{
sr_pid="$(start_minio_server)"
gw_pid="$(start_minio_gateway_s3)"
SERVER_ENDPOINT=127.0.0.1:24240 ENABLE_HTTPS=0 ACCESS_KEY=minio \
SECRET_KEY=minio123 MINT_MODE="full" /mint/entrypoint.sh \
aws-sdk-go aws-sdk-java aws-sdk-php aws-sdk-ruby awscli \
healthcheck mc minio-dotnet minio-js \
minio-py s3cmd s3select security
rv=$?
kill "$sr_pid"
kill "$gw_pid"
sleep 3
if [ "$rv" -ne 0 ]; then
echo "=========== Gateway ==========="
cat "gateway.log"
echo "=========== Server ==========="
cat "server.log"
fi
rm -f gateway.log server.log
}
main "$@"

@@ -44,10 +44,21 @@ func releaseTag(version string) string {
relPrefix = prefix
}
relSuffix := ""
if hotfix := os.Getenv("MINIO_HOTFIX"); hotfix != "" {
relSuffix = hotfix
}
relTag := strings.Replace(version, " ", "-", -1)
relTag = strings.Replace(relTag, ":", "-", -1)
relTag = strings.Replace(relTag, ",", "", -1)
return relPrefix + "." + relTag
relTag = relPrefix + "." + relTag
if relSuffix != "" {
relTag += "." + relSuffix
}
return relTag
}
// commitID returns the abbreviated commit-id hash of the last commit.
@@ -68,5 +79,12 @@ func commitID() string {
}
func main() {
fmt.Println(genLDFlags(time.Now().UTC().Format(time.RFC3339)))
var version string
if len(os.Args) > 1 {
version = os.Args[1]
} else {
version = time.Now().UTC().Format(time.RFC3339)
}
fmt.Println(genLDFlags(version))
}
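
For clarity, the effect of the new MINIO_HOTFIX suffix can be reproduced with a small self-contained Go sketch. The sanitization rules come from the diff above; treating them as a standalone program, and "RELEASE" as the default prefix, are illustrative assumptions.

package main

import (
	"fmt"
	"os"
	"strings"
)

// buildReleaseTag mirrors releaseTag above: sanitize the version
// string, prepend the release prefix, then append the optional
// MINIO_HOTFIX suffix. "RELEASE" as the default prefix is an
// assumption based on the published tag format.
func buildReleaseTag(version string) string {
	relTag := strings.NewReplacer(" ", "-", ":", "-", ",", "").Replace(version)
	relTag = "RELEASE." + relTag
	if hotfix := os.Getenv("MINIO_HOTFIX"); hotfix != "" {
		relTag += "." + hotfix
	}
	return relTag
}

func main() {
	// With MINIO_HOTFIX=1 this prints RELEASE.2021-02-24T18-44-45Z.1
	fmt.Println(buildReleaseTag("2021-02-24T18:44:45Z"))
}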

@@ -3,5 +3,5 @@
set -e
for d in $(go list ./... | grep -v browser); do
CGO_ENABLED=1 go test -v -race --timeout 50m "$d"
CGO_ENABLED=1 go test -v -race --timeout 100m "$d"
done

@@ -45,88 +45,64 @@ FUNCTIONAL_TESTS="$WORK_DIR/functional-tests.sh"
function start_minio_fs()
{
"${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
minio_pid=$!
sleep 10
echo "$minio_pid"
}
function start_minio_erasure()
{
"${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
minio_pid=$!
sleep 15
echo "$minio_pid"
}
function start_minio_erasure_sets()
{
"${MINIO[@]}" server "${WORK_DIR}/erasure-disk-sets{1...32}" >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
minio_pid=$!
export MINIO_ENDPOINTS="${WORK_DIR}/erasure-disk-sets{1...32}"
"${MINIO[@]}" server > "$WORK_DIR/erasure-minio-sets.log" 2>&1 &
sleep 15
echo "$minio_pid"
}
function start_minio_zone_erasure_sets()
function start_minio_pool_erasure_sets()
{
declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
minio_pids[0]=$!
"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
minio_pids[1]=$!
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/pool-disk-sets{1...4} http://127.0.0.1:9001${WORK_DIR}/pool-disk-sets{5...8}"
"${MINIO[@]}" server --address ":9000" > "$WORK_DIR/pool-minio-9000.log" 2>&1 &
"${MINIO[@]}" server --address ":9001" > "$WORK_DIR/pool-minio-9001.log" 2>&1 &
sleep 40
echo "${minio_pids[@]}"
}
function start_minio_zone_erasure_sets_ipv6()
function start_minio_pool_erasure_sets_ipv6()
{
declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
minio_pids[0]=$!
"${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
minio_pids[1]=$!
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
export MINIO_ENDPOINTS="http://[::1]:9000${WORK_DIR}/pool-disk-sets{1...4} http://[::1]:9001${WORK_DIR}/pool-disk-sets{5...8}"
"${MINIO[@]}" server --address="[::1]:9000" > "$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
"${MINIO[@]}" server --address="[::1]:9001" > "$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &
sleep 40
echo "${minio_pids[@]}"
}
function start_minio_dist_erasure()
{
declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
minio_pids[0]=$!
"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
minio_pids[1]=$!
"${MINIO[@]}" server --address=:9002 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9002.log" 2>&1 &
minio_pids[2]=$!
"${MINIO[@]}" server --address=:9003 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9003.log" 2>&1 &
minio_pids[3]=$!
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/dist-disk1 http://127.0.0.1:9001${WORK_DIR}/dist-disk2 http://127.0.0.1:9002${WORK_DIR}/dist-disk3 http://127.0.0.1:9003${WORK_DIR}/dist-disk4"
for i in $(seq 0 3); do
"${MINIO[@]}" server --address ":900${i}" > "$WORK_DIR/dist-minio-900${i}.log" 2>&1 &
done
sleep 40
echo "${minio_pids[@]}"
}
function run_test_fs()
{
minio_pid="$(start_minio_fs)"
start_minio_fs
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
kill "$minio_pid"
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
@@ -137,13 +113,14 @@ function run_test_fs()
return "$rv"
}
function run_test_erasure_sets() {
minio_pid="$(start_minio_erasure_sets)"
function run_test_erasure_sets()
{
start_minio_erasure_sets
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
kill "$minio_pid"
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
@@ -154,55 +131,51 @@ function run_test_erasure_sets() {
return "$rv"
}
function run_test_zone_erasure_sets()
function run_test_pool_erasure_sets()
{
minio_pids=( $(start_minio_zone_erasure_sets) )
start_minio_pool_erasure_sets
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
for pid in "${minio_pids[@]}"; do
kill "$pid"
done
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
for i in $(seq 0 1); do
echo "server$i log:"
cat "$WORK_DIR/zone-minio-900$i.log"
cat "$WORK_DIR/pool-minio-900$i.log"
done
fi
for i in $(seq 0 1); do
rm -f "$WORK_DIR/zone-minio-900$i.log"
rm -f "$WORK_DIR/pool-minio-900$i.log"
done
return "$rv"
}
function run_test_zone_erasure_sets_ipv6()
function run_test_pool_erasure_sets_ipv6()
{
minio_pids=( $(start_minio_zone_erasure_sets_ipv6) )
start_minio_pool_erasure_sets_ipv6
export SERVER_ENDPOINT="[::1]:9000"
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
for pid in "${minio_pids[@]}"; do
kill "$pid"
done
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
for i in $(seq 0 1); do
echo "server$i log:"
cat "$WORK_DIR/zone-minio-ipv6-900$i.log"
cat "$WORK_DIR/pool-minio-ipv6-900$i.log"
done
fi
for i in $(seq 0 1); do
rm -f "$WORK_DIR/zone-minio-ipv6-900$i.log"
rm -f "$WORK_DIR/pool-minio-ipv6-900$i.log"
done
return "$rv"
@@ -210,12 +183,12 @@ function run_test_zone_erasure_sets_ipv6()
function run_test_erasure()
{
minio_pid="$(start_minio_erasure)"
start_minio_erasure
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
kill "$minio_pid"
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
@@ -228,14 +201,12 @@ function run_test_erasure()
function run_test_dist_erasure()
{
minio_pids=( $(start_minio_dist_erasure) )
start_minio_dist_erasure
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
for pid in "${minio_pids[@]}"; do
kill "$pid"
done
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
@@ -251,7 +222,7 @@ function run_test_dist_erasure()
rm -f "$WORK_DIR/dist-minio-9000.log" "$WORK_DIR/dist-minio-9001.log" "$WORK_DIR/dist-minio-9002.log" "$WORK_DIR/dist-minio-9003.log"
return "$rv"
return "$rv"
}
function purge()
@@ -324,14 +295,14 @@ function main()
fi
echo "Testing in Distributed Eraure expanded setup"
if ! run_test_zone_erasure_sets; then
if ! run_test_pool_erasure_sets; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
echo "Testing in Distributed Erasure expanded setup with ipv6"
if ! run_test_zone_erasure_sets_ipv6; then
if ! run_test_pool_erasure_sets_ipv6; then
echo "FAILED"
purge "$WORK_DIR"
exit 1

@@ -29,44 +29,52 @@ MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )
function start_minio_3_node() {
declare -a minio_pids
declare -a ARGS
export MINIO_ACCESS_KEY=minio
export MINIO_SECRET_KEY=minio123
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MINIO_ERASURE_SET_DRIVE_COUNT=6
start_port=$(shuf -i 10000-65000 -n 1)
args=""
for i in $(seq 1 3); do
ARGS+=("http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/")
args="$args http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/"
done
"${MINIO[@]}" --address ":$[$start_port+1]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
minio_pids[0]=$!
disown "${minio_pids[0]}"
"${MINIO[@]}" --address ":$[$start_port+1]" $args > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
disown $!
"${MINIO[@]}" --address ":$[$start_port+2]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
minio_pids[1]=$!
disown "${minio_pids[1]}"
"${MINIO[@]}" --address ":$[$start_port+2]" $args > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
disown $!
"${MINIO[@]}" --address ":$[$start_port+3]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
minio_pids[2]=$!
disown "${minio_pids[2]}"
"${MINIO[@]}" --address ":$[$start_port+3]" $args > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
disown $!
sleep "$1"
for pid in "${minio_pids[@]}"; do
if ! kill "$pid"; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
if [ "$(pgrep -c minio)" -ne 3 ]; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
if ! pkill minio; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
sleep 1;
if pgrep minio; then
# forcibly killing, to proceed further properly.
kill -9 "$pid"
sleep 1 # wait 1sec per pid
done
if ! pkill -9 minio; then
echo "no minio process running anymore, proceed."
fi
fi
}

@@ -61,7 +61,7 @@ type accessControlPolicy struct {
func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketACL")
defer logger.AuditLog(w, r, "PutBucketACL", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -125,7 +125,7 @@ func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.
func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketACL")
defer logger.AuditLog(w, r, "GetBucketACL", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -176,7 +176,7 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutObjectACL")
defer logger.AuditLog(w, r, "PutObjectACL", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -240,7 +240,7 @@ func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.
func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetObjectACL")
defer logger.AuditLog(w, r, "GetObjectACL", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
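
Every handler change in this file follows the same pattern: the audit log call drops the repeated API-name string literal and instead derives it from the context created by newContext. A minimal before/after sketch, using only the names visible in the diff:

// before: API name passed explicitly on every audit call
defer logger.AuditLog(w, r, "GetBucketACL", mustGetClaimsFromToken(r))

// after: newContext records the API name once and AuditLog
// recovers it from ctx, removing the duplicated literal
ctx := newContext(r, w, "GetBucketACL")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))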

@@ -0,0 +1,289 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"encoding/json"
"io"
"io/ioutil"
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/madmin"
)
const (
bucketQuotaConfigFile = "quota.json"
bucketTargetsFile = "bucket-targets.json"
)
// PutBucketQuotaConfigHandler - PUT Bucket quota configuration.
// ----------
// Places a quota configuration on the specified bucket. The quota
// specified in the quota configuration will be applied by default
// to enforce total quota for the specified bucket.
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketQuotaConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
data, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
if _, err = parseBucketQuota(bucket, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write success response.
writeSuccessResponseHeadersOnly(w)
}
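
For context, this is roughly what a client invocation of the handler above could look like. This is a hedged sketch only: the madmin.BucketQuota field names, the HardQuota constant, and the SetBucketQuota helper are assumptions about the madmin package of this era, not anything confirmed by the diff.

package main

import (
	"context"
	"log"

	"github.com/minio/minio/pkg/madmin"
)

func main() {
	// Placeholder endpoint and credentials.
	adm, err := madmin.New("127.0.0.1:9000", "minio", "minio123", false)
	if err != nil {
		log.Fatalln(err)
	}
	// The handler persists this payload as quota.json in the bucket
	// metadata; a hard quota caps total usage of the bucket.
	// BucketQuota and HardQuota are assumed madmin names.
	quota := &madmin.BucketQuota{Quota: 1 << 30, Type: madmin.HardQuota}
	if err := adm.SetBucketQuota(context.Background(), "mybucket", quota); err != nil {
		log.Fatalln(err)
	}
}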
// GetBucketQuotaConfigHandler - gets bucket quota configuration
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketQuotaConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
config, err := globalBucketMetadataSys.GetQuotaConfig(bucket)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
configData, err := json.Marshal(config)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, configData)
}
// SetRemoteTargetHandler - sets a remote target for bucket
func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetBucketTarget")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
update := r.URL.Query().Get("update") == "true"
if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Get current object layer instance.
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.SetBucketTargetAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
cred, _, _, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
var target madmin.BucketTarget
if err = json.Unmarshal(reqBytes, &target); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
sameTarget, _ := isLocalHost(target.URL().Hostname(), target.URL().Port(), globalMinioPort)
if sameTarget && bucket == target.TargetBucket {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBucketRemoteIdenticalToSource), r.URL)
return
}
target.SourceBucket = bucket
if !update {
target.Arn = globalBucketTargetSys.getRemoteARN(bucket, &target)
}
if target.Arn == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, update); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
tgtBytes, err := json.Marshal(&targets)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
data, err := json.Marshal(target.Arn)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, data)
}
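
Note the body handling above: the payload is read through io.LimitReader bounded by Content-Length and decrypted with the requester's secret key before it is parsed as JSON. A hedged client-side sketch of producing such a body follows; madmin.EncryptData is assumed to be the counterpart of the DecryptData call, and the single field shown is illustrative.

// Serialize a bucket target and encrypt it the way
// SetRemoteTargetHandler expects to decrypt it.
target := madmin.BucketTarget{
	TargetBucket: "destbucket", // illustrative; see madmin.BucketTarget
}
body, err := json.Marshal(&target)
if err != nil {
	log.Fatalln(err)
}
// The secret key must match the one used to sign the admin request,
// since the handler decrypts with cred.SecretKey.
encBody, err := madmin.EncryptData("minio123", body)
if err != nil {
	log.Fatalln(err)
}
_ = encBody // sent as the request body of the set-remote-target call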
// ListRemoteTargetsHandler - lists remote target(s) for a bucket or gets a target
// for a particular ARN type
func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListBucketTargets")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
arnType := vars["type"]
if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Get current object layer instance.
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketTargetAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if bucket != "" {
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if _, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
targets := globalBucketTargetSys.ListTargets(ctx, bucket, arnType)
data, err := json.Marshal(targets)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, data)
}
// RemoveRemoteTargetHandler - removes a remote target for bucket with specified ARN
func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveBucketTarget")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
arn := vars["arn"]
if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Get current object layer instance.
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.SetBucketTargetAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err := globalBucketTargetSys.RemoveTarget(ctx, bucket, arn); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
tgtBytes, err := json.Marshal(&targets)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write success response.
writeSuccessNoContent(w)
}

@@ -42,14 +42,14 @@ import (
func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) (auth.Credentials, ObjectLayer) {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
objectAPI := newObjectLayerFn()
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return auth.Credentials{}, nil
}
// Validate request signature.
cred, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
cred, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return cred, nil
@@ -62,7 +62,7 @@ func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *htt
func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteConfigKV")
defer logger.AuditLog(w, r, "DeleteConfigKV", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
@@ -104,7 +104,7 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetConfigKV")
defer logger.AuditLog(w, r, "SetConfigKV", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
@@ -131,12 +131,13 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
return
}
if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
dynamic, err := cfg.ReadConfig(bytes.NewReader(kvBytes))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(cfg); err != nil {
if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
@@ -158,6 +159,16 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
}
if dynamic {
// Apply dynamic values.
if err := applyDynamicConfig(GlobalContext, objectAPI, cfg); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
globalNotificationSys.SignalService(serviceReloadDynamic)
// If all values were dynamic, tell the client.
w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
}
writeSuccessResponseHeadersOnly(w)
}
@@ -165,7 +176,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetConfigKV")
defer logger.AuditLog(w, r, "GetConfigKV", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
@@ -203,7 +214,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ClearConfigHistoryKV")
defer logger.AuditLog(w, r, "ClearConfigHistoryKV", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
@@ -240,7 +251,7 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *
func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RestoreConfigHistoryKV")
defer logger.AuditLog(w, r, "RestoreConfigHistoryKV", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
@@ -266,12 +277,12 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
return
}
if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
if _, err = cfg.ReadConfig(bytes.NewReader(kvBytes)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(cfg); err != nil {
if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
@@ -288,7 +299,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListConfigHistoryKV")
defer logger.AuditLog(w, r, "ListConfigHistoryKV", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
@@ -328,7 +339,7 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h
func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "HelpConfigKV")
defer logger.AuditLog(w, r, "HelpHistoryKV", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
@@ -356,7 +367,7 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req
func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetConfig")
defer logger.AuditLog(w, r, "SetConfig", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
@@ -378,12 +389,12 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
}
cfg := newServerConfig()
if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
if _, err = cfg.ReadConfig(bytes.NewReader(kvBytes)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(cfg); err != nil {
if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
@@ -413,7 +424,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetConfig")
defer logger.AuditLog(w, r, "GetConfig", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {

View File

@@ -22,8 +22,11 @@ import (
"io"
"io/ioutil"
"net/http"
"path"
"sort"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
iampolicy "github.com/minio/minio/pkg/iam/policy"
@@ -35,14 +38,14 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
var adminAPIErr APIErrorCode
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return nil, cred
}
// Validate request signature.
cred, adminAPIErr = checkAdminRequestAuthType(ctx, r, action, "")
cred, adminAPIErr = checkAdminRequestAuth(ctx, r, action, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return nil, cred
@@ -55,7 +58,7 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveUser")
defer logger.AuditLog(w, r, "RemoveUser", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeleteUserAdminAction)
if objectAPI == nil {
@@ -65,7 +68,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
accessKey := vars["accessKey"]
ok, err := globalIAMSys.IsTempUser(accessKey)
ok, _, err := globalIAMSys.IsTempUser(accessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -93,7 +96,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListUsers")
defer logger.AuditLog(w, r, "ListUsers", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
if objectAPI == nil {
@@ -127,16 +130,43 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetUserInfo")
defer logger.AuditLog(w, r, "GetUserInfo", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetUserAdminAction)
if objectAPI == nil {
return
}
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
name := vars["accessKey"]
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
accessKey := cred.AccessKey
if cred.ParentUser != "" {
accessKey = cred.ParentUser
}
implicitPerm := name == accessKey
if !implicitPerm {
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Action: iampolicy.GetUserAdminAction,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
userInfo, err := globalIAMSys.GetUserInfo(name)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
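
The inline authorization that replaces validateAdminUsersReq lets a caller always read its own account: temporary and service-account credentials are first resolved to their parent user, and only lookups of other accounts require an explicit GetUserAdminAction grant. A self-contained sketch of that resolution (names are illustrative):

package main

import "fmt"

// effectiveAccount resolves the account that authorization runs against;
// STS and service-account credentials act as their parent user.
func effectiveAccount(accessKey, parentUser string) string {
	if parentUser != "" {
		return parentUser
	}
	return accessKey
}

// readingOwnInfo reports whether the lookup target is the caller itself,
// in which case no explicit GetUserAdminAction grant is needed.
func readingOwnInfo(target, accessKey, parentUser string) bool {
	return target == effectiveAccount(accessKey, parentUser)
}

func main() {
	fmt.Println(readingOwnInfo("alice", "SVCACCTKEY", "alice")) // true
	fmt.Println(readingOwnInfo("bob", "alice", ""))             // false
}
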
@@ -156,7 +186,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "UpdateGroupMembers")
defer logger.AuditLog(w, r, "UpdateGroupMembers", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AddUserToGroupAdminAction)
if objectAPI == nil {
@@ -201,7 +231,7 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetGroup")
defer logger.AuditLog(w, r, "GetGroup", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetGroupAdminAction)
if objectAPI == nil {
@@ -230,7 +260,7 @@ func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListGroups")
defer logger.AuditLog(w, r, "ListGroups", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListGroupsAdminAction)
if objectAPI == nil {
@@ -256,7 +286,7 @@ func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetGroupStatus")
defer logger.AuditLog(w, r, "SetGroupStatus", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableGroupAdminAction)
if objectAPI == nil {
@@ -293,7 +323,7 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetUserStatus")
defer logger.AuditLog(w, r, "SetUserStatus", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableUserAdminAction)
if objectAPI == nil {
@@ -304,7 +334,7 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
accessKey := vars["accessKey"]
status := vars["status"]
// Custom IAM policies not allowed for admin user.
// This API is not allowed to look up the accessKey user status

if accessKey == globalActiveCred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
@@ -328,22 +358,55 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddUser")
defer logger.AuditLog(w, r, "AddUser", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.CreateUserAdminAction)
if objectAPI == nil {
vars := mux.Vars(r)
accessKey := path.Clean(vars["accessKey"])
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
accessKey := vars["accessKey"]
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Custom IAM policies not allowed for admin user.
if accessKey == globalActiveCred.AccessKey {
// Not allowed to add a user with same access key as root credential
if owner && accessKey == cred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
return
}
if (cred.IsTemp() || cred.IsServiceAccount()) && cred.ParentUser == accessKey {
// If the incoming access key matches the parent user, we should
// reject the password change request.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
return
}
implicitPerm := accessKey == cred.AccessKey
if !implicitPerm {
parentUser := cred.ParentUser
if parentUser == "" {
parentUser = cred.AccessKey
}
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: parentUser,
Action: iampolicy.CreateUserAdminAction,
ConditionValues: getConditionValues(r, "", parentUser, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
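
Taken together, the AddUser guards above reject two cases before any policy evaluation: the root user overwriting its own credential, and a derived (temporary or service-account) credential resetting its parent's password. A compact sketch of that gate, with illustrative names:

package main

import (
	"errors"
	"fmt"
)

var errInvalidAddUser = errors.New("add-user argument not allowed")

// addUserGate mirrors the two early rejections in AddUser above.
func addUserGate(target, credKey, parentUser string, owner, derivedCred bool) error {
	if owner && target == credKey {
		// Root cannot re-add itself as a managed user.
		return errInvalidAddUser
	}
	if derivedCred && parentUser == target {
		// Derived credentials may not change the parent's password.
		return errInvalidAddUser
	}
	return nil
}

func main() {
	fmt.Println(addUserGate("alice", "SVCKEY", "alice", false, true)) // rejected
	fmt.Println(addUserGate("bob", "alice", "", false, false))        // nil
}
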
@@ -365,7 +428,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
return
}
if err = globalIAMSys.SetUser(accessKey, uinfo); err != nil {
if err = globalIAMSys.CreateUser(accessKey, uinfo); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -383,11 +446,11 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddServiceAccount")
defer logger.AuditLog(w, r, "AddServiceAccount", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
@@ -398,6 +461,12 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
return
}
// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
@@ -411,18 +480,12 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
return
}
// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, createReq.Policy)
newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, cred.Groups, createReq.Policy)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -462,11 +525,11 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListServiceAccounts")
defer logger.AuditLog(w, r, "ListServiceAccounts", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
@@ -517,11 +580,11 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteServiceAccount")
defer logger.AuditLog(w, r, "DeleteServiceAccount", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
@@ -572,15 +635,15 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
writeSuccessNoContent(w)
}
// AccountUsageInfoHandler returns usage
func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AccountUsageInfo")
// AccountInfoHandler returns usage
func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AccountInfo")
defer logger.AuditLog(w, r, "AccountUsageInfo", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
@@ -627,12 +690,6 @@ func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http
return rd, wr
}
buckets, err := objectAPI.ListBuckets(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Load the latest calculated data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil {
@@ -640,13 +697,44 @@ func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http
logger.LogIf(ctx, err)
}
accountName := cred.AccessKey
if cred.ParentUser != "" {
accountName = cred.ParentUser
// If etcd and DNS federation are configured, list buckets from etcd.
var buckets []BucketInfo
if globalDNSConfig != nil && globalBucketFederation {
dnsBuckets, err := globalDNSConfig.List()
if err != nil && !IsErrIgnored(err,
dns.ErrNoEntriesFound,
dns.ErrDomainMissing) {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
for _, dnsRecords := range dnsBuckets {
buckets = append(buckets, BucketInfo{
Name: dnsRecords[0].Key,
Created: dnsRecords[0].CreationDate,
})
}
sort.Slice(buckets, func(i, j int) bool {
return buckets[i].Name < buckets[j].Name
})
} else {
buckets, err = objectAPI.ListBuckets(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
acctInfo := madmin.AccountUsageInfo{
accountName := cred.AccessKey
policies, err := globalIAMSys.PolicyDBGet(accountName, false)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
acctInfo := madmin.AccountInfo{
AccountName: accountName,
Policy: globalIAMSys.GetCombinedPolicy(policies...),
}
for _, bucket := range buckets {
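
When DNS federation is active, the bucket listing above comes from the federation records rather than the local object layer, and is then sorted by name so ordering matches ListBuckets. A self-contained sketch; srvRecord stands in for the real DNS record type and models only the fields read above:

package main

import (
	"fmt"
	"sort"
	"time"
)

// srvRecord is an illustrative stand-in for the federation record type.
type srvRecord struct {
	Key          string
	CreationDate time.Time
}

type bucketInfo struct {
	Name    string
	Created time.Time
}

// bucketsFromDNS flattens federation records into bucket infos and
// sorts them by name, mirroring the listing branch above.
func bucketsFromDNS(records map[string][]srvRecord) []bucketInfo {
	var buckets []bucketInfo
	for _, recs := range records {
		buckets = append(buckets, bucketInfo{Name: recs[0].Key, Created: recs[0].CreationDate})
	}
	sort.Slice(buckets, func(i, j int) bool { return buckets[i].Name < buckets[j].Name })
	return buckets
}

func main() {
	recs := map[string][]srvRecord{
		"b": {{Key: "beta", CreationDate: time.Now()}},
		"a": {{Key: "alpha", CreationDate: time.Now()}},
	}
	fmt.Println(bucketsFromDNS(recs)[0].Name) // alpha
}
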
@@ -682,7 +770,7 @@ func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http
func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicyV2")
defer logger.AuditLog(w, r, "InfoCannedPolicyV2", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
@@ -709,7 +797,7 @@ func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicy")
defer logger.AuditLog(w, r, "InfoCannedPolicy", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
@@ -722,7 +810,10 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
return
}
json.NewEncoder(w).Encode(policy)
if err = json.NewEncoder(w).Encode(policy); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
}
@@ -730,7 +821,7 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPoliciesV2")
defer logger.AuditLog(w, r, "ListCannedPoliciesV2", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
@@ -764,7 +855,7 @@ func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Re
func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPolicies")
defer logger.AuditLog(w, r, "ListCannedPolicies", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
@@ -798,7 +889,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveCannedPolicy")
defer logger.AuditLog(w, r, "RemoveCannedPolicy", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeletePolicyAdminAction)
if objectAPI == nil {
@@ -826,7 +917,7 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddCannedPolicy")
defer logger.AuditLog(w, r, "AddCannedPolicy", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.CreatePolicyAdminAction)
if objectAPI == nil {
@@ -878,7 +969,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetPolicyForUserOrGroup")
defer logger.AuditLog(w, r, "SetPolicyForUserOrGroup", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AttachPolicyAdminAction)
if objectAPI == nil {
@@ -891,7 +982,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
isGroup := vars["isGroup"] == "true"
if !isGroup {
ok, err := globalIAMSys.IsTempUser(entityName)
ok, _, err := globalIAMSys.IsTempUser(entityName)
if err != nil && err != errNoSuchUser {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return

File diff suppressed because it is too large.

View File

@@ -66,7 +66,7 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro
// Initialize boot time
globalBootTime = UTCNow()
globalEndpoints = mustGetZoneEndpoints(erasureDirs...)
globalEndpoints = mustGetPoolEndpoints(erasureDirs...)
newAllSubsystems()
@@ -97,16 +97,9 @@ func initTestErasureObjLayer(ctx context.Context) (ObjectLayer, []string, error)
if err != nil {
return nil, nil, err
}
endpoints := mustGetNewEndpoints(erasureDirs...)
storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "")
if err != nil {
removeRoots(erasureDirs)
return nil, nil, err
}
endpoints := mustGetPoolEndpoints(erasureDirs...)
globalPolicySys = NewPolicySys()
objLayer := &erasureZones{zones: make([]*erasureSets, 1)}
objLayer.zones[0], err = newErasureSets(ctx, endpoints, storageDisks, format)
objLayer, err := newErasureServerPools(ctx, endpoints)
if err != nil {
return nil, nil, err
}
@@ -327,13 +320,13 @@ func TestExtractHealInitParams(t *testing.T) {
mkParams := func(clientToken string, forceStart, forceStop bool) url.Values {
v := url.Values{}
if clientToken != "" {
v.Add(string(mgmtClientToken), clientToken)
v.Add(mgmtClientToken, clientToken)
}
if forceStart {
v.Add(string(mgmtForceStart), "")
v.Add(mgmtForceStart, "")
}
if forceStop {
v.Add(string(mgmtForceStop), "")
v.Add(mgmtForceStop, "")
}
return v
}
@@ -351,11 +344,11 @@ func TestExtractHealInitParams(t *testing.T) {
}
varsArr := []map[string]string{
// Invalid cases
{string(mgmtPrefix): "objprefix"},
{mgmtPrefix: "objprefix"},
// Valid cases
{},
{string(mgmtBucket): "bucket"},
{string(mgmtBucket): "bucket", string(mgmtPrefix): "objprefix"},
{mgmtBucket: "bucket"},
{mgmtBucket: "bucket", mgmtPrefix: "objprefix"},
}
// Body is always valid - we do not test JSON decoding.
@@ -364,7 +357,7 @@ func TestExtractHealInitParams(t *testing.T) {
// Test all combinations!
for pIdx, parms := range qParmsArr {
for vIdx, vars := range varsArr {
_, err := extractHealInitParams(vars, parms, bytes.NewBuffer([]byte(body)))
_, err := extractHealInitParams(vars, parms, bytes.NewReader([]byte(body)))
isErrCase := false
if pIdx < 4 || vIdx < 1 {
isErrCase = true

View File

@@ -21,6 +21,8 @@ import (
"encoding/json"
"fmt"
"net/http"
"sort"
"strings"
"sync"
"time"
@@ -63,7 +65,7 @@ var (
errHealStopSignalled = fmt.Errorf("heal stop signaled")
errFnHealFromAPIErr = func(ctx context.Context, err error) error {
apiErr := toAPIError(ctx, err)
apiErr := toAdminAPIErr(ctx, err)
return fmt.Errorf("Heal internal error: %s: %s",
apiErr.Code, apiErr.Description)
}
@@ -85,29 +87,71 @@ type healSequenceStatus struct {
// structure to hold state of all heal sequences in server memory
type allHealState struct {
sync.Mutex
sync.RWMutex
// map of heal path to heal sequence
healSeqMap map[string]*healSequence
healSeqMap map[string]*healSequence
healLocalDisks map[Endpoint]struct{}
}
// newHealState - initialize global heal state management
func newHealState() *allHealState {
healState := &allHealState{
healSeqMap: make(map[string]*healSequence),
func newHealState(cleanup bool) *allHealState {
hstate := &allHealState{
healSeqMap: make(map[string]*healSequence),
healLocalDisks: map[Endpoint]struct{}{},
}
if cleanup {
go hstate.periodicHealSeqsClean(GlobalContext)
}
return hstate
}
go healState.periodicHealSeqsClean(GlobalContext)
func (ahs *allHealState) healDriveCount() int {
ahs.RLock()
defer ahs.RUnlock()
return healState
return len(ahs.healLocalDisks)
}
func (ahs *allHealState) getHealLocalDisks() Endpoints {
ahs.RLock()
defer ahs.RUnlock()
var endpoints Endpoints
for ep := range ahs.healLocalDisks {
endpoints = append(endpoints, ep)
}
return endpoints
}
func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()
for _, ep := range healLocalDisks {
delete(ahs.healLocalDisks, ep)
}
}
func (ahs *allHealState) pushHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()
for _, ep := range healLocalDisks {
ahs.healLocalDisks[ep] = struct{}{}
}
}
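
The new healLocalDisks bookkeeping is a small RWMutex-guarded set: writers push and pop endpoints, readers take the shared lock to count or snapshot. A runnable sketch of the same pattern, keyed by endpoint string for simplicity:

package main

import (
	"fmt"
	"sync"
)

// endpointSet is a minimal sketch of the healLocalDisks state above:
// an RWMutex-guarded set.
type endpointSet struct {
	sync.RWMutex
	m map[string]struct{}
}

func newEndpointSet() *endpointSet { return &endpointSet{m: map[string]struct{}{}} }

func (s *endpointSet) push(eps ...string) {
	s.Lock()
	defer s.Unlock()
	for _, ep := range eps {
		s.m[ep] = struct{}{}
	}
}

func (s *endpointSet) pop(eps ...string) {
	s.Lock()
	defer s.Unlock()
	for _, ep := range eps {
		delete(s.m, ep)
	}
}

func (s *endpointSet) count() int {
	s.RLock()
	defer s.RUnlock()
	return len(s.m)
}

func main() {
	s := newEndpointSet()
	s.push("/disk1", "/disk2")
	s.pop("/disk1")
	fmt.Println(s.count()) // 1
}
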
func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
// Launch clean-up routine to remove this heal sequence (after
// it ends) from the global state after timeout has elapsed.
periodicTimer := time.NewTimer(time.Minute * 5)
defer periodicTimer.Stop()
for {
select {
case <-time.After(time.Minute * 5):
case <-periodicTimer.C:
periodicTimer.Reset(time.Minute * 5)
now := UTCNow()
ahs.Lock()
for path, h := range ahs.healSeqMap {
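
The cleanup loop above switches from time.After, which allocates a fresh timer on every iteration, to a single timer that is Reset after each tick. The general pattern, as a runnable sketch:

package main

import (
	"context"
	"fmt"
	"time"
)

// periodic reuses one timer and resets it after each tick, instead of
// allocating a new one with time.After on every loop iteration.
func periodic(ctx context.Context, every time.Duration, fn func()) {
	t := time.NewTimer(every)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			// Safe to Reset here: the timer has just fired and drained.
			t.Reset(every)
			fn()
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	periodic(ctx, 10*time.Millisecond, func() { fmt.Println("tick") })
}
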
@@ -151,7 +195,7 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
he, exists := ahs.getHealSequence(path)
if !exists {
hsp = madmin.HealStopSuccess{
ClientToken: "invalid",
ClientToken: "unknown",
StartTime: UTCNow(),
}
} else {
@@ -190,24 +234,17 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
// `keepHealSeqStateDuration`. This function also launches a
// background routine to clean up heal results after the
// aforementioned duration.
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLayer) (
respBytes []byte, apiErr APIError, errMsg string) {
existsAndLive := false
he, exists := ahs.getHealSequence(pathJoin(h.bucket, h.object))
if exists {
existsAndLive = !he.hasEnded()
}
if existsAndLive {
// A heal sequence exists on the given path.
if h.forceStarted {
// stop the running heal sequence - wait for it to finish.
he.stop()
for !he.hasEnded() {
time.Sleep(1 * time.Second)
}
} else {
if h.forceStarted {
_, apiErr = ahs.stopHealSequence(pathJoin(h.bucket, h.object))
if apiErr.Code != "" {
return respBytes, apiErr, ""
}
} else {
oh, exists := ahs.getHealSequence(pathJoin(h.bucket, h.object))
if exists && !oh.hasEnded() {
errMsg = "Heal is already running on the given path " +
"(use force-start option to stop and start afresh). " +
fmt.Sprintf("The heal was started by IP %s at %s, token is %s",
@@ -224,7 +261,6 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
hpath := pathJoin(h.bucket, h.object)
for k, hSeq := range ahs.healSeqMap {
if !hSeq.hasEnded() && (HasPrefix(k, hpath) || HasPrefix(hpath, k)) {
errMsg = "The provided heal sequence path overlaps with an existing " +
fmt.Sprintf("heal path: %s", k)
return nil, errorCodes.ToAPIErr(ErrHealOverlappingPaths), errMsg
@@ -235,7 +271,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
ahs.healSeqMap[hpath] = h
// Launch top-level background heal go-routine
go h.healSequenceStart()
go h.healSequenceStart(objAPI)
clientToken := h.clientToken
if globalIsDistErasure {
@@ -249,7 +285,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
})
if err != nil {
logger.LogIf(h.ctx, err)
return nil, toAPIError(h.ctx, err), ""
return nil, toAdminAPIErr(h.ctx, err), ""
}
return b, noError, ""
}
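
Before registering a sequence, LaunchNewHealSequence rejects any path that nests inside, or contains, a running heal path. The overlap test reduces to a two-way prefix check; a toy version (the server's own HasPrefix wrapper may add extra path handling):

package main

import (
	"fmt"
	"strings"
)

// overlaps mirrors the heal-path conflict check above: two sequences
// conflict when either path is a prefix of the other.
func overlaps(a, b string) bool {
	return strings.HasPrefix(a, b) || strings.HasPrefix(b, a)
}

func main() {
	fmt.Println(overlaps("bucket/prefix", "bucket")) // true: nested paths conflict
	fmt.Println(overlaps("bucket-a", "bucket-b"))    // false
}
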
@@ -264,8 +300,11 @@ func (ahs *allHealState) PopHealStatusJSON(hpath string,
// fetch heal state for given path
h, exists := ahs.getHealSequence(hpath)
if !exists {
// If there is no such heal sequence, return error.
return nil, ErrHealNoSuchProcess
// heal sequence doesn't exist, must have finished.
jbytes, err := json.Marshal(healSequenceStatus{
Summary: healFinishedStatus,
})
return jbytes, toAdminAPIErrCode(GlobalContext, err)
}
// Check if client-token is valid
@@ -490,9 +529,12 @@ func (h *healSequence) isQuitting() bool {
// check if the heal sequence has ended
func (h *healSequence) hasEnded() bool {
h.mutex.RLock()
ended := len(h.currentStatus.Items) == 0 || h.currentStatus.Summary == healStoppedStatus || h.currentStatus.Summary == healFinishedStatus
h.mutex.RUnlock()
return ended
defer h.mutex.RUnlock()
// background heal never ends
if h.clientToken == bgHealingUUID {
return false
}
return !h.endTime.IsZero()
}
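
The rewritten hasEnded drops the item-count heuristic: a sequence is finished exactly when its endTime is set, and the background heal sequence is special-cased to never end. A self-contained sketch (the sentinel value here is illustrative, not the real bgHealingUUID):

package main

import (
	"fmt"
	"time"
)

const bgHealingUUID = "0000-bg" // illustrative sentinel only

// hasEnded mirrors the rewritten check above: the background heal
// sequence never ends; everything else ends once endTime is set.
func hasEnded(clientToken string, endTime time.Time) bool {
	if clientToken == bgHealingUUID {
		return false
	}
	return !endTime.IsZero()
}

func main() {
	fmt.Println(hasEnded("abc123", time.Now()))       // true
	fmt.Println(hasEnded(bgHealingUUID, time.Now())) // false
}
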
// stops the heal sequence - safe to call multiple times.
@@ -573,7 +615,7 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
// routine for completion, and (2) listens for external stop
// signals. When either event happens, it sets the finish status for
// the heal-sequence.
func (h *healSequence) healSequenceStart() {
func (h *healSequence) healSequenceStart(objAPI ObjectLayer) {
// Set status as running
h.mutex.Lock()
h.currentStatus.Summary = healRunningStatus
@@ -581,7 +623,7 @@ func (h *healSequence) healSequenceStart() {
h.mutex.Unlock()
if h.sourceCh == nil {
go h.traverseAndHeal()
go h.traverseAndHeal(objAPI)
} else {
go h.healFromSourceCh()
}
@@ -606,8 +648,7 @@ func (h *healSequence) healSequenceStart() {
case <-h.ctx.Done():
h.mutex.Lock()
h.endTime = UTCNow()
h.currentStatus.Summary = healStoppedStatus
h.currentStatus.FailureDetail = errHealStopSignalled.Error()
h.currentStatus.Summary = healFinishedStatus
h.mutex.Unlock()
// drain traverse channel so the traversal
@@ -621,7 +662,18 @@ func (h *healSequence) healSequenceStart() {
}
}
func (h *healSequence) logHeal(healType madmin.HealItemType) {
h.mutex.Lock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
h.mutex.Unlock()
}
func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
globalHealConfigMu.Lock()
opts := globalHealConfig
globalHealConfigMu.Unlock()
// Send heal request
task := healTask{
bucket: source.bucket,
@@ -633,6 +685,18 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
if source.opts != nil {
task.opts = *source.opts
}
if opts.Bitrot {
task.opts.ScanMode = madmin.HealDeepScan
}
// Wait and proceed if there are active requests
waitForLowHTTPReq(opts.IOCount, opts.Sleep)
h.mutex.Lock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
h.mutex.Unlock()
globalBackgroundHealRoutine.queueHealTask(task)
select {
@@ -640,9 +704,11 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
if !h.reportProgress {
// Object might have been deleted by the time heal
// was attempted; we should ignore this object and
// return success.
// return the error and not calculate this object
// as part of the metrics.
if isErrObjectNotFound(res.err) || isErrVersionNotFound(res.err) {
return nil
// Return the error so that caller can handle it.
return res.err
}
h.mutex.Lock()
@@ -691,24 +757,31 @@ func (h *healSequence) healItemsFromSourceCh() error {
if !ok {
return nil
}
var itemType madmin.HealItemType
switch {
case source.bucket == nopHeal:
switch source.bucket {
case nopHeal:
continue
case source.bucket == SlashSeparator:
case SlashSeparator:
itemType = madmin.HealItemMetadata
case source.bucket != "" && source.object == "":
itemType = madmin.HealItemBucket
default:
itemType = madmin.HealItemObject
if source.object == "" {
itemType = madmin.HealItemBucket
} else {
itemType = madmin.HealItemObject
}
}
if err := h.queueHealTask(source, itemType); err != nil {
logger.LogIf(h.ctx, err)
switch err.(type) {
case ObjectExistsAsDirectory:
case ObjectNotFound:
case VersionNotFound:
default:
logger.LogIf(h.ctx, fmt.Errorf("Heal attempt failed for %s: %w",
pathJoin(source.bucket, source.object), err))
}
}
h.scannedItemsMap[itemType]++
h.lastHealActivity = UTCNow()
case <-h.ctx.Done():
return nil
}
@@ -719,28 +792,28 @@ func (h *healSequence) healFromSourceCh() {
h.healItemsFromSourceCh()
}
func (h *healSequence) healDiskMeta() error {
// Start with format healing
if err := h.healDiskFormat(); err != nil {
return err
func (h *healSequence) healDiskMeta(objAPI ObjectLayer) error {
// Try to proactively heal the backend-encrypted file.
if err := h.queueHealTask(healSource{
bucket: minioMetaBucket,
object: backendEncryptedFile,
}, madmin.HealItemBucketMetadata); err != nil {
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return err
}
}
// Start healing the config prefix.
if err := h.healMinioSysMeta(minioConfigPrefix)(); err != nil {
return err
}
// Start healing the bucket config prefix.
return h.healMinioSysMeta(bucketConfigPrefix)()
return h.healMinioSysMeta(objAPI, minioConfigPrefix)()
}
func (h *healSequence) healItems(bucketsOnly bool) error {
if err := h.healDiskMeta(); err != nil {
func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error {
if err := h.healDiskMeta(objAPI); err != nil {
return err
}
// Heal buckets and objects
return h.healBuckets(bucketsOnly)
return h.healBuckets(objAPI, bucketsOnly)
}
// traverseAndHeal - traverses on-disk data and performs healing
@@ -750,41 +823,40 @@ func (h *healSequence) healItems(bucketsOnly bool) error {
// quit signal is received, this routine cannot quit immediately and
// has to wait until a safe point is reached, such as between scanning
// two objects.
func (h *healSequence) traverseAndHeal() {
func (h *healSequence) traverseAndHeal(objAPI ObjectLayer) {
bucketsOnly := false // Heals buckets and objects also.
h.traverseAndHealDoneCh <- h.healItems(bucketsOnly)
h.traverseAndHealDoneCh <- h.healItems(objAPI, bucketsOnly)
close(h.traverseAndHealDoneCh)
}
// healMinioSysMeta - heals all files under a given meta prefix, returns a function
// which in turn heals the respective meta directory path and any files in it.
func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) func() error {
return func() error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}
// NOTE: Healing on meta is run regardless
// of any bucket being selected; this is to ensure that
// meta is always up to date and correct.
return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string) error {
return objAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string) error {
if h.isQuitting() {
return errHealStopSignalled
}
herr := h.queueHealTask(healSource{
// Skip metacache entries healing
if strings.HasPrefix(object, "buckets/.minio.sys/.metacache/") {
return nil
}
err := h.queueHealTask(healSource{
bucket: bucket,
object: object,
versionID: versionID,
}, madmin.HealItemBucketMetadata)
// Object might have been deleted by the time heal
// was attempted; ignore this object and move on.
if isErrObjectNotFound(herr) || isErrVersionNotFound(herr) {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return nil
}
return herr
return err
})
}
}
@@ -796,39 +868,32 @@ func (h *healSequence) healDiskFormat() error {
return errHealStopSignalled
}
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}
return h.queueHealTask(healSource{bucket: SlashSeparator}, madmin.HealItemMetadata)
}
// healBuckets - check for all buckets heal or just particular bucket.
func (h *healSequence) healBuckets(bucketsOnly bool) error {
func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
if h.isQuitting() {
return errHealStopSignalled
}
// 1. If a bucket was specified, heal only the bucket.
if h.bucket != "" {
return h.healBucket(h.bucket, bucketsOnly)
return h.healBucket(objAPI, h.bucket, bucketsOnly)
}
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}
buckets, err := objectAPI.ListBucketsHeal(h.ctx)
buckets, err := objAPI.ListBuckets(h.ctx)
if err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}
// Heal latest buckets first.
sort.Slice(buckets, func(i, j int) bool {
return buckets[i].Created.After(buckets[j].Created)
})
for _, bucket := range buckets {
if err = h.healBucket(bucket.Name, bucketsOnly); err != nil {
if err = h.healBucket(objAPI, bucket.Name, bucketsOnly); err != nil {
return err
}
}
@@ -837,15 +902,11 @@ func (h *healSequence) healBuckets(bucketsOnly bool) error {
}
// healBucket - traverses and heals given bucket
func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}
func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly bool) error {
if err := h.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket); err != nil {
return err
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return err
}
}
if bucketsOnly {
@@ -856,9 +917,12 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
if h.object != "" {
// Check if an object named as the objPrefix exists,
// and if so heal it.
_, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
oi, err := objAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
if err == nil {
if err = h.healObject(bucket, h.object, ""); err != nil {
if err = h.healObject(bucket, h.object, oi.VersionID); err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return nil
}
return err
}
}
@@ -867,27 +931,26 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
return nil
}
if err := objectAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
return errFnHealFromAPIErr(h.ctx, err)
if err := objAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
// Object might have been deleted by the time heal
// was attempted; ignore this object and move on.
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return errFnHealFromAPIErr(h.ctx, err)
}
}
return nil
}
// healObject - heal the given object and record result
func (h *healSequence) healObject(bucket, object, versionID string) error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}
if h.isQuitting() {
return errHealStopSignalled
}
return h.queueHealTask(healSource{
err := h.queueHealTask(healSource{
bucket: bucket,
object: object,
versionID: versionID,
}, madmin.HealItemObject)
return err
}
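
The queueHealTask changes above also gate each task on server load: waitForLowHTTPReq blocks while in-flight requests exceed the configured IOCount, sleeping between polls, so healing yields to client traffic. A toy sketch of that throttle under those assumptions:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

var activeRequests int64 // stand-in for the server's live request counter

// waitForLowLoad blocks until fewer than maxIO requests are in flight,
// sleeping between polls; a toy version of waitForLowHTTPReq above.
func waitForLowLoad(maxIO int64, sleep time.Duration) {
	if maxIO <= 0 {
		return // throttling disabled
	}
	for atomic.LoadInt64(&activeRequests) >= maxIO {
		time.Sleep(sleep)
	}
}

func main() {
	waitForLowLoad(10, 10*time.Millisecond)
	fmt.Println("queueing heal task")
}
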

View File

@@ -1,118 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"encoding/json"
"io/ioutil"
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/env"
iampolicy "github.com/minio/minio/pkg/iam/policy"
)
const (
bucketQuotaConfigFile = "quota.json"
)
// PutBucketQuotaConfigHandler - PUT Bucket quota configuration.
// ----------
// Places a quota configuration on the specified bucket. The quota
// specified in the quota configuration will be applied by default
// to enforce total quota for the specified bucket.
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketQuotaConfig")
defer logger.AuditLog(w, r, "PutBucketQuotaConfig", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
// Turn off quota commands if data usage info is unavailable.
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminBucketQuotaDisabled), r.URL)
return
}
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
data, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
if _, err = parseBucketQuota(bucket, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write success response.
writeSuccessResponseHeadersOnly(w)
}
// GetBucketQuotaConfigHandler - gets bucket quota configuration
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketQuotaConfig")
defer logger.AuditLog(w, r, "GetBucketQuotaConfig", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
config, err := globalBucketMetadataSys.GetQuotaConfig(bucket)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
configData, err := json.Marshal(config)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, configData)
}

View File

@@ -20,8 +20,6 @@ import (
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
)
@@ -112,11 +110,10 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
// -- IAM APIs --
// Add policy IAM
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceAll(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")
// Add user IAM
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountusageinfo").HandlerFunc(httpTraceAll(adminAPI.AccountUsageInfoHandler))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountinfo").HandlerFunc(httpTraceAll(adminAPI.AccountInfoHandler))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
@@ -171,22 +168,32 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
}
// Quota operations
if globalIsDistErasure || globalIsErasure {
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
}
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// Bucket replication operations
// GetBucketTargetHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
httpTraceHdrs(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
// RemoveRemoteTargetHandler
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
}
// -- Top APIs --
// Top locks
if globalIsDistErasure {
// Top locks
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
// Force unlocks paths
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/force-unlock").
Queries("paths", "{paths:.*}").HandlerFunc(httpTraceHdrs(adminAPI.ForceUnlockHandler))
}
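
The quota and remote-target registrations above all follow the same gorilla/mux idiom: constrain a route by HTTP method, path, and required query parameters, so requests missing a parameter never reach the handler. A runnable sketch of one such route (path and port are illustrative):

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// Route matches only PUT requests carrying a non-empty bucket query.
	r.Methods(http.MethodPut).
		Path("/minio/admin/v3/set-bucket-quota").
		Queries("bucket", "{bucket:.*}").
		HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			fmt.Fprintln(w, mux.Vars(req)["bucket"])
		})
	_ = http.ListenAndServe(":8080", r) // blocks; sketch only
}
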
// HTTP Trace
@@ -201,23 +208,18 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))
if !globalIsGateway {
// -- OBD API --
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/obdinfo").
HandlerFunc(httpTraceHdrs(adminAPI.OBDInfoHandler)).
Queries("perfdrive", "{perfdrive:true|false}",
"perfnet", "{perfnet:true|false}",
"minioinfo", "{minioinfo:true|false}",
"minioconfig", "{minioconfig:true|false}",
"syscpu", "{syscpu:true|false}",
"sysdiskhw", "{sysdiskhw:true|false}",
"sysosinfo", "{sysosinfo:true|false}",
"sysmem", "{sysmem:true|false}",
"sysprocess", "{sysprocess:true|false}",
)
// Keep obdinfo for backward compatibility with mc
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/obdinfo").
HandlerFunc(httpTraceHdrs(adminAPI.HealthInfoHandler))
// -- Health API --
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
HandlerFunc(httpTraceHdrs(adminAPI.HealthInfoHandler))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/bandwidth").
HandlerFunc(httpTraceHdrs(adminAPI.BandwidthMonitorHandler))
}
}
// If none of the routes match add default error handler routes
adminRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
adminRouter.MethodNotAllowedHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
adminRouter.NotFoundHandler = httpTraceAll(errorResponseHandler)
adminRouter.MethodNotAllowedHandler = httpTraceAll(methodNotAllowedHandler("Admin"))
}

View File

@@ -17,23 +17,24 @@
package cmd
import (
"context"
"net/http"
"os"
"time"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/madmin"
)
// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints from given list of endpoints
func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin.ServerProperties {
var disks []madmin.Disk
func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Request) madmin.ServerProperties {
var localEndpoints Endpoints
addr := r.Host
if globalIsDistErasure {
addr = GetLocalPeer(endpointZones)
addr = GetLocalPeer(endpointServerPools)
}
network := make(map[string]string)
for _, ep := range endpointZones {
for _, ep := range endpointServerPools {
for _, endpoint := range ep.Endpoints {
nodeName := endpoint.Host
if nodeName == "" {
@@ -42,38 +43,27 @@ func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin
if endpoint.IsLocal {
// Only proceed for local endpoints
network[nodeName] = "online"
var di = madmin.Disk{
DrivePath: endpoint.Path,
}
diInfo, err := disk.GetInfo(endpoint.Path)
if err != nil {
if os.IsNotExist(err) || isSysErrPathNotFound(err) {
di.State = madmin.DriveStateMissing
} else {
di.State = madmin.DriveStateCorrupt
}
localEndpoints = append(localEndpoints, endpoint)
continue
}
_, present := network[nodeName]
if !present {
if err := isServerResolvable(endpoint, 2*time.Second); err == nil {
network[nodeName] = "online"
} else {
di.State = madmin.DriveStateOk
di.DrivePath = endpoint.Path
di.TotalSpace = diInfo.Total
di.UsedSpace = diInfo.Total - diInfo.Free
di.Utilization = float64((diInfo.Total - diInfo.Free) / diInfo.Total * 100)
}
disks = append(disks, di)
} else {
_, present := network[nodeName]
if !present {
err := IsServerResolvable(endpoint)
if err == nil {
network[nodeName] = "online"
} else {
network[nodeName] = "offline"
}
network[nodeName] = "offline"
// log the error only once
logger.LogOnceIf(context.Background(), err, nodeName)
}
}
}
}
localDisks, _ := initStorageDisksWithErrors(localEndpoints)
defer closeStorageDisks(localDisks)
storageInfo, _ := getStorageInfo(localDisks, localEndpoints.GetAllStrings())
return madmin.ServerProperties{
State: "ok",
Endpoint: addr,
@@ -81,6 +71,23 @@ func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin
Version: Version,
CommitID: CommitID,
Network: network,
Disks: disks,
Disks: storageInfo.Disks,
}
}
func getLocalDisks(endpointServerPools EndpointServerPools) []madmin.Disk {
var localEndpoints Endpoints
for _, ep := range endpointServerPools {
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
localEndpoints = append(localEndpoints, endpoint)
}
}
localDisks, _ := initStorageDisksWithErrors(localEndpoints)
defer closeStorageDisks(localDisks)
storageInfo, _ := getStorageInfo(localDisks, localEndpoints.GetAllStrings())
return storageInfo.Disks
}
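
One detail worth flagging in the removed block above: the old utilization line divided two unsigned integers before converting to float64, so (Total-Free)/Total truncated to zero for any drive that was not completely full. The replacement defers to getStorageInfo; for reference, the float-correct form is:

package main

import "fmt"

// utilization computes used-space percentage with float math, avoiding
// the integer-division truncation in the removed code above.
func utilization(total, free uint64) float64 {
	if total == 0 {
		return 0
	}
	return float64(total-free) / float64(total) * 100
}

func main() {
	fmt.Printf("%.1f%%\n", utilization(1000, 250)) // 75.0%
}
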

View File

@@ -18,6 +18,7 @@ package cmd
import (
"encoding/xml"
"time"
)
// DeletedObject objects deleted
@@ -26,12 +27,44 @@ type DeletedObject struct {
DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
ObjectName string `xml:"Key,omitempty"`
VersionID string `xml:"VersionId,omitempty"`
// MinIO extensions to support delete marker replication
// Replication status of DeleteMarker
DeleteMarkerReplicationStatus string `xml:"DeleteMarkerReplicationStatus,omitempty"`
// MTime of DeleteMarker on source that needs to be propagated to replica
DeleteMarkerMTime DeleteMarkerMTime `xml:"DeleteMarkerMTime,omitempty"`
// Status of versioned delete (of object or DeleteMarker)
VersionPurgeStatus VersionPurgeStatusType `xml:"VersionPurgeStatus,omitempty"`
// PurgeTransitioned is nonempty if object is in transition tier
PurgeTransitioned string `xml:"PurgeTransitioned,omitempty"`
}
// DeleteMarkerMTime is an embedded type containing time.Time for XML marshal
type DeleteMarkerMTime struct {
time.Time
}
// MarshalXML encodes expiration date if it is non-zero and encodes
// empty string otherwise
func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if t.Time.IsZero() {
return nil
}
return e.EncodeElement(t.Time.Format(time.RFC3339), startElement)
}
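
DeleteMarkerMTime exists only to customize XML encoding: a zero time emits no element at all, while a set time is written as RFC 3339. A self-contained sketch of the same technique (type and field names are illustrative):

package main

import (
	"encoding/xml"
	"fmt"
	"os"
	"time"
)

// mtime mirrors DeleteMarkerMTime above: encode RFC 3339 when set,
// write nothing for the zero value so the element is omitted.
type mtime struct{ time.Time }

func (t mtime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if t.IsZero() {
		return nil // no element emitted
	}
	return e.EncodeElement(t.Format(time.RFC3339), start)
}

type deleted struct {
	XMLName xml.Name `xml:"Deleted"`
	Key     string   `xml:"Key"`
	MTime   mtime    `xml:"DeleteMarkerMTime"`
}

func main() {
	enc := xml.NewEncoder(os.Stdout)
	_ = enc.Encode(deleted{Key: "obj"}) // MTime element suppressed
	fmt.Println()
	_ = enc.Encode(deleted{Key: "obj", MTime: mtime{time.Now()}})
	fmt.Println()
}
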
// ObjectToDelete carries key name for the object to delete.
type ObjectToDelete struct {
ObjectName string `xml:"Key"`
VersionID string `xml:"VersionId"`
// Replication status of DeleteMarker
DeleteMarkerReplicationStatus string `xml:"DeleteMarkerReplicationStatus"`
// Status of versioned delete (of object or DeleteMarker)
VersionPurgeStatus VersionPurgeStatusType `xml:"VersionPurgeStatus"`
// Version ID of delete marker
DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId"`
// PurgeTransitioned is nonempty if object is in transition tier
PurgeTransitioned string `xml:"PurgeTransitioned"`
}
// createBucketConfiguration container for bucket configuration request from client.

View File

@@ -27,13 +27,15 @@ import (
"github.com/Azure/azure-storage-blob-go/azblob"
"google.golang.org/api/googleapi"
minio "github.com/minio/minio-go/v6"
"github.com/minio/minio-go/v6/pkg/tags"
"github.com/minio/minio/cmd/config/etcd/dns"
minio "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/bucket/lifecycle"
"github.com/minio/minio/pkg/bucket/replication"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/versioning"
@@ -85,6 +87,7 @@ const (
ErrInvalidMaxUploads
ErrInvalidMaxParts
ErrInvalidPartNumberMarker
ErrInvalidPartNumber
ErrInvalidRequestBody
ErrInvalidCopySource
ErrInvalidMetadataDirective
@@ -104,8 +107,24 @@ const (
ErrNoSuchCORSConfiguration
ErrNoSuchWebsiteConfiguration
ErrReplicationConfigurationNotFoundError
ErrRemoteDestinationNotFoundError
ErrReplicationDestinationMissingLock
ErrRemoteTargetNotFoundError
ErrReplicationRemoteConnectionError
ErrBucketRemoteIdenticalToSource
ErrBucketRemoteAlreadyExists
ErrBucketRemoteLabelInUse
ErrBucketRemoteArnTypeInvalid
ErrBucketRemoteArnInvalid
ErrBucketRemoteRemoveDisallowed
ErrRemoteTargetNotVersionedError
ErrReplicationSourceNotVersionedError
ErrReplicationNeedsVersioningError
ErrReplicationBucketNeedsVersioningError
ErrObjectRestoreAlreadyInProgress
ErrNoSuchKey
ErrNoSuchUpload
ErrInvalidVersionID
ErrNoSuchVersion
ErrNotImplemented
ErrPreconditionFailed
@@ -216,6 +235,7 @@ const (
ErrInvalidResourceName
ErrServerNotInitialized
ErrOperationTimedOut
ErrClientDisconnected
ErrOperationMaxedOut
ErrInvalidRequest
// MinIO storage class error codes
@@ -243,7 +263,6 @@ const (
// Bucket Quota error codes
ErrAdminBucketQuotaExceeded
ErrAdminNoSuchQuotaConfiguration
ErrAdminBucketQuotaDisabled
ErrHealNotImplemented
ErrHealNoSuchProcess
@@ -344,6 +363,7 @@ const (
ErrInvalidDecompressedSize
ErrAddUserInvalidArgument
ErrAdminAccountNotEligible
ErrAccountNotEligible
ErrServiceAccountNotFound
ErrPostPolicyConditionInvalidFormat
)
@@ -418,6 +438,11 @@ var errorCodes = errorCodeMap{
Description: "Argument partNumberMarker must be an integer.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidPartNumber: {
Code: "InvalidPartNumber",
Description: "The requested partnumber is not satisfiable",
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
},
ErrInvalidPolicyDocument: {
Code: "InvalidPolicyDocument",
Description: "The content of the form does not meet the conditions specified in the policy document.",
@@ -538,11 +563,16 @@ var errorCodes = errorCodeMap{
Description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
HTTPStatusCode: http.StatusNotFound,
},
ErrNoSuchVersion: {
ErrInvalidVersionID: {
Code: "InvalidArgument",
Description: "Invalid version id specified",
HTTPStatusCode: http.StatusBadRequest,
},
ErrNoSuchVersion: {
Code: "NoSuchVersion",
Description: "The specified version does not exist.",
HTTPStatusCode: http.StatusNotFound,
},
ErrNotImplemented: {
Code: "NotImplemented",
Description: "A header you provided implies functionality that is not implemented",
@@ -648,20 +678,9 @@ var errorCodes = errorCodeMap{
Description: "X-Amz-Date must be in the ISO8601 Long Format \"yyyyMMdd'T'HHmmss'Z'\"",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// right Description: "Error parsing the X-Amz-Credential parameter; incorrect date format \"%s\". This date in the credential must be in the format \"yyyyMMdd\".",
// Need changes to make sure variable messages can be constructed.
ErrMalformedCredentialDate: {
Code: "AuthorizationQueryParametersError",
Description: "Error parsing the X-Amz-Credential parameter; incorrect date format \"%s\". This date in the credential must be in the format \"yyyyMMdd\".",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// right Description: "Error parsing the X-Amz-Credential parameter; the region 'us-east-' is wrong; expecting 'us-east-1'".
// Need changes to make sure variable messages can be constructed.
ErrMalformedCredentialRegion: {
Code: "AuthorizationQueryParametersError",
Description: "Error parsing the X-Amz-Credential parameter; the region is wrong;",
Description: "Error parsing the X-Amz-Credential parameter; incorrect date format. This date in the credential must be in the format \"yyyyMMdd\".",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidRegion: {
@@ -669,9 +688,6 @@ var errorCodes = errorCodeMap{
Description: "Region does not match.",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// right Description: "Error parsing the X-Amz-Credential parameter; incorrect service \"s4\". This endpoint belongs to \"s3\".".
// Need changes to make sure variable messages can be constructed.
ErrInvalidServiceS3: {
Code: "AuthorizationParametersError",
Description: "Error parsing the Credential/X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",
@@ -682,9 +698,6 @@ var errorCodes = errorCodeMap{
Description: "Error parsing the Credential parameter; incorrect service. This endpoint belongs to \"sts\".",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// Description: "Error parsing the X-Amz-Credential parameter; incorrect terminal "aws4_reque". This endpoint uses "aws4_request".
// Need changes to make sure variable messages can be constructed.
ErrInvalidRequestVersion: {
Code: "AuthorizationQueryParametersError",
Description: "Error parsing the X-Amz-Credential parameter; incorrect terminal. This endpoint uses \"aws4_request\".",
@@ -755,8 +768,6 @@ var errorCodes = errorCodeMap{
Description: "Your key is too long",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Actual XML error response also contains the header which missed in list of signed header parameters.
ErrUnsignedHeaders: {
Code: "AccessDenied",
Description: "There were headers present in the request which were not signed",
@@ -812,6 +823,76 @@ var errorCodes = errorCodeMap{
Description: "The replication configuration was not found",
HTTPStatusCode: http.StatusNotFound,
},
ErrRemoteDestinationNotFoundError: {
Code: "RemoteDestinationNotFoundError",
Description: "The remote destination bucket does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrReplicationDestinationMissingLock: {
Code: "ReplicationDestinationMissingLockError",
Description: "The replication destination bucket does not have object locking enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrRemoteTargetNotFoundError: {
Code: "XMinioAdminRemoteTargetNotFoundError",
Description: "The remote target does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrReplicationRemoteConnectionError: {
Code: "XMinioAdminReplicationRemoteConnectionError",
Description: "Remote service connection error - please check remote service credentials and target bucket",
HTTPStatusCode: http.StatusNotFound,
},
ErrBucketRemoteIdenticalToSource: {
Code: "XMinioAdminRemoteIdenticalToSource",
Description: "The remote target cannot be identical to source",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteAlreadyExists: {
Code: "XMinioAdminBucketRemoteAlreadyExists",
Description: "The remote target already exists",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteLabelInUse: {
Code: "XMinioAdminBucketRemoteLabelInUse",
Description: "The remote target with this label already exists",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteRemoveDisallowed: {
Code: "XMinioAdminRemoteRemoveDisallowed",
Description: "This ARN is in use by an existing configuration",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteArnTypeInvalid: {
Code: "XMinioAdminRemoteARNTypeInvalid",
Description: "The bucket remote ARN type is not valid",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteArnInvalid: {
Code: "XMinioAdminRemoteArnInvalid",
Description: "The bucket remote ARN does not have correct format",
HTTPStatusCode: http.StatusBadRequest,
},
ErrRemoteTargetNotVersionedError: {
Code: "RemoteTargetNotVersionedError",
Description: "The remote target does not have versioning enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationSourceNotVersionedError: {
Code: "ReplicationSourceNotVersionedError",
Description: "The replication source does not have versioning enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationNeedsVersioningError: {
Code: "InvalidRequest",
Description: "Versioning must be 'Enabled' on the bucket to apply a replication configuration",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationBucketNeedsVersioningError: {
Code: "InvalidRequest",
Description: "Versioning must be 'Enabled' on the bucket to add a replication target",
HTTPStatusCode: http.StatusBadRequest,
},
ErrNoSuchObjectLockConfiguration: {
Code: "NoSuchObjectLockConfiguration",
Description: "The specified object does not have a ObjectLock configuration",
@@ -842,6 +923,11 @@ var errorCodes = errorCodeMap{
Description: "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied",
HTTPStatusCode: http.StatusBadRequest,
},
ErrObjectRestoreAlreadyInProgress: {
Code: "RestoreAlreadyInProgress",
Description: "Object restore is already in progress",
HTTPStatusCode: http.StatusConflict,
},
/// Bucket notification related errors.
ErrEventNotification: {
Code: "InvalidArgument",
@@ -904,7 +990,7 @@ var errorCodes = errorCodeMap{
HTTPStatusCode: http.StatusBadRequest,
},
ErrMetadataTooLarge: {
Code: "InvalidArgument",
Code: "MetadataTooLarge",
Description: "Your metadata headers exceed the maximum allowed metadata size.",
HTTPStatusCode: http.StatusBadRequest,
},
@@ -1128,11 +1214,6 @@ var errorCodes = errorCodeMap{
Description: "The quota configuration does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminBucketQuotaDisabled: {
Code: "XMinioAdminBucketQuotaDisabled",
Description: "Quota specified but disk usage crawl is disabled on MinIO server",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInsecureClientRequest: {
Code: "XMinioInsecureClientRequest",
Description: "Cannot respond to plain-text request from TLS-encrypted server",
@@ -1143,6 +1224,11 @@ var errorCodes = errorCodeMap{
Description: "A timeout occurred while trying to lock a resource, please reduce your request rate",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrClientDisconnected: {
Code: "ClientDisconnected",
Description: "Client disconnected before response was ready",
HTTPStatusCode: 499, // No official code, use nginx value.
},
ErrOperationMaxedOut: {
Code: "SlowDown",
Description: "A timeout exceeded while waiting to proceed with the request, please reduce your request rate",
@@ -1641,12 +1727,17 @@ var errorCodes = errorCodeMap{
ErrAddUserInvalidArgument: {
Code: "XMinioInvalidIAMCredentials",
Description: "User is not allowed to be same as admin access key",
HTTPStatusCode: http.StatusConflict,
HTTPStatusCode: http.StatusForbidden,
},
ErrAdminAccountNotEligible: {
Code: "XMinioInvalidIAMCredentials",
Description: "The administrator key is not eligible for this operation",
HTTPStatusCode: http.StatusConflict,
HTTPStatusCode: http.StatusForbidden,
},
ErrAccountNotEligible: {
Code: "XMinioInvalidIAMCredentials",
Description: "The account key is not eligible for this operation",
HTTPStatusCode: http.StatusForbidden,
},
ErrServiceAccountNotFound: {
Code: "XMinioInvalidIAMCredentials",
@@ -1669,6 +1760,16 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
return ErrNone
}
// Only return ErrClientDisconnected if the provided context is actually canceled.
// This way downstream context.Canceled will still report ErrOperationTimedOut
select {
case <-ctx.Done():
if ctx.Err() == context.Canceled {
return ErrClientDisconnected
}
default:
}
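// ctx.Err() is context.Canceled only when the caller's context was
// canceled (i.e. the client went away); a server-side deadline returns
// context.DeadlineExceeded and still maps to ErrOperationTimedOut below.
// The empty default case keeps this select non-blocking for live contexts.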
switch err {
case errInvalidArgument:
apiErr = ErrAdminInvalidArgument
@@ -1785,6 +1886,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrNoSuchKey
case MethodNotAllowed:
apiErr = ErrMethodNotAllowed
case InvalidVersionID:
apiErr = ErrInvalidVersionID
case VersionNotFound:
apiErr = ErrNoSuchVersion
case ObjectAlreadyExists:
@@ -1837,6 +1940,30 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrObjectLockConfigurationNotFound
case BucketQuotaConfigNotFound:
apiErr = ErrAdminNoSuchQuotaConfiguration
case BucketReplicationConfigNotFound:
apiErr = ErrReplicationConfigurationNotFoundError
case BucketRemoteDestinationNotFound:
apiErr = ErrRemoteDestinationNotFoundError
case BucketReplicationDestinationMissingLock:
apiErr = ErrReplicationDestinationMissingLock
case BucketRemoteTargetNotFound:
apiErr = ErrRemoteTargetNotFoundError
case BucketRemoteConnectionErr:
apiErr = ErrReplicationRemoteConnectionError
case BucketRemoteAlreadyExists:
apiErr = ErrBucketRemoteAlreadyExists
case BucketRemoteLabelInUse:
apiErr = ErrBucketRemoteLabelInUse
case BucketRemoteArnTypeInvalid:
apiErr = ErrBucketRemoteArnTypeInvalid
case BucketRemoteArnInvalid:
apiErr = ErrBucketRemoteArnInvalid
case BucketRemoteRemoveDisallowed:
apiErr = ErrBucketRemoteRemoveDisallowed
case BucketRemoteTargetNotVersioned:
apiErr = ErrRemoteTargetNotVersionedError
case BucketReplicationSourceNotVersioned:
apiErr = ErrReplicationSourceNotVersionedError
case BucketQuotaExceeded:
apiErr = ErrAdminBucketQuotaExceeded
case *event.ErrInvalidEventName:
@@ -1867,6 +1994,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrBackendDown
case ObjectNameTooLong:
apiErr = ErrKeyTooLongError
case dns.ErrInvalidBucketName:
apiErr = ErrInvalidBucketName
default:
var ie, iw int
// This work-around is to handle the issue golang/go#30648
@@ -1903,6 +2032,12 @@ func toAPIError(ctx context.Context, err error) APIError {
}
var apiErr = errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
e, ok := err.(dns.ErrInvalidBucketName)
if ok {
code := toAPIErrorCode(ctx, e)
apiErr = errorCodes.ToAPIErrWithErr(code, e)
}
if apiErr.Code == "InternalError" {
// If we see an internal error try to interpret
// any underlying errors if possible depending on
@@ -1941,6 +2076,12 @@ func toAPIError(ctx context.Context, err error) APIError {
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case replication.Error:
apiErr = APIError{
Code: "MalformedXML",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case tags.Error:
apiErr = APIError{
Code: e.Code(),
@@ -1984,6 +2125,12 @@ func toAPIError(ctx context.Context, err error) APIError {
HTTPStatusCode: e.Response().StatusCode,
}
// Add more Gateway SDKs here if any in future.
default:
apiErr = APIError{
Code: apiErr.Code,
Description: fmt.Sprintf("%s: cause(%v)", apiErr.Description, err),
HTTPStatusCode: apiErr.HTTPStatusCode,
}
}
}


@@ -38,9 +38,18 @@ func mustGetRequestID(t time.Time) string {
return fmt.Sprintf("%X", t.UnixNano())
}
// setEventStreamHeaders sets headers that allow proxies to avoid buffering responses
func setEventStreamHeaders(w http.ResponseWriter) {
w.Header().Set(xhttp.ContentType, "text/event-stream")
w.Header().Set(xhttp.CacheControl, "no-cache") // nginx to turn off buffering
w.Header().Set("X-Accel-Buffering", "no") // nginx to turn off buffering
}
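// Illustrative sketch: a minimal streaming handler built on
// setEventStreamHeaders. The handler name and payload are hypothetical,
// for illustration only.
func exampleEventStream(w http.ResponseWriter, r *http.Request) {
	setEventStreamHeaders(w)
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	fmt.Fprintf(w, "data: %s\n\n", "event payload") // one SSE frame
	flusher.Flush()                                 // push bytes through any proxy buffer
}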
// Write http common headers
func setCommonHeaders(w http.ResponseWriter) {
w.Header().Set(xhttp.ServerInfo, "MinIO/"+ReleaseTag)
// Set the "Server" http header.
w.Header().Set(xhttp.ServerInfo, "MinIO")
// Set `x-amz-bucket-region` only if region is set on the server
// by default minio uses an empty region.
if region := globalServerRegion; region != "" {
@@ -77,7 +86,7 @@ func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
}
// Write object header
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec) (err error) {
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
// set common headers
setCommonHeaders(w)
@@ -108,10 +117,11 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
}
// Set tag count if object has tags
tags, _ := url.ParseQuery(objInfo.UserTags)
tagCount := len(tags)
if tagCount > 0 {
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(tagCount)}
if len(objInfo.UserTags) > 0 {
tags, _ := url.ParseQuery(objInfo.UserTags)
if len(tags) > 0 {
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(len(tags))}
}
}
// Set all other user defined metadata.
@@ -121,31 +131,43 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
// values to client.
continue
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
var isSet bool
for _, userMetadataPrefix := range userMetadataKeyPrefixes {
if !strings.HasPrefix(k, userMetadataPrefix) {
if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(userMetadataPrefix)) {
continue
}
w.Header()[strings.ToLower(k)] = []string{v}
isSet = true
break
}
if !isSet {
w.Header().Set(k, v)
}
}
var start, rangeLen int64
totalObjectSize, err := objInfo.GetActualSize()
if err != nil {
return err
}
// for providing ranged content
start, rangeLen, err := rs.GetOffsetLength(totalObjectSize)
// For providing ranged content
start, rangeLen, err = rs.GetOffsetLength(totalObjectSize)
if err != nil {
return err
}
if rs == nil && opts.PartNumber > 0 {
rs = partNumberToRangeSpec(objInfo, opts.PartNumber)
}
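// A PartNumber request is thus served as the byte range covering that
// part, so the Content-Length/Content-Range logic below applies to part
// requests unchanged.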
// Set content length.
w.Header().Set(xhttp.ContentLength, strconv.FormatInt(rangeLen, 10))
if rs != nil {
@@ -157,21 +179,27 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
if objInfo.VersionID != "" {
w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
}
if objInfo.ReplicationStatus.String() != "" {
w.Header()[xhttp.AmzBucketReplicationStatus] = []string{objInfo.ReplicationStatus.String()}
}
if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {
ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{
Name: objInfo.Name,
UserTags: objInfo.UserTags,
VersionID: objInfo.VersionID,
ModTime: objInfo.ModTime,
IsLatest: objInfo.IsLatest,
DeleteMarker: objInfo.DeleteMarker,
Name: objInfo.Name,
UserTags: objInfo.UserTags,
VersionID: objInfo.VersionID,
ModTime: objInfo.ModTime,
IsLatest: objInfo.IsLatest,
DeleteMarker: objInfo.DeleteMarker,
SuccessorModTime: objInfo.SuccessorModTime,
})
if !expiryTime.IsZero() {
w.Header()[xhttp.AmzExpiration] = []string{
fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID),
}
}
if objInfo.TransitionStatus == lifecycle.TransitionComplete {
w.Header()[xhttp.AmzStorageClass] = []string{objInfo.StorageClass}
}
}
return nil


@@ -20,6 +20,7 @@ import (
"context"
"encoding/base64"
"encoding/xml"
"fmt"
"net/http"
"net/url"
"path"
@@ -34,11 +35,11 @@ import (
const (
// RFC3339 is a subset of the ISO8601 timestamp format, e.g. 2014-04-29T18:30:38Z
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with millisecond precision.
maxObjectList = 10000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
maxDeleteList = 10000 // Limit number of objects deleted in a delete call.
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
maxObjectList = metacacheBlockSize - (metacacheBlockSize / 10) // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
maxDeleteList = 10000 // Limit number of objects deleted in a delete call.
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
)
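// Worked example (hypothetical value): if metacacheBlockSize were 5000
// entries, maxObjectList = 5000 - 500 = 4500, keeping a listing page
// roughly 10% below one metacache block.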
// LocationResponse - format for location response.
@@ -387,7 +388,7 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
}
proto := handlers.GetSourceScheme(r)
if proto == "" {
proto = getURLScheme(globalIsSSL)
proto = getURLScheme(globalIsTLS)
}
u := &url.URL{
Host: r.Host,
@@ -396,8 +397,7 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
}
// If domain is set then we need to use bucket DNS style.
for _, domain := range domains {
if strings.Contains(r.Host, domain) {
u.Host = bucket + "." + r.Host
if strings.HasPrefix(r.Host, bucket+"."+domain) {
u.Path = path.Join(SlashSeparator, object)
break
}
@@ -408,7 +408,7 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
// generates ListBucketsResponse from array of BucketInfo which can be
// serialized to match XML and JSON API spec output.
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
var listbuckets []Bucket
listbuckets := make([]Bucket, 0, len(buckets))
var data = ListBucketsResponse{}
var owner = Owner{}
@@ -428,8 +428,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
var versions []ObjectVersion
var prefixes []CommonPrefix
versions := make([]ObjectVersion, 0, len(resp.Objects))
var owner = Owner{}
var data = ListVersionsResponse{}
@@ -473,6 +472,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
data.VersionIDMarker = versionIDMarker
data.IsTruncated = resp.IsTruncated
prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -484,8 +484,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
// generates an ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
var contents []Object
var prefixes []CommonPrefix
contents := make([]Object, 0, len(resp.Objects))
var owner = Owner{}
var data = ListObjectsResponse{}
@@ -517,9 +516,10 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
data.Marker = s3EncodeName(marker, encodingType)
data.Delimiter = s3EncodeName(delimiter, encodingType)
data.MaxKeys = maxKeys
data.NextMarker = s3EncodeName(resp.NextMarker, encodingType)
data.IsTruncated = resp.IsTruncated
prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -531,8 +531,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
// generates an ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
var contents []Object
var commonPrefixes []CommonPrefix
contents := make([]Object, 0, len(objects))
var owner = Owner{}
var data = ListObjectsV2Response{}
@@ -565,6 +564,10 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
// values to client.
continue
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
content.UserMetadata[k] = v
}
}
@@ -581,6 +584,8 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token))
data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken))
data.IsTruncated = isTruncated
commonPrefixes := make([]CommonPrefix, 0, len(prefixes))
for _, prefix := range prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -634,8 +639,16 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
listPartsResponse.Key = s3EncodeName(partsInfo.Object, encodingType)
listPartsResponse.UploadID = partsInfo.UploadID
listPartsResponse.StorageClass = globalMinioDefaultStorageClass
listPartsResponse.Initiator.ID = globalMinioDefaultOwnerID
listPartsResponse.Owner.ID = globalMinioDefaultOwnerID
// Dummy values, not meaningful
listPartsResponse.Initiator = Initiator{
ID: globalMinioDefaultOwnerID,
DisplayName: globalMinioDefaultOwnerID,
}
listPartsResponse.Owner = Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: globalMinioDefaultOwnerID,
}
listPartsResponse.MaxParts = partsInfo.MaxParts
listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker
@@ -698,10 +711,6 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, err
}
func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
if newObjectLayerFn() == nil {
// Server still in safe mode.
w.Header().Set(xhttp.MinIOServerStatus, "safemode")
}
setCommonHeaders(w)
if mType != mimeNone {
w.Header().Set(xhttp.ContentType, string(mType))
@@ -760,14 +769,14 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
// Set retry-after header to indicate user-agents to retry request after 120secs.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
case "InvalidRegion":
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalServerRegion)
case "AuthorizationHeaderMalformed":
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalServerRegion)
case "AccessDenied":
// If the request is from a browser and the browser
// UI is enabled, we need to redirect.
if browser && globalBrowserEnabled {
if newObjectLayerFn() == nil {
// server still in safe mode.
w.Header().Set(xhttp.MinIOServerStatus, "safemode")
}
w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
w.WriteHeader(http.StatusTemporaryRedirect)
return
@@ -818,38 +827,3 @@ func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, er
encodedErrorResponse := encodeResponseJSON(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}
// writeCustomErrorResponseXML - similar to writeErrorResponse,
// but accepts the error message directly (this allows messages to be
// dynamically generated.)
func writeCustomErrorResponseXML(ctx context.Context, w http.ResponseWriter, err APIError, errBody string, reqURL *url.URL, browser bool) {
switch err.Code {
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
// Set retry-after header to indicate user-agents to retry request after 120secs.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
case "AccessDenied":
// If the request is from a browser and the browser
// UI is enabled, we need to redirect.
if browser && globalBrowserEnabled {
w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
w.WriteHeader(http.StatusTemporaryRedirect)
return
}
}
reqInfo := logger.GetReqInfo(ctx)
errorResponse := APIErrorResponse{
Code: err.Code,
Message: errBody,
Resource: reqURL.Path,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
RequestID: w.Header().Get(xhttp.AmzRequestID),
HostID: globalDeploymentID,
}
encodedErrorResponse := encodeResponse(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
}


@@ -77,7 +77,7 @@ func TestObjectLocation(t *testing.T) {
// Server with virtual domain name.
{
request: &http.Request{
Host: "mys3.bucket.org",
Host: "mybucket.mys3.bucket.org",
Header: map[string][]string{},
},
domains: []string{"mys3.bucket.org"},
@@ -87,7 +87,7 @@ func TestObjectLocation(t *testing.T) {
},
{
request: &http.Request{
Host: "mys3.bucket.org",
Host: "mybucket.mys3.bucket.org",
Header: map[string][]string{
"X-Forwarded-Scheme": {httpsScheme},
},
@@ -98,11 +98,14 @@ func TestObjectLocation(t *testing.T) {
expectedLocation: "https://mybucket.mys3.bucket.org/test/1.txt",
},
}
for i, testCase := range testCases {
gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
if testCase.expectedLocation != gotLocation {
t.Errorf("Test %d: expected %s, got %s", i+1, testCase.expectedLocation, gotLocation)
}
for _, testCase := range testCases {
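// Capture a per-iteration copy so the t.Run closure below does not
// observe the loop variable changing (pre-Go 1.22 range semantics).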
testCase := testCase
t.Run("", func(t *testing.T) {
gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
if testCase.expectedLocation != gotLocation {
t.Errorf("expected %s, got %s", testCase.expectedLocation, gotLocation)
}
})
}
}


@@ -17,6 +17,7 @@
package cmd
import (
"net"
"net/http"
"github.com/gorilla/mux"
@@ -26,74 +27,92 @@ import (
)
func newHTTPServerFn() *xhttp.Server {
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()
globalObjLayerMutex.RLock()
defer globalObjLayerMutex.RUnlock()
return globalHTTPServer
}
func newObjectLayerWithoutSafeModeFn() ObjectLayer {
func setHTTPServer(h *xhttp.Server) {
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()
return globalObjectAPI
globalHTTPServer = h
globalObjLayerMutex.Unlock()
}
func newObjectLayerFn() ObjectLayer {
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()
if globalSafeMode {
return nil
}
globalObjLayerMutex.RLock()
defer globalObjLayerMutex.RUnlock()
return globalObjectAPI
}
func newCachedObjectLayerFn() CacheObjectLayer {
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()
if globalSafeMode {
return nil
}
globalObjLayerMutex.RLock()
defer globalObjLayerMutex.RUnlock()
return globalCacheObjectAPI
}
func setCacheObjectLayer(c CacheObjectLayer) {
globalObjLayerMutex.Lock()
globalCacheObjectAPI = c
globalObjLayerMutex.Unlock()
}
func setObjectLayer(o ObjectLayer) {
globalObjLayerMutex.Lock()
globalObjectAPI = o
globalObjLayerMutex.Unlock()
}
// objectAPIHandler implements and provides http handlers for S3 API.
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
CacheAPI func() CacheObjectLayer
// Returns true if handlers should interpret encryption.
EncryptionEnabled func() bool
// Returns true if handlers allow SSE-KMS encryption headers.
AllowSSEKMS func() bool
}
// getHost tries its best to return the request host.
// According to section 14.23 of RFC 2616 the Host header
// can include the port number if the default value of 80 is not used.
func getHost(r *http.Request) string {
if r.URL.IsAbs() {
return r.URL.Host
}
return r.Host
}
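// Example: for an absolute request URI the URL host wins, otherwise the
// Host header is used, e.g. (hypothetical request)
//
//	r, _ := http.NewRequest(http.MethodGet, "http://bucket.example.com:9000/", nil)
//	_ = getHost(r) // "bucket.example.com:9000"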
// registerAPIRouter - registers S3 compatible APIs.
func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool) {
func registerAPIRouter(router *mux.Router) {
// Initialize API.
api := objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
CacheAPI: newCachedObjectLayerFn,
EncryptionEnabled: func() bool {
return encryptionEnabled
},
AllowSSEKMS: func() bool {
return allowSSEKMS
},
}
// API Router
apiRouter := router.PathPrefix(SlashSeparator).Subrouter()
var routers []*mux.Router
for _, domainName := range globalDomainNames {
if globalMinioPort == "80" || globalMinioPort == "443" {
// For standard ports it's possible that incoming requests
// might not have a port assigned; in such scenarios we
// use wildcard routing.
// FIXME: remove this code once https://github.com/gorilla/mux/pull/579
// is merged and released upstream, as this entire change can become
// a single line.
if IsKubernetes() {
routers = append(routers, apiRouter.MatcherFunc(func(r *http.Request, match *mux.RouteMatch) bool {
host, _, err := net.SplitHostPort(getHost(r))
if err != nil {
host = r.Host
}
// Make sure to skip matching `minio.<domain>`; this is
// specifically meant for operator/k8s deployments.
// The reason we need to skip this is a special
// use case where we must make sure that
// minio.<namespace>.svc.<cluster_domain> is ignored
// by the bucket-DNS style so that path-style
// access remains available and honored at this domain.
//
// All other `<bucket>.<namespace>.svc.<cluster_domain>`
// hosts are routed through this matcher
// so that `<bucket>` is extracted.
return host != minioReservedBucket+"."+domainName
}).Host("{bucket:.+}."+domainName).Subrouter())
} else {
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
}
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName+":{port:.*}").Subrouter())
}
routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())
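// Net effect: requests are tried against the virtual-host routers for
// `<bucket>.<domain>` (with and without an explicit port) before falling
// back to the path-style `/{bucket}` router, so both addressing styles
// reach the same handlers.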
@@ -101,220 +120,234 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
// Object operations
// HeadObject
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler))))
collectAPIStats("headobject", maxClients(httpTraceAll(api.HeadObjectHandler))))
// CopyObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
HandlerFunc(collectAPIStats("copyobjectpart", maxClients(httpTraceAll(api.CopyObjectPartHandler)))).
Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
collectAPIStats("putobjectpart", maxClients(httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// ListObjectParts
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("listobjectparts", httpTraceAll(api.ListObjectPartsHandler)))).Queries("uploadId", "{uploadId:.*}")
collectAPIStats("listobjectparts", maxClients(httpTraceAll(api.ListObjectPartsHandler)))).Queries("uploadId", "{uploadId:.*}")
// CompleteMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("completemutipartupload", httpTraceAll(api.CompleteMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
collectAPIStats("completemutipartupload", maxClients(httpTraceAll(api.CompleteMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("newmultipartupload", httpTraceAll(api.NewMultipartUploadHandler)))).Queries("uploads", "")
collectAPIStats("newmultipartupload", maxClients(httpTraceAll(api.NewMultipartUploadHandler)))).Queries("uploads", "")
// AbortMultipartUpload
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("abortmultipartupload", httpTraceAll(api.AbortMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
collectAPIStats("abortmultipartupload", maxClients(httpTraceAll(api.AbortMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
// GetObjectACL - this is a dummy call.
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectacl", httpTraceHdrs(api.GetObjectACLHandler)))).Queries("acl", "")
collectAPIStats("getobjectacl", maxClients(httpTraceHdrs(api.GetObjectACLHandler)))).Queries("acl", "")
// PutObjectACL - this is a dummy call.
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectacl", httpTraceHdrs(api.PutObjectACLHandler)))).Queries("acl", "")
collectAPIStats("putobjectacl", maxClients(httpTraceHdrs(api.PutObjectACLHandler)))).Queries("acl", "")
// GetObjectTagging
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler)))).Queries("tagging", "")
collectAPIStats("getobjecttagging", maxClients(httpTraceHdrs(api.GetObjectTaggingHandler)))).Queries("tagging", "")
// PutObjectTagging
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjecttagging", httpTraceHdrs(api.PutObjectTaggingHandler)))).Queries("tagging", "")
collectAPIStats("putobjecttagging", maxClients(httpTraceHdrs(api.PutObjectTaggingHandler)))).Queries("tagging", "")
// DeleteObjectTagging
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("deleteobjecttagging", httpTraceHdrs(api.DeleteObjectTaggingHandler)))).Queries("tagging", "")
collectAPIStats("deleteobjecttagging", maxClients(httpTraceHdrs(api.DeleteObjectTaggingHandler)))).Queries("tagging", "")
// SelectObjectContent
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler)))).Queries("select", "").Queries("select-type", "2")
collectAPIStats("selectobjectcontent", maxClients(httpTraceHdrs(api.SelectObjectContentHandler)))).Queries("select", "").Queries("select-type", "2")
// GetObjectRetention
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectretention", httpTraceAll(api.GetObjectRetentionHandler)))).Queries("retention", "")
collectAPIStats("getobjectretention", maxClients(httpTraceAll(api.GetObjectRetentionHandler)))).Queries("retention", "")
// GetObjectLegalHold
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectlegalhold", httpTraceAll(api.GetObjectLegalHoldHandler)))).Queries("legal-hold", "")
collectAPIStats("getobjectlegalhold", maxClients(httpTraceAll(api.GetObjectLegalHoldHandler)))).Queries("legal-hold", "")
// GetObject
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler))))
collectAPIStats("getobject", maxClients(httpTraceHdrs(api.GetObjectHandler))))
// CopyObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(
collectAPIStats("copyobject", maxClients(httpTraceAll(api.CopyObjectHandler))))
// PutObjectRetention
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
collectAPIStats("putobjectretention", maxClients(httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
// PutObjectLegalHold
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectlegalhold", httpTraceAll(api.PutObjectLegalHoldHandler)))).Queries("legal-hold", "")
collectAPIStats("putobjectlegalhold", maxClients(httpTraceAll(api.PutObjectLegalHoldHandler)))).Queries("legal-hold", "")
// PutObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler))))
collectAPIStats("putobject", maxClients(httpTraceHdrs(api.PutObjectHandler))))
// DeleteObject
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler))))
collectAPIStats("deleteobject", maxClients(httpTraceAll(api.DeleteObjectHandler))))
/// Bucket operations
// GetBucketLocation
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlocation", httpTraceAll(api.GetBucketLocationHandler)))).Queries("location", "")
collectAPIStats("getbucketlocation", maxClients(httpTraceAll(api.GetBucketLocationHandler)))).Queries("location", "")
// GetBucketPolicy
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler)))).Queries("policy", "")
collectAPIStats("getbucketpolicy", maxClients(httpTraceAll(api.GetBucketPolicyHandler)))).Queries("policy", "")
// GetBucketLifecycle
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
collectAPIStats("getbucketlifecycle", maxClients(httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
// GetBucketEncryption
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketencryption", httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")
collectAPIStats("getbucketencryption", maxClients(httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")
// GetBucketObjectLockConfig
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketobjectlockconfiguration", maxClients(httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// GetBucketReplicationConfig
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketreplicationconfiguration", maxClients(httpTraceAll(api.GetBucketReplicationConfigHandler)))).Queries("replication", "")
// GetBucketVersioning
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketversioning", maxClients(httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
// GetBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketnotification", maxClients(httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
// ListenNotification
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listennotification", maxClients(httpTraceAll(api.ListenNotificationHandler)))).Queries("events", "{events:.*}")
// Dummy Bucket Calls
// GetBucketACL -- this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketacl", httpTraceAll(api.GetBucketACLHandler)))).Queries("acl", "")
collectAPIStats("getbucketacl", maxClients(httpTraceAll(api.GetBucketACLHandler)))).Queries("acl", "")
// PutBucketACL -- this is a dummy call.
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketacl", httpTraceAll(api.PutBucketACLHandler)))).Queries("acl", "")
collectAPIStats("putbucketacl", maxClients(httpTraceAll(api.PutBucketACLHandler)))).Queries("acl", "")
// GetBucketCors - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketcors", httpTraceAll(api.GetBucketCorsHandler)))).Queries("cors", "")
collectAPIStats("getbucketcors", maxClients(httpTraceAll(api.GetBucketCorsHandler)))).Queries("cors", "")
// GetBucketWebsiteHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketwebsite", httpTraceAll(api.GetBucketWebsiteHandler)))).Queries("website", "")
collectAPIStats("getbucketwebsite", maxClients(httpTraceAll(api.GetBucketWebsiteHandler)))).Queries("website", "")
// GetBucketAccelerateHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketaccelerate", httpTraceAll(api.GetBucketAccelerateHandler)))).Queries("accelerate", "")
collectAPIStats("getbucketaccelerate", maxClients(httpTraceAll(api.GetBucketAccelerateHandler)))).Queries("accelerate", "")
// GetBucketRequestPaymentHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketrequestpayment", httpTraceAll(api.GetBucketRequestPaymentHandler)))).Queries("requestPayment", "")
collectAPIStats("getbucketrequestpayment", maxClients(httpTraceAll(api.GetBucketRequestPaymentHandler)))).Queries("requestPayment", "")
// GetBucketLoggingHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlogging", httpTraceAll(api.GetBucketLoggingHandler)))).Queries("logging", "")
collectAPIStats("getbucketlogging", maxClients(httpTraceAll(api.GetBucketLoggingHandler)))).Queries("logging", "")
// GetBucketLifecycleHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
// GetBucketReplicationHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketreplication", httpTraceAll(api.GetBucketReplicationHandler)))).Queries("replication", "")
collectAPIStats("getbucketlifecycle", maxClients(httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
// GetBucketTaggingHandler
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
collectAPIStats("getbuckettagging", maxClients(httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
//DeleteBucketWebsiteHandler
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketwebsite", httpTraceAll(api.DeleteBucketWebsiteHandler)))).Queries("website", "")
collectAPIStats("deletebucketwebsite", maxClients(httpTraceAll(api.DeleteBucketWebsiteHandler)))).Queries("website", "")
// DeleteBucketTaggingHandler
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")
collectAPIStats("deletebuckettagging", maxClients(httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")
// GetBucketObjectLockConfig
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// GetBucketVersioning
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
// GetBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
// ListenBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listenbucketnotification", httpTraceAll(api.ListenBucketNotificationHandler))).Queries("events", "{events:.*}")
// ListMultipartUploads
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")
collectAPIStats("listmultipartuploads", maxClients(httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")
// ListObjectsV2M
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv2M", httpTraceAll(api.ListObjectsV2MHandler)))).Queries("list-type", "2", "metadata", "true")
collectAPIStats("listobjectsv2M", maxClients(httpTraceAll(api.ListObjectsV2MHandler)))).Queries("list-type", "2", "metadata", "true")
// ListObjectsV2
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
collectAPIStats("listobjectsv2", maxClients(httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
// ListObjectVersions
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectversions", httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "")
collectAPIStats("listobjectversions", maxClients(httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "")
// ListObjectsV1 (Legacy)
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler))))
collectAPIStats("listobjectsv1", maxClients(httpTraceAll(api.ListObjectsV1Handler))))
// PutBucketLifecycle
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
collectAPIStats("putbucketlifecycle", maxClients(httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
// PutBucketReplicationConfig
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketreplicationconfiguration", maxClients(httpTraceAll(api.PutBucketReplicationConfigHandler)))).Queries("replication", "")
// GetObjectRetention
// PutBucketEncryption
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketencryption", httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")
collectAPIStats("putbucketencryption", maxClients(httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")
// PutBucketPolicy
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler)))).Queries("policy", "")
collectAPIStats("putbucketpolicy", maxClients(httpTraceAll(api.PutBucketPolicyHandler)))).Queries("policy", "")
// PutBucketObjectLockConfig
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
collectAPIStats("putbucketobjectlockconfig", maxClients(httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// PutBucketTaggingHandler
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbuckettagging", httpTraceAll(api.PutBucketTaggingHandler)))).Queries("tagging", "")
collectAPIStats("putbuckettagging", maxClients(httpTraceAll(api.PutBucketTaggingHandler)))).Queries("tagging", "")
// PutBucketVersioning
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
collectAPIStats("putbucketversioning", maxClients(httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
// PutBucketNotification
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler)))).Queries("notification", "")
collectAPIStats("putbucketnotification", maxClients(httpTraceAll(api.PutBucketNotificationHandler)))).Queries("notification", "")
// PutBucket
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucket", httpTraceAll(api.PutBucketHandler))))
collectAPIStats("putbucket", maxClients(httpTraceAll(api.PutBucketHandler))))
// HeadBucket
bucket.Methods(http.MethodHead).HandlerFunc(
maxClients(collectAPIStats("headbucket", httpTraceAll(api.HeadBucketHandler))))
collectAPIStats("headbucket", maxClients(httpTraceAll(api.HeadBucketHandler))))
// PostPolicy
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(
maxClients(collectAPIStats("postpolicybucket", httpTraceHdrs(api.PostPolicyBucketHandler))))
collectAPIStats("postpolicybucket", maxClients(httpTraceHdrs(api.PostPolicyBucketHandler))))
// DeleteMultipleObjects
bucket.Methods(http.MethodPost).HandlerFunc(
maxClients(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler)))).Queries("delete", "")
collectAPIStats("deletemultipleobjects", maxClients(httpTraceAll(api.DeleteMultipleObjectsHandler)))).Queries("delete", "")
// DeleteBucketPolicy
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
collectAPIStats("deletebucketpolicy", maxClients(httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
// DeleteBucketReplication
bucket.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketreplicationconfiguration", maxClients(httpTraceAll(api.DeleteBucketReplicationConfigHandler)))).Queries("replication", "")
// DeleteBucketLifecycle
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
collectAPIStats("deletebucketlifecycle", maxClients(httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
// DeleteBucketEncryption
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketencryption", httpTraceAll(api.DeleteBucketEncryptionHandler)))).Queries("encryption", "")
collectAPIStats("deletebucketencryption", maxClients(httpTraceAll(api.DeleteBucketEncryptionHandler)))).Queries("encryption", "")
// DeleteBucket
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler))))
collectAPIStats("deletebucket", maxClients(httpTraceAll(api.DeleteBucketHandler))))
// PostRestoreObject
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("restoreobject", maxClients(httpTraceAll(api.PostRestoreObjectHandler)))).Queries("restore", "")
}
/// Root operation
// ListenNotification
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
collectAPIStats("listennotification", maxClients(httpTraceAll(api.ListenNotificationHandler)))).Queries("events", "{events:.*}")
// ListBuckets
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
collectAPIStats("listbuckets", maxClients(httpTraceAll(api.ListBucketsHandler))))
// S3 browser with signature v4 adds '//' for ListBuckets request, so rather
// than failing with UnknownAPIRequest we simply handle it for now.
apiRouter.Methods(http.MethodGet).Path(SlashSeparator + SlashSeparator).HandlerFunc(
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
// Supports cors only for S3 handlers
apiRouter.Methods(http.MethodOptions).Path(SlashSeparator).HandlerFunc(
maxClients(collectAPIStats("cors", httpTraceAll(corsHandlerFunc()))))
apiRouter.Methods(http.MethodOptions).Path(SlashSeparator + SlashSeparator).HandlerFunc(
maxClients(collectAPIStats("cors", httpTraceAll(corsHandlerFunc()))))
collectAPIStats("listbuckets", maxClients(httpTraceAll(api.ListBucketsHandler))))
// If none of the routes match add default error handler routes
apiRouter.NotFoundHandler = http.HandlerFunc(collectAPIStats("notfound", httpTraceAll(errorResponseHandler)))
apiRouter.MethodNotAllowedHandler = http.HandlerFunc(collectAPIStats("methodnotallowed", httpTraceAll(errorResponseHandler)))
apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))
apiRouter.MethodNotAllowedHandler = collectAPIStats("methodnotallowed", httpTraceAll(methodNotAllowedHandler("S3")))
}
// setCorsHandler handler for CORS (Cross Origin Resource Sharing)
func corsHandlerFunc() http.HandlerFunc {
// corsHandler handler for CORS (Cross Origin Resource Sharing)
func corsHandler(handler http.Handler) http.Handler {
commonS3Headers := []string{
xhttp.Date,
xhttp.ETag,
@@ -325,12 +358,19 @@ func corsHandlerFunc() http.HandlerFunc {
xhttp.ContentEncoding,
xhttp.ContentLength,
xhttp.ContentType,
xhttp.ContentDisposition,
xhttp.LastModified,
xhttp.ContentLanguage,
xhttp.CacheControl,
xhttp.RetryAfter,
xhttp.AmzBucketRegion,
xhttp.Expires,
"X-Amz*",
"x-amz*",
"*",
}
c := cors.New(cors.Options{
return cors.New(cors.Options{
AllowOriginFunc: func(origin string) bool {
for _, allowedOrigin := range globalAPIConfig.getCorsAllowOrigins() {
if wildcard.MatchSimple(allowedOrigin, origin) {
@@ -351,7 +391,5 @@ func corsHandlerFunc() http.HandlerFunc {
AllowedHeaders: commonS3Headers,
ExposedHeaders: commonS3Headers,
AllowCredentials: true,
})
return c.HandlerFunc
}).Handler(handler)
}
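// Usage sketch: corsHandler now has the conventional middleware shape and
// can wrap the fully-registered router (names hypothetical):
//
//	var handler http.Handler = router // *mux.Router from registerAPIRouter
//	handler = corsHandler(handler)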


@@ -151,9 +151,10 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
return cred, claims, owner, ErrNone
}
// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
// It does not accept presigned or JWT or anonymous requests.
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
// checkAdminRequestAuth checks for authentication and authorization for the incoming
// request. It only accepts V2 and V4 requests. Presigned, JWT and anonymous requests
// are automatically rejected.
func checkAdminRequestAuth(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, region)
if s3Err != ErrNone {
return cred, s3Err
@@ -213,15 +214,15 @@ func getClaimsFromToken(r *http.Request, token string) (map[string]interface{},
if globalPolicyOPA == nil {
// If OPA is not set and if ldap claim key is set, allow the claim.
if _, ok := claims.Lookup(ldapUser); ok {
if _, ok := claims.MapClaims[ldapUser]; ok {
return claims.Map(), nil
}
// If OPA is not set, the session token must
// carry a policy claim; it is mandatory, so reject
// requests without a policy claim.
_, pokOpenID := claims.Lookup(iamPolicyClaimNameOpenID())
_, pokSA := claims.Lookup(iamPolicyClaimNameSA())
_, pokOpenID := claims.MapClaims[iamPolicyClaimNameOpenID()]
_, pokSA := claims.MapClaims[iamPolicyClaimNameSA()]
if !pokOpenID && !pokSA {
return nil, errAuthentication
}
@@ -333,8 +334,12 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// Populate payload again to handle it in HTTP handler.
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
}
if cred.AccessKey != "" {
logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
}
if cred.AccessKey == "" {
if action != policy.ListAllMyBucketsAction && cred.AccessKey == "" {
// Anonymous checks are not meant for ListBuckets action
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: action,
@@ -346,8 +351,26 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
if action == policy.ListBucketVersionsAction {
// In AWS S3 the s3:ListBucket permission is the same as the
// s3:ListBucketVersions permission, so verify it as a fallback.
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, locationConstraint, "", nil),
IsOwner: false,
ObjectName: objectName,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
}
return cred.AccessKey, owner, ErrAccessDenied
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.Action(action),
@@ -360,6 +383,24 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
if action == policy.ListBucketVersionsAction {
// In AWS S3 the s3:ListBucket permission is the same as the
// s3:ListBucketVersions permission, so verify it as a fallback.
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
ObjectName: objectName,
IsOwner: owner,
Claims: claims,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
}
return cred.AccessKey, owner, ErrAccessDenied
}
@@ -418,8 +459,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
// The verification happens implicitly during reading.
reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5),
hex.EncodeToString(contentSHA256), -1, globalCLIContext.StrictS3Compat)
reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256), -1)
if err != nil {
return toAPIErrorCode(ctx, err)
}
@@ -427,16 +467,6 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
return ErrNone
}
// authHandler - handles all the incoming authorization headers and validates them if possible.
type authHandler struct {
handler http.Handler
}
// setAuthHandler to validate authorization header for the incoming request.
func setAuthHandler(h http.Handler) http.Handler {
return authHandler{h}
}
// List of all supported S3 auth types.
var supportedS3AuthTypes = map[authType]struct{}{
authTypeAnonymous: {},
@@ -454,26 +484,30 @@ func isSupportedS3AuthType(aType authType) bool {
return ok
}
// handler for validating incoming authorization headers.
func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
aType := getRequestAuthType(r)
if isSupportedS3AuthType(aType) {
// Let top level caller validate for anonymous and known signed requests.
a.handler.ServeHTTP(w, r)
return
} else if aType == authTypeJWT {
// Validate the Authorization header if it's valid for a JWT request.
if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
w.WriteHeader(http.StatusUnauthorized)
// setAuthHandler to validate authorization header for the incoming request.
func setAuthHandler(h http.Handler) http.Handler {
// handler for validating incoming authorization headers.
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
aType := getRequestAuthType(r)
if isSupportedS3AuthType(aType) {
// Let top level caller validate for anonymous and known signed requests.
h.ServeHTTP(w, r)
return
} else if aType == authTypeJWT {
// Validate the Authorization header if it's valid for a JWT request.
if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
w.WriteHeader(http.StatusUnauthorized)
w.Write([]byte(authErr.Error()))
return
}
h.ServeHTTP(w, r)
return
} else if aType == authTypeSTS {
h.ServeHTTP(w, r)
return
}
a.handler.ServeHTTP(w, r)
return
} else if aType == authTypeSTS {
a.handler.ServeHTTP(w, r)
return
}
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
})
}
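
The replacement above swaps the dedicated authHandler struct for a closure built with http.HandlerFunc. A minimal, self-contained sketch of that middleware shape, with logging standing in for the auth checks (names here are illustrative):

package main

import (
	"log"
	"net/http"
)

// withAuth mirrors the closure-based setAuthHandler: the wrapped handler is
// captured by the closure instead of being stored in a handler struct.
func withAuth(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		log.Println(r.Method, r.URL.Path) // stand-in for validating the Authorization header
		h.ServeHTTP(w, r)
	})
}

The closure form drops the boilerplate type while keeping the same http.Handler contract.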
func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, map[string]interface{}, APIErrorCode) {
@@ -519,7 +553,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.Action(policy.BypassGovernanceRetentionAction),
Action: policy.BypassGovernanceRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
IsOwner: false,
@@ -528,7 +562,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
}
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.Action(policy.PutObjectRetentionAction),
Action: policy.PutObjectRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
IsOwner: false,
@@ -551,7 +585,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: policy.BypassGovernanceRetentionAction,
Action: iampolicy.BypassGovernanceRetentionAction,
BucketName: bucketName,
ObjectName: objectName,
ConditionValues: conditions,
@@ -561,7 +595,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: policy.PutObjectRetentionAction,
Action: iampolicy.PutObjectRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
ObjectName: objectName,
@@ -579,7 +613,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
// isPutActionAllowed - checks if a PUT operation is allowed on the resource; this
// call verifies bucket policies and IAM policies, supports multi-user
// checks, etc.
func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.Request, action iampolicy.Action) (s3Err APIErrorCode) {
func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action iampolicy.Action) (s3Err APIErrorCode) {
var cred auth.Credentials
var owner bool
switch atype {
@@ -600,6 +634,10 @@ func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.R
return s3Err
}
if cred.AccessKey != "" {
logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
}
// Do not check for PutObjectRetentionAction permission,
// if mode and retain until date are not set.
// Can happen when bucket has default lock config set


@@ -52,7 +52,7 @@ func TestGetRequestAuthType(t *testing.T) {
"X-Amz-Content-Sha256": []string{streamingContentSHA256},
"Content-Encoding": []string{streamingContentEncoding},
},
Method: "PUT",
Method: http.MethodPut,
},
authT: authTypeStreamingSigned,
},
@@ -111,7 +111,7 @@ func TestGetRequestAuthType(t *testing.T) {
Header: http.Header{
"Content-Type": []string{"multipart/form-data"},
},
Method: "POST",
Method: http.MethodPost,
},
authT: authTypePostPolicy,
},
@@ -212,7 +212,7 @@ func TestIsRequestPresignedSignatureV2(t *testing.T) {
for i, testCase := range testCases {
// creating an input HTTP request.
// Only the query parameters are relevant for this particular test.
inputReq, err := http.NewRequest("GET", "http://example.com", nil)
inputReq, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
if err != nil {
t.Fatalf("Error initializing input HTTP request: %v", err)
}
@@ -246,7 +246,7 @@ func TestIsRequestPresignedSignatureV4(t *testing.T) {
for i, testCase := range testCases {
// creating an input HTTP request.
// Only the query parameters are relevant for this particular test.
inputReq, err := http.NewRequest("GET", "http://example.com", nil)
inputReq, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
if err != nil {
t.Fatalf("Error initializing input HTTP request: %v", err)
}
@@ -369,15 +369,15 @@ func TestIsReqAuthenticated(t *testing.T) {
s3Error APIErrorCode
}{
// When request is unsigned, access denied is returned.
{mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
{mustNewRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
// Empty Content-Md5 header.
{mustNewSignedEmptyMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
{mustNewSignedEmptyMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
// Short Content-Md5 header.
{mustNewSignedShortMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
{mustNewSignedShortMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
// When request is properly signed, but has bad Content-MD5 header.
{mustNewSignedBadMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
{mustNewSignedBadMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
// When request is properly signed, error is none.
{mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
{mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrNone},
}
ctx := context.Background()
@@ -413,15 +413,15 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
Request *http.Request
ErrCode APIErrorCode
}{
{Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
{Request: mustNewSignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
{Request: mustNewSignedV2Request(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedV2Request(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
}
ctx := context.Background()
for i, testCase := range testCases {
if _, s3Error := checkAdminRequestAuthType(ctx, testCase.Request, iampolicy.AllAdminActions, globalServerRegion); s3Error != testCase.ErrCode {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, iampolicy.AllAdminActions, globalServerRegion); s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
}
}
@@ -461,7 +461,7 @@ func TestValidateAdminSignature(t *testing.T) {
}
for i, testCase := range testCases {
req := mustNewRequest("GET", "http://localhost:9000/", 0, nil, t)
req := mustNewRequest(http.MethodGet, "http://localhost:9000/", 0, nil, t)
if err := signRequestV4(req, testCase.AccessKey, testCase.SecretKey); err != nil {
t.Fatalf("Unable to initialize new signed http request %s", err)
}


@@ -18,7 +18,6 @@ package cmd
import (
"context"
"path"
"time"
"github.com/minio/minio/cmd/logger"
@@ -55,15 +54,39 @@ func (h *healRoutine) queueHealTask(task healTask) {
h.tasks <- task
}
func waitForLowHTTPReq(tolerance int32) {
func waitForLowHTTPReq(maxIO int, maxWait time.Duration) {
// No need to wait; run at full speed.
if maxIO <= 0 {
return
}
// Wait in 100 millisecond ticks, up to maxWait, before proceeding
waitTick := 100 * time.Millisecond
// Bucket notification and http trace are not costly; it is okay to ignore them
// while counting the number of concurrent connections
maxIOFn := func() int {
return maxIO + int(globalHTTPListen.NumSubscribers()) + int(globalHTTPTrace.NumSubscribers())
}
tmpMaxWait := maxWait
if httpServer := newHTTPServerFn(); httpServer != nil {
// Wait at most 10 minutes for an in-progress request before proceeding to heal
waitCount := 600
// If any requests are in progress, delay the heal.
for (httpServer.GetRequestCount() >= tolerance) &&
waitCount > 0 {
waitCount--
time.Sleep(1 * time.Second)
for httpServer.GetRequestCount() >= maxIOFn() {
if tmpMaxWait > 0 {
if tmpMaxWait < waitTick {
time.Sleep(tmpMaxWait)
} else {
time.Sleep(waitTick)
}
tmpMaxWait = tmpMaxWait - waitTick
}
if tmpMaxWait <= 0 {
if intDataUpdateTracker.debug {
logger.Info("waitForLowHTTPReq: waited max %s, resuming", maxWait)
}
break
}
}
}
}
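
A simplified sketch of the capped-wait loop above: poll a condition in 100ms ticks until it clears or maxWait is exhausted. Here cond stands in for the "in-flight request count below maxIOFn()" predicate; the helper name is illustrative:

package main

import "time"

// waitUntil polls cond until it returns true or maxWait is used up,
// mirroring the waitForLowHTTPReq loop above.
func waitUntil(cond func() bool, maxWait time.Duration) {
	const tick = 100 * time.Millisecond
	remaining := maxWait
	for !cond() {
		if remaining <= 0 {
			return // waited long enough, proceed anyway
		}
		if remaining < tick {
			time.Sleep(remaining)
		} else {
			time.Sleep(tick)
		}
		remaining -= tick
	}
}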
@@ -74,28 +97,25 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
select {
case task, ok := <-h.tasks:
if !ok {
break
return
}
// Wait and proceed if there are active requests
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()))
var res madmin.HealResultItem
var err error
switch {
case task.bucket == nopHeal:
switch task.bucket {
case nopHeal:
continue
case task.bucket == SlashSeparator:
case SlashSeparator:
res, err = healDiskFormat(ctx, objAPI, task.opts)
case task.bucket != "" && task.object == "":
res, err = objAPI.HealBucket(ctx, task.bucket, task.opts.DryRun, task.opts.Remove)
case task.bucket != "" && task.object != "":
res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts)
}
if task.bucket != "" && task.object != "" {
ObjectPathUpdated(path.Join(task.bucket, task.object))
default:
if task.object == "" {
res, err = objAPI.HealBucket(ctx, task.bucket, task.opts)
} else {
res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts)
}
}
task.responseCh <- healResult{result: res, err: err}
case <-h.doneCh:
return
case <-ctx.Done():
@@ -112,24 +132,6 @@ func newHealRoutine() *healRoutine {
}
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
// Run the background healer
globalBackgroundHealRoutine = newHealRoutine()
go globalBackgroundHealRoutine.run(ctx, objAPI)
nh := newBgHealSequence()
// Heal any disk format and metadata early, if possible.
if err := nh.healDiskMeta(); err != nil {
if newObjectLayerFn() != nil {
// log only in situations when the object layer
// has fully initialized.
logger.LogIf(nh.ctx, err)
}
}
globalBackgroundHealState.LaunchNewHealSequence(nh)
}
// healDiskFormat - heals format.json; the return value indicates whether a
// failure occurred.
func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpts) (madmin.HealResultItem, error) {
@@ -141,24 +143,5 @@ func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpt
return madmin.HealResultItem{}, err
}
// Healing succeeded; notify the peers to reload format and re-initialize disks.
// We will not notify peers if healing is not required.
if err == nil {
// Notify servers in background and retry if needed.
go func() {
retry:
for _, nerr := range globalNotificationSys.ReloadFormat(opts.DryRun) {
if nerr.Err != nil {
if nerr.Err.Error() == errServerNotInitialized.Error() {
time.Sleep(time.Second)
goto retry
}
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}()
}
return res, nil
}


@@ -18,106 +18,208 @@ package cmd
import (
"context"
"errors"
"fmt"
"sort"
"time"
"github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/color"
"github.com/minio/minio/pkg/console"
)
const defaultMonitorNewDiskInterval = time.Minute * 10
const (
defaultMonitorNewDiskInterval = time.Second * 10
healingTrackerFilename = ".healing.bin"
)
func initLocalDisksAutoHeal(ctx context.Context, objAPI ObjectLayer) {
go monitorLocalDisksAndHeal(ctx, objAPI)
//go:generate msgp -file $GOFILE -unexported
type healingTracker struct {
ID string
// future: add more tracking capabilities
}
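
The //go:generate directive above regenerates the MessagePack codecs that appear in the _gen.go file later in this diff. A hypothetical round-trip through those generated methods (the helper itself is illustrative, not part of the source):

// roundTrip serializes a healingTracker and parses it back using the
// msgp-generated MarshalMsg/UnmarshalMsg shown further below.
func roundTrip(ht healingTracker) (healingTracker, error) {
	buf, err := ht.MarshalMsg(nil)
	if err != nil {
		return healingTracker{}, err
	}
	var out healingTracker
	if _, err := out.UnmarshalMsg(buf); err != nil {
		return healingTracker{}, err
	}
	return out, nil
}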
func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
z, ok := objAPI.(*erasureServerPools)
if !ok {
return
}
initBackgroundHealing(ctx, objAPI) // start quick background healing
bgSeq := mustGetHealSequence(ctx)
globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)
if drivesToHeal := globalBackgroundHealState.healDriveCount(); drivesToHeal > 0 {
logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content...",
drivesToHeal, defaultMonitorNewDiskInterval))
// Heal any disk format and metadata early, if possible.
// Start with format healing
if err := bgSeq.healDiskFormat(); err != nil {
if newObjectLayerFn() != nil {
// log only in situations when the object layer
// has fully initialized.
logger.LogIf(bgSeq.ctx, err)
}
}
}
if err := bgSeq.healDiskMeta(objAPI); err != nil {
if newObjectLayerFn() != nil {
// log only in situations when the object layer
// has fully initialized.
logger.LogIf(bgSeq.ctx, err)
}
}
go monitorLocalDisksAndHeal(ctx, z, bgSeq)
}
func getLocalDisksToHeal() (disksToHeal Endpoints) {
for _, ep := range globalEndpoints {
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
disk, _, err := connectEndpoint(endpoint)
if errors.Is(err, errUnformattedDisk) {
disksToHeal = append(disksToHeal, endpoint)
} else if err == nil && disk != nil && disk.Healing() {
disksToHeal = append(disksToHeal, disk.Endpoint())
}
}
}
return disksToHeal
}
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
// Run the background healer
globalBackgroundHealRoutine = newHealRoutine()
go globalBackgroundHealRoutine.run(ctx, objAPI)
globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence(), objAPI)
}
// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible for performing the heal
func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
z, ok := objAPI.(*erasureZones)
if !ok {
return
}
var bgSeq *healSequence
var found bool
for {
bgSeq, found = globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
if found {
break
}
time.Sleep(time.Second)
}
func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq *healSequence) {
// Perform automatic disk healing when a disk is replaced locally.
diskCheckTimer := time.NewTimer(defaultMonitorNewDiskInterval)
defer diskCheckTimer.Stop()
wait:
for {
select {
case <-ctx.Done():
return
case <-time.After(defaultMonitorNewDiskInterval):
// Attempt a heal first, as the server starts up.
localDisksInZoneHeal := make([]Endpoints, len(z.zones))
var healNewDisks bool
for i, ep := range globalEndpoints {
localDisksToHeal := Endpoints{}
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
_, _, err := connectEndpoint(endpoint)
if err == errUnformattedDisk {
localDisksToHeal = append(localDisksToHeal, endpoint)
}
case <-diskCheckTimer.C:
// Reset to next interval.
diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
var erasureSetInPoolDisksToHeal []map[int][]StorageAPI
healDisks := globalBackgroundHealState.getHealLocalDisks()
if len(healDisks) > 0 {
// Reformat disks
bgSeq.sourceCh <- healSource{bucket: SlashSeparator}
// Ensure that reformatting disks is finished
bgSeq.sourceCh <- healSource{bucket: nopHeal}
logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
len(healDisks)))
erasureSetInPoolDisksToHeal = make([]map[int][]StorageAPI, len(z.serverPools))
for i := range z.serverPools {
erasureSetInPoolDisksToHeal[i] = map[int][]StorageAPI{}
}
if len(localDisksToHeal) == 0 {
}
if serverDebugLog {
console.Debugf(color.Green("healDisk:")+" disk check timer fired, attempting to heal %d drives\n", len(healDisks))
}
// heal only if new disks found.
for _, endpoint := range healDisks {
disk, format, err := connectEndpoint(endpoint)
if err != nil {
printEndpointError(endpoint, err, true)
continue
}
localDisksInZoneHeal[i] = localDisksToHeal
healNewDisks = true
}
// Reformat disks only if needed.
if !healNewDisks {
continue
}
// Reformat disks
bgSeq.sourceCh <- healSource{bucket: SlashSeparator}
// Ensure that reformatting disks is finished
bgSeq.sourceCh <- healSource{bucket: nopHeal}
var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
// Compute the list of erasure set to heal
for i, localDisksToHeal := range localDisksInZoneHeal {
var erasureSetToHeal []int
for _, endpoint := range localDisksToHeal {
// Load the new format of this passed endpoint
_, format, err := connectEndpoint(endpoint)
if err != nil {
logger.LogIf(ctx, err)
continue
}
// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[i].format, format)
if err != nil {
logger.LogIf(ctx, err)
continue
}
erasureSetToHeal = append(erasureSetToHeal, setIndex)
poolIdx := globalEndpoints.GetLocalPoolIdx(disk.Endpoint())
if poolIdx < 0 {
continue
}
erasureSetInZoneToHeal[i] = erasureSetToHeal
// Calculate the set index where the current endpoint belongs
z.serverPools[poolIdx].erasureDisksMu.RLock()
// Protect reading reference format.
setIndex, _, err := findDiskIndex(z.serverPools[poolIdx].format, format)
z.serverPools[poolIdx].erasureDisksMu.RUnlock()
if err != nil {
printEndpointError(endpoint, err, false)
continue
}
erasureSetInPoolDisksToHeal[poolIdx][setIndex] = append(erasureSetInPoolDisksToHeal[poolIdx][setIndex], disk)
}
// Heal all erasure sets that need healing
for i, erasureSetToHeal := range erasureSetInZoneToHeal {
for _, setIndex := range erasureSetToHeal {
err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex], z.zones[i].drivesPerSet)
if err != nil {
logger.LogIf(ctx, err)
buckets, _ := z.ListBuckets(ctx)
// Heal latest buckets first.
sort.Slice(buckets, func(i, j int) bool {
return buckets[i].Created.After(buckets[j].Created)
})
for i, setMap := range erasureSetInPoolDisksToHeal {
for setIndex, disks := range setMap {
for _, disk := range disks {
logger.Info("Healing disk '%s' on %s pool", disk, humanize.Ordinal(i+1))
// If the healing tracker is missing, someone changed the drives underneath us.
if !disk.Healing() {
logger.Info("Healing tracker missing on '%s', disk was swapped again on %s pool", disk, humanize.Ordinal(i+1))
diskID, err := disk.GetDiskID()
if err != nil {
logger.LogIf(ctx, err)
// Reading format.json failed or it was not found; we cannot proceed
// further, so go back to looking for new disks to heal.
goto wait
}
if err := saveHealingTracker(disk, diskID); err != nil {
logger.LogIf(ctx, err)
// Unable to write the healing tracker: permission denied or some
// other unexpected error occurred. We cannot proceed further, so go
// back to looking for new disks to heal.
goto wait
}
}
err := z.serverPools[i].sets[setIndex].healErasureSet(ctx, buckets)
if err != nil {
logger.LogIf(ctx, err)
continue
}
logger.Info("Healing disk '%s' on %s pool complete", disk, humanize.Ordinal(i+1))
if err := disk.Delete(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix),
healingTrackerFilename, false); err != nil && !errors.Is(err, errFileNotFound) {
logger.LogIf(ctx, err)
continue
}
// Only upon success pop the healed disk.
globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
}
}
}
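
The monitor above replaces a per-iteration time.After with a single re-armed time.Timer (diskCheckTimer). A minimal sketch of that pattern, with interval and work as illustrative parameters:

package main

import (
	"context"
	"time"
)

// pollLoop re-arms one timer after each firing instead of allocating a
// fresh timer on every loop iteration, as the disk-check loop above does.
func pollLoop(ctx context.Context, interval time.Duration, work func()) {
	t := time.NewTimer(interval)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			t.Reset(interval) // re-arm for the next check
			work()
		}
	}
}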


@@ -0,0 +1,110 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
// write "ID"
err = en.Append(0x81, 0xa2, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.ID)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// string "ID"
o = append(o, 0x81, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z healingTracker) Msgsize() (s int) {
s = 1 + 3 + msgp.StringPrefixSize + len(z.ID)
return
}


@@ -0,0 +1,123 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalhealingTracker(t *testing.T) {
v := healingTracker{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsghealingTracker(b *testing.B) {
v := healingTracker{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsghealingTracker(b *testing.B) {
v := healingTracker{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalhealingTracker(b *testing.B) {
v := healingTracker{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodehealingTracker(t *testing.T) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodehealingTracker Msgsize() is inaccurate")
}
vn := healingTracker{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodehealingTracker(b *testing.B) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodehealingTracker(b *testing.B) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}


@@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"context"
"io/ioutil"
"math"
"math/rand"
"strconv"
@@ -55,7 +54,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
for i := 0; i < b.N; i++ {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -114,7 +113,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
md5hex := getMD5Hash([]byte(textPartData))
var partInfo PartInfo
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
mustGetPutObjReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
mustGetPutObjReader(b, bytes.NewReader(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -175,56 +174,6 @@ func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int)
runPutObjectBenchmarkParallel(b, objLayer, objSize)
}
// Benchmark utility functions for ObjectLayer.GetObject().
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
b.Fatal(err)
}
textData := generateBytesData(objSize)
// generate etag for the generated data.
// The etag of the data to be written is required as input for PutObject.
// PutObject is the function which writes the data onto the FS/Erasure backend.
// get text data generated for number of bytes equal to object size.
md5hex := getMD5Hash(textData)
sha256hex := ""
for i := 0; i < 10; i++ {
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for GetObject starts here. Reset the benchmark timer.
b.ResetTimer()
for i := 0; i < b.N; i++ {
var buffer = new(bytes.Buffer)
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "", ObjectOptions{})
if err != nil {
b.Error(err)
}
}
// Benchmark ends here. Stop timer.
b.StopTimer()
}
// randomly picks a character and returns its equivalent byte array.
func getRandomByte() []byte {
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
@@ -240,38 +189,6 @@ func generateBytesData(size int) []byte {
return bytes.Repeat(getRandomByte(), size)
}
// creates Erasure/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runGetObjectBenchmark(b, objLayer, objSize)
}
// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runGetObjectBenchmarkParallel(b, objLayer, objSize)
}
// Parallel benchmark utility functions for ObjectLayer.PutObject().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
@@ -301,7 +218,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
for pb.Next() {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -315,58 +232,3 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// Benchmark ends here. Stop timer.
b.StopTimer()
}
// Parallel benchmark utility functions for ObjectLayer.GetObject().
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
b.Fatal(err)
}
// get text data generated for number of bytes equal to object size.
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// The md5sum of the data to be written is required as input for PutObject.
// PutObject is the function which writes the data onto the FS/Erasure backend.
md5hex := getMD5Hash([]byte(textData))
sha256hex := ""
for i := 0; i < 10; i++ {
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for GetObject starts here. Reset the benchmark timer.
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
i := 0
for pb.Next() {
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "", ObjectOptions{})
if err != nil {
b.Error(err)
}
i++
if i == 10 {
i = 0
}
}
})
// Benchmark ends here. Stop timer.
b.StopTimer()
}


@@ -18,6 +18,7 @@ package cmd
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"hash"
@@ -80,7 +81,7 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
totalFileSize = bitrotSumsTotalSize + length
}
err := disk.CreateFile(volume, filePath, totalFileSize, r)
err := disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r)
r.CloseWithError(err)
close(bw.canClose)
}()
@@ -90,7 +91,8 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
// ReadAt() implementation which verifies the bitrot hash available as part of the stream.
type streamingBitrotReader struct {
disk StorageAPI
rc io.ReadCloser
data []byte
rc io.Reader
volume string
filePath string
tillOffset int64
@@ -104,7 +106,10 @@ func (b *streamingBitrotReader) Close() error {
if b.rc == nil {
return nil
}
return b.rc.Close()
if closer, ok := b.rc.(io.Closer); ok {
return closer.Close()
}
return nil
}
func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
@@ -118,11 +123,16 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
// For the first ReadAt() call we need to open the stream for reading.
b.currOffset = offset
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
b.rc, err = b.disk.ReadFileStream(b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
if len(b.data) == 0 {
b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
} else {
b.rc = io.NewSectionReader(bytes.NewReader(b.data), streamOffset, b.tillOffset-streamOffset)
}
if err != nil {
return 0, err
}
}
if offset != b.currOffset {
// Can never happen unless there are programmer bugs
return 0, errUnexpected
@@ -139,20 +149,20 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
b.h.Write(buf)
if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
err := &errHashMismatch{fmt.Sprintf("hashes do not match expected %s, got %s",
hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
logger.LogIf(GlobalContext, err)
return 0, err
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s - content hash does not match - expected %s, got %s",
b.disk, b.volume, b.filePath, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))))
return 0, errFileCorrupt
}
b.currOffset += int64(len(buf))
return len(buf), nil
}
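
The ReadAt change above serves ranged reads from memory when the shard data is already resident, falling back to a disk stream otherwise. A minimal sketch of the in-memory branch (the helper name is illustrative):

package main

import (
	"bytes"
	"io"
)

// rangeReader serves a ranged read from an in-memory buffer via a
// SectionReader over a bytes.Reader, instead of re-opening a disk stream.
func rangeReader(data []byte, offset, length int64) io.Reader {
	return io.NewSectionReader(bytes.NewReader(data), offset, length)
}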
// Returns streaming bitrot reader implementation.
func newStreamingBitrotReader(disk StorageAPI, volume, filePath string, tillOffset int64, algo BitrotAlgorithm, shardSize int64) *streamingBitrotReader {
func newStreamingBitrotReader(disk StorageAPI, data []byte, volume, filePath string, tillOffset int64, algo BitrotAlgorithm, shardSize int64) *streamingBitrotReader {
h := algo.New()
return &streamingBitrotReader{
disk,
data,
nil,
volume,
filePath,


@@ -17,6 +17,8 @@
package cmd
import (
"context"
"fmt"
"hash"
"io"
@@ -33,14 +35,14 @@ type wholeBitrotWriter struct {
}
func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
err := b.disk.AppendFile(b.volume, b.filePath, p)
err := b.disk.AppendFile(context.TODO(), b.volume, b.filePath, p)
if err != nil {
logger.LogIf(GlobalContext, err)
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
return 0, err
}
_, err = b.Hash.Write(p)
if err != nil {
logger.LogIf(GlobalContext, err)
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
return 0, err
}
return len(p), nil
@@ -68,15 +70,13 @@ type wholeBitrotReader struct {
func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error) {
if b.buf == nil {
b.buf = make([]byte, b.tillOffset-offset)
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
ctx := GlobalContext
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
logger.LogIf(ctx, err)
if _, err := b.disk.ReadFile(context.TODO(), b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, err))
return 0, err
}
}
if len(b.buf) < len(buf) {
logger.LogIf(GlobalContext, errLessData)
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, errLessData))
return 0, errLessData
}
n = copy(buf, b.buf)
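
The logging changes above wrap the low-level error with disk and path context using %w. A small self-contained sketch of why %w matters here: the sentinel stays matchable through the wrapper:

package main

import (
	"errors"
	"fmt"
)

var errLessData = errors.New("less data available than requested")

func main() {
	// Wrapping with %w keeps errors.Is working on the wrapped sentinel.
	err := fmt.Errorf("Disk: %s -> %s/%s returned %w", "disk1", "vol", "file", errLessData)
	fmt.Println(errors.Is(err, errLessData)) // prints: true
}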


@@ -17,13 +17,13 @@
package cmd
import (
"crypto/sha256"
"errors"
"hash"
"io"
"github.com/minio/highwayhash"
"github.com/minio/minio/cmd/logger"
sha256 "github.com/minio/sha256-simd"
"golang.org/x/crypto/blake2b"
)
@@ -103,9 +103,9 @@ func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, alg
return newWholeBitrotWriter(disk, volume, filePath, algo, shardSize)
}
func newBitrotReader(disk StorageAPI, bucket string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) io.ReaderAt {
func newBitrotReader(disk StorageAPI, data []byte, bucket string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) io.ReaderAt {
if algo == HighwayHash256S {
return newStreamingBitrotReader(disk, bucket, filePath, tillOffset, algo, shardSize)
return newStreamingBitrotReader(disk, data, bucket, filePath, tillOffset, algo, shardSize)
}
return newWholeBitrotReader(disk, bucket, filePath, algo, tillOffset, sum)
}


@@ -17,9 +17,9 @@
package cmd
import (
"context"
"io"
"io/ioutil"
"log"
"os"
"testing"
)
@@ -27,53 +27,53 @@ import (
func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
tmpDir, err := ioutil.TempDir("", "")
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
volume := "testvol"
filePath := "testfile"
disk, err := newXLStorage(tmpDir, "")
disk, err := newLocalXLStorage(tmpDir)
if err != nil {
t.Fatal(err)
}
disk.MakeVol(volume)
disk.MakeVol(context.Background(), volume)
writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
writer.(io.Closer).Close()
reader := newBitrotReader(disk, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
reader := newBitrotReader(disk, nil, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
b := make([]byte, 10)
if _, err = reader.ReadAt(b, 0); err != nil {
log.Fatal(err)
t.Fatal(err)
}
if _, err = reader.ReadAt(b, 10); err != nil {
log.Fatal(err)
t.Fatal(err)
}
if _, err = reader.ReadAt(b, 20); err != nil {
log.Fatal(err)
t.Fatal(err)
}
if _, err = reader.ReadAt(b[:5], 30); err != nil {
log.Fatal(err)
t.Fatal(err)
}
}


@@ -18,9 +18,7 @@ package cmd
import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@@ -29,7 +27,7 @@ import (
"time"
"github.com/gorilla/mux"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio-go/v7/pkg/set"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/cmd/rest"
@@ -54,7 +52,7 @@ type bootstrapRESTServer struct{}
type ServerSystemConfig struct {
MinioPlatform string
MinioRuntime string
MinioEndpoints EndpointZones
MinioEndpoints EndpointServerPools
}
// Diff - returns error on first difference found in two configs.
@@ -131,7 +129,7 @@ func (client *bootstrapRESTClient) callWithContext(ctx context.Context, method s
values = make(url.Values)
}
respBody, err = client.restClient.CallWithContext(ctx, method, values, body, length)
respBody, err = client.restClient.Call(ctx, method, values, body, length)
if err == nil {
return respBody, nil
}
@@ -161,9 +159,9 @@ func (client *bootstrapRESTClient) Verify(ctx context.Context, srcCfg ServerSyst
return srcCfg.Diff(recvCfg)
}
func verifyServerSystemConfig(ctx context.Context, endpointZones EndpointZones) error {
func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointServerPools) error {
srcCfg := getServerSystemCfg()
clnts := newBootstrapRESTClients(endpointZones)
clnts := newBootstrapRESTClients(endpointServerPools)
var onlineServers int
var offlineEndpoints []string
var retries int
@@ -178,25 +176,30 @@ func verifyServerSystemConfig(ctx context.Context, endpointZones EndpointZones)
}
onlineServers++
}
// Sleep for a while - so that we don't go into
// 100% CPU when half the endpoints are offline.
time.Sleep(500 * time.Millisecond)
retries++
// after 5 retries start logging that servers are not reachable yet
if retries >= 5 {
logger.Info(fmt.Sprintf("Waiting for at least %d servers to be online for bootstrap check", len(clnts)/2))
logger.Info(fmt.Sprintf("The following servers are currently offline or unreachable %s", offlineEndpoints))
retries = 0 // reset to log again after 5 retries.
select {
case <-ctx.Done():
return ctx.Err()
default:
// Sleep for a while - so that we don't go into
// 100% CPU when half the endpoints are offline.
time.Sleep(100 * time.Millisecond)
retries++
// after 5 retries start logging that servers are not reachable yet
if retries >= 5 {
logger.Info(fmt.Sprintf("Waiting for at least %d remote servers to be online for bootstrap check", len(clnts)/2))
logger.Info(fmt.Sprintf("The following servers are currently offline or unreachable %s", offlineEndpoints))
retries = 0 // reset to log again after 5 retries.
}
offlineEndpoints = nil
}
offlineEndpoints = nil
}
return nil
}
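
The retry loop above gains a select on ctx.Done(), so bootstrap no longer spins after shutdown. A condensed, self-contained sketch of that shape, with check standing in for "all remote servers verified" (illustrative names):

package main

import (
	"context"
	"time"
)

// verifyLoop keeps polling until the check passes, but aborts promptly
// once the context is cancelled, mirroring verifyServerSystemConfig above.
func verifyLoop(ctx context.Context, check func() bool) error {
	for !check() {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			time.Sleep(100 * time.Millisecond)
		}
	}
	return nil
}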
func newBootstrapRESTClients(endpointZones EndpointZones) []*bootstrapRESTClient {
func newBootstrapRESTClients(endpointServerPools EndpointServerPools) []*bootstrapRESTClient {
seenHosts := set.NewStringSet()
var clnts []*bootstrapRESTClient
for _, ep := range endpointZones {
for _, ep := range endpointServerPools {
for _, endpoint := range ep.Endpoints {
if seenHosts.Contains(endpoint.Host) {
continue
@@ -205,11 +208,7 @@ func newBootstrapRESTClients(endpointZones EndpointZones) []*bootstrapRESTClient
// Only proceed for remote endpoints.
if !endpoint.IsLocal {
clnt, err := newBootstrapRESTClient(endpoint)
if err != nil {
continue
}
clnts = append(clnts, clnt)
clnts = append(clnts, newBootstrapRESTClient(endpoint))
}
}
}
@@ -217,35 +216,15 @@ func newBootstrapRESTClients(endpointZones EndpointZones) []*bootstrapRESTClient
}
// Returns a new bootstrap client.
func newBootstrapRESTClient(endpoint Endpoint) (*bootstrapRESTClient, error) {
func newBootstrapRESTClient(endpoint Endpoint) *bootstrapRESTClient {
serverURL := &url.URL{
Scheme: endpoint.Scheme,
Host: endpoint.Host,
Path: bootstrapRESTPath,
}
var tlsConfig *tls.Config
if globalIsSSL {
tlsConfig = &tls.Config{
ServerName: endpoint.Hostname(),
RootCAs: globalRootCAs,
}
}
restClient := rest.NewClient(serverURL, globalInternodeTransport, newAuthToken)
restClient.HealthCheckFn = nil
trFn := newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)
restClient, err := rest.NewClient(serverURL, trFn, newAuthToken)
if err != nil {
return nil, err
}
restClient.HealthCheckFn = func() bool {
ctx, cancel := context.WithTimeout(GlobalContext, restClient.HealthCheckTimeout)
respBody, err := restClient.CallWithContext(ctx, bootstrapRESTMethodHealth, nil, nil, -1)
xhttp.DrainBody(respBody)
cancel()
var ne *rest.NetworkError
return !errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &ne)
}
return &bootstrapRESTClient{endpoint: endpoint, restClient: restClient}, nil
return &bootstrapRESTClient{endpoint: endpoint, restClient: restClient}
}


@@ -37,7 +37,7 @@ const (
func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketEncryption")
defer logger.AuditLog(w, r, "PutBucketEncryption", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
@@ -102,7 +102,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) GetBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketEncryption")
defer logger.AuditLog(w, r, "GetBucketEncryption", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
@@ -145,7 +145,7 @@ func (api objectAPIHandlers) GetBucketEncryptionHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketEncryption")
defer logger.AuditLog(w, r, "DeleteBucketEncryption", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {


@@ -34,7 +34,7 @@ func NewBucketSSEConfigSys() *BucketSSEConfigSys {
// Get - gets bucket encryption config for the given bucket.
func (sys *BucketSSEConfigSys) Get(bucket string) (*bucketsse.BucketSSEConfig, error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}


@@ -22,22 +22,28 @@ import (
"fmt"
"io"
"net/http"
"net/textproto"
"net/url"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"github.com/google/uuid"
"github.com/gorilla/mux"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio-go/v6/pkg/tags"
"github.com/minio/minio/cmd/config/etcd/dns"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/lifecycle"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/hash"
@@ -46,8 +52,9 @@ import (
)
const (
objectLockConfig = "object-lock.xml"
bucketTaggingConfig = "tagging.xml"
objectLockConfig = "object-lock.xml"
bucketTaggingConfig = "tagging.xml"
bucketReplicationConfig = "replication.xml"
)
// Check if there are buckets on server without corresponding entry in etcd backend and
@@ -68,7 +75,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// Get buckets in the DNS
dnsBuckets, err := globalDNSConfig.List()
if err != nil && err != dns.ErrNoEntriesFound {
if err != nil && !IsErrIgnored(err, dns.ErrNoEntriesFound, dns.ErrNotImplemented, dns.ErrDomainMissing) {
logger.LogIf(GlobalContext, err)
return
}
@@ -76,38 +83,53 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
bucketsSet := set.NewStringSet()
bucketsToBeUpdated := set.NewStringSet()
bucketsInConflict := set.NewStringSet()
for _, bucket := range buckets {
bucketsSet.Add(bucket.Name)
r, ok := dnsBuckets[bucket.Name]
if !ok {
bucketsToBeUpdated.Add(bucket.Name)
continue
}
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
// No difference in terms of domainIPs and nothing
// has changed, so we don't change anything on etcd.
// This means the domain was updated; we should update
// all bucket entries with the new domain name.
domainMissing := err == dns.ErrDomainMissing
if dnsBuckets != nil {
for _, bucket := range buckets {
bucketsSet.Add(bucket.Name)
r, ok := dnsBuckets[bucket.Name]
if !ok {
bucketsToBeUpdated.Add(bucket.Name)
continue
}
// If the domain IPs intersect, the set won't be empty;
// such an intersection means the bucket exists on etcd.
// But if we do see a difference between the local domain IPs and the
// hostSlice from etcd, we should update with the newer
// domainIPs; we proceed to do that here.
bucketsToBeUpdated.Add(bucket.Name)
continue
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() && !domainMissing {
// No difference in terms of domainIPs and nothing
// has changed, so we don't change anything on etcd.
//
// Additionally, check whether the domain is updated/missing with more
// entries; if that is the case, we should update the
// new domain entries as well.
continue
}
// If the domain IPs intersect, the set won't be empty;
// such an intersection means the bucket exists on etcd.
// But if we do see a difference between the local domain IPs and the
// hostSlice from etcd, we should update with the newer
// domainIPs; we proceed to do that here.
bucketsToBeUpdated.Add(bucket.Name)
continue
}
// No IPs seem to intersect; this means the bucket exists but has
// different IP addresses, perhaps from a different deployment.
// Bucket names are globally unique in federation at a given
// path prefix; name collisions are not allowed. We simply log
// an error and continue.
bucketsInConflict.Add(bucket.Name)
}
// No IPs seem to intersect; this means the bucket exists but has
// different IP addresses, perhaps from a different deployment.
// Bucket names are globally unique in federation at a given
// path prefix; name collisions are not allowed. We simply log
// an error and continue.
bucketsInConflict.Add(bucket.Name)
}
// Add/update buckets that are not registered with the DNS
g := errgroup.WithNErrs(len(buckets))
bucketsToBeUpdatedSlice := bucketsToBeUpdated.ToSlice()
g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50)
ctx, cancel := g.WithCancelOnError(GlobalContext)
defer cancel()
for index := range bucketsToBeUpdatedSlice {
index := index
g.Go(func() error {
@@ -115,16 +137,16 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
}, index)
}
for _, err := range g.Wait() {
if err != nil {
logger.LogIf(GlobalContext, err)
}
if err := g.WaitErr(); err != nil {
logger.LogIf(ctx, err)
return
}
for _, bucket := range bucketsInConflict.ToSlice() {
logger.LogIf(GlobalContext, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket. Use one of these IP addresses %v to access the bucket", bucket, globalDomainIPs.ToSlice()))
logger.LogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice()))
}
var wg sync.WaitGroup
// Remove buckets that are in DNS for this server, but aren't local
for bucket, records := range dnsBuckets {
if bucketsSet.Contains(bucket) {
@@ -136,13 +158,18 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
continue
}
// If we reach here, the bucket no longer exists,
// but is still registered in DNS for this server
if err = globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
bucket, err))
}
wg.Add(1)
go func(bucket string) {
defer wg.Done()
// If we reach here, the bucket no longer exists,
// but is still registered in DNS for this server
if err := globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
bucket, err))
}
}(bucket)
}
wg.Wait()
}
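
The cleanup above moves stale-DNS-entry removal into per-bucket goroutines joined by a sync.WaitGroup. A generic sketch of that fan-out, with del as an illustrative callback (errors are logged by the caller in the real code):

package main

import "sync"

// deleteAll fires one goroutine per entry and waits for all of them,
// mirroring the parallel DNS deletions above.
func deleteAll(entries []string, del func(string) error) {
	var wg sync.WaitGroup
	for _, e := range entries {
		wg.Add(1)
		go func(e string) {
			defer wg.Done()
			_ = del(e)
		}(e)
	}
	wg.Wait()
}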
// GetBucketLocationHandler - GET Bucket location.
@@ -151,7 +178,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketLocation")
defer logger.AuditLog(w, r, "GetBucketLocation", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -199,7 +226,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListMultipartUploads")
defer logger.AuditLog(w, r, "ListMultipartUploads", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -254,7 +281,7 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListBuckets")
defer logger.AuditLog(w, r, "ListBuckets", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
@@ -274,7 +301,9 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
var bucketsInfo []BucketInfo
if globalDNSConfig != nil && globalBucketFederation {
dnsBuckets, err := globalDNSConfig.List()
if err != nil && err != dns.ErrNoEntriesFound {
if err != nil && !IsErrIgnored(err,
dns.ErrNoEntriesFound,
dns.ErrDomainMissing) {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -284,6 +313,11 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
Created: dnsRecords[0].CreationDate,
})
}
sort.Slice(bucketsInfo, func(i, j int) bool {
return bucketsInfo[i].Name < bucketsInfo[j].Name
})
} else {
// Invoke the list buckets.
var err error
@@ -341,7 +375,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteMultipleObjects")
defer logger.AuditLog(w, r, "DeleteMultipleObjects", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -377,6 +411,10 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
return
}
// Call checkRequestAuthType to populate ReqInfo.AccessKey before GetBucketInfo()
// Ignore errors here to preserve the S3 error behavior of GetBucketInfo()
checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, "")
// Before proceeding validate if bucket exists.
_, err := objectAPI.GetBucketInfo(ctx, bucket)
if err != nil {
@@ -389,12 +427,29 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
deleteObjectsFn = api.CacheAPI().DeleteObjects
}
// Return MalformedXML, as per the S3 spec, if the list of objects is empty
if len(deleteObjects.Objects) == 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
return
}
var objectsToDelete = map[ObjectToDelete]int{}
getObjectInfoFn := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfoFn = api.CacheAPI().GetObjectInfo
}
var (
hasLockEnabled, hasLifecycleConfig, replicateSync bool
goi ObjectInfo
gerr error
)
replicateDeletes := hasReplicationRules(ctx, bucket, deleteObjects.Objects)
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
hasLockEnabled = true
}
if _, err := globalBucketMetadataSys.GetLifecycleConfig(bucket); err == nil {
hasLifecycleConfig = true
}
dErrs := make([]DeleteError, len(deleteObjects.Objects))
for index, object := range deleteObjects.Objects {
if apiErrCode := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); apiErrCode != ErrNone {
@@ -411,10 +466,55 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}
continue
}
if object.VersionID != "" && object.VersionID != nullVersionID {
if _, err := uuid.Parse(object.VersionID); err != nil {
logger.LogIf(ctx, fmt.Errorf("invalid version-id specified %w", err))
apiErr := errorCodes.ToAPIErr(ErrNoSuchVersion)
dErrs[index] = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
VersionID: object.VersionID,
}
continue
}
}
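
The new block above rejects any non-null version ID that does not parse as a UUID, mapping the failure to ErrNoSuchVersion. A minimal restatement of that check (the helper name is illustrative; uuid.Parse is from github.com/google/uuid, already imported in this file):

package main

import "github.com/google/uuid"

// validVersionID reports whether a version ID may proceed to deletion:
// empty and "null" IDs are allowed, anything else must be a valid UUID.
func validVersionID(vid string) bool {
	if vid == "" || vid == "null" {
		return true // unversioned or the null version
	}
	_, err := uuid.Parse(vid)
	return err == nil
}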
if replicateDeletes || hasLockEnabled || hasLifecycleConfig {
goi, gerr = getObjectInfoFn(ctx, bucket, object.ObjectName, ObjectOptions{
VersionID: object.VersionID,
})
}
if hasLifecycleConfig && gerr == nil {
object.PurgeTransitioned = goi.TransitionStatus
}
if replicateDeletes {
delMarker, replicate, repsync := checkReplicateDelete(ctx, bucket, ObjectToDelete{
ObjectName: object.ObjectName,
VersionID: object.VersionID,
}, goi, gerr)
replicateSync = repsync
if replicate {
if apiErrCode := checkRequestAuthType(ctx, r, policy.ReplicateDeleteAction, bucket, object.ObjectName); apiErrCode != ErrNone {
if apiErrCode == ErrSignatureDoesNotMatch || apiErrCode == ErrInvalidAccessKeyID {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL, guessIsBrowserReq(r))
return
}
continue
}
if object.VersionID != "" {
object.VersionPurgeStatus = Pending
if delMarker {
object.DeleteMarkerVersionID = object.VersionID
}
} else {
object.DeleteMarkerReplicationStatus = string(replication.Pending)
}
}
}
if object.VersionID != "" {
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, getObjectInfoFn); apiErrCode != ErrNone {
if hasLockEnabled {
if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, goi, gerr); apiErrCode != ErrNone {
apiErr := errorCodes.ToAPIErr(apiErrCode)
dErrs[index] = DeleteError{
Code: apiErr.Code,
@@ -445,17 +545,27 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
deleteList := toNames(objectsToDelete)
dObjects, errs := deleteObjectsFn(ctx, bucket, deleteList, ObjectOptions{
Versioned: globalBucketVersioningSys.Enabled(bucket),
Versioned: globalBucketVersioningSys.Enabled(bucket),
VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
})
deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))
for i := range errs {
dindex := objectsToDelete[deleteList[i]]
apiErr := toAPIError(ctx, errs[i])
if apiErr.Code == "" || apiErr.Code == "NoSuchKey" {
dindex := objectsToDelete[ObjectToDelete{
ObjectName: dObjects[i].ObjectName,
VersionID: dObjects[i].VersionID,
VersionPurgeStatus: dObjects[i].VersionPurgeStatus,
DeleteMarkerReplicationStatus: dObjects[i].DeleteMarkerReplicationStatus,
PurgeTransitioned: dObjects[i].PurgeTransitioned,
}]
if errs[i] == nil || isErrObjectNotFound(errs[i]) || isErrVersionNotFound(errs[i]) {
if replicateDeletes {
dObjects[i].DeleteMarkerReplicationStatus = deleteList[i].DeleteMarkerReplicationStatus
dObjects[i].VersionPurgeStatus = deleteList[i].VersionPurgeStatus
}
deletedObjects[dindex] = dObjects[i]
continue
}
apiErr := toAPIError(ctx, errs[i])
dErrs[dindex] = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
@@ -477,22 +587,43 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
// Notify deleted event for objects.
for _, dobj := range deletedObjects {
if dobj.ObjectName == "" {
continue
}
if replicateDeletes {
if dobj.DeleteMarkerReplicationStatus == string(replication.Pending) || dobj.VersionPurgeStatus == Pending {
dv := DeletedObjectVersionInfo{
DeletedObject: dobj,
Bucket: bucket,
}
scheduleReplicationDelete(ctx, dv, objectAPI, replicateSync)
}
}
if hasLifecycleConfig && dobj.PurgeTransitioned == lifecycle.TransitionComplete { // clean up transitioned tier
deleteTransitionedObject(ctx, objectAPI, bucket, dobj.ObjectName, lifecycle.ObjectOpts{
Name: dobj.ObjectName,
VersionID: dobj.VersionID,
DeleteMarker: dobj.DeleteMarker,
}, false, true)
}
eventName := event.ObjectRemovedDelete
objInfo := ObjectInfo{
Name: dobj.ObjectName,
VersionID: dobj.VersionID,
}
if dobj.DeleteMarker {
objInfo = ObjectInfo{
Name: dobj.ObjectName,
DeleteMarker: dobj.DeleteMarker,
VersionID: dobj.DeleteMarkerVersionID,
}
objInfo.DeleteMarker = dobj.DeleteMarker
objInfo.VersionID = dobj.DeleteMarkerVersionID
eventName = event.ObjectRemovedDeleteMarkerCreated
}
sendEvent(eventArgs{
EventName: event.ObjectRemovedDelete,
EventName: eventName,
BucketName: bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
@@ -509,7 +640,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucket")
defer logger.AuditLog(w, r, "PutBucket", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
@@ -557,7 +688,9 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
if globalDNSConfig != nil {
sr, err := globalDNSConfig.Get(bucket)
if err != nil {
if err == dns.ErrNoEntriesFound {
// ErrNotImplemented indicates a DNS backend that doesn't need to check if bucket already
// exists elsewhere
if err == dns.ErrNoEntriesFound || err == dns.ErrNotImplemented {
// Proceed to creating a bucket.
if err = objectAPI.MakeBucketWithLocation(ctx, bucket, opts); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
@@ -578,6 +711,16 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
getObjectLocation(r, globalDomainNames, bucket, ""))
writeSuccessResponseHeadersOnly(w)
sendEvent(eventArgs{
EventName: event.BucketCreated,
BucketName: bucket,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
return
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
@@ -610,6 +753,15 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
w.Header().Set(xhttp.Location, path.Clean(r.URL.Path)) // Clean any trailing slashes.
writeSuccessResponseHeadersOnly(w)
sendEvent(eventArgs{
EventName: event.BucketCreated,
BucketName: bucket,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
}
// PostPolicyBucketHandler - POST policy
@@ -619,7 +771,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PostPolicyBucket")
defer logger.AuditLog(w, r, "PostPolicyBucket", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
@@ -631,16 +783,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
if !api.EncryptionEnabled() && crypto.IsRequested(r.Header) {
if _, ok := crypto.IsRequested(r.Header); !objectAPI.IsEncryptionSupported() && ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
bucket := mux.Vars(r)["bucket"]
// To detect if the client has disconnected.
r.Body = &contextReader{r.Body, r.Context()}
// Require Content-Length to be set in the request
size := r.ContentLength
if size < 0 {
@@ -739,7 +889,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// Make sure formValues adhere to policy restrictions.
if err = checkPostPolicy(formValues, postPolicyForm); err != nil {
writeCustomErrorResponseXML(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), err.Error(), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, errorCodes.ToAPIErrWithErr(ErrAccessDenied, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -761,27 +911,27 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// Extract metadata to be saved from received Form.
metadata := make(map[string]string)
err = extractMetadataFromMap(ctx, formValues, metadata)
err = extractMetadataFromMime(ctx, textproto.MIMEHeader(formValues), metadata)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize, globalCLIContext.StrictS3Compat)
hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
rawReader := hashReader
pReader := NewPutObjReader(rawReader, nil, nil)
pReader := NewPutObjReader(rawReader)
var objectEncryptionKey crypto.ObjectKey
// Check if bucket encryption is enabled
if _, err = globalBucketSSEConfigSys.Get(bucket); err == nil || globalAutoEncryption {
// This request header needs to be set prior to setting ObjectOptions
if !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
r.Header.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
}
}
@@ -793,7 +943,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
return
}
if objectAPI.IsEncryptionSupported() {
if crypto.IsRequested(formValues) && !HasSuffix(object, SlashSeparator) { // handle SSE requests
if _, ok := crypto.IsRequested(formValues); ok && !HasSuffix(object, SlashSeparator) { // handle SSE requests
if crypto.SSECopy.IsRequested(r.Header) {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL, guessIsBrowserReq(r))
return
@@ -814,12 +964,16 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
info := ObjectInfo{Size: fileSize}
// do not try to verify encrypted content
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize, globalCLIContext.StrictS3Compat)
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
pReader = NewPutObjReader(rawReader, hashReader, &objectEncryptionKey)
}
}
@@ -885,7 +1039,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "HeadBucket")
defer logger.AuditLog(w, r, "HeadBucket", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -915,7 +1069,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucket")
defer logger.AuditLog(w, r, "DeleteBucket", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -976,7 +1130,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
if globalDNSConfig != nil {
if err := globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually using etcdctl", err))
logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually", err))
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -984,6 +1138,15 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
// Write success response.
writeSuccessNoContent(w)
sendEvent(eventArgs{
EventName: event.BucketRemoved,
BucketName: bucket,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
}
// PutBucketObjectLockConfigHandler - PUT Bucket object lock configuration.
@@ -994,7 +1157,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketObjectLockConfig")
defer logger.AuditLog(w, r, "PutBucketObjectLockConfig", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1004,7 +1167,10 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketObjectLockConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
@@ -1047,7 +1213,7 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketObjectLockConfig")
defer logger.AuditLog(w, r, "GetBucketObjectLockConfig", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1085,7 +1251,7 @@ func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWri
func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketTagging")
defer logger.AuditLog(w, r, "PutBucketTagging", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1129,7 +1295,7 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketTagging")
defer logger.AuditLog(w, r, "GetBucketTagging", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1167,7 +1333,7 @@ func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *h
func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketTagging")
defer logger.AuditLog(w, r, "DeleteBucketTagging", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1191,3 +1357,141 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r
// Write success response.
writeSuccessResponseHeadersOnly(w)
}
// PutBucketReplicationConfigHandler - PUT Bucket replication configuration.
// ----------
// Add a replication configuration on the specified bucket as specified in https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html
func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketReplicationConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if versioned := globalBucketVersioningSys.Enabled(bucket); !versioned {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationNeedsVersioningError), r.URL, guessIsBrowserReq(r))
return
}
replicationConfig, err := replication.ParseConfig(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
apiErr.Description = err.Error()
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
return
}
sameTarget, err := validateReplicationDestination(ctx, bucket, replicationConfig)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Validate the received bucket replication config
if err = replicationConfig.Validate(bucket, sameTarget); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configData, err := xml.Marshal(replicationConfig)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketReplicationConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write success response.
writeSuccessResponseHeadersOnly(w)
}
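The handler above decodes the XML body (capped at Content-Length) before validating and persisting it. A minimal, self-contained sketch of that parse step, using a simplified stand-in struct rather than MinIO's replication.Config (field names follow the AWS ReplicationConfiguration schema):

package main

import (
    "encoding/xml"
    "fmt"
    "io"
    "strings"
)

// Simplified mirror of the S3 ReplicationConfiguration schema; illustrative only.
type replicationConfig struct {
    XMLName xml.Name `xml:"ReplicationConfiguration"`
    Role    string   `xml:"Role"`
    Rules   []struct {
        Status      string `xml:"Status"`
        Destination struct {
            Bucket string `xml:"Bucket"`
        } `xml:"Destination"`
    } `xml:"Rule"`
}

func main() {
    body := `<ReplicationConfiguration>
  <Role>arn:minio:replication::id:source</Role>
  <Rule>
    <Status>Enabled</Status>
    <Destination><Bucket>arn:aws:s3:::dest-bucket</Bucket></Destination>
  </Rule>
</ReplicationConfiguration>`

    var cfg replicationConfig
    // Mirrors io.LimitReader(r.Body, r.ContentLength) in the handler.
    if err := xml.NewDecoder(io.LimitReader(strings.NewReader(body), int64(len(body)))).Decode(&cfg); err != nil {
        fmt.Println("malformed XML:", err)
        return
    }
    fmt.Println(cfg.Rules[0].Destination.Bucket, cfg.Rules[0].Status) // arn:aws:s3:::dest-bucket Enabled
}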
// GetBucketReplicationConfigHandler - GET Bucket replication configuration.
// ----------
// Gets the replication configuration for a bucket.
func (api objectAPIHandlers) GetBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketReplicationConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
// check if user has permissions to perform this operation
if s3Error := checkRequestAuthType(ctx, r, policy.GetReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
config, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write success response.
writeSuccessResponseXML(w, configData)
}
// DeleteBucketReplicationConfigHandler - DELETE Bucket replication config.
// ----------
func (api objectAPIHandlers) DeleteBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketReplicationConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err := globalBucketMetadataSys.Update(bucket, bucketReplicationConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write success response.
writeSuccessResponseHeadersOnly(w)
}
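DeleteBucketReplicationConfigHandler clears the stored configuration by calling globalBucketMetadataSys.Update with a nil payload. A toy sketch of that set/clear contract, assuming a hypothetical in-memory store (not MinIO's actual metadata system):

package main

import (
    "fmt"
    "sync"
)

// configStore is a toy stand-in: Update with a nil payload removes the
// per-bucket config, mirroring how the DELETE handler clears
// bucketReplicationConfig.
type configStore struct {
    mu   sync.Mutex
    data map[string][]byte
}

func (s *configStore) Update(bucket, key string, payload []byte) {
    s.mu.Lock()
    defer s.mu.Unlock()
    k := bucket + "/" + key
    if payload == nil {
        delete(s.data, k)
        return
    }
    s.data[k] = payload
}

func main() {
    s := &configStore{data: map[string][]byte{}}
    s.Update("photos", "replication.xml", []byte("<ReplicationConfiguration/>"))
    s.Update("photos", "replication.xml", nil) // DELETE semantics
    fmt.Println(len(s.data))                   // 0
}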


@@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"encoding/xml"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
@@ -36,7 +35,7 @@ func TestRemoveBucketHandler(t *testing.T) {
func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
_, err := obj.PutObject(GlobalContext, bucketName, "test-object", mustGetPutObjReader(t, bytes.NewBuffer([]byte{}), int64(0), "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), ObjectOptions{})
_, err := obj.PutObject(GlobalContext, bucketName, "test-object", mustGetPutObjReader(t, bytes.NewReader([]byte{}), int64(0), "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), ObjectOptions{})
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Error uploading object: <ERROR> %v", err)
@@ -45,7 +44,7 @@ func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, a
// initialize httptest Recorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for DELETE bucket.
req, err := newTestSignedRequestV4("DELETE", getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
req, err := newTestSignedRequestV4(http.MethodDelete, getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for RemoveBucketHandler: <ERROR> %v", instanceType, err)
}
@@ -61,7 +60,7 @@ func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, a
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for DELETE bucket.
reqV2, err := newTestSignedRequestV2("DELETE", getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
reqV2, err := newTestSignedRequestV2(http.MethodDelete, getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for RemoveBucketHandler: <ERROR> %v", instanceType, err)
}
@@ -129,7 +128,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
// initialize httptest Recorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for Get bucket location.
req, err := newTestSignedRequestV4("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
req, err := newTestSignedRequestV4(http.MethodGet, getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for GetBucketLocationHandler: <ERROR> %v", i+1, instanceType, err)
}
@@ -161,7 +160,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
reqV2, err := newTestSignedRequestV2(http.MethodGet, getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@@ -192,7 +191,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
// Test for Anonymous/unsigned http request.
// ListBucketsHandler doesn't support bucket policies, setting the policies shouldn't make any difference.
anonReq, err := newTestRequest("GET", getBucketLocationURL("", bucketName), 0, nil)
anonReq, err := newTestRequest(http.MethodGet, getBucketLocationURL("", bucketName), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request.", instanceType)
}
@@ -208,7 +207,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
// The only aim is to generate an HTTP request in a way that the relevant/registered end point is evoked/called.
nilBucket := "dummy-bucket"
nilReq, err := newTestRequest("GET", getBucketLocationURL("", nilBucket), 0, nil)
nilReq, err := newTestRequest(http.MethodGet, getBucketLocationURL("", nilBucket), 0, nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@@ -265,7 +264,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for HEAD bucket.
req, err := newTestSignedRequestV4("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
req, err := newTestSignedRequestV4(http.MethodHead, getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for HeadBucketHandler: <ERROR> %v", i+1, instanceType, err)
}
@@ -280,7 +279,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
reqV2, err := newTestSignedRequestV2(http.MethodHead, getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@@ -295,7 +294,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
}
// Test for Anonymous/unsigned http request.
anonReq, err := newTestRequest("HEAD", getHEADBucketURL("", bucketName), 0, nil)
anonReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", bucketName), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
@@ -313,7 +312,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
// The only aim is to generate an HTTP request in a way that the relevant/registered end point is evoked/called.
nilBucket := "dummy-bucket"
nilReq, err := newTestRequest("HEAD", getHEADBucketURL("", nilBucket), 0, nil)
nilReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", nilBucket), 0, nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@@ -482,7 +481,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
// construct HTTP request for List multipart uploads endpoint.
u := getListMultipartUploadsURLWithParams("", testCase.bucket, testCase.prefix, testCase.keyMarker, testCase.uploadIDMarker, testCase.delimiter, testCase.maxUploads)
req, gerr := newTestSignedRequestV4("GET", u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
req, gerr := newTestSignedRequestV4(http.MethodGet, u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
if gerr != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", i+1, instanceType, gerr)
}
@@ -499,7 +498,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
// construct HTTP request for PUT bucket policy endpoint.
// verify response for V2 signed HTTP request.
reqV2, err := newTestSignedRequestV2("GET", u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
reqV2, err := newTestSignedRequestV2(http.MethodGet, u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
}
@@ -516,7 +515,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
// construct HTTP request for List multipart uploads endpoint.
u := getListMultipartUploadsURLWithParams("", bucketName, "", "", "", "", "")
req, err := newTestSignedRequestV4("GET", u, 0, nil, "", "", nil) // Generate an anonymous request.
req, err := newTestSignedRequestV4(http.MethodGet, u, 0, nil, "", "", nil) // Generate an anonymous request.
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", instanceType, err)
}
@@ -530,7 +529,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
url := getListMultipartUploadsURLWithParams("", testCases[6].bucket, testCases[6].prefix, testCases[6].keyMarker,
testCases[6].uploadIDMarker, testCases[6].delimiter, testCases[6].maxUploads)
// Test for Anonymous/unsigned http request.
anonReq, err := newTestRequest("GET", url, 0, nil)
anonReq, err := newTestRequest(http.MethodGet, url, 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
instanceType, bucketName, err)
@@ -550,7 +549,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
url = getListMultipartUploadsURLWithParams("", nilBucket, "dummy-prefix", testCases[6].keyMarker,
testCases[6].uploadIDMarker, testCases[6].delimiter, testCases[6].maxUploads)
nilReq, err := newTestRequest("GET", url, 0, nil)
nilReq, err := newTestRequest(http.MethodGet, url, 0, nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@@ -596,7 +595,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
req, lerr := newTestSignedRequestV4("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
req, lerr := newTestSignedRequestV4(http.MethodGet, getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if lerr != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for ListBucketsHandler: <ERROR> %v", i+1, instanceType, lerr)
}
@@ -613,7 +612,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
// construct HTTP request for PUT bucket policy endpoint.
// verify response for V2 signed HTTP request.
reqV2, err := newTestSignedRequestV2("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
reqV2, err := newTestSignedRequestV2(http.MethodGet, getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@@ -628,7 +627,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
// Test for Anonymous/unsigned http request.
// ListBucketsHandler doesn't support bucket policies, setting the policies shouldn't make a difference.
anonReq, err := newTestRequest("GET", getListBucketURL(""), 0, nil)
anonReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request.", instanceType)
@@ -644,7 +643,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
// since the `objectLayer==nil` check is performed before any other checks inside the handlers.
// The only aim is to generate an HTTP request in a way that the relevant/registered end point is evoked/called.
nilReq, err := newTestRequest("GET", getListBucketURL(""), 0, nil)
nilReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@@ -670,7 +669,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
for i := 0; i < 10; i++ {
objectName := "test-object-" + strconv.Itoa(i)
// uploading the object.
_, err = obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
_, err = obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)
@@ -808,10 +807,10 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
// Generate a signed or anonymous request based on the testCase
if testCase.accessKey != "" {
req, err = newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", bucketName),
req, err = newTestSignedRequestV4(http.MethodPost, getDeleteMultipleObjectsURL("", bucketName),
int64(len(testCase.objects)), bytes.NewReader(testCase.objects), testCase.accessKey, testCase.secretKey, nil)
} else {
req, err = newTestRequest("POST", getDeleteMultipleObjectsURL("", bucketName),
req, err = newTestRequest(http.MethodPost, getDeleteMultipleObjectsURL("", bucketName),
int64(len(testCase.objects)), bytes.NewReader(testCase.objects))
}
@@ -837,7 +836,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
// Verify whether the bucket obtained object is same as the one created.
if testCase.expectedContent != nil && !bytes.Equal(testCase.expectedContent, actualContent) {
fmt.Println(string(testCase.expectedContent), string(actualContent))
t.Log(string(testCase.expectedContent), string(actualContent))
t.Errorf("Test %d : MinIO %s: Object content differs from expected value.", i+1, instanceType)
}
}
@@ -850,7 +849,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
nilBucket := "dummy-bucket"
nilObject := ""
nilReq, err := newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", nilBucket), 0, nil, "", "", nil)
nilReq, err := newTestSignedRequestV4(http.MethodPost, getDeleteMultipleObjectsURL("", nilBucket), 0, nil, "", "", nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}


@@ -38,7 +38,7 @@ const (
func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketLifecycle")
defer logger.AuditLog(w, r, "PutBucketLifecycle", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
@@ -78,6 +78,12 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
return
}
// Validate the transition storage ARNs
if err = validateLifecycleTransition(ctx, bucket, bucketLifecycle); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configData, err := xml.Marshal(bucketLifecycle)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
@@ -97,7 +103,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketLifecycle")
defer logger.AuditLog(w, r, "GetBucketLifecycle", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
@@ -139,7 +145,7 @@ func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketLifecycle")
defer logger.AuditLog(w, r, "DeleteBucketLifecycle", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {


@@ -50,7 +50,7 @@ func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType,
}{
// GET empty credentials
{
method: "GET", bucketName: bucketName,
method: http.MethodGet, bucketName: bucketName,
accessKey: "",
secretKey: "",
expectedRespStatus: http.StatusForbidden,
@@ -64,7 +64,7 @@ func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType,
},
// GET wrong credentials
{
method: "GET", bucketName: bucketName,
method: http.MethodGet, bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
@@ -78,7 +78,7 @@ func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType,
},
// PUT empty credentials
{
method: "PUT",
method: http.MethodPut,
bucketName: bucketName,
accessKey: "",
secretKey: "",
@@ -93,7 +93,7 @@ func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType,
},
// PUT wrong credentials
{
method: "PUT",
method: http.MethodPut,
bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
@@ -108,7 +108,7 @@ func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType,
},
// DELETE empty credentials
{
method: "DELETE",
method: http.MethodDelete,
bucketName: bucketName,
accessKey: "",
secretKey: "",
@@ -123,7 +123,7 @@ func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType,
},
// DELETE wrong credentials
{
method: "DELETE",
method: http.MethodDelete,
bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
@@ -168,7 +168,7 @@ func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName strin
// Test case - 1.
// Filter contains more than (Prefix,Tag,And) rule
{
method: "PUT",
method: http.MethodPut,
bucketName: bucketName,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
@@ -185,7 +185,7 @@ func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName strin
},
// Date contains wrong format
{
method: "PUT",
method: http.MethodPut,
bucketName: bucketName,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
@@ -201,7 +201,7 @@ func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName strin
shouldPass: false,
},
{
method: "PUT",
method: http.MethodPut,
bucketName: bucketName,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
@@ -212,7 +212,7 @@ func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName strin
shouldPass: true,
},
{
method: "GET",
method: http.MethodGet,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
bucketName: bucketName,
@@ -223,7 +223,7 @@ func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName strin
shouldPass: true,
},
{
method: "DELETE",
method: http.MethodDelete,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
bucketName: bucketName,
@@ -234,7 +234,7 @@ func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName strin
shouldPass: true,
},
{
method: "GET",
method: http.MethodGet,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
bucketName: bucketName,


@@ -17,7 +17,25 @@
package cmd
import (
"context"
"encoding/xml"
"fmt"
"io"
"net/http"
"runtime"
"strings"
"time"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/tags"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
sse "github.com/minio/minio/pkg/bucket/encryption"
"github.com/minio/minio/pkg/bucket/lifecycle"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/s3select"
)
const (
@@ -31,7 +49,7 @@ type LifecycleSys struct{}
// Get - gets lifecycle config associated to a given bucket name.
func (sys *LifecycleSys) Get(bucketName string) (lc *lifecycle.Lifecycle, err error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
@@ -46,3 +64,644 @@ func (sys *LifecycleSys) Get(bucketName string) (lc *lifecycle.Lifecycle, err er
func NewLifecycleSys() *LifecycleSys {
return &LifecycleSys{}
}
type expiryTask struct {
objInfo ObjectInfo
versionExpiry bool
}
type expiryState struct {
expiryCh chan expiryTask
}
func (es *expiryState) queueExpiryTask(oi ObjectInfo, rmVersion bool) {
select {
case es.expiryCh <- expiryTask{objInfo: oi, versionExpiry: rmVersion}:
default:
}
}
var (
globalExpiryState *expiryState
)
func newExpiryState() *expiryState {
es := &expiryState{
expiryCh: make(chan expiryTask, 10000),
}
go func() {
<-GlobalContext.Done()
close(es.expiryCh)
}()
return es
}
func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) {
globalExpiryState = newExpiryState()
go func() {
for t := range globalExpiryState.expiryCh {
applyExpiryRule(ctx, objectAPI, t.objInfo, false, t.versionExpiry)
}
}()
}
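queueExpiryTask enqueues with a select/default so a full channel drops the task instead of blocking the caller, and a helper goroutine closes the channel once GlobalContext is cancelled. A runnable sketch of that pattern, with a small buffer so the drop is visible:

package main

import (
    "context"
    "fmt"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    tasks := make(chan string, 2) // the real channel is ObjectInfo-typed, size 10000

    queue := func(name string) {
        select {
        case tasks <- name:
        default: // queue full: drop rather than block the caller
        }
    }

    // Close the channel on shutdown, like newExpiryState does.
    go func() { <-ctx.Done(); close(tasks) }()

    queue("a")
    queue("b")
    queue("c") // buffer of 2 is full and nothing is draining yet: silently dropped

    cancel() // simulate server shutdown; the goroutine above closes the channel

    for t := range tasks { // drains "a" and "b", then the close ends the loop
        fmt.Println("expiring", t)
    }
}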
type transitionState struct {
// add future metrics here
transitionCh chan ObjectInfo
}
func (t *transitionState) queueTransitionTask(oi ObjectInfo) {
select {
case t.transitionCh <- oi:
default:
}
}
var (
globalTransitionState *transitionState
globalTransitionConcurrent = runtime.GOMAXPROCS(0) / 2
)
func newTransitionState() *transitionState {
// fix minimum concurrent transition to 1 for single CPU setup
if globalTransitionConcurrent == 0 {
globalTransitionConcurrent = 1
}
ts := &transitionState{
transitionCh: make(chan ObjectInfo, 10000),
}
go func() {
<-GlobalContext.Done()
close(ts.transitionCh)
}()
return ts
}
// addWorker creates a new worker to process tasks
func (t *transitionState) addWorker(ctx context.Context, objectAPI ObjectLayer) {
// Add a new worker.
go func() {
for {
select {
case <-ctx.Done():
return
case oi, ok := <-t.transitionCh:
if !ok {
return
}
if err := transitionObject(ctx, objectAPI, oi); err != nil {
logger.LogIf(ctx, err)
}
}
}
}()
}
func initBackgroundTransition(ctx context.Context, objectAPI ObjectLayer) {
if globalTransitionState == nil {
return
}
// Start with globalTransitionConcurrent.
for i := 0; i < globalTransitionConcurrent; i++ {
globalTransitionState.addWorker(ctx, objectAPI)
}
}
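initBackgroundTransition starts GOMAXPROCS/2 workers (at least one) that all consume the shared transition channel and exit on context cancellation or channel close. A self-contained sketch of the same worker-pool shape:

package main

import (
    "context"
    "fmt"
    "runtime"
    "sync"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    n := runtime.GOMAXPROCS(0) / 2
    if n == 0 {
        n = 1 // single-CPU setups still get one worker
    }

    work := make(chan int, 16)
    var wg sync.WaitGroup
    for i := 0; i < n; i++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            for {
                select {
                case <-ctx.Done():
                    return
                case v, ok := <-work:
                    if !ok {
                        return // channel closed: drain complete
                    }
                    fmt.Printf("worker %d transitioned object %d\n", id, v)
                }
            }
        }(i)
    }

    for v := 0; v < 8; v++ {
        work <- v
    }
    close(work)
    wg.Wait()
}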
func validateLifecycleTransition(ctx context.Context, bucket string, lfc *lifecycle.Lifecycle) error {
for _, rule := range lfc.Rules {
if rule.Transition.StorageClass != "" {
sameTarget, destbucket, err := validateTransitionDestination(ctx, bucket, rule.Transition.StorageClass)
if err != nil {
return err
}
if sameTarget && destbucket == bucket {
return fmt.Errorf("Transition destination cannot be the same as the source bucket")
}
}
}
return nil
}
// validateTransitionDestination returns an error if the transition destination bucket is missing or not configured.
// It also returns true if the transition destination is the same as this server.
func validateTransitionDestination(ctx context.Context, bucket string, targetLabel string) (bool, string, error) {
tgt := globalBucketTargetSys.GetRemoteTargetWithLabel(ctx, bucket, targetLabel)
if tgt == nil {
return false, "", BucketRemoteTargetNotFound{Bucket: bucket}
}
arn, err := madmin.ParseARN(tgt.Arn)
if err != nil {
return false, "", BucketRemoteTargetNotFound{Bucket: bucket}
}
if arn.Type != madmin.ILMService {
return false, "", BucketRemoteArnTypeInvalid{}
}
clnt := globalBucketTargetSys.GetRemoteTargetClient(ctx, tgt.Arn)
if clnt == nil {
return false, "", BucketRemoteTargetNotFound{Bucket: bucket}
}
if found, _ := clnt.BucketExists(ctx, arn.Bucket); !found {
return false, "", BucketRemoteDestinationNotFound{Bucket: arn.Bucket}
}
sameTarget, _ := isLocalHost(clnt.EndpointURL().Hostname(), clnt.EndpointURL().Port(), globalMinioPort)
return sameTarget, arn.Bucket, nil
}
// transitionSC returns storage class label for this bucket
func transitionSC(ctx context.Context, bucket string) string {
cfg, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
if err != nil {
return ""
}
for _, rule := range cfg.Rules {
if rule.Status == Disabled {
continue
}
if rule.Transition.StorageClass != "" {
return rule.Transition.StorageClass
}
}
return ""
}
// return true if the ARN representing the transition storage class is present in an active rule
// for the lifecycle configured on this bucket
func transitionSCInUse(ctx context.Context, lfc *lifecycle.Lifecycle, bucket, arnStr string) bool {
tgtLabel := globalBucketTargetSys.GetRemoteLabelWithArn(ctx, bucket, arnStr)
if tgtLabel == "" {
return false
}
for _, rule := range lfc.Rules {
if rule.Status == Disabled {
continue
}
if rule.Transition.StorageClass != "" && rule.Transition.StorageClass == tgtLabel {
return true
}
}
return false
}
// set PutObjectOptions for PUT operation to transition data to target cluster
func putTransitionOpts(objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
meta := make(map[string]string)
putOpts = miniogo.PutObjectOptions{
UserMetadata: meta,
ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding,
StorageClass: objInfo.StorageClass,
Internal: miniogo.AdvancedPutOptions{
SourceVersionID: objInfo.VersionID,
SourceMTime: objInfo.ModTime,
SourceETag: objInfo.ETag,
},
}
if objInfo.UserTags != "" {
tag, _ := tags.ParseObjectTags(objInfo.UserTags)
if tag != nil {
putOpts.UserTags = tag.ToMap()
}
}
lkMap := caseInsensitiveMap(objInfo.UserDefined)
if lang, ok := lkMap.Lookup(xhttp.ContentLanguage); ok {
putOpts.ContentLanguage = lang
}
if disp, ok := lkMap.Lookup(xhttp.ContentDisposition); ok {
putOpts.ContentDisposition = disp
}
if cc, ok := lkMap.Lookup(xhttp.CacheControl); ok {
putOpts.CacheControl = cc
}
if mode, ok := lkMap.Lookup(xhttp.AmzObjectLockMode); ok {
rmode := miniogo.RetentionMode(mode)
putOpts.Mode = rmode
}
if retainDateStr, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok {
rdate, err := time.Parse(time.RFC3339, retainDateStr)
if err != nil {
return putOpts, err
}
putOpts.RetainUntilDate = rdate
}
if lhold, ok := lkMap.Lookup(xhttp.AmzObjectLockLegalHold); ok {
putOpts.LegalHold = miniogo.LegalHoldStatus(lhold)
}
return putOpts, nil
}
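putTransitionOpts reads user-defined metadata through a caseInsensitiveMap so header casing does not matter. A hypothetical stand-in for that helper (the real implementation may differ):

package main

import (
    "fmt"
    "strings"
)

// ciMap is an illustrative case-insensitive lookup over string metadata,
// since HTTP metadata keys may arrive in any casing.
type ciMap map[string]string

func (m ciMap) Lookup(key string) (string, bool) {
    for k, v := range m {
        if strings.EqualFold(k, key) {
            return v, true
        }
    }
    return "", false
}

func main() {
    meta := ciMap{"Content-Language": "en", "cache-control": "no-store"}
    if v, ok := meta.Lookup("Cache-Control"); ok {
        fmt.Println(v) // no-store
    }
}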
// handle deletes of transitioned objects or object versions when one of the following is true:
// 1. temporarily restored copies of objects (restored with the PostRestoreObject API) have expired.
// 2. the lifecycle expiry date is met on the object.
// 3. the object is removed through a DELETE API call
func deleteTransitionedObject(ctx context.Context, objectAPI ObjectLayer, bucket, object string, lcOpts lifecycle.ObjectOpts, restoredObject, isDeleteTierOnly bool) error {
if lcOpts.TransitionStatus == "" && !isDeleteTierOnly {
return nil
}
lc, err := globalLifecycleSys.Get(bucket)
if err != nil {
return err
}
arn := getLifecycleTransitionTargetArn(ctx, lc, bucket, lcOpts)
if arn == nil {
return fmt.Errorf("remote target not configured")
}
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, arn.String())
if tgt == nil {
return fmt.Errorf("remote target not configured")
}
var opts ObjectOptions
opts.Versioned = globalBucketVersioningSys.Enabled(bucket)
opts.VersionID = lcOpts.VersionID
if restoredObject {
// delete locally restored copy of object or object version
// from the source, while leaving metadata behind. The data on
// transitioned tier lies untouched and still accessible
opts.TransitionStatus = lcOpts.TransitionStatus
_, err = objectAPI.DeleteObject(ctx, bucket, object, opts)
return err
}
// When an object is past expiry, delete the data from transitioned tier and
// metadata from source
if err := tgt.RemoveObject(context.Background(), arn.Bucket, object, miniogo.RemoveObjectOptions{VersionID: lcOpts.VersionID}); err != nil {
logger.LogIf(ctx, err)
}
if isDeleteTierOnly {
return nil
}
objInfo, err := objectAPI.DeleteObject(ctx, bucket, object, opts)
if err != nil {
return err
}
eventName := event.ObjectRemovedDelete
if lcOpts.DeleteMarker {
eventName = event.ObjectRemovedDeleteMarkerCreated
}
// Notify object deleted event.
sendEvent(eventArgs{
EventName: eventName,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [ILM-EXPIRY]",
})
// should never reach here
return nil
}
// transitionObject transitions an object to the target specified by the transition ARN. When an object is
// transitioned, its metadata is left behind on the source cluster and the original content is moved to the
// transition tier. Note that for encrypted objects, the entire encrypted stream is moved to the transition
// tier without decrypting or re-encrypting.
func transitionObject(ctx context.Context, objectAPI ObjectLayer, objInfo ObjectInfo) error {
lc, err := globalLifecycleSys.Get(objInfo.Bucket)
if err != nil {
return err
}
lcOpts := lifecycle.ObjectOpts{
Name: objInfo.Name,
UserTags: objInfo.UserTags,
}
arn := getLifecycleTransitionTargetArn(ctx, lc, objInfo.Bucket, lcOpts)
if arn == nil {
return fmt.Errorf("remote target not configured")
}
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, arn.String())
if tgt == nil {
return fmt.Errorf("remote target not configured")
}
gr, err := objectAPI.GetObjectNInfo(ctx, objInfo.Bucket, objInfo.Name, nil, http.Header{}, readLock, ObjectOptions{
VersionID: objInfo.VersionID,
TransitionStatus: lifecycle.TransitionPending,
})
if err != nil {
return err
}
oi := gr.ObjInfo
if oi.TransitionStatus == lifecycle.TransitionComplete {
gr.Close()
return nil
}
putOpts, err := putTransitionOpts(oi)
if err != nil {
gr.Close()
return err
}
if _, err = tgt.PutObject(ctx, arn.Bucket, oi.Name, gr, oi.Size, putOpts); err != nil {
gr.Close()
return err
}
gr.Close()
var opts ObjectOptions
opts.Versioned = globalBucketVersioningSys.Enabled(oi.Bucket)
opts.VersionID = oi.VersionID
opts.TransitionStatus = lifecycle.TransitionComplete
eventName := event.ObjectTransitionComplete
objInfo, err = objectAPI.DeleteObject(ctx, oi.Bucket, oi.Name, opts)
if err != nil {
eventName = event.ObjectTransitionFailed
}
// Notify object deleted event.
sendEvent(eventArgs{
EventName: eventName,
BucketName: objInfo.Bucket,
Object: objInfo,
Host: "Internal: [ILM-Transition]",
})
return err
}
// getLifecycleTransitionTargetArn returns transition ARN for storage class specified in the config.
func getLifecycleTransitionTargetArn(ctx context.Context, lc *lifecycle.Lifecycle, bucket string, obj lifecycle.ObjectOpts) *madmin.ARN {
for _, rule := range lc.FilterActionableRules(obj) {
if rule.Transition.StorageClass != "" {
return globalBucketTargetSys.GetRemoteArnWithLabel(ctx, bucket, rule.Transition.StorageClass)
}
}
return nil
}
// getTransitionedObjectReader returns a reader from the transitioned tier.
func getTransitionedObjectReader(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, oi ObjectInfo, opts ObjectOptions) (gr *GetObjectReader, err error) {
var lc *lifecycle.Lifecycle
lc, err = globalLifecycleSys.Get(bucket)
if err != nil {
return nil, err
}
arn := getLifecycleTransitionTargetArn(ctx, lc, bucket, lifecycle.ObjectOpts{
Name: object,
UserTags: oi.UserTags,
ModTime: oi.ModTime,
VersionID: oi.VersionID,
DeleteMarker: oi.DeleteMarker,
IsLatest: oi.IsLatest,
})
if arn == nil {
return nil, fmt.Errorf("remote target not configured")
}
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, arn.String())
if tgt == nil {
return nil, fmt.Errorf("remote target not configured")
}
fn, off, length, err := NewGetObjectReader(rs, oi, opts)
if err != nil {
return nil, ErrorRespToObjectError(err, bucket, object)
}
gopts := miniogo.GetObjectOptions{VersionID: opts.VersionID}
// get correct offsets for encrypted object
if off >= 0 && length >= 0 {
if err := gopts.SetRange(off, off+length-1); err != nil {
return nil, ErrorRespToObjectError(err, bucket, object)
}
}
reader, err := tgt.GetObject(ctx, arn.Bucket, object, gopts)
if err != nil {
return nil, err
}
closeReader := func() { reader.Close() }
return fn(reader, h, opts.CheckPrecondFn, closeReader)
}
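getTransitionedObjectReader converts the (offset, length) pair returned by NewGetObjectReader into the inclusive byte range that SetRange expects. The arithmetic, shown as the equivalent HTTP Range header:

package main

import "fmt"

// SetRange(off, off+length-1): the last byte is inclusive.
func rangeHeader(off, length int64) string {
    return fmt.Sprintf("bytes=%d-%d", off, off+length-1)
}

func main() {
    fmt.Println(rangeHeader(0, 100))   // bytes=0-99
    fmt.Println(rangeHeader(512, 256)) // bytes=512-767
}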
// RestoreRequestType represents type of restore.
type RestoreRequestType string
const (
// SelectRestoreRequest specifies select request. This is the only valid value
SelectRestoreRequest RestoreRequestType = "SELECT"
)
// Encryption specifies encryption setting on restored bucket
type Encryption struct {
EncryptionType sse.SSEAlgorithm `xml:"EncryptionType"`
KMSContext string `xml:"KMSContext,omitempty"`
KMSKeyID string `xml:"KMSKeyId,omitempty"`
}
// MetadataEntry denotes name and value.
type MetadataEntry struct {
Name string `xml:"Name"`
Value string `xml:"Value"`
}
// S3Location specifies s3 location that receives result of a restore object request
type S3Location struct {
BucketName string `xml:"BucketName,omitempty"`
Encryption Encryption `xml:"Encryption,omitempty"`
Prefix string `xml:"Prefix,omitempty"`
StorageClass string `xml:"StorageClass,omitempty"`
Tagging *tags.Tags `xml:"Tagging,omitempty"`
UserMetadata []MetadataEntry `xml:"UserMetadata"`
}
// OutputLocation specifies bucket where object needs to be restored
type OutputLocation struct {
S3 S3Location `xml:"S3,omitempty"`
}
// IsEmpty returns true if output location not specified.
func (o *OutputLocation) IsEmpty() bool {
return o.S3.BucketName == ""
}
// SelectParameters specifies sql select parameters
type SelectParameters struct {
s3select.S3Select
}
// IsEmpty returns true if no select parameters set
func (sp *SelectParameters) IsEmpty() bool {
return sp == nil || sp.S3Select == s3select.S3Select{}
}
var (
selectParamsXMLName = "SelectParameters"
)
// UnmarshalXML - decodes XML data.
func (sp *SelectParameters) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
// Essentially the same as S3Select barring the xml name.
if start.Name.Local == selectParamsXMLName {
start.Name = xml.Name{Space: "", Local: "SelectRequest"}
}
return sp.S3Select.UnmarshalXML(d, start)
}
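The UnmarshalXML above reuses the S3Select decoder by renaming the start element before delegating. The same trick in a generic, runnable form (illustrative types, not MinIO's):

package main

import (
    "encoding/xml"
    "fmt"
)

// Inner is the canonical payload type, analogous to S3Select.
type Inner struct {
    XMLName xml.Name `xml:"SelectRequest"`
    Expr    string   `xml:"Expression"`
}

// Alias accepts the same payload under a different element name,
// analogous to SelectParameters.
type Alias struct{ Inner }

// UnmarshalXML renames the start element, then delegates to the stock
// decoder for the embedded type.
func (a *Alias) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
    if start.Name.Local == "SelectParameters" {
        start.Name = xml.Name{Local: "SelectRequest"}
    }
    return d.DecodeElement(&a.Inner, &start)
}

func main() {
    doc := `<SelectParameters><Expression>SELECT * FROM S3Object</Expression></SelectParameters>`
    var a Alias
    if err := xml.Unmarshal([]byte(doc), &a); err != nil {
        panic(err)
    }
    fmt.Println(a.Expr) // SELECT * FROM S3Object
}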
// RestoreObjectRequest - xml to restore a transitioned object
type RestoreObjectRequest struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RestoreRequest" json:"-"`
Days int `xml:"Days,omitempty"`
Type RestoreRequestType `xml:"Type,omitempty"`
Tier string `xml:"Tier,-"`
Description string `xml:"Description,omitempty"`
SelectParameters *SelectParameters `xml:"SelectParameters,omitempty"`
OutputLocation OutputLocation `xml:"OutputLocation,omitempty"`
}
// Maximum 2MiB size per restore object request.
const maxRestoreObjectRequestSize = 2 << 20
// parseRestoreRequest parses RestoreObjectRequest from xml
func parseRestoreRequest(reader io.Reader) (*RestoreObjectRequest, error) {
req := RestoreObjectRequest{}
if err := xml.NewDecoder(io.LimitReader(reader, maxRestoreObjectRequestSize)).Decode(&req); err != nil {
return nil, err
}
return &req, nil
}
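parseRestoreRequest caps the body at maxRestoreObjectRequestSize, so an oversized request is truncated by io.LimitReader and surfaces as an XML decode error. A sketch with a simplified request struct (days-based restore only, no namespace handling):

package main

import (
    "encoding/xml"
    "fmt"
    "io"
    "strings"
)

// Simplified mirror of RestoreObjectRequest; illustrative only.
type restoreRequest struct {
    XMLName xml.Name `xml:"RestoreRequest"`
    Days    int      `xml:"Days,omitempty"`
    Tier    string   `xml:"Tier,omitempty"`
}

const maxSize = 2 << 20 // 2 MiB, mirrors maxRestoreObjectRequestSize

func parse(r io.Reader) (*restoreRequest, error) {
    req := restoreRequest{}
    if err := xml.NewDecoder(io.LimitReader(r, maxSize)).Decode(&req); err != nil {
        return nil, err
    }
    return &req, nil
}

func main() {
    body := `<RestoreRequest><Days>7</Days><Tier>Standard</Tier></RestoreRequest>`
    req, err := parse(strings.NewReader(body))
    if err != nil {
        panic(err)
    }
    fmt.Println(req.Days, req.Tier) // 7 Standard
}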
// validate a RestoreObjectRequest as per AWS S3 spec https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
func (r *RestoreObjectRequest) validate(ctx context.Context, objAPI ObjectLayer) error {
if r.Type != SelectRestoreRequest && !r.SelectParameters.IsEmpty() {
return fmt.Errorf("Select parameters can only be specified with SELECT request type")
}
if r.Type == SelectRestoreRequest && r.SelectParameters.IsEmpty() {
return fmt.Errorf("SELECT restore request requires select parameters to be specified")
}
if r.Type != SelectRestoreRequest && !r.OutputLocation.IsEmpty() {
return fmt.Errorf("OutputLocation required only for SELECT request type")
}
if r.Type == SelectRestoreRequest && r.OutputLocation.IsEmpty() {
return fmt.Errorf("OutputLocation required for SELECT requests")
}
if r.Days != 0 && r.Type == SelectRestoreRequest {
return fmt.Errorf("Days cannot be specified with SELECT restore request")
}
if r.Days == 0 && r.Type != SelectRestoreRequest {
return fmt.Errorf("restoration days should be at least 1")
}
// Check if bucket exists.
if !r.OutputLocation.IsEmpty() {
if _, err := objAPI.GetBucketInfo(ctx, r.OutputLocation.S3.BucketName); err != nil {
return err
}
if r.OutputLocation.S3.Prefix == "" {
return fmt.Errorf("Prefix is a required parameter in OutputLocation")
}
if r.OutputLocation.S3.Encryption.EncryptionType != xhttp.AmzEncryptionAES {
return NotImplemented{}
}
}
return nil
}
// set ObjectOptions for PUT call to restore temporary copy of transitioned data
func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo ObjectInfo) (putOpts ObjectOptions) {
meta := make(map[string]string)
sc := rreq.OutputLocation.S3.StorageClass
if sc == "" {
sc = objInfo.StorageClass
}
meta[strings.ToLower(xhttp.AmzStorageClass)] = sc
if rreq.Type == SelectRestoreRequest {
for _, v := range rreq.OutputLocation.S3.UserMetadata {
if !strings.HasPrefix(strings.ToLower(v.Name), "x-amz-meta") {
meta["x-amz-meta-"+v.Name] = v.Value
continue
}
meta[v.Name] = v.Value
}
meta[xhttp.AmzObjectTagging] = rreq.OutputLocation.S3.Tagging.String()
if rreq.OutputLocation.S3.Encryption.EncryptionType != "" {
meta[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionAES
}
return ObjectOptions{
Versioned: globalBucketVersioningSys.Enabled(bucket),
VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
UserDefined: meta,
}
}
for k, v := range objInfo.UserDefined {
meta[k] = v
}
meta[xhttp.AmzObjectTagging] = objInfo.UserTags
return ObjectOptions{
Versioned: globalBucketVersioningSys.Enabled(bucket),
VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
UserDefined: meta,
VersionID: objInfo.VersionID,
MTime: objInfo.ModTime,
Expires: objInfo.Expires,
}
}
var (
errRestoreHDRMissing = fmt.Errorf("x-amz-restore header not found")
errRestoreHDRMalformed = fmt.Errorf("x-amz-restore header malformed")
)
// parse x-amz-restore header from user metadata to get the status of the ongoing request and the expiry of restoration,
// if any. The header value is of the format: ongoing-request=true|false, expiry-date=time
func parseRestoreHeaderFromMeta(meta map[string]string) (ongoing bool, expiry time.Time, err error) {
restoreHdr, ok := meta[xhttp.AmzRestore]
if !ok {
return ongoing, expiry, errRestoreHDRMissing
}
rslc := strings.SplitN(restoreHdr, ",", 2)
if len(rslc) != 2 {
return ongoing, expiry, errRestoreHDRMalformed
}
rstatusSlc := strings.SplitN(rslc[0], "=", 2)
if len(rstatusSlc) != 2 {
return ongoing, expiry, errRestoreHDRMalformed
}
rExpSlc := strings.SplitN(rslc[1], "=", 2)
if len(rExpSlc) != 2 {
return ongoing, expiry, errRestoreHDRMalformed
}
expiry, err = time.Parse(http.TimeFormat, rExpSlc[1])
if err != nil {
return
}
return rstatusSlc[1] == "true", expiry, nil
}
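A hedged round-trip sketch of the header this parser expects (the writer side appears in restoreTransitionedObject below, which emits ongoing-request and expiry-date via fmt.Sprintf):
// Hypothetical round trip for the x-amz-restore header.
meta := map[string]string{
	xhttp.AmzRestore: "ongoing-request=false, expiry-date=Fri, 21 Dec 2012 00:00:00 GMT",
}
ongoing, expiry, err := parseRestoreHeaderFromMeta(meta)
// SplitN(..., ",", 2) splits only at the first comma, so the commas inside
// the http.TimeFormat date survive: ongoing == false, expiry.Year() == 2012, err == nil.
_, _, _ = ongoing, expiry, err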
// restoreTransitionedObject is similar to PostObjectRestore from AWS GLACIER
// storage class. When PostObjectRestore API is called, a temporary copy of the object
// is restored locally to the bucket on source cluster until the restore expiry date.
// The copy that was transitioned continues to reside in the transitioned tier.
func restoreTransitionedObject(ctx context.Context, bucket, object string, objAPI ObjectLayer, objInfo ObjectInfo, rreq *RestoreObjectRequest, restoreExpiry time.Time) error {
var rs *HTTPRangeSpec
gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, http.Header{}, objInfo, ObjectOptions{
VersionID: objInfo.VersionID})
if err != nil {
return err
}
defer gr.Close()
hashReader, err := hash.NewReader(gr, objInfo.Size, "", "", objInfo.Size)
if err != nil {
return err
}
pReader := NewPutObjReader(hashReader)
opts := putRestoreOpts(bucket, object, rreq, objInfo)
opts.UserDefined[xhttp.AmzRestore] = fmt.Sprintf("ongoing-request=%t, expiry-date=%s", false, restoreExpiry.Format(http.TimeFormat))
if _, err := objAPI.PutObject(ctx, bucket, object, pReader, opts); err != nil {
return err
}
return nil
}

View File

@@ -18,18 +18,35 @@ package cmd
import (
"context"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/sync/errgroup"
)
func concurrentDecryptETag(ctx context.Context, objects []ObjectInfo) {
g := errgroup.WithNErrs(len(objects)).WithConcurrency(500)
_, cancel := g.WithCancelOnError(ctx)
defer cancel()
for index := range objects {
index := index
g.Go(func() error {
size, err := objects[index].GetActualSize()
if err == nil {
objects[index].Size = size
}
objects[index].ETag = objects[index].GetActualETag(nil)
return nil
}, index)
}
g.WaitErr()
}
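For readers unfamiliar with the bounded-concurrency pattern, here is an analogous sketch using golang.org/x/sync/errgroup (a different package from the internal pkg/sync/errgroup used above; names here are hypothetical):
// Bounded fan-out, analogous to WithConcurrency(500) above.
var g errgroup.Group
g.SetLimit(500) // at most 500 goroutines in flight
for i := range objects {
	i := i
	g.Go(func() error {
		// per-object work; errors are deliberately swallowed, as in
		// concurrentDecryptETag, so one bad object does not fail the listing
		_ = objects[i]
		return nil
	})
}
_ = g.Wait()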
// Validate all the ListObjects query arguments, returns an APIErrorCode
// if any of the args does not meet the required conditions.
// Special conditions required by MinIO server are as below
@@ -58,7 +75,7 @@ func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int
func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectVersions")
defer logger.AuditLog(w, r, "ListObjectVersions", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -69,7 +86,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketVersionsAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
@@ -100,16 +117,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
return
}
for i := range listObjectVersionsInfo.Objects {
if crypto.IsEncrypted(listObjectVersionsInfo.Objects[i].UserDefined) {
listObjectVersionsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectVersionsInfo.Objects[i], false)
}
listObjectVersionsInfo.Objects[i].Size, err = listObjectVersionsInfo.Objects[i].GetActualSize()
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
concurrentDecryptETag(ctx, listObjectVersionsInfo.Objects)
response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo)
@@ -120,7 +128,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
// ListObjectsV2MHandler - GET Bucket (List Objects) Version 2 with metadata.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
// of the objects in a bucket. You can use the request parame<ters as selection
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
// NOTE: It is recommended that this API be used for application development.
@@ -128,7 +136,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectsV2M")
defer logger.AuditLog(w, r, "ListObjectsV2M", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -160,13 +168,6 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
return
}
// Analyze continuation token and route the request accordingly
var success bool
token, success = proxyRequestByToken(ctx, w, r, token)
if success {
return
}
listObjectsV2 := objectAPI.ListObjectsV2
// Initiate a list objects operation based on the input params.
@@ -178,22 +179,10 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
return
}
for i := range listObjectsV2Info.Objects {
if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
}
listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].GetActualSize()
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
concurrentDecryptETag(ctx, listObjectsV2Info.Objects)
// The next continuation token has id@node_index format to optimize paginated listing
nextContinuationToken := listObjectsV2Info.NextContinuationToken
if nextContinuationToken != "" && listObjectsV2Info.IsTruncated {
nextContinuationToken = fmt.Sprintf("%s@%d", listObjectsV2Info.NextContinuationToken, getLocalNodeIndex())
}
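As a hedged illustration of that token shape (the split below is for exposition only; parseRequestToken further down performs the actual inverse):
// Hypothetical round trip for an id@node_index continuation token.
token := fmt.Sprintf("%s@%d", "opaque-continuation-id", 2) // "opaque-continuation-id@2"
if i := strings.LastIndex(token, "@"); i >= 0 {
	subToken, nodeIndex := token[:i], token[i+1:]
	_, _ = subToken, nodeIndex // "opaque-continuation-id", "2"
}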
response := generateListObjectsV2Response(bucket, prefix, token, nextContinuationToken, startAfter,
delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
@@ -214,7 +203,7 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectsV2")
defer logger.AuditLog(w, r, "ListObjectsV2", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -246,13 +235,6 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
return
}
// Analyze continuation token and route the request accordingly
var success bool
token, success = proxyRequestByToken(ctx, w, r, token)
if success {
return
}
listObjectsV2 := objectAPI.ListObjectsV2
// Initiate a list objects operation based on the input params.
@@ -264,24 +246,9 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
return
}
for i := range listObjectsV2Info.Objects {
if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
}
listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].GetActualSize()
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
concurrentDecryptETag(ctx, listObjectsV2Info.Objects)
// The next continuation token has id@node_index format to optimize paginated listing
nextContinuationToken := listObjectsV2Info.NextContinuationToken
if nextContinuationToken != "" && listObjectsV2Info.IsTruncated {
nextContinuationToken = fmt.Sprintf("%s@%d", listObjectsV2Info.NextContinuationToken, getLocalNodeIndex())
}
response := generateListObjectsV2Response(bucket, prefix, token, nextContinuationToken, startAfter,
response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes, false)
@@ -289,18 +256,6 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
writeSuccessResponseXML(w, encodeResponse(response))
}
func getLocalNodeIndex() int {
if len(globalProxyEndpoints) == 0 {
return -1
}
for i, ep := range globalProxyEndpoints {
if ep.IsLocal {
return i
}
}
return -1
}
func parseRequestToken(token string) (subToken string, nodeIndex int) {
if token == "" {
return token, -1
@@ -339,10 +294,6 @@ func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http
return proxyRequest(ctx, w, r, ep)
}
func proxyRequestByBucket(ctx context.Context, w http.ResponseWriter, r *http.Request, bucket string) (success bool) {
return proxyRequestByNodeIndex(ctx, w, r, crcHashMod(bucket, len(globalProxyEndpoints)))
}
// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
@@ -352,7 +303,7 @@ func proxyRequestByBucket(ctx context.Context, w http.ResponseWriter, r *http.Re
func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectsV1")
defer logger.AuditLog(w, r, "ListObjectsV1", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -381,10 +332,6 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
return
}
if proxyRequestByBucket(ctx, w, r, bucket) {
return
}
listObjects := objectAPI.ListObjects
// Initiate a list objects operation based on the input params.
@@ -396,16 +343,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
return
}
for i := range listObjectsInfo.Objects {
if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false)
}
listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].GetActualSize()
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
concurrentDecryptETag(ctx, listObjectsInfo.Objects)
response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)

View File

@@ -23,11 +23,14 @@ import (
"fmt"
"sync"
"github.com/minio/minio-go/v6/pkg/tags"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
"github.com/minio/minio/pkg/bucket/lifecycle"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/bucket/versioning"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/madmin"
@@ -47,6 +50,7 @@ func (sys *BucketMetadataSys) Remove(bucket string) {
}
sys.Lock()
delete(sys.metadataMap, bucket)
globalBucketMonitor.DeleteBucket(bucket)
sys.Unlock()
}
@@ -70,7 +74,7 @@ func (sys *BucketMetadataSys) Set(bucket string, meta BucketMetadata) {
// Update updates bucket metadata for the specified config file.
// The configData data should not be modified after being sent here.
func (sys *BucketMetadataSys) Update(bucket string, configFile string, configData []byte) error {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return errServerNotInitialized
}
@@ -79,7 +83,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
// This code is needed only for gateway implementations.
switch configFile {
case bucketSSEConfig:
if globalGatewayName == "nas" {
if globalGatewayName == NASBackendGateway {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err
@@ -88,7 +92,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
return meta.Save(GlobalContext, objAPI)
}
case bucketLifecycleConfig:
if globalGatewayName == "nas" {
if globalGatewayName == NASBackendGateway {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err
@@ -97,7 +101,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
return meta.Save(GlobalContext, objAPI)
}
case bucketTaggingConfig:
if globalGatewayName == "nas" {
if globalGatewayName == NASBackendGateway {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err
@@ -106,7 +110,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
return meta.Save(GlobalContext, objAPI)
}
case bucketNotificationConfig:
if globalGatewayName == "nas" {
if globalGatewayName == NASBackendGateway {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err
@@ -147,12 +151,28 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
meta.EncryptionConfigXML = configData
case bucketTaggingConfig:
meta.TaggingConfigXML = configData
case objectLockConfig:
meta.ObjectLockConfigXML = configData
case bucketVersioningConfig:
meta.VersioningConfigXML = configData
case bucketQuotaConfigFile:
meta.QuotaConfigJSON = configData
case objectLockConfig:
if !globalIsErasure && !globalIsDistErasure {
return NotImplemented{}
}
meta.ObjectLockConfigXML = configData
case bucketVersioningConfig:
if !globalIsErasure && !globalIsDistErasure {
return NotImplemented{}
}
meta.VersioningConfigXML = configData
case bucketReplicationConfig:
if !globalIsErasure && !globalIsDistErasure {
return NotImplemented{}
}
meta.ReplicationConfigXML = configData
case bucketTargetsFile:
meta.BucketTargetsConfigJSON, meta.BucketTargetsConfigMetaJSON, err = encryptBucketMetadata(meta.Name, configData, crypto.Context{bucket: meta.Name, bucketTargetsFile: bucketTargetsFile})
if err != nil {
return fmt.Errorf("Error encrypting bucket target metadata %w", err)
}
default:
return fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)
}
@@ -171,6 +191,12 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
// If no metadata exists errConfigNotFound is returned and a new metadata is returned.
// Only a shallow copy is returned, so referenced data should not be modified,
// but can be replaced atomically.
//
// This function should only be used with
// - GetBucketInfo
// - ListBuckets
// For all other bucket specific metadata, use the relevant
// calls implemented specifically for each of those features.
func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) {
if globalIsGateway || bucket == minioMetaBucket {
return newBucketMetadata(bucket), errConfigNotFound
@@ -248,9 +274,9 @@ func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Life
// GetNotificationConfig returns configured notification config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Config, error) {
if globalIsGateway && globalGatewayName == "nas" {
if globalIsGateway && globalGatewayName == NASBackendGateway {
// Only needed in case of NAS gateway.
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
@@ -288,7 +314,7 @@ func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEC
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
@@ -318,10 +344,54 @@ func (sys *BucketMetadataSys) GetQuotaConfig(bucket string) (*madmin.BucketQuota
return meta.quotaConfig, nil
}
// GetConfig returns the current bucket metadata
// GetReplicationConfig returns configured bucket replication config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket string) (*replication.Config, error) {
meta, err := sys.GetConfig(bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, BucketReplicationConfigNotFound{Bucket: bucket}
}
return nil, err
}
if meta.replicationConfig == nil {
return nil, BucketReplicationConfigNotFound{Bucket: bucket}
}
return meta.replicationConfig, nil
}
// GetBucketTargetsConfig returns configured bucket targets for this bucket
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetBucketTargetsConfig(bucket string) (*madmin.BucketTargets, error) {
meta, err := sys.GetConfig(bucket)
if err != nil {
return nil, err
}
if meta.bucketTargetConfig == nil {
return nil, BucketRemoteTargetNotFound{Bucket: bucket}
}
return meta.bucketTargetConfig, nil
}
// GetBucketTarget returns the target for the bucket and arn.
func (sys *BucketMetadataSys) GetBucketTarget(bucket string, arn string) (madmin.BucketTarget, error) {
targets, err := sys.GetBucketTargetsConfig(bucket)
if err != nil {
return madmin.BucketTarget{}, err
}
for _, t := range targets.Targets {
if t.Arn == arn {
return t, nil
}
}
return madmin.BucketTarget{}, errConfigNotFound
}
// GetConfig returns a specific configuration from the bucket metadata.
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetConfig(bucket string) (BucketMetadata, error) {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return newBucketMetadata(bucket), errServerNotInitialized
}
@@ -362,16 +432,21 @@ func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, ob
return nil
}
// Load PolicySys once during boot.
return sys.load(ctx, buckets, objAPI)
// Load bucket metadata sys in background
go sys.load(ctx, buckets, objAPI)
return nil
}
// concurrentLoad loads bucket metadata for the given buckets concurrently to speed up startup.
func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) {
g := errgroup.WithNErrs(len(buckets))
for index := range buckets {
index := index
g.Go(func() error {
_, _ = objAPI.HealBucket(ctx, buckets[index].Name, madmin.HealOpts{
// Ensure bucket metadata is always healed with a deep scan.
ScanMode: madmin.HealDeepScan,
})
meta, err := loadBucketMetadata(ctx, objAPI, buckets[index].Name)
if err != nil {
return err
@@ -384,26 +459,33 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
}
for _, err := range g.Wait() {
if err != nil {
return err
logger.LogIf(ctx, err)
}
}
return nil
}
// Loads bucket metadata for all buckets into BucketMetadataSys.
func (sys *BucketMetadataSys) load(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
func (sys *BucketMetadataSys) load(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) {
count := 100 // load 100 bucket metadata entries at a time.
for {
if len(buckets) < count {
return sys.concurrentLoad(ctx, buckets, objAPI)
}
if err := sys.concurrentLoad(ctx, buckets[:count], objAPI); err != nil {
return err
sys.concurrentLoad(ctx, buckets, objAPI)
return
}
sys.concurrentLoad(ctx, buckets[:count], objAPI)
buckets = buckets[count:]
}
}
// Reset the state of the BucketMetadataSys.
func (sys *BucketMetadataSys) Reset() {
sys.Lock()
for k := range sys.metadataMap {
delete(sys.metadataMap, k)
}
sys.Unlock()
}
// NewBucketMetadataSys - creates new bucket metadata system.
func NewBucketMetadataSys() *BucketMetadataSys {
return &BucketMetadataSys{

View File

@@ -19,22 +19,27 @@ package cmd
import (
"bytes"
"context"
"crypto/rand"
"encoding/binary"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"path"
"time"
"github.com/minio/minio-go/v6/pkg/tags"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
"github.com/minio/minio/pkg/bucket/lifecycle"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/bucket/versioning"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/sio"
)
const (
@@ -59,27 +64,33 @@ var (
// bucketMetadataFormat refers to the format.
// bucketMetadataVersion can be used to track a rolling upgrade of a field.
type BucketMetadata struct {
Name string
Created time.Time
LockEnabled bool // legacy not used anymore.
PolicyConfigJSON []byte
NotificationConfigXML []byte
LifecycleConfigXML []byte
ObjectLockConfigXML []byte
VersioningConfigXML []byte
EncryptionConfigXML []byte
TaggingConfigXML []byte
QuotaConfigJSON []byte
Name string
Created time.Time
LockEnabled bool // legacy not used anymore.
PolicyConfigJSON []byte
NotificationConfigXML []byte
LifecycleConfigXML []byte
ObjectLockConfigXML []byte
VersioningConfigXML []byte
EncryptionConfigXML []byte
TaggingConfigXML []byte
QuotaConfigJSON []byte
ReplicationConfigXML []byte
BucketTargetsConfigJSON []byte
BucketTargetsConfigMetaJSON []byte
// Unexported fields. Must be updated atomically.
policyConfig *policy.Policy
notificationConfig *event.Config
lifecycleConfig *lifecycle.Lifecycle
objectLockConfig *objectlock.Config
versioningConfig *versioning.Versioning
sseConfig *bucketsse.BucketSSEConfig
taggingConfig *tags.Tags
quotaConfig *madmin.BucketQuota
policyConfig *policy.Policy
notificationConfig *event.Config
lifecycleConfig *lifecycle.Lifecycle
objectLockConfig *objectlock.Config
versioningConfig *versioning.Versioning
sseConfig *bucketsse.BucketSSEConfig
taggingConfig *tags.Tags
quotaConfig *madmin.BucketQuota
replicationConfig *replication.Config
bucketTargetConfig *madmin.BucketTargets
bucketTargetConfigMeta map[string]string
}
// newBucketMetadata creates BucketMetadata with the supplied name and Created set to the current time.
@@ -94,12 +105,18 @@ func newBucketMetadata(name string) BucketMetadata {
versioningConfig: &versioning.Versioning{
XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/",
},
bucketTargetConfig: &madmin.BucketTargets{},
bucketTargetConfigMeta: make(map[string]string),
}
}
// Load - loads the metadata of bucket by name from ObjectLayer api.
// If an error is returned the returned metadata will be default initialized.
func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string) error {
if name == "" {
logger.LogIf(ctx, errors.New("bucket name cannot be empty"))
return errors.New("bucket name cannot be empty")
}
configFile := path.Join(bucketConfigPrefix, name, bucketMetadataFile)
data, err := readConfig(ctx, api, configFile)
if err != nil {
@@ -119,26 +136,26 @@ func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string)
default:
return fmt.Errorf("loadBucketMetadata: unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
}
// OK, parse data.
_, err = b.UnmarshalMsg(data[4:])
b.Name = name // in case parsing failed for some reason, make sure bucket name is not empty.
return err
}
// loadBucketMetadata loads bucket metadata and migrates any legacy configs.
func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket string) (BucketMetadata, error) {
b := newBucketMetadata(bucket)
err := b.Load(ctx, objectAPI, bucket)
if err == nil {
return b, b.convertLegacyConfigs(ctx, objectAPI)
}
if err != errConfigNotFound {
err := b.Load(ctx, objectAPI, b.Name)
if err != nil && !errors.Is(err, errConfigNotFound) {
return b, err
}
// Old bucket without bucket metadata. Hence we migrate existing settings.
return b, b.convertLegacyConfigs(ctx, objectAPI)
if err := b.convertLegacyConfigs(ctx, objectAPI); err != nil {
return b, err
}
// migrate unencrypted remote targets
return b, b.migrateTargetConfig(ctx, objectAPI)
}
// parseAllConfigs will parse all configs and populate the private fields.
@@ -213,6 +230,23 @@ func (b *BucketMetadata) parseAllConfigs(ctx context.Context, objectAPI ObjectLa
}
}
if len(b.ReplicationConfigXML) != 0 {
b.replicationConfig, err = replication.ParseConfig(bytes.NewReader(b.ReplicationConfigXML))
if err != nil {
return err
}
} else {
b.replicationConfig = nil
}
if len(b.BucketTargetsConfigJSON) != 0 {
b.bucketTargetConfig, err = parseBucketTargetConfig(b.Name, b.BucketTargetsConfigJSON, b.BucketTargetsConfigMetaJSON)
if err != nil {
return err
}
} else {
b.bucketTargetConfig = &madmin.BucketTargets{}
}
return nil
}
@@ -225,6 +259,8 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
bucketQuotaConfigFile,
bucketSSEConfig,
bucketTaggingConfig,
bucketReplicationConfig,
bucketTargetsFile,
objectLockConfig,
}
@@ -242,6 +278,12 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
configData, err := readConfig(ctx, objectAPI, configFile)
if err != nil {
switch err.(type) {
case ObjectExistsAsDirectory:
// in FS mode it is possible that we have actual
// files in this folder, i.e. `.minio.sys/buckets/bucket/configFile`
continue
}
if errors.Is(err, errConfigNotFound) {
// legacy file config not found, proceed to look for new metadata.
continue
@@ -281,6 +323,10 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
b.VersioningConfigXML = enabledBucketVersioningConfig
case bucketQuotaConfigFile:
b.QuotaConfigJSON = configData
case bucketReplicationConfig:
b.ReplicationConfigXML = configData
case bucketTargetsFile:
b.BucketTargetsConfigJSON = configData
}
}
@@ -322,7 +368,7 @@ func (b *BucketMetadata) Save(ctx context.Context, api ObjectLayer) error {
// deleteBucketMetadata deletes bucket metadata
// If config does not exist no error is returned.
func deleteBucketMetadata(ctx context.Context, obj ObjectLayer, bucket string) error {
func deleteBucketMetadata(ctx context.Context, obj objectDeleter, bucket string) error {
metadataFiles := []string{
dataUsageCacheName,
bucketMetadataFile,
@@ -335,3 +381,76 @@ func deleteBucketMetadata(ctx context.Context, obj ObjectLayer, bucket string) e
}
return nil
}
// migrate config for remote targets by encrypting data if currently unencrypted and kms is configured.
func (b *BucketMetadata) migrateTargetConfig(ctx context.Context, objectAPI ObjectLayer) error {
var err error
// early return if no targets or already encrypted
if len(b.BucketTargetsConfigJSON) == 0 || GlobalKMS == nil || len(b.BucketTargetsConfigMetaJSON) != 0 {
return nil
}
encBytes, metaBytes, err := encryptBucketMetadata(b.Name, b.BucketTargetsConfigJSON, crypto.Context{b.Name: b.Name, bucketTargetsFile: bucketTargetsFile})
if err != nil {
return err
}
b.BucketTargetsConfigJSON = encBytes
b.BucketTargetsConfigMetaJSON = metaBytes
return b.Save(ctx, objectAPI)
}
// encrypt bucket metadata if kms is configured.
func encryptBucketMetadata(bucket string, input []byte, kmsContext crypto.Context) (output, metabytes []byte, err error) {
var sealedKey crypto.SealedKey
if GlobalKMS == nil {
output = input
return
}
var (
key [32]byte
encKey []byte
)
metadata := make(map[string]string)
key, encKey, err = GlobalKMS.GenerateKey(GlobalKMS.DefaultKeyID(), kmsContext)
if err != nil {
return
}
outbuf := bytes.NewBuffer(nil)
objectKey := crypto.GenerateKey(key, rand.Reader)
sealedKey = objectKey.Seal(key, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, "")
crypto.S3.CreateMetadata(metadata, GlobalKMS.DefaultKeyID(), encKey, sealedKey)
_, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20})
if err != nil {
return output, metabytes, err
}
metabytes, err = json.Marshal(metadata)
if err != nil {
return
}
return outbuf.Bytes(), metabytes, nil
}
// decrypt bucket metadata if kms is configured.
func decryptBucketMetadata(input []byte, bucket string, meta map[string]string, kmsContext crypto.Context) ([]byte, error) {
if GlobalKMS == nil {
return nil, errKMSNotConfigured
}
keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(meta)
if err != nil {
return nil, err
}
extKey, err := GlobalKMS.UnsealKey(keyID, kmsKey, kmsContext)
if err != nil {
return nil, err
}
var objectKey crypto.ObjectKey
if err = objectKey.Unseal(extKey, sealedKey, crypto.S3.String(), bucket, ""); err != nil {
return nil, err
}
outbuf := bytes.NewBuffer(nil)
_, err = sio.Decrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20})
return outbuf.Bytes(), err
}
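A minimal round-trip sketch (hypothetical, assuming GlobalKMS is configured and mirroring the crypto.Context built in migrateTargetConfig):
// Hypothetical encrypt/decrypt round trip for bucket target metadata.
func exampleTargetCrypto() {
	kmsCtx := crypto.Context{"mybucket": "mybucket", bucketTargetsFile: bucketTargetsFile}
	enc, metaBytes, err := encryptBucketMetadata("mybucket", []byte(`{"targets":[]}`), kmsCtx)
	if err != nil {
		return
	}
	var meta map[string]string
	if err = json.Unmarshal(metaBytes, &meta); err != nil { // metaBytes is the JSON-encoded SSE metadata
		return
	}
	plain, err := decryptBucketMetadata(enc, "mybucket", meta, kmsCtx)
	_, _ = plain, err // plain equals the original payload when the KMS context matches
}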

View File

@@ -90,6 +90,24 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "QuotaConfigJSON")
return
}
case "ReplicationConfigXML":
z.ReplicationConfigXML, err = dc.ReadBytes(z.ReplicationConfigXML)
if err != nil {
err = msgp.WrapError(err, "ReplicationConfigXML")
return
}
case "BucketTargetsConfigJSON":
z.BucketTargetsConfigJSON, err = dc.ReadBytes(z.BucketTargetsConfigJSON)
if err != nil {
err = msgp.WrapError(err, "BucketTargetsConfigJSON")
return
}
case "BucketTargetsConfigMetaJSON":
z.BucketTargetsConfigMetaJSON, err = dc.ReadBytes(z.BucketTargetsConfigMetaJSON)
if err != nil {
err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON")
return
}
default:
err = dc.Skip()
if err != nil {
@@ -103,9 +121,9 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {
// EncodeMsg implements msgp.Encodable
func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 11
// map header, size 14
// write "Name"
err = en.Append(0x8b, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
err = en.Append(0x8e, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
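The changed leading byte is plain MessagePack arithmetic: a fixmap header is 0x80 ORed with the field count, so growing the struct from 11 to 14 serialized fields moves it from 0x8b to 0x8e, and 0xa4 is the fixstr marker (0xa0 | 4) for the 4-byte key "Name". A quick check:
// MessagePack header arithmetic behind the 0x8b -> 0x8e change.
const fixmap, fixstr = 0x80, 0xa0
fmt.Printf("%#x %#x\n", fixmap|11, fixmap|14) // 0x8b 0x8e
fmt.Printf("%#x\n", fixstr|len("Name"))       // 0xa4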
@@ -214,15 +232,45 @@ func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "QuotaConfigJSON")
return
}
// write "ReplicationConfigXML"
err = en.Append(0xb4, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
if err != nil {
return
}
err = en.WriteBytes(z.ReplicationConfigXML)
if err != nil {
err = msgp.WrapError(err, "ReplicationConfigXML")
return
}
// write "BucketTargetsConfigJSON"
err = en.Append(0xb7, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x53, 0x4f, 0x4e)
if err != nil {
return
}
err = en.WriteBytes(z.BucketTargetsConfigJSON)
if err != nil {
err = msgp.WrapError(err, "BucketTargetsConfigJSON")
return
}
// write "BucketTargetsConfigMetaJSON"
err = en.Append(0xbb, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x4a, 0x53, 0x4f, 0x4e)
if err != nil {
return
}
err = en.WriteBytes(z.BucketTargetsConfigMetaJSON)
if err != nil {
err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 11
// map header, size 14
// string "Name"
o = append(o, 0x8b, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = append(o, 0x8e, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name)
// string "Created"
o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
@@ -254,6 +302,15 @@ func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
// string "QuotaConfigJSON"
o = append(o, 0xaf, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x53, 0x4f, 0x4e)
o = msgp.AppendBytes(o, z.QuotaConfigJSON)
// string "ReplicationConfigXML"
o = append(o, 0xb4, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
o = msgp.AppendBytes(o, z.ReplicationConfigXML)
// string "BucketTargetsConfigJSON"
o = append(o, 0xb7, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x53, 0x4f, 0x4e)
o = msgp.AppendBytes(o, z.BucketTargetsConfigJSON)
// string "BucketTargetsConfigMetaJSON"
o = append(o, 0xbb, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x4a, 0x53, 0x4f, 0x4e)
o = msgp.AppendBytes(o, z.BucketTargetsConfigMetaJSON)
return
}
@@ -341,6 +398,24 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "QuotaConfigJSON")
return
}
case "ReplicationConfigXML":
z.ReplicationConfigXML, bts, err = msgp.ReadBytesBytes(bts, z.ReplicationConfigXML)
if err != nil {
err = msgp.WrapError(err, "ReplicationConfigXML")
return
}
case "BucketTargetsConfigJSON":
z.BucketTargetsConfigJSON, bts, err = msgp.ReadBytesBytes(bts, z.BucketTargetsConfigJSON)
if err != nil {
err = msgp.WrapError(err, "BucketTargetsConfigJSON")
return
}
case "BucketTargetsConfigMetaJSON":
z.BucketTargetsConfigMetaJSON, bts, err = msgp.ReadBytesBytes(bts, z.BucketTargetsConfigMetaJSON)
if err != nil {
err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
@@ -355,6 +430,6 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketMetadata) Msgsize() (s int) {
s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON)
s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + 21 + msgp.BytesPrefixSize + len(z.ReplicationConfigXML) + 24 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigJSON) + 28 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigMetaJSON)
return
}
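The new constants follow msgp's sizing convention: one fixstr marker byte plus the key's length, so "ReplicationConfigXML" (20 chars) contributes 21, "BucketTargetsConfigJSON" (23) contributes 24, and "BucketTargetsConfigMetaJSON" (27) contributes 28, each followed by the bytes prefix and payload length. A quick sanity check:
// Where the new Msgsize constants come from: 1 marker byte + key length.
for _, k := range []string{"ReplicationConfigXML", "BucketTargetsConfigJSON", "BucketTargetsConfigMetaJSON"} {
	fmt.Println(k, 1+len(k)) // prints 21, 24 and 28
}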

View File

@@ -17,15 +17,12 @@
package cmd
import (
"encoding/json"
"encoding/xml"
"io"
"net/http"
"reflect"
"time"
"github.com/gorilla/mux"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/event"
@@ -42,7 +39,7 @@ const (
func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketNotification")
defer logger.AuditLog(w, r, "GetBucketNotification", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucketName := vars["bucket"]
@@ -114,7 +111,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketNotification")
defer logger.AuditLog(w, r, "PutBucketNotification", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
@@ -173,137 +170,3 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
writeSuccessResponseHeadersOnly(w)
}
func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListenBucketNotification")
defer logger.AuditLog(w, r, "ListenBucketNotification", mustGetClaimsFromToken(r))
// Validate if bucket exists.
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if !objAPI.IsNotificationSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
if !objAPI.IsListenBucketSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucketName := vars["bucket"]
values := r.URL.Query()
values.Set(peerRESTListenBucket, bucketName)
var prefix string
if len(values[peerRESTListenPrefix]) > 1 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrFilterNamePrefix), r.URL, guessIsBrowserReq(r))
return
}
if len(values[peerRESTListenPrefix]) == 1 {
if err := event.ValidateFilterRuleValue(values[peerRESTListenPrefix][0]); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
prefix = values[peerRESTListenPrefix][0]
}
var suffix string
if len(values[peerRESTListenSuffix]) > 1 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrFilterNameSuffix), r.URL, guessIsBrowserReq(r))
return
}
if len(values[peerRESTListenSuffix]) == 1 {
if err := event.ValidateFilterRuleValue(values[peerRESTListenSuffix][0]); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
suffix = values[peerRESTListenSuffix][0]
}
pattern := event.NewPattern(prefix, suffix)
var eventNames []event.Name
for _, s := range values[peerRESTListenEvents] {
eventName, err := event.ParseName(s)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
eventNames = append(eventNames, eventName)
}
if _, err := objAPI.GetBucketInfo(ctx, bucketName); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
rulesMap := event.NewRulesMap(eventNames, pattern, event.TargetID{ID: mustGetUUID()})
w.Header().Set(xhttp.ContentType, "text/event-stream")
// The listen publisher and peer-listen-client use nonblocking sends and hence do not wait for slow receivers.
// Use buffered channel to take care of burst sends or slow w.Write()
listenCh := make(chan interface{}, 4000)
peers := newPeerRestClients(globalEndpoints)
globalHTTPListen.Subscribe(listenCh, ctx.Done(), func(evI interface{}) bool {
ev, ok := evI.(event.Event)
if !ok {
return false
}
if ev.S3.Bucket.Name != values.Get(peerRESTListenBucket) {
return false
}
return rulesMap.MatchSimple(ev.EventName, ev.S3.Object.Key)
})
for _, peer := range peers {
if peer == nil {
continue
}
peer.Listen(listenCh, ctx.Done(), values)
}
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()
enc := json.NewEncoder(w)
for {
select {
case evI := <-listenCh:
ev := evI.(event.Event)
if len(string(ev.EventName)) > 0 {
if err := enc.Encode(struct{ Records []event.Event }{[]event.Event{ev}}); err != nil {
return
}
} else {
if _, err := w.Write([]byte(" ")); err != nil {
return
}
}
w.(http.Flusher).Flush()
case <-keepAliveTicker.C:
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
case <-ctx.Done():
return
}
}
}

View File

@@ -21,10 +21,12 @@ import (
"math"
"net/http"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/replication"
)
// BucketObjectLockSys - map of bucket and retention configuration.
@@ -33,7 +35,7 @@ type BucketObjectLockSys struct{}
// Get - Get retention configuration.
func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention, err error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return r, errServerNotInitialized
}
@@ -80,24 +82,25 @@ func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locke
// For objects in "Governance" mode, overwrite is allowed if a) object retention date is past OR
// governance bypass headers are set and user has governance bypass permissions.
// Objects in "Compliance" mode can be overwritten only if retention date is past.
func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucket string, object ObjectToDelete, getObjectInfoFn GetObjectInfoFn) APIErrorCode {
func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucket string, object ObjectToDelete, oi ObjectInfo, gerr error) APIErrorCode {
opts, err := getOpts(ctx, r, bucket, object.ObjectName)
if err != nil {
return toAPIErrorCode(ctx, err)
}
opts.VersionID = object.VersionID
oi, err := getObjectInfoFn(ctx, bucket, object.ObjectName, opts)
if err != nil {
switch err.(type) {
if gerr != nil { // error from GetObjectInfo
switch gerr.(type) {
case MethodNotAllowed: // This happens usually for a delete marker
if oi.DeleteMarker {
// Delete marker should be present and valid.
return ErrNone
}
}
return toAPIErrorCode(ctx, err)
if isErrObjectNotFound(gerr) || isErrVersionNotFound(gerr) {
return ErrNone
}
return toAPIErrorCode(ctx, gerr)
}
lhold := objectlock.GetObjectLegalHoldMeta(oi.UserDefined)
@@ -245,13 +248,13 @@ func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, bucket,
// For objects in "Compliance" mode, retention date cannot be shortened, and mode cannot be altered.
// For objects with legal hold header set, the s3:PutObjectLegalHold permission is expected to be set
// Both legal hold and retention can be applied independently on an object
func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, retentionPermErr, legalHoldPermErr APIErrorCode) (objectlock.RetMode, objectlock.RetentionDate, objectlock.ObjectLegalHold, APIErrorCode) {
func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, retentionPermErr, legalHoldPermErr APIErrorCode) (objectlock.RetMode, objectlock.RetentionDate, objectlock.ObjectLegalHold, APIErrorCode) {
var mode objectlock.RetMode
var retainDate objectlock.RetentionDate
var legalHold objectlock.ObjectLegalHold
retentionRequested := objectlock.IsObjectLockRetentionRequested(r.Header)
legalHoldRequested := objectlock.IsObjectLockLegalHoldRequested(r.Header)
retentionRequested := objectlock.IsObjectLockRetentionRequested(rq.Header)
legalHoldRequested := objectlock.IsObjectLockLegalHoldRequested(rq.Header)
retentionCfg, err := globalBucketObjectLockSys.Get(bucket)
if err != nil {
@@ -267,25 +270,24 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj
return mode, retainDate, legalHold, ErrNone
}
opts, err := getOpts(ctx, r, bucket, object)
opts, err := getOpts(ctx, rq, bucket, object)
if err != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
if opts.VersionID != "" {
replica := rq.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String()
if opts.VersionID != "" && !replica {
if objInfo, err := getObjectInfoFn(ctx, bucket, object, opts); err == nil {
r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return mode, retainDate, legalHold, ErrObjectLocked
}
if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) {
return mode, retainDate, legalHold, ErrObjectLocked
}
mode = r.Mode
retainDate = r.RetainUntilDate
legalHold = objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
@@ -298,17 +300,17 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj
if legalHoldRequested {
var lerr error
if legalHold, lerr = objectlock.ParseObjectLockLegalHoldHeaders(r.Header); lerr != nil {
if legalHold, lerr = objectlock.ParseObjectLockLegalHoldHeaders(rq.Header); lerr != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
}
if retentionRequested {
legalHold, err := objectlock.ParseObjectLockLegalHoldHeaders(r.Header)
legalHold, err := objectlock.ParseObjectLockLegalHoldHeaders(rq.Header)
if err != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
rMode, rDate, err := objectlock.ParseObjectLockRetentionHeaders(r.Header)
rMode, rDate, err := objectlock.ParseObjectLockRetentionHeaders(rq.Header)
if err != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
@@ -317,7 +319,9 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj
}
return rMode, rDate, legalHold, ErrNone
}
if replica { // replica inherits retention metadata only from source
return "", objectlock.RetentionDate{}, legalHold, ErrNone
}
if !retentionRequested && retentionCfg.Validity > 0 {
if retentionPermErr != ErrNone {
return mode, retainDate, legalHold, retentionPermErr

View File

@@ -40,7 +40,7 @@ const (
func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketPolicy")
defer logger.AuditLog(w, r, "PutBucketPolicy", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
@@ -106,7 +106,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketPolicy")
defer logger.AuditLog(w, r, "DeleteBucketPolicy", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
@@ -141,7 +141,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketPolicy")
defer logger.AuditLog(w, r, "GetBucketPolicy", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {

View File

@@ -252,7 +252,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// initialize HTTP NewRecorder; this records any mutations to the response writer inside the handler.
recV4 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testCase.bucketName),
reqV4, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", testCase.bucketName),
int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@@ -266,7 +266,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// initialize HTTP NewRecorder; this records any mutations to the response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testCase.bucketName),
reqV2, err := newTestSignedRequestV2(http.MethodPut, getPutPolicyURL("", testCase.bucketName),
int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@@ -283,7 +283,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// Bucket policy related functions don't support anonymous requests, so setting policies shouldn't make a difference.
bucketPolicyStr := fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)
// create unsigned HTTP request for PutBucketPolicyHandler.
anonReq, err := newTestRequest("PUT", getPutPolicyURL("", bucketName),
anonReq, err := newTestRequest(http.MethodPut, getPutPolicyURL("", bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)))
if err != nil {
@@ -302,7 +302,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// The only aim is to generate an HTTP request in a way that the relevant/registered endpoint is invoked/called.
nilBucket := "dummy-bucket"
nilReq, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", nilBucket),
nilReq, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", nilBucket),
0, nil, "", "", nil)
if err != nil {
@@ -344,7 +344,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// initialize HTTP NewRecorder; this records any mutations to the response writer inside the handler.
recV4 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testPolicy.bucketName),
reqV4, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", testPolicy.bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
@@ -358,7 +358,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// initialize HTTP NewRecorder; this records any mutations to the response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testPolicy.bucketName),
reqV2, err := newTestSignedRequestV2(http.MethodPut, getPutPolicyURL("", testPolicy.bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
@@ -415,7 +415,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// initialize HTTP NewRecorder; this records any mutations to the response writer inside the handler.
recV4 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("GET", getGetPolicyURL("", testCase.bucketName),
reqV4, err := newTestSignedRequestV4(http.MethodGet, getGetPolicyURL("", testCase.bucketName),
0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
@@ -454,7 +454,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// initialize HTTP NewRecorder; this records any mutations to the response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("GET", getGetPolicyURL("", testCase.bucketName),
reqV2, err := newTestSignedRequestV2(http.MethodGet, getGetPolicyURL("", testCase.bucketName),
0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
@@ -491,7 +491,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// Test for Anonymous/unsigned http request.
// Bucket policy related functions don't support anonymous requests, so setting policies shouldn't make a difference.
// create unsigned HTTP request for PutBucketPolicyHandler.
anonReq, err := newTestRequest("GET", getPutPolicyURL("", bucketName), 0, nil)
anonReq, err := newTestRequest(http.MethodGet, getPutPolicyURL("", bucketName), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
@@ -509,7 +509,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// The only aim is to generate an HTTP request in a way that the relevant/registered endpoint is invoked/called.
nilBucket := "dummy-bucket"
nilReq, err := newTestSignedRequestV4("GET", getGetPolicyURL("", nilBucket),
nilReq, err := newTestSignedRequestV4(http.MethodGet, getGetPolicyURL("", nilBucket),
0, nil, "", "", nil)
if err != nil {
@@ -589,7 +589,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
// initialize HTTP NewRecorder; this records any mutations to the response writer inside the handler.
recV4 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testPolicy.bucketName),
reqV4, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", testPolicy.bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
@@ -639,7 +639,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
// initialize HTTP NewRecorder; this records any mutations to the response writer inside the handler.
recV4 := httptest.NewRecorder()
// construct HTTP request for Delete bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("DELETE", getDeletePolicyURL("", testCase.bucketName),
reqV4, err := newTestSignedRequestV4(http.MethodDelete, getDeletePolicyURL("", testCase.bucketName),
0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
@@ -661,7 +661,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
// initialize HTTP NewRecorder; this records any mutations to the response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testPolicy.bucketName),
reqV2, err := newTestSignedRequestV2(http.MethodPut, getPutPolicyURL("", testPolicy.bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
@@ -678,7 +678,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
// initialize HTTP NewRecorder; this records any mutations to the response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for Delete bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("DELETE", getDeletePolicyURL("", testCase.bucketName),
reqV2, err := newTestSignedRequestV2(http.MethodDelete, getDeletePolicyURL("", testCase.bucketName),
0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
@@ -694,7 +694,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
// Test for Anonymous/unsigned http request.
// Bucket policy related functions don't support anonymous requests, so setting policies shouldn't make a difference.
// create unsigned HTTP request for PutBucketPolicyHandler.
anonReq, err := newTestRequest("DELETE", getPutPolicyURL("", bucketName), 0, nil)
anonReq, err := newTestRequest(http.MethodDelete, getPutPolicyURL("", bucketName), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
@@ -712,7 +712,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
// The only aim is to generate an HTTP request in a way that the relevant/registered endpoint is invoked/called.
nilBucket := "dummy-bucket"
nilReq, err := newTestSignedRequestV4("DELETE", getDeletePolicyURL("", nilBucket),
nilReq, err := newTestSignedRequestV4(http.MethodDelete, getDeletePolicyURL("", nilBucket),
0, nil, "", "", nil)
if err != nil {

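The edits above are mechanical: raw method strings such as "GET" are replaced by the constants net/http defines, so a misspelled method becomes a compile-time error instead of a silently wrong request. A minimal sketch of the idea (the endpoint URL is a placeholder):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// http.MethodDelete is an untyped string constant ("DELETE") from
	// net/http; a typo in the identifier fails to compile, while a typo
	// inside a string literal would only surface at runtime.
	req, err := http.NewRequest(http.MethodDelete, "http://localhost:9000/dummy-bucket?policy", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL.RawQuery)
}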

@@ -25,7 +25,7 @@ import (
"time"
jsoniter "github.com/json-iterator/go"
miniogopolicy "github.com/minio/minio-go/v6/pkg/policy"
miniogopolicy "github.com/minio/minio-go/v7/pkg/policy"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/policy"
@@ -68,6 +68,12 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
principalType := "Anonymous"
if username != "" {
principalType = "User"
if len(claims) > 0 {
principalType = "AssumedRole"
}
if username == globalActiveCred.AccessKey {
principalType = "Account"
}
}
vid := r.URL.Query().Get("versionId")
@@ -75,9 +81,6 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
if u, err := url.Parse(r.Header.Get(xhttp.AmzCopySource)); err == nil {
vid = u.Query().Get("versionId")
}
if vid == "" {
vid = r.Header.Get(xhttp.AmzCopySourceVersionID)
}
}
args := map[string][]string{
@@ -146,7 +149,12 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
for k, v := range claims {
vStr, ok := v.(string)
if ok {
args[k] = []string{vStr}
// Special case for AD/LDAP STS users
if k == ldapUser {
args["user"] = []string{vStr}
} else {
args[k] = []string{vStr}
}
}
}
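The hunks above widen the principal typing (Anonymous, User, AssumedRole, Account) and special-case the AD/LDAP user claim so policies can match it under a generic "user" condition key. A self-contained sketch of the claim-folding step, assuming a hypothetical ldapUser claim-key constant:

package main

import "fmt"

// ldapUser is a hypothetical stand-in for the claim key that identifies
// AD/LDAP STS users in the real code.
const ldapUser = "ldapUser"

// buildConditionArgs mirrors the loop above: every string-valued claim
// becomes a condition value, and the LDAP user claim is additionally
// exposed under the generic "user" key.
func buildConditionArgs(claims map[string]interface{}) map[string][]string {
	args := make(map[string][]string)
	for k, v := range claims {
		vStr, ok := v.(string)
		if !ok {
			continue
		}
		if k == ldapUser {
			args["user"] = []string{vStr}
		} else {
			args[k] = []string{vStr}
		}
	}
	return args
}

func main() {
	fmt.Println(buildConditionArgs(map[string]interface{}{ldapUser: "jdoe", "grp": "dev"}))
}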


@@ -22,9 +22,7 @@ import (
"fmt"
"time"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/madmin"
)
@@ -65,48 +63,43 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota,
}
func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64) error {
objAPI := newObjectLayerWithoutSafeModeFn()
objAPI := newObjectLayerFn()
if objAPI == nil {
return errServerNotInitialized
}
q, err := sys.Get(bucket)
if err != nil {
return nil
}
if q.Type == madmin.FIFOQuota {
return nil
}
if q.Quota == 0 {
// No quota set, return quickly.
return nil
}
sys.bucketStorageCache.Once.Do(func() {
sys.bucketStorageCache.TTL = 10 * time.Second
sys.bucketStorageCache.TTL = 1 * time.Second
sys.bucketStorageCache.Update = func() (interface{}, error) {
ctx, done := context.WithTimeout(context.Background(), 5*time.Second)
defer done()
return loadDataUsageFromBackend(ctx, objAPI)
}
})
v, err := sys.bucketStorageCache.Get()
q, err := sys.Get(bucket)
if err != nil {
return err
}
dui := v.(DataUsageInfo)
if q != nil && q.Type == madmin.HardQuota && q.Quota > 0 {
v, err := sys.bucketStorageCache.Get()
if err != nil {
return err
}
bui, ok := dui.BucketsUsage[bucket]
if !ok {
// bucket not found, cannot enforce quota
// the call will fail later anyway.
return nil
}
dui := v.(DataUsageInfo)
if (bui.Size + uint64(size)) > q.Quota {
return BucketQuotaExceeded{Bucket: bucket}
bui, ok := dui.BucketsUsage[bucket]
if !ok {
// bucket not found, cannot enforce quota
// the call will fail later anyway.
return nil
}
if (bui.Size + uint64(size)) >= q.Quota {
return BucketQuotaExceeded{Bucket: bucket}
}
}
return nil
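The quota check above consults cluster data usage through a lazily initialized cache with a 1-second TTL, so hard-quota checks do not hit the backend on every write. A minimal sketch of the same once-initialized TTL-cache pattern (the real code uses an internal helper type; this is an illustration, not that type):

package main

import (
	"fmt"
	"sync"
	"time"
)

// timedValue caches the result of Update for TTL, mirroring how
// bucketStorageCache is set up with Once.Do above.
type timedValue struct {
	sync.Mutex
	Once    sync.Once
	TTL     time.Duration
	Update  func() (interface{}, error)
	value   interface{}
	lastSet time.Time
}

func (t *timedValue) Get() (interface{}, error) {
	t.Lock()
	defer t.Unlock()
	if t.value != nil && time.Since(t.lastSet) < t.TTL {
		return t.value, nil // served from cache within the TTL
	}
	v, err := t.Update()
	if err != nil {
		return nil, err
	}
	t.value, t.lastSet = v, time.Now()
	return v, nil
}

func main() {
	var cache timedValue
	cache.Once.Do(func() {
		cache.TTL = time.Second
		cache.Update = func() (interface{}, error) { return time.Now(), nil }
	})
	v1, _ := cache.Get()
	v2, _ := cache.Get() // same value: the second read hits the cache
	fmt.Println(v1, v2)
}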
@@ -120,149 +113,112 @@ func enforceBucketQuota(ctx context.Context, bucket string, size int64) error {
return globalBucketQuotaSys.check(ctx, bucket, size)
}
const (
bgQuotaInterval = 1 * time.Hour
)
// initQuotaEnforcement starts the routine that deletes objects in bucket
// that exceeds the FIFO quota
func initQuotaEnforcement(ctx context.Context, objAPI ObjectLayer) {
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
go startBucketQuotaEnforcement(ctx, objAPI)
}
}
func startBucketQuotaEnforcement(ctx context.Context, objAPI ObjectLayer) {
for {
select {
case <-ctx.Done():
return
case <-time.NewTimer(bgQuotaInterval).C:
enforceFIFOQuota(ctx, objAPI)
}
}
}
// enforceFIFOQuota deletes objects in FIFO order until sufficient objects
// have been deleted so as to bring bucket usage within quota
func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) {
// Turn off quota enforcement if data usage info is unavailable.
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
// have been deleted so as to bring bucket usage within quota.
func enforceFIFOQuotaBucket(ctx context.Context, objectAPI ObjectLayer, bucket string, bui BucketUsageInfo) {
// Check if the current bucket has quota restrictions, if not skip it
cfg, err := globalBucketQuotaSys.Get(bucket)
if err != nil {
return
}
buckets, err := objectAPI.ListBuckets(ctx)
if cfg.Type != madmin.FIFOQuota {
return
}
var toFree uint64
if bui.Size > cfg.Quota && cfg.Quota > 0 {
toFree = bui.Size - cfg.Quota
}
if toFree <= 0 {
return
}
// Allocate new results channel to receive ObjectInfo.
objInfoCh := make(chan ObjectInfo)
versioned := globalBucketVersioningSys.Enabled(bucket)
// Walk through all objects
if err := objectAPI.Walk(ctx, bucket, "", objInfoCh, ObjectOptions{WalkVersions: versioned}); err != nil {
logger.LogIf(ctx, err)
return
}
// reuse the fileScorer used by disk cache to score entries by
// ModTime to find the oldest objects in bucket to delete. In
// the context of bucket quota enforcement - number of hits are
// irrelevant.
scorer, err := newFileScorer(toFree, time.Now().Unix(), 1)
if err != nil {
logger.LogIf(ctx, err)
return
}
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil {
logger.LogIf(ctx, err)
return
}
for _, binfo := range buckets {
bucket := binfo.Name
bui, ok := dataUsageInfo.BucketsUsage[bucket]
if !ok {
// bucket doesn't exist anymore, or we
// do not have any information to proceed.
continue
}
// Check if the current bucket has quota restrictions, if not skip it
cfg, err := globalBucketQuotaSys.Get(bucket)
if err != nil {
continue
}
if cfg.Type != madmin.FIFOQuota {
continue
}
var toFree uint64
if bui.Size > cfg.Quota && cfg.Quota > 0 {
toFree = bui.Size - cfg.Quota
}
if toFree == 0 {
continue
}
// Allocate new results channel to receive ObjectInfo.
objInfoCh := make(chan ObjectInfo)
versioned := globalBucketVersioningSys.Enabled(bucket)
// Walk through all objects
if err := objectAPI.Walk(ctx, bucket, "", objInfoCh, ObjectOptions{WalkVersions: versioned}); err != nil {
logger.LogIf(ctx, err)
continue
}
// reuse the fileScorer used by disk cache to score entries by
// ModTime to find the oldest objects in bucket to delete. In
// the context of bucket quota enforcement - number of hits are
// irrelevant.
scorer, err := newFileScorer(toFree, time.Now().Unix(), 1)
if err != nil {
logger.LogIf(ctx, err)
continue
}
rcfg, _ := globalBucketObjectLockSys.Get(bucket)
for obj := range objInfoCh {
if obj.DeleteMarker {
// Delete markers are automatically added for FIFO purge.
scorer.addFileWithObjInfo(obj, 1)
continue
}
// skip objects currently under retention
if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
continue
}
rcfg, _ := globalBucketObjectLockSys.Get(bucket)
for obj := range objInfoCh {
if obj.DeleteMarker {
// Delete markers are automatically added for FIFO purge.
scorer.addFileWithObjInfo(obj, 1)
continue
}
// skip objects currently under retention
if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
continue
}
scorer.addFileWithObjInfo(obj, 1)
}
// If we saw less than quota we are good.
if scorer.seenBytes <= cfg.Quota {
return
}
// Calculate how much we want to delete now.
toFreeNow := scorer.seenBytes - cfg.Quota
// We were less over quota than we thought. Adjust so we delete less.
// If we are more over, leave it for the next run to pick up.
if toFreeNow < toFree {
if !scorer.adjustSaveBytes(int64(toFreeNow) - int64(toFree)) {
// We got below or at quota.
return
}
}
var objects []ObjectToDelete
numKeys := len(scorer.fileObjInfos())
for i, obj := range scorer.fileObjInfos() {
objects = append(objects, ObjectToDelete{
ObjectName: obj.Name,
VersionID: obj.VersionID,
})
if len(objects) < maxDeleteList && (i < numKeys-1) {
// skip deletion until maxDeleteList or end of slice
continue
}
var objects []ObjectToDelete
numKeys := len(scorer.fileObjInfos())
for i, obj := range scorer.fileObjInfos() {
objects = append(objects, ObjectToDelete{
ObjectName: obj.Name,
VersionID: obj.VersionID,
})
if len(objects) < maxDeleteList && (i < numKeys-1) {
// skip deletion until maxDeleteList or end of slice
if len(objects) == 0 {
break
}
// Deletes a list of objects.
_, deleteErrs := objectAPI.DeleteObjects(ctx, bucket, objects, ObjectOptions{
Versioned: versioned,
})
for i := range deleteErrs {
if deleteErrs[i] != nil {
logger.LogIf(ctx, deleteErrs[i])
continue
}
if len(objects) == 0 {
break
}
// Deletes a list of objects.
_, deleteErrs := objectAPI.DeleteObjects(ctx, bucket, objects, ObjectOptions{
Versioned: versioned,
// Notify object deleted event.
sendEvent(eventArgs{
EventName: event.ObjectRemovedDelete,
BucketName: bucket,
Object: obj,
Host: "Internal: [FIFO-QUOTA-EXPIRY]",
})
for i := range deleteErrs {
if deleteErrs[i] != nil {
logger.LogIf(ctx, deleteErrs[i])
continue
}
// Notify object deleted event.
sendEvent(eventArgs{
EventName: event.ObjectRemovedDelete,
BucketName: bucket,
Object: obj,
Host: "Internal: [FIFO-QUOTA-EXPIRY]",
})
}
objects = nil
}
objects = nil
}
}
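enforceFIFOQuotaBucket walks the bucket, scores versions by ModTime, and deletes the oldest ones until enough bytes are freed, batching deletes up to maxDeleteList. A stripped-down sketch of the selection step, assuming a simplified version type (the real fileScorer also tracks seen bytes and adjusts the target between runs):

package main

import (
	"fmt"
	"sort"
	"time"
)

type version struct {
	Name    string
	Size    uint64
	ModTime time.Time
}

// pickOldest returns the oldest versions whose cumulative size reaches
// toFree, mimicking how FIFO-purge candidates are chosen.
func pickOldest(versions []version, toFree uint64) []version {
	sort.Slice(versions, func(i, j int) bool {
		return versions[i].ModTime.Before(versions[j].ModTime)
	})
	var freed uint64
	var picked []version
	for _, v := range versions {
		if freed >= toFree {
			break
		}
		picked = append(picked, v)
		freed += v.Size
	}
	return picked
}

func main() {
	now := time.Now()
	vs := []version{
		{"a", 100, now.Add(-3 * time.Hour)},
		{"b", 200, now.Add(-2 * time.Hour)},
		{"c", 300, now.Add(-1 * time.Hour)},
	}
	fmt.Println(pickOldest(vs, 250)) // oldest first: a then b
}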

cmd/bucket-replication.go (new file, 991 lines)

@@ -0,0 +1,991 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"fmt"
"net/http"
"reflect"
"strings"
"time"
minio "github.com/minio/minio-go/v7"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/bandwidth"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/event"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/madmin"
)
// gets replication config associated to a given bucket name.
func getReplicationConfig(ctx context.Context, bucketName string) (rc *replication.Config, err error) {
if globalIsGateway {
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
return nil, BucketReplicationConfigNotFound{Bucket: bucketName}
}
return globalBucketMetadataSys.GetReplicationConfig(ctx, bucketName)
}
// validateReplicationDestination returns an error if the replication destination bucket is missing or not configured
// It also returns true if replication destination is same as this server.
func validateReplicationDestination(ctx context.Context, bucket string, rCfg *replication.Config) (bool, error) {
arn, err := madmin.ParseARN(rCfg.RoleArn)
if err != nil {
return false, BucketRemoteArnInvalid{}
}
if arn.Type != madmin.ReplicationService {
return false, BucketRemoteArnTypeInvalid{}
}
clnt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rCfg.RoleArn)
if clnt == nil {
return false, BucketRemoteTargetNotFound{Bucket: bucket}
}
if found, _ := clnt.BucketExists(ctx, rCfg.GetDestination().Bucket); !found {
return false, BucketRemoteDestinationNotFound{Bucket: rCfg.GetDestination().Bucket}
}
if ret, err := globalBucketObjectLockSys.Get(bucket); err == nil {
if ret.LockEnabled {
lock, _, _, _, err := clnt.GetObjectLockConfig(ctx, rCfg.GetDestination().Bucket)
if err != nil || lock != "Enabled" {
return false, BucketReplicationDestinationMissingLock{Bucket: rCfg.GetDestination().Bucket}
}
}
}
// validate replication ARN against target endpoint
c, ok := globalBucketTargetSys.arnRemotesMap[rCfg.RoleArn]
if ok {
if c.EndpointURL().String() == clnt.EndpointURL().String() {
sameTarget, _ := isLocalHost(clnt.EndpointURL().Hostname(), clnt.EndpointURL().Port(), globalMinioPort)
return sameTarget, nil
}
}
return false, BucketRemoteTargetNotFound{Bucket: bucket}
}
func mustReplicateWeb(ctx context.Context, r *http.Request, bucket, object string, meta map[string]string, replStatus string, permErr APIErrorCode) (replicate bool, sync bool) {
if permErr != ErrNone {
return
}
return mustReplicater(ctx, bucket, object, meta, replStatus)
}
// mustReplicate returns 2 booleans - true if object meets replication criteria and true if replication is to be done in
// a synchronous manner.
func mustReplicate(ctx context.Context, r *http.Request, bucket, object string, meta map[string]string, replStatus string) (replicate bool, sync bool) {
if s3Err := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, "", r, iampolicy.GetReplicationConfigurationAction); s3Err != ErrNone {
return
}
return mustReplicater(ctx, bucket, object, meta, replStatus)
}
// mustReplicater returns 2 booleans - true if object meets replication criteria and true if replication is to be done in
// a synchronous manner.
func mustReplicater(ctx context.Context, bucket, object string, meta map[string]string, replStatus string) (replicate bool, sync bool) {
if globalIsGateway {
return replicate, sync
}
if rs, ok := meta[xhttp.AmzBucketReplicationStatus]; ok {
replStatus = rs
}
if replication.StatusType(replStatus) == replication.Replica {
return replicate, sync
}
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
return replicate, sync
}
opts := replication.ObjectOpts{
Name: object,
SSEC: crypto.SSEC.IsEncrypted(meta),
}
tagStr, ok := meta[xhttp.AmzObjectTagging]
if ok {
opts.UserTags = tagStr
}
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, cfg.RoleArn)
// the target online status should not be used here while deciding
// whether to replicate as the target could be temporarily down
if tgt != nil {
return cfg.Replicate(opts), tgt.replicateSync
}
return cfg.Replicate(opts), false
}
// Standard headers that need to be extracted from User metadata.
var standardHeaders = []string{
xhttp.ContentType,
xhttp.CacheControl,
xhttp.ContentEncoding,
xhttp.ContentLanguage,
xhttp.ContentDisposition,
xhttp.AmzStorageClass,
xhttp.AmzObjectTagging,
xhttp.AmzBucketReplicationStatus,
xhttp.AmzObjectLockMode,
xhttp.AmzObjectLockRetainUntilDate,
xhttp.AmzObjectLockLegalHold,
xhttp.AmzTagCount,
xhttp.AmzServerSideEncryption,
}
// returns true if any of the objects being deleted qualifies for replication.
func hasReplicationRules(ctx context.Context, bucket string, objects []ObjectToDelete) bool {
c, err := getReplicationConfig(ctx, bucket)
if err != nil || c == nil {
return false
}
for _, obj := range objects {
if c.HasActiveRules(obj.ObjectName, true) {
return true
}
}
return false
}
// isStandardHeader returns true if header is a supported header and not a custom header
func isStandardHeader(matchHeaderKey string) bool {
return equals(matchHeaderKey, standardHeaders...)
}
// returns whether the object version is a delete marker and if the object qualifies for replication
func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, gerr error) (dm, replicate, sync bool) {
rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil || rcfg == nil {
return false, false, sync
}
opts := replication.ObjectOpts{
Name: dobj.ObjectName,
SSEC: crypto.SSEC.IsEncrypted(oi.UserDefined),
UserTags: oi.UserTags,
DeleteMarker: oi.DeleteMarker,
VersionID: dobj.VersionID,
}
replicate = rcfg.Replicate(opts)
// when the incoming delete is removal of a delete marker (a.k.a. versioned delete),
// GetObjectInfo returns extra information even though it returns errFileNotFound
if gerr != nil {
validReplStatus := false
switch oi.ReplicationStatus {
case replication.Pending, replication.Completed, replication.Failed:
validReplStatus = true
}
if oi.DeleteMarker && (validReplStatus || replicate) {
return oi.DeleteMarker, true, sync
}
// can be the case that other cluster is down and duplicate `mc rm --vid`
// is issued - this still needs to be replicated back to the other target
return oi.DeleteMarker, oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed, sync
}
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rcfg.RoleArn)
// the target online status should not be used here while deciding
// whether to replicate deletes as the target could be temporarily down
if tgt == nil {
return oi.DeleteMarker, false, false
}
return oi.DeleteMarker, replicate, tgt.replicateSync
}
// replicate deletes to the designated replication target if replication configuration
// has delete marker replication or delete replication (MinIO extension to allow deletes where version id
// is specified) enabled.
// Similar to bucket replication for the PUT operation, soft delete (a.k.a. setting a delete marker) and
// permanent deletes (by specifying a version ID in the delete operation) have three states "Pending", "Complete"
// and "Failed" to mark the status of the replication of the "DELETE" operation. All failed operations can
// then be retried by healing. In the case of permanent deletes, until the replication is completed on the
// target cluster, the object version is marked deleted on the source and hidden from listing. It is permanently
// deleted from the source when the VersionPurgeStatus changes to "Complete", i.e after replication succeeds
// on target.
func replicateDelete(ctx context.Context, dobj DeletedObjectVersionInfo, objectAPI ObjectLayer) {
bucket := dobj.Bucket
versionID := dobj.DeleteMarkerVersionID
if versionID == "" {
versionID = dobj.VersionID
}
rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil || rcfg == nil {
logger.LogIf(ctx, err)
sendEvent(eventArgs{
BucketName: bucket,
Object: ObjectInfo{
Bucket: bucket,
Name: dobj.ObjectName,
VersionID: versionID,
DeleteMarker: dobj.DeleteMarker,
},
Host: "Internal: [Replication]",
EventName: event.ObjectReplicationNotTracked,
})
return
}
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rcfg.RoleArn)
if tgt == nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, rcfg.RoleArn))
sendEvent(eventArgs{
BucketName: bucket,
Object: ObjectInfo{
Bucket: bucket,
Name: dobj.ObjectName,
VersionID: versionID,
DeleteMarker: dobj.DeleteMarker,
},
Host: "Internal: [Replication]",
EventName: event.ObjectReplicationNotTracked,
})
return
}
rmErr := tgt.RemoveObject(ctx, rcfg.GetDestination().Bucket, dobj.ObjectName, miniogo.RemoveObjectOptions{
VersionID: versionID,
Internal: miniogo.AdvancedRemoveOptions{
ReplicationDeleteMarker: dobj.DeleteMarkerVersionID != "",
ReplicationMTime: dobj.DeleteMarkerMTime.Time,
ReplicationStatus: miniogo.ReplicationStatusReplica,
},
})
replicationStatus := dobj.DeleteMarkerReplicationStatus
versionPurgeStatus := dobj.VersionPurgeStatus
if rmErr != nil {
if dobj.VersionID == "" {
replicationStatus = string(replication.Failed)
} else {
versionPurgeStatus = Failed
}
logger.LogIf(ctx, fmt.Errorf("Unable to replicate delete marker to %s/%s(%s): %s", rcfg.GetDestination().Bucket, dobj.ObjectName, versionID, rmErr))
} else {
if dobj.VersionID == "" {
replicationStatus = string(replication.Completed)
} else {
versionPurgeStatus = Complete
}
}
var eventName = event.ObjectReplicationComplete
if replicationStatus == string(replication.Failed) || versionPurgeStatus == Failed {
eventName = event.ObjectReplicationFailed
}
// Update metadata on the delete marker or purge permanent delete if replication success.
dobjInfo, err := objectAPI.DeleteObject(ctx, bucket, dobj.ObjectName, ObjectOptions{
VersionID: versionID,
DeleteMarkerReplicationStatus: replicationStatus,
VersionPurgeStatus: versionPurgeStatus,
Versioned: globalBucketVersioningSys.Enabled(bucket),
VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
})
if err != nil && !isErrVersionNotFound(err) { // VersionNotFound would be reported by pool that object version is missing on.
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %s", bucket, dobj.ObjectName, versionID, err))
sendEvent(eventArgs{
BucketName: bucket,
Object: ObjectInfo{
Bucket: bucket,
Name: dobj.ObjectName,
VersionID: versionID,
DeleteMarker: dobj.DeleteMarker,
},
Host: "Internal: [Replication]",
EventName: eventName,
})
} else {
sendEvent(eventArgs{
BucketName: bucket,
Object: dobjInfo,
Host: "Internal: [Replication]",
EventName: eventName,
})
}
}
func getCopyObjMetadata(oi ObjectInfo, dest replication.Destination) map[string]string {
meta := make(map[string]string, len(oi.UserDefined))
for k, v := range oi.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
continue
}
if equals(k, xhttp.AmzBucketReplicationStatus) {
continue
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
meta[k] = v
}
if oi.ContentEncoding != "" {
meta[xhttp.ContentEncoding] = oi.ContentEncoding
}
if oi.ContentType != "" {
meta[xhttp.ContentType] = oi.ContentType
}
if oi.UserTags != "" {
meta[xhttp.AmzObjectTagging] = oi.UserTags
meta[xhttp.AmzTagDirective] = "REPLACE"
}
sc := dest.StorageClass
if sc == "" {
sc = oi.StorageClass
}
if sc != "" {
meta[xhttp.AmzStorageClass] = sc
}
meta[xhttp.MinIOSourceETag] = oi.ETag
meta[xhttp.MinIOSourceMTime] = oi.ModTime.Format(time.RFC3339Nano)
meta[xhttp.AmzBucketReplicationStatus] = replication.Replica.String()
return meta
}
type caseInsensitiveMap map[string]string
// Lookup map entry case insensitively.
func (m caseInsensitiveMap) Lookup(key string) (string, bool) {
if len(m) == 0 {
return "", false
}
for _, k := range []string{
key,
strings.ToLower(key),
http.CanonicalHeaderKey(key),
} {
v, ok := m[k]
if ok {
return v, ok
}
}
return "", false
}
func putReplicationOpts(ctx context.Context, dest replication.Destination, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
meta := make(map[string]string)
for k, v := range objInfo.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
continue
}
if isStandardHeader(k) {
continue
}
meta[k] = v
}
sc := dest.StorageClass
if sc == "" {
sc = objInfo.StorageClass
}
putOpts = miniogo.PutObjectOptions{
UserMetadata: meta,
ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding,
StorageClass: sc,
Internal: miniogo.AdvancedPutOptions{
SourceVersionID: objInfo.VersionID,
ReplicationStatus: miniogo.ReplicationStatusReplica,
SourceMTime: objInfo.ModTime,
SourceETag: objInfo.ETag,
},
}
if objInfo.UserTags != "" {
tag, _ := tags.ParseObjectTags(objInfo.UserTags)
if tag != nil {
putOpts.UserTags = tag.ToMap()
}
}
lkMap := caseInsensitiveMap(objInfo.UserDefined)
if lang, ok := lkMap.Lookup(xhttp.ContentLanguage); ok {
putOpts.ContentLanguage = lang
}
if disp, ok := lkMap.Lookup(xhttp.ContentDisposition); ok {
putOpts.ContentDisposition = disp
}
if cc, ok := lkMap.Lookup(xhttp.CacheControl); ok {
putOpts.CacheControl = cc
}
if mode, ok := lkMap.Lookup(xhttp.AmzObjectLockMode); ok {
rmode := miniogo.RetentionMode(mode)
putOpts.Mode = rmode
}
if retainDateStr, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok {
rdate, err := time.Parse(time.RFC3339, retainDateStr)
if err != nil {
return putOpts, err
}
putOpts.RetainUntilDate = rdate
}
if lhold, ok := lkMap.Lookup(xhttp.AmzObjectLockLegalHold); ok {
putOpts.LegalHold = miniogo.LegalHoldStatus(lhold)
}
if crypto.S3.IsEncrypted(objInfo.UserDefined) {
putOpts.ServerSideEncryption = encrypt.NewSSE()
}
return
}
type replicationAction string
const (
replicateMetadata replicationAction = "metadata"
replicateNone replicationAction = "none"
replicateAll replicationAction = "all"
)
// matches k1 with all keys, returns 'true' if one of them matches
func equals(k1 string, keys ...string) bool {
for _, k2 := range keys {
if strings.ToLower(k1) == strings.ToLower(k2) {
return true
}
}
return false
}
// returns replicationAction by comparing metadata between source and target
func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo) replicationAction {
// needs full replication
if oi1.ETag != oi2.ETag ||
oi1.VersionID != oi2.VersionID ||
oi1.Size != oi2.Size ||
oi1.DeleteMarker != oi2.IsDeleteMarker ||
oi1.ModTime.Unix() != oi2.LastModified.Unix() {
return replicateAll
}
if oi1.ContentType != oi2.ContentType {
return replicateMetadata
}
if oi1.ContentEncoding != "" {
enc, ok := oi2.Metadata[xhttp.ContentEncoding]
if !ok {
enc, ok = oi2.Metadata[strings.ToLower(xhttp.ContentEncoding)]
if !ok {
return replicateMetadata
}
}
if strings.Join(enc, ",") != oi1.ContentEncoding {
return replicateMetadata
}
}
t, _ := tags.ParseObjectTags(oi1.UserTags)
if !reflect.DeepEqual(oi2.UserTags, t.ToMap()) {
return replicateMetadata
}
// Compare only necessary headers
compareKeys := []string{
"Expires",
"Cache-Control",
"Content-Language",
"Content-Disposition",
"X-Amz-Object-Lock-Mode",
"X-Amz-Object-Lock-Retain-Until-Date",
"X-Amz-Object-Lock-Legal-Hold",
"X-Amz-Website-Redirect-Location",
"X-Amz-Meta-",
}
// compare metadata on both maps to see if meta is identical
compareMeta1 := make(map[string]string)
for k, v := range oi1.UserDefined {
var found bool
for _, prefix := range compareKeys {
if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
continue
}
found = true
break
}
if found {
compareMeta1[strings.ToLower(k)] = v
}
}
compareMeta2 := make(map[string]string)
for k, v := range oi2.Metadata {
var found bool
for _, prefix := range compareKeys {
if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
continue
}
found = true
break
}
if found {
compareMeta2[strings.ToLower(k)] = strings.Join(v, ",")
}
}
if !reflect.DeepEqual(compareMeta1, compareMeta2) {
return replicateMetadata
}
return replicateNone
}
// replicateObject replicates the specified version of the object to destination bucket
// The source object is then updated to reflect the replication status.
func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLayer) {
z, ok := objectAPI.(*erasureServerPools)
if !ok {
return
}
bucket := objInfo.Bucket
object := objInfo.Name
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
logger.LogIf(ctx, err)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
return
}
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, cfg.RoleArn)
if tgt == nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, cfg.RoleArn))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
return
}
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{
VersionID: objInfo.VersionID,
})
if err != nil {
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
logger.LogIf(ctx, err)
return
}
defer gr.Close() // hold read lock for entire transaction
objInfo = gr.ObjInfo
size, err := objInfo.GetActualSize()
if err != nil {
logger.LogIf(ctx, err)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
return
}
dest := cfg.GetDestination()
if dest.Bucket == "" {
logger.LogIf(ctx, fmt.Errorf("Unable to replicate object %s(%s), bucket is empty", objInfo.Name, objInfo.VersionID))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
return
}
rtype := replicateAll
oi, err := tgt.StatObject(ctx, dest.Bucket, object, miniogo.StatObjectOptions{
VersionID: objInfo.VersionID,
Internal: miniogo.AdvancedGetOptions{
ReplicationProxyRequest: "false",
}})
if err == nil {
rtype = getReplicationAction(objInfo, oi)
if rtype == replicateNone {
// object with same VersionID already exists, replication kicked off by
// PutObject might have completed.
return
}
}
replicationStatus := replication.Completed
// use core client to avoid doing multipart on PUT
c := &miniogo.Core{Client: tgt.Client}
if rtype != replicateAll {
// replicate metadata for object tagging/copy with metadata replacement
srcOpts := miniogo.CopySrcOptions{
Bucket: dest.Bucket,
Object: object,
VersionID: objInfo.VersionID}
dstOpts := miniogo.PutObjectOptions{Internal: miniogo.AdvancedPutOptions{SourceVersionID: objInfo.VersionID}}
if _, err = c.CopyObject(ctx, dest.Bucket, object, dest.Bucket, object, getCopyObjMetadata(objInfo, dest), srcOpts, dstOpts); err != nil {
replicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("Unable to replicate metadata for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err))
}
} else {
target, err := globalBucketMetadataSys.GetBucketTarget(bucket, cfg.RoleArn)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for replication bucket:%s cfg:%s err:%s", bucket, cfg.RoleArn, err))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
return
}
putOpts, err := putReplicationOpts(ctx, dest, objInfo)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for replication bucket:%s cfg:%s err:%w", bucket, cfg.RoleArn, err))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
return
}
// Setup bandwidth throttling
peers, _ := globalEndpoints.peers()
totalNodesCount := len(peers)
if totalNodesCount == 0 {
totalNodesCount = 1 // For standalone erasure coding
}
b := target.BandwidthLimit / int64(totalNodesCount)
var headerSize int
for k, v := range putOpts.Header() {
headerSize += len(k) + len(v)
}
// r takes over closing gr.
r := bandwidth.NewMonitoredReader(ctx, globalBucketMonitor, objInfo.Bucket, objInfo.Name, gr, headerSize, b, target.BandwidthLimit)
if _, err = c.PutObject(ctx, dest.Bucket, object, r, size, "", "", putOpts); err != nil {
replicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("Unable to replicate for object %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
}
defer r.Close()
}
objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
if objInfo.UserTags != "" {
objInfo.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
}
// FIXME: add support for missing replication events
// - event.ObjectReplicationMissedThreshold
// - event.ObjectReplicationReplicatedAfterThreshold
var eventName = event.ObjectReplicationComplete
if replicationStatus == replication.Failed {
eventName = event.ObjectReplicationFailed
}
// This lower level implementation is necessary to avoid write locks from CopyObject.
poolIdx, err := z.getPoolIdx(ctx, bucket, object, objInfo.Size)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
} else {
if err = z.serverPools[poolIdx].getHashedSet(object).updateObjectMeta(ctx, bucket, object, objInfo.UserDefined, ObjectOptions{
VersionID: objInfo.VersionID,
}); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
}
}
sendEvent(eventArgs{
EventName: eventName,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
}
// filterReplicationStatusMetadata filters replication status metadata for COPY
func filterReplicationStatusMetadata(metadata map[string]string) map[string]string {
// Copy on write
dst := metadata
var copied bool
delKey := func(key string) {
if _, ok := metadata[key]; !ok {
return
}
if !copied {
dst = make(map[string]string, len(metadata))
for k, v := range metadata {
dst[k] = v
}
copied = true
}
delete(dst, key)
}
delKey(xhttp.AmzBucketReplicationStatus)
return dst
}
// DeletedObjectVersionInfo has info on deleted object
type DeletedObjectVersionInfo struct {
DeletedObject
Bucket string
}
type replicationState struct {
// add future metrics here
replicaCh chan ObjectInfo
replicaDeleteCh chan DeletedObjectVersionInfo
}
func (r *replicationState) queueReplicaTask(oi ObjectInfo) {
if r == nil {
return
}
select {
case r.replicaCh <- oi:
default:
}
}
func (r *replicationState) queueReplicaDeleteTask(doi DeletedObjectVersionInfo) {
if r == nil {
return
}
select {
case r.replicaDeleteCh <- doi:
default:
}
}
var (
globalReplicationState *replicationState
)
func newReplicationState() *replicationState {
rs := &replicationState{
replicaCh: make(chan ObjectInfo, 10000),
replicaDeleteCh: make(chan DeletedObjectVersionInfo, 10000),
}
go func() {
<-GlobalContext.Done()
close(rs.replicaCh)
close(rs.replicaDeleteCh)
}()
return rs
}
// addWorker creates a new worker to process tasks
func (r *replicationState) addWorker(ctx context.Context, objectAPI ObjectLayer) {
// Add a new worker.
go func() {
for {
select {
case <-ctx.Done():
return
case oi, ok := <-r.replicaCh:
if !ok {
return
}
replicateObject(ctx, oi, objectAPI)
case doi, ok := <-r.replicaDeleteCh:
if !ok {
return
}
replicateDelete(ctx, doi, objectAPI)
}
}
}()
}
func initBackgroundReplication(ctx context.Context, objectAPI ObjectLayer) {
if globalReplicationState == nil {
return
}
// Start replication workers per count set in api config or MINIO_API_REPLICATION_WORKERS.
for i := 0; i < globalAPIConfig.getReplicationWorkers(); i++ {
globalReplicationState.addWorker(ctx, objectAPI)
}
}
// get Reader from replication target if active-active replication is in place and
// this node returns a 404
func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, proxy bool) {
tgt, oi, proxy, err := proxyHeadToRepTarget(ctx, bucket, object, opts)
if !proxy || err != nil {
return nil, false
}
fn, off, length, err := NewGetObjectReader(rs, oi, opts)
if err != nil {
return nil, false
}
gopts := miniogo.GetObjectOptions{
VersionID: opts.VersionID,
ServerSideEncryption: opts.ServerSideEncryption,
Internal: miniogo.AdvancedGetOptions{
ReplicationProxyRequest: "true",
},
}
// get correct offsets for encrypted object
if off >= 0 && length >= 0 {
if err := gopts.SetRange(off, off+length-1); err != nil {
return nil, false
}
}
// Make sure to match ETag when proxying.
if err = gopts.SetMatchETag(oi.ETag); err != nil {
return nil, false
}
c := miniogo.Core{Client: tgt.Client}
obj, _, _, err := c.GetObject(ctx, bucket, object, gopts)
if err != nil {
return nil, false
}
closeReader := func() { obj.Close() }
reader, err := fn(obj, h, opts.CheckPrecondFn, closeReader)
if err != nil {
return nil, false
}
reader.ObjInfo = oi.Clone()
return reader, true
}
// isProxyable returns true if a replication config is found for this bucket
func isProxyable(ctx context.Context, bucket string) bool {
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
return false
}
dest := cfg.GetDestination()
return dest.Bucket == bucket
}
func proxyHeadToRepTarget(ctx context.Context, bucket, object string, opts ObjectOptions) (tgt *TargetClient, oi ObjectInfo, proxy bool, err error) {
// this option is set when active-active replication is in place between site A -> B,
// and site B does not have the object yet.
if opts.ProxyRequest || (opts.ProxyHeaderSet && !opts.ProxyRequest) { // true only when site B sets MinIOSourceProxyRequest header
return nil, oi, false, nil
}
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
return nil, oi, false, err
}
dest := cfg.GetDestination()
if dest.Bucket != bucket { // not active-active
return nil, oi, false, err
}
ssec := false
if opts.ServerSideEncryption != nil {
ssec = opts.ServerSideEncryption.Type() == encrypt.SSEC
}
ropts := replication.ObjectOpts{
Name: object,
SSEC: ssec,
}
if !cfg.Replicate(ropts) { // no matching rule for object prefix
return nil, oi, false, nil
}
tgt = globalBucketTargetSys.GetRemoteTargetClient(ctx, cfg.RoleArn)
if tgt == nil || tgt.isOffline() {
return nil, oi, false, fmt.Errorf("target is offline or not configured")
}
gopts := miniogo.GetObjectOptions{
VersionID: opts.VersionID,
ServerSideEncryption: opts.ServerSideEncryption,
Internal: miniogo.AdvancedGetOptions{
ReplicationProxyRequest: "true",
},
}
objInfo, err := tgt.StatObject(ctx, dest.Bucket, object, gopts)
if err != nil {
return nil, oi, false, err
}
tags, _ := tags.MapToObjectTags(objInfo.UserTags)
oi = ObjectInfo{
Bucket: bucket,
Name: object,
ModTime: objInfo.LastModified,
Size: objInfo.Size,
ETag: objInfo.ETag,
VersionID: objInfo.VersionID,
IsLatest: objInfo.IsLatest,
DeleteMarker: objInfo.IsDeleteMarker,
ContentType: objInfo.ContentType,
Expires: objInfo.Expires,
StorageClass: objInfo.StorageClass,
ReplicationStatus: replication.StatusType(objInfo.ReplicationStatus),
UserTags: tags.String(),
}
oi.UserDefined = make(map[string]string, len(objInfo.Metadata))
for k, v := range objInfo.Metadata {
oi.UserDefined[k] = v[0]
}
ce, ok := oi.UserDefined[xhttp.ContentEncoding]
if !ok {
ce, ok = oi.UserDefined[strings.ToLower(xhttp.ContentEncoding)]
}
if ok {
oi.ContentEncoding = ce
}
return tgt, oi, true, nil
}
// get object info from replication target if active-active replication is in place and
// this node returns a 404
func proxyHeadToReplicationTarget(ctx context.Context, bucket, object string, opts ObjectOptions) (oi ObjectInfo, proxy bool, err error) {
_, oi, proxy, err = proxyHeadToRepTarget(ctx, bucket, object, opts)
return oi, proxy, err
}
func scheduleReplication(ctx context.Context, objInfo ObjectInfo, o ObjectLayer, sync bool) {
if sync {
replicateObject(ctx, objInfo, o)
} else {
globalReplicationState.queueReplicaTask(objInfo)
}
}
func scheduleReplicationDelete(ctx context.Context, dv DeletedObjectVersionInfo, o ObjectLayer, sync bool) {
if sync {
replicateDelete(ctx, dv, o)
} else {
globalReplicationState.queueReplicaDeleteTask(dv)
}
}
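The replication state above fans work out through buffered channels with a non-blocking send: when a queue is full the task is dropped rather than stalling the PUT/DELETE path, leaving the missed work for later retry (the delete path above notes that failed operations are retried by healing). A minimal sketch of that bounded-queue worker pattern:

package main

import (
	"fmt"
	"sync"
)

type task struct{ bucket, object string }

// queue sends without blocking: when the buffer is full the task is
// dropped rather than stalling the caller, matching queueReplicaTask above.
func queue(ch chan<- task, t task) bool {
	select {
	case ch <- t:
		return true
	default:
		return false
	}
}

func main() {
	ch := make(chan task, 4) // the real channels are sized at 10000
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ { // worker count is configurable in MinIO
		wg.Add(1)
		go func() {
			defer wg.Done()
			for t := range ch { // exits when the channel is closed
				fmt.Println("replicating", t.bucket+"/"+t.object)
			}
		}()
	}
	queue(ch, task{"srcbucket", "obj1"})
	queue(ch, task{"srcbucket", "obj2"})
	close(ch)
	wg.Wait()
}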

cmd/bucket-targets.go (new file, 471 lines)

@@ -0,0 +1,471 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"net/http"
"strings"
"sync"
"sync/atomic"
"time"
minio "github.com/minio/minio-go/v7"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/pkg/bucket/versioning"
"github.com/minio/minio/pkg/madmin"
)
const (
defaultHealthCheckDuration = 100 * time.Second
)
// BucketTargetSys represents bucket targets subsystem
type BucketTargetSys struct {
sync.RWMutex
arnRemotesMap map[string]*TargetClient
targetsMap map[string][]madmin.BucketTarget
}
// ListTargets lists bucket targets across the tenant or for an individual bucket, and returns
// results filtered by arnType
func (sys *BucketTargetSys) ListTargets(ctx context.Context, bucket, arnType string) (targets []madmin.BucketTarget) {
if bucket != "" {
if ts, err := sys.ListBucketTargets(ctx, bucket); err == nil {
for _, t := range ts.Targets {
if string(t.Type) == arnType || arnType == "" {
targets = append(targets, t.Clone())
}
}
}
return targets
}
sys.RLock()
defer sys.RUnlock()
for _, tgts := range sys.targetsMap {
for _, t := range tgts {
if string(t.Type) == arnType || arnType == "" {
targets = append(targets, t.Clone())
}
}
}
return
}
// ListBucketTargets - gets list of bucket targets for this bucket.
func (sys *BucketTargetSys) ListBucketTargets(ctx context.Context, bucket string) (*madmin.BucketTargets, error) {
sys.RLock()
defer sys.RUnlock()
tgts, ok := sys.targetsMap[bucket]
if ok {
return &madmin.BucketTargets{Targets: tgts}, nil
}
return nil, BucketRemoteTargetNotFound{Bucket: bucket}
}
// SetTarget - sets a new minio-go client target for this bucket.
func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *madmin.BucketTarget, update bool) error {
if globalIsGateway {
return nil
}
if !tgt.Type.IsValid() && !update {
return BucketRemoteArnTypeInvalid{Bucket: bucket}
}
clnt, err := sys.getRemoteTargetClient(tgt)
if err != nil {
return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
}
// validate if target credentials are ok
if _, err = clnt.BucketExists(ctx, tgt.TargetBucket); err != nil {
if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
}
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
}
if tgt.Type == madmin.ReplicationService {
if !globalIsErasure {
return NotImplemented{}
}
if !globalBucketVersioningSys.Enabled(bucket) {
return BucketReplicationSourceNotVersioned{Bucket: bucket}
}
vcfg, err := clnt.GetBucketVersioning(ctx, tgt.TargetBucket)
if err != nil {
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
}
if vcfg.Status != string(versioning.Enabled) {
return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
}
}
if tgt.Type == madmin.ILMService {
if globalBucketVersioningSys.Enabled(bucket) {
vcfg, err := clnt.GetBucketVersioning(ctx, tgt.TargetBucket)
if err != nil {
if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
}
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
}
if vcfg.Status != string(versioning.Enabled) {
return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
}
}
}
sys.Lock()
defer sys.Unlock()
tgts := sys.targetsMap[bucket]
newtgts := make([]madmin.BucketTarget, len(tgts))
labels := make(map[string]struct{}, len(tgts))
found := false
for idx, t := range tgts {
labels[t.Label] = struct{}{}
if t.Type == tgt.Type {
if t.Arn == tgt.Arn && !update {
return BucketRemoteAlreadyExists{Bucket: t.TargetBucket}
}
if t.Label == tgt.Label && !update {
return BucketRemoteLabelInUse{Bucket: t.TargetBucket}
}
newtgts[idx] = *tgt
found = true
continue
}
newtgts[idx] = t
}
if _, ok := labels[tgt.Label]; ok && !update {
return BucketRemoteLabelInUse{Bucket: tgt.TargetBucket}
}
if !found && !update {
newtgts = append(newtgts, *tgt)
}
sys.targetsMap[bucket] = newtgts
sys.arnRemotesMap[tgt.Arn] = clnt
return nil
}
// RemoveTarget - removes a remote bucket target for this source bucket.
func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr string) error {
if globalIsGateway {
return nil
}
if arnStr == "" {
return BucketRemoteArnInvalid{Bucket: bucket}
}
arn, err := madmin.ParseARN(arnStr)
if err != nil {
return BucketRemoteArnInvalid{Bucket: bucket}
}
if arn.Type == madmin.ReplicationService {
if !globalIsErasure {
return NotImplemented{}
}
// reject removal of remote target if replication configuration is present
rcfg, err := getReplicationConfig(ctx, bucket)
if err == nil && rcfg.RoleArn == arnStr {
if _, ok := sys.arnRemotesMap[arnStr]; ok {
return BucketRemoteRemoveDisallowed{Bucket: bucket}
}
}
}
if arn.Type == madmin.ILMService {
// reject removal of remote target if lifecycle transition uses this arn
config, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
if err == nil && transitionSCInUse(ctx, config, bucket, arnStr) {
if _, ok := sys.arnRemotesMap[arnStr]; ok {
return BucketRemoteRemoveDisallowed{Bucket: bucket}
}
}
}
// delete ARN type from list of matching targets
sys.Lock()
defer sys.Unlock()
found := false
tgts, ok := sys.targetsMap[bucket]
if !ok {
return BucketRemoteTargetNotFound{Bucket: bucket}
}
targets := make([]madmin.BucketTarget, 0, len(tgts))
for _, tgt := range tgts {
if tgt.Arn != arnStr {
targets = append(targets, tgt)
continue
}
found = true
}
if !found {
return BucketRemoteTargetNotFound{Bucket: bucket}
}
sys.targetsMap[bucket] = targets
delete(sys.arnRemotesMap, arnStr)
return nil
}
// GetRemoteTargetClient returns minio-go client for replication target instance
func (sys *BucketTargetSys) GetRemoteTargetClient(ctx context.Context, arn string) *TargetClient {
sys.RLock()
defer sys.RUnlock()
return sys.arnRemotesMap[arn]
}
// GetRemoteTargetWithLabel returns bucket target given a target label
func (sys *BucketTargetSys) GetRemoteTargetWithLabel(ctx context.Context, bucket, targetLabel string) *madmin.BucketTarget {
sys.RLock()
defer sys.RUnlock()
for _, t := range sys.targetsMap[bucket] {
if strings.ToUpper(t.Label) == strings.ToUpper(targetLabel) {
tgt := t.Clone()
return &tgt
}
}
return nil
}
// GetRemoteArnWithLabel returns bucket target's ARN given its target label
func (sys *BucketTargetSys) GetRemoteArnWithLabel(ctx context.Context, bucket, tgtLabel string) *madmin.ARN {
tgt := sys.GetRemoteTargetWithLabel(ctx, bucket, tgtLabel)
if tgt == nil {
return nil
}
arn, err := madmin.ParseARN(tgt.Arn)
if err != nil {
return nil
}
return arn
}
// GetRemoteLabelWithArn returns a bucket target's label given its ARN
func (sys *BucketTargetSys) GetRemoteLabelWithArn(ctx context.Context, bucket, arnStr string) string {
sys.RLock()
defer sys.RUnlock()
for _, t := range sys.targetsMap[bucket] {
if t.Arn == arnStr {
return t.Label
}
}
return ""
}
// NewBucketTargetSys - creates new replication system.
func NewBucketTargetSys() *BucketTargetSys {
return &BucketTargetSys{
arnRemotesMap: make(map[string]*TargetClient),
targetsMap: make(map[string][]madmin.BucketTarget),
}
}
// Init initializes the bucket targets subsystem for buckets which have targets configured.
func (sys *BucketTargetSys) Init(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
if objAPI == nil {
return errServerNotInitialized
}
// In gateway mode, bucket targets is not supported.
if globalIsGateway {
return nil
}
// Load bucket targets once during boot in background.
go sys.load(ctx, buckets, objAPI)
return nil
}
// UpdateAllTargets updates targets to reflect metadata updates
func (sys *BucketTargetSys) UpdateAllTargets(bucket string, tgts *madmin.BucketTargets) {
if sys == nil {
return
}
sys.Lock()
defer sys.Unlock()
if tgts == nil || tgts.Empty() {
// remove target and arn association
if tgts, ok := sys.targetsMap[bucket]; ok {
for _, t := range tgts {
delete(sys.arnRemotesMap, t.Arn)
}
}
delete(sys.targetsMap, bucket)
return
}
if len(tgts.Targets) > 0 {
sys.targetsMap[bucket] = tgts.Targets
}
for _, tgt := range tgts.Targets {
tgtClient, err := sys.getRemoteTargetClient(&tgt)
if err != nil {
continue
}
sys.arnRemotesMap[tgt.Arn] = tgtClient
}
sys.targetsMap[bucket] = tgts.Targets
}
// create minio-go clients for buckets having remote targets
func (sys *BucketTargetSys) load(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) {
for _, bucket := range buckets {
cfg, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket.Name)
if err != nil {
continue
}
if cfg == nil || cfg.Empty() {
continue
}
if len(cfg.Targets) > 0 {
sys.targetsMap[bucket.Name] = cfg.Targets
}
for _, tgt := range cfg.Targets {
tgtClient, err := sys.getRemoteTargetClient(&tgt)
if err != nil {
continue
}
sys.arnRemotesMap[tgt.Arn] = tgtClient
}
sys.targetsMap[bucket.Name] = cfg.Targets
}
}
// getRemoteTargetInstanceTransport contains a singleton roundtripper.
var getRemoteTargetInstanceTransport http.RoundTripper
var getRemoteTargetInstanceTransportOnce sync.Once
// Returns a minio-go Client configured to access remote host described in replication target config.
func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*TargetClient, error) {
config := tcfg.Credentials
creds := credentials.NewStaticV4(config.AccessKey, config.SecretKey, "")
getRemoteTargetInstanceTransportOnce.Do(func() {
getRemoteTargetInstanceTransport = newGatewayHTTPTransport(10 * time.Minute)
})
api, err := minio.New(tcfg.Endpoint, &miniogo.Options{
Creds: creds,
Secure: tcfg.Secure,
Region: tcfg.Region,
Transport: getRemoteTargetInstanceTransport,
})
if err != nil {
return nil, err
}
hcDuration := defaultHealthCheckDuration
if tcfg.HealthCheckDuration >= 1 { // require minimum health check duration of 1 sec.
hcDuration = tcfg.HealthCheckDuration
}
tc := &TargetClient{
Client: api,
healthCheckDuration: hcDuration,
bucket: tcfg.TargetBucket,
replicateSync: tcfg.ReplicationSync,
}
go tc.healthCheck()
return tc, nil
}
// getRemoteARN gets existing ARN for an endpoint or generates a new one.
func (sys *BucketTargetSys) getRemoteARN(bucket string, target *madmin.BucketTarget) string {
if target == nil {
return ""
}
tgts := sys.targetsMap[bucket]
for _, tgt := range tgts {
if tgt.Type == target.Type && tgt.TargetBucket == target.TargetBucket && target.URL().String() == tgt.URL().String() {
return tgt.Arn
}
}
if !madmin.ServiceType(target.Type).IsValid() {
return ""
}
return generateARN(target)
}
// generate ARN that is unique to this target type
func generateARN(t *madmin.BucketTarget) string {
hash := sha256.New()
hash.Write([]byte(t.Type))
hash.Write([]byte(t.Region))
hash.Write([]byte(t.TargetBucket))
hashSum := hex.EncodeToString(hash.Sum(nil))
arn := madmin.ARN{
Type: t.Type,
ID: hashSum,
Region: t.Region,
Bucket: t.TargetBucket,
}
return arn.String()
}
// Returns parsed target config. If KMS is configured, remote target is decrypted
func parseBucketTargetConfig(bucket string, cdata, cmetadata []byte) (*madmin.BucketTargets, error) {
var (
data []byte
err error
t madmin.BucketTargets
meta map[string]string
)
if len(cdata) == 0 {
return nil, nil
}
data = cdata
if len(cmetadata) != 0 {
if err := json.Unmarshal(cmetadata, &meta); err != nil {
return nil, err
}
if crypto.S3.IsEncrypted(meta) {
if data, err = decryptBucketMetadata(cdata, bucket, meta, crypto.Context{bucket: bucket, bucketTargetsFile: bucketTargetsFile}); err != nil {
return nil, err
}
}
}
if err = json.Unmarshal(data, &t); err != nil {
return nil, err
}
return &t, nil
}
// TargetClient is the struct for remote target client.
type TargetClient struct {
*miniogo.Client
up int32
healthCheckDuration time.Duration
bucket string // remote bucket target
replicateSync bool
}
func (tc *TargetClient) isOffline() bool {
return atomic.LoadInt32(&tc.up) == 0
}
func (tc *TargetClient) healthCheck() {
for {
_, err := tc.BucketExists(GlobalContext, tc.bucket)
if err != nil {
atomic.StoreInt32(&tc.up, 0)
time.Sleep(tc.healthCheckDuration)
continue
}
atomic.StoreInt32(&tc.up, 1)
time.Sleep(tc.healthCheckDuration)
}
}
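TargetClient tracks remote availability with an atomic flag: a background goroutine probes the target bucket on a fixed cadence, and isOffline is a lock-free read. A reduced sketch of the pattern, with a probe callback standing in for the BucketExists call:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type target struct {
	up int32 // 1 = online, 0 = offline
}

func (t *target) isOffline() bool { return atomic.LoadInt32(&t.up) == 0 }

// healthCheck flips the flag after each probe and sleeps for the
// configured duration, like the health checker above.
func (t *target) healthCheck(probe func() error, every time.Duration, stop <-chan struct{}) {
	for {
		if probe() != nil {
			atomic.StoreInt32(&t.up, 0)
		} else {
			atomic.StoreInt32(&t.up, 1)
		}
		select {
		case <-stop:
			return
		case <-time.After(every):
		}
	}
}

func main() {
	t := &target{}
	stop := make(chan struct{})
	go t.healthCheck(func() error { return nil }, 10*time.Millisecond, stop)
	time.Sleep(30 * time.Millisecond)
	fmt.Println("offline?", t.isOffline())
	close(stop)
}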


@@ -40,7 +40,7 @@ const (
func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketVersioning")
defer logger.AuditLog(w, r, "PutBucketVersioning", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -70,6 +70,14 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
}, r.URL, guessIsBrowserReq(r))
return
}
if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
writeErrorResponse(ctx, w, APIError{
Code: "InvalidBucketState",
Description: "A replication configuration is present on this bucket, so the versioning state cannot be changed.",
HTTPStatusCode: http.StatusConflict,
}, r.URL, guessIsBrowserReq(r))
return
}
configData, err := xml.Marshal(v)
if err != nil {
@@ -90,7 +98,7 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketVersioning")
defer logger.AuditLog(w, r, "GetBucketVersioning", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
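The guard added above makes versioning suspension fail with InvalidBucketState (HTTP 409) while a replication configuration exists on the bucket. Seen from a client, a sketch using minio-go v7 (endpoint, credentials, and bucket name are placeholders):

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	clnt, err := minio.New("localhost:9000", &minio.Options{
		Creds: credentials.NewStaticV4("minioadmin", "minioadmin", ""),
	})
	if err != nil {
		log.Fatal(err)
	}
	// On a bucket with replication configured this is expected to be
	// rejected with InvalidBucketState per the handler change above.
	if err := clnt.SuspendVersioning(context.Background(), "replicated-bucket"); err != nil {
		log.Println("suspend rejected:", err)
	}
}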


@@ -51,6 +51,11 @@ func (sys *BucketVersioningSys) Get(bucket string) (*versioning.Versioning, erro
return globalBucketMetadataSys.GetVersioningConfig(bucket)
}
// Reset BucketVersioningSys to initial state.
func (sys *BucketVersioningSys) Reset() {
// There is currently no internal state.
}
// NewBucketVersioningSys - creates new versioning system.
func NewBucketVersioningSys() *BucketVersioningSys {
return &BucketVersioningSys{}


@@ -17,38 +17,72 @@
package cmd
import (
"context"
"crypto/x509"
"encoding/gob"
"errors"
"fmt"
"math/rand"
"net"
"net/url"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"time"
"github.com/fatih/color"
dns2 "github.com/miekg/dns"
"github.com/minio/cli"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/cmd/config"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/certs"
"github.com/minio/minio/pkg/console"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/handlers"
)
// serverDebugLog will enable debug printing
var serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn
func init() {
rand.Seed(time.Now().UTC().UnixNano())
logger.Init(GOPATH, GOROOT)
logger.RegisterError(config.FmtError)
// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(GlobalContext)
logger.AddTarget(globalConsoleSys)
// Inject into config package.
config.Logger.Info = logger.Info
config.Logger.LogIf = logger.LogIf
globalDNSCache = xhttp.NewDNSCache(10*time.Second, 10*time.Second, logger.LogOnceIf)
initGlobalContext()
globalForwarder = handlers.NewForwarder(&handlers.Forwarder{
PassHost: true,
RoundTripper: newGatewayHTTPTransport(1 * time.Hour),
Logger: func(err error) {
if err != nil && !errors.Is(err, context.Canceled) {
logger.LogIf(GlobalContext, err)
}
},
})
globalReplicationState = newReplicationState()
globalTransitionState = newTransitionState()
console.SetColor("Debug", color.New())
gob.Register(StorageErr(""))
}
func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
if (globalAutoEncryption || GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
if (GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
logger.Fatal(errInvalidArgument,
"Encryption support is requested but '%s' does not support encryption", name)
}
@@ -60,25 +94,50 @@ func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
}
}
globalCompressConfigMu.Lock()
if globalCompressConfig.Enabled && !objAPI.IsCompressionSupported() {
logger.Fatal(errInvalidArgument,
"Compression support is requested but '%s' does not support compression", name)
}
globalCompressConfigMu.Unlock()
}
// Check for updates and print a notification message
func checkUpdate(mode string) {
// It's OK to ignore any errors during doUpdate() here.
if updateMsg, _, currentReleaseTime, latestReleaseTime, err := getUpdateInfo(2*time.Second, mode); err == nil {
if updateMsg == "" {
return
}
if globalInplaceUpdateDisabled {
logStartupMessage(updateMsg)
} else {
logStartupMessage(prepareUpdateMessage("Run `mc admin update`", latestReleaseTime.Sub(currentReleaseTime)))
}
updateURL := minioReleaseInfoURL
if runtime.GOOS == globalWindowsOSName {
updateURL = minioReleaseWindowsInfoURL
}
u, err := url.Parse(updateURL)
if err != nil {
return
}
// It's OK to ignore any errors during doUpdate() here.
crTime, err := GetCurrentReleaseTime()
if err != nil {
return
}
_, lrTime, err := getLatestReleaseTime(u, 2*time.Second, mode)
if err != nil {
return
}
var older time.Duration
var downloadURL string
if lrTime.After(crTime) {
older = lrTime.Sub(crTime)
downloadURL = getDownloadURL(releaseTimeToReleaseTag(lrTime))
}
updateMsg := prepareUpdateMessage(downloadURL, older)
if updateMsg == "" {
return
}
logStartupMessage(prepareUpdateMessage("Run `mc admin update`", lrTime.Sub(crTime)))
}
func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() string) (*ConfigDir, bool) {
@@ -200,6 +259,14 @@ func handleCommonEnvVars() {
}
globalDomainNames = append(globalDomainNames, domainName)
}
+ sort.Strings(globalDomainNames)
+ lcpSuf := lcpSuffix(globalDomainNames)
+ for _, domainName := range globalDomainNames {
+ if domainName == lcpSuf && len(globalDomainNames) > 1 {
+ logger.Fatal(config.ErrOverlappingDomainValue(nil).Msg("Overlapping domains `%s` not allowed", globalDomainNames),
+ "Invalid MINIO_DOMAIN value in environment variable")
+ }
+ }
}
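lcpSuffix computes the longest common suffix of the configured domains; if one configured domain equals that suffix while others extend it, bucket-style DNS names under the parent domain would be ambiguous, so startup aborts. A hedged stand-in for the helper (the repository's implementation may differ):

```go
package main

import (
	"fmt"
	"strings"
)

// commonSuffix returns the longest suffix shared by every string in ss;
// an illustrative stand-in for lcpSuffix, not the project's code.
func commonSuffix(ss []string) string {
	if len(ss) == 0 {
		return ""
	}
	suf := ss[0]
	for _, s := range ss[1:] {
		for !strings.HasSuffix(s, suf) {
			suf = suf[1:] // shrink from the left until it matches
		}
	}
	return suf
}

func main() {
	domains := []string{"example.com", "s3.example.com"}
	suf := commonSuffix(domains) // "example.com"
	for _, d := range domains {
		if d == suf && len(domains) > 1 {
			// example.com equals the shared suffix: overlapping, rejected.
			fmt.Printf("overlapping domains %v not allowed\n", domains)
		}
	}
}
```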
publicIPs := env.Get(config.EnvPublicIPs, "")
@@ -223,7 +290,11 @@ func handleCommonEnvVars() {
} else {
// Add the found interface IP addresses to the global domain IPs;
// loopback addresses are naturally dropped.
- updateDomainIPs(mustGetLocalIP4())
+ domainIPs := mustGetLocalIP4()
+ for _, host := range globalEndpoints.Hostnames() {
+ domainIPs.Add(host)
+ }
+ updateDomainIPs(domainIPs)
}
// In place update is true by default if the MINIO_UPDATE is not set
@@ -241,6 +312,16 @@ func handleCommonEnvVars() {
globalConfigEncrypted = true
}
+ if env.IsSet(config.EnvRootUser) || env.IsSet(config.EnvRootPassword) {
+ cred, err := auth.CreateCredentials(env.Get(config.EnvRootUser, ""), env.Get(config.EnvRootPassword, ""))
+ if err != nil {
+ logger.Fatal(config.ErrInvalidCredentials(err),
+ "Unable to validate credentials inherited from the shell environment")
+ }
+ globalActiveCred = cred
+ globalConfigEncrypted = true
+ }
if env.IsSet(config.EnvAccessKeyOld) && env.IsSet(config.EnvSecretKeyOld) {
oldCred, err := auth.CreateCredentials(env.Get(config.EnvAccessKeyOld, ""), env.Get(config.EnvSecretKeyOld, ""))
if err != nil {
@@ -251,6 +332,17 @@ func handleCommonEnvVars() {
os.Unsetenv(config.EnvAccessKeyOld)
os.Unsetenv(config.EnvSecretKeyOld)
}
+ if env.IsSet(config.EnvRootUserOld) && env.IsSet(config.EnvRootPasswordOld) {
+ oldCred, err := auth.CreateCredentials(env.Get(config.EnvRootUserOld, ""), env.Get(config.EnvRootPasswordOld, ""))
+ if err != nil {
+ logger.Fatal(config.ErrInvalidCredentials(err),
+ "Unable to validate the old credentials inherited from the shell environment")
+ }
+ globalOldCred = oldCred
+ os.Unsetenv(config.EnvRootUserOld)
+ os.Unsetenv(config.EnvRootPasswordOld)
+ }
}
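Paired with the new MINIO_ROOT_USER/MINIO_ROOT_PASSWORD block, these *_OLD variables let an operator rotate the secret that encrypts the backend config in one restart: the server can decrypt with the old credentials and re-seal with the new ones before unsetting the old pair. A rough sketch of that re-keying step, assuming the madmin.EncryptData/DecryptData helpers MinIO uses for config payloads behave as shown:

```go
package rotation

import (
	"bytes"

	"github.com/minio/minio/pkg/madmin"
)

// reencryptConfig sketches the re-keying the *_OLD variables enable:
// decrypt a config payload with the old secret key, then seal it again
// with the new one. The surrounding migration logic is elided.
func reencryptConfig(oldSecret, newSecret string, ciphertext []byte) ([]byte, error) {
	plaintext, err := madmin.DecryptData(oldSecret, bytes.NewReader(ciphertext))
	if err != nil {
		return nil, err
	}
	return madmin.EncryptData(newSecret, plaintext)
}
```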
func logStartupMessage(msg string) {
@@ -260,7 +352,7 @@ func logStartupMessage(msg string) {
logger.StartupMessage(msg)
}
- func getTLSConfig() (x509Certs []*x509.Certificate, c *certs.Certs, secureConn bool, err error) {
+ func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) {
if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
return nil, nil, false, nil
}
@@ -269,11 +361,71 @@ func getTLSConfig() (x509Certs []*x509.Certificate, c *certs.Certs, secureConn b
return nil, nil, false, err
}
- c, err = certs.New(getPublicCertFile(), getPrivateKeyFile(), config.LoadX509KeyPair)
+ manager, err = certs.NewManager(GlobalContext, getPublicCertFile(), getPrivateKeyFile(), config.LoadX509KeyPair)
if err != nil {
return nil, nil, false, err
}
+ // MinIO supports multiple certificates. It expects the following structure:
+ // certs/
+ // │
+ // ├─ public.crt
+ // ├─ private.key
+ // │
+ // ├─ example.com/
+ // │ │
+ // │ ├─ public.crt
+ // │ └─ private.key
+ // └─ foobar.org/
+ // │
+ // ├─ public.crt
+ // └─ private.key
+ // ...
+ //
+ // Therefore, we read all filenames in the cert directory and check
+ // for each directory whether it contains a public.crt and private.key.
+ // If so, we try to add it to the certificate manager.
+ root, err := os.Open(globalCertsDir.Get())
+ if err != nil {
+ return nil, nil, false, err
+ }
+ defer root.Close()
+ files, err := root.Readdir(-1)
+ if err != nil {
+ return nil, nil, false, err
+ }
+ for _, file := range files {
+ // Ignore all
+ // - regular files
+ // - the "CAs" directory
+ // - any directory which starts with ".."
+ if file.Mode().IsRegular() || file.Name() == "CAs" || strings.HasPrefix(file.Name(), "..") {
+ continue
+ }
+ if file.Mode()&os.ModeSymlink == os.ModeSymlink {
+ file, err = os.Stat(filepath.Join(root.Name(), file.Name()))
+ if err != nil {
+ // not accessible, ignore it
+ continue
+ }
+ if !file.IsDir() {
+ continue
+ }
+ }
+ var (
+ certFile = filepath.Join(root.Name(), file.Name(), publicCertFile)
+ keyFile = filepath.Join(root.Name(), file.Name(), privateKeyFile)
+ )
+ if !isFile(certFile) || !isFile(keyFile) {
+ continue
+ }
+ if err = manager.AddCertificate(certFile, keyFile); err != nil {
+ err = fmt.Errorf("Unable to load TLS certificate '%s,%s': %w", certFile, keyFile, err)
+ logger.LogIf(GlobalContext, err, logger.Minio)
+ }
+ }
secureConn = true
- return x509Certs, c, secureConn, nil
+ return x509Certs, manager, secureConn, nil
}
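The move from certs.Certs to certs.Manager is what makes the per-domain subdirectories useful: the manager holds many keypairs and answers each TLS handshake with the certificate matching the requested server name (SNI). A minimal sketch of how such a manager typically plugs into crypto/tls, assuming it exposes a GetCertificate method with the tls.Config-compatible signature:

```go
package tlsserve

import (
	"crypto/tls"
	"net/http"
)

// serveTLS shows the wiring: GetCertificate is consulted once per
// handshake, so the manager can pick a certificate per SNI name. The
// cert and key file arguments are empty because the callback supplies
// the certificate material.
func serveTLS(getCert func(*tls.ClientHelloInfo) (*tls.Certificate, error), handler http.Handler) error {
	srv := &http.Server{
		Addr:    ":9000",
		Handler: handler,
		TLSConfig: &tls.Config{
			GetCertificate: getCert,
			MinVersion:     tls.VersionTLS12,
		},
	}
	return srv.ListenAndServeTLS("", "")
}
```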


@@ -20,36 +20,42 @@ import (
"bytes"
"context"
"errors"
+ "io/ioutil"
+ "net/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/hash"
)
var errConfigNotFound = errors.New("config file not found")
func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]byte, error) {
- var buffer bytes.Buffer
- // Read entire content by setting size to -1
- if err := objAPI.GetObject(ctx, minioMetaBucket, configFile, 0, -1, &buffer, "", ObjectOptions{}); err != nil {
+ r, err := objAPI.GetObjectNInfo(ctx, minioMetaBucket, configFile, nil, http.Header{}, readLock, ObjectOptions{})
+ if err != nil {
// Treat object not found as config not found.
if isErrObjectNotFound(err) {
return nil, errConfigNotFound
}
logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
logger.LogIf(ctx, err)
return nil, err
}
+ defer r.Close()
+ buf, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
// Return config not found on empty content.
- if buffer.Len() == 0 {
+ if len(buf) == 0 {
return nil, errConfigNotFound
}
- return buffer.Bytes(), nil
+ return buf, nil
}
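readConfig now pulls instead of being pushed to: GetObjectNInfo hands back a reader (plus metadata) that the helper drains with ioutil.ReadAll and closes via defer, instead of the old GetObject writing into a caller-supplied buffer. Reduced to essentials, the two dependency shapes look like this (interface and method signatures here are illustrative simplifications, not MinIO's exact ones):

```go
package objshape

import (
	"context"
	"io"
)

// writerStyle is the old shape: the callee writes the object into w.
// The caller cannot stream lazily or stop early without extra plumbing.
type writerStyle interface {
	GetObject(ctx context.Context, bucket, object string, w io.Writer) error
}

// readerStyle is the new shape: the callee returns a ReadCloser and the
// caller decides how much to read and when to close. This is what makes
// the ioutil.ReadAll plus defer r.Close() pattern in readConfig possible.
type readerStyle interface {
	GetObjectNInfo(ctx context.Context, bucket, object string) (io.ReadCloser, error)
}
```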
- func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
+ type objectDeleter interface {
+ DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error)
+ }
+ func deleteConfig(ctx context.Context, objAPI objectDeleter, configFile string) error {
_, err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile, ObjectOptions{})
if err != nil && isErrObjectNotFound(err) {
return errConfigNotFound
@@ -58,12 +64,12 @@ func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) er
}
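Narrowing deleteConfig's dependency from the full ObjectLayer to the one-method objectDeleter interface makes the helper trivially testable: a fake only has to implement DeleteObject. A sketch of such a fake inside the cmd package (illustrative; fakeDeleter is not a type from the repository):

```go
// fakeDeleter records deletions instead of touching any backend. Because
// deleteConfig only needs objectDeleter, this tiny type is enough to
// exercise it in a unit test.
type fakeDeleter struct {
	deleted []string
}

// DeleteObject records the request and reports success.
func (f *fakeDeleter) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	f.deleted = append(f.deleted, bucket+"/"+object)
	return ObjectInfo{Bucket: bucket, Name: object}, nil
}
```

A test can then call deleteConfig(ctx, &fakeDeleter{}, configFile) and assert on the recorded paths without standing up a real ObjectLayer.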
func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
- hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)), globalCLIContext.StrictS3Compat)
+ hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
if err != nil {
return err
}
- _, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader, nil, nil), ObjectOptions{})
+ _, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader), ObjectOptions{})
return err
}
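saveConfig binds the payload to its SHA-256 via hash.NewReader so the object layer can verify the bytes it stores (the dropped trailing argument was the old strict-S3-compat flag). A minimal, standalone sketch of producing the hex digest a helper like getSHA256Hash plausibly returns:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// getSHA256HexSketch mirrors what a helper like getSHA256Hash plausibly
// does: hex-encode the SHA-256 digest of the payload so hash.NewReader
// can verify the bytes it streams.
func getSHA256HexSketch(data []byte) string {
	sum := sha256.Sum256(data)
	return hex.EncodeToString(sum[:])
}

func main() {
	data := []byte(`{"version": "33"}`)
	// The digest, together with the declared sizes, is what the
	// hash.NewReader call in saveConfig checks during the read.
	fmt.Println(getSHA256HexSketch(data))
}
```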
@@ -74,8 +80,6 @@ func checkConfig(ctx context.Context, objAPI ObjectLayer, configFile string) err
return errConfigNotFound
}
- logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
- logger.LogIf(ctx, err)
return err
}
return nil
