Compare commits

...

219 Commits

Author SHA1 Message Date
Andreas Auernhammer
d1c8e9f31b update KMS guide to work with latest KES changes (#9498)
This commit updates the KMS guide to reflect the
latest changes in KES. Based on internal design
meetings we made some adjustments to the overall
KES configuration.
This commit ensures that the KMS guide contains
a working KES demo-setup with Vault.
2020-05-01 12:36:30 -07:00
Bala FA
83ccae6c8b Store bucket created time as metadata (#9465)
Fixes #9459
2020-05-01 09:53:14 -07:00
Frank Wessels
086be07bf5 Fix ndjson unsupported (#9500) 2020-05-01 08:06:29 -07:00
Harshavardhana
28f9c477a8 fix: assume parentUser correctly for serviceAccounts (#9504)
ListServiceAccounts/DeleteServiceAccount didn't work properly
with STS credentials due to an incorrect parent user.
2020-05-01 08:05:14 -07:00
Harshavardhana
09571d03a5 avoid unnecessary logging in IAM (#9502) 2020-05-01 18:11:17 +05:30
Harshavardhana
71ce63f79c fix: background heal to call HealFormat only if needed (#9491)
In large setups this avoids unnecessary data transfer
across nodes and potential locks.

This PR also optimizes the heal result channel, which should
not be created for each queueHealTask, as it is expensive
to create/close channels for a large number of objects.
2020-04-30 20:23:00 -07:00
Harshavardhana
5205c9591f print proper certinfo on console when starting up (#9479)
also potentially fix a race in the certs.go implementation
when accessing tls.Certificate concurrently.
2020-04-30 16:15:29 -07:00
poornas
9a547dcbfb Add API's for managing bucket quota (#9379)
This PR allows setting a "hard" or "fifo" quota
restriction at the bucket level. Buckets that
have reached the configured FIFO quota will
automatically be cleaned up in FIFO manner until
bucket usage drops below the configured quota.
If a bucket is configured with a "hard" quota
ceiling, all further writes are disallowed.
2020-04-30 15:55:54 -07:00
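For illustration only: the "fifo" behavior amounts to evicting the oldest objects first until usage drops back under the quota. A minimal Go sketch with hypothetical types, not the server's actual code:

```
package main

import "fmt"

// objInfo is a hypothetical stand-in for an object record.
type objInfo struct {
	Name string
	Size uint64
}

// enforceFIFOQuota evicts oldest-first until usage <= quota.
// Here "evict" just prints; the real server would delete the object.
func enforceFIFOQuota(usage, quota uint64, oldestFirst []objInfo) uint64 {
	for _, obj := range oldestFirst {
		if usage <= quota {
			break
		}
		fmt.Println("evicting", obj.Name)
		usage -= obj.Size
	}
	return usage
}

func main() {
	objs := []objInfo{{"a", 40}, {"b", 30}, {"c", 30}}
	fmt.Println("usage now:", enforceFIFOQuota(100, 50, objs))
}
```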
Anis Elleuch
27632ca6ec audit: Merge ResponseWriter with RecordAPIStats (#9496)
ResponseWriter & RecordAPIStats has similar role, merge them.

This commit will also fix wrong auditing for STS and Web and others
since they are using ResponseWriter instead of the RecordAPIStats.
2020-04-30 11:27:19 -07:00
Harshavardhana
c7470e6e6e fix: go mod tidy 2020-04-29 22:31:34 -07:00
Anis Elleuch
d090a17ed0 fix: Audit tests on the correct response writer type (#9445) 2020-04-29 22:17:36 -07:00
Harshavardhana
c2529260e7 fix: crash observed when position of drives different (#9490)
allocate the disk slice properly before populating
each disk by its ID and position.

Fixes #9416
2020-04-29 13:42:37 -07:00
Arthur Lutz
da87188ff8 fix: tls doc markdown title (#9487) 2020-04-29 12:28:45 -07:00
Harshavardhana
d099039f5d Add new github workflow (#9480) 2020-04-29 09:17:32 -07:00
P R
5dd9cf4398 fix: CopyObject with REPLACE directive deletes existing tags (#9478)
Fixes #9477
2020-04-29 10:26:37 +05:30
Harshavardhana
ab77b216d1 fix: remove restrictions on windows for NAME_MAX (#9469)
Fixes #9393
2020-04-28 17:32:46 -07:00
Minio Trusted
b37a02cddf Update yaml files to latest version RELEASE.2020-04-28T23-56-56Z 2020-04-29 00:05:12 +00:00
Anis Elleuch
c3c3e9087b config: More fixes in parsing Audit & Logger env variables (#9474)
- Add support for the missed legacy Logger webhook
- Do not enable Audit or Logger targets unless _ENABLE
  is explicitly set to "on".
2020-04-28 15:20:40 -07:00
Anis Elleuch
7ad6bc955f show a notice when mixed rootfs & mounted disks is detected (#9471)
A user can incorrectly mount a fresh disk. MinIO will detect
that it is writing to a rootfs disk and will mark it down. However,
it is hard for the user to understand what's going on.

This commit just prints a notice so it is easy to spot
such cases.
2020-04-28 14:55:01 -07:00
Harshavardhana
7a5271ad96 fix: re-use connections in webhook/elasticsearch (#9461)
- elasticsearch client should rely on the SDK helpers
  instead of raw HTTP calls.
- webhook shouldn't need to check IsActive() for
  all notifications; failure should be delayed.
- Remove DialHTTP as it's never used properly

Fixes #9460
2020-04-28 13:57:56 -07:00
Harshavardhana
1b122526aa fix: add service account support for AssumeRole/LDAPIdentity creds (#9451)
allow generating service accounts for temporary credentials
which have a designated parent; currently OpenID is not yet
supported.

added checks to ensure that a service account cannot generate
further service accounts for itself; service accounts can
never be a parent to any credential.
2020-04-28 12:49:56 -07:00
Anis Elleuch
a3b266761e Fix audit loading from the env and consider enable env variable (#9467)
Audit was not working properly when enabled from the environment,
caused by a typo in the code.

This commit fixes that, and also considers the following variables:
`MINIO_LOGGER_WEBHOOK_ENABLE_*` and
`MINIO_AUDIT_WEBHOOK_ENABLE_*`, so the user can use
the latter to temporarily disable a logger or audit configuration.
2020-04-28 16:10:51 +05:30
Harshavardhana
498389123e avoid unnecessary logging on fresh/newly replaced drives (#9470)
data usage tracker and crawler seem to be logging
non-actionable information on the console, which is not
useful and resolves on its own in almost all deployments;
let's keep this logging to a minimum.
2020-04-28 01:16:57 -07:00
Harshavardhana
bc61417284 calculate automatic node based symmetry (#9446)
it is possible in many scenarios that even
if the divisible value is optimal, we may
end up with uneven distribution due to the number
of nodes present in the configuration.

added code to allow for affinity towards various
ellipses to figure out the optimal value across
ellipses such that we can always reach a
symmetric value automatically.

Fixes #9416
2020-04-27 14:39:57 -07:00
Harshavardhana
97d952e61c fix: ensure buckets are preserved if one set returns error (#9468)
a bucket should be deleted only if it can be successfully
deleted on all sets; if not, we should ensure
those buckets are restored properly.
2020-04-27 14:18:02 -07:00
Klaus Post
073aac3d92 add data update tracking using bloom filter (#9208)
By monitoring PUT/DELETE and heal operations it is possible
to track changed paths and keep a bloom filter for this data.

This can help prioritize paths to scan. The bloom filter can identify
paths that have not changed, and the few collisions will only result
in a marginal extra workload. This can be implemented at
bucket + (1 prefix level) granularity with reasonable performance.

The bloom filter is set to have a false-positive rate of 1% at 1M
entries. A bloom table of this size is about ~2500 bytes when serialized.

To avoid forcing a full scan of all paths that have changed, cycle bloom
filters need to be kept, so we guarantee that dirty paths have
been scanned within cycle runs. Until cycle bloom filters have been
collected, all paths are considered dirty.
2020-04-27 10:06:21 -07:00
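As a sanity check on the sizing above, the standard bloom filter formulas can be computed directly. This is generic math, not MinIO's implementation; a serialized filter can be far smaller than the raw bit array when it is sparse and compressed:

```
package main

import (
	"fmt"
	"math"
)

// bloomParams returns the optimal bit count m and hash count k for
// n entries at false-positive rate p, using the standard formulas
// m = -n*ln(p)/(ln 2)^2 and k = (m/n)*ln 2.
func bloomParams(n, p float64) (m, k float64) {
	m = -n * math.Log(p) / (math.Ln2 * math.Ln2)
	k = m / n * math.Ln2
	return
}

func main() {
	// 1M entries at a 1% false-positive rate, as quoted above.
	m, k := bloomParams(1e6, 0.01)
	fmt.Printf("bits: %.0f (~%.1f MiB raw), hashes: %.0f\n",
		m, m/8/(1<<20), math.Ceil(k))
}
```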
Harshavardhana
eff4127efd Revert "Write files in O_SYNC for fs backend to protect against machine crashes (#9434)"
This reverts commit 4843affd0e.
2020-04-27 09:22:05 -07:00
Harshavardhana
b1c0c32ba6 fix: ignore symlinks in backend filesystems (#9457)
fixes #9419
2020-04-27 06:30:12 -07:00
Harshavardhana
f14bf25cb9 optimize Listen bucket notification implementation (#9444)
this commit avoids lots of tiny allocations and repeated
channel creations which were performed when filtering
the incoming events, and unescaping a key just for matching.

also removes deprecated code which is not needed
anymore, avoiding unexpected data structure transformations
from the map to slice.
2020-04-27 06:25:05 -07:00
Harshavardhana
f216670814 use context specific to the etcd call (#9458) 2020-04-26 21:42:41 -07:00
Harshavardhana
6ecc98fddb fix: crash in metrics handler when some disks are offline (#9450)
Fixes #9449
2020-04-25 19:48:07 -07:00
Krishna Srinivas
4843affd0e Write files in O_SYNC for fs backend to protect against machine crashes (#9434) 2020-04-25 01:18:54 -07:00
Harshavardhana
558785a4bb fix: config Set/Get decrypt/encrypt using authenticated credentials (#9447)
we have a policy available for sub-admin users to set/get/delete
config, but we incorrectly decrypt the content using the admin secret
key, when in fact it should be the credential authenticating the
request.
2020-04-24 22:36:48 -07:00
Harshavardhana
60d415bb8a deprecate/remove global WORM mode (#9436)
global WORM mode is a complex piece for which
the time has passed; with the advent of S3-compatible
object locking and retention implementation, global
WORM is effectively deprecated. This has been mentioned
in our documentation for some time; now the time
has come for this to go.
2020-04-24 16:37:05 -07:00
BigUstad
45e22cf8aa fix: selectObject to return error when object does not exist (#9423) 2020-04-24 13:51:48 -07:00
Klaus Post
e4900b99d7 s3 select: Infer types for comparison (#9438) 2020-04-24 13:02:59 -07:00
Anis Elleuch
20766069a8 add list/delete API service accounts admin API (#9402) 2020-04-24 12:10:09 -07:00
Tim Hughes
e8160c9fae fix: same endpoint for NewLDAPIdentity & NewWithCredentials (#9433)
Also enables use of https endpoints

Fixes  #9431
2020-04-24 10:44:45 +05:30
Harshavardhana
957ecb1b64 use optimal memory while purging cache (#9426)
re-implement the cache purging routine to
avoid using ioutil.ReadDir, which can lead
to high allocations when there are cache
directories with lots of content, or
when cache is installed in memory-constrained
environments.

Instead rely on a callback function where we
use no more than 8KiB of memory per
cycle.

For the precursor to this change refer to #9425; original
issue pointed out by Caleb Case <caleb@storj.io>
2020-04-23 12:26:13 -07:00
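A bounded-memory directory scan of this kind can be sketched with the standard library's (*os.File).Readdirnames(n), which reads entries in batches instead of loading the entire listing the way ioutil.ReadDir does. Illustrative only, not the actual cache code:

```
package main

import (
	"fmt"
	"io"
	"os"
)

// walkDir reads directory entries in batches of batchSize and hands
// each batch to fn, so memory stays bounded regardless of entry count.
func walkDir(dir string, batchSize int, fn func(names []string) error) error {
	d, err := os.Open(dir)
	if err != nil {
		return err
	}
	defer d.Close()
	for {
		names, err := d.Readdirnames(batchSize)
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		if err := fn(names); err != nil {
			return err
		}
	}
}

func main() {
	_ = walkDir("/tmp", 100, func(names []string) error {
		fmt.Println(len(names), "entries in this batch")
		return nil
	})
}
```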
Boaz
ac5061df2c fix: make azure gateway chunk size configurable (#9292) 2020-04-23 02:04:13 -07:00
Tim Hughes
cddb2714ef documentation: fix group search filter (#9420) 2020-04-22 22:29:17 -07:00
Minio Trusted
d7d9cac20b Update yaml files to latest version RELEASE.2020-04-23T00-58-49Z 2020-04-23 01:07:52 +00:00
Harshavardhana
6817c5ea58 migrate mint tests to latest versions (#9424) 2020-04-22 16:06:58 -07:00
Anis Elleuch
4cd6ca02c7 fix: Add missing return in admin requests auth (#9422) 2020-04-22 13:42:01 -07:00
Egon Elbre
a5efcbab51 fix: cacheReader.Close in all paths that don't return it. (#9418) 2020-04-22 12:13:57 -07:00
Egon Elbre
85be7b39ac Call cleanup funcs when skip fails (#9417) 2020-04-22 10:06:56 -07:00
Nitish Tiwari
ebf3dda449 Update server startup example to showcase local erasure code (#9407) 2020-04-21 23:59:13 -07:00
poornas
582953260b Increase response header timeout for gateway (#9400)
fixes: #9295
2020-04-21 19:21:27 -07:00
Minio Trusted
2d1ea86fc6 Update yaml files to latest version RELEASE.2020-04-22T00-11-12Z 2020-04-22 00:19:12 +00:00
Praveen raj Mani
322385f1b6 fix: only show active/available ARNs in server startup banner (#9392) 2020-04-21 09:38:32 -07:00
Anis Elleuch
1b38aed05f fix: Correct typo when registering peer Delete User API (#9403) 2020-04-21 09:31:51 -07:00
Anis Elleuch
a69c98e394 fix: Correct typo when registering peer Delete User API (#9403) 2020-04-21 08:35:19 -07:00
Harshavardhana
282c9f790a fix: validate partNumber in queryParam as part of preConditions (#9386) 2020-04-20 22:01:59 -07:00
Anis Elleuch
2eeb0e6a0b heal: Fix heal buckets result reporting (#9397)
healBucket() was not properly collecting results after healing
buckets. This commit adds the `After` drives information correctly.
2020-04-20 13:48:54 -07:00
Harshavardhana
3ff5bf2369 fix: convert storage class into azure tiers (#9381) 2020-04-19 13:42:56 -07:00
Harshavardhana
69ee28a082 remove OSS gateway due to lack of licensing (#9390)
The OSS Go SDK lacks licensing terms in its
repository, and there has been no activity
on the issue here: https://github.com/aliyun/aliyun-oss-go-sdk/issues/245

This PR is to ensure we remove any dependency code which
lacks an explicit license file in its repo.
2020-04-18 22:12:51 -07:00
sreenivas alapati
d02deff3d7 fixed typo in KMS documentation (#9384) 2020-04-18 18:09:25 -07:00
Sidhartha Mani
3e78ea8acc improve obd tests and optimize network (#9378)
- keep long-running obd network tests alive
- fix error: wrong number of parents in process OBD info
- ensure that osinfo does not error out when inside containers
- remove limit on max number of connections per client transport

The generic client transport uses a default limit of 64 conns per transport.
This could end up limiting and throttling usage, and artificially slowing
down the performance of MinIO even on hardware capable of doing better.
2020-04-18 11:06:11 -07:00
Harshavardhana
b54c0f0ef3 Add stale/lock bot for issues (#9387) 2020-04-18 11:03:03 -07:00
Praveen raj Mani
c79358c67e notification queue limit has no maxLimit (#9380)
The new value defaults to 100K events,
but users can tune this value up to any value
they deem necessary.

* increase the limit to maxint64 while validating
2020-04-18 01:20:56 -07:00
Harshavardhana
75107d7698 fix: remove any duplicate statements in policy input (#9385)
Add support for removing duplicate statements automatically
2020-04-17 21:26:42 -07:00
Klaus Post
c4464e36c8 fix: limit HTTP transport tunables to affordable values (#9383)
Close connections proactively in transient calls
2020-04-17 11:20:56 -07:00
Harshavardhana
d92db198d1 Add target parsing code for config (#9375)
This code is a helper for the mcs project
2020-04-16 17:43:14 -07:00
Harshavardhana
8bae956df6 allow copyObject to rotate storageClass of objects (#9362)
Added additional mint tests as well to verify
this functionality.

Fixes #9357
2020-04-16 17:42:44 -07:00
Eco
7758524703 Add documentation for using MinIO with Veeam (#9355) 2020-04-16 17:36:14 -07:00
Harshavardhana
c82fa2c829 fix: load LDAP users appropriately (#9360)
This PR also fixes issues with deletePolicy and deleteUser:

both are idempotent, so they can lead to
issues when a client prematurely times out; the retried
call's error response should be ignored when the call returns
http.StatusNotFound

Fixes #9347
2020-04-16 16:22:34 -07:00
Harshavardhana
a51280fd20 allow config help in gateway mode (#9356)
allow `mc admin config set mygateway/ audit_webhook --env`
to fetch the documentation as needed, this is just to
ensure that our users can still access the relevant
ENV docs while running in gateway mode.
2020-04-16 14:49:12 -07:00
Klaus Post
bd437c1c17 set server base context on gateway http server (#9365) 2020-04-16 11:54:12 -07:00
Harshavardhana
69fb68ef0b fix simplify code to start using context (#9350) 2020-04-16 10:56:18 -07:00
Nitish Tiwari
787dbaff36 Enforce issue templates for new issues (#9364) 2020-04-16 10:54:59 -07:00
Minio Trusted
c50ae1fdbe Update yaml files to latest version RELEASE.2020-04-15T19-42-18Z 2020-04-15 20:00:16 +00:00
Harshavardhana
bde0f444db fix: support OBDAdminAction as a valid action (#9354) 2020-04-15 12:16:40 -07:00
Klaus Post
6a8298b137 Reduce Mutex test runs (#9345)
Some tests take a long time on CI:

* `--- PASS: TestRWMutex (226.49s)`
* ` --- PASS: TestRWMutex (7.13s)`

Reduce the number of runs.

Before/after locally:

```
--- PASS: TestRWMutex (20.95s)
--- PASS: TestRWMutex (7.13s)

--- PASS: TestMutex (3.01s)
--- PASS: TestMutex (1.65s)
```
2020-04-14 18:39:03 -07:00
Klaus Post
f19cbfad5c fix: use per test context (#9343)
Instead of GlobalContext use a local context for tests.
Most notably this allows anything created to be shut down
when the tests using it are done. After PRs #9345 and #9331, CI is
often running out of memory/time.
2020-04-14 17:52:38 -07:00
Minio Trusted
78f2183e70 Update yaml files to latest version RELEASE.2020-04-15T00-39-01Z 2020-04-15 00:46:50 +00:00
Harshavardhana
5c11a46412 update minio-go/parquet-go to latest 2020-04-14 16:53:29 -07:00
Anis Elleuch
8a94aebdb8 config: Add api requests max & deadline configs (#9273)
Add two new configuration entries, api.requests-max and
api.requests-deadline which have the same role of
MINIO_API_REQUESTS_MAX and MINIO_API_REQUESTS_DEADLINE.
2020-04-14 12:46:37 -07:00
Sidhartha Mani
ec11e99667 implement configurable timeout for OBD tests (#9324) 2020-04-14 11:48:32 -07:00
Harshavardhana
37d066b563 fix: deprecate requirement of session token for service accounts (#9320)
This PR fixes a couple of behaviors with service accounts

- no need to have a session token for service accounts
- service accounts can be generated by any user for themselves
  implicitly, with a valid signature.
- policy input for the AddNewServiceAccount API is now fully typed,
  allowing for validation before it is sent to the server.
- also bring in additional context for admin API errors, if any,
  when replying back to the client.
- deprecate the GetServiceAccount API as we do not need to reply
  back session tokens
2020-04-14 11:28:56 -07:00
Praveen raj Mani
bfec5fe200 fix: fetchLambdaInfo should return consistent results (#9332)
- Introduced a function `FetchRegisteredTargets` which will return
  a complete set of registered targets irrespective of their states,
  if the `returnOnTargetError` flag is set to `False`
- Refactor NewTarget functions to return non-nil targets
- Refactor GetARNList() to return a complete list of configured targets
2020-04-14 11:19:25 -07:00
Bala FA
525287f4b6 remove queue only if index is within the range (#9341)
Fixes minio/mc#3155
2020-04-14 11:06:23 -07:00
Harshavardhana
9054ce73b2 fix: deprecate skyring/uuid and use maintained google/uuid (#9340) 2020-04-14 02:40:05 -07:00
Harshavardhana
d079adc167 fix: remove initGlobalContext writes in tests (#9331)
since we do not close GlobalContext, we do not
need to reinitialize it inside test code
2020-04-13 23:21:01 -07:00
Harshavardhana
a9d401ac10 fix: update docs to mention erasure guide (#9339) 2020-04-14 11:38:14 +05:30
kannappanr
1fa65c7f2f fix: object lock behavior when default lock config is enabled (#9305) 2020-04-13 14:03:23 -07:00
Harshavardhana
cc9b63eb51 add deprecation docs for PostgresSQL/MySQL targets (#9333) 2020-04-13 12:13:33 -07:00
Harshavardhana
7e12eab3ad fix: cleanup madmin docs (#9330) 2020-04-13 10:30:41 +05:30
Roland Huß
fa685d7d9c Make multistage Dockerfile self-contained (#9323)
Picking up all support files from the builder image has the advantage
that the Dockerfile is now fully self-contained and can also be
run standalone.

This allows also cross-compilation and pushing with the proper manifests
with Docker Buildkit:

```
docker buildx create --name xbuilder
docker buildx use xbuilder

docker buildx build -f Dockerfile.minio --platform linux/arm/v7,linux/amd64 --progress plain --push -t minio/minio .
```

which also has the advantage that the Dockerfile is the same
for all platforms.

Co-authored-by: Harshavardhana <harsha@minio.io>
2020-04-12 20:03:02 -07:00
Harshavardhana
4314ee1670 fix: remove unused PerfInfoHandler code (#9328)
- Removes the PerfInfo admin API as it's not OBDInfo
- Keep the drive path without the metaBucket in the OBD
  global latency map.
- Remove all the unused code related to the PerfInfo API
- Do not redefine global mib/gib constants; always use
  humanize.MiByte and humanize.GiByte instead
2020-04-12 19:37:09 -07:00
Harshavardhana
7d636a7c13 enable --compat flag by default (#9326)
if needed use --no-compat to disable md5sum while
verifying any performance numbers.

bring back --compat behavior as default to avoid
additional documentation and confusing behavior;
as we are working towards making md5sum
faster on AVX instructions, enabling this
should hardly be a problem in future versions
of MinIO.

fixes #8012
fixes #7859
fixes #7642
2020-04-12 18:08:27 -07:00
Harshavardhana
bf9d51cf14 fix: add missing copyright headers in some files (#9321) 2020-04-12 13:55:22 -07:00
Harshavardhana
29e0727b58 fix: regression in CopyObject not preserving ETag in --compat (#9322)
issue found after `git bisect` to commit db41953618
2020-04-11 20:20:30 -07:00
Anis Elleuch
c434dff0a4 posix: Add missing error return in RenameFile() (#9319)
Although it should not happen in most cases.
2020-04-11 11:15:30 -07:00
Taras Parkhomenko
b2a8cb4aba Add SHA-3 support (#9308) 2020-04-10 14:59:52 -07:00
Harshavardhana
b412a222ae Add missing comment key from key list (#9313)
Continuing from previous PR #9304: comment
is a special key that is not present in the
default KV list. Add it explicitly when
tokenizing fields, as it is possible that
some clients might try to set comments.
2020-04-10 11:44:28 -07:00
Harshavardhana
79bcb705bf update CREDITS file to reflect new deps (#9311) 2020-04-10 00:16:38 -07:00
Sidhartha Mani
9f81d014f1 fix: drive names in output of parallel obd test (#9312) 2020-04-09 22:44:17 -07:00
Harshavardhana
3184205519 fix: config to support keys with special values (#9304)
This PR adds context-based `k=v` splits based
on the sub-system which was obtained; if the
keys are not provided an error will be thrown
during parsing, and if keys are provided with wrong
values an error will be thrown as well. Keys can now
have values which are of a much more complex
form such as `k="v=v"` or `k=" v = v"`
and other variations.

additionally, deprecate unnecessary postgres/mysql
configuration styles, support only

- connection_string for Postgres
- dsn_string for MySQL

All other parameters are removed.
2020-04-09 21:45:17 -07:00
Minio Trusted
7c919329e8 Update yaml files to latest version RELEASE.2020-04-10T03-34-42Z 2020-04-10 03:47:00 +00:00
Andreas Auernhammer
db41953618 avoid unnecessary KMS requests during single-part PUT (#9220)
This commit fixes a performance issue caused
by too many calls to the external KMS - i.e.
for single-part PUT requests.

In general, the issue is caused by a sub-optimal
code structure. In particular, when the server
encrypts an object it requests a new data encryption
key from the KMS. With this key it does some key
derivation and encrypts the object content and
ETag.

However, to behave S3-compatibly the MinIO server
has to return the plaintext ETag to the client
in the case of SSE-S3.
Therefore, the server code used to decrypt the
(previously encrypted) ETag again by requesting
the data encryption key (KMS decrypt API) from
the KMS.

This leads to 2 KMS API calls (1 generate key and
1 decrypt key) per PUT operation - while only
one KMS call is necessary.

This commit fixes this by fetching a data key only
once from the KMS and keeping the derived object
encryption key around (for the lifetime of the request).

This leads to a significant performance improvement
w.r.t. PUT workloads:
```
Operation: PUT
Operations: 161 -> 239
Duration: 28s -> 29s
* Average: +47.56% (+25.8 MiB/s) throughput, +47.56% (+2.6) obj/s
* Fastest: +55.49% (+34.5 MiB/s) throughput, +55.49% (+3.5) obj/s
* 50% Median: +58.24% (+32.8 MiB/s) throughput, +58.24% (+3.3) obj/s
* Slowest: +1.83% (+0.6 MiB/s) throughput, +1.83% (+0.1) obj/s
```
2020-04-09 17:01:45 -07:00
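Conceptually the fix looks like this: one GenerateKey round-trip per PUT, with the derived object key kept for the lifetime of the request so the ETag never needs a KMS Decrypt. The KMS client below is a stand-in for illustration, not the server's actual KMS interface:

```
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

// dataKey models a KMS data key: plaintext for local use,
// ciphertext for storage alongside the object metadata.
type dataKey struct{ plaintext, ciphertext []byte }

// fakeKMS stands in for the external KMS; it counts round-trips.
type fakeKMS struct{ calls int }

func (k *fakeKMS) GenerateKey() (dataKey, error) {
	k.calls++
	p := make([]byte, 32)
	_, err := rand.Read(p)
	return dataKey{plaintext: p, ciphertext: []byte("wrapped")}, err
}

func main() {
	kms := &fakeKMS{}
	key, _ := kms.GenerateKey()               // the only KMS round-trip
	objectKey := sha256.Sum256(key.plaintext) // hypothetical KDF step
	_ = objectKey // seal both content and ETag with objectKey; since the
	// plaintext ETag stays available locally, no KMS Decrypt is needed.
	fmt.Println("KMS calls for this PUT:", kms.calls) // 1, not 2
}
```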
Harshavardhana
cea078a593 update browser assets for image-preview feature 2020-04-09 14:34:37 -07:00
Harshavardhana
f44cfb2863 use GlobalContext whenever possible (#9280)
This change is throughout the codebase to
ensure that all codepaths honor GlobalContext
2020-04-09 09:30:02 -07:00
Anis Elleuch
1b45be0d60 lifecycle: Disallow delete when the object is locked (#9272) 2020-04-09 09:28:57 -07:00
Aditya Manthramurthy
6bb693488c Fix policy setting error in LDAP setups (#9303)
Fixes #8667

In addition to the above, if the user is mapped to a policy or
belongs to a group, the user-info API returns this information,
but otherwise, the API will now return a non-existent user error.
2020-04-09 01:04:08 -07:00
Harshavardhana
e20e08d700 fix: remove the sleep from listing operations (#9287)
make the rest of the Walk() function more predictable;
it was observed that in nominal deployments, even
without much workload, the drives are generally
slow to respond to readdir operations. With a
sleepDuration factor of 10 this can cause
unexpected slowness in the Listing calls; while
it is good for all other I/O, it may simply slow
down Listing immensely, which is not useful.
2020-04-08 19:42:57 -07:00
Harshavardhana
ac07df2985 start watcher after all creds have been loaded (#9301)
start watcher after all creds have been loaded
to avoid any conflicting locks that might
deadlock.

Deprecate unused peer calls for LoadUsers()
2020-04-08 19:00:39 -07:00
Praveen raj Mani
2054ca5c9a fix: honor token based authentication in NATS streaming (#9296)
fixes #9148
2020-04-08 12:45:24 -07:00
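For reference, token authentication on the underlying NATS connection looks roughly like this with the nats.go client (URL and token are placeholders):

```
package main

import (
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	// Token-based auth on the core NATS connection; a streaming
	// (STAN) client can then be layered on top of nc.
	nc, err := nats.Connect("nats://127.0.0.1:4222", nats.Token("s3cr3t"))
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()
	log.Println("connected with token auth")
}
```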
Anis Elleuch
e51e465543 delete: Use physical Dir() for proper prefix cleanup in Windows (#9297)
In FS mode under Windows, removing an object will not automatically
remove parent empty prefixes.

The reason is that path.Dir() was used; however filepath.Dir() is
more appropriate since filepath is physical (meaning it operates
on OS filesystem paths).

This was not caught because failures on Windows CI are not caught.
2020-04-08 11:32:58 -07:00
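The difference is easy to demonstrate: path always splits on '/', while filepath splits on the OS separator, so only filepath.Dir walks up a Windows-style path:

```
package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	p := `bucket\prefix\object.txt` // a Windows-style path
	// path.Dir sees no '/' separator, so it returns "." and the
	// parent prefix would never be cleaned up.
	fmt.Println(path.Dir(p)) // "."
	// filepath.Dir uses the OS separator; on Windows this prints
	// `bucket\prefix`, the physical parent directory.
	fmt.Println(filepath.Dir(p))
}
```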
tweigel-dev
2bbc6a83e8 feature preview of image-objects (#9239) 2020-04-08 10:47:47 -07:00
ebozduman
a78731a3ba Adds info on policy for STS authentication using web-id (#9289) 2020-04-08 10:34:43 -07:00
kumy
f4e779c964 Fix typo in LDAP STS guide (#9294) 2020-04-08 08:58:03 -07:00
Pontus Leitzler
a973402821 add object api check in fs-v1 before returning ready (#9285)
fs-v1 in server mode only checks whether the path exists, so it
returns ready before it is indeed ready.

This change adds a check to ensure that the global object api is
available too before reporting ready.

Fixes #9283
2020-04-08 08:53:20 -07:00
Sidhartha Mani
44decbeae0 increase drive OBD blocksize to 4MB (#9258) 2020-04-08 06:04:27 -07:00
César Nieto
3ea1be3c52 allow delete of a group with no policy set (#9288) 2020-04-08 06:03:57 -07:00
Harshavardhana
2642e12d14 fix: change policies API to return and take struct (#9181)
This allows for order guarantees in returned values, which
can be consumed safely by the caller to avoid any
additional parsing and validation.

Fixes #9171
2020-04-07 19:30:59 -07:00
Harshavardhana
e7276b7b9b fix: make single locks for both IAM and object-store (#9279)
Additionally add context support for IAM sub-system
2020-04-07 14:26:39 -07:00
Harshavardhana
e375341c33 fix: allow any 127.0.0.x as bind IPs (#9281)
It is sometimes common and convenient to use
just local IPs for testing purposes; 127.0.0.x
are special IPs which, regardless of being available
on an interface, can be bound to on all operating
systems.

Allow this behavior to work for minio server

fixes #9274
2020-04-07 09:40:20 -07:00
Harshavardhana
2c20716f37 fix: Avoid force delete in compliance/worm mode (#9276)
also, bring in an additional policy check to ensure that
force-deleting a bucket is only allowed with the right
policy for the user; the DeleteBucketAction
policy action alone is not enough.
2020-04-06 17:51:05 -07:00
Harshavardhana
928f5b0564 fix: Quit when the context is canceled in madmin (#9264) 2020-04-06 17:50:14 -07:00
Harshavardhana
91f21ddc47 fix: ignore lost+found properly while reading disks (#9278)
Fixes #9277
2020-04-06 16:51:18 -07:00
Harshavardhana
43a3778b45 fix: support object-remaining-retention-days policy condition (#9259)
This PR also tries to simplify the approach taken in
the object-locking implementation by preferential treatment
given towards full validation.

This in turn has fixed a couple of bugs related to
how policy should have been honored when ByPassGovernance
is provided.

Simplifies code a bit, but also duplicates code intentionally
for clarity due to the complex nature of the object locking
implementation.
2020-04-06 13:44:16 -07:00
Bitworks LLC
b9b1bfefe7 Added a function which allows passing the UID/GID for suexec from the outside. (#9251) 2020-04-06 13:28:23 -07:00
Minio Trusted
05cda35b14 Update yaml files to latest version RELEASE.2020-04-04T05-39-31Z 2020-04-04 05:48:22 +00:00
Harshavardhana
2155e74951 update minio-go to latest v6.0.52 2020-04-03 18:06:50 -07:00
Harshavardhana
4714958e99 fix: possible connection leaks in sets init, heal (#9263) 2020-04-03 18:06:31 -07:00
Minio Trusted
c6e62b9175 Update yaml files to latest version RELEASE.2020-04-02T21-34-49Z 2020-04-02 21:44:04 +00:00
Harshavardhana
ab66b23194 fix: allow listBuckets with listBuckets permission (#9253) 2020-04-02 12:35:22 -07:00
Harshavardhana
73f9d8a636 set default storage class always (#9250)
gateway implementations might not respond
back with the right storage class, which is
an AWS S3 concept; add the default storage
class if it's empty.
2020-04-02 00:23:09 -07:00
Krishna Srinivas
541a778d7b fix: do not exit on bootstrap Verify() to allow for rolling upgrades (#9235) 2020-04-01 21:40:03 -07:00
Harshavardhana
d49f2ec19c fix: use specified authToken for audit/logger HTTP targets (#9249)
We were not using the specified auth token
even when the config supports it.
2020-04-01 20:53:07 -07:00
ebozduman
8dd63a462f fix: ETag returned by OSS endpoint (#9243) 2020-04-01 19:51:12 -07:00
Anis Elleuch
9902c9baaa sql: Add support of escape quote in CSV (#9231)
This commit modifies csv parser, a fork of golang csv
parser to support a custom quote escape character.

The quote escape character is used to escape the quote
character when a csv field contains a quote character
as part of data.
2020-04-01 15:39:34 -07:00
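For context, stock encoding/csv only recognizes a doubled quote as the escape inside a quoted field; the forked parser generalizes that escape character. A stdlib example of the default behavior:

```
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	// In stock encoding/csv, the only escape for a quote inside a
	// quoted field is a doubled quote ("").
	in := `name,quote` + "\n" + `rob,"he said ""hi"""` + "\n"
	r := csv.NewReader(strings.NewReader(in))
	records, err := r.ReadAll()
	if err != nil {
		panic(err)
	}
	fmt.Println(records[1][1]) // he said "hi"
}
```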
Harshavardhana
7de29e6e6b Add rotating token support for admin API (#9244)
Use the *credentials.Credentials implementation method *Get*

```
func (c *Credentials) Get() (Value, error) {
```

which also handles auto-refresh, this allows for chaining
of various implementations together if necessary or simply
initialize with credentials.NewStaticV4(access, secret, token)

Co-authored-by: Klaus Post <klauspost@gmail.com>
2020-04-01 13:34:20 -07:00
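A minimal usage sketch with minio-go's credentials package: static credentials satisfy the same *Credentials type as auto-refreshing providers (STS, IAM, chained), so callers can use Get() uniformly and pick up rotated tokens:

```
package main

import (
	"fmt"

	"github.com/minio/minio-go/v6/pkg/credentials"
)

func main() {
	creds := credentials.NewStaticV4("access", "secret", "token")
	v, err := creds.Get() // auto-refreshing providers refresh here
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(v.AccessKeyID, v.SessionToken)
}
```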
poornas
336460f67e fix: gateway_s3_bytes_sent metric for all API methods (#9242)
Co-authored-by: Harshavardhana <harsha@minio.io>
2020-04-01 12:52:31 -07:00
Bala FA
95e89f1712 proactive deep heal object when a bitrot is detected (#9192) 2020-04-01 12:14:00 -07:00
Harshavardhana
886ae15464 trimpaths when building minio binaries (#9246) 2020-04-01 10:45:11 -07:00
Harshavardhana
d8af244708 Add numeric/date policy conditions (#9233)
add new policy conditions

- NumericEquals
- NumericNotEquals
- NumericLessThan
- NumericLessThanEquals
- NumericGreaterThan
- NumericGreaterThanEquals
- DateEquals
- DateNotEquals
- DateLessThan
- DateLessThanEquals
- DateGreaterThan
- DateGreaterThanEquals
2020-04-01 00:04:25 -07:00
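An illustrative policy statement using the new operators might look like the following (bucket name and values are examples; the condition keys must carry numeric or date values):

```
{
  "Effect": "Allow",
  "Action": ["s3:ListBucket"],
  "Resource": ["arn:aws:s3:::mybucket"],
  "Condition": {
    "NumericLessThanEquals": {"s3:max-keys": "100"},
    "DateGreaterThan": {"aws:CurrentTime": "2020-04-01T00:00:00Z"}
  }
}
```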
Sidhartha Mani
c8243706b4 Add Parallel NetOBD tests to saturate all nodes at once (#9241) 2020-03-31 17:08:28 -07:00
Harshavardhana
30707659b5 [feature] allow for an odd number of erasure packs (#9221)
Too many deployments come up with an odd number
of hosts or drives; to facilitate even distribution
among those setups, allow for packs based on odd
and prime numbers.
2020-03-31 09:32:16 -07:00
poornas
90c365a174 fix: allow overwriting objects under lock after retention period (#9232)
fixes #9230
2020-03-31 09:15:42 -07:00
Sidhartha Mani
7b732b566f [Bugfix] Fix Net tests being omitted (#9234) 2020-03-31 01:15:21 -07:00
Harshavardhana
ba52a925f9 fix: delete dangling directories properly (#9222) 2020-03-30 09:48:24 -07:00
ebozduman
fdda5f98c6 Makes mandatory dsn_string parameter optional (#8931) 2020-03-28 22:20:02 -07:00
Ingmar Runge
fa4d627b57 B2 gateway S3 compat: return MD5 hash as ETag from PutObject (#9183)
- B2 does actually return an MD5 hash for newly uploaded objects
  so we can use it to provide better compatibility with S3 client
  libraries that assume the ETag is the MD5 hash such as boto.
- depends on change in blazer library.
- new behaviour is only enabled if MinIO's --compat mode is active.
- behaviour for multipart uploads is unchanged (works fine as is).
2020-03-28 13:59:55 -07:00
Bala FA
2c3e34f001 add force delete option of non-empty bucket (#9166)
passing HTTP header `x-minio-force-delete: true` would 
allow standard S3 API DeleteBucket to delete a non-empty
bucket forcefully.
2020-03-27 21:52:59 -07:00
Anis Elleuch
7f8f1ad4e3 fix: cleanup lifecycle unused code (#9219) 2020-03-27 18:57:50 -07:00
Harshavardhana
6f992134a2 fix: startup load time by reusing storageDisks (#9210) 2020-03-27 14:48:30 -07:00
Sidhartha Mani
0c80bf45d0 Implement onboard diagnostics admin API (#9024)
- Implement a graph algorithm to test network bandwidth from every 
  node to every other node
- Saturate any network bandwidth adaptively, accounting for slow 
  and fast network capacity
- Implement parallel drive OBD tests
- Implement a paging mechanism for OBD test to provide periodic updates to client
- Implement Sys, Process, Host, Mem OBD Infos
2020-03-26 21:07:39 -07:00
Robert Thomas
2777956581 Improve YAML download links listed in K8s doc (#9213) 2020-03-26 11:17:00 -07:00
Anis Elleuch
b207520d98 Fix lifecycle GET: AWS SDK complaints on empty config (#9201) 2020-03-25 21:06:03 -07:00
Minio Trusted
2196fd9cd5 Update yaml files to latest version RELEASE.2020-03-25T07-03-04Z 2020-03-25 07:11:33 +00:00
Krishna Srinivas
ef6304c5c2 Improve connectDisks() performance (#9203) 2020-03-24 23:26:13 -07:00
Nitish Tiwari
6b984410d5 Add support for self-healing related metrics in Prometheus (#9079)
Fixes #8988

Co-authored-by: Anis Elleuch <vadmeste@users.noreply.github.com>
Co-authored-by: Harshavardhana <harsha@minio.io>
2020-03-24 22:40:45 -07:00
Harshavardhana
813e0fc1a8 fix: optimize isConnected to avoid url.String() conversions (#9202)
Stringifying in a loop can tax the system; avoid this
by converting the endpoints to strings early on and
remembering them for the lifetime of the server.
2020-03-24 18:53:24 -07:00
Harshavardhana
38cf263409 fix: docs remove goreportcard, its deprecated 2020-03-24 14:51:06 -07:00
Harshavardhana
6f6a2214fc Add rate limiter for S3 API layer (#9196)
- total number of S3 API calls per server
- maximum wait duration for any S3 API call

This implementation is primarily meant for situations
where HDDs are not capable enough to handle the incoming
workload and there is no way to throttle the client.

This feature allows MinIO server to throttle itself
such that we do not overwhelm the HDDs.
2020-03-24 12:43:40 -07:00
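Such self-throttling can be sketched as a buffered-channel semaphore with a wait deadline; a generic illustration, not the server's actual implementation:

```
package main

import (
	"log"
	"net/http"
	"time"
)

// throttle limits concurrent requests to max; a request that cannot
// acquire a slot within deadline is rejected with 503.
func throttle(max int, deadline time.Duration, next http.Handler) http.Handler {
	slots := make(chan struct{}, max)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		timer := time.NewTimer(deadline)
		defer timer.Stop()
		select {
		case slots <- struct{}{}:
			defer func() { <-slots }()
			next.ServeHTTP(w, r)
		case <-timer.C:
			http.Error(w, "server busy", http.StatusServiceUnavailable)
		}
	})
}

func main() {
	h := throttle(100, 10*time.Second, http.FileServer(http.Dir(".")))
	log.Fatal(http.ListenAndServe(":8080", h))
}
```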
Anis Elleuch
791821d590 sa: Allow empty policy to indicate parent user's policy is inherited (#9185) 2020-03-23 14:17:18 -07:00
Harshavardhana
9a951da881 honor the credentials of user admin for encrypt/decrypt (#9194)
Fixes #9193
2020-03-23 14:06:00 -07:00
Praveen raj Mani
e7a0be5bd3 fix: throttling of events during their replay (#9188) 2020-03-23 12:34:39 -07:00
Harshavardhana
ff932ca2a0 fix: log only catastrophic errors in prepare storage (#9189) 2020-03-23 07:32:18 -07:00
poornas
818d3bcaf5 fix: deprecate TestDiskCache test from unit tests (#9187) 2020-03-22 23:46:36 -07:00
Krishna Srinivas
45b1c66195 fix: implement splunk specific listObjects when delimiter=guidSplunk (#9186) 2020-03-22 19:23:47 -07:00
Harshavardhana
da04cb91ce optimize listObjects to list only from 3 random disks (#9184) 2020-03-22 16:33:49 -07:00
Harshavardhana
cfc9cfd84a fix: various optimizations, idiomatic changes (#9179)
- acquire since leader lock for all background operations
  - healing, crawling and applying lifecycle policies.

- simplify lifecycle to avoid network calls, which was a
  bug in the implementation - we should hold a leader and
  do everything from there; we have access to the entire
  name space.

- make listing, walking not interfere by slowing itself
  down like the crawler.

- effectively use global context everywhere to ensure
  proper shutdown, in cache, lifecycle, healing

- don't read `format.json` for prometheus metrics in
  StorageInfo() call.
2020-03-22 12:16:36 -07:00
Harshavardhana
ea18e51f4d Support multiple LDAP OU's, smAccountName support (#9139)
Fixes #8532
2020-03-21 22:47:26 -07:00
Harshavardhana
3d3beb6a9d Add response header timeouts (#9170)
- Add conservative timeouts up to 3 minutes
  for internode communication
- Add aggressive timeouts of 30 seconds
  for gateway communication

Fixes #9105
Fixes #8732
Fixes #8881
Fixes #8376
Fixes #9028
2020-03-21 22:10:13 -07:00
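In net/http terms this maps to the Transport's ResponseHeaderTimeout field; a sketch of the two profiles described above:

```
package main

import (
	"net/http"
	"time"
)

func main() {
	// Conservative profile for internode calls: allow up to 3 minutes
	// for the remote server to begin responding.
	internode := &http.Transport{ResponseHeaderTimeout: 3 * time.Minute}

	// Aggressive profile for gateway backends: fail fast after 30s.
	gateway := &http.Transport{ResponseHeaderTimeout: 30 * time.Second}

	_ = &http.Client{Transport: internode}
	_ = &http.Client{Transport: gateway}
}
```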
poornas
27b8f18cce Fix storage info message on startup (#9177) 2020-03-21 10:02:20 -07:00
Harshavardhana
bf545dc320 migrate to new minio-go with latest changes (#9176)
- extract userTags from Get/Head request (#1249)
- fix: Context cancellation not handled (#1250)
- Check for correct http status in remove object tagging (#1248)
- simplify extracting metadata in Head/Get object (#1245)
- fix: close and remove .minio.part file on errors (#1243)
2020-03-20 17:28:36 -07:00
stefan-work
f001e99fcd create the final file with mode 0666 for multipart-uploads (#9173)
NAS gateway creates non-multipart uploads with mode 0666,
but multipart uploads are created with a differing mode of 0644.

Both modes should be equal! Otherwise it leads to files with different
permissions based on file size. This patch solves that by
using 0666 for both cases.
2020-03-20 15:32:15 -07:00
Harshavardhana
b4bfdc92cc fix: admin console logger changes to log.Info 2020-03-20 15:14:14 -07:00
Harshavardhana
ae654831aa Add madmin package context support (#9172)
This is to improve responsiveness for all
admin API operations, allowing callers
to cancel any on-going admin operations
if they happen to be waiting too long.
2020-03-20 15:00:44 -07:00
Stephen N
1ffa983a9d added support for SASL/SCRAM on Kafka bucket notifications. (#9168)
fixes #9167
2020-03-20 11:10:27 -07:00
Nitish Tiwari
ecf1566266 Add an option to allow plaintext connection to LDAP/AD Server (#9151) 2020-03-19 19:20:51 -07:00
Minio Trusted
c5b87f93dd Update yaml files to latest version RELEASE.2020-03-19T21-49-00Z 2020-03-19 21:57:16 +00:00
Harshavardhana
b1a2169dcc fix: data usage crawler env handling, usage-cache.bin location (#9163)
canonicalize the ENVs such that we can bring these ENVs
as part of the config values, as a subsequent change.

- fix location of per bucket usage to `.minio.sys/buckets/<bucket_name>/usage-cache.bin`
- fix location of the overall usage in `json` at `.minio.sys/buckets/.usage.json`
  (avoid conflicts with a bucket named `usage.json`)
- fix location of the overall usage in `msgp` at `.minio.sys/buckets/.usage.bin`
  (avoid conflicts with a bucket named `usage.bin`)
2020-03-19 09:47:47 -07:00
Harshavardhana
d45a1808f2 fix: Walk() should require quorum number of disks only (#9164) 2020-03-18 20:56:07 -07:00
Anis Elleuch
db2155551a heal: Pass scan mode to HealObjects to deep scan full quorum objects (#9159)
As an optimization of the healing, HealObjects() avoids sending an
object to the background healing subsystem when the object is
present on all disks.

However, HealObjects() should have checked the scan type; if it
is deep, always pass the object to the healing subsystem.
2020-03-18 17:50:00 -07:00
Harshavardhana
09d35d3b4c fix: sts to return appropriate errors (#9161) 2020-03-18 17:25:45 -07:00
Anis Elleuch
5b9342d35c xl: Tree walking should not quit when one disk returns empty (#9160)
Currently, a tree walk, needed to list objects in a specific
set, quits listing as soon as it finds no entries on a disk, which
is wrong.

This affected background healing, because the latter uses the
tree walk directly. If an object does not exist on the first
disk, for example, it will seem like the object does not
exist at all and no healing work is needed.

This commit fixes the behavior.
2020-03-18 16:58:05 -07:00
Klaus Post
8d98662633 re-implement data usage crawler to be more efficient (#9075)
Implementation overview: 

https://gist.github.com/klauspost/1801c858d5e0df391114436fdad6987b
2020-03-18 16:19:29 -07:00
Anis Elleuch
7fdeb44372 info: Initialize boot time early so uptime will always be correct (#9154) 2020-03-17 16:37:28 -07:00
poornas
59dced8237 Print node status even in --quiet mode (#9149) 2020-03-17 15:25:00 -07:00
Anis Elleuch
496f4a7dc7 Add service account type in IAM (#9029) 2020-03-17 10:36:13 -07:00
kannappanr
8b880a246a fix: deleteObjectTagging should 204 on success (#9150) 2020-03-16 23:21:24 -07:00
Klaus Post
eeb5942b6b fix: remote profile names and extension (#9145)
Remote profiles are not formatted correctly:

```
profile-172.31.91.126_9000-cpu.pprof
profile-172.31.91.126_9000-goroutines-before.txt
profile-172.31.91.126_9000-goroutines.txt
profiling-172.31.80.49_9000-cpu.pprof.pprof
profiling-172.31.80.49_9000-goroutines-before.txt.pprof
profiling-172.31.80.49_9000-goroutines.txt.pprof
profiling-172.31.86.101_9000-cpu.pprof.pprof
profiling-172.31.86.101_9000-goroutines-before.txt.pprof
profiling-172.31.86.101_9000-goroutines.txt.pprof
profiling-172.31.91.191_9000-cpu.pprof.pprof
profiling-172.31.91.191_9000-goroutines-before.txt.pprof
profiling-172.31.91.191_9000-goroutines.txt.pprof
```

`profiling` -> `profile`, remove extra extension.
2020-03-16 11:39:53 -07:00
yeungc
7ec904d67b fix: wording and update content of chinese docs (#9140) 2020-03-16 10:04:16 -07:00
Harshavardhana
c9212819af fix: lock maintenance should honor quorum (#9138)
The staleness of a lock should be determined by
the quorum number of entries returning stale;
this allows for situations when locks are held
while nodes are down - we don't accidentally
clear locks unintentionally when they are valid
and correct.

Also, lock maintenance should be run by all servers,
not one server; stale locks need to be cleaned up outside
the requirement for holding distributed locks.

Thanks @klauspost for reproducing this issue
2020-03-15 11:55:52 -07:00
poornas
10fd53d6bb Fix: admin config set API for notifications (#9085)
Filter out targets set via env when
validating incoming config change against
configured notification targets

Fixes #9066
2020-03-14 00:01:15 -07:00
gzur
3fea1d5e35 Align STS web-identity code snippet to documentation (minio#9114) (#9130) 2020-03-13 22:58:53 -07:00
Anis Elleuch
35ecc04223 Support configurable quote character parameter in Select (#8955) 2020-03-13 22:09:34 -07:00
Harshavardhana
3ca9f5ffa3 Update yaml files to latest version RELEASE.2020-03-14T02-21-58Z 2020-03-13 20:05:27 -07:00
Krishna Srinivas
2e9fed1a14 non-empty dirs should not be listed as objects (#9129) 2020-03-13 17:43:00 -07:00
Nitish Tiwari
6b92f3fd99 Add docker files for ARM32 and ARM64 builds (#9132) 2020-03-13 13:37:39 -07:00
Kody A Kantor
06e30b5aa1 Skip building directio on platforms that don't support Direct IO (#9059) 2020-03-12 18:57:41 -07:00
Harshavardhana
603cf2a8bb fix: broken gzip handling with Select API (#9128)
This PR fixes a regression introduced in a1c7c9ea73
2020-03-12 15:34:11 -07:00
Harshavardhana
a54cdb9587 fix: Send x-amz-mp-parts-count for multiparted objects (#9116)
Some AWS SDKs latently rely on this value
to calculate the right number of parts during a parallel
GetObject request; this feature is used along with
content-range, so we should support this as well.
2020-03-12 12:37:27 -07:00
Andreas Auernhammer
ed4bd20a7c change ca path env. var in KMS guide (#9125)
This commit fixes the env. variable in the
KMS guide used to specify the CA certificates
for the KES server.

Previously the env. variable `MINIO_KMS_KES_CAPATH` has
been used - which works in non-containerized environments
due to how MinIO merges the config file and environment
variables. In containerized environments (e.g. docker)
this does not work, and trying to specify `MINIO_KMS_KES_CAPATH`
instead of `MINIO_KMS_KES_CA_PATH` eventually leads to MinIO not
trusting the certificate presented by the KES server.

See: cfd12914e1/cmd/crypto/config.go (L186)
2020-03-12 07:47:40 -07:00
Harshavardhana
cfd12914e1 fix: crash in serverInfo handler when ldap is configured (#9123) 2020-03-11 23:13:32 -07:00
Klaus Post
c55aeaf814 Update compression package (#9120)
Fix a potential problem on non-AMD64 platforms and very small files

https://github.com/klauspost/compress/pull/244
2020-03-11 23:02:15 -07:00
Anis Elleuch
fdf65aa9b9 heal: Add info about the next background healing round (#9122)
- avoid setting last heal activity when starting self-healing

This can be confusing to users thinking that the self healing
cycle was already performed.

- add info about the next background healing round
2020-03-11 23:00:31 -07:00
Harshavardhana
69b2aacf5a fix return proper error for OperationTimedout (#9117)
An OperationTimedout error occurs when locking
times out while trying to acquire a lock. This
error should be returned appropriately to
the client with http status "408" (request timed out).

This translation was broken; fix it.
2020-03-11 14:11:04 -07:00
Anis Elleuch
0af62d35a0 xl: Implement posix.DeletePrefixes to enhance delete perf (#9100)
The bulk delete API was using cleanupObjectsBulk(), which calls the posix
listing and delete APIs to remove objects' internal files in the
backend (xl.json and parts) one by one.

Add DeletePrefixes to the storage API to remove the content
of a directory in a single call.

Also use a removal goroutine for each disk to accelerate removal.
2020-03-11 08:56:36 -07:00
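The per-disk fan-out is the usual WaitGroup pattern; a generic sketch with a hypothetical deletePrefix helper, not the storage API itself:

```
package main

import (
	"fmt"
	"sync"
)

// deletePrefix stands in for the per-disk removal call (hypothetical).
func deletePrefix(disk, prefix string) error {
	fmt.Println("removing", prefix, "on", disk)
	return nil
}

func main() {
	disks := []string{"/mnt/disk1", "/mnt/disk2", "/mnt/disk3"}
	prefix := "bucket/stale-prefix/"

	// One goroutine per disk so removals proceed in parallel.
	var wg sync.WaitGroup
	errs := make([]error, len(disks))
	for i, d := range disks {
		wg.Add(1)
		go func(i int, d string) {
			defer wg.Done()
			errs[i] = deletePrefix(d, prefix)
		}(i, d)
	}
	wg.Wait()
	fmt.Println("errors:", errs)
}
```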
Nitish Tiwari
7c32f3f554 Fix the URL for MinIO update when using custom download server (#9111)
Co-authored-by: Nitish Tiwari <nitish@minio.io>
Co-authored-by: Harshavardhana <harsha@minio.io>
2020-03-11 20:09:20 +05:30
Aditya Manthramurthy
cec8cdb35e S3Select: Handle array selection in from clause (#9076) 2020-03-10 22:34:58 -07:00
Harshavardhana
5ab9cc029d fix: crash observed for anonymous deletes from UI (#9107) 2020-03-09 21:21:35 -07:00
Minio Trusted
667f42515a Update yaml files to latest version RELEASE.2020-03-09T18-26-53Z 2020-03-09 18:38:07 +00:00
Harshavardhana
3614cb7a8b update minio-go library to fix couple of issues (#9099)
- Add PutObjectOptions.PartSize docs (#1239) (03/06/20) <Harshavardhana>
- list: Check EncodingType in list resp before decoding object names (#1238) (03/04/20) <Harshavardhana>
- Add Support for Legal-Hold (#1233) (03/04/20) <kannappanr>
- Add LegalHold API Support (#1226) (02/19/20) <Nitish Tiwari>
- extract userMetadata from event response (#1229) (02/18/20) <Harshavardhana>
- fix: ignore AWS elb endpoints in region extraction (#1228) (02/14/20) <Harshavardhana>
- Bucket and object name length error message fixes (#1227) (02/12/20) <Ville Skyttä>
- Add BucketEncryption apis (#1217) (02/01/20) <ebozduman>
- Set IAM endpoint to default value if unspecified (#1224) (02/01/20) <radix-aw>
- fix testListObjects functional test (#1222) (01/28/20) <poornas>
- fix: retry AccessDenied only if Region is present (#1221) (01/24/20) <Harshavardhana>
- Add new Amazon S3 endpoints (#1220) (01/23/20) <kannappanr>
2020-03-09 12:27:25 +05:30
kumy
b809c84338 fix: notifications doc elaborate env values for targets (#9103) 2020-03-08 18:33:43 -07:00
kannappanr
33edb072a3 Add TopLocksAdminAction to diagsnostics canned policy (#9104) 2020-03-08 18:32:39 -07:00
Harshavardhana
6a00eb10bf fix: allow set drive count of proper divisible values (#9101)
Currently the code assumed some orthogonal requirements
which led to situations where, when we have a setup with,
let's say for example, 168 drives, the final
set_drive_count chosen was 14. Indeed 168 drives are
divisible by 12, but this wasn't allowed due to an
unexpected requirement that 12 be a perfect modulo
of 14, which is not possible. This assumption was incorrect.

This PR fixes this old assumption properly, and also adds a
few tests and some negative tests as well. Improvements
are seen in error messages as well.
2020-03-08 13:30:25 -07:00
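The valid choices are simply the divisors of the total drive count that fall within the allowed set-size range; a sketch (the 4..16 bounds here are an assumption for illustration):

```
package main

import "fmt"

// validSetSizes returns every count that divides totalDrives evenly
// and lies within [minSize, maxSize].
func validSetSizes(totalDrives, minSize, maxSize int) []int {
	var sizes []int
	for n := minSize; n <= maxSize; n++ {
		if totalDrives%n == 0 {
			sizes = append(sizes, n)
		}
	}
	return sizes
}

func main() {
	// For the 168-drive example above: [4 6 7 8 12 14]
	fmt.Println(validSetSizes(168, 4, 16))
}
```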
Harshavardhana
792ee48d2c add additional logging during server formatting (#9102) 2020-03-08 12:12:07 -07:00
Minio Trusted
52873ac3a3 Update yaml files to latest version RELEASE.2020-03-06T22-23-56Z 2020-03-06 22:32:45 +00:00
Harshavardhana
88ae0f1196 Improve delete performance by reducing the number of calls (#9092)
- Remove the requirement to honor storage class for deletes
- Improve `posix.DeleteFileBulk` code to Stat the volumeDir
  only once per call, rather than for all object paths.
2020-03-06 13:44:24 -08:00
Anis Elleuch
23a0415eb7 profiling: Fix crash when enabling goroutines profiling (#9097)
This commit replaces 'goroutines' with 'goroutine' when passing it
to the pprof library when activating goroutine-type profiling
2020-03-06 13:22:47 -08:00
Anis Elleuch
75a0661213 data-usage: Fix the calculation of the next crawling round (#9096)
This commit fixes a simple typo that miscalculated the waiting time
until the next round of data crawling to compute the data usage.
2020-03-06 11:34:12 -08:00
ebozduman
a1c7c9ea73 Matches s3 invalid compression format error for 'mc sql' (#9067) 2020-03-05 19:34:04 -08:00
Harshavardhana
7f19a9a617 Add CREDITS file in official MinIO Docker release image (#9091) 2020-03-06 00:22:45 +05:30
kannappanr
2f2c7d91a8 Add new extended list of JWT keys from OpenID group (#9087)
https://www.iana.org/assignments/jwt/jwt.xhtml#claims
2020-03-05 05:05:36 -08:00
Minio Trusted
9ad1c2d07d Update yaml files to latest version RELEASE.2020-03-05T01-04-19Z 2020-03-05 01:10:15 +00:00
467 changed files with 21325 additions and 14029 deletions

8
.github/ISSUE_TEMPLATE/config.yml vendored Normal file

@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
  - name: MinIO Community Support
    url: https://slack.min.io
    about: Please ask and answer questions here.
  - name: MinIO SUBNET Support
    url: https://min.io/pricing
    about: Join this for Enterprise Support.

39
.github/lock.yml vendored Normal file

@@ -0,0 +1,39 @@
# Configuration for Lock Threads - https://github.com/dessant/lock-threads-app
# Number of days of inactivity before a closed issue or pull request is locked
daysUntilLock: 365
# Skip issues and pull requests created before a given timestamp. Timestamp must
# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
skipCreatedBefore: false
# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
exemptLabels: []
# Label to add before locking, such as `outdated`. Set to `false` to disable
lockLabel: false
# Comment to post before locking. Set to `false` to disable
lockComment: >-
  This thread has been automatically locked since there has not been
  any recent activity after it was closed. Please open a new issue for
  related bugs.
# Assign `resolved` as the reason for locking. Set to `false` to disable
setLockReason: true
# Limit to only `issues` or `pulls`
only: issues
# Optionally, specify configuration settings just for `issues` or `pulls`
# issues:
# exemptLabels:
# - help-wanted
# lockLabel: outdated
# pulls:
# daysUntilLock: 30
# Repository to extend settings from
# _extends: repo

59
.github/stale.yml vendored Normal file

@@ -0,0 +1,59 @@
# Configuration for probot-stale - https://github.com/probot/stale
# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 90
# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: 30
# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
onlyLabels: []
# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
- "security"
- "pending discussion"
# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false
# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: false
# Set to true to ignore issues with an assignee (defaults to false)
exemptAssignees: false
# Label to use when marking as stale
staleLabel: stale
# Comment to post when marking as stale. Set to `false` to disable
markComment: >-
  This issue has been automatically marked as stale because it has not had
  recent activity. It will be closed after 21 days if no further activity
  occurs. Thank you for your contributions.
# Comment to post when removing the stale label.
# unmarkComment: >
# Your comment here.
# Comment to post when closing a stale Issue or Pull Request.
# closeComment: >
# Your comment here.
# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 1
# Limit to only `issues` or `pulls`
# only: issues
# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
# pulls:
# daysUntilStale: 30
# markComment: >
# This pull request has been automatically marked as stale because it has not had
# recent activity. It will be closed if no further activity occurs. Thank you
# for your contributions.
# issues:
# exemptLabels:
# - confirmed

42
.github/workflows/go.yml vendored Normal file

@@ -0,0 +1,42 @@
name: Go
on:
  pull_request:
    branches:
    - master
  push:
    branches:
    - master
jobs:
  build:
    name: Test on Go ${{ matrix.go-version }} and ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go-version: [1.13.x]
        os: [ubuntu-latest]
    steps:
    - name: Set up Go ${{ matrix.go-version }} on ${{ matrix.os }}
      uses: actions/setup-go@v1
      with:
        go-version: ${{ matrix.go-version }}
      id: go
    - name: Check out code into the Go module directory
      uses: actions/checkout@v1
    - name: Build on ${{ matrix.os }}
      env:
        CGO_ENABLED: 0
        GO111MODULE: on
        SIMPLE_CI: 1
      run: |
        sudo apt-get install devscripts
        make
        diff -au <(gofmt -s -d cmd) <(printf "")
        diff -au <(gofmt -s -d pkg) <(printf "")
        make test-race
        make crosscompile
        make verify
        make verify-healing

@@ -29,12 +29,6 @@ matrix:
- SIMPLE_CI=1
go: 1.13.x
script:
- make
- diff -au <(gofmt -s -d cmd) <(printf "")
- diff -au <(gofmt -s -d pkg) <(printf "")
- make test-race
- make crosscompile
- make verify
- cd browser && npm install && npm run test && cd ..
- bash -c 'shopt -s globstar; shellcheck mint/**/*.sh'

@@ -1,4 +1,4 @@
# MinIO Contribution Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Go Report Card](https://goreportcard.com/badge/minio/minio)](https://goreportcard.com/report/minio/minio) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)
# MinIO Contribution Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)
``MinIO`` community welcomes your contribution. To make the process as seamless as possible, we recommend you read this contribution guide.

3302
CREDITS

File diff suppressed because it is too large

@@ -22,7 +22,8 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
EXPOSE 9000
COPY --from=0 /go/bin/minio /usr/bin/minio
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY --from=0 /go/minio/CREDITS /third_party/
COPY --from=0 /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \

41
Dockerfile.arm.release Normal file

@@ -0,0 +1,41 @@
FROM golang:1.13-alpine as builder
WORKDIR /home
ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on
RUN \
apk add --no-cache git 'curl>7.61.0' && \
git clone https://github.com/minio/minio && \
curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
FROM arm32v7/alpine:3.10
LABEL maintainer="MinIO Inc <dev@min.io>"
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY CREDITS /third_party/
COPY --from=builder /home/qemu-arm-static /usr/bin/qemu-arm-static
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl https://dl.min.io/server/minio/release/linux-arm/minio > /usr/bin/minio && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh
EXPOSE 9000
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
CMD ["minio"]

41
Dockerfile.arm64.release Normal file

@@ -0,0 +1,41 @@
FROM golang:1.13-alpine as builder
WORKDIR /home
ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on
RUN \
apk add --no-cache git 'curl>7.61.0' && \
git clone https://github.com/minio/minio && \
curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
FROM arm64v8/alpine:3.10
LABEL maintainer="MinIO Inc <dev@min.io>"
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY CREDITS /third_party/
COPY --from=builder /home/qemu-arm-static /usr/bin/qemu-arm-static
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl https://dl.min.io/server/minio/release/linux-arm64/minio > /usr/bin/minio && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh
EXPOSE 9000
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
CMD ["minio"]


@@ -4,6 +4,7 @@ LABEL maintainer="MinIO Inc <dev@min.io>"
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY minio /usr/bin/
COPY CREDITS /third_party/
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \


@@ -1,17 +1,11 @@
FROM ubuntu:16.04
FROM ubuntu:18.04
ENV DEBIAN_FRONTEND noninteractive
ENV LANG C.UTF-8
ENV GOROOT /usr/local/go
ENV GOPATH /usr/local
ENV GOPATH /usr/local/gopath
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH
ENV MINT_ROOT_DIR /mint
COPY mint /mint
RUN apt-get --yes update && apt-get --yes upgrade && \


@@ -13,6 +13,7 @@ FROM alpine:3.10
LABEL maintainer="MinIO Inc <dev@min.io>"
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY CREDITS /third_party/
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \


@@ -57,7 +57,7 @@ RUN yarn test
#-------------------------------------------------------------
# Stage 3: Run Gateway Tests
#-------------------------------------------------------------
FROM ubuntu:16.04
FROM ubuntu:18.04
COPY --from=0 /go/src/github.com/minio/minio/minio /usr/bin/minio
COPY buildscripts/gateway-tests.sh /usr/bin/gateway-tests.sh
@@ -66,7 +66,7 @@ COPY mint /mint
ENV DEBIAN_FRONTEND noninteractive
ENV LANG C.UTF-8
ENV GOROOT /usr/local/go
ENV GOPATH /usr/local
ENV GOPATH /usr/local/gopath
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH
ENV SIMPLE_CI 1
ENV MINT_ROOT_DIR /mint


@@ -22,7 +22,7 @@ getdeps:
ifeq ($(GOARCH),s390x)
@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && GO111MODULE=off go get honnef.co/go/tools/cmd/staticcheck)
else
@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && wget --quiet https://github.com/dominikh/go-tools/releases/download/2019.2.3/staticcheck_${GOOS}_${GOARCH}.tar.gz && tar xf staticcheck_${GOOS}_${GOARCH}.tar.gz && mv staticcheck/staticcheck ${GOPATH}/bin/staticcheck && chmod +x ${GOPATH}/bin/staticcheck && rm -f staticcheck_${GOOS}_${GOARCH}.tar.gz && rm -rf staticcheck)
@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && wget --quiet https://github.com/dominikh/go-tools/releases/download/2020.1.3/staticcheck_${GOOS}_${GOARCH}.tar.gz && tar xf staticcheck_${GOOS}_${GOARCH}.tar.gz && mv staticcheck/staticcheck ${GOPATH}/bin/staticcheck && chmod +x ${GOPATH}/bin/staticcheck && rm -f staticcheck_${GOOS}_${GOARCH}.tar.gz && rm -rf staticcheck)
endif
@which misspell 1>/dev/null || (echo "Installing misspell" && GO111MODULE=off go get -u github.com/client9/misspell/cmd/misspell)
@@ -71,19 +71,19 @@ test-race: verifiers build
# Verify minio binary
verify:
@echo "Verifying build with race"
@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-build.sh)
# Verify healing of disks with minio binary
verify-healing:
@echo "Verify healing build with race"
@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-healing.sh)
# Builds minio locally.
build: checks
@echo "Building minio binary to './minio'"
@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
docker: build
@docker build -t $(TAG) . -f Dockerfile.dev
@@ -101,3 +101,4 @@ clean:
@rm -rvf minio
@rm -rvf build
@rm -rvf release
@rm -rvf .verify*


@@ -1,5 +1,5 @@
# MinIO Quickstart Guide
[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Go Report Card](https://goreportcard.com/badge/minio/minio)](https://goreportcard.com/report/minio/minio) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)
[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)
[![MinIO](https://raw.githubusercontent.com/minio/minio/master/.github/logo.svg?sanitize=true)](https://min.io)
@@ -185,11 +185,5 @@ mc admin update <minio alias, e.g., myminio>
## Contribute to MinIO Project
Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)
## Caveats
MinIO in its default mode doesn't compute MD5 checksums of incoming streams unless the client requests it via the `Content-Md5` header for validation. This may lead to incompatibility with rare S3 clients like `s3ql` which unfortunately do not set `Content-Md5` but expect the server to calculate the hex MD5 of the stream. MinIO considers this a bug in `s3ql` that should be fixed on the client side, because MD5 is a poor way to checksum and validate the authenticity of objects. Until such client applications are fixed, MinIO provides a workaround: start the server with the `--compat` option instead.
```sh
./minio --compat server /data
```
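For clients that cannot rely on `--compat`, the client-side fix is small: compute the MD5 of the payload and send it base64-encoded in the `Content-Md5` header. The following is a minimal, hypothetical Go sketch of that behavior; the endpoint and bucket are placeholders, and a real request would additionally carry S3 signature headers:
```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"net/http"
)

func main() {
	body := []byte("hello object")

	// S3 expects Content-MD5 as the base64 encoding of the raw 16-byte
	// digest, not the hex string.
	sum := md5.Sum(body)
	contentMD5 := base64.StdEncoding.EncodeToString(sum[:])

	// Placeholder endpoint and bucket; a real client must also sign the request.
	req, err := http.NewRequest(http.MethodPut,
		"http://localhost:9000/mybucket/hello.txt", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-MD5", contentMD5)
	fmt.Println("Content-MD5:", req.Header.Get("Content-MD5"))
}
```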
## License
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fminio%2Fminio.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fminio%2Fminio?ref=badge_large)


@@ -1,4 +1,4 @@
# MinIO Quickstart Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Go Report Card](https://goreportcard.com/badge/minio/minio)](https://goreportcard.com/report/minio/minio) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)
# MinIO Quickstart Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)
MinIO is an object storage service released under the Apache License v2.0. It is compatible with the Amazon S3 cloud storage service interface and is ideally suited for storing large volumes of unstructured data, such as images, videos, log files, backups, and container/VM images, where an object can be of any size from a few KBs up to a maximum of 5TB.


@@ -88,11 +88,6 @@ export class ChangePasswordModal extends React.Component {
canChangePassword() {
const { serverInfo } = this.props
// Password change is not allowed in WORM mode
if (serverInfo.info.isWorm) {
return false
}
// Password change is not allowed for temporary users(STS)
if(serverInfo.userInfo.isTempUser) {
return false


@@ -64,17 +64,6 @@ describe("ChangePasswordModal", () => {
shallow(<ChangePasswordModal serverInfo={serverInfo} />)
})
it("should not allow changing password when isWorm is true", () => {
const newServerInfo = { ...serverInfo, info: { isWorm: true } }
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
expect(
wrapper
.find("ModalBody")
.childAt(0)
.text()
).toBe("Credentials of this user cannot be updated through MinIO Browser.")
})
it("should not allow changing password when not IAM user", () => {
const newServerInfo = {
...serverInfo,


@@ -14,30 +14,67 @@
* limitations under the License.
*/
import mimedb from 'mime-types'
import mimedb from "mime-types"
const isFolder = (name, contentType) => {
if (name.endsWith('/')) return true
if (name.endsWith("/")) return true
return false
}
const isPdf = (name, contentType) => {
if (contentType === 'application/pdf') return true
if (contentType === "application/pdf") return true
return false
}
const isImage = (name, contentType) => {
if (
contentType === "image/jpeg" ||
contentType === "image/gif" ||
contentType === "image/x-icon" ||
contentType === "image/png" ||
contentType === "image/svg+xml" ||
contentType === "image/tiff" ||
contentType === "image/webp"
)
return true
return false
}
const isZip = (name, contentType) => {
if (!contentType || !contentType.includes('/')) return false
if (contentType.split('/')[1].includes('zip')) return true
if (!contentType || !contentType.includes("/")) return false
if (contentType.split("/")[1].includes("zip")) return true
return false
}
const isCode = (name, contentType) => {
const codeExt = ['c', 'cpp', 'go', 'py', 'java', 'rb', 'js', 'pl', 'fs',
'php', 'css', 'less', 'scss', 'coffee', 'net', 'html',
'rs', 'exs', 'scala', 'hs', 'clj', 'el', 'scm', 'lisp',
'asp', 'aspx']
const ext = name.split('.').reverse()[0]
const codeExt = [
"c",
"cpp",
"go",
"py",
"java",
"rb",
"js",
"pl",
"fs",
"php",
"css",
"less",
"scss",
"coffee",
"net",
"html",
"rs",
"exs",
"scala",
"hs",
"clj",
"el",
"scm",
"lisp",
"asp",
"aspx",
]
const ext = name.split(".").reverse()[0]
for (var i in codeExt) {
if (ext === codeExt[i]) return true
}
@@ -45,9 +82,9 @@ const isCode = (name, contentType) => {
}
const isExcel = (name, contentType) => {
if (!contentType || !contentType.includes('/')) return false
const types = ['excel', 'spreadsheet']
const subType = contentType.split('/')[1]
if (!contentType || !contentType.includes("/")) return false
const types = ["excel", "spreadsheet"]
const subType = contentType.split("/")[1]
for (var i in types) {
if (subType.includes(types[i])) return true
}
@@ -55,9 +92,9 @@ const isExcel = (name, contentType) => {
}
const isDoc = (name, contentType) => {
if (!contentType || !contentType.includes('/')) return false
const types = ['word', '.document']
const subType = contentType.split('/')[1]
if (!contentType || !contentType.includes("/")) return false
const types = ["word", ".document"]
const subType = contentType.split("/")[1]
for (var i in types) {
if (subType.includes(types[i])) return true
}
@@ -65,9 +102,9 @@ const isDoc = (name, contentType) => {
}
const isPresentation = (name, contentType) => {
if (!contentType || !contentType.includes('/')) return false
var types = ['powerpoint', 'presentation']
const subType = contentType.split('/')[1]
if (!contentType || !contentType.includes("/")) return false
var types = ["powerpoint", "presentation"]
const subType = contentType.split("/")[1]
for (var i in types) {
if (subType.includes(types[i])) return true
}
@@ -76,31 +113,32 @@ const isPresentation = (name, contentType) => {
const typeToIcon = (type) => {
return (name, contentType) => {
if (!contentType || !contentType.includes('/')) return false
if (contentType.split('/')[0] === type) return true
if (!contentType || !contentType.includes("/")) return false
if (contentType.split("/")[0] === type) return true
return false
}
}
export const getDataType = (name, contentType) => {
if (contentType === "") {
contentType = mimedb.lookup(name) || 'application/octet-stream'
contentType = mimedb.lookup(name) || "application/octet-stream"
}
const check = [
['folder', isFolder],
['code', isCode],
['audio', typeToIcon('audio')],
['image', typeToIcon('image')],
['video', typeToIcon('video')],
['text', typeToIcon('text')],
['pdf', isPdf],
['zip', isZip],
['excel', isExcel],
['doc', isDoc],
['presentation', isPresentation]
["folder", isFolder],
["code", isCode],
["audio", typeToIcon("audio")],
["image", typeToIcon("image")],
["video", typeToIcon("video")],
["text", typeToIcon("text")],
["pdf", isPdf],
["image", isImage],
["zip", isZip],
["excel", isExcel],
["doc", isDoc],
["presentation", isPresentation],
]
for (var i in check) {
if (check[i][1](name, contentType)) return check[i][0]
}
return 'other'
return "other"
}


@@ -19,18 +19,22 @@ import { connect } from "react-redux"
import { Dropdown } from "react-bootstrap"
import ShareObjectModal from "./ShareObjectModal"
import DeleteObjectConfirmModal from "./DeleteObjectConfirmModal"
import PreviewObjectModal from "./PreviewObjectModal"
import * as objectsActions from "./actions"
import { getDataType } from "../mime.js"
import {
SHARE_OBJECT_EXPIRY_DAYS,
SHARE_OBJECT_EXPIRY_HOURS,
SHARE_OBJECT_EXPIRY_MINUTES
SHARE_OBJECT_EXPIRY_MINUTES,
} from "../constants"
export class ObjectActions extends React.Component {
constructor(props) {
super(props)
this.state = {
showDeleteConfirmation: false
showDeleteConfirmation: false,
showPreview: false,
}
}
shareObject(e) {
@@ -53,7 +57,20 @@ export class ObjectActions extends React.Component {
}
hideDeleteConfirmModal() {
this.setState({
showDeleteConfirmation: false
showDeleteConfirmation: false,
})
}
getObjectURL(objectname, callback) {
const { getObjectURL } = this.props
getObjectURL(objectname, callback)
}
showPreviewModal(e) {
e.preventDefault()
this.setState({ showPreview: true })
}
hidePreviewModal() {
this.setState({
showPreview: false,
})
}
render() {
@@ -69,6 +86,15 @@ export class ObjectActions extends React.Component {
>
<i className="fas fa-share-alt" />
</a>
{getDataType(object.name, object.contentType) == "image" && (
<a
href=""
className="fiad-action"
onClick={this.showPreviewModal.bind(this)}
>
<i className="far fa-file-image" />
</a>
)}
<a
href=""
className="fiad-action"
@@ -77,14 +103,22 @@ export class ObjectActions extends React.Component {
<i className="fas fa-trash-alt" />
</a>
</Dropdown.Menu>
{(showShareObjectModal && shareObjectName === object.name) &&
<ShareObjectModal object={object} />}
{showShareObjectModal && shareObjectName === object.name && (
<ShareObjectModal object={object} />
)}
{this.state.showDeleteConfirmation && (
<DeleteObjectConfirmModal
deleteObject={this.deleteObject.bind(this)}
hideDeleteConfirmModal={this.hideDeleteConfirmModal.bind(this)}
/>
)}
{this.state.showPreview && (
<PreviewObjectModal
object={object}
hidePreviewModal={this.hidePreviewModal.bind(this)}
getObjectURL={this.getObjectURL.bind(this)}
/>
)}
</Dropdown>
)
}
@@ -94,15 +128,17 @@ const mapStateToProps = (state, ownProps) => {
return {
object: ownProps.object,
showShareObjectModal: state.objects.shareObject.show,
shareObjectName: state.objects.shareObject.object
shareObjectName: state.objects.shareObject.object,
}
}
const mapDispatchToProps = dispatch => {
const mapDispatchToProps = (dispatch) => {
return {
shareObject: (object, days, hours, minutes) =>
dispatch(objectsActions.shareObject(object, days, hours, minutes)),
deleteObject: object => dispatch(objectsActions.deleteObject(object))
deleteObject: (object) => dispatch(objectsActions.deleteObject(object)),
getObjectURL: (object, callback) =>
dispatch(objectsActions.getObjectURL(object, callback)),
}
}


@@ -0,0 +1,65 @@
/*
* MinIO Cloud Storage (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from "react"
import { Modal, ModalHeader, ModalBody } from "react-bootstrap"
class PreviewObjectModal extends React.Component {
constructor(props) {
super(props)
this.state = {
url: "",
}
props.getObjectURL(props.object.name, (url) => {
this.setState({
url: url,
})
})
}
render() {
const { hidePreviewModal } = this.props
return (
<Modal
show={true}
animation={false}
onHide={hidePreviewModal}
bsSize="large"
>
<ModalHeader>Preview</ModalHeader>
<ModalBody>
<div className="input-group">
{this.state.url && (
<img
alt="Image broken"
src={this.state.url}
style={{ display: "block", width: "100%" }}
/>
)}
</div>
</ModalBody>
<div className="modal-footer">
{
<button className="btn btn-link" onClick={hidePreviewModal}>
Cancel
</button>
}
</div>
</Modal>
)
}
}
export default PreviewObjectModal


@@ -66,6 +66,49 @@ describe("ObjectActions", () => {
expect(deleteObject).toHaveBeenCalledWith("obj1")
})
it("should show PreviewObjectModal when preview action is clicked", () => {
const wrapper = shallow(
<ObjectActions
object={{ name: "obj1", contentType: "image/jpeg"}}
currentPrefix={"pre1/"} />
)
wrapper
.find("a")
.at(1)
.simulate("click", { preventDefault: jest.fn() })
expect(wrapper.state("showPreview")).toBeTruthy()
expect(wrapper.find("PreviewObjectModal").length).toBe(1)
})
it("should hide PreviewObjectModal when cancel button is clicked", () => {
const wrapper = shallow(
<ObjectActions
object={{ name: "obj1" , contentType: "image/jpeg"}}
currentPrefix={"pre1/"} />
)
wrapper
.find("a")
.at(1)
.simulate("click", { preventDefault: jest.fn() })
wrapper.find("PreviewObjectModal").prop("hidePreviewModal")()
wrapper.update()
expect(wrapper.state("showPreview")).toBeFalsy()
expect(wrapper.find("PreviewObjectModal").length).toBe(0)
})
it("should not show PreviewObjectModal when preview action is clicked if object is not an image", () => {
const wrapper = shallow(
<ObjectActions
object={{ name: "obj1"}}
currentPrefix={"pre1/"} />
)
expect(wrapper
.find("a")
.length).toBe(2) // only the 2 non-preview actions are rendered
})
it("should call shareObject with object and expiry", () => {
const shareObject = jest.fn()
const wrapper = shallow(


@@ -19,7 +19,7 @@ import history from "../history"
import {
sortObjectsByName,
sortObjectsBySize,
sortObjectsByDate
sortObjectsByDate,
} from "../utils"
import { getCurrentBucket } from "../buckets/selectors"
import { getCurrentPrefix, getCheckedList } from "./selectors"
@@ -31,7 +31,7 @@ import {
SORT_BY_SIZE,
SORT_BY_LAST_MODIFIED,
SORT_ORDER_ASC,
SORT_ORDER_DESC
SORT_ORDER_DESC,
} from "../constants"
export const SET_LIST = "objects/SET_LIST"
@@ -48,35 +48,35 @@ export const CHECKED_LIST_REMOVE = "objects/CHECKED_LIST_REMOVE"
export const CHECKED_LIST_RESET = "objects/CHECKED_LIST_RESET"
export const SET_LIST_LOADING = "objects/SET_LIST_LOADING"
export const setList = objects => ({
export const setList = (objects) => ({
type: SET_LIST,
objects
objects,
})
export const resetList = () => ({
type: RESET_LIST
type: RESET_LIST,
})
export const setListLoading = listLoading => ({
export const setListLoading = (listLoading) => ({
type: SET_LIST_LOADING,
listLoading
listLoading,
})
export const fetchObjects = () => {
return function(dispatch, getState) {
return function (dispatch, getState) {
dispatch(resetList())
const {
buckets: { currentBucket },
objects: { currentPrefix }
objects: { currentPrefix },
} = getState()
if (currentBucket) {
dispatch(setListLoading(true))
return web
.ListObjects({
bucketName: currentBucket,
prefix: currentPrefix
prefix: currentPrefix,
})
.then(res => {
.then((res) => {
// we need to check if the bucket name and prefix are the same as
// when the request was made before updating the displayed objects
if (
@@ -85,10 +85,10 @@ export const fetchObjects = () => {
) {
let objects = []
if (res.objects) {
objects = res.objects.map(object => {
objects = res.objects.map((object) => {
return {
...object,
name: object.name.replace(currentPrefix, "")
name: object.name.replace(currentPrefix, ""),
}
})
}
@@ -104,13 +104,13 @@ export const fetchObjects = () => {
dispatch(setListLoading(false))
}
})
.catch(err => {
.catch((err) => {
if (web.LoggedIn()) {
dispatch(
alertActions.set({
type: "danger",
message: err.message,
autoClear: true
autoClear: true,
})
)
dispatch(resetList())
@@ -123,8 +123,8 @@ export const fetchObjects = () => {
}
}
export const sortObjects = sortBy => {
return function(dispatch, getState) {
export const sortObjects = (sortBy) => {
return function (dispatch, getState) {
const { objects } = getState()
let sortOrder = SORT_ORDER_ASC
// Reverse sort order if the list is already sorted on same field
@@ -149,18 +149,18 @@ const sortObjectsList = (list, sortBy, sortOrder) => {
}
}
export const setSortBy = sortBy => ({
export const setSortBy = (sortBy) => ({
type: SET_SORT_BY,
sortBy
sortBy,
})
export const setSortOrder = sortOrder => ({
export const setSortOrder = (sortOrder) => ({
type: SET_SORT_ORDER,
sortOrder
sortOrder,
})
export const selectPrefix = prefix => {
return function(dispatch, getState) {
export const selectPrefix = (prefix) => {
return function (dispatch, getState) {
dispatch(setCurrentPrefix(prefix))
dispatch(fetchObjects())
dispatch(resetCheckedList())
@@ -169,49 +169,49 @@ export const selectPrefix = prefix => {
}
}
export const setCurrentPrefix = prefix => {
export const setCurrentPrefix = (prefix) => {
return {
type: SET_CURRENT_PREFIX,
prefix
prefix,
}
}
export const setPrefixWritable = prefixWritable => ({
export const setPrefixWritable = (prefixWritable) => ({
type: SET_PREFIX_WRITABLE,
prefixWritable
prefixWritable,
})
export const deleteObject = object => {
return function(dispatch, getState) {
export const deleteObject = (object) => {
return function (dispatch, getState) {
const currentBucket = getCurrentBucket(getState())
const currentPrefix = getCurrentPrefix(getState())
const objectName = `${currentPrefix}${object}`
return web
.RemoveObject({
bucketName: currentBucket,
objects: [objectName]
objects: [objectName],
})
.then(() => {
dispatch(removeObject(object))
})
.catch(e => {
.catch((e) => {
dispatch(
alertActions.set({
type: "danger",
message: e.message
message: e.message,
})
)
})
}
}
export const removeObject = object => ({
export const removeObject = (object) => ({
type: REMOVE,
object
object,
})
export const deleteCheckedObjects = () => {
return function(dispatch, getState) {
return function (dispatch, getState) {
const checkedObjects = getCheckedList(getState())
for (let i = 0; i < checkedObjects.length; i++) {
dispatch(deleteObject(checkedObjects[i]))
@@ -221,7 +221,7 @@ export const deleteCheckedObjects = () => {
}
export const shareObject = (object, days, hours, minutes) => {
return function(dispatch, getState) {
return function (dispatch, getState) {
const currentBucket = getCurrentBucket(getState())
const currentPrefix = getCurrentPrefix(getState())
const objectName = `${currentPrefix}${object}`
@@ -232,22 +232,22 @@ export const shareObject = (object, days, hours, minutes) => {
host: location.host,
bucket: currentBucket,
object: objectName,
expiry: expiry
expiry: expiry,
})
.then(obj => {
.then((obj) => {
dispatch(showShareObject(object, obj.url))
dispatch(
alertActions.set({
type: "success",
message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`
message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`,
})
)
})
.catch(err => {
.catch((err) => {
dispatch(
alertActions.set({
type: "danger",
message: err.message
message: err.message,
})
)
})
@@ -265,7 +265,7 @@ export const shareObject = (object, days, hours, minutes) => {
dispatch(
alertActions.set({
type: "success",
message: `Object shared.`
message: `Object shared.`,
})
)
}
@@ -276,18 +276,17 @@ export const showShareObject = (object, url) => ({
type: SET_SHARE_OBJECT,
show: true,
object,
url
url,
})
export const hideShareObject = (object, url) => ({
type: SET_SHARE_OBJECT,
show: false,
object: "",
url: ""
url: "",
})
export const downloadObject = object => {
return function(dispatch, getState) {
export const getObjectURL = (object, callback) => {
return function (dispatch, getState) {
const currentBucket = getCurrentBucket(getState())
const currentPrefix = getCurrentPrefix(getState())
const objectName = `${currentPrefix}${object}`
@@ -295,52 +294,73 @@ export const downloadObject = object => {
if (web.LoggedIn()) {
return web
.CreateURLToken()
.then(res => {
const url = `${
window.location.origin
}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${
res.token
}`
window.location = url
.then((res) => {
const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${res.token}`
callback(url)
})
.catch(err => {
.catch((err) => {
dispatch(
alertActions.set({
type: "danger",
message: err.message
message: err.message,
})
)
})
} else {
const url = `${
window.location.origin
}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
callback(url)
}
}
}
export const downloadObject = (object) => {
return function (dispatch, getState) {
const currentBucket = getCurrentBucket(getState())
const currentPrefix = getCurrentPrefix(getState())
const objectName = `${currentPrefix}${object}`
const encObjectName = encodeURI(objectName)
if (web.LoggedIn()) {
return web
.CreateURLToken()
.then((res) => {
const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${res.token}`
window.location = url
})
.catch((err) => {
dispatch(
alertActions.set({
type: "danger",
message: err.message,
})
)
})
} else {
const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
window.location = url
}
}
}
export const checkObject = object => ({
export const checkObject = (object) => ({
type: CHECKED_LIST_ADD,
object
object,
})
export const uncheckObject = object => ({
export const uncheckObject = (object) => ({
type: CHECKED_LIST_REMOVE,
object
object,
})
export const resetCheckedList = () => ({
type: CHECKED_LIST_RESET
type: CHECKED_LIST_RESET,
})
export const downloadCheckedObjects = () => {
return function(dispatch, getState) {
return function (dispatch, getState) {
const state = getState()
const req = {
bucketName: getCurrentBucket(state),
prefix: getCurrentPrefix(state),
objects: getCheckedList(state)
objects: getCheckedList(state),
}
if (!web.LoggedIn()) {
const requestUrl = location.origin + "/minio/zip?token="
@@ -348,17 +368,15 @@ export const downloadCheckedObjects = () => {
} else {
return web
.CreateURLToken()
.then(res => {
const requestUrl = `${
location.origin
}${minioBrowserPrefix}/zip?token=${res.token}`
.then((res) => {
const requestUrl = `${location.origin}${minioBrowserPrefix}/zip?token=${res.token}`
downloadZip(requestUrl, req, dispatch)
})
.catch(err =>
.catch((err) =>
dispatch(
alertActions.set({
type: "danger",
message: err.message
message: err.message,
})
)
)
@@ -374,11 +392,11 @@ const downloadZip = (url, req, dispatch) => {
xhr.open("POST", url, true)
xhr.responseType = "blob"
xhr.onload = function(e) {
xhr.onload = function (e) {
if (this.status == 200) {
dispatch(resetCheckedList())
var blob = new Blob([this.response], {
type: "octet/stream"
type: "octet/stream",
})
var blobUrl = window.URL.createObjectURL(blob)
var separator = req.prefix.length > 1 ? "-" : ""

browser/staticcheck.conf (new file, 1 line)

@@ -0,0 +1 @@
checks = ["all", "-ST1005", "-ST1000", "-SA4000", "-SA9004", "-SA1019", "-SA1008", "-U1000", "-ST1003", "-ST1018"]

File diff suppressed because one or more lines are too long


@@ -20,11 +20,11 @@ function _build() {
package=$(go list -f '{{.ImportPath}}')
printf -- "--> %15s:%s\n" "${osarch}" "${package}"
# Go build to build the binary.
# go build -trimpath to build the binary.
export GOOS=$os
export GOARCH=$arch
export GO111MODULE=on
go build -tags kqueue -o /dev/null
go build -trimpath -tags kqueue -o /dev/null
}
function main() {


@@ -46,7 +46,10 @@ function main()
gw_pid="$(start_minio_gateway_s3)"
SERVER_ENDPOINT=127.0.0.1:24240 ENABLE_HTTPS=0 ACCESS_KEY=minio \
SECRET_KEY=minio123 MINT_MODE="full" /mint/entrypoint.sh
SECRET_KEY=minio123 MINT_MODE="full" /mint/entrypoint.sh \
aws-sdk-go aws-sdk-java aws-sdk-php aws-sdk-ruby awscli \
healthcheck mc minio-dotnet minio-js \
minio-py s3cmd s3select security
rv=$?
kill "$sr_pid"


@@ -3,5 +3,5 @@
set -e
for d in $(go list ./... | grep -v browser); do
CGO_ENABLED=1 go test -v -race --timeout 20m "$d"
CGO_ENABLED=1 go test -v -race --timeout 50m "$d"
done


@@ -30,33 +30,49 @@ MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )
function start_minio_3_node() {
declare -a minio_pids
declare -a ARGS
export MINIO_ACCESS_KEY=minio
export MINIO_SECRET_KEY=minio123
start_port=$(shuf -i 10000-65000 -n 1)
for i in $(seq 1 3); do
ARGS+=("http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/6/")
ARGS+=("http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/")
done
"${MINIO[@]}" --address ":8001" ${ARGS[@]} > "${WORK_DIR}/dist-minio-8001.log" 2>&1 &
"${MINIO[@]}" --address ":$[$start_port+1]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
minio_pids[0]=$!
disown "${minio_pids[0]}"
"${MINIO[@]}" --address ":8002" ${ARGS[@]} > "${WORK_DIR}/dist-minio-8002.log" 2>&1 &
"${MINIO[@]}" --address ":$[$start_port+2]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
minio_pids[1]=$!
disown "${minio_pids[1]}"
"${MINIO[@]}" --address ":8003" ${ARGS[@]} > "${WORK_DIR}/dist-minio-8003.log" 2>&1 &
"${MINIO[@]}" --address ":$[$start_port+3]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
minio_pids[2]=$!
disown "${minio_pids[2]}"
sleep "$1"
echo "${minio_pids[@]}"
for pid in "${minio_pids[@]}"; do
if ! kill "$pid"; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
# kill forcibly to make sure the process is gone and the script can proceed.
kill -9 "$pid"
sleep 1 # wait 1sec per pid
done
}
function check_online() {
for i in $(seq 1 3); do
if grep -q 'Server switching to safe mode' ${WORK_DIR}/dist-minio-$[8000+$i].log; then
echo "1"
fi
done
if grep -q 'Server switching to safe mode' ${WORK_DIR}/dist-minio-*.log; then
echo "1"
fi
}
function purge()
@@ -75,50 +91,21 @@ function __init__()
}
function perform_test() {
minio_pids=( $(start_minio_3_node 60) )
for pid in "${minio_pids[@]}"; do
if ! kill "$pid"; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-$[8000+$i].log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
# forcibly killing, to proceed further properly.
kill -9 "$pid"
done
start_minio_3_node 60
echo "Testing Distributed Erasure setup healing of drives"
echo "Remove the contents of the disks belonging to '${1}' erasure set"
rm -rf ${WORK_DIR}/${1}/*/
minio_pids=( $(start_minio_3_node 60) )
for pid in "${minio_pids[@]}"; do
if ! kill "$pid"; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-$[8000+$i].log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
# forcibly killing, to proceed further properly.
# if the previous kill is taking time.
kill -9 "$pid"
done
start_minio_3_node 60
rv=$(check_online)
if [ "$rv" == "1" ]; then
for pid in "${minio_pids[@]}"; do
kill -9 "$pid"
done
pkill -9 minio
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-$[8000+$i].log"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"


@@ -35,50 +35,45 @@ import (
"github.com/minio/minio/cmd/config/storageclass"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/madmin"
)
func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) ObjectLayer {
func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) (auth.Credentials, ObjectLayer) {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return nil
return auth.Credentials{}, nil
}
// Validate request signature.
_, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
cred, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return nil
return cred, nil
}
return objectAPI
return cred, objectAPI
}
// DelConfigKVHandler - DELETE /minio/admin/v2/del-config-kv
// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DelConfigKVHandler")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
password := globalActiveCred.SecretKey
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
@@ -103,28 +98,22 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
}
}
// SetConfigKVHandler - PUT /minio/admin/v2/set-config-kv
// SetConfigKVHandler - PUT /minio/admin/v3/set-config-kv
func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetConfigKVHandler")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
password := globalActiveCred.SecretKey
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
@@ -162,17 +151,17 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
// Make sure to write backend is encrypted
if globalConfigEncrypted {
saveConfig(context.Background(), objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
}
writeSuccessResponseHeadersOnly(w)
}
// GetConfigKVHandler - GET /minio/admin/v2/get-config-kv?key={key}
// GetConfigKVHandler - GET /minio/admin/v3/get-config-kv?key={key}
func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetConfigKVHandler")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
@@ -195,7 +184,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
return
}
password := globalActiveCred.SecretKey
password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, buf.Bytes())
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -208,7 +197,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ClearConfigHistoryKVHandler")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
@@ -243,7 +232,7 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *
func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RestoreConfigHistoryKVHandler")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
@@ -289,7 +278,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListConfigHistoryKVHandler")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
@@ -313,7 +302,7 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h
return
}
password := globalActiveCred.SecretKey
password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -323,11 +312,11 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h
writeSuccessResponseJSON(w, econfigData)
}
// HelpConfigKVHandler - GET /minio/admin/v2/help-config-kv?subSys={subSys}&key={key}
// HelpConfigKVHandler - GET /minio/admin/v3/help-config-kv?subSys={subSys}&key={key}
func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "HelpConfigKVHandler")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
@@ -349,28 +338,22 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req
w.(http.Flusher).Flush()
}
// SetConfigHandler - PUT /minio/admin/v2/config
// SetConfigHandler - PUT /minio/admin/v3/config
func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetConfigHandler")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
password := globalActiveCred.SecretKey
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
@@ -403,18 +386,18 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
// Make sure to write backend is encrypted
if globalConfigEncrypted {
saveConfig(context.Background(), objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
}
writeSuccessResponseHeadersOnly(w)
}
// GetConfigHandler - GET /minio/admin/v2/config
// GetConfigHandler - GET /minio/admin/v3/config
// Get config.json of this minio setup.
func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetConfigHandler")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
@@ -471,7 +454,7 @@ func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Reques
}
}
password := globalActiveCred.SecretKey
password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, []byte(s.String()))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

cmd/admin-handlers-quota.go (new file, 150 lines)

@@ -0,0 +1,150 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"io/ioutil"
"net/http"
"path"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/pkg/env"
iampolicy "github.com/minio/minio/pkg/iam/policy"
)
const (
bucketQuotaConfigFile = "quota.json"
)
// PutBucketQuotaConfigHandler - PUT Bucket quota configuration.
// ----------
// Places a quota configuration on the specified bucket. The quota
// set in the configuration is enforced as the total quota for the
// specified bucket.
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketQuotaConfig")
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
// Turn off quota commands if data usage info is unavailable.
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminBucketQuotaDisabled), r.URL)
return
}
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
defer r.Body.Close()
data, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
quotaCfg, err := parseBucketQuota(data)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configFile := path.Join(bucketConfigPrefix, bucket, bucketQuotaConfigFile)
if err = saveConfig(ctx, objectAPI, configFile, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if quotaCfg.Quota > 0 {
globalBucketQuotaSys.Set(bucket, quotaCfg)
globalNotificationSys.PutBucketQuotaConfig(ctx, bucket, quotaCfg)
} else {
globalBucketQuotaSys.Remove(bucket)
globalNotificationSys.RemoveBucketQuotaConfig(ctx, bucket)
}
// Write success response.
writeSuccessResponseHeadersOnly(w)
}
// GetBucketQuotaConfigHandler - gets bucket quota configuration
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketQuotaConfig")
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
configFile := path.Join(bucketConfigPrefix, bucket, bucketQuotaConfigFile)
configData, err := readConfig(ctx, objectAPI, configFile)
if err != nil {
if err != errConfigNotFound {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, BucketQuotaConfigNotFound{Bucket: bucket}), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, configData)
}
// RemoveBucketQuotaConfigHandler - removes Bucket quota configuration.
// ----------
// Removes quota configuration on the specified bucket.
func (a adminAPIHandlers) RemoveBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveBucketQuotaConfig")
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
configFile := path.Join(bucketConfigPrefix, bucket, bucketQuotaConfigFile)
if err := deleteConfig(ctx, objectAPI, configFile); err != nil {
if err != errConfigNotFound {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, BucketQuotaConfigNotFound{Bucket: bucket}), r.URL)
return
}
globalBucketQuotaSys.Remove(bucket)
globalNotificationSys.RemoveBucketQuotaConfig(ctx, bucket)
// Write success response.
writeSuccessNoContent(w)
}
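The exact wire format `parseBucketQuota` accepts is not visible in this diff. Judging from the handler's use of `quotaCfg.Quota` and the "hard"/"fifo" quota types this release introduces, the request body could look like the sketch below; the struct and its JSON keys are illustrative assumptions, not the repository's actual definition:
```go
package main

import (
	"encoding/json"
	"fmt"
)

// bucketQuota is a hypothetical mirror of the config parseBucketQuota reads;
// only the Quota field appears in the diff, the JSON keys are assumed.
type bucketQuota struct {
	Quota uint64 `json:"quota"`     // limit in bytes; the handler removes the quota when this is 0
	Type  string `json:"quotatype"` // "hard" (reject further writes) or "fifo" (evict oldest)
}

func main() {
	cfg := bucketQuota{Quota: 1 << 30, Type: "hard"} // 1 GiB hard quota
	data, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	// This JSON would form the request body, which the handler stores
	// under <bucketConfigPrefix>/<bucket>/quota.json.
	fmt.Println(string(data))
}
```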


@@ -1,5 +1,5 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
* MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -51,7 +51,7 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
return objectAPI, cred
}
// RemoveUser - DELETE /minio/admin/v2/remove-user?accessKey=<access_key>
// RemoveUser - DELETE /minio/admin/v3/remove-user?accessKey=<access_key>
func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveUser")
@@ -60,12 +60,6 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
return
}
// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
vars := mux.Vars(r)
accessKey := vars["accessKey"]
@@ -93,15 +87,17 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
}
}
// ListUsers - GET /minio/admin/v2/list-users
// ListUsers - GET /minio/admin/v3/list-users
func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListUsers")
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
if objectAPI == nil {
return
}
password := cred.SecretKey
allCredentials, err := globalIAMSys.ListUsers()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -114,7 +110,6 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
return
}
password := globalActiveCred.SecretKey
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -124,7 +119,7 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseJSON(w, econfigData)
}
// GetUserInfo - GET /minio/admin/v2/user-info
// GetUserInfo - GET /minio/admin/v3/user-info
func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetUserInfo")
@@ -151,7 +146,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseJSON(w, data)
}
// UpdateGroupMembers - PUT /minio/admin/v2/update-group-members
// UpdateGroupMembers - PUT /minio/admin/v3/update-group-members
func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "UpdateGroupMembers")
@@ -194,7 +189,7 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
}
}
// GetGroup - /minio/admin/v2/group?group=mygroup1
// GetGroup - /minio/admin/v3/group?group=mygroup1
func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetGroup")
@@ -221,7 +216,7 @@ func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseJSON(w, body)
}
// ListGroups - GET /minio/admin/v2/groups
// ListGroups - GET /minio/admin/v3/groups
func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListGroups")
@@ -245,7 +240,7 @@ func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseJSON(w, body)
}
// SetGroupStatus - PUT /minio/admin/v2/set-group-status?group=mygroup1&status=enabled
// SetGroupStatus - PUT /minio/admin/v3/set-group-status?group=mygroup1&status=enabled
func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetGroupStatus")
@@ -280,7 +275,7 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
}
}
// SetUserStatus - PUT /minio/admin/v2/set-user-status?accessKey=<access_key>&status=[enabled|disabled]
// SetUserStatus - PUT /minio/admin/v3/set-user-status?accessKey=<access_key>&status=[enabled|disabled]
func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetUserStatus")
@@ -289,12 +284,6 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
return
}
// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
vars := mux.Vars(r)
accessKey := vars["accessKey"]
status := vars["status"]
@@ -319,7 +308,7 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
}
}
// AddUser - PUT /minio/admin/v2/add-user?accessKey=<access_key>
// AddUser - PUT /minio/admin/v3/add-user?accessKey=<access_key>
func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddUser")
@@ -328,12 +317,6 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
return
}
// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
vars := mux.Vars(r)
accessKey := vars["accessKey"]
@@ -378,16 +361,209 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
}
}
// InfoCannedPolicy - GET /minio/admin/v2/info-canned-policy?name={policyName}
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicy")
// AddServiceAccount - PUT /minio/admin/v3/add-service-account
func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddServiceAccount")
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
var createReq madmin.AddServiceAccountReq
if err = json.Unmarshal(reqBytes, &createReq); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
// Disallow creating service accounts by the root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, createReq.Policy)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to reload the new service account
for _, nerr := range globalNotificationSys.LoadServiceAccount(newCred.AccessKey) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
var createResp = madmin.AddServiceAccountResp{
Credentials: auth.Credentials{
AccessKey: newCred.AccessKey,
SecretKey: newCred.SecretKey,
},
}
data, err := json.Marshal(createResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
// ListServiceAccounts - GET /minio/admin/v3/list-service-accounts
func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListServiceAccounts")
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Disallow listing service accounts by the root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, parentUser)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var listResp = madmin.ListServiceAccountsResp{
Accounts: serviceAccounts,
}
data, err := json.Marshal(listResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
// DeleteServiceAccount - DELETE /minio/admin/v3/delete-service-account
func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteServiceAccount")
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Disallow deleting service accounts by the root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
serviceAccount := mux.Vars(r)["accessKey"]
if serviceAccount == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
return
}
user, err := globalIAMSys.GetServiceAccountParent(ctx, serviceAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
if parentUser != user || user == "" {
// Either the service account belongs to another user or it
// doesn't exist; return a not-found error in both cases to
// mitigate brute-force attacks.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServiceAccountNotFound), r.URL)
return
}
err = globalIAMSys.DeleteServiceAccount(ctx, serviceAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessNoContent(w)
}
// InfoCannedPolicyV2 - GET /minio/admin/v2/info-canned-policy?name={policyName}
func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicyV2")
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
return
}
data, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
data, err := json.Marshal(policy)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -397,7 +573,58 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
w.(http.Flusher).Flush()
}
// ListCannedPolicies - GET /minio/admin/v2/list-canned-policies
// InfoCannedPolicy - GET /minio/admin/v3/info-canned-policy?name={policyName}
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicy")
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
return
}
policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
json.NewEncoder(w).Encode(policy)
w.(http.Flusher).Flush()
}
// ListCannedPoliciesV2 - GET /minio/admin/v2/list-canned-policies
func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPoliciesV2")
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
}
policies, err := globalIAMSys.ListPolicies()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
policyMap := make(map[string][]byte, len(policies))
for k, p := range policies {
var err error
policyMap[k], err = json.Marshal(p)
if err != nil {
logger.LogIf(ctx, err)
continue
}
}
if err = json.NewEncoder(w).Encode(policyMap); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
}
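
The wire formats of the two listings differ: the v2 handler returns map[string][]byte, whose values encoding/json emits as base64 strings, while the v3 handler (below) encodes the policies as JSON objects directly. A small demonstration under assumed types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type policy struct {
	Version string `json:"Version"`
}

func main() {
	p := policy{Version: "2012-10-17"}
	raw, _ := json.Marshal(p)

	v2 := map[string][]byte{"readonly": raw} // v2 shape
	v3 := map[string]policy{"readonly": p}   // v3 shape

	b2, _ := json.Marshal(v2)
	b3, _ := json.Marshal(v3)
	fmt.Println(string(b2)) // values appear base64-encoded, e.g. {"readonly":"eyJWZXJzaW9u..."}
	fmt.Println(string(b3)) // {"readonly":{"Version":"2012-10-17"}}
}
```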
// ListCannedPolicies - GET /minio/admin/v3/list-canned-policies
func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPolicies")
@@ -412,7 +639,16 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
return
}
if err = json.NewEncoder(w).Encode(policies); err != nil {
var newPolicies = make(map[string]iampolicy.Policy)
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {
logger.LogIf(ctx, err)
continue
}
newPolicies[name] = p
}
if err = json.NewEncoder(w).Encode(newPolicies); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -420,7 +656,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
w.(http.Flusher).Flush()
}
// RemoveCannedPolicy - DELETE /minio/admin/v2/remove-canned-policy?name=<policy_name>
// RemoveCannedPolicy - DELETE /minio/admin/v3/remove-canned-policy?name=<policy_name>
func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveCannedPolicy")
@@ -432,12 +668,6 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
vars := mux.Vars(r)
policyName := vars["name"]
// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
if err := globalIAMSys.DeletePolicy(policyName); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -452,7 +682,7 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
}
}
// AddCannedPolicy - PUT /minio/admin/v2/add-canned-policy?name=<policy_name>
// AddCannedPolicy - PUT /minio/admin/v3/add-canned-policy?name=<policy_name>
func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddCannedPolicy")
@@ -464,12 +694,6 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
vars := mux.Vars(r)
policyName := vars["name"]
// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
// Error out if Content-Length is missing.
if r.ContentLength <= 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
@@ -508,7 +732,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
}
}
// SetPolicyForUserOrGroup - PUT /minio/admin/v2/set-policy?policy=xxx&user-or-group=?[&is-group]
// SetPolicyForUserOrGroup - PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetPolicyForUserOrGroup")
@@ -522,15 +746,9 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
entityName := vars["userOrGroup"]
isGroup := vars["isGroup"] == "true"
// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
if !isGroup {
ok, err := globalIAMSys.IsTempUser(entityName)
if err != nil {
if err != nil && err != errNoSuchUser {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}


@@ -21,7 +21,6 @@ import (
"crypto/subtle"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@@ -42,13 +41,11 @@ import (
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/cmd/logger/message/log"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/cpu"
"github.com/minio/minio/pkg/event/target"
"github.com/minio/minio/pkg/handlers"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/mem"
xnet "github.com/minio/minio/pkg/net"
trace "github.com/minio/minio/pkg/trace"
)
@@ -98,7 +95,7 @@ func updateServer(updateURL, sha256Hex string, latestReleaseTime time.Time) (us
return us, nil
}
// ServerUpdateHandler - POST /minio/admin/v2/update?updateURL={updateURL}
// ServerUpdateHandler - POST /minio/admin/v3/update?updateURL={updateURL}
// ----------
// updates all minio servers and restarts them gracefully.
func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Request) {
@@ -141,9 +138,9 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
}
if runtime.GOOS == "windows" {
u.Path = path.Dir(u.Path) + "minio.exe"
u.Path = path.Dir(u.Path) + SlashSeparator + "minio.exe"
} else {
u.Path = path.Dir(u.Path) + "minio"
u.Path = path.Dir(u.Path) + SlashSeparator + "minio"
}
updateURL = u.String()
@@ -177,7 +174,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
}
}
// ServiceActionHandler - POST /minio/admin/v2/service?action={action}
// ServiceActionHandler - POST /minio/admin/v3/service?action={action}
// ----------
// restarts/stops the MinIO server gracefully; in a distributed setup, all nodes are affected.
func (a adminAPIHandlers) ServiceActionHandler(w http.ResponseWriter, r *http.Request) {
@@ -266,7 +263,7 @@ type ServerInfo struct {
Data *ServerInfoData `json:"data"`
}
// StorageInfoHandler - GET /minio/admin/v2/storageinfo
// StorageInfoHandler - GET /minio/admin/v3/storageinfo
// ----------
// Get server storage information
func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Request) {
@@ -291,7 +288,7 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
}
// DataUsageInfoHandler - GET /minio/admin/v2/datausage
// DataUsageInfoHandler - GET /minio/admin/v3/datausage
// ----------
// Get server/cluster data usage info
func (a adminAPIHandlers) DataUsageInfoHandler(w http.ResponseWriter, r *http.Request) {
@@ -377,155 +374,6 @@ func (a adminAPIHandlers) AccountingUsageInfoHandler(w http.ResponseWriter, r *h
writeSuccessResponseJSON(w, usageInfoJSON)
}
// ServerCPULoadInfo holds information about cpu utilization
// of one minio node. It also reports any errors if encountered
// while trying to reach this server.
type ServerCPULoadInfo struct {
Addr string `json:"addr"`
Error string `json:"error,omitempty"`
Load []cpu.Load `json:"load"`
HistoricLoad []cpu.Load `json:"historicLoad"`
}
// ServerMemUsageInfo holds information about memory utilization
// of one minio node. It also reports any errors if encountered
// while trying to reach this server.
type ServerMemUsageInfo struct {
Addr string `json:"addr"`
Error string `json:"error,omitempty"`
Usage []mem.Usage `json:"usage"`
HistoricUsage []mem.Usage `json:"historicUsage"`
}
// ServerNetReadPerfInfo network read performance information.
type ServerNetReadPerfInfo struct {
Addr string `json:"addr"`
ReadThroughput uint64 `json:"readThroughput"`
Error string `json:"error,omitempty"`
}
// PerfInfoHandler - GET /minio/admin/v2/performance?perfType={perfType}
// ----------
// Get all performance information based on input type
// Supported types = net, drive, cpu, mem
func (a adminAPIHandlers) PerfInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PerfInfo")
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.PerfInfoAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
switch perfType := vars["perfType"]; perfType {
case "net":
var size int64 = defaultNetPerfSize
if sizeStr, found := vars["size"]; found {
var err error
if size, err = strconv.ParseInt(sizeStr, 10, 64); err != nil || size < 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}
}
if !globalIsDistXL {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(globalEndpoints)
}
infos := map[string][]ServerNetReadPerfInfo{}
infos[addr] = globalNotificationSys.NetReadPerfInfo(size)
for peer, info := range globalNotificationSys.CollectNetPerfInfo(size) {
infos[peer] = info
}
// Marshal API response
jsonBytes, err := json.Marshal(infos)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Reply with performance information (across nodes in a
// distributed setup) as json.
writeSuccessResponseJSON(w, jsonBytes)
case "drive":
// Drive Perf is only implemented for Erasure coded backends
if !globalIsXL {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
var size int64 = madmin.DefaultDrivePerfSize
if sizeStr, found := vars["size"]; found {
var err error
if size, err = strconv.ParseInt(sizeStr, 10, 64); err != nil || size <= 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}
}
// Get drive performance details from local server's drive(s)
dp := getLocalDrivesPerf(globalEndpoints, size, r)
// Notify all other MinIO peers to report drive performance numbers
dps := globalNotificationSys.DrivePerfInfo(size)
dps = append(dps, dp)
// Marshal API response
jsonBytes, err := json.Marshal(dps)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Reply with performance information (across nodes in a
// distributed setup) as json.
writeSuccessResponseJSON(w, jsonBytes)
case "cpu":
// Get CPU load details from local server's cpu(s)
cpu := getLocalCPULoad(globalEndpoints, r)
// Notify all other MinIO peers to report cpu load numbers
cpus := globalNotificationSys.CPULoadInfo()
cpus = append(cpus, cpu)
// Marshal API response
jsonBytes, err := json.Marshal(cpus)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Reply with cpu load information (across nodes in a
// distributed setup) as json.
writeSuccessResponseJSON(w, jsonBytes)
case "mem":
// Get mem usage details from local server(s)
m := getLocalMemUsage(globalEndpoints, r)
// Notify all other MinIO peers to report mem usage numbers
mems := globalNotificationSys.MemUsageInfo()
mems = append(mems, m)
// Marshal API response
jsonBytes, err := json.Marshal(mems)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Reply with mem usage information (across nodes in a
// distributed setup) as json.
writeSuccessResponseJSON(w, jsonBytes)
default:
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
}
}
func newLockEntry(l lockRequesterInfo, resource, server string) *madmin.LockEntry {
entry := &madmin.LockEntry{
Timestamp: l.Timestamp,
@@ -621,7 +469,7 @@ type StartProfilingResult struct {
Error string `json:"error"`
}
// StartProfilingHandler - POST /minio/admin/v2/profiling/start?profilerType={profilerType}
// StartProfilingHandler - POST /minio/admin/v3/profiling/start?profilerType={profilerType}
// ----------
// Enable server profiling
func (a adminAPIHandlers) StartProfilingHandler(w http.ResponseWriter, r *http.Request) {
@@ -717,7 +565,7 @@ func (f dummyFileInfo) ModTime() time.Time { return f.modTime }
func (f dummyFileInfo) IsDir() bool { return f.isDir }
func (f dummyFileInfo) Sys() interface{} { return f.sys }
// DownloadProfilingHandler - POST /minio/admin/v2/profiling/download
// DownloadProfilingHandler - POST /minio/admin/v3/profiling/download
// ----------
// Download profiling information of all nodes in a zip format
func (a adminAPIHandlers) DownloadProfilingHandler(w http.ResponseWriter, r *http.Request) {
@@ -789,7 +637,7 @@ func extractHealInitParams(vars map[string]string, qParms url.Values, r io.Reade
if hip.clientToken == "" {
jerr := json.NewDecoder(r).Decode(&hip.hs)
if jerr != nil {
logger.LogIf(context.Background(), jerr, logger.Application)
logger.LogIf(GlobalContext, jerr, logger.Application)
err = ErrRequestBodyParse
return
}
@@ -799,7 +647,7 @@ func extractHealInitParams(vars map[string]string, qParms url.Values, r io.Reade
return
}
// HealHandler - POST /minio/admin/v2/heal/
// HealHandler - POST /minio/admin/v3/heal/
// -----------
// Start heal processing and return heal status items.
//
@@ -982,13 +830,24 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
}
// Aggregate healing result
var aggregatedHealStateResult = madmin.BgHealState{}
var aggregatedHealStateResult = madmin.BgHealState{
ScannedItemsCount: bgHealStates[0].ScannedItemsCount,
LastHealActivity: bgHealStates[0].LastHealActivity,
NextHealRound: bgHealStates[0].NextHealRound,
}
bgHealStates = bgHealStates[1:]
for _, state := range bgHealStates {
aggregatedHealStateResult.ScannedItemsCount += state.ScannedItemsCount
if aggregatedHealStateResult.LastHealActivity.Before(state.LastHealActivity) {
if !state.LastHealActivity.IsZero() && aggregatedHealStateResult.LastHealActivity.Before(state.LastHealActivity) {
aggregatedHealStateResult.LastHealActivity = state.LastHealActivity
// The node which has the last heal activity means its
// is the node that is orchestrating self healing operations,
// which also means it is the same node which decides when
// the next self healing operation will be done.
aggregatedHealStateResult.NextHealRound = state.NextHealRound
}
}
if err := json.NewEncoder(w).Encode(aggregatedHealStateResult); err != nil {
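
The hunk above changes the aggregation to seed from the first node's state and to treat the node with the most recent non-zero heal activity as the orchestrator of the next round. A reduced sketch of that fold, with an assumed field set:

```go
package main

import (
	"fmt"
	"time"
)

type bgHealState struct {
	ScannedItemsCount int64
	LastHealActivity  time.Time
	NextHealRound     time.Time
}

// aggregate assumes len(states) > 0, as the handler guarantees.
func aggregate(states []bgHealState) bgHealState {
	agg := states[0]
	for _, s := range states[1:] {
		agg.ScannedItemsCount += s.ScannedItemsCount
		if !s.LastHealActivity.IsZero() && agg.LastHealActivity.Before(s.LastHealActivity) {
			agg.LastHealActivity = s.LastHealActivity
			// The node with the latest activity also decides the next round.
			agg.NextHealRound = s.NextHealRound
		}
	}
	return agg
}

func main() {
	now := time.Now()
	out := aggregate([]bgHealState{
		{ScannedItemsCount: 10, LastHealActivity: now.Add(-time.Hour)},
		{ScannedItemsCount: 5, LastHealActivity: now, NextHealRound: now.Add(24 * time.Hour)},
	})
	fmt.Println(out.ScannedItemsCount, out.LastHealActivity.Equal(now))
}
```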
@@ -1081,7 +940,7 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
HTTPStatusCode: http.StatusNotFound,
}
} else {
apiErr = errorCodes.ToAPIErr(toAdminAPIErrCode(ctx, err))
apiErr = errorCodes.ToAPIErrWithErr(toAdminAPIErrCode(ctx, err), err)
}
}
return apiErr
@@ -1104,7 +963,7 @@ func mustTrace(entry interface{}, trcAll, errOnly bool) bool {
return trace
}
// TraceHandler - POST /minio/admin/v2/trace
// TraceHandler - POST /minio/admin/v3/trace
// ----------
// The handler sends http trace to the connected HTTP client.
func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
@@ -1121,16 +980,13 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set(xhttp.ContentType, "text/event-stream")
doneCh := make(chan struct{})
defer close(doneCh)
// Trace Publisher and peer-trace-client use nonblocking sends and hence do not wait for slow receivers.
// Use buffered channel to take care of burst sends or slow w.Write()
traceCh := make(chan interface{}, 4000)
peers := getRestClients(globalEndpoints)
peers := newPeerRestClients(globalEndpoints)
globalHTTPTrace.Subscribe(traceCh, doneCh, func(entry interface{}) bool {
globalHTTPTrace.Subscribe(traceCh, ctx.Done(), func(entry interface{}) bool {
return mustTrace(entry, trcAll, trcErr)
})
@@ -1138,7 +994,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
if peer == nil {
continue
}
peer.Trace(traceCh, doneCh, trcAll, trcErr)
peer.Trace(traceCh, ctx.Done(), trcAll, trcErr)
}
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
@@ -1157,7 +1013,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
return
}
w.(http.Flusher).Flush()
case <-GlobalServiceDoneCh:
case <-ctx.Done():
return
}
}
@@ -1191,20 +1047,18 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
w.Header().Add("Connection", "close")
w.Header().Set(xhttp.ContentType, "text/event-stream")
doneCh := make(chan struct{})
defer close(doneCh)
logCh := make(chan interface{}, 4000)
peers := getRestClients(globalEndpoints)
peers := newPeerRestClients(globalEndpoints)
globalConsoleSys.Subscribe(logCh, doneCh, node, limitLines, logKind, nil)
globalConsoleSys.Subscribe(logCh, ctx.Done(), node, limitLines, logKind, nil)
for _, peer := range peers {
if peer == nil {
continue
}
if node == "" || strings.EqualFold(peer.host.Name, node) {
peer.ConsoleLog(logCh, doneCh)
peer.ConsoleLog(logCh, ctx.Done())
}
}
@@ -1216,8 +1070,8 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
for {
select {
case entry := <-logCh:
log := entry.(madmin.LogInfo)
if log.SendLog(node, logKind) {
log, ok := entry.(log.Info)
if ok && log.SendLog(node, logKind) {
if err := enc.Encode(log); err != nil {
return
}
@@ -1228,13 +1082,13 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
return
}
w.(http.Flusher).Flush()
case <-GlobalServiceDoneCh:
case <-ctx.Done():
return
}
}
}
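
Both streaming handlers in this hunk swap the process-wide GlobalServiceDoneCh for the request context, so the loop also exits when the client disconnects. A compact sketch of the resulting pattern (endpoint and payload are illustrative):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func stream(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	flusher, _ := w.(http.Flusher)
	for {
		select {
		case t := <-ticker.C:
			fmt.Fprintf(w, "data: %s\n\n", t.Format(time.RFC3339))
			if flusher != nil {
				flusher.Flush()
			}
		case <-ctx.Done(): // client gone or server shutting down
			return
		}
	}
}

func main() {
	http.HandleFunc("/log", stream)
	_ = http.ListenAndServe(":8080", nil)
}
```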
// KMSKeyStatusHandler - GET /minio/admin/v2/kms/key/status?key-id=<master-key-id>
// KMSKeyStatusHandler - GET /minio/admin/v3/kms/key/status?key-id=<master-key-id>
func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSKeyStatusHandler")
@@ -1303,64 +1157,169 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
writeSuccessResponseJSON(w, resp)
}
// ServerHardwareInfoHandler - GET /minio/admin/v2/hardwareinfo?Type={hwType}
// OBDInfoHandler - GET /minio/admin/v3/obdinfo
// ----------
// Get all hardware information based on input type
// Supported types = cpu, network
func (a adminAPIHandlers) ServerHardwareInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "HardwareInfo")
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerHardwareInfoAdminAction)
// Get server on-board diagnostics
func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "OBDInfo")
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.OBDInfoAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
hardware := vars[madmin.HARDWARE]
obdInfo := madmin.OBDInfo{}
obdInfoCh := make(chan madmin.OBDInfo)
switch madmin.HardwareType(hardware) {
case madmin.CPU:
// Get CPU hardware details from local server's cpu(s)
cpu := getLocalCPUInfo(globalEndpoints, r)
// Notify all other MinIO peers to report cpu hardware
cpus := globalNotificationSys.CPUInfo()
cpus = append(cpus, cpu)
// Marshal API response
jsonBytes, err := json.Marshal(cpus)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Reply with cpu hardware information (across nodes in a
// distributed setup) as json.
writeSuccessResponseJSON(w, jsonBytes)
case madmin.NETWORK:
// Get Network hardware details from local server's network(s)
network := getLocalNetworkInfo(globalEndpoints, r)
// Notify all other MinIO peers to report network hardware
networks := globalNotificationSys.NetworkInfo()
networks = append(networks, network)
// Marshal API response
jsonBytes, err := json.Marshal(networks)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Reply with network hardware information (across nodes in a
// distributed setup) as json.
writeSuccessResponseJSON(w, jsonBytes)
default:
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
enc := json.NewEncoder(w)
partialWrite := func(oinfo madmin.OBDInfo) {
obdInfoCh <- oinfo
}
setCommonHeaders(w)
w.Header().Set(xhttp.ContentType, "text/event-stream")
w.WriteHeader(http.StatusOK)
errResp := func(err error) {
errorResponse := getAPIErrorResponse(ctx, toAdminAPIErr(ctx, err), r.URL.String(),
w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
encodedErrorResponse := encodeResponse(errorResponse)
obdInfo.Error = string(encodedErrorResponse)
logger.LogIf(ctx, enc.Encode(obdInfo))
}
deadline := 3600 * time.Second
if dstr := r.URL.Query().Get("deadline"); dstr != "" {
var err error
deadline, err = time.ParseDuration(dstr)
if err != nil {
errResp(err)
return
}
}
deadlinedCtx, cancel := context.WithTimeout(ctx, deadline)
defer cancel()
nsLock := objectAPI.NewNSLock(deadlinedCtx, minioMetaBucket, "obd-in-progress")
if err := nsLock.GetLock(newDynamicTimeout(deadline, deadline)); err != nil { // returns a locked lock
errResp(err)
return
}
defer nsLock.Unlock()
go func() {
defer close(obdInfoCh)
if cpu, ok := vars["syscpu"]; ok && cpu == "true" {
cpuInfo := getLocalCPUOBDInfo(deadlinedCtx)
obdInfo.Sys.CPUInfo = append(obdInfo.Sys.CPUInfo, cpuInfo)
obdInfo.Sys.CPUInfo = append(obdInfo.Sys.CPUInfo, globalNotificationSys.CPUOBDInfo(deadlinedCtx)...)
partialWrite(obdInfo)
}
if diskHw, ok := vars["sysdiskhw"]; ok && diskHw == "true" {
diskHwInfo := getLocalDiskHwOBD(deadlinedCtx)
obdInfo.Sys.DiskHwInfo = append(obdInfo.Sys.DiskHwInfo, diskHwInfo)
obdInfo.Sys.DiskHwInfo = append(obdInfo.Sys.DiskHwInfo, globalNotificationSys.DiskHwOBDInfo(deadlinedCtx)...)
partialWrite(obdInfo)
}
if osInfo, ok := vars["sysosinfo"]; ok && osInfo == "true" {
osInfo := getLocalOsInfoOBD(deadlinedCtx)
obdInfo.Sys.OsInfo = append(obdInfo.Sys.OsInfo, osInfo)
obdInfo.Sys.OsInfo = append(obdInfo.Sys.OsInfo, globalNotificationSys.OsOBDInfo(deadlinedCtx)...)
partialWrite(obdInfo)
}
if mem, ok := vars["sysmem"]; ok && mem == "true" {
memInfo := getLocalMemOBD(deadlinedCtx)
obdInfo.Sys.MemInfo = append(obdInfo.Sys.MemInfo, memInfo)
obdInfo.Sys.MemInfo = append(obdInfo.Sys.MemInfo, globalNotificationSys.MemOBDInfo(deadlinedCtx)...)
partialWrite(obdInfo)
}
if proc, ok := vars["sysprocess"]; ok && proc == "true" {
procInfo := getLocalProcOBD(deadlinedCtx)
obdInfo.Sys.ProcInfo = append(obdInfo.Sys.ProcInfo, procInfo)
obdInfo.Sys.ProcInfo = append(obdInfo.Sys.ProcInfo, globalNotificationSys.ProcOBDInfo(deadlinedCtx)...)
partialWrite(obdInfo)
}
if config, ok := vars["minioconfig"]; ok && config == "true" {
cfg, err := readServerConfig(ctx, objectAPI)
logger.LogIf(ctx, err)
obdInfo.Minio.Config = cfg
partialWrite(obdInfo)
}
if drive, ok := vars["perfdrive"]; ok && drive == "true" {
// Get drive obd details from local server's drive(s)
driveOBDSerial := getLocalDrivesOBD(deadlinedCtx, false, globalEndpoints, r)
driveOBDParallel := getLocalDrivesOBD(deadlinedCtx, true, globalEndpoints, r)
errStr := ""
if driveOBDSerial.Error != "" {
errStr = "serial: " + driveOBDSerial.Error
}
if driveOBDParallel.Error != "" {
errStr = errStr + " parallel: " + driveOBDParallel.Error
}
driveOBD := madmin.ServerDrivesOBDInfo{
Addr: driveOBDSerial.Addr,
Serial: driveOBDSerial.Serial,
Parallel: driveOBDParallel.Parallel,
Error: errStr,
}
obdInfo.Perf.DriveInfo = append(obdInfo.Perf.DriveInfo, driveOBD)
// Notify all other MinIO peers to report drive obd numbers
driveOBDs := globalNotificationSys.DriveOBDInfo(deadlinedCtx)
obdInfo.Perf.DriveInfo = append(obdInfo.Perf.DriveInfo, driveOBDs...)
partialWrite(obdInfo)
}
if net, ok := vars["perfnet"]; ok && net == "true" && globalIsDistXL {
obdInfo.Perf.Net = append(obdInfo.Perf.Net, globalNotificationSys.NetOBDInfo(deadlinedCtx))
obdInfo.Perf.Net = append(obdInfo.Perf.Net, globalNotificationSys.DispatchNetOBDInfo(deadlinedCtx)...)
obdInfo.Perf.NetParallel = globalNotificationSys.NetOBDParallelInfo(deadlinedCtx)
partialWrite(obdInfo)
}
}()
ticker := time.NewTicker(30 * time.Second)
defer ticker.Stop()
for {
select {
case oinfo, ok := <-obdInfoCh:
if !ok {
return
}
logger.LogIf(ctx, enc.Encode(oinfo))
w.(http.Flusher).Flush()
case <-ticker.C:
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
case <-deadlinedCtx.Done():
w.(http.Flusher).Flush()
return
}
}
}
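
The new handler accepts an optional deadline query parameter, parsed with time.ParseDuration and enforced through context.WithTimeout. A small sketch of that part in isolation (only the parameter name is taken from the diff; the rest is illustrative):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// withDeadline reads an optional "deadline" query parameter and applies
// it to the request context, falling back to the given default.
func withDeadline(r *http.Request, fallback time.Duration) (context.Context, context.CancelFunc, error) {
	d := fallback
	if dstr := r.URL.Query().Get("deadline"); dstr != "" {
		var err error
		if d, err = time.ParseDuration(dstr); err != nil {
			return nil, nil, err
		}
	}
	ctx, cancel := context.WithTimeout(r.Context(), d)
	return ctx, cancel, nil
}

func handler(w http.ResponseWriter, r *http.Request) {
	ctx, cancel, err := withDeadline(r, time.Hour)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	defer cancel()
	select {
	case <-time.After(2 * time.Second): // stand-in for the OBD collection
		fmt.Fprintln(w, "done")
	case <-ctx.Done():
		fmt.Fprintln(w, "deadline exceeded")
	}
}

func main() {
	http.HandleFunc("/obdinfo", handler)
	_ = http.ListenAndServe(":8080", nil)
}
```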
// ServerInfoHandler - GET /minio/admin/v2/info
// ServerInfoHandler - GET /minio/admin/v3/info
// ----------
// Get server information
func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Request) {
@@ -1398,10 +1357,10 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
} else if ldapConn == nil {
ldap.Status = "Not Configured"
} else {
// Close ldap connection to avoid leaks.
ldapConn.Close()
ldap.Status = "online"
}
// Close ldap connection to avoid leaks.
defer ldapConn.Close()
}
log, audit := fetchLoggerInfo(cfg)
@@ -1425,7 +1384,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
OffDisks += v
}
backend = madmin.XlBackend{
backend = madmin.XLBackend{
Type: madmin.ErasureType,
OnlineDisks: OnDisks,
OfflineDisks: OffDisks,
@@ -1435,7 +1394,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
RRSCParity: storageInfo.Backend.RRSCParity,
}
} else {
backend = madmin.FsBackend{
backend = madmin.FSBackend{
Type: madmin.FsType,
}
}
@@ -1496,7 +1455,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
Mode: mode,
Domain: domain,
Region: globalServerRegion,
SQSARN: globalNotificationSys.GetARNList(),
SQSARN: globalNotificationSys.GetARNList(false),
DeploymentID: globalDeploymentID,
Buckets: buckets,
Objects: objects,
@@ -1520,11 +1479,15 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
func fetchLambdaInfo(cfg config.Config) []map[string][]madmin.TargetIDStatus {
// Fetch the targets
targetList, err := notify.RegisterNotificationTargets(cfg, GlobalServiceDoneCh, NewCustomHTTPTransport(), nil, true)
if err != nil {
// Fetch the configured targets
tr := NewGatewayHTTPTransport()
defer tr.CloseIdleConnections()
targetList, err := notify.FetchRegisteredTargets(cfg, GlobalContext.Done(), tr, true, false)
if err != nil && err != notify.ErrTargetsOffline {
logger.LogIf(GlobalContext, err)
return nil
}
lambdaMap := make(map[string][]madmin.TargetIDStatus)
for targetID, target := range targetList.TargetMap() {
@@ -1569,7 +1532,6 @@ func fetchVaultStatus(cfg config.Config) madmin.Vault {
}
if err := checkConnection(kmsInfo.Endpoint, 15*time.Second); err != nil {
vault.Status = "offline"
} else {
vault.Status = "online"
@@ -1637,25 +1599,23 @@ func fetchLoggerInfo(cfg config.Config) ([]madmin.Logger, []madmin.Audit) {
// checkConnection - ping an endpoint, return err in case of no connection
func checkConnection(endpointStr string, timeout time.Duration) error {
u, pErr := xnet.ParseURL(endpointStr)
if pErr != nil {
return pErr
tr := newCustomHTTPTransport(&tls.Config{RootCAs: globalRootCAs}, timeout)()
defer tr.CloseIdleConnections()
ctx, cancel := context.WithTimeout(GlobalContext, timeout)
defer cancel()
req, err := http.NewRequest(http.MethodHead, endpointStr, nil)
if err != nil {
return err
}
tr := newCustomHTTPTransport(
&tls.Config{RootCAs: globalRootCAs},
timeout,
0, /* Default value */
)()
if dErr := u.DialHTTP(tr); dErr != nil {
if urlErr, ok := dErr.(*url.Error); ok {
// Treat "connection refused" errors as an unreachable endpoint.
if target.IsConnRefusedErr(urlErr.Err) {
return errors.New("endpoint unreachable, please check your endpoint")
}
}
return dErr
client := &http.Client{Transport: tr}
resp, err := client.Do(req.WithContext(ctx))
if err != nil {
return err
}
defer xhttp.DrainBody(resp.Body)
resp.Body.Close()
return nil
}
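
The rewritten checkConnection probes the endpoint with a plain HEAD request under a context timeout rather than dialing through a parsed URL. A stripped-down, stdlib-only sketch of the same probe (the endpoint shown is just an example):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"time"
)

func checkConnection(endpoint string, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	req, err := http.NewRequest(http.MethodHead, endpoint, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req.WithContext(ctx))
	if err != nil {
		return err
	}
	// Drain and close the body so the connection can be reused.
	io.Copy(ioutil.Discard, resp.Body)
	return resp.Body.Close()
}

func main() {
	fmt.Println(checkConnection("https://play.min.io", 15*time.Second))
}
```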


@@ -43,7 +43,8 @@ type adminXLTestBed struct {
// prepareAdminXLTestBed - helper function that sets up a single-node
// XL backend for admin-handler tests.
func prepareAdminXLTestBed() (*adminXLTestBed, error) {
func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) {
// reset global variables to start afresh.
resetTestGlobals()
@@ -52,7 +53,7 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
globalIsXL = true
// Initializing objectLayer for HealFormatHandler.
objLayer, xlDirs, xlErr := initTestXLObjLayer()
objLayer, xlDirs, xlErr := initTestXLObjLayer(ctx)
if xlErr != nil {
return nil, xlErr
}
@@ -70,9 +71,9 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
globalConfigSys = NewConfigSys()
globalIAMSys = NewIAMSys()
globalIAMSys.Init(objLayer)
globalIAMSys.Init(ctx, objLayer)
buckets, err := objLayer.ListBuckets(context.Background())
buckets, err := objLayer.ListBuckets(ctx)
if err != nil {
return nil, err
}
@@ -85,7 +86,7 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
// Setup admin mgmt REST API handlers.
adminRouter := mux.NewRouter()
registerAdminRouter(adminRouter, true, true)
registerAdminRouter(adminRouter, true, true, false)
return &adminXLTestBed{
xlDirs: xlDirs,
@@ -103,20 +104,20 @@ func (atb *adminXLTestBed) TearDown() {
// initTestXLObjLayer - Helper function to initialize an XL-based object
// layer and set globalObjectAPI.
func initTestXLObjLayer() (ObjectLayer, []string, error) {
func initTestXLObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
xlDirs, err := getRandomDisks(16)
if err != nil {
return nil, nil, err
}
endpoints := mustGetNewEndpoints(xlDirs...)
format, err := waitForFormatXL(true, endpoints, 1, 1, 16, "")
storageDisks, format, err := waitForFormatXL(true, endpoints, 1, 1, 16, "")
if err != nil {
removeRoots(xlDirs)
return nil, nil, err
}
globalPolicySys = NewPolicySys()
objLayer, err := newXLSets(endpoints, format, 1, 16)
objLayer, err := newXLSets(ctx, endpoints, storageDisks, format)
if err != nil {
return nil, nil, err
}
@@ -191,7 +192,10 @@ func getServiceCmdRequest(cmd cmdType, cred auth.Credentials) (*http.Request, er
// testServicesCmdHandler - parametrizes service subcommand tests on
// cmdType value.
func testServicesCmdHandler(cmd cmdType, t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
adminTestBed, err := prepareAdminXLTestBed(ctx)
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
@@ -259,7 +263,10 @@ func buildAdminRequest(queryVal url.Values, method, path string,
}
func TestAdminServerInfo(t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
adminTestBed, err := prepareAdminXLTestBed(ctx)
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
@@ -314,12 +321,12 @@ func TestToAdminAPIErrCode(t *testing.T) {
// 3. Non-admin API specific error.
{
err: errDiskNotFound,
expectedAPIErr: toAPIErrorCode(context.Background(), errDiskNotFound),
expectedAPIErr: toAPIErrorCode(GlobalContext, errDiskNotFound),
},
}
for i, test := range testCases {
actualErr := toAdminAPIErrCode(context.Background(), test.err)
actualErr := toAdminAPIErrCode(GlobalContext, test.err)
if actualErr != test.expectedAPIErr {
t.Errorf("Test %d: Expected %v but received %v",
i+1, test.expectedAPIErr, actualErr)


@@ -106,12 +106,12 @@ func initHealState() *allHealState {
healSeqMap: make(map[string]*healSequence),
}
go healState.periodicHealSeqsClean()
go healState.periodicHealSeqsClean(GlobalContext)
return healState
}
func (ahs *allHealState) periodicHealSeqsClean() {
func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
// Launch clean-up routine to remove this heal sequence (after
// it ends) from the global state after timeout has elapsed.
ticker := time.NewTicker(time.Minute * 5)
@@ -127,7 +127,7 @@ func (ahs *allHealState) periodicHealSeqsClean() {
}
}
ahs.Unlock()
case <-GlobalServiceDoneCh:
case <-ctx.Done():
// server could be restarting - need
// to exit immediately
return
@@ -183,7 +183,7 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
}
b, err := json.Marshal(&hsp)
return b, toAdminAPIErr(context.Background(), err)
return b, toAdminAPIErr(GlobalContext, err)
}
// LaunchNewHealSequence - launches a background routine that performs
@@ -306,6 +306,12 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
return jbytes, ErrNone
}
// healSource denotes a single entity to heal along with its heal options.
type healSource struct {
path string // entity path (format, buckets, objects) to heal
opts madmin.HealOpts // optional heal option overrides default setting
}
// healSequence - state for each heal sequence initiated on the
// server.
type healSequence struct {
@@ -315,12 +321,13 @@ type healSequence struct {
// path is just pathJoin(bucket, objPrefix)
path string
// List of entities (format, buckets, objects) to heal
sourceCh chan string
// A channel of entities (format, buckets, objects) to heal
sourceCh chan healSource
// Report healing progress, false if this is a background
// healing since currently there is no entity which will
// receive realtime healing status
// A channel of entities with heal result
respCh chan healResult
// Report healing progress
reportProgress bool
// time at which heal sequence was started
@@ -352,14 +359,23 @@ type healSequence struct {
// the last result index sent to client
lastSentResultIndex int64
// Number of total items scanned
scannedItemsCount int64
// Total number of items scanned, per item type
scannedItemsMap map[madmin.HealItemType]int64
// Total number of items healed, per item type
healedItemsMap map[madmin.HealItemType]int64
// Total number of items whose healing failed, keyed by endpoint and drive state
healFailedItemsMap map[string]int64
// The time of the last scan/heal activity
lastHealActivity time.Time
// Holds the request-info for logging
ctx context.Context
// used to lock this structure as it is concurrently accessed
mutex sync.RWMutex
}
// NewHealSequence - creates healSettings, assumes bucket and
@@ -369,9 +385,10 @@ func newHealSequence(bucket, objPrefix, clientAddr string,
reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
reqInfo.AppendTags("prefix", objPrefix)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
return &healSequence{
respCh: make(chan healResult),
bucket: bucket,
objPrefix: objPrefix,
path: pathJoin(bucket, objPrefix),
@@ -390,9 +407,83 @@ func newHealSequence(bucket, objPrefix, clientAddr string,
traverseAndHealDoneCh: make(chan error),
stopSignalCh: make(chan struct{}),
ctx: ctx,
scannedItemsMap: make(map[madmin.HealItemType]int64),
healedItemsMap: make(map[madmin.HealItemType]int64),
healFailedItemsMap: make(map[string]int64),
}
}
// resetHealStatusCounters - reset the healSequence status counters between
// each monthly background heal scanning activity.
// This is used only in case of Background healing scenario, where
// we use a single long running healSequence which reactively heals
// objects passed to the SourceCh.
func (h *healSequence) resetHealStatusCounters() {
h.mutex.Lock()
defer h.mutex.Unlock()
h.currentStatus.Items = []madmin.HealResultItem{}
h.lastSentResultIndex = 0
h.scannedItemsMap = make(map[madmin.HealItemType]int64)
h.healedItemsMap = make(map[madmin.HealItemType]int64)
h.healFailedItemsMap = make(map[string]int64)
}
// getScannedItemsCount - returns a count of all scanned items
func (h *healSequence) getScannedItemsCount() int64 {
var count int64
h.mutex.RLock()
defer h.mutex.RUnlock()
for _, v := range h.scannedItemsMap {
count = count + v
}
return count
}
// getScannedItemsMap - returns map of all scanned items against type
func (h *healSequence) getScannedItemsMap() map[madmin.HealItemType]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()
// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.scannedItemsMap))
for k, v := range h.scannedItemsMap {
retMap[k] = v
}
return retMap
}
// getHealedItemsMap - returns the map of all healed items against type
func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()
// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.healedItemsMap))
for k, v := range h.healedItemsMap {
retMap[k] = v
}
return retMap
}
// gethealFailedItemsMap - returns a map of all items whose heal failed,
// keyed by drive endpoint and status
func (h *healSequence) gethealFailedItemsMap() map[string]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()
// Make a copy before returning the value
retMap := make(map[string]int64, len(h.healFailedItemsMap))
for k, v := range h.healFailedItemsMap {
retMap[k] = v
}
return retMap
}
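
All three getters above follow the same copy-under-read-lock pattern, returning a snapshot so callers cannot race with writers mutating the live map. The pattern distilled (types are placeholders):

```go
package main

import (
	"fmt"
	"sync"
)

type counters struct {
	mu sync.RWMutex
	m  map[string]int64
}

// snapshot returns a copy so callers can't race with writers that
// mutate the live map under the write lock.
func (c *counters) snapshot() map[string]int64 {
	c.mu.RLock()
	defer c.mu.RUnlock()
	out := make(map[string]int64, len(c.m))
	for k, v := range c.m {
		out[k] = v
	}
	return out
}

func main() {
	c := &counters{m: map[string]int64{"object": 3}}
	fmt.Println(c.snapshot())
}
```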
// isQuitting - determines if the heal sequence is quitting (due to an
// external signal)
func (h *healSequence) isQuitting() bool {
@@ -548,35 +639,62 @@ func (h *healSequence) healSequenceStart() {
}
}
func (h *healSequence) queueHealTask(path string, healType madmin.HealItemType) error {
var respCh = make(chan healResult)
defer close(respCh)
func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
// Send heal request
globalBackgroundHealRoutine.queueHealTask(healTask{path: path, responseCh: respCh, opts: h.settings})
// Wait for answer and push result to the client
res := <-respCh
if !h.reportProgress {
return nil
task := healTask{
path: source.path,
opts: h.settings,
responseCh: h.respCh,
}
res.result.Type = healType
if res.err != nil {
// Object might have been deleted by the time heal
// was attempted; ignore this object and return success.
if isErrObjectNotFound(res.err) {
if !source.opts.Equal(h.settings) {
task.opts = source.opts
}
globalBackgroundHealRoutine.queueHealTask(task)
select {
case res := <-h.respCh:
if !h.reportProgress {
h.mutex.Lock()
defer h.mutex.Unlock()
// Progress is not reported in case of background heal processing.
// Instead we increment relevant counter based on the heal result
// for prometheus reporting.
if res.err != nil && !isErrObjectNotFound(res.err) {
for _, d := range res.result.After.Drives {
// For failed items we report the endpoint and drive state
// This will help users take corrective actions for drives
h.healFailedItemsMap[d.Endpoint+","+d.State]++
}
} else {
// Only object type reported for successful healing
h.healedItemsMap[res.result.Type]++
}
return nil
}
// Only report object error
if healType != madmin.HealItemObject {
return res.err
res.result.Type = healType
if res.err != nil {
// Object might have been deleted by the time heal
// was attempted; ignore this object and return success.
if isErrObjectNotFound(res.err) {
return nil
}
// Only report object error
if healType != madmin.HealItemObject {
return res.err
}
res.result.Detail = res.err.Error()
}
res.result.Detail = res.err.Error()
return h.pushHealResultItem(res.result)
case <-h.ctx.Done():
return nil
case <-h.traverseAndHealDoneCh:
return nil
}
return h.pushHealResultItem(res.result)
}
func (h *healSequence) healItemsFromSourceCh() error {
h.lastHealActivity = UTCNow()
bucketsOnly := true // heal buckets only, not objects.
if err := h.healItems(bucketsOnly); err != nil {
logger.LogIf(h.ctx, err)
@@ -584,28 +702,28 @@ func (h *healSequence) healItemsFromSourceCh() error {
for {
select {
case path := <-h.sourceCh:
case source := <-h.sourceCh:
var itemType madmin.HealItemType
switch {
case path == nopHeal:
case source.path == nopHeal:
continue
case path == SlashSeparator:
case source.path == SlashSeparator:
itemType = madmin.HealItemMetadata
case !strings.Contains(path, SlashSeparator):
case !strings.Contains(source.path, SlashSeparator):
itemType = madmin.HealItemBucket
default:
itemType = madmin.HealItemObject
}
if err := h.queueHealTask(path, itemType); err != nil {
if err := h.queueHealTask(source, itemType); err != nil {
logger.LogIf(h.ctx, err)
}
h.scannedItemsCount++
h.scannedItemsMap[itemType]++
h.lastHealActivity = UTCNow()
case <-h.traverseAndHealDoneCh:
return nil
case <-GlobalServiceDoneCh:
case <-h.ctx.Done():
return nil
}
}
@@ -632,11 +750,6 @@ func (h *healSequence) healItems(bucketsOnly bool) error {
return err
}
// Start healing the background ops prefix.
if err := h.healMinioSysMeta(backgroundOpsMetaPrefix)(); err != nil {
logger.LogIf(h.ctx, err)
}
// Heal buckets and objects
return h.healBuckets(bucketsOnly)
}
@@ -673,12 +786,12 @@ func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
// NOTE: Healing on meta is run regardless
// of any bucket being selected; this is to ensure that
// meta is always up to date and correct.
return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, func(bucket string, object string) error {
return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket string, object string) error {
if h.isQuitting() {
return errHealStopSignalled
}
herr := h.queueHealTask(pathJoin(bucket, object), madmin.HealItemBucketMetadata)
herr := h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemBucketMetadata)
// Object might have been deleted by the time heal
// was attempted; we ignore this object and move on.
if isErrObjectNotFound(herr) {
@@ -702,7 +815,7 @@ func (h *healSequence) healDiskFormat() error {
return errServerNotInitialized
}
return h.queueHealTask(SlashSeparator, madmin.HealItemMetadata)
return h.queueHealTask(healSource{path: SlashSeparator}, madmin.HealItemMetadata)
}
// healBuckets - heals all buckets, or just a particular bucket if one is selected.
@@ -744,7 +857,7 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
return errServerNotInitialized
}
if err := h.queueHealTask(bucket, madmin.HealItemBucket); err != nil {
if err := h.queueHealTask(healSource{path: bucket}, madmin.HealItemBucket); err != nil {
return err
}
@@ -767,7 +880,7 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
return nil
}
if err := objectAPI.HealObjects(h.ctx, bucket, h.objPrefix, h.healObject); err != nil {
if err := objectAPI.HealObjects(h.ctx, bucket, h.objPrefix, h.settings, h.healObject); err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}
return nil
@@ -785,5 +898,5 @@ func (h *healSequence) healObject(bucket, object string) error {
return errHealStopSignalled
}
return h.queueHealTask(pathJoin(bucket, object), madmin.HealItemObject)
return h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemObject)
}
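
Throughout this file the diff converts sourceCh from a channel of paths to a channel of healSource values, letting individual items override the sequence's default heal options. A stripped-down sketch of that shape, with simplified stand-ins for the madmin types:

```go
package main

import "fmt"

type healOpts struct{ Recursive bool }

type healSource struct {
	path string
	opts *healOpts // nil means "use the sequence default"
}

// effectiveOpts picks the per-item override when present, otherwise
// the sequence-wide default.
func effectiveOpts(def healOpts, src healSource) healOpts {
	if src.opts != nil {
		return *src.opts
	}
	return def
}

func main() {
	def := healOpts{Recursive: true}
	ch := make(chan healSource, 2)
	ch <- healSource{path: "bucket/object"}
	ch <- healSource{path: "bucket", opts: &healOpts{Recursive: false}}
	close(ch)
	for src := range ch {
		fmt.Println(src.path, effectiveOpts(def, src))
	}
}
```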


@@ -24,16 +24,18 @@ import (
)
const (
adminPathPrefix = minioReservedBucketPath + "/admin"
adminAPIVersion = madmin.AdminAPIVersion
adminAPIVersionPrefix = SlashSeparator + madmin.AdminAPIVersion
adminPathPrefix = minioReservedBucketPath + "/admin"
adminAPIVersionV2 = madmin.AdminAPIVersionV2
adminAPIVersion = madmin.AdminAPIVersion
adminAPIVersionPrefix = SlashSeparator + adminAPIVersion
adminAPIVersionV2Prefix = SlashSeparator + adminAPIVersionV2
)
// adminAPIHandlers provides HTTP handlers for MinIO admin API.
type adminAPIHandlers struct{}
// registerAdminRouter - Add handler functions for each service REST API routes.
func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) {
func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps, enableBucketQuotaOps bool) {
adminAPI := adminAPIHandlers{}
// Admin router
@@ -41,127 +43,177 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
/// Service operations
// Restart and stop MinIO service.
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix+"/service").HandlerFunc(httpTraceAll(adminAPI.ServiceActionHandler)).Queries("action", "{action:.*}")
// Update MinIO servers.
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix+"/update").HandlerFunc(httpTraceAll(adminAPI.ServerUpdateHandler)).Queries("updateURL", "{updateURL:.*}")
// Info operations
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
// Hardware Info operations
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/hardware").HandlerFunc(httpTraceAll(adminAPI.ServerHardwareInfoHandler)).Queries("hwType", "{hwType:.*}")
// StorageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/storageinfo").HandlerFunc(httpTraceAll(adminAPI.StorageInfoHandler))
// DataUsageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/datausageinfo").HandlerFunc(httpTraceAll(adminAPI.DataUsageInfoHandler))
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/accountingusageinfo").HandlerFunc(httpTraceAll(adminAPI.AccountingUsageInfoHandler))
if globalIsDistXL || globalIsXL {
/// Heal operations
// Heal processing endpoint.
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/background-heal/status").HandlerFunc(httpTraceAll(adminAPI.BackgroundHealStatusHandler))
/// Health operations
}
// Performance command - return performance details based on input type
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/performance").HandlerFunc(httpTraceAll(adminAPI.PerfInfoHandler)).Queries("perfType", "{perfType:.*}")
// Profiling operations
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix+"/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
Queries("profilerType", "{profilerType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))
// Config KV operations.
if enableConfigOps {
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/get-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKVHandler)).Queries("key", "{key:.*}")
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix + "/set-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKVHandler))
adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix + "/del-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.DelConfigKVHandler))
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/help-config-kv").HandlerFunc(httpTraceAll(adminAPI.HelpConfigKVHandler)).Queries("subSys", "{subSys:.*}", "key", "{key:.*}")
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/list-config-history-kv").HandlerFunc(httpTraceAll(adminAPI.ListConfigHistoryKVHandler)).Queries("count", "{count:[0-9]+}")
adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix+"/clear-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.ClearConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/restore-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
adminVersions := []string{
adminAPIVersionPrefix,
adminAPIVersionV2Prefix,
}
/// Config operations
if enableConfigOps {
// Get config
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
// Set config
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix + "/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
for _, adminVersion := range adminVersions {
// Restart and stop MinIO service.
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/service").HandlerFunc(httpTraceAll(adminAPI.ServiceActionHandler)).Queries("action", "{action:.*}")
// Update MinIO servers.
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update").HandlerFunc(httpTraceAll(adminAPI.ServerUpdateHandler)).Queries("updateURL", "{updateURL:.*}")
// Info operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
// StorageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(httpTraceAll(adminAPI.StorageInfoHandler))
// DataUsageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(httpTraceAll(adminAPI.DataUsageInfoHandler))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountingusageinfo").HandlerFunc(httpTraceAll(adminAPI.AccountingUsageInfoHandler))
if globalIsDistXL || globalIsXL {
/// Heal operations
// Heal processing endpoint.
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(httpTraceAll(adminAPI.BackgroundHealStatusHandler))
/// Health operations
}
// Profiling operations
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
Queries("profilerType", "{profilerType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))
// Config KV operations.
if enableConfigOps {
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKVHandler)).Queries("key", "{key:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/set-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKVHandler))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/del-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.DelConfigKVHandler))
}
// Enable config help in all modes.
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/help-config-kv").HandlerFunc(httpTraceAll(adminAPI.HelpConfigKVHandler)).Queries("subSys", "{subSys:.*}", "key", "{key:.*}")
// Config KV history operations.
if enableConfigOps {
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-config-history-kv").HandlerFunc(httpTraceAll(adminAPI.ListConfigHistoryKVHandler)).Queries("count", "{count:[0-9]+}")
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/clear-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.ClearConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/restore-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
}
/// Config import/export bulk operations
if enableConfigOps {
// Get config
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
// Set config
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
}
if enableIAMOps {
// -- IAM APIs --
// Add policy IAM
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")
// Add user IAM
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
// Service accounts ops
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/add-service-account").HandlerFunc(httpTraceHdrs(adminAPI.AddServiceAccount))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-service-accounts").HandlerFunc(httpTraceHdrs(adminAPI.ListServiceAccounts))
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/delete-service-account").HandlerFunc(httpTraceHdrs(adminAPI.DeleteServiceAccount)).Queries("accessKey", "{accessKey:.*}")
if adminVersion == adminAPIVersionV2Prefix {
// Info policy IAM v2
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicyV2)).Queries("name", "{name:.*}")
// List policies v2
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPoliciesV2))
} else {
// Info policy IAM latest
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
// List policies latest
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
}
// Remove policy IAM
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
// Set user or group policy
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-or-group-policy").
HandlerFunc(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup)).
Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")
// Remove user IAM
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
// List users
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))
// User info
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/user-info").HandlerFunc(httpTraceHdrs(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")
// Add/Remove members from group
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers))
// Get Group
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}")
// List Groups
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups))
// Set Group Status
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
}
// Quota operations
if enableConfigOps && enableBucketQuotaOps {
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// RemoveBucketQuotaConfig
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.RemoveBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
}
// -- Top APIs --
// Top locks
if globalIsDistXL {
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
}
// HTTP Trace
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/trace").HandlerFunc(adminAPI.TraceHandler)
// Console Logs
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/log").HandlerFunc(httpTraceAll(adminAPI.ConsoleLogHandler))
// -- KMS APIs --
//
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))
if !globalIsGateway {
// -- OBD API --
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/obdinfo").
HandlerFunc(httpTraceHdrs(adminAPI.OBDInfoHandler)).
Queries("perfdrive", "{perfdrive:true|false}",
"perfnet", "{perfnet:true|false}",
"minioinfo", "{minioinfo:true|false}",
"minioconfig", "{minioconfig:true|false}",
"syscpu", "{syscpu:true|false}",
"sysdiskhw", "{sysdiskhw:true|false}",
"sysosinfo", "{sysosinfo:true|false}",
"sysmem", "{sysmem:true|false}",
"sysprocess", "{sysprocess:true|false}",
)
}
}
if enableIAMOps {
// -- IAM APIs --
// Add policy IAM
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name",
"{name:.*}")
// Add user IAM
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).
Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
// Info policy IAM
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
// Remove policy IAM
adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix+"/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
// Set user or group policy
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/set-user-or-group-policy").
HandlerFunc(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup)).
Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")
// Remove user IAM
adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix+"/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
// List users
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))
// User info
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/user-info").HandlerFunc(httpTraceHdrs(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")
// Add/Remove members from group
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix + "/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers))
// Get Group
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}")
// List Groups
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups))
// Set Group Status
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
// List policies
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
}
// -- Top APIs --
// Top locks
if globalIsDistXL {
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
}
// HTTP Trace
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/trace").HandlerFunc(adminAPI.TraceHandler)
// Console Logs
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/log").HandlerFunc(httpTraceAll(adminAPI.ConsoleLogHandler))
// -- KMS APIs --
//
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))
// If none of the routes match add default error handler routes
adminRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
adminRouter.MethodNotAllowedHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))


@@ -17,176 +17,14 @@
package cmd
import (
"net"
"net/http"
"os"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/pkg/cpu"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/mem"
cpuhw "github.com/shirou/gopsutil/cpu"
)
// getLocalMemUsage - returns ServerMemUsageInfo for all zones, endpoints.
func getLocalMemUsage(endpointZones EndpointZones, r *http.Request) ServerMemUsageInfo {
var memUsages []mem.Usage
var historicUsages []mem.Usage
seenHosts := set.NewStringSet()
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
if seenHosts.Contains(endpoint.Host) {
continue
}
seenHosts.Add(endpoint.Host)
// Only proceed for local endpoints
if endpoint.IsLocal {
memUsages = append(memUsages, mem.GetUsage())
historicUsages = append(historicUsages, mem.GetHistoricUsage())
}
}
}
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}
return ServerMemUsageInfo{
Addr: addr,
Usage: memUsages,
HistoricUsage: historicUsages,
}
}
// getLocalCPULoad - returns ServerCPULoadInfo for all zones, endpoints.
func getLocalCPULoad(endpointZones EndpointZones, r *http.Request) ServerCPULoadInfo {
var cpuLoads []cpu.Load
var historicLoads []cpu.Load
seenHosts := set.NewStringSet()
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
if seenHosts.Contains(endpoint.Host) {
continue
}
seenHosts.Add(endpoint.Host)
// Only proceed for local endpoints
if endpoint.IsLocal {
cpuLoads = append(cpuLoads, cpu.GetLoad())
historicLoads = append(historicLoads, cpu.GetHistoricLoad())
}
}
}
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}
return ServerCPULoadInfo{
Addr: addr,
Load: cpuLoads,
HistoricLoad: historicLoads,
}
}
// getLocalDrivesPerf - returns ServerDrivesPerfInfo for all zones, endpoints.
func getLocalDrivesPerf(endpointZones EndpointZones, size int64, r *http.Request) madmin.ServerDrivesPerfInfo {
var dps []disk.Performance
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
// Only proceed for local endpoints
if endpoint.IsLocal {
if _, err := os.Stat(endpoint.Path); err != nil {
// Since this drive is not available, add relevant details and proceed
dps = append(dps, disk.Performance{Path: endpoint.Path, Error: err.Error()})
continue
}
dp := disk.GetPerformance(pathJoin(endpoint.Path, minioMetaTmpBucket, mustGetUUID()), size)
dp.Path = endpoint.Path
dps = append(dps, dp)
}
}
}
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}
return madmin.ServerDrivesPerfInfo{
Addr: addr,
Perf: dps,
}
}
// getLocalCPUInfo - returns ServerCPUHardwareInfo for all zones, endpoints.
func getLocalCPUInfo(endpointZones EndpointZones, r *http.Request) madmin.ServerCPUHardwareInfo {
var cpuHardwares []cpuhw.InfoStat
seenHosts := set.NewStringSet()
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
if seenHosts.Contains(endpoint.Host) {
continue
}
// Add to the list of visited hosts
seenHosts.Add(endpoint.Host)
// Only proceed for local endpoints
if endpoint.IsLocal {
cpuHardware, err := cpuhw.Info()
if err != nil {
return madmin.ServerCPUHardwareInfo{
Error: err.Error(),
}
}
cpuHardwares = append(cpuHardwares, cpuHardware...)
}
}
}
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}
return madmin.ServerCPUHardwareInfo{
Addr: addr,
CPUInfo: cpuHardwares,
}
}
// getLocalNetworkInfo - returns ServerNetworkHardwareInfo for all zones, endpoints.
func getLocalNetworkInfo(endpointZones EndpointZones, r *http.Request) madmin.ServerNetworkHardwareInfo {
var networkHardwares []net.Interface
seenHosts := set.NewStringSet()
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
if seenHosts.Contains(endpoint.Host) {
continue
}
// Add to the list of visited hosts
seenHosts.Add(endpoint.Host)
// Only proceed for local endpoints
if endpoint.IsLocal {
networkHardware, err := net.Interfaces()
if err != nil {
return madmin.ServerNetworkHardwareInfo{
Error: err.Error(),
}
}
networkHardwares = append(networkHardwares, networkHardware...)
}
}
}
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}
return madmin.ServerNetworkHardwareInfo{
Addr: addr,
NetworkInfo: networkHardwares,
}
}
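All of the getLocal* collectors above share one idiom: walk every endpoint, skip hosts already seen, and act only on local endpoints. Factored out as an illustrative helper (the name and signature are ours, not the source's), using the same minio-go set package:

// collectOnce visits every endpoint, skipping hosts already seen, and
// invokes fn once per local endpoint — the idiom shared by the getLocal*
// collectors above.
func collectOnce(endpointZones EndpointZones, fn func(Endpoint)) {
	seen := set.NewStringSet()
	for _, ep := range endpointZones {
		for _, endpoint := range ep.Endpoints {
			if seen.Contains(endpoint.Host) {
				continue
			}
			seen.Add(endpoint.Host)
			if endpoint.IsLocal {
				fn(endpoint)
			}
		}
	}
}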
// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints from given list of endpoints
func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin.ServerProperties {
var disks []madmin.Disk


@@ -25,7 +25,6 @@ import (
"strings"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"google.golang.org/api/googleapi"
minio "github.com/minio/minio-go/v6"
@@ -122,7 +121,8 @@ const (
ErrMissingCredTag
ErrCredMalformed
ErrInvalidRegion
ErrInvalidService
ErrInvalidServiceS3
ErrInvalidServiceSTS
ErrInvalidRequestVersion
ErrMissingSignTag
ErrMissingSignHeadersTag
@@ -150,6 +150,7 @@ const (
ErrBadRequest
ErrKeyTooLongError
ErrInvalidBucketObjectLockConfiguration
ErrObjectLockConfigurationNotFound
ErrObjectLockConfigurationNotAllowed
ErrNoSuchObjectLockConfiguration
ErrObjectLocked
@@ -210,6 +211,7 @@ const (
ErrInvalidResourceName
ErrServerNotInitialized
ErrOperationTimedOut
ErrOperationMaxedOut
ErrInvalidRequest
// MinIO storage class error codes
ErrInvalidStorageClass
@@ -233,6 +235,10 @@ const (
ErrAdminCredentialsMismatch
ErrInsecureClientRequest
ErrObjectTampered
// Bucket Quota error codes
ErrAdminBucketQuotaExceeded
ErrAdminNoSuchQuotaConfiguration
ErrAdminBucketQuotaDisabled
ErrHealNotImplemented
ErrHealNoSuchProcess
@@ -332,19 +338,28 @@ const (
ErrAdminProfilerNotEnabled
ErrInvalidDecompressedSize
ErrAddUserInvalidArgument
ErrAdminAccountNotEligible
ErrServiceAccountNotFound
ErrPostPolicyConditionInvalidFormat
)
type errorCodeMap map[APIErrorCode]APIError
func (e errorCodeMap) ToAPIErr(errCode APIErrorCode) APIError {
func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError {
apiErr, ok := e[errCode]
if !ok {
return e[ErrInternalError]
apiErr = e[ErrInternalError]
}
if err != nil {
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
}
return apiErr
}
func (e errorCodeMap) ToAPIErr(errCode APIErrorCode) APIError {
return e.ToAPIErrWithErr(errCode, nil)
}
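ToAPIErrWithErr keeps the canned description but appends the concrete cause in parentheses. A toy, runnable re-implementation of that behavior (int keys stand in for APIErrorCode):

package main

import "fmt"

type APIError struct {
	Code        string
	Description string
}

type errorCodeMap map[int]APIError

func (e errorCodeMap) ToAPIErrWithErr(code int, err error) APIError {
	apiErr, ok := e[code]
	if !ok {
		apiErr = e[0] // fall back to the internal-error entry
	}
	if err != nil {
		// The underlying cause is appended to the canned description.
		apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
	}
	return apiErr
}

func main() {
	codes := errorCodeMap{
		0: {Code: "InternalError", Description: "internal error"},
		1: {Code: "QuotaExceeded", Description: "Bucket quota exceeded"},
	}
	fmt.Println(codes.ToAPIErrWithErr(1, fmt.Errorf("disk full")))
	// -> {QuotaExceeded Bucket quota exceeded (disk full)}
}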
// error code to APIError structure, these fields carry respective
// descriptions for all the error responses.
var errorCodes = errorCodeMap{
@@ -652,9 +667,14 @@ var errorCodes = errorCodeMap{
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// right Description: "Error parsing the X-Amz-Credential parameter; incorrect service \"s4\". This endpoint belongs to \"s3\".".
// Need changes to make sure variable messages can be constructed.
ErrInvalidService: {
Code: "AuthorizationQueryParametersError",
Description: "Error parsing the X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",
ErrInvalidServiceS3: {
Code: "AuthorizationParametersError",
Description: "Error parsing the Credential/X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidServiceSTS: {
Code: "AuthorizationParametersError",
Description: "Error parsing the Credential parameter; incorrect service. This endpoint belongs to \"sts\".",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
@@ -757,9 +777,14 @@ var errorCodes = errorCodeMap{
Description: "Bucket is missing ObjectLockConfiguration",
HTTPStatusCode: http.StatusBadRequest,
},
ErrObjectLockConfigurationNotFound: {
Code: "ObjectLockConfigurationNotFoundError",
Description: "Object Lock configuration does not exist for this bucket",
HTTPStatusCode: http.StatusNotFound,
},
ErrObjectLockConfigurationNotAllowed: {
Code: "InvalidBucketState",
Description: "Object Lock configuration cannot be enabled on existing buckets.",
Description: "Object Lock configuration cannot be enabled on existing buckets",
HTTPStatusCode: http.StatusConflict,
},
ErrNoSuchObjectLockConfiguration: {
@@ -1068,6 +1093,21 @@ var errorCodes = errorCodeMap{
Description: "Credentials in config mismatch with server environment variables",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrAdminBucketQuotaExceeded: {
Code: "XMinioAdminBucketQuotaExceeded",
Description: "Bucket quota exceeded",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminNoSuchQuotaConfiguration: {
Code: "XMinioAdminNoSuchQuotaConfiguration",
Description: "The quota configuration does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminBucketQuotaDisabled: {
Code: "XMinioAdminBucketQuotaDisabled",
Description: "Quota specified but disk usage crawl is disabled on MinIO server",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInsecureClientRequest: {
Code: "XMinioInsecureClientRequest",
Description: "Cannot respond to plain-text request from TLS-encrypted server",
@@ -1078,6 +1118,11 @@ var errorCodes = errorCodeMap{
Description: "A timeout occurred while trying to lock a resource",
HTTPStatusCode: http.StatusRequestTimeout,
},
ErrOperationMaxedOut: {
Code: "XMinioServerTimedOut",
Description: "A timeout exceeded while waiting to proceed with the request",
HTTPStatusCode: http.StatusRequestTimeout,
},
ErrUnsupportedMetadata: {
Code: "InvalidArgument",
Description: "Your metadata headers are not supported.",
@@ -1573,6 +1618,16 @@ var errorCodes = errorCodeMap{
Description: "User is not allowed to be same as admin access key",
HTTPStatusCode: http.StatusConflict,
},
ErrAdminAccountNotEligible: {
Code: "XMinioInvalidIAMCredentials",
Description: "The administrator key is not eligible for this operation",
HTTPStatusCode: http.StatusConflict,
},
ErrServiceAccountNotFound: {
Code: "XMinioInvalidIAMCredentials",
Description: "The specified service account is not found",
HTTPStatusCode: http.StatusNotFound,
},
ErrPostPolicyConditionInvalidFormat: {
Code: "PostPolicyInvalidKeyName",
Description: "Invalid according to Policy: Policy Condition failed",
@@ -1641,7 +1696,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrKMSNotConfigured
case crypto.ErrKMSAuthLogin:
apiErr = ErrKMSAuthFailure
case errOperationTimedOut, context.Canceled, context.DeadlineExceeded:
case context.Canceled, context.DeadlineExceeded:
apiErr = ErrOperationTimedOut
case errDiskNotFound:
apiErr = ErrSlowDown
@@ -1747,6 +1802,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrNoSuchLifecycleConfiguration
case BucketSSEConfigNotFound:
apiErr = ErrNoSuchBucketSSEConfig
case BucketQuotaConfigNotFound:
apiErr = ErrAdminNoSuchQuotaConfiguration
case BucketQuotaExceeded:
apiErr = ErrAdminBucketQuotaExceeded
case *event.ErrInvalidEventName:
apiErr = ErrEventNotification
case *event.ErrInvalidARN:
@@ -1769,6 +1828,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrOverlappingFilterNotification
case *event.ErrUnsupportedConfiguration:
apiErr = ErrUnsupportedNotification
case OperationTimedOut:
apiErr = ErrOperationTimedOut
case BackendDown:
apiErr = ErrBackendDown
case ObjectNameTooLong:
@@ -1877,12 +1938,6 @@ func toAPIError(ctx context.Context, err error) APIError {
Description: e.Error(),
HTTPStatusCode: e.Response().StatusCode,
}
case oss.ServiceError:
apiErr = APIError{
Code: e.Code,
Description: e.Message,
HTTPStatusCode: e.StatusCode,
}
// Add more Gateway SDKs here if any in future.
}
}


@@ -24,6 +24,7 @@ import (
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/minio/minio/cmd/crypto"
@@ -81,6 +82,10 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
w.Header()[xhttp.ETag] = []string{"\"" + objInfo.ETag + "\""}
}
if strings.Contains(objInfo.ETag, "-") && len(objInfo.Parts) > 0 {
w.Header().Set(xhttp.AmzMpPartsCount, strconv.Itoa(len(objInfo.Parts)))
}
if objInfo.ContentType != "" {
w.Header().Set(xhttp.ContentType, objInfo.ContentType)
}
@@ -92,6 +97,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
if !objInfo.Expires.IsZero() {
w.Header().Set(xhttp.Expires, objInfo.Expires.UTC().Format(http.TimeFormat))
}
if globalCacheConfig.Enabled {
w.Header().Set(xhttp.XCache, objInfo.CacheStatus.String())
w.Header().Set(xhttp.XCacheLookup, objInfo.CacheLookupStatus.String())


@@ -429,7 +429,12 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp
content.ETag = "\"" + object.ETag + "\""
}
content.Size = object.Size
content.StorageClass = object.StorageClass
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
content.Owner = owner
content.VersionID = "null"
content.IsLatest = true
@@ -475,7 +480,11 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
content.ETag = "\"" + object.ETag + "\""
}
content.Size = object.Size
content.StorageClass = object.StorageClass
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
content.Owner = owner
contents = append(contents, content)
}
@@ -521,7 +530,11 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
content.ETag = "\"" + object.ETag + "\""
}
content.Size = object.Size
content.StorageClass = object.StorageClass
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
content.Owner = owner
if metadata {
content.UserMetadata = make(StringMap)

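The same empty-check-or-default appears in all three list-response generators above; an illustrative helper (the name is ours, not the source's) would express it once:

// storageClassOrDefault collapses the repeated if/else above into a
// single expression.
func storageClassOrDefault(sc string) string {
	if sc != "" {
		return sc
	}
	return globalMinioDefaultStorageClass
}

// Usage in the three response generators:
//   content.StorageClass = storageClassOrDefault(object.StorageClass)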

@@ -1,5 +1,5 @@
/*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc.
* MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -90,139 +90,197 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
for _, bucket := range routers {
// Object operations
// HeadObject
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler)))
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler))))
// CopyObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// ListObjectParts
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("listobjectparts", httpTraceAll(api.ListObjectPartsHandler))).Queries("uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("listobjectparts", httpTraceAll(api.ListObjectPartsHandler)))).Queries("uploadId", "{uploadId:.*}")
// CompleteMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("completemutipartupload", httpTraceAll(api.CompleteMultipartUploadHandler))).Queries("uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("completemutipartupload", httpTraceAll(api.CompleteMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("newmultipartupload", httpTraceAll(api.NewMultipartUploadHandler))).Queries("uploads", "")
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("newmultipartupload", httpTraceAll(api.NewMultipartUploadHandler)))).Queries("uploads", "")
// AbortMultipartUpload
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(collectAPIStats("abortmultipartupload", httpTraceAll(api.AbortMultipartUploadHandler))).Queries("uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("abortmultipartupload", httpTraceAll(api.AbortMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
// GetObjectACL - this is a dummy call.
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectacl", httpTraceHdrs(api.GetObjectACLHandler))).Queries("acl", "")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectacl", httpTraceHdrs(api.GetObjectACLHandler)))).Queries("acl", "")
// PutObjectACL - this is a dummy call.
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectacl", httpTraceHdrs(api.PutObjectACLHandler))).Queries("acl", "")
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectacl", httpTraceHdrs(api.PutObjectACLHandler)))).Queries("acl", "")
// GetObjectTagging
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler))).Queries("tagging", "")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler)))).Queries("tagging", "")
// PutObjectTagging
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjecttagging", httpTraceHdrs(api.PutObjectTaggingHandler))).Queries("tagging", "")
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjecttagging", httpTraceHdrs(api.PutObjectTaggingHandler)))).Queries("tagging", "")
// DeleteObjectTagging
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(collectAPIStats("deleteobjecttagging", httpTraceHdrs(api.DeleteObjectTaggingHandler))).Queries("tagging", "")
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("deleteobjecttagging", httpTraceHdrs(api.DeleteObjectTaggingHandler)))).Queries("tagging", "")
// SelectObjectContent
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler))).Queries("select", "").Queries("select-type", "2")
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler)))).Queries("select", "").Queries("select-type", "2")
// GetObjectRetention
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectretention", httpTraceAll(api.GetObjectRetentionHandler))).Queries("retention", "")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectretention", httpTraceAll(api.GetObjectRetentionHandler)))).Queries("retention", "")
// GetObjectLegalHold
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectlegalhold", httpTraceAll(api.GetObjectLegalHoldHandler))).Queries("legal-hold", "")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectlegalhold", httpTraceAll(api.GetObjectLegalHoldHandler)))).Queries("legal-hold", "")
// GetObject
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler)))
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler))))
// CopyObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler)))
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
// PutObjectRetention
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler))).Queries("retention", "")
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
// PutObjectLegalHold
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectlegalhold", httpTraceAll(api.PutObjectLegalHoldHandler))).Queries("legal-hold", "")
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectlegalhold", httpTraceAll(api.PutObjectLegalHoldHandler)))).Queries("legal-hold", "")
// PutObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler)))
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler))))
// DeleteObject
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler)))
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler))))
/// Bucket operations
// GetBucketLocation
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlocation", httpTraceAll(api.GetBucketLocationHandler))).Queries("location", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlocation", httpTraceAll(api.GetBucketLocationHandler)))).Queries("location", "")
// GetBucketPolicy
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler))).Queries("policy", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler)))).Queries("policy", "")
// GetBucketLifecycle
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler))).Queries("lifecycle", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
// GetBucketEncryption
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketencryption", httpTraceAll(api.GetBucketEncryptionHandler))).Queries("encryption", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketencryption", httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")
// Dummy Bucket Calls
// GetBucketACL -- this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketacl", httpTraceAll(api.GetBucketACLHandler))).Queries("acl", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketacl", httpTraceAll(api.GetBucketACLHandler)))).Queries("acl", "")
// PutBucketACL -- this is a dummy call.
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketacl", httpTraceAll(api.PutBucketACLHandler))).Queries("acl", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketacl", httpTraceAll(api.PutBucketACLHandler)))).Queries("acl", "")
// GetBucketCors - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketcors", httpTraceAll(api.GetBucketCorsHandler))).Queries("cors", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketcors", httpTraceAll(api.GetBucketCorsHandler)))).Queries("cors", "")
// GetBucketWebsiteHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketwebsite", httpTraceAll(api.GetBucketWebsiteHandler))).Queries("website", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketwebsite", httpTraceAll(api.GetBucketWebsiteHandler)))).Queries("website", "")
// GetBucketAccelerateHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketaccelerate", httpTraceAll(api.GetBucketAccelerateHandler))).Queries("accelerate", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketaccelerate", httpTraceAll(api.GetBucketAccelerateHandler)))).Queries("accelerate", "")
// GetBucketRequestPaymentHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketrequestpayment", httpTraceAll(api.GetBucketRequestPaymentHandler))).Queries("requestPayment", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketrequestpayment", httpTraceAll(api.GetBucketRequestPaymentHandler)))).Queries("requestPayment", "")
// GetBucketLoggingHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlogging", httpTraceAll(api.GetBucketLoggingHandler))).Queries("logging", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlogging", httpTraceAll(api.GetBucketLoggingHandler)))).Queries("logging", "")
// GetBucketLifecycleHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler))).Queries("lifecycle", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
// GetBucketReplicationHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketreplication", httpTraceAll(api.GetBucketReplicationHandler))).Queries("replication", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketreplication", httpTraceAll(api.GetBucketReplicationHandler)))).Queries("replication", "")
// GetBucketTaggingHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler))).Queries("tagging", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
// DeleteBucketWebsiteHandler
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketwebsite", httpTraceAll(api.DeleteBucketWebsiteHandler))).Queries("website", "")
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketwebsite", httpTraceAll(api.DeleteBucketWebsiteHandler)))).Queries("website", "")
// DeleteBucketTaggingHandler
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler))).Queries("tagging", "")
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")
// GetBucketObjectLockConfig
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler))).Queries("object-lock", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// GetBucketVersioning
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler))).Queries("versioning", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
// GetBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler))).Queries("notification", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
// ListenBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listenbucketnotification", httpTraceAll(api.ListenBucketNotificationHandler))).Queries("events", "{events:.*}")
// ListMultipartUploads
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler))).Queries("uploads", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")
// ListObjectsV2M
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv2M", httpTraceAll(api.ListObjectsV2MHandler))).Queries("list-type", "2", "metadata", "true")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv2M", httpTraceAll(api.ListObjectsV2MHandler)))).Queries("list-type", "2", "metadata", "true")
// ListObjectsV2
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler))).Queries("list-type", "2")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
// ListBucketVersions
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listbucketversions", httpTraceAll(api.ListBucketObjectVersionsHandler))).Queries("versions", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listbucketversions", httpTraceAll(api.ListBucketObjectVersionsHandler)))).Queries("versions", "")
// ListObjectsV1 (Legacy)
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler)))
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler))))
// PutBucketLifecycle
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler))).Queries("lifecycle", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
// PutBucketEncryption
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketencryption", httpTraceAll(api.PutBucketEncryptionHandler))).Queries("encryption", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketencryption", httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")
// PutBucketPolicy
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler))).Queries("policy", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler)))).Queries("policy", "")
// PutBucketObjectLockConfig
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler))).Queries("object-lock", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// PutBucketVersioning
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler))).Queries("versioning", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
// PutBucketNotification
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler))).Queries("notification", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler)))).Queries("notification", "")
// PutBucket
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucket", httpTraceAll(api.PutBucketHandler)))
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucket", httpTraceAll(api.PutBucketHandler))))
// HeadBucket
bucket.Methods(http.MethodHead).HandlerFunc(collectAPIStats("headbucket", httpTraceAll(api.HeadBucketHandler)))
bucket.Methods(http.MethodHead).HandlerFunc(
maxClients(collectAPIStats("headbucket", httpTraceAll(api.HeadBucketHandler))))
// PostPolicy
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(collectAPIStats("postpolicybucket", httpTraceHdrs(api.PostPolicyBucketHandler)))
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(
maxClients(collectAPIStats("postpolicybucket", httpTraceHdrs(api.PostPolicyBucketHandler))))
// DeleteMultipleObjects
bucket.Methods(http.MethodPost).HandlerFunc(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler))).Queries("delete", "")
bucket.Methods(http.MethodPost).HandlerFunc(
maxClients(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler)))).Queries("delete", "")
// DeleteBucketPolicy
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler))).Queries("policy", "")
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
// DeleteBucketLifecycle
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler))).Queries("lifecycle", "")
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
// DeleteBucketEncryption
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketencryption", httpTraceAll(api.DeleteBucketEncryptionHandler))).Queries("encryption", "")
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketencryption", httpTraceAll(api.DeleteBucketEncryptionHandler)))).Queries("encryption", "")
// DeleteBucket
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler)))
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler))))
}
/// Root operation
// ListBuckets
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler)))
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
// If none of the routes match add default error handler routes
apiRouter.NotFoundHandler = http.HandlerFunc(collectAPIStats("notfound", httpTraceAll(errorResponseHandler)))
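Every route above is now wrapped in maxClients, which caps concurrent S3 API requests. Its implementation is not part of this diff; the following is a minimal semaphore-based sketch of the idea, with the pool size and the 429 response being assumptions:

var apiPool = make(chan struct{}, 512) // assumed global concurrency limit

func maxClients(f http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		select {
		case apiPool <- struct{}{}:
			// Slot acquired; release it when the handler returns.
			defer func() { <-apiPool }()
			f(w, r)
		default:
			// Too many in-flight requests: ask the client to retry.
			w.Header().Set("Retry-After", "1")
			http.Error(w, "too many requests", http.StatusTooManyRequests)
		}
	}
}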


@@ -26,12 +26,15 @@ import (
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
xhttp "github.com/minio/minio/cmd/http"
xjwt "github.com/minio/minio/cmd/jwt"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/hash"
iampolicy "github.com/minio/minio/pkg/iam/policy"
@@ -118,9 +121,7 @@ func getRequestAuthType(r *http.Request) authType {
return authTypeUnknown
}
// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
// It does not accept presigned or JWT or anonymous requests.
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
func validateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, map[string]interface{}, bool, APIErrorCode) {
var cred auth.Credentials
var owner bool
s3Err := ErrAccessDenied
@@ -129,7 +130,7 @@ func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iamp
// We only support admin credentials to access admin APIs.
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
if s3Err != ErrNone {
return cred, nil, owner, s3Err
}
// we only support V4 (no presign) with auth body
@@ -139,14 +140,24 @@ func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iamp
reqInfo := (&logger.ReqInfo{}).AppendTags("requestHeaders", dumpRequest(r))
ctx := logger.SetReqInfo(ctx, reqInfo)
logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description), logger.Application)
return cred, nil, owner, s3Err
}
claims, s3Err := checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return cred, nil, owner, s3Err
}
return cred, claims, owner, ErrNone
}
// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
// It does not accept presigned or JWT or anonymous requests.
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, region)
if s3Err != ErrNone {
return cred, s3Err
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.Action(action),
@@ -173,15 +184,13 @@ func getSessionToken(r *http.Request) (token string) {
// Fetch claims in the security token returned by the client, doesn't return
// errors - upon errors the returned claims map will be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
claims, _ := getClaimsFromToken(r, getSessionToken(r))
return claims
}
// Fetch claims in the security token returned by the client.
func getClaimsFromToken(r *http.Request, token string) (map[string]interface{}, error) {
claims := xjwt.NewMapClaims()
if token == "" {
return claims.Map(), nil
}
@@ -211,8 +220,9 @@ func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
// If OPA is not set, session token should
// have a policy and its mandatory, reject
// requests without policy claim.
_, pok := claims.Lookup(iamPolicyClaimName())
if !pok {
_, pokOpenID := claims.Lookup(iamPolicyClaimNameOpenID())
_, pokSA := claims.Lookup(iamPolicyClaimNameSA())
if !pokOpenID && !pokSA {
return nil, errAuthentication
}
@@ -226,7 +236,7 @@ func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
if err != nil {
// Base64 decoding fails, we should log to indicate
// something is malforming the request sent by client.
logger.LogIf(context.Background(), err, logger.Application)
logger.LogIf(r.Context(), err, logger.Application)
return nil, errAuthentication
}
claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
@@ -241,12 +251,15 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
if token != "" && cred.AccessKey == "" {
return nil, ErrNoAccessKey
}
if cred.IsServiceAccount() && token == "" {
token = cred.SessionToken
}
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
return nil, ErrInvalidToken
}
claims, err := getClaimsFromToken(r, token)
if err != nil {
return nil, toAPIErrorCode(r.Context(), err)
}
return claims, ErrNone
}
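The token check relies on crypto/subtle so that comparison time does not leak where two tokens first differ. A trivial standalone demonstration:

package main

import (
	"crypto/subtle"
	"fmt"
)

func main() {
	token, sessionToken := "abc123", "abc123"
	// ConstantTimeCompare returns 1 only on equality, and its running
	// time depends on length alone, not on where the bytes differ.
	ok := subtle.ConstantTimeCompare([]byte(token), []byte(sessionToken)) == 1
	fmt.Println(ok) // true
}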
@@ -271,7 +284,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
var cred auth.Credentials
switch getRequestAuthType(r) {
case authTypeUnknown, authTypeStreamingSigned:
return accessKey, owner, ErrSignatureVersionNotSupported
case authTypePresignedV2, authTypeSignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return accessKey, owner, s3Err
@@ -333,7 +346,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
return cred.AccessKey, owner, ErrAccessDenied
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
@@ -347,7 +360,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
return cred.AccessKey, owner, ErrAccessDenied
}
// Verify if request has valid AWS Signature Version '2'.
@@ -460,7 +473,107 @@ func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
a.handler.ServeHTTP(w, r)
return
}
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
}
func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, map[string]interface{}, APIErrorCode) {
var cred auth.Credentials
var owner bool
var s3Err APIErrorCode
switch atype {
case authTypeUnknown, authTypeStreamingSigned:
return cred, owner, nil, ErrSignatureVersionNotSupported
case authTypeSignedV2, authTypePresignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return cred, owner, nil, s3Err
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypePresigned, authTypeSigned:
region := globalServerRegion
if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
return cred, owner, nil, s3Err
}
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return cred, owner, nil, s3Err
}
claims, s3Err := checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return cred, owner, nil, s3Err
}
return cred, owner, claims, ErrNone
}
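A hypothetical caller of validateSignature, combining it with getRequestAuthType and the error-writing helpers that appear elsewhere in this diff (the handler itself is an illustration, not code from this change):

func someHandler(w http.ResponseWriter, r *http.Request) {
	cred, owner, claims, s3Err := validateSignature(getRequestAuthType(r), r)
	if s3Err != ErrNone {
		writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
		return
	}
	// ... proceed with an IAM policy check using cred, owner and claims.
	_, _, _ = cred, owner, claims
}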
func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate time.Time, retMode objectlock.RetMode, byPassSet bool, r *http.Request, cred auth.Credentials, owner bool, claims map[string]interface{}) (s3Err APIErrorCode) {
var retSet bool
if cred.AccessKey == "" {
conditions := getConditionValues(r, "", "", nil)
conditions["object-lock-mode"] = []string{string(retMode)}
conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
if retDays > 0 {
conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
}
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.Action(policy.BypassGovernanceRetentionAction),
BucketName: bucketName,
ConditionValues: conditions,
IsOwner: false,
ObjectName: objectName,
})
}
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.Action(policy.PutObjectRetentionAction),
BucketName: bucketName,
ConditionValues: conditions,
IsOwner: false,
ObjectName: objectName,
}) {
retSet = true
}
if byPassSet || retSet {
return ErrNone
}
return ErrAccessDenied
}
conditions := getConditionValues(r, "", cred.AccessKey, claims)
conditions["object-lock-mode"] = []string{string(retMode)}
conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
if retDays > 0 {
conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
}
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: policy.BypassGovernanceRetentionAction,
BucketName: bucketName,
ObjectName: objectName,
ConditionValues: conditions,
IsOwner: owner,
Claims: claims,
})
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: policy.PutObjectRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
ObjectName: objectName,
IsOwner: owner,
Claims: claims,
}) {
retSet = true
}
if byPassSet || retSet {
return ErrNone
}
return ErrAccessDenied
}
// isPutActionAllowed - check if PUT operation is allowed on the resource, this
@@ -471,7 +584,7 @@ func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.R
var owner bool
switch atype {
case authTypeUnknown:
return ErrSignatureVersionNotSupported
case authTypeSignedV2, authTypePresignedV2:
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
@@ -487,10 +600,19 @@ func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.R
return s3Err
}
// Do not check for PutObjectRetentionAction permission,
// if mode and retain until date are not set.
// Can happen when bucket has default lock config set
if action == iampolicy.PutObjectRetentionAction &&
r.Header.Get(xhttp.AmzObjectLockMode) == "" &&
r.Header.Get(xhttp.AmzObjectLockRetainUntilDate) == "" {
return ErrNone
}
if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", "", nil),
IsOwner: false,


@@ -391,6 +391,7 @@ func TestIsReqAuthenticated(t *testing.T) {
}
}
}
func TestCheckAdminRequestAuthType(t *testing.T) {
objLayer, fsDir, err := prepareFS()
if err != nil {
@@ -425,3 +426,48 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
}
}
}
func TestValidateAdminSignature(t *testing.T) {
ctx := context.Background()
objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
t.Fatalf("unable initialize config file, %s", err)
}
creds, err := auth.CreateCredentials("admin", "mypassword")
if err != nil {
t.Fatalf("unable create credential, %s", err)
}
globalActiveCred = creds
testCases := []struct {
AccessKey string
SecretKey string
ErrCode APIErrorCode
}{
{"", "", ErrInvalidAccessKeyID},
{"admin", "", ErrSignatureDoesNotMatch},
{"admin", "wrongpassword", ErrSignatureDoesNotMatch},
{"wronguser", "mypassword", ErrInvalidAccessKeyID},
{"", "mypassword", ErrInvalidAccessKeyID},
{"admin", "mypassword", ErrNone},
}
for i, testCase := range testCases {
req := mustNewRequest("GET", "http://localhost:9000/", 0, nil, t)
if err := signRequestV4(req, testCase.AccessKey, testCase.SecretKey); err != nil {
t.Fatalf("Unable to inititalized new signed http request %s", err)
}
_, _, _, s3Error := validateAdminSignature(ctx, req, globalMinioDefaultRegion)
if s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i+1, testCase.ErrCode, s3Error)
}
}
}


@@ -66,8 +66,7 @@ func waitForLowHTTPReq(tolerance int32) {
}
// Wait for heal requests and process them
func (h *healRoutine) run() {
ctx := context.Background()
func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
for {
select {
case task, ok := <-h.tasks:
@@ -76,25 +75,26 @@ func (h *healRoutine) run() {
}
// Wait and proceed if there are active requests
waitForLowHTTPReq(int32(globalEndpoints.Nodes()))
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()))
var res madmin.HealResultItem
var err error
bucket, object := path2BucketObject(task.path)
switch {
case bucket == "" && object == "":
res, err = bgHealDiskFormat(ctx, task.opts)
res, err = healDiskFormat(ctx, objAPI, task.opts)
case bucket != "" && object == "":
res, err = bgHealBucket(ctx, bucket, task.opts)
res, err = objAPI.HealBucket(ctx, bucket, task.opts.DryRun, task.opts.Remove)
case bucket != "" && object != "":
res, err = bgHealObject(ctx, bucket, object, task.opts)
res, err = objAPI.HealObject(ctx, bucket, object, task.opts)
}
if task.path != slashSeparator && task.path != nopHeal {
ObjectPathUpdated(task.path)
}
task.responseCh <- healResult{result: res, err: err}
case <-h.doneCh:
return
case <-GlobalServiceDoneCh:
case <-ctx.Done():
return
}
}
@@ -108,22 +108,10 @@ func initHealRoutine() *healRoutine {
}
func startBackgroundHealing() {
ctx := context.Background()
var objAPI ObjectLayer
for {
objAPI = newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
time.Sleep(time.Second)
continue
}
break
}
func startBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
// Run the background healer
globalBackgroundHealRoutine = initHealRoutine()
go globalBackgroundHealRoutine.run()
go globalBackgroundHealRoutine.run(ctx, objAPI)
// Launch the background healer sequence to track
// background healing operations
@@ -133,20 +121,14 @@ func startBackgroundHealing() {
globalBackgroundHealState.LaunchNewHealSequence(nh)
}
func initBackgroundHealing() {
go startBackgroundHealing()
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
go startBackgroundHealing(ctx, objAPI)
}
// bgHealDiskFormat - heals format.json, return value indicates if a
// healDiskFormat - heals format.json, return value indicates if a
// failure error occurred.
func bgHealDiskFormat(ctx context.Context, opts madmin.HealOpts) (madmin.HealResultItem, error) {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return madmin.HealResultItem{}, errServerNotInitialized
}
res, err := objectAPI.HealFormat(ctx, opts.DryRun)
func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpts) (madmin.HealResultItem, error) {
res, err := objAPI.HealFormat(ctx, opts.DryRun)
// return any error, ignore error returned when disks have
// already healed.
@@ -167,24 +149,3 @@ func bgHealDiskFormat(ctx context.Context, opts madmin.HealOpts) (madmin.HealRes
return res, nil
}
// bghealBucket - traverses and heals given bucket
func bgHealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return madmin.HealResultItem{}, errServerNotInitialized
}
return objectAPI.HealBucket(ctx, bucket, opts.DryRun, opts.Remove)
}
// bgHealObject - heal the given object and record result
func bgHealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return madmin.HealResultItem{}, errServerNotInitialized
}
return objectAPI.HealObject(ctx, bucket, object, opts.DryRun, opts.Remove, opts.ScanMode)
}
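For orientation, this is roughly how a heal request reaches run() above: a task carrying a "bucket/object" path and a response channel is queued on the routine's tasks channel, and run() dispatches it to HealFormat, HealBucket or HealObject based on the path. The healTask field names mirror their usage in run() and are otherwise assumptions:

func queueExampleHeal() error {
	respCh := make(chan healResult)
	globalBackgroundHealRoutine.tasks <- healTask{
		path:       "mybucket/myobject", // "" heals format, "bucket" heals a bucket
		opts:       madmin.HealOpts{ScanMode: madmin.HealNormalScan},
		responseCh: respCh,
	}
	// Block until the background healer reports back.
	res := <-respCh
	return res.err
}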


@@ -25,32 +25,19 @@ import (
const defaultMonitorNewDiskInterval = time.Minute * 10
func initLocalDisksAutoHeal() {
go monitorLocalDisksAndHeal()
func initLocalDisksAutoHeal(ctx context.Context, objAPI ObjectLayer) {
go monitorLocalDisksAndHeal(ctx, objAPI)
}
// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible to perform the heal
func monitorLocalDisksAndHeal() {
// Wait until the object layer is ready
var objAPI ObjectLayer
for {
objAPI = newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
time.Sleep(time.Second)
continue
}
break
}
func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
z, ok := objAPI.(*xlZones)
if !ok {
return
}
ctx := context.Background()
var bgSeq *healSequence
var found bool
@@ -64,64 +51,74 @@ func monitorLocalDisksAndHeal() {
// Perform automatic disk healing when a disk is replaced locally.
for {
time.Sleep(defaultMonitorNewDiskInterval)
// Attempt a heal as the server starts-up first.
localDisksInZoneHeal := make([]Endpoints, len(z.zones))
for i, ep := range globalEndpoints {
localDisksToHeal := Endpoints{}
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
select {
case <-ctx.Done():
return
case <-time.After(defaultMonitorNewDiskInterval):
// Attempt a heal as the server starts-up first.
localDisksInZoneHeal := make([]Endpoints, len(z.zones))
var healNewDisks bool
for i, ep := range globalEndpoints {
localDisksToHeal := Endpoints{}
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
_, _, err := connectEndpoint(endpoint)
if err == errUnformattedDisk {
localDisksToHeal = append(localDisksToHeal, endpoint)
}
}
if len(localDisksToHeal) == 0 {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
_, _, err := connectEndpoint(endpoint)
if err == errUnformattedDisk {
localDisksToHeal = append(localDisksToHeal, endpoint)
}
localDisksInZoneHeal[i] = localDisksToHeal
healNewDisks = true
}
if len(localDisksToHeal) == 0 {
// Reformat disks only if needed.
if !healNewDisks {
continue
}
localDisksInZoneHeal[i] = localDisksToHeal
}
// Reformat disks
bgSeq.sourceCh <- SlashSeparator
// Reformat disks
bgSeq.sourceCh <- healSource{path: SlashSeparator}
// Ensure that reformatting disks is finished
bgSeq.sourceCh <- nopHeal
// Ensure that reformatting disks is finished
bgSeq.sourceCh <- healSource{path: nopHeal}
var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
// Compute the list of erasure set to heal
for i, localDisksToHeal := range localDisksInZoneHeal {
var erasureSetToHeal []int
for _, endpoint := range localDisksToHeal {
// Load the new format of this passed endpoint
_, format, err := connectEndpoint(endpoint)
if err != nil {
logger.LogIf(ctx, err)
continue
var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
// Compute the list of erasure set to heal
for i, localDisksToHeal := range localDisksInZoneHeal {
var erasureSetToHeal []int
for _, endpoint := range localDisksToHeal {
// Load the new format of this passed endpoint
_, format, err := connectEndpoint(endpoint)
if err != nil {
logger.LogIf(ctx, err)
continue
}
// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[i].format, format)
if err != nil {
logger.LogIf(ctx, err)
continue
}
erasureSetToHeal = append(erasureSetToHeal, setIndex)
}
// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[i].format, format)
if err != nil {
logger.LogIf(ctx, err)
continue
}
erasureSetToHeal = append(erasureSetToHeal, setIndex)
erasureSetInZoneToHeal[i] = erasureSetToHeal
}
erasureSetInZoneToHeal[i] = erasureSetToHeal
}
// Heal all erasure sets that need healing
for i, erasureSetToHeal := range erasureSetInZoneToHeal {
for _, setIndex := range erasureSetToHeal {
err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex])
if err != nil {
logger.LogIf(ctx, err)
// Heal all erasure sets that need healing
for i, erasureSetToHeal := range erasureSetInZoneToHeal {
for _, setIndex := range erasureSetToHeal {
err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex])
if err != nil {
logger.LogIf(ctx, err)
}
}
}
}
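
The hunk above replaces the blocking time.Sleep with a select over ctx.Done() and time.After, so the disk monitor exits promptly on shutdown instead of sleeping through it. A minimal, self-contained sketch of that loop shape, using hypothetical monitor/scan helpers rather than MinIO's actual APIs:

package main

import (
	"context"
	"fmt"
	"time"
)

// monitor runs scan on a fixed interval until ctx is cancelled.
func monitor(ctx context.Context, interval time.Duration, scan func() []string) {
	for {
		select {
		case <-ctx.Done():
			return // server is shutting down; stop monitoring
		case <-time.After(interval):
			if disks := scan(); len(disks) > 0 {
				fmt.Println("queueing heal for:", disks)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
	defer cancel()
	monitor(ctx, 100*time.Millisecond, func() []string { return []string{"/disk1"} })
}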

View File

@@ -28,11 +28,6 @@ import (
humanize "github.com/dustin/go-humanize"
)
// Prepare XL/FS backend for benchmark.
func prepareBenchmarkBackend(instanceType string) (ObjectLayer, []string, error) {
return prepareTestBackend(instanceType)
}
// Benchmark utility functions for ObjectLayer.PutObject().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
@@ -135,7 +130,9 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -149,7 +146,9 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -163,7 +162,9 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -242,7 +243,9 @@ func generateBytesData(size int) []byte {
// creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -256,7 +259,9 @@ func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
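
Every benchmark above now creates its own cancellable context and defers the cancel, so backend goroutines are released even when the benchmark bails out early with b.Fatalf. The setup pattern in isolation (BenchmarkExample is a hypothetical name):

package cmd_test

import (
	"context"
	"testing"
)

func BenchmarkExample(b *testing.B) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // runs on any exit path, including b.Fatalf
	for i := 0; i < b.N; i++ {
		_ = ctx // hand ctx to the backend under test here
	}
}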

View File

@@ -18,7 +18,6 @@ package cmd
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"hash"
@@ -27,6 +26,14 @@ import (
"github.com/minio/minio/cmd/logger"
)
type errHashMismatch struct {
message string
}
func (err *errHashMismatch) Error() string {
return err.message
}
// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow *io.PipeWriter
@@ -132,9 +139,9 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
b.h.Write(buf)
if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
err = fmt.Errorf("hashes do not match expected %s, got %s",
hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))
logger.LogIf(context.Background(), err)
err := &errHashMismatch{fmt.Sprintf("hashes do not match expected %s, got %s",
hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
logger.LogIf(GlobalContext, err)
return 0, err
}
b.currOffset += int64(len(buf))
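
Promoting the mismatch to a dedicated errHashMismatch type, instead of a bare fmt.Errorf, lets callers tell bitrot apart from ordinary I/O failures. A sketch of the same technique checked with errors.As; the verify helper is illustrative, not MinIO code:

package main

import (
	"errors"
	"fmt"
)

type errHashMismatch struct{ message string }

func (e *errHashMismatch) Error() string { return e.message }

func verify(expected, got string) error {
	if expected != got {
		return &errHashMismatch{fmt.Sprintf("hashes do not match expected %s, got %s", expected, got)}
	}
	return nil
}

func main() {
	err := verify("abc", "def")
	var hm *errHashMismatch
	fmt.Println(errors.As(err, &hm)) // true: the caller can react to bitrot specifically
}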

View File

@@ -17,7 +17,6 @@
package cmd
import (
"context"
"hash"
"io"
@@ -36,12 +35,12 @@ type wholeBitrotWriter struct {
func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
err := b.disk.AppendFile(b.volume, b.filePath, p)
if err != nil {
logger.LogIf(context.Background(), err)
logger.LogIf(GlobalContext, err)
return 0, err
}
_, err = b.Hash.Write(p)
if err != nil {
logger.LogIf(context.Background(), err)
logger.LogIf(GlobalContext, err)
return 0, err
}
return len(p), nil
@@ -70,14 +69,14 @@ func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error)
if b.buf == nil {
b.buf = make([]byte, b.tillOffset-offset)
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
ctx := context.Background()
ctx := GlobalContext
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
logger.LogIf(ctx, err)
return 0, err
}
}
if len(b.buf) < len(buf) {
logger.LogIf(context.Background(), errLessData)
logger.LogIf(GlobalContext, errLessData)
return 0, errLessData
}
n = copy(buf, b.buf)

View File

@@ -17,7 +17,6 @@
package cmd
import (
"context"
"errors"
"hash"
"io"
@@ -72,7 +71,7 @@ func (a BitrotAlgorithm) New() hash.Hash {
hh, _ := highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
return hh
default:
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
logger.CriticalIf(GlobalContext, errors.New("Unsupported bitrot algorithm"))
return nil
}
}
@@ -88,7 +87,7 @@ func (a BitrotAlgorithm) Available() bool {
func (a BitrotAlgorithm) String() string {
name, ok := bitrotAlgorithms[a]
if !ok {
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
logger.CriticalIf(GlobalContext, errors.New("Unsupported bitrot algorithm"))
}
return name
}
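
For context, the HighwayHash-256 branch above uses github.com/minio/highwayhash, whose New rejects any key that is not exactly 32 bytes; that is why the error can safely be discarded for the fixed magic key. A hedged usage sketch with an all-zero demo key:

package main

import (
	"fmt"

	"github.com/minio/highwayhash"
)

func main() {
	key := make([]byte, 32) // must be exactly 32 bytes; zero key only for illustration
	h, err := highwayhash.New(key)
	if err != nil {
		panic(err) // only reachable with a wrong key length
	}
	h.Write([]byte("block data"))
	fmt.Printf("%x\n", h.Sum(nil))
}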

View File

@@ -62,9 +62,9 @@ func (s1 ServerSystemConfig) Diff(s2 ServerSystemConfig) error {
return fmt.Errorf("Expected platform '%s', found to be running '%s'",
s1.MinioPlatform, s2.MinioPlatform)
}
if s1.MinioEndpoints.Nodes() != s2.MinioEndpoints.Nodes() {
return fmt.Errorf("Expected number of endpoints %d, seen %d", s1.MinioEndpoints.Nodes(),
s2.MinioEndpoints.Nodes())
if s1.MinioEndpoints.NEndpoints() != s2.MinioEndpoints.NEndpoints() {
return fmt.Errorf("Expected number of endpoints %d, seen %d", s1.MinioEndpoints.NEndpoints(),
s2.MinioEndpoints.NEndpoints())
}
for i, ep := range s1.MinioEndpoints {
@@ -110,7 +110,7 @@ func registerBootstrapRESTHandlers(router *mux.Router) {
httpTraceHdrs(server.VerifyHandler))
}
// client to talk to bootstrap Nodes.
// client to talk to bootstrap Nodes.
type bootstrapRESTClient struct {
endpoint Endpoint
restClient *rest.Client
@@ -126,7 +126,7 @@ func (client *bootstrapRESTClient) reConnect() {
// permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()
// after verifying format.json
func (client *bootstrapRESTClient) call(method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
return client.callWithContext(context.Background(), method, values, body, length)
return client.callWithContext(GlobalContext, method, values, body, length)
}
// Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected
@@ -247,7 +247,7 @@ func newBootstrapRESTClient(endpoint Endpoint) (*bootstrapRESTClient, error) {
}
}
trFn := newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout, rest.DefaultRESTTimeout)
trFn := newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)
restClient, err := rest.NewClient(serverURL, trFn, newAuthToken)
if err != nil {
return nil, err

View File

@@ -44,7 +44,7 @@ func NewBucketSSEConfigSys() *BucketSSEConfigSys {
// load - Loads the bucket encryption configuration for the given list of buckets
func (sys *BucketSSEConfigSys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
for _, bucket := range buckets {
config, err := objAPI.GetBucketSSEConfig(context.Background(), bucket.Name)
config, err := objAPI.GetBucketSSEConfig(GlobalContext, bucket.Name)
if err != nil {
if _, ok := err.(BucketSSEConfigNotFound); ok {
sys.Remove(bucket.Name)
@@ -81,7 +81,7 @@ func (sys *BucketSSEConfigSys) Get(bucket string) (config bucketsse.BucketSSECon
return
}
cfg, err := objAPI.GetBucketSSEConfig(context.Background(), bucket)
cfg, err := objAPI.GetBucketSSEConfig(GlobalContext, bucket)
if err != nil {
return
}
@@ -130,7 +130,7 @@ func saveBucketSSEConfig(ctx context.Context, objAPI ObjectLayer, bucket string,
func getBucketSSEConfig(objAPI ObjectLayer, bucket string) (*bucketsse.BucketSSEConfig, error) {
// Path to bucket-encryption.xml for the given bucket.
configFile := path.Join(bucketConfigPrefix, bucket, bucketSSEConfig)
configData, err := readConfig(context.Background(), objAPI, configFile)
configData, err := readConfig(GlobalContext, objAPI, configFile)
if err != nil {
if err == errConfigNotFound {
err = BucketSSEConfigNotFound{Bucket: bucket}

View File

@@ -17,7 +17,6 @@
package cmd
import (
"context"
"encoding/base64"
"encoding/xml"
"fmt"
@@ -45,10 +44,8 @@ import (
)
const (
getBucketVersioningResponse = `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>`
objectLockConfig = "object-lock.xml"
bucketObjectLockEnabledConfigFile = "object-lock-enabled.json"
bucketObjectLockEnabledConfig = `{"x-amz-bucket-object-lock-enabled":true}`
getBucketVersioningResponse = `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>`
objectLockConfig = "object-lock.xml"
)
// Check if there are buckets on server without corresponding entry in etcd backend and
@@ -70,7 +67,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// Get buckets in the DNS
dnsBuckets, err := globalDNSConfig.List()
if err != nil && err != dns.ErrNoEntriesFound {
logger.LogIf(context.Background(), err)
logger.LogIf(GlobalContext, err)
return
}
@@ -118,12 +115,12 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
for _, err := range g.Wait() {
if err != nil {
logger.LogIf(context.Background(), err)
logger.LogIf(GlobalContext, err)
}
}
for _, bucket := range bucketsInConflict.ToSlice() {
logger.LogIf(context.Background(), fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket. Use one of these IP addresses %v to access the bucket", bucket, globalDomainIPs.ToSlice()))
logger.LogIf(GlobalContext, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket. Use one of these IP addresses %v to access the bucket", bucket, globalDomainIPs.ToSlice()))
}
// Remove buckets that are in DNS for this server, but aren't local
@@ -140,7 +137,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// We go to here, so we know the bucket no longer exists,
// but is registered in DNS to this server
if err = globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(context.Background(), fmt.Errorf("Failed to remove DNS entry for %s due to %w",
logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
bucket, err))
}
}
@@ -266,7 +263,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
listBuckets := objectAPI.ListBuckets
accessKey, owner, s3Error := checkRequestAuthTypeToAccessKey(ctx, r, policy.ListAllMyBucketsAction, "", "")
if s3Error != ErrNone {
if s3Error != ErrNone && s3Error != ErrAccessDenied {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
@@ -293,34 +290,53 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
for i := range bucketsInfo {
meta, err := loadBucketMetadata(ctx, objectAPI, bucketsInfo[i].Name)
logger.LogIf(ctx, err)
if err == nil {
bucketsInfo[i].Created = meta.Created
}
}
}
// Set prefix value for "s3:prefix" policy conditionals.
r.Header.Set("prefix", "")
if s3Error == ErrAccessDenied {
// Set prefix value for "s3:prefix" policy conditionals.
r.Header.Set("prefix", "")
// Set delimiter value for "s3:delimiter" policy conditionals.
r.Header.Set("delimiter", SlashSeparator)
// Set delimiter value for "s3:delimiter" policy conditionals.
r.Header.Set("delimiter", SlashSeparator)
// err will be nil here as we already called this function
// earlier in this request.
claims, _ := getClaimsFromToken(r)
var newBucketsInfo []BucketInfo
for _, bucketInfo := range bucketsInfo {
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Action: iampolicy.ListBucketAction,
BucketName: bucketInfo.Name,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
newBucketsInfo = append(newBucketsInfo, bucketInfo)
// err will be nil here as we already called this function
// earlier in this request.
claims, _ := getClaimsFromToken(r, getSessionToken(r))
n := 0
// Use the following trick to filter in place
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
for _, bucketInfo := range bucketsInfo {
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Action: iampolicy.ListBucketAction,
BucketName: bucketInfo.Name,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
bucketsInfo[n] = bucketInfo
n++
}
}
bucketsInfo = bucketsInfo[:n]
// No buckets can be filtered return access denied error.
if len(bucketsInfo) == 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
}
// Generate response.
response := generateListBucketsResponse(newBucketsInfo)
response := generateListBucketsResponse(bucketsInfo)
encodedSuccessResponse := encodeResponse(response)
// Write response.
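
The SliceTricks filter referenced in the comment keeps permitted buckets in the original backing array instead of allocating a second slice as the old newBucketsInfo did. The idiom in isolation, with allowed standing in for the IAM check:

package main

import "fmt"

func main() {
	buckets := []string{"public", "private", "logs"}
	allowed := func(b string) bool { return b != "private" } // stand-in for globalIAMSys.IsAllowed
	n := 0
	for _, b := range buckets {
		if allowed(b) {
			buckets[n] = b // overwrite in place; safe since n never passes the read index
			n++
		}
	}
	buckets = buckets[:n]
	fmt.Println(buckets) // [public logs]
}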
@@ -388,11 +404,14 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}
continue
}
govBypassPerms := checkRequestAuthType(ctx, r, policy.BypassGovernanceRetentionAction, bucket, object.ObjectName)
if _, err := enforceRetentionBypassForDelete(ctx, r, bucket, object.ObjectName, getObjectInfoFn, govBypassPerms); err != ErrNone {
dErrs[index] = err
continue
if _, ok := globalBucketObjectLockConfig.Get(bucket); ok {
if err := enforceRetentionBypassForDelete(ctx, r, bucket, object.ObjectName, getObjectInfoFn); err != ErrNone {
dErrs[index] = err
continue
}
}
// Avoid duplicate objects, we use map to filter them out.
if _, ok := objectsToDelete[object.ObjectName]; !ok {
objectsToDelete[object.ObjectName] = index
@@ -519,16 +538,15 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
return
}
if objectLockEnabled {
configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
if err = saveConfig(ctx, objectAPI, configFile, []byte(bucketObjectLockEnabledConfig)); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
meta := newBucketMetadata(bucket)
meta.LockEnabled = objectLockEnabled
if err := meta.save(ctx, objectAPI); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err = globalDNSConfig.Put(bucket); err != nil {
objectAPI.DeleteBucket(ctx, bucket)
objectAPI.DeleteBucket(ctx, bucket, false)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -563,14 +581,17 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
return
}
if objectLockEnabled && !globalIsGateway {
configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
if err = saveConfig(ctx, objectAPI, configFile, []byte(bucketObjectLockEnabledConfig)); err != nil {
if !globalIsGateway {
meta := newBucketMetadata(bucket)
meta.LockEnabled = objectLockEnabled
if err := meta.save(ctx, objectAPI); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
globalBucketObjectLockConfig.Set(bucket, objectlock.Retention{})
globalNotificationSys.PutBucketObjectLockConfig(ctx, bucket, objectlock.Retention{})
if objectLockEnabled {
globalBucketObjectLockConfig.Set(bucket, objectlock.Retention{})
globalNotificationSys.PutBucketObjectLockConfig(ctx, bucket, objectlock.Retention{})
}
}
// Make sure to add Location information here only for bucket
@@ -742,7 +763,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
rawReader := hashReader
pReader := NewPutObjReader(rawReader, nil, nil)
var objectEncryptionKey []byte
var objectEncryptionKey crypto.ObjectKey
// Check if bucket encryption is enabled
_, encEnabled := globalBucketSSEConfigSys.Get(bucket)
@@ -784,7 +805,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
pReader = NewPutObjReader(rawReader, hashReader, objectEncryptionKey)
pReader = NewPutObjReader(rawReader, hashReader, &objectEncryptionKey)
}
}
@@ -883,15 +904,39 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
forceDelete := false
if value := r.Header.Get(xhttp.MinIOForceDelete); value != "" {
switch value {
case "true":
forceDelete = true
case "false":
default:
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL, guessIsBrowserReq(r))
return
}
}
if forceDelete {
if s3Error := checkRequestAuthType(ctx, r, policy.ForceDeleteBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
} else {
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
}
if _, ok := globalBucketObjectLockConfig.Get(bucket); ok && forceDelete {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
return
}
deleteBucket := objectAPI.DeleteBucket
// Attempt to delete bucket.
if err := deleteBucket(ctx, bucket); err != nil {
if err := deleteBucket(ctx, bucket, forceDelete); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
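
Note the force-delete header is parsed strictly: only the literals "true" and "false" are accepted and anything else fails the request, rather than being coerced via strconv.ParseBool. The same decision table as a hypothetical helper:

package main

import (
	"errors"
	"fmt"
)

// parseForceDelete mirrors the switch above: empty means false,
// unknown values are an error instead of a silent default.
func parseForceDelete(value string) (bool, error) {
	switch value {
	case "", "false":
		return false, nil
	case "true":
		return true, nil
	default:
		return false, errors.New("invalid request")
	}
}

func main() {
	for _, v := range []string{"", "true", "false", "yes"} {
		ok, err := parseForceDelete(v)
		fmt.Println(v, ok, err)
	}
}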
@@ -905,6 +950,9 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
}
}
// Delete metadata, only log errors.
logger.LogIf(ctx, newBucketMetadata(bucket).delete(ctx, objectAPI))
globalNotificationSys.DeleteBucket(ctx, bucket)
// Write success response.
@@ -984,11 +1032,6 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
return
}
// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketObjectLockConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
@@ -1000,27 +1043,23 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
return
}
configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
configData, err := readConfig(ctx, objectAPI, configFile)
meta, err := loadBucketMetadata(ctx, objectAPI, bucket)
if err != nil {
aerr := toAPIError(ctx, err)
if err == errConfigNotFound {
aerr = errorCodes.ToAPIErr(ErrObjectLockConfigurationNotAllowed)
}
writeErrorResponse(ctx, w, aerr, r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if !meta.LockEnabled {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrObjectLockConfigurationNotAllowed), r.URL, guessIsBrowserReq(r))
return
}
if string(configData) != bucketObjectLockEnabledConfig {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL, guessIsBrowserReq(r))
return
}
data, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configFile = path.Join(bucketConfigPrefix, bucket, objectLockConfig)
configFile := path.Join(bucketConfigPrefix, bucket, objectLockConfig)
if err = saveConfig(ctx, objectAPI, configFile, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
@@ -1061,26 +1100,19 @@ func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWri
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
configData, err := readConfig(ctx, objectAPI, configFile)
meta, err := loadBucketMetadata(ctx, objectAPI, bucket)
if err != nil {
var aerr APIError
if err == errConfigNotFound {
aerr = errorCodes.ToAPIErr(ErrMethodNotAllowed)
} else {
aerr = toAPIError(ctx, err)
}
writeErrorResponse(ctx, w, aerr, r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if !meta.LockEnabled {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrObjectLockConfigurationNotAllowed), r.URL, guessIsBrowserReq(r))
return
}
if string(configData) != bucketObjectLockEnabledConfig {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL, guessIsBrowserReq(r))
return
}
configFile = path.Join(bucketConfigPrefix, bucket, objectLockConfig)
configData, err = readConfig(ctx, objectAPI, configFile)
configFile := path.Join(bucketConfigPrefix, bucket, objectLockConfig)
configData, err := readConfig(ctx, objectAPI, configFile)
if err != nil {
if err != errConfigNotFound {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))

View File

@@ -18,7 +18,6 @@ package cmd
import (
"bytes"
"context"
"encoding/xml"
"io/ioutil"
"net/http"
@@ -29,6 +28,51 @@ import (
"github.com/minio/minio/pkg/auth"
)
// Wrapper for calling RemoveBucket HTTP handler tests for both XL multiple disks and single node setup.
func TestRemoveBucketHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testRemoveBucketHandler, []string{"RemoveBucket"})
}
func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
_, err := obj.PutObject(GlobalContext, bucketName, "test-object", mustGetPutObjReader(t, bytes.NewBuffer([]byte{}), int64(0), "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), ObjectOptions{})
// if object upload fails, stop the test.
if err != nil {
t.Fatalf("Error uploading object: <ERROR> %v", err)
}
// initialize httptest Recorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for DELETE bucket.
req, err := newTestSignedRequestV4("DELETE", getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for RemoveBucketHandler: <ERROR> %v", instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
switch rec.Code {
case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent:
t.Fatalf("Test %v: expected failure, but succeeded with %v", instanceType, rec.Code)
}
// Verify response of the V2 signed HTTP request.
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for DELETE bucket.
reqV2, err := newTestSignedRequestV2("DELETE", getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for RemoveBucketHandler: <ERROR> %v", instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV2, reqV2)
switch recV2.Code {
case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent:
t.Fatalf("Test %v: expected failure, but succeeded with %v", instanceType, recV2.Code)
}
}
// Wrapper for calling GetBucketPolicy HTTP handler tests for both XL multiple disks and single node setup.
func TestGetBucketLocationHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testGetBucketLocationHandler, []string{"GetBucketLocation"})
@@ -625,7 +669,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
for i := 0; i < 10; i++ {
objectName := "test-object-" + strconv.Itoa(i)
// uploading the object.
_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
_, err = obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
// if object upload fails, stop the test.
if err != nil {
t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)

View File

@@ -0,0 +1,303 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"encoding/xml"
"net/http"
"net/http/httptest"
"testing"
"github.com/minio/minio/pkg/auth"
)
// Test S3 Bucket lifecycle APIs with wrong credentials
func TestBucketLifecycleWrongCredentials(t *testing.T) {
ExecObjectLayerAPITest(t, testBucketLifecycleHandlersWrongCredentials, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
}
// Test for authentication
func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
// test cases with sample input and expected output.
testCases := []struct {
method string
bucketName string
accessKey string
secretKey string
// Sent body
body []byte
// Expected response
expectedRespStatus int
lifecycleResponse []byte
errorResponse APIErrorResponse
shouldPass bool
}{
// GET empty credentials
{
method: "GET", bucketName: bucketName,
accessKey: "",
secretKey: "",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "AccessDenied",
Message: "Access Denied.",
},
shouldPass: false,
},
// GET wrong credentials
{
method: "GET", bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidAccessKeyId",
Message: "The access key ID you provided does not exist in our records.",
},
shouldPass: false,
},
// PUT empty credentials
{
method: "PUT",
bucketName: bucketName,
accessKey: "",
secretKey: "",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "AccessDenied",
Message: "Access Denied.",
},
shouldPass: false,
},
// PUT wrong credentials
{
method: "PUT",
bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidAccessKeyId",
Message: "The access key ID you provided does not exist in our records.",
},
shouldPass: false,
},
// DELETE empty credentials
{
method: "DELETE",
bucketName: bucketName,
accessKey: "",
secretKey: "",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "AccessDenied",
Message: "Access Denied.",
},
shouldPass: false,
},
// DELETE wrong credentials
{
method: "DELETE",
bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidAccessKeyId",
Message: "The access key ID you provided does not exist in our records.",
},
shouldPass: false,
},
}
testBucketLifecycle(obj, instanceType, bucketName, apiRouter, t, testCases)
}
// Test S3 Bucket lifecycle APIs
func TestBucketLifecycle(t *testing.T) {
ExecObjectLayerAPITest(t, testBucketLifecycleHandlers, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
}
// Simple tests of bucket lifecycle: PUT, GET, DELETE.
// Tests are related and the order is important.
func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
creds auth.Credentials, t *testing.T) {
// test cases with sample input and expected output.
testCases := []struct {
method string
bucketName string
accessKey string
secretKey string
// Sent body
body []byte
// Expected response
expectedRespStatus int
lifecycleResponse []byte
errorResponse APIErrorResponse
shouldPass bool
}{
// Test case - 1.
// Filter contains more than (Prefix,Tag,And) rule
{
method: "PUT",
bucketName: bucketName,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
body: []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix><Tag><Key>Key1</Key><Value>Value1</Value></Tag></Filter><Status>Enabled</Status><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
expectedRespStatus: http.StatusBadRequest,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidRequest",
Message: "Filter must have exactly one of Prefix, Tag, or And specified",
},
shouldPass: false,
},
// Date contains wrong format
{
method: "PUT",
bucketName: bucketName,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
body: []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix><Tag><Key>Key1</Key><Value>Value1</Value></Tag></Filter><Status>Enabled</Status><Expiration><Date>365</Date></Expiration></Rule></LifecycleConfiguration>`),
expectedRespStatus: http.StatusBadRequest,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidRequest",
Message: "Date must be provided in ISO 8601 format",
},
shouldPass: false,
},
{
method: "PUT",
bucketName: bucketName,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
body: []byte(`<?xml version="1.0" encoding="UTF-8"?><LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix></Filter><Status>Enabled</Status><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
expectedRespStatus: http.StatusOK,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{},
shouldPass: true,
},
{
method: "GET",
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
bucketName: bucketName,
body: []byte(``),
expectedRespStatus: http.StatusOK,
lifecycleResponse: []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Status>Enabled</Status><Filter><Prefix>logs/</Prefix></Filter><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
errorResponse: APIErrorResponse{},
shouldPass: true,
},
{
method: "DELETE",
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
bucketName: bucketName,
body: []byte(``),
expectedRespStatus: http.StatusNoContent,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{},
shouldPass: true,
},
{
method: "GET",
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
bucketName: bucketName,
body: []byte(``),
expectedRespStatus: http.StatusNotFound,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "NoSuchLifecycleConfiguration",
Message: "The lifecycle configuration does not exist",
},
shouldPass: false,
},
}
testBucketLifecycle(obj, instanceType, bucketName, apiRouter, t, testCases)
}
// testBucketLifecycle is a generic test helper for lifecycle requests
func testBucketLifecycle(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
t *testing.T, testCases []struct {
method string
bucketName string
accessKey string
secretKey string
body []byte
expectedRespStatus int
lifecycleResponse []byte
errorResponse APIErrorResponse
shouldPass bool
}) {
for i, testCase := range testCases {
// initialize httptest Recorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request
req, err := newTestSignedRequestV4(testCase.method, getBucketLifecycleURL("", testCase.bucketName),
int64(len(testCase.body)), bytes.NewReader(testCase.body), testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for GetBucketLocationHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
if testCase.shouldPass && !bytes.Equal(testCase.lifecycleResponse, rec.Body.Bytes()) {
t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.lifecycleResponse), rec.Body.String())
}
errorResponse := APIErrorResponse{}
err = xml.Unmarshal(rec.Body.Bytes(), &errorResponse)
if err != nil && !testCase.shouldPass {
t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, rec.Body.String())
}
if errorResponse.Resource != testCase.errorResponse.Resource {
t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
}
if errorResponse.Message != testCase.errorResponse.Message {
t.Errorf("Test %d: %s: Expected the error message to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Message, errorResponse.Message)
}
if errorResponse.Code != testCase.errorResponse.Code {
t.Errorf("Test %d: %s: Expected the error code to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Code, errorResponse.Code)
}
}
}
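
The lifecycle bodies in the test table are plain XML; a minimal decode of the accepted configuration, using local stand-in types rather than MinIO's lifecycle package:

package main

import (
	"encoding/xml"
	"fmt"
)

// lifecycleConfig mirrors only the fields exercised by the tests above.
type lifecycleConfig struct {
	XMLName xml.Name `xml:"LifecycleConfiguration"`
	Rules   []struct {
		ID     string `xml:"ID"`
		Status string `xml:"Status"`
		Days   int    `xml:"Expiration>Days"`
	} `xml:"Rule"`
}

func main() {
	doc := `<LifecycleConfiguration><Rule><ID>id</ID><Status>Enabled</Status><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`
	var cfg lifecycleConfig
	if err := xml.Unmarshal([]byte(doc), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Rules[0].ID, cfg.Rules[0].Days) // id 365
}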

cmd/bucket-meta.go (new file, 165 lines)
View File

@@ -0,0 +1,165 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"encoding/binary"
"errors"
"fmt"
"path"
"time"
"github.com/minio/minio/cmd/logger"
)
const (
legacyBucketObjectLockEnabledConfigFile = "object-lock-enabled.json"
legacyBucketObjectLockEnabledConfig = `{"x-amz-bucket-object-lock-enabled":true}`
bucketMetadataFile = ".metadata.bin"
bucketMetadataFormat = 1
bucketMetadataVersion = 1
)
//go:generate msgp -file $GOFILE -unexported
// bucketMetadata contains bucket metadata.
// When adding/removing fields, regenerate the marshal code using the go generate above.
// Only changing meaning of fields requires a version bump.
// bucketMetadataFormat refers to the format.
// bucketMetadataVersion can be used to track a rolling upgrade of a field.
type bucketMetadata struct {
Name string
Created time.Time
LockEnabled bool
}
// newBucketMetadata creates bucketMetadata with the supplied name and Created to Now.
func newBucketMetadata(name string) bucketMetadata {
return bucketMetadata{
Name: name,
Created: UTCNow(),
}
}
// loadBucketMeta loads the metadata of the named bucket from ObjectLayer o.
// If an error is returned, the returned metadata will be default-initialized.
func loadBucketMeta(ctx context.Context, o ObjectLayer, name string) (bucketMetadata, error) {
b := newBucketMetadata(name)
configFile := path.Join(bucketConfigPrefix, name, bucketMetadataFile)
data, err := readConfig(ctx, o, configFile)
if err != nil {
return b, err
}
if len(data) <= 4 {
return b, fmt.Errorf("loadBucketMetadata: no data")
}
// Read header
switch binary.LittleEndian.Uint16(data[0:2]) {
case bucketMetadataFormat:
default:
return b, fmt.Errorf("loadBucketMetadata: unknown format: %d", binary.LittleEndian.Uint16(data[0:2]))
}
switch binary.LittleEndian.Uint16(data[2:4]) {
case bucketMetadataVersion:
default:
return b, fmt.Errorf("loadBucketMetadata: unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
}
// OK, parse data.
_, err = b.UnmarshalMsg(data[4:])
return b, err
}
var errMetaDataConverted = errors.New("metadata converted")
// loadBucketMetadata loads and migrates to bucket metadata.
func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket string) (bucketMetadata, error) {
meta, err := loadBucketMeta(ctx, objectAPI, bucket)
if err == nil {
return meta, nil
}
if err != errConfigNotFound {
return meta, err
}
// Reaching here means an old bucket without bucket metadata, so migrate the existing settings.
if err = meta.convertLegacyLockconfig(ctx, objectAPI); err != nil {
return meta, err
}
return meta, errMetaDataConverted
}
func (b *bucketMetadata) convertLegacyLockconfig(ctx context.Context, objectAPI ObjectLayer) error {
configFile := path.Join(bucketConfigPrefix, b.Name, legacyBucketObjectLockEnabledConfigFile)
save := func() error {
if err := b.save(ctx, objectAPI); err != nil {
return err
}
logger.LogIf(ctx, deleteConfig(ctx, objectAPI, configFile))
return nil
}
configData, err := readConfig(ctx, objectAPI, configFile)
if err != nil {
if err != errConfigNotFound {
return err
}
return save()
}
if string(configData) != legacyBucketObjectLockEnabledConfig {
return fmt.Errorf("content mismatch in config file %v", configFile)
}
b.LockEnabled = true
return save()
}
// save config to supplied ObjectLayer o.
func (b *bucketMetadata) save(ctx context.Context, o ObjectLayer) error {
data := make([]byte, 4, b.Msgsize()+4)
// Put header
binary.LittleEndian.PutUint16(data[0:2], bucketMetadataFormat)
binary.LittleEndian.PutUint16(data[2:4], bucketMetadataVersion)
// Add data
data, err := b.MarshalMsg(data)
if err != nil {
return err
}
configFile := path.Join(bucketConfigPrefix, b.Name, bucketMetadataFile)
return saveConfig(ctx, o, configFile, data)
}
// delete the config metadata.
// If the config does not exist, no error is returned.
func (b bucketMetadata) delete(ctx context.Context, o ObjectLayer) error {
configFile := path.Join(bucketConfigPrefix, b.Name, bucketMetadataFile)
err := deleteConfig(ctx, o, configFile)
if err == errConfigNotFound {
// We don't care
err = nil
}
return err
}
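
As loadBucketMeta reads it back, the on-disk layout of .metadata.bin is a four-byte header, two little-endian uint16s for format and version, followed by the msgp-encoded struct. A sketch of just the header handling, with the msgp payload elided:

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	bucketMetadataFormat  = 1
	bucketMetadataVersion = 1
)

func main() {
	// Encode: reserve 4 header bytes; the msgp payload is appended after them.
	data := make([]byte, 4)
	binary.LittleEndian.PutUint16(data[0:2], bucketMetadataFormat)
	binary.LittleEndian.PutUint16(data[2:4], bucketMetadataVersion)
	// data, _ = meta.MarshalMsg(data) // payload would follow here

	// Decode: reject unknown format/version before touching the payload.
	format := binary.LittleEndian.Uint16(data[0:2])
	version := binary.LittleEndian.Uint16(data[2:4])
	if format != bucketMetadataFormat || version != bucketMetadataVersion {
		fmt.Println("unknown format/version:", format, version)
		return
	}
	fmt.Println("header ok:", format, version)
}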

cmd/bucket-meta_gen.go (new file, 160 lines)
View File

@@ -0,0 +1,160 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *bucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Name":
z.Name, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "Created":
z.Created, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "Created")
return
}
case "LockEnabled":
z.LockEnabled, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "LockEnabled")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z bucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// write "Name"
err = en.Append(0x83, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Name)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
// write "Created"
err = en.Append(0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.Created)
if err != nil {
err = msgp.WrapError(err, "Created")
return
}
// write "LockEnabled"
err = en.Append(0xab, 0x4c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteBool(z.LockEnabled)
if err != nil {
err = msgp.WrapError(err, "LockEnabled")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z bucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// string "Name"
o = append(o, 0x83, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name)
// string "Created"
o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.Created)
// string "LockEnabled"
o = append(o, 0xab, 0x4c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64)
o = msgp.AppendBool(o, z.LockEnabled)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *bucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Name":
z.Name, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "Created":
z.Created, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Created")
return
}
case "LockEnabled":
z.LockEnabled, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LockEnabled")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z bucketMetadata) Msgsize() (s int) {
s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize
return
}

cmd/bucket-meta_gen_test.go (new file, 123 lines)
View File

@@ -0,0 +1,123 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalbucketMetadata(t *testing.T) {
v := bucketMetadata{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgbucketMetadata(b *testing.B) {
v := bucketMetadata{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgbucketMetadata(b *testing.B) {
v := bucketMetadata{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalbucketMetadata(b *testing.B) {
v := bucketMetadata{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodebucketMetadata(t *testing.T) {
v := bucketMetadata{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodebucketMetadata Msgsize() is inaccurate")
}
vn := bucketMetadata{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodebucketMetadata(b *testing.B) {
v := bucketMetadata{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodebucketMetadata(b *testing.B) {
v := bucketMetadata{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}

View File

@@ -22,7 +22,6 @@ import (
"errors"
"io"
"net/http"
"net/url"
"path"
"reflect"
"time"
@@ -116,7 +115,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
// With newer config disallowing changing / turning off
// notification targets without removing ARN in notification
// configuration we won't see this problem anymore.
if reflect.DeepEqual(queue.ARN, arnErr.ARN) {
if reflect.DeepEqual(queue.ARN, arnErr.ARN) && i < len(config.QueueList) {
config.QueueList = append(config.QueueList[:i],
config.QueueList[i+1:]...)
}
@@ -282,16 +281,13 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
w.Header().Set(xhttp.ContentType, "text/event-stream")
doneCh := make(chan struct{})
defer close(doneCh)
// Listen Publisher and peer-listen-client use nonblocking sends and hence do not wait for slow receivers.
// Use buffered channel to take care of burst sends or slow w.Write()
listenCh := make(chan interface{}, 4000)
peers := getRestClients(globalEndpoints)
peers := newPeerRestClients(globalEndpoints)
globalHTTPListen.Subscribe(listenCh, doneCh, func(evI interface{}) bool {
globalHTTPListen.Subscribe(listenCh, ctx.Done(), func(evI interface{}) bool {
ev, ok := evI.(event.Event)
if !ok {
return false
@@ -299,18 +295,14 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
if ev.S3.Bucket.Name != values.Get(peerRESTListenBucket) {
return false
}
objectName, uerr := url.QueryUnescape(ev.S3.Object.Key)
if uerr != nil {
objectName = ev.S3.Object.Key
}
return len(rulesMap.Match(ev.EventName, objectName).ToSlice()) != 0
return rulesMap.MatchSimple(ev.EventName, ev.S3.Object.Key)
})
for _, peer := range peers {
if peer == nil {
continue
}
peer.Listen(listenCh, doneCh, values)
peer.Listen(listenCh, ctx.Done(), values)
}
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
@@ -336,7 +328,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
return
}
w.(http.Flusher).Flush()
case <-GlobalServiceDoneCh:
case <-ctx.Done():
return
}
}
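
The comment on listenCh describes the publish side: the event publisher never blocks on a slow listener, which is why the channel is buffered (4000) to absorb bursts. The nonblocking-send idiom that behavior relies on, in isolation:

package main

import "fmt"

func main() {
	listenCh := make(chan string, 4) // buffered to absorb bursts
	publish := func(ev string) {
		select {
		case listenCh <- ev:
		default:
			fmt.Println("dropped:", ev) // receiver too slow and buffer full: drop, never block
		}
	}
	for i := 0; i < 6; i++ {
		publish(fmt.Sprintf("event-%d", i))
	}
	fmt.Println("queued:", len(listenCh))
}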

View File

@@ -18,7 +18,6 @@ package cmd
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
@@ -103,7 +102,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
credentials auth.Credentials, t *testing.T) {
bucketName1 := fmt.Sprintf("%s-1", bucketName)
if err := obj.MakeBucketWithLocation(context.Background(), bucketName1, ""); err != nil {
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, ""); err != nil {
t.Fatal(err)
}

View File

@@ -17,11 +17,11 @@
package cmd
import (
"context"
"crypto/x509"
"encoding/gob"
"errors"
"net"
"os"
"path/filepath"
"strings"
"time"
@@ -41,7 +41,7 @@ func init() {
logger.RegisterError(config.FmtError)
// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(context.Background())
globalConsoleSys = NewConsoleLogger(GlobalContext)
logger.AddTarget(globalConsoleSys)
gob.Register(StorageErr(""))
@@ -149,6 +149,12 @@ func handleCommonCmdArgs(ctx *cli.Context) {
globalCLIContext.Addr = ctx.String("address")
}
// Check "no-compat" flag from command line argument.
globalCLIContext.StrictS3Compat = true
if ctx.IsSet("no-compat") || ctx.GlobalIsSet("no-compat") {
globalCLIContext.StrictS3Compat = false
}
// Set all config, certs and CAs directories.
var configSet, certsSet bool
globalConfigDir, configSet = newConfigDirFromCtx(ctx, "config-dir", defaultConfigDir.Get)
@@ -164,13 +170,17 @@ func handleCommonCmdArgs(ctx *cli.Context) {
globalCertsCADir = &ConfigDir{path: filepath.Join(globalCertsDir.Get(), certsCADir)}
logger.FatalIf(mkdirAllIgnorePerm(globalCertsCADir.Get()), "Unable to create certs CA directory at %s", globalCertsCADir.Get())
// Check "compat" flag from command line argument.
globalCLIContext.StrictS3Compat = ctx.IsSet("compat") || ctx.GlobalIsSet("compat")
}
func handleCommonEnvVars() {
var err error
wormEnabled, err := config.LookupWorm()
if err != nil {
logger.Fatal(config.ErrInvalidWormValue(err), "Invalid worm configuration")
}
if wormEnabled {
logger.Fatal(errors.New("WORM is deprecated"), "global MINIO_WORM support is removed, please downgrade your server or migrate to https://github.com/minio/minio/tree/master/docs/retention")
}
globalBrowserEnabled, err = config.ParseBool(env.Get(config.EnvBrowser, config.EnableOn))
if err != nil {
logger.Fatal(config.ErrInvalidBrowserValue(err), "Invalid MINIO_BROWSER value in environment variable")
@@ -226,9 +236,15 @@ func handleCommonEnvVars() {
globalConfigEncrypted = true
}
globalWORMEnabled, err = config.LookupWorm()
if err != nil {
logger.Fatal(config.ErrInvalidWormValue(err), "Invalid worm configuration")
if env.IsSet(config.EnvAccessKeyOld) && env.IsSet(config.EnvSecretKeyOld) {
oldCred, err := auth.CreateCredentials(env.Get(config.EnvAccessKeyOld, ""), env.Get(config.EnvSecretKeyOld, ""))
if err != nil {
logger.Fatal(config.ErrInvalidCredentials(err),
"Unable to validate the old credentials inherited from the shell environment")
}
globalOldCred = oldCred
os.Unsetenv(config.EnvAccessKeyOld)
os.Unsetenv(config.EnvSecretKeyOld)
}
}
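
The new block rotates credentials inherited from the shell: if the old access/secret pair validates, it is kept in memory as globalOldCred and immediately scrubbed from the process environment so child processes never see it. A reduced sketch, assuming config.EnvAccessKeyOld and config.EnvSecretKeyOld map to MINIO_ACCESS_KEY_OLD and MINIO_SECRET_KEY_OLD, with the auth validation elided:

package main

import (
	"fmt"
	"os"
)

func main() {
	oldKey, oldSecret := os.Getenv("MINIO_ACCESS_KEY_OLD"), os.Getenv("MINIO_SECRET_KEY_OLD")
	if oldKey != "" && oldSecret != "" {
		// ...validate with auth.CreateCredentials in the real code...
		os.Unsetenv("MINIO_ACCESS_KEY_OLD") // scrub so the pair is read exactly once
		os.Unsetenv("MINIO_SECRET_KEY_OLD")
		fmt.Println("old credentials loaded for key rotation")
	}
}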

View File

@@ -50,7 +50,11 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]b
}
func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
return objAPI.DeleteObject(ctx, minioMetaBucket, configFile)
err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile)
if err != nil && isErrObjectNotFound(err) {
return errConfigNotFound
}
return err
}
func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
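
deleteConfig now translates the storage layer's object-not-found into the config layer's errConfigNotFound, so callers check a single sentinel regardless of backend. The translation pattern, sketched with stand-in sentinels and errors.Is in place of MinIO's isErrObjectNotFound:

package main

import (
	"errors"
	"fmt"
)

var (
	errObjectNotFound = errors.New("object not found") // stand-in storage error
	errConfigNotFound = errors.New("config not found") // config-layer sentinel
)

func deleteConfig(del func(string) error, file string) error {
	err := del(file)
	if err != nil && errors.Is(err, errObjectNotFound) {
		return errConfigNotFound // callers match on this one error
	}
	return err
}

func main() {
	err := deleteConfig(func(string) error { return errObjectNotFound }, "bucket.json")
	fmt.Println(errors.Is(err, errConfigNotFound)) // true
}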

View File

@@ -17,16 +17,15 @@
package cmd
import (
"context"
"fmt"
"strings"
"sync"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/config/api"
"github.com/minio/minio/cmd/config/cache"
"github.com/minio/minio/cmd/config/compress"
"github.com/minio/minio/cmd/config/etcd"
xetcd "github.com/minio/minio/cmd/config/etcd"
"github.com/minio/minio/cmd/config/etcd/dns"
xldap "github.com/minio/minio/cmd/config/identity/ldap"
"github.com/minio/minio/cmd/config/identity/openid"
@@ -50,6 +49,7 @@ func initHelp() {
config.IdentityOpenIDSubSys: openid.DefaultKVS,
config.PolicyOPASubSys: opa.DefaultKVS,
config.RegionSubSys: config.DefaultRegionKVS,
config.APISubSys: api.DefaultKVS,
config.CredentialsSubSys: config.DefaultCredentialKVS,
config.KmsVaultSubSys: crypto.DefaultVaultKVS,
config.KmsKesSubSys: crypto.DefaultKesKVS,
@@ -102,6 +102,10 @@ func initHelp() {
Key: config.KmsKesSubSys,
Description: "enable external MinIO key encryption service",
},
config.HelpKV{
Key: config.APISubSys,
Description: "manage global HTTP API call specific features, such as throttling, authentication types, etc.",
},
config.HelpKV{
Key: config.LoggerWebhookSubSys,
Description: "send server logs to webhook endpoints",
@@ -176,6 +180,7 @@ func initHelp() {
var helpMap = map[string]config.HelpKVS{
"": helpSubSys, // Help for all sub-systems.
config.RegionSubSys: config.RegionHelp,
config.APISubSys: api.Help,
config.StorageClassSubSys: storageclass.Help,
config.EtcdSubSys: etcd.Help,
config.CacheSubSys: cache.Help,
@@ -223,6 +228,10 @@ func validateConfig(s config.Config) error {
return err
}
if _, err := api.LookupConfig(s[config.APISubSys][config.Default]); err != nil {
return err
}
if globalIsXL {
if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default],
globalXLSetDriveCount); err != nil {
@@ -252,7 +261,7 @@ func validateConfig(s config.Config) error {
}
}
{
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewCustomHTTPTransport())
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
if err != nil {
return err
}
@@ -270,17 +279,27 @@ func validateConfig(s config.Config) error {
}
if _, err := openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
NewCustomHTTPTransport(), xhttp.DrainBody); err != nil {
NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
return err
}
if _, err := xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default],
globalRootCAs); err != nil {
return err
{
cfg, err := xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default],
globalRootCAs)
if err != nil {
return err
}
if cfg.Enabled {
conn, cerr := cfg.Connect()
if cerr != nil {
return cerr
}
conn.Close()
}
}
if _, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
NewCustomHTTPTransport(), xhttp.DrainBody); err != nil {
NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
return err
}
@@ -288,12 +307,12 @@ func validateConfig(s config.Config) error {
return err
}
return notify.TestNotificationTargets(s, GlobalServiceDoneCh, NewCustomHTTPTransport(),
return notify.TestNotificationTargets(s, GlobalContext.Done(), NewGatewayHTTPTransport(),
globalNotificationSys.ConfiguredTargetIDs())
}
func lookupConfigs(s config.Config) {
ctx := context.Background()
ctx := GlobalContext
var err error
if !globalActiveCred.IsValid() {
@@ -304,13 +323,13 @@ func lookupConfigs(s config.Config) {
}
}
etcdCfg, err := xetcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
}
if etcdCfg.Enabled {
globalEtcdClient, err = xetcd.New(etcdCfg)
globalEtcdClient, err = etcd.New(etcdCfg)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
}
@@ -341,6 +360,18 @@ func lookupConfigs(s config.Config) {
logger.LogIf(ctx, fmt.Errorf("Invalid region configuration: %w", err))
}
apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
}
apiRequestsMax := apiConfig.APIRequestsMax
if len(globalEndpoints.Hosts()) > 0 {
apiRequestsMax /= len(globalEndpoints.Hosts())
}
globalAPIThrottling.init(apiRequestsMax, apiConfig.APIRequestsDeadline)
if globalIsXL {
globalStorageClass, err = storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default],
globalXLSetDriveCount)
@@ -363,7 +394,7 @@ func lookupConfigs(s config.Config) {
}
}
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewCustomHTTPTransport())
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to setup KMS config: %w", err))
}
@@ -382,13 +413,13 @@ func lookupConfigs(s config.Config) {
}
globalOpenIDConfig, err = openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
NewCustomHTTPTransport(), xhttp.DrainBody)
NewGatewayHTTPTransport(), xhttp.DrainBody)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err))
}
opaCfg, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
NewCustomHTTPTransport(), xhttp.DrainBody)
NewGatewayHTTPTransport(), xhttp.DrainBody)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize OPA: %w", err))
}
@@ -413,18 +444,37 @@ func lookupConfigs(s config.Config) {
for _, l := range loggerCfg.HTTP {
if l.Enabled {
// Enable http logging
logger.AddTarget(http.New(l.Endpoint, loggerUserAgent, string(logger.All), NewCustomHTTPTransport()))
logger.AddTarget(
http.New(http.WithEndpoint(l.Endpoint),
http.WithAuthToken(l.AuthToken),
http.WithUserAgent(loggerUserAgent),
http.WithLogKind(string(logger.All)),
http.WithTransport(NewGatewayHTTPTransport()),
),
)
}
}
for _, l := range loggerCfg.Audit {
if l.Enabled {
// Enable http audit logging
logger.AddAuditTarget(http.New(l.Endpoint, loggerUserAgent, string(logger.All), NewCustomHTTPTransport()))
logger.AddAuditTarget(
http.New(http.WithEndpoint(l.Endpoint),
http.WithAuthToken(l.AuthToken),
http.WithUserAgent(loggerUserAgent),
http.WithLogKind(string(logger.All)),
http.WithTransport(NewGatewayHTTPTransport()),
),
)
}
}
globalConfigTargetList, err = notify.GetNotificationTargets(s, GlobalServiceDoneCh, NewCustomHTTPTransport())
globalConfigTargetList, err = notify.GetNotificationTargets(s, GlobalContext.Done(), NewGatewayHTTPTransport())
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
}
globalEnvTargetList, err = notify.GetNotificationTargets(newServerConfig(), GlobalContext.Done(), NewGatewayHTTPTransport())
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
}
@@ -525,11 +575,11 @@ func newSrvConfig(objAPI ObjectLayer) error {
globalServerConfigMu.Unlock()
// Save config into file.
return saveServerConfig(context.Background(), objAPI, globalServerConfig)
return saveServerConfig(GlobalContext, objAPI, globalServerConfig)
}
func getValidConfig(objAPI ObjectLayer) (config.Config, error) {
return readServerConfig(context.Background(), objAPI)
return readServerConfig(GlobalContext, objAPI)
}
// loadConfig - loads a new config from disk, overrides params
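The api sub-system hunk above divides the configured requests_max across all hosts before initializing the throttle, so the value acts as a cluster-wide budget rather than a per-node one. A minimal sketch of that split with a hypothetical perHostLimit helper (integer division, matching apiRequestsMax /= len(globalEndpoints.Hosts()) above):

package main

import "fmt"

// perHostLimit divides a cluster-wide request budget across hosts.
// requestsMax == 0 leaves throttling disabled, mirroring the "0"
// default of the api sub-system.
func perHostLimit(requestsMax, hostCount int) int {
	if requestsMax == 0 || hostCount == 0 {
		return requestsMax
	}
	return requestsMax / hostCount
}

func main() {
	fmt.Println(perHostLimit(1600, 4)) // 400 requests admitted per node
}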

View File

@@ -21,15 +21,13 @@ import (
"context"
"errors"
"fmt"
"os"
"strings"
"time"
"unicode/utf8"
etcd "github.com/coreos/etcd/clientv3"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
)
@@ -50,12 +48,17 @@ func handleEncryptedConfigBackend(objAPI ObjectLayer, server bool) error {
// of the object layer.
retryTimerCh := newRetryTimerSimple(doneCh)
var stop bool
rquorum := InsufficientReadQuorum{}
wquorum := InsufficientWriteQuorum{}
for !stop {
select {
case <-retryTimerCh:
if encrypted, err = checkBackendEncrypted(objAPI); err != nil {
if err == errDiskNotFound ||
strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) {
if errors.Is(err, errDiskNotFound) ||
errors.As(err, &rquorum) ||
isErrBucketNotFound(err) {
logger.Info("Waiting for config backend to be encrypted..")
continue
}
@@ -88,11 +91,6 @@ func handleEncryptedConfigBackend(objAPI ObjectLayer, server bool) error {
}
}
activeCredOld, err := getOldCreds()
if err != nil {
return err
}
doneCh = make(chan struct{})
defer close(doneCh)
@@ -106,10 +104,11 @@ func handleEncryptedConfigBackend(objAPI ObjectLayer, server bool) error {
select {
case <-retryTimerCh:
// Migrate IAM configuration
if err = migrateConfigPrefixToEncrypted(objAPI, activeCredOld, encrypted); err != nil {
if err == errDiskNotFound ||
strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
if err = migrateConfigPrefixToEncrypted(objAPI, globalOldCred, encrypted); err != nil {
if errors.Is(err, errDiskNotFound) ||
errors.As(err, &rquorum) ||
errors.As(err, &wquorum) ||
isErrBucketNotFound(err) {
logger.Info("Waiting for config backend to be encrypted..")
continue
}
@@ -140,7 +139,7 @@ func checkBackendEtcdEncrypted(ctx context.Context, client *etcd.Client) (bool,
}
func checkBackendEncrypted(objAPI ObjectLayer) (bool, error) {
data, err := readConfig(context.Background(), objAPI, backendEncryptedFile)
data, err := readConfig(GlobalContext, objAPI, backendEncryptedFile)
if err != nil && err != errConfigNotFound {
return false, err
}
@@ -164,25 +163,7 @@ func decryptData(edata []byte, creds ...auth.Credentials) ([]byte, error) {
return data, err
}
func getOldCreds() (activeCredOld auth.Credentials, err error) {
accessKeyOld := env.Get(config.EnvAccessKeyOld, "")
secretKeyOld := env.Get(config.EnvSecretKeyOld, "")
if accessKeyOld != "" && secretKeyOld != "" {
activeCredOld, err = auth.CreateCredentials(accessKeyOld, secretKeyOld)
if err != nil {
return activeCredOld, err
}
// Once we have obtained the rotating creds
os.Unsetenv(config.EnvAccessKeyOld)
os.Unsetenv(config.EnvSecretKeyOld)
}
return activeCredOld, nil
}
func migrateIAMConfigsEtcdToEncrypted(client *etcd.Client) error {
ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
defer cancel()
func migrateIAMConfigsEtcdToEncrypted(ctx context.Context, client *etcd.Client) error {
encrypted, err := checkBackendEtcdEncrypted(ctx, client)
if err != nil {
return err
@@ -206,20 +187,15 @@ func migrateIAMConfigsEtcdToEncrypted(client *etcd.Client) error {
}
}
activeCredOld, err := getOldCreds()
if err != nil {
return err
}
if encrypted {
// No key rotation requested, and backend is
// already encrypted. We proceed without migration.
if !activeCredOld.IsValid() {
if !globalOldCred.IsValid() {
return nil
}
// No real reason to rotate if old and new creds are same.
if activeCredOld.Equal(globalActiveCred) {
if globalOldCred.Equal(globalActiveCred) {
return nil
}
@@ -228,7 +204,10 @@ func migrateIAMConfigsEtcdToEncrypted(client *etcd.Client) error {
logger.Info("Attempting encryption of all IAM users and policies on etcd")
}
r, err := client.Get(ctx, minioConfigPrefix, etcd.WithPrefix(), etcd.WithKeysOnly())
listCtx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
r, err := client.Get(listCtx, minioConfigPrefix, etcd.WithPrefix(), etcd.WithKeysOnly())
if err != nil {
return err
}
@@ -254,8 +233,8 @@ func migrateIAMConfigsEtcdToEncrypted(client *etcd.Client) error {
var data []byte
// Is rotating of creds requested?
if activeCredOld.IsValid() {
data, err = decryptData(cdata, activeCredOld, globalActiveCred)
if globalOldCred.IsValid() {
data, err = decryptData(cdata, globalOldCred, globalActiveCred)
if err != nil {
if err == madmin.ErrMaliciousData {
return config.ErrInvalidRotatingCredentialsBackendEncrypted(nil)
@@ -285,7 +264,7 @@ func migrateIAMConfigsEtcdToEncrypted(client *etcd.Client) error {
}
}
if encrypted && globalActiveCred.IsValid() && activeCredOld.IsValid() {
if encrypted && globalActiveCred.IsValid() && globalOldCred.IsValid() {
logger.Info("Rotation complete, please make sure to unset MINIO_ACCESS_KEY_OLD and MINIO_SECRET_KEY_OLD envs")
}
@@ -309,14 +288,14 @@ func migrateConfigPrefixToEncrypted(objAPI ObjectLayer, activeCredOld auth.Crede
logger.Info("Attempting encryption of all config, IAM users and policies on MinIO backend")
}
err := saveConfig(context.Background(), objAPI, backendEncryptedFile, backendEncryptedMigrationIncomplete)
err := saveConfig(GlobalContext, objAPI, backendEncryptedFile, backendEncryptedMigrationIncomplete)
if err != nil {
return err
}
var marker string
for {
res, err := objAPI.ListObjects(context.Background(), minioMetaBucket,
res, err := objAPI.ListObjects(GlobalContext, minioMetaBucket,
minioConfigPrefix, marker, "", maxObjectList)
if err != nil {
return err
@@ -327,7 +306,7 @@ func migrateConfigPrefixToEncrypted(objAPI ObjectLayer, activeCredOld auth.Crede
cencdata []byte
)
cdata, err = readConfig(context.Background(), objAPI, obj.Name)
cdata, err = readConfig(GlobalContext, objAPI, obj.Name)
if err != nil {
return err
}
@@ -360,7 +339,7 @@ func migrateConfigPrefixToEncrypted(objAPI ObjectLayer, activeCredOld auth.Crede
return err
}
if err = saveConfig(context.Background(), objAPI, obj.Name, cencdata); err != nil {
if err = saveConfig(GlobalContext, objAPI, obj.Name, cencdata); err != nil {
return err
}
}
@@ -376,5 +355,5 @@ func migrateConfigPrefixToEncrypted(objAPI ObjectLayer, activeCredOld auth.Crede
logger.Info("Rotation complete, please make sure to unset MINIO_ACCESS_KEY_OLD and MINIO_SECRET_KEY_OLD envs")
}
return saveConfig(context.Background(), objAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
return saveConfig(GlobalContext, objAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
}
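The retry loops above replace strings.Contains checks on err.Error() with errors.Is and errors.As, which match wrapped errors by identity or type instead of by message text. A minimal sketch of the pattern, with a stand-in quorum error type (not the real MinIO definition):

package main

import (
	"errors"
	"fmt"
)

// stand-in for the server's InsufficientReadQuorum, for illustration only
type InsufficientReadQuorum struct{}

func (InsufficientReadQuorum) Error() string { return "insufficient read quorum" }

func main() {
	// the quorum error typically arrives wrapped by a caller
	err := fmt.Errorf("loading config: %w", InsufficientReadQuorum{})

	// errors.As walks the wrap chain and matches by type, so the
	// fragile strings.Contains(err.Error(), ...) comparison goes away.
	var rquorum InsufficientReadQuorum
	if errors.As(err, &rquorum) {
		fmt.Println("backend not ready, retrying..")
	}
}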

View File

@@ -18,7 +18,6 @@ package cmd
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
@@ -2430,7 +2429,7 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
defer func() {
if err == nil {
if globalEtcdClient != nil {
deleteKeyEtcd(context.Background(), globalEtcdClient, configFile)
deleteKeyEtcd(GlobalContext, globalEtcdClient, configFile)
} else {
// Rename config.json to config.json.deprecated only upon
// success of this function.
@@ -2440,7 +2439,7 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
}()
// Verify if backend already has the file (after holding lock)
if err = checkConfig(context.Background(), objAPI, configFile); err != errConfigNotFound {
if err = checkConfig(GlobalContext, objAPI, configFile); err != errConfigNotFound {
return err
} // if errConfigNotFound proceed to migrate..
@@ -2466,7 +2465,7 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
// Initialize the server config, if no config exists.
return newSrvConfig(objAPI)
}
return saveServerConfig(context.Background(), objAPI, config)
return saveServerConfig(GlobalContext, objAPI, config)
}
// Migrates '.minio.sys/config.json' to v33.
@@ -2502,7 +2501,7 @@ func migrateMinioSysConfig(objAPI ObjectLayer) error {
}
func checkConfigVersion(objAPI ObjectLayer, configFile string, version string) (bool, []byte, error) {
data, err := readConfig(context.Background(), objAPI, configFile)
data, err := readConfig(GlobalContext, objAPI, configFile)
if err != nil {
return false, nil, err
}
@@ -2548,7 +2547,7 @@ func migrateV27ToV28MinioSys(objAPI ObjectLayer) error {
cfg.Version = "28"
cfg.KMS = crypto.KMSConfig{}
if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from 27 to 28. %w", err)
}
@@ -2575,7 +2574,7 @@ func migrateV28ToV29MinioSys(objAPI ObjectLayer) error {
}
cfg.Version = "29"
if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from 28 to 29. %w", err)
}
@@ -2607,7 +2606,7 @@ func migrateV29ToV30MinioSys(objAPI ObjectLayer) error {
cfg.Compression.Extensions = strings.Split(compress.DefaultExtensions, config.ValueSeparator)
cfg.Compression.MimeTypes = strings.Split(compress.DefaultMimeTypes, config.ValueSeparator)
if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from 29 to 30. %w", err)
}
@@ -2642,7 +2641,7 @@ func migrateV30ToV31MinioSys(objAPI ObjectLayer) error {
AuthToken: "",
}
if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from 30 to 31. %w", err)
}
@@ -2672,7 +2671,7 @@ func migrateV31ToV32MinioSys(objAPI ObjectLayer) error {
cfg.Notify.NSQ = make(map[string]target.NSQArgs)
cfg.Notify.NSQ["1"] = target.NSQArgs{}
if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from 31 to 32. %w", err)
}
@@ -2700,7 +2699,7 @@ func migrateV32ToV33MinioSys(objAPI ObjectLayer) error {
cfg.Version = "33"
if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from '32' to '33' . %w", err)
}
@@ -2777,7 +2776,7 @@ func migrateMinioSysConfigToKV(objAPI ObjectLayer) error {
notify.SetNotifyWebhook(newCfg, k, args)
}
if err = saveServerConfig(context.Background(), objAPI, newCfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, newCfg); err != nil {
return err
}

View File

@@ -186,16 +186,14 @@ func (sys *ConfigSys) Load(objAPI ObjectLayer) error {
}
// WatchConfigNASDisk - watches the NAS disk on a periodic basis.
func (sys *ConfigSys) WatchConfigNASDisk(objAPI ObjectLayer) {
func (sys *ConfigSys) WatchConfigNASDisk(ctx context.Context, objAPI ObjectLayer) {
configInterval := globalRefreshIAMInterval
watchDisk := func() {
ticker := time.NewTicker(configInterval)
defer ticker.Stop()
for {
select {
case <-GlobalServiceDoneCh:
case <-ctx.Done():
return
case <-ticker.C:
case <-time.After(configInterval):
loadConfig(objAPI)
}
}
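WatchConfigNASDisk now takes a caller-owned context instead of reading the global done channel, and time.After re-arms the delay only after loadConfig returns, where a ticker could fire again mid-load. A minimal sketch of the resulting loop shape (names are illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

func watchEvery(ctx context.Context, interval time.Duration, load func()) {
	for {
		select {
		case <-ctx.Done():
			return // caller cancels the context to stop the watcher
		case <-time.After(interval):
			load() // the next wait starts only after this returns
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()
	watchEvery(ctx, 100*time.Millisecond, func() { fmt.Println("reload config") })
}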

cmd/config/api/api.go (new file, 91 lines)
View File

@@ -0,0 +1,91 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package api
import (
"encoding/json"
"errors"
"strconv"
"time"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/pkg/env"
)
const (
apiRequestsMax = "requests_max"
apiRequestsDeadline = "requests_deadline"
)
// DefaultKVS - default API sub-system config
var (
DefaultKVS = config.KVS{
config.KV{
Key: apiRequestsMax,
Value: "0",
},
config.KV{
Key: apiRequestsDeadline,
Value: "10s",
},
}
)
// Config - API sub-system configuration
type Config struct {
APIRequestsMax int `json:"requests_max"`
APIRequestsDeadline time.Duration `json:"requests_deadline"`
}
// UnmarshalJSON - custom JSON unmarshaler for Config.
func (sCfg *Config) UnmarshalJSON(data []byte) error {
type Alias Config
aux := &struct {
*Alias
}{
Alias: (*Alias)(sCfg),
}
return json.Unmarshal(data, &aux)
}
// LookupConfig - lookup api config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS) (cfg Config, err error) {
if err = config.CheckValidKeys(config.APISubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}
// Check environment variables parameters
requestsMax, err := strconv.Atoi(env.Get(config.EnvAPIRequestsMax, kvs.Get(apiRequestsMax)))
if err != nil {
return cfg, err
}
if requestsMax < 0 {
return cfg, errors.New("invalid API max requests value")
}
requestsDeadline, err := time.ParseDuration(env.Get(config.EnvAPIRequestsDeadline, kvs.Get(apiRequestsDeadline)))
if err != nil {
return cfg, err
}
cfg = Config{
APIRequestsMax: requestsMax,
APIRequestsDeadline: requestsDeadline,
}
return cfg, nil
}
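LookupConfig resolves each key as env.Get(ENV, kvs.Get(key)), so an exported MINIO_API_* variable overrides the stored value before parsing. A small sketch of the same validation on plain strings (parseAPIConfig is illustrative, not the exported API):

package main

import (
	"fmt"
	"strconv"
	"time"
)

func parseAPIConfig(requestsMax, requestsDeadline string) (int, time.Duration, error) {
	n, err := strconv.Atoi(requestsMax)
	if err != nil {
		return 0, 0, err
	}
	if n < 0 {
		return 0, 0, fmt.Errorf("invalid API max requests value %q", requestsMax)
	}
	d, err := time.ParseDuration(requestsDeadline)
	if err != nil {
		return 0, 0, err
	}
	return n, d, nil
}

func main() {
	n, d, err := parseAPIConfig("1600", "1m")
	fmt.Println(n, d, err) // 1600 1m0s <nil>
}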

cmd/config/api/help.go (new file, 37 lines)
View File

@@ -0,0 +1,37 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package api
import "github.com/minio/minio/cmd/config"
// Help template for the api sub-system.
var (
Help = config.HelpKVS{
config.HelpKV{
Key: apiRequestsMax,
Description: `set the maximum number of concurrent requests, e.g. "1600"`,
Optional: true,
Type: "number",
},
config.HelpKV{
Key: apiRequestsDeadline,
Description: `set the deadline for API requests waiting to be processed e.g. "1m"`,
Optional: true,
Type: "duration",
},
}
)

cmd/config/certsinfo.go (new file, 92 lines)
View File

@@ -0,0 +1,92 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
import (
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"net/http"
"strings"
color "github.com/minio/minio/pkg/color"
)
// Extra ASN1 OIDs that we may need to handle
var (
oidEmailAddress = []int{1, 2, 840, 113549, 1, 9, 1}
)
// printName prints the fields of a distinguished name, which include such
// things as its common name and locality.
func printName(names []pkix.AttributeTypeAndValue, buf *strings.Builder) []string {
values := []string{}
for _, name := range names {
oid := name.Type
if len(oid) == 4 && oid[0] == 2 && oid[1] == 5 && oid[2] == 4 {
switch oid[3] {
case 3:
values = append(values, fmt.Sprintf("CN=%s", name.Value))
case 6:
values = append(values, fmt.Sprintf("C=%s", name.Value))
case 8:
values = append(values, fmt.Sprintf("ST=%s", name.Value))
case 10:
values = append(values, fmt.Sprintf("O=%s", name.Value))
case 11:
values = append(values, fmt.Sprintf("OU=%s", name.Value))
default:
values = append(values, fmt.Sprintf("UnknownOID=%s", name.Type.String()))
}
} else if oid.Equal(oidEmailAddress) {
values = append(values, fmt.Sprintf("emailAddress=%s", name.Value))
} else {
values = append(values, fmt.Sprintf("UnknownOID=%s", name.Type.String()))
}
}
if len(values) > 0 {
buf.WriteString(values[0])
for i := 1; i < len(values); i++ {
buf.WriteString(", " + values[i])
}
buf.WriteString("\n")
}
return values
}
// CertificateText returns a human-readable string representation
// of the certificate cert. The format is similar to the OpenSSL
// way of printing certificates (not identical).
func CertificateText(cert *x509.Certificate) string {
var buf strings.Builder
buf.WriteString(color.Blue("\nCertificate:\n"))
if cert.SignatureAlgorithm != x509.UnknownSignatureAlgorithm {
buf.WriteString(color.Blue("%4sSignature Algorithm: ", "") + color.Bold(fmt.Sprintf("%s\n", cert.SignatureAlgorithm)))
}
// Issuer information
buf.WriteString(color.Blue("%4sIssuer: ", ""))
printName(cert.Issuer.Names, &buf)
// Validity information
buf.WriteString(color.Blue("%4sValidity\n", ""))
buf.WriteString(color.Bold(fmt.Sprintf("%8sNot Before: %s\n", "", cert.NotBefore.Format(http.TimeFormat))))
buf.WriteString(color.Bold(fmt.Sprintf("%8sNot After : %s\n", "", cert.NotAfter.Format(http.TimeFormat))))
return buf.String()
}
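A usage sketch for CertificateText: decode a PEM certificate and print the OpenSSL-style summary (the file path is hypothetical):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io/ioutil"

	"github.com/minio/minio/cmd/config"
)

func main() {
	pemBytes, err := ioutil.ReadFile("/etc/minio/certs/public.crt") // hypothetical path
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	fmt.Print(config.CertificateText(cert))
}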

View File

@@ -70,6 +70,7 @@ const (
RegionSubSys = "region"
EtcdSubSys = "etcd"
StorageClassSubSys = "storage_class"
APISubSys = "api"
CompressionSubSys = "compression"
KmsVaultSubSys = "kms_vault"
KmsKesSubSys = "kms_kes"
@@ -101,6 +102,7 @@ var SubSystems = set.CreateStringSet([]string{
RegionSubSys,
EtcdSubSys,
CacheSubSys,
APISubSys,
StorageClassSubSys,
CompressionSubSys,
KmsVaultSubSys,
@@ -128,6 +130,7 @@ var SubSystemsSingleTargets = set.CreateStringSet([]string{
RegionSubSys,
EtcdSubSys,
CacheSubSys,
APISubSys,
StorageClassSubSys,
CompressionSubSys,
KmsVaultSubSys,
@@ -196,6 +199,23 @@ func (kvs KVS) Empty() bool {
return len(kvs) == 0
}
// Keys returns the list of keys for the current KVS
func (kvs KVS) Keys() []string {
var keys = make([]string, len(kvs))
var foundComment bool
for i := range kvs {
if kvs[i].Key == madmin.CommentKey {
foundComment = true
}
keys[i] = kvs[i].Key
}
// Comment KV not found, add it explicitly.
if !foundComment {
keys = append(keys, madmin.CommentKey)
}
return keys
}
func (kvs KVS) String() string {
var s strings.Builder
for _, kv := range kvs {
@@ -581,9 +601,20 @@ func (c Config) SetKVS(s string, defaultKVS map[string]KVS) error {
return Errorf("sub-system '%s' only supports single target", subSystemValue[0])
}
tgt := Default
subSys := subSystemValue[0]
if len(subSystemValue) == 2 {
tgt = subSystemValue[1]
}
fields := madmin.KvFields(inputs[1], defaultKVS[subSys].Keys())
if len(fields) == 0 {
return Errorf("sub-system '%s' cannot have empty keys", subSys)
}
var kvs = KVS{}
var prevK string
for _, v := range strings.Fields(inputs[1]) {
for _, v := range fields {
kv := strings.SplitN(v, KvSeparator, 2)
if len(kv) == 0 {
continue
@@ -604,12 +635,6 @@ func (c Config) SetKVS(s string, defaultKVS map[string]KVS) error {
return Errorf("key '%s', cannot have empty value", kv[0])
}
tgt := Default
subSys := subSystemValue[0]
if len(subSystemValue) == 2 {
tgt = subSystemValue[1]
}
_, ok := kvs.Lookup(Enable)
// Check if state is required
_, enableRequired := defaultKVS[subSys].Lookup(Enable)

View File

@@ -17,7 +17,96 @@
package config
import "testing"
import (
"testing"
"github.com/minio/minio/pkg/madmin"
)
func TestKVFields(t *testing.T) {
tests := []struct {
input string
keys []string
expectedFields map[string]struct{}
}{
// No keys present
{
input: "",
keys: []string{"comment"},
expectedFields: map[string]struct{}{},
},
// No keys requested for tokenizing
{
input: `comment="Hi this is my comment ="`,
keys: []string{},
expectedFields: map[string]struct{}{},
},
// Single key requested and present
{
input: `comment="Hi this is my comment ="`,
keys: []string{"comment"},
expectedFields: map[string]struct{}{`comment="Hi this is my comment ="`: {}},
},
// Keys and input order of k=v is same.
{
input: `connection_string="host=localhost port=2832" comment="really long comment"`,
keys: []string{"connection_string", "comment"},
expectedFields: map[string]struct{}{
`connection_string="host=localhost port=2832"`: {},
`comment="really long comment"`: {},
},
},
// Keys with spaces in between
{
input: `enable=on format=namespace connection_string=" host=localhost port=5432 dbname = cesnietor sslmode=disable" table=holicrayoli`,
keys: []string{"enable", "connection_string", "comment", "format", "table"},
expectedFields: map[string]struct{}{
`enable=on`: {},
`format=namespace`: {},
`connection_string=" host=localhost port=5432 dbname = cesnietor sslmode=disable"`: {},
`table=holicrayoli`: {},
},
},
// One of the keys is not present and order of input has changed.
{
input: `comment="really long comment" connection_string="host=localhost port=2832"`,
keys: []string{"connection_string", "comment", "format"},
expectedFields: map[string]struct{}{
`connection_string="host=localhost port=2832"`: {},
`comment="really long comment"`: {},
},
},
// Incorrect delimiter, expected fields should be empty.
{
input: `comment:"really long comment" connection_string:"host=localhost port=2832"`,
keys: []string{"connection_string", "comment"},
expectedFields: map[string]struct{}{},
},
// Incorrect type of input v/s required keys.
{
input: `comme="really long comment" connection_str="host=localhost port=2832"`,
keys: []string{"connection_string", "comment"},
expectedFields: map[string]struct{}{},
},
}
for _, test := range tests {
test := test
t.Run("", func(t *testing.T) {
gotFields := madmin.KvFields(test.input, test.keys)
if len(gotFields) != len(test.expectedFields) {
t.Errorf("Expected keys %d, found %d", len(test.expectedFields), len(gotFields))
}
found := true
for _, field := range gotFields {
_, ok := test.expectedFields[field]
found = found && ok
}
if !found {
t.Errorf("Expected %s, got %s", test.expectedFields, gotFields)
}
})
}
}
func TestValidRegion(t *testing.T) {
tests := []struct {

View File

@@ -33,6 +33,10 @@ const (
EnvPublicIPs = "MINIO_PUBLIC_IPS"
EnvEndpoints = "MINIO_ENDPOINTS"
// API sub-system
EnvAPIRequestsMax = "MINIO_API_REQUESTS_MAX"
EnvAPIRequestsDeadline = "MINIO_API_REQUESTS_DEADLINE"
EnvUpdate = "MINIO_UPDATE"
EnvWorm = "MINIO_WORM" // legacy

View File

@@ -46,7 +46,7 @@ func (u Err) Clone() Err {
}
}
// Return the error message
// Error returns the error message
func (u Err) Error() string {
if u.detail == "" {
if u.msg != "" {

View File

@@ -33,7 +33,7 @@ var (
ErrInvalidErasureSetSize = newErrFn(
"Invalid erasure set size",
"Please check the passed value",
"Erasure set can only accept any of [4, 6, 8, 10, 12, 14, 16] values",
"Erasure set can only accept any of [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] values",
)
ErrInvalidWormValue = newErrFn(

View File

@@ -60,11 +60,7 @@ func (c *CoreDNS) List() (map[string][]SrvRecord, error) {
if record.Key == "" {
continue
}
if _, ok := srvRecords[record.Key]; ok {
srvRecords[record.Key] = append(srvRecords[record.Key], record)
} else {
srvRecords[record.Key] = []SrvRecord{record}
}
srvRecords[record.Key] = append(srvRecords[record.Key], record)
}
}
return srvRecords, nil
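The removed if/else relied on a distinction Go does not require: indexing a missing map key yields the zero value (a nil slice), and append on a nil slice allocates a new one. A minimal demonstration:

package main

import "fmt"

func main() {
	srvRecords := map[string][]string{}
	// First append sees a nil slice and allocates; later appends grow it.
	srvRecords["bucket"] = append(srvRecords["bucket"], "record-1")
	srvRecords["bucket"] = append(srvRecords["bucket"], "record-2")
	fmt.Println(srvRecords) // map[bucket:[record-1 record-2]]
}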

View File

@@ -21,7 +21,8 @@ import (
"crypto/x509"
"errors"
"fmt"
"regexp"
"net"
"strings"
"time"
"github.com/minio/minio/cmd/config"
@@ -44,34 +45,45 @@ type Config struct {
STSExpiryDuration string `json:"stsExpiryDuration"`
// Format string for usernames
UsernameFormat string `json:"usernameFormat"`
UsernameFormat string `json:"usernameFormat"`
UsernameFormats []string `json:"-"`
UsernameSearchFilter string `json:"-"`
UsernameSearchBaseDNS []string `json:"-"`
GroupSearchBaseDN string `json:"groupSearchBaseDN"`
GroupSearchFilter string `json:"groupSearchFilter"`
GroupNameAttribute string `json:"groupNameAttribute"`
GroupSearchBaseDN string `json:"groupSearchBaseDN"`
GroupSearchBaseDNS []string `json:"-"`
GroupSearchFilter string `json:"groupSearchFilter"`
GroupNameAttribute string `json:"groupNameAttribute"`
stsExpiryDuration time.Duration // contains converted value
tlsSkipVerify bool // allows skipping TLS verification
serverInsecure bool // allows plain text connection to LDAP Server
rootCAs *x509.CertPool
}
// LDAP keys and envs.
const (
ServerAddr = "server_addr"
STSExpiry = "sts_expiry"
UsernameFormat = "username_format"
GroupSearchFilter = "group_search_filter"
GroupNameAttribute = "group_name_attribute"
GroupSearchBaseDN = "group_search_base_dn"
TLSSkipVerify = "tls_skip_verify"
ServerAddr = "server_addr"
STSExpiry = "sts_expiry"
UsernameFormat = "username_format"
UsernameSearchFilter = "username_search_filter"
UsernameSearchBaseDN = "username_search_base_dn"
GroupSearchFilter = "group_search_filter"
GroupNameAttribute = "group_name_attribute"
GroupSearchBaseDN = "group_search_base_dn"
TLSSkipVerify = "tls_skip_verify"
ServerInsecure = "server_insecure"
EnvServerAddr = "MINIO_IDENTITY_LDAP_SERVER_ADDR"
EnvSTSExpiry = "MINIO_IDENTITY_LDAP_STS_EXPIRY"
EnvTLSSkipVerify = "MINIO_IDENTITY_LDAP_TLS_SKIP_VERIFY"
EnvUsernameFormat = "MINIO_IDENTITY_LDAP_USERNAME_FORMAT"
EnvGroupSearchFilter = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER"
EnvGroupNameAttribute = "MINIO_IDENTITY_LDAP_GROUP_NAME_ATTRIBUTE"
EnvGroupSearchBaseDN = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN"
EnvServerAddr = "MINIO_IDENTITY_LDAP_SERVER_ADDR"
EnvSTSExpiry = "MINIO_IDENTITY_LDAP_STS_EXPIRY"
EnvTLSSkipVerify = "MINIO_IDENTITY_LDAP_TLS_SKIP_VERIFY"
EnvServerInsecure = "MINIO_IDENTITY_LDAP_SERVER_INSECURE"
EnvUsernameFormat = "MINIO_IDENTITY_LDAP_USERNAME_FORMAT"
EnvUsernameSearchFilter = "MINIO_IDENTITY_LDAP_USERNAME_SEARCH_FILTER"
EnvUsernameSearchBaseDN = "MINIO_IDENTITY_LDAP_USERNAME_SEARCH_BASE_DN"
EnvGroupSearchFilter = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER"
EnvGroupNameAttribute = "MINIO_IDENTITY_LDAP_GROUP_NAME_ATTRIBUTE"
EnvGroupSearchBaseDN = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN"
)
// DefaultKVS - default config for LDAP config
@@ -82,11 +94,15 @@ var (
Value: "",
},
config.KV{
Key: STSExpiry,
Value: "1h",
Key: UsernameFormat,
Value: "",
},
config.KV{
Key: UsernameFormat,
Key: UsernameSearchFilter,
Value: "",
},
config.KV{
Key: UsernameSearchBaseDN,
Value: "",
},
config.KV{
@@ -101,19 +117,146 @@ var (
Key: GroupSearchBaseDN,
Value: "",
},
config.KV{
Key: STSExpiry,
Value: "1h",
},
config.KV{
Key: TLSSkipVerify,
Value: config.EnableOff,
},
config.KV{
Key: ServerInsecure,
Value: config.EnableOff,
},
}
)
const (
dnDelimiter = ";"
)
func getGroups(conn *ldap.Conn, sreq *ldap.SearchRequest) ([]string, error) {
var groups []string
sres, err := conn.Search(sreq)
if err != nil {
return nil, err
}
for _, entry := range sres.Entries {
// We only queried one attribute,
// so we only look up the first one.
groups = append(groups, entry.Attributes[0].Values...)
}
return groups, nil
}
func (l *Config) bind(conn *ldap.Conn, username, password string) ([]string, error) {
var bindDNS = make([]string, len(l.UsernameFormats))
for i, usernameFormat := range l.UsernameFormats {
bindDN := fmt.Sprintf(usernameFormat, username)
// Bind with user credentials to validate the password
if err := conn.Bind(bindDN, password); err != nil {
return nil, err
}
bindDNS[i] = bindDN
}
return bindDNS, nil
}
var standardAttributes = []string{
"givenName",
"sn",
"cn",
"memberOf",
"email",
}
// Bind - binds to LDAP, runs the configured searches, and returns the list of groups.
func (l *Config) Bind(username, password string) ([]string, error) {
conn, err := l.Connect()
if err != nil {
return nil, err
}
defer conn.Close()
bindDNS, err := l.bind(conn, username, password)
if err != nil {
return nil, err
}
var groups []string
if l.UsernameSearchFilter != "" {
for _, userSearchBase := range l.UsernameSearchBaseDNS {
filter := strings.Replace(l.UsernameSearchFilter, "%s",
ldap.EscapeFilter(username), -1)
searchRequest := ldap.NewSearchRequest(
userSearchBase,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
filter,
standardAttributes,
nil,
)
groups, err = getGroups(conn, searchRequest)
if err != nil {
return nil, err
}
}
}
if l.GroupSearchFilter != "" {
for _, groupSearchBase := range l.GroupSearchBaseDNS {
var filters []string
if l.GroupNameAttribute == "" {
filters = []string{strings.Replace(l.GroupSearchFilter, "%s",
ldap.EscapeFilter(username), -1)}
} else {
// With group name attribute specified, make sure to
// include search queries for CN distinguished name
for _, bindDN := range bindDNS {
filters = append(filters, strings.Replace(l.GroupSearchFilter, "%s",
ldap.EscapeFilter(bindDN), -1))
}
}
for _, filter := range filters {
searchRequest := ldap.NewSearchRequest(
groupSearchBase,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
filter,
standardAttributes,
nil,
)
var newGroups []string
newGroups, err = getGroups(conn, searchRequest)
if err != nil {
return nil, err
}
groups = append(groups, newGroups...)
}
}
}
return groups, nil
}
// Connect connects to the LDAP server.
func (l *Config) Connect() (ldapConn *ldap.Conn, err error) {
if l == nil {
// Happens when LDAP is not configured.
return
return nil, errors.New("LDAP is not configured")
}
if _, _, err = net.SplitHostPort(l.ServerAddr); err != nil {
// Use the default LDAP port "636" if none is specified
l.ServerAddr = net.JoinHostPort(l.ServerAddr, "636")
}
if l.serverInsecure {
return ldap.Dial("tcp", l.ServerAddr)
}
return ldap.DialTLS("tcp", l.ServerAddr, &tls.Config{
InsecureSkipVerify: l.tlsSkipVerify,
RootCAs: l.rootCAs,
@@ -154,6 +297,12 @@ func Lookup(kvs config.KVS, rootCAs *x509.CertPool) (l Config, err error) {
l.STSExpiryDuration = v
l.stsExpiryDuration = expDur
}
if v := env.Get(EnvServerInsecure, kvs.Get(ServerInsecure)); v != "" {
l.serverInsecure, err = config.ParseBool(v)
if err != nil {
return l, err
}
}
if v := env.Get(EnvTLSSkipVerify, kvs.Get(TLSSkipVerify)); v != "" {
l.tlsSkipVerify, err = config.ParseBool(v)
if err != nil {
@@ -161,102 +310,44 @@ func Lookup(kvs config.KVS, rootCAs *x509.CertPool) (l Config, err error) {
}
}
if v := env.Get(EnvUsernameFormat, kvs.Get(UsernameFormat)); v != "" {
subs, err := NewSubstituter("username", "test")
if err != nil {
return l, err
if !strings.Contains(v, "%s") {
return l, errors.New("LDAP username format doesn't have '%s' substitution")
}
if _, err := subs.Substitute(v); err != nil {
return l, err
}
l.UsernameFormat = v
l.UsernameFormats = strings.Split(v, dnDelimiter)
} else {
return l, fmt.Errorf("'%s' cannot be empty and must have a value", UsernameFormat)
}
if v := env.Get(EnvUsernameSearchFilter, kvs.Get(UsernameSearchFilter)); v != "" {
if !strings.Contains(v, "%s") {
return l, errors.New("LDAP username search filter doesn't have '%s' substitution")
}
l.UsernameSearchFilter = v
}
if v := env.Get(EnvUsernameSearchBaseDN, kvs.Get(UsernameSearchBaseDN)); v != "" {
l.UsernameSearchBaseDNS = strings.Split(v, dnDelimiter)
}
grpSearchFilter := env.Get(EnvGroupSearchFilter, kvs.Get(GroupSearchFilter))
grpSearchNameAttr := env.Get(EnvGroupNameAttribute, kvs.Get(GroupNameAttribute))
grpSearchBaseDN := env.Get(EnvGroupSearchBaseDN, kvs.Get(GroupSearchBaseDN))
// Either all group params must be set or none must be set.
allNotSet := grpSearchFilter == "" && grpSearchNameAttr == "" && grpSearchBaseDN == ""
allSet := grpSearchFilter != "" && grpSearchNameAttr != "" && grpSearchBaseDN != ""
if !allNotSet && !allSet {
return l, errors.New("All group related parameters must be set")
var allSet bool
if grpSearchFilter != "" {
if grpSearchNameAttr == "" || grpSearchBaseDN == "" {
return l, errors.New("All group related parameters must be set")
}
allSet = true
}
if allSet {
subs, err := NewSubstituter("username", "test", "usernamedn", "test2")
if err != nil {
return l, err
}
if _, err := subs.Substitute(grpSearchFilter); err != nil {
return l, fmt.Errorf("Only username and usernamedn may be substituted in the group search filter string: %s", err)
}
l.GroupSearchFilter = grpSearchFilter
l.GroupNameAttribute = grpSearchNameAttr
subs, err = NewSubstituter("username", "test", "usernamedn", "test2")
if err != nil {
return l, err
}
if _, err := subs.Substitute(grpSearchBaseDN); err != nil {
return l, fmt.Errorf("Only username and usernamedn may be substituted in the base DN string: %s", err)
}
l.GroupSearchBaseDN = grpSearchBaseDN
l.GroupSearchBaseDNS = strings.Split(grpSearchBaseDN, dnDelimiter)
}
l.rootCAs = rootCAs
return l, nil
}
// Substituter - This type is to allow restricted runtime
// substitutions of variables in LDAP configuration items during
// runtime.
type Substituter struct {
vals map[string]string
}
// NewSubstituter - sets up the substituter for usage, for e.g.:
//
// subber := NewSubstituter("username", "john")
func NewSubstituter(v ...string) (Substituter, error) {
if len(v)%2 != 0 {
return Substituter{}, errors.New("Need an even number of arguments")
}
vals := make(map[string]string)
for i := 0; i < len(v); i += 2 {
vals[v[i]] = v[i+1]
}
return Substituter{vals: vals}, nil
}
// Substitute - performs substitution on the given string `t`. Returns
// an error if there are any variables in the input that do not have
// values in the substituter. E.g.:
//
// subber.Substitute("uid=${username},cn=users,dc=example,dc=com")
//
// or
//
// subber.Substitute("uid={username},cn=users,dc=example,dc=com")
//
// returns "uid=john,cn=users,dc=example,dc=com"
//
// whereas:
//
// subber.Substitute("uid=${usernamedn}")
//
// returns an error.
func (s *Substituter) Substitute(t string) (string, error) {
for k, v := range s.vals {
reDollar := regexp.MustCompile(fmt.Sprintf(`\$\{%s\}`, k))
t = reDollar.ReplaceAllLiteralString(t, v)
reFlower := regexp.MustCompile(fmt.Sprintf(`\{%s\}`, k))
t = reFlower.ReplaceAllLiteralString(t, v)
}
// Check if all requested substitutions have been made.
re := regexp.MustCompile(`\{.*\}`)
if re.MatchString(t) {
return "", errors.New("unsupported substitution requested")
}
return t, nil
}
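With username_format now a ";"-separated list, bind() formats the username into every DN template and attempts a bind for each. A sketch of just the expansion step (values are illustrative; the real code calls conn.Bind for each resulting DN):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// two bind DN templates, ";"-separated as in the new config format
	v := "uid=%s,cn=accounts,dc=example,dc=com;cn=%s,ou=people,dc=example,dc=com"
	for _, format := range strings.Split(v, ";") {
		bindDN := fmt.Sprintf(format, "john")
		// bind() would attempt conn.Bind(bindDN, password) here
		fmt.Println(bindDN)
	}
}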

View File

@@ -1,93 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ldap
import (
"testing"
)
func TestSubstituter(t *testing.T) {
tests := []struct {
KV []string
SubstitutableStr string
SubstitutedStr string
ErrExpected bool
}{
{
KV: []string{"username", "john"},
SubstitutableStr: "uid=${username},cn=users,dc=example,dc=com",
SubstitutedStr: "uid=john,cn=users,dc=example,dc=com",
ErrExpected: false,
},
{
KV: []string{"username", "john"},
SubstitutableStr: "uid={username},cn=users,dc=example,dc=com",
SubstitutedStr: "uid=john,cn=users,dc=example,dc=com",
ErrExpected: false,
},
{
KV: []string{"username", "john"},
SubstitutableStr: "(&(objectclass=group)(member=${username}))",
SubstitutedStr: "(&(objectclass=group)(member=john))",
ErrExpected: false,
},
{
KV: []string{"username", "john"},
SubstitutableStr: "(&(objectclass=group)(member={username}))",
SubstitutedStr: "(&(objectclass=group)(member=john))",
ErrExpected: false,
},
{
KV: []string{"username", "john"},
SubstitutableStr: "uid=${{username}},cn=users,dc=example,dc=com",
ErrExpected: true,
},
{
KV: []string{"username", "john"},
SubstitutableStr: "uid=${usernamedn},cn=users,dc=example,dc=com",
ErrExpected: true,
},
{
KV: []string{"username"},
SubstitutableStr: "uid=${usernamedn},cn=users,dc=example,dc=com",
ErrExpected: true,
},
{
KV: []string{"username", "john"},
SubstitutableStr: "(&(objectclass=user)(sAMAccountName={username})(memberOf=CN=myorg,OU=Rialto,OU=Application Managed,OU=Groups,DC=amr,DC=corp,DC=myorg,DC=com))",
SubstitutedStr: "(&(objectclass=user)(sAMAccountName=john)(memberOf=CN=myorg,OU=Rialto,OU=Application Managed,OU=Groups,DC=amr,DC=corp,DC=myorg,DC=com))",
ErrExpected: false,
},
}
for _, test := range tests {
test := test
t.Run(test.SubstitutableStr, func(t *testing.T) {
subber, err := NewSubstituter(test.KV...)
if err != nil && !test.ErrExpected {
t.Errorf("Unexpected failure %s", err)
}
gotStr, err := subber.Substitute(test.SubstitutableStr)
if err != nil && !test.ErrExpected {
t.Errorf("Unexpected failure %s", err)
}
if gotStr != test.SubstitutedStr {
t.Errorf("Expected %s, got %s", test.SubstitutedStr, gotStr)
}
})
}
}

View File

@@ -28,24 +28,33 @@ var (
},
config.HelpKV{
Key: UsernameFormat,
Description: `username bind DNs e.g. "uid=%s,cn=accounts,dc=myldapserver,dc=com"`,
Description: `";" separated list of username bind DNs e.g. "uid=%s,cn=accounts,dc=myldapserver,dc=com"`,
Type: "list",
},
config.HelpKV{
Key: UsernameSearchFilter,
Description: `user search filter, for example "(cn=%s)" or "(sAMAccountName=%s)" or "(uid=%s)"`,
Type: "string",
},
config.HelpKV{
Key: GroupSearchFilter,
Description: `search filter for groups e.g. "(&(objectclass=groupOfNames)(memberUid=%s))"`,
Optional: true,
Type: "string",
},
config.HelpKV{
Key: GroupNameAttribute,
Description: `search attribute for group name e.g. "cn"`,
Optional: true,
Type: "string",
},
config.HelpKV{
Key: GroupSearchBaseDN,
Description: `group search base DNs e.g. "dc=myldapserver,dc=com"`,
Description: `";" separated list of group search base DNs e.g. "dc=myldapserver,dc=com"`,
Type: "list",
},
config.HelpKV{
Key: UsernameSearchBaseDN,
Description: `";" separated list of username search DNs`,
Type: "list",
Optional: true,
},
config.HelpKV{
Key: GroupNameAttribute,
Description: `search attribute for group name e.g. "cn"`,
Optional: true,
Type: "string",
},
@@ -57,7 +66,13 @@ var (
},
config.HelpKV{
Key: TLSSkipVerify,
Description: `trust server TLS without verification, defaults to "on" (verify)`,
Description: `trust server TLS without verification, defaults to "off" (verify)`,
Optional: true,
Type: "on|off",
},
config.HelpKV{
Key: ServerInsecure,
Description: `allow plain text connection to AD/LDAP server, defaults to "off"`,
Optional: true,
Type: "on|off",
},

View File

@@ -0,0 +1,51 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package openid
import (
"crypto"
"github.com/dgrijalva/jwt-go"
// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
_ "golang.org/x/crypto/sha3"
)
// Specific instances for ES3256 and company
var (
SigningMethodES3256 *jwt.SigningMethodECDSA
SigningMethodES3384 *jwt.SigningMethodECDSA
SigningMethodES3512 *jwt.SigningMethodECDSA
)
func init() {
// ES3256
SigningMethodES3256 = &jwt.SigningMethodECDSA{Name: "ES3256", Hash: crypto.SHA3_256, KeySize: 32, CurveBits: 256}
jwt.RegisterSigningMethod(SigningMethodES3256.Alg(), func() jwt.SigningMethod {
return SigningMethodES3256
})
// ES3384
SigningMethodES3384 = &jwt.SigningMethodECDSA{Name: "ES3384", Hash: crypto.SHA3_384, KeySize: 48, CurveBits: 384}
jwt.RegisterSigningMethod(SigningMethodES3384.Alg(), func() jwt.SigningMethod {
return SigningMethodES3384
})
// ES3512
SigningMethodES3512 = &jwt.SigningMethodECDSA{Name: "ES3512", Hash: crypto.SHA3_512, KeySize: 66, CurveBits: 521}
jwt.RegisterSigningMethod(SigningMethodES3512.Alg(), func() jwt.SigningMethod {
return SigningMethodES3512
})
}
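Because init() registers each method under its Alg() name, the SHA-3 variants resolve through the standard jwt-go registry once this package is linked in. A minimal sketch (the blank import path assumes this package's location in the tree):

package main

import (
	"fmt"

	"github.com/dgrijalva/jwt-go"

	// blank import runs init() above, registering ES3256/ES3384/ES3512
	_ "github.com/minio/minio/cmd/config/identity/openid"
)

func main() {
	if m := jwt.GetSigningMethod("ES3256"); m != nil {
		fmt.Println("registered:", m.Alg()) // registered: ES3256
	}
}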

View File

@@ -30,7 +30,7 @@ import (
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/env"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/iam/policy"
xnet "github.com/minio/minio/pkg/net"
)
@@ -168,7 +168,10 @@ func updateClaimsExpiry(dsecs string, claims map[string]interface{}) error {
// Validate - validates the access token.
func (p *JWT) Validate(token, dsecs string) (map[string]interface{}, error) {
jp := new(jwtgo.Parser)
jp.ValidMethods = []string{"RS256", "RS384", "RS512", "ES256", "ES384", "ES512"}
jp.ValidMethods = []string{
"RS256", "RS384", "RS512", "ES256", "ES384", "ES512",
"RS3256", "RS3384", "RS3512", "ES3256", "ES3384", "ES3512",
}
keyFuncCallback := func(jwtToken *jwtgo.Token) (interface{}, error) {
kid, ok := jwtToken.Header["kid"].(string)

View File

@@ -0,0 +1,51 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package openid
import (
"crypto"
"github.com/dgrijalva/jwt-go"
// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
_ "golang.org/x/crypto/sha3"
)
// Specific instances for RS256 and company
var (
SigningMethodRS3256 *jwt.SigningMethodRSA
SigningMethodRS3384 *jwt.SigningMethodRSA
SigningMethodRS3512 *jwt.SigningMethodRSA
)
func init() {
// RS3256
SigningMethodRS3256 = &jwt.SigningMethodRSA{Name: "RS3256", Hash: crypto.SHA3_256}
jwt.RegisterSigningMethod(SigningMethodRS3256.Alg(), func() jwt.SigningMethod {
return SigningMethodRS3256
})
// RS3384
SigningMethodRS3384 = &jwt.SigningMethodRSA{Name: "RS3384", Hash: crypto.SHA3_384}
jwt.RegisterSigningMethod(SigningMethodRS3384.Alg(), func() jwt.SigningMethod {
return SigningMethodRS3384
})
// RS3512
SigningMethodRS3512 = &jwt.SigningMethodRSA{Name: "RS3512", Hash: crypto.SHA3_512}
jwt.RegisterSigningMethod(SigningMethodRS3512.Alg(), func() jwt.SigningMethod {
return SigningMethodRS3512
})
}

View File

@@ -24,7 +24,7 @@ import (
const (
formatComment = `'namespace' reflects current bucket/object list and 'access' reflects a journal of object operations, defaults to 'namespace'`
queueDirComment = `staging dir for undelivered messages e.g. '/home/events'`
queueLimitComment = `maximum limit for undelivered messages, defaults to '10000'`
queueLimitComment = `maximum limit for undelivered messages, defaults to '100000'`
)
// Help template inputs for all notification targets
@@ -165,6 +165,12 @@ var (
Optional: true,
Type: "string",
},
config.HelpKV{
Key: target.KafkaSASLMechanism,
Description: "sasl authentication mechanism, default 'plain'",
Optional: true,
Type: "string",
},
config.HelpKV{
Key: target.KafkaTLSClientAuth,
Description: "clientAuth determines the Kafka server's policy for TLS client auth",
@@ -291,7 +297,7 @@ var (
HelpPostgres = config.HelpKVS{
config.HelpKV{
Key: target.PostgresConnectionString,
Description: "Postgres server connection-string",
Description: `Postgres server connection-string e.g. "host=localhost port=5432 dbname=minio_events user=postgres password=password sslmode=disable"`,
Type: "string",
},
config.HelpKV{
@@ -304,36 +310,6 @@ var (
Description: formatComment,
Type: "namespace*|access",
},
config.HelpKV{
Key: target.PostgresHost,
Description: "Postgres server hostname (used only if `connection_string` is empty)",
Optional: true,
Type: "hostname",
},
config.HelpKV{
Key: target.PostgresPort,
Description: "Postgres server port, defaults to `5432` (used only if `connection_string` is empty)",
Optional: true,
Type: "port",
},
config.HelpKV{
Key: target.PostgresUsername,
Description: "database username (used only if `connection_string` is empty)",
Optional: true,
Type: "string",
},
config.HelpKV{
Key: target.PostgresPassword,
Description: "database password (used only if `connection_string` is empty)",
Optional: true,
Type: "string",
},
config.HelpKV{
Key: target.PostgresDatabase,
Description: "database name (used only if `connection_string` is empty)",
Optional: true,
Type: "string",
},
config.HelpKV{
Key: target.PostgresQueueDir,
Description: queueDirComment,
@@ -357,7 +333,8 @@ var (
HelpMySQL = config.HelpKVS{
config.HelpKV{
Key: target.MySQLDSNString,
Description: "MySQL data-source-name connection string",
Description: `MySQL data-source-name connection string e.g. "<user>:<password>@tcp(<host>:<port>)/<database>"`,
Optional: true,
Type: "string",
},
config.HelpKV{
@@ -370,36 +347,6 @@ var (
Description: formatComment,
Type: "namespace*|access",
},
config.HelpKV{
Key: target.MySQLHost,
Description: "MySQL server hostname (used only if `dsn_string` is empty)",
Optional: true,
Type: "hostname",
},
config.HelpKV{
Key: target.MySQLPort,
Description: "MySQL server port (used only if `dsn_string` is empty)",
Optional: true,
Type: "port",
},
config.HelpKV{
Key: target.MySQLUsername,
Description: "database username (used only if `dsn_string` is empty)",
Optional: true,
Type: "string",
},
config.HelpKV{
Key: target.MySQLPassword,
Description: "database password (used only if `dsn_string` is empty)",
Optional: true,
Type: "string",
},
config.HelpKV{
Key: target.MySQLDatabase,
Description: "database name (used only if `dsn_string` is empty)",
Optional: true,
Type: "string",
},
config.HelpKV{
Key: target.MySQLQueueDir,
Description: queueDirComment,

View File

@@ -1,3 +1,19 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package notify
import (

View File

@@ -17,8 +17,10 @@
package notify
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"net/http"
"strconv"
"strings"
@@ -37,12 +39,16 @@ const (
formatAccess = "access"
)
// ErrTargetsOffline - Indicates single/multiple target failures.
var ErrTargetsOffline = errors.New("one or more targets are offline. Please use `mc admin info --json` to check the offline targets")
// TestNotificationTargets is similar to GetNotificationTargets()
// but avoids explicit registration.
func TestNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport,
targetIDs []event.TargetID) error {
test := true
targets, err := RegisterNotificationTargets(cfg, doneCh, transport, targetIDs, test)
returnOnTargetError := true
targets, err := RegisterNotificationTargets(cfg, doneCh, transport, targetIDs, test, returnOnTargetError)
if err == nil {
// Close all targets since we are only testing connections.
for _, t := range targets.TargetMap() {
@@ -57,7 +63,8 @@ func TestNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transpor
// targets, returns error if any.
func GetNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport) (*event.TargetList, error) {
test := false
return RegisterNotificationTargets(cfg, doneCh, transport, nil, test)
returnOnTargetError := false
return RegisterNotificationTargets(cfg, doneCh, transport, nil, test, returnOnTargetError)
}
// RegisterNotificationTargets - returns TargetList which contains enabled targets in serverConfig.
@@ -65,12 +72,40 @@ func GetNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport
// * Add a new target in pkg/event/target package.
// * Add newly added target configuration to serverConfig.Notify.<TARGET_NAME>.
// * Handle the configuration in this function to create/add into TargetList.
func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport, targetIDs []event.TargetID, test bool) (_ *event.TargetList, err error) {
func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport, targetIDs []event.TargetID, test bool, returnOnTargetError bool) (*event.TargetList, error) {
targetList, err := FetchRegisteredTargets(cfg, doneCh, transport, test, returnOnTargetError)
if err != nil {
return targetList, err
}
if test {
// Verify if user is trying to disable already configured
// notification targets, based on their target IDs
for _, targetID := range targetIDs {
if !targetList.Exists(targetID) {
return nil, config.Errorf(
"Unable to disable configured targets '%v'",
targetID)
}
}
}
return targetList, nil
}
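FetchRegisteredTargets below threads returnOnTargetError through every target constructor: the config-test path fails fast, while server startup logs the failure, closes the broken target, and keeps registering the rest, reporting ErrTargetsOffline at the end. A reduced sketch of that contract, with hypothetical constructors standing in for the real target packages:

package main

import (
	"errors"
	"fmt"
)

func register(makers []func() (string, error), returnOnTargetError bool) ([]string, error) {
	var targets []string
	var offline bool
	for _, mk := range makers {
		t, err := mk()
		if err != nil {
			offline = true
			if returnOnTargetError {
				return nil, err // fail fast, e.g. when testing a config change
			}
			continue // server start: skip the broken target, keep the rest
		}
		targets = append(targets, t)
	}
	if offline {
		return targets, errors.New("one or more targets are offline")
	}
	return targets, nil
}

func main() {
	makers := []func() (string, error){
		func() (string, error) { return "webhook:1", nil },
		func() (string, error) { return "", errors.New("kafka down") },
	}
	targets, err := register(makers, false)
	fmt.Println(targets, err) // [webhook:1] one or more targets are offline
}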
// FetchRegisteredTargets - returns the set of configured targets as a TargetList.
// If `returnOnTargetError` is true, the function returns as soon as a target initialization fails.
// Otherwise, the function returns a complete TargetList irrespective of errors.
func FetchRegisteredTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport, test bool, returnOnTargetError bool) (_ *event.TargetList, err error) {
targetList := event.NewTargetList()
var targetsOffline bool
defer func() {
// Automatically close all connections to targets when an error occurs
if err != nil {
// Automatically close all connections to targets when an error occurs.
// Close all the targets if returnOnTargetError is set
// Else, close only the failed targets
if err != nil && returnOnTargetError {
for _, t := range targetList.TargetMap() {
_ = t.Close()
}
@@ -137,10 +172,18 @@ func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, tran
}
newTarget, err := target.NewAMQPTarget(id, args, doneCh, logger.LogOnceIf, test)
if err != nil {
return nil, err
targetsOffline = true
if returnOnTargetError {
return nil, err
}
_ = newTarget.Close()
}
if err = targetList.Add(newTarget); err != nil {
return nil, err
logger.LogIf(context.Background(), err)
if returnOnTargetError {
return nil, err
}
}
}
@@ -150,11 +193,17 @@ func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, tran
}
newTarget, err := target.NewElasticsearchTarget(id, args, doneCh, logger.LogOnceIf, test)
if err != nil {
return nil, err
targetsOffline = true
if returnOnTargetError {
return nil, err
}
_ = newTarget.Close()
}
if err = targetList.Add(newTarget); err != nil {
return nil, err
logger.LogIf(context.Background(), err)
if returnOnTargetError {
return nil, err
}
}
}
@@ -165,10 +214,17 @@ func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, tran
args.TLS.RootCAs = transport.TLSClientConfig.RootCAs
newTarget, err := target.NewKafkaTarget(id, args, doneCh, logger.LogOnceIf, test)
if err != nil {
return nil, err
targetsOffline = true
if returnOnTargetError {
return nil, err
}
_ = newTarget.Close()
}
if err = targetList.Add(newTarget); err != nil {
return nil, err
logger.LogIf(context.Background(), err)
if returnOnTargetError {
return nil, err
}
}
}
@@ -179,10 +235,17 @@ func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, tran
args.RootCAs = transport.TLSClientConfig.RootCAs
newTarget, err := target.NewMQTTTarget(id, args, doneCh, logger.LogOnceIf, test)
if err != nil {
return nil, err
targetsOffline = true
if returnOnTargetError {
return nil, err
}
_ = newTarget.Close()
}
if err = targetList.Add(newTarget); err != nil {
return nil, err
logger.LogIf(context.Background(), err)
if returnOnTargetError {
return nil, err
}
}
}
@@ -192,10 +255,17 @@ func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, tran
}
newTarget, err := target.NewMySQLTarget(id, args, doneCh, logger.LogOnceIf, test)
if err != nil {
return nil, err
targetsOffline = true
if returnOnTargetError {
return nil, err
}
_ = newTarget.Close()
}
if err = targetList.Add(newTarget); err != nil {
return nil, err
logger.LogIf(context.Background(), err)
if returnOnTargetError {
return nil, err
}
}
}
@@ -205,10 +275,17 @@ func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, tran
}
newTarget, err := target.NewNATSTarget(id, args, doneCh, logger.LogOnceIf, test)
if err != nil {
return nil, err
targetsOffline = true
if returnOnTargetError {
return nil, err
}
_ = newTarget.Close()
}
if err = targetList.Add(newTarget); err != nil {
return nil, err
logger.LogIf(context.Background(), err)
if returnOnTargetError {
return nil, err
}
}
}
@@ -218,10 +295,17 @@ func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, tran
}
newTarget, err := target.NewNSQTarget(id, args, doneCh, logger.LogOnceIf, test)
if err != nil {
return nil, err
targetsOffline = true
if returnOnTargetError {
return nil, err
}
_ = newTarget.Close()
}
if err = targetList.Add(newTarget); err != nil {
return nil, err
logger.LogIf(context.Background(), err)
if returnOnTargetError {
return nil, err
}
}
}
@@ -231,10 +315,17 @@ func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, tran
}
 		newTarget, err := target.NewPostgreSQLTarget(id, args, doneCh, logger.LogOnceIf, test)
 		if err != nil {
-			return nil, err
+			targetsOffline = true
+			if returnOnTargetError {
+				return nil, err
+			}
+			_ = newTarget.Close()
 		}
 		if err = targetList.Add(newTarget); err != nil {
-			return nil, err
+			logger.LogIf(context.Background(), err)
+			if returnOnTargetError {
+				return nil, err
+			}
 		}
 	}
@@ -244,10 +335,17 @@ func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, tran
}
 		newTarget, err := target.NewRedisTarget(id, args, doneCh, logger.LogOnceIf, test)
 		if err != nil {
-			return nil, err
+			targetsOffline = true
+			if returnOnTargetError {
+				return nil, err
+			}
+			_ = newTarget.Close()
 		}
 		if err = targetList.Add(newTarget); err != nil {
-			return nil, err
+			logger.LogIf(context.Background(), err)
+			if returnOnTargetError {
+				return nil, err
+			}
 		}
 	}
@@ -257,23 +355,22 @@ func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, tran
}
 		newTarget, err := target.NewWebhookTarget(id, args, doneCh, logger.LogOnceIf, transport, test)
 		if err != nil {
-			return nil, err
+			targetsOffline = true
+			if returnOnTargetError {
+				return nil, err
+			}
+			_ = newTarget.Close()
 		}
-		if err := targetList.Add(newTarget); err != nil {
-			return nil, err
+		if err = targetList.Add(newTarget); err != nil {
+			logger.LogIf(context.Background(), err)
+			if returnOnTargetError {
+				return nil, err
+			}
 		}
 	}
-	if test {
-		// Verify if user is trying to disable already configured
-		// notification targets, based on their target IDs
-		for _, targetID := range targetIDs {
-			if !targetList.Exists(targetID) {
-				return nil, config.Errorf(
-					"Unable to disable configured targets '%v'",
-					targetID)
-			}
-		}
-	}
+	if targetsOffline {
+		return targetList, ErrTargetsOffline
+	}
 	return targetList, nil
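Each target hunk above applies the same new control flow: a failed constructor or a failed targetList.Add no longer aborts registration; it flags targetsOffline and, unless returnOnTargetError is set, registration simply moves on to the next target. The partial list is then returned together with the ErrTargetsOffline sentinel. A minimal stand-alone sketch of that flow (registerAll and makeTarget are illustrative names, not MinIO APIs):

package main

import (
	"errors"
	"fmt"
)

// errTargetsOffline plays the role of the ErrTargetsOffline sentinel above.
var errTargetsOffline = errors.New("one or more targets are offline")

// registerAll builds targets while tolerating failures, mirroring the
// per-target pattern repeated in RegisterNotificationTargets.
func registerAll(ids []string, makeTarget func(string) (string, error), returnOnTargetError bool) ([]string, error) {
	var list []string
	targetsOffline := false
	for _, id := range ids {
		t, err := makeTarget(id)
		if err != nil {
			targetsOffline = true
			if returnOnTargetError {
				return nil, err
			}
			continue // keep registering the remaining targets
		}
		list = append(list, t)
	}
	if targetsOffline {
		return list, errTargetsOffline // partial list plus sentinel
	}
	return list, nil
}

func main() {
	mk := func(id string) (string, error) {
		if id == "down" {
			return "", errors.New("connection refused")
		}
		return "target:" + id, nil
	}
	fmt.Println(registerAll([]string{"up", "down"}, mk, false))
}

Returning the partial list alongside the sentinel lets a caller start up in a degraded mode instead of failing outright.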
@@ -306,8 +403,10 @@ func checkValidNotificationKeys(cfg config.Config) error {
 			if tname != config.Default {
 				subSysTarget = subSys + config.SubSystemSeparator + tname
 			}
-			if err := config.CheckValidKeys(subSysTarget, kv, validKVS); err != nil {
-				return err
+			if v, ok := kv.Lookup(config.Enable); ok && v == config.EnableOn {
+				if err := config.CheckValidKeys(subSysTarget, kv, validKVS); err != nil {
+					return err
+				}
 			}
 		}
 	}
@@ -352,6 +451,10 @@ var (
Key: target.KafkaSASLPassword,
Value: "",
},
+	config.KV{
+		Key:   target.KafkaSASLMechanism,
+		Value: "plain",
+	},
config.KV{
Key: target.KafkaClientTLSCert,
Value: "",
@@ -507,9 +610,14 @@ func GetNotifyKafka(kafkaKVS map[string]config.KVS) (map[string]target.KafkaArgs
if k != config.Default {
saslPasswordEnv = saslPasswordEnv + config.Default + k
}
+		saslMechanismEnv := target.EnvKafkaSASLMechanism
+		if k != config.Default {
+			saslMechanismEnv = saslMechanismEnv + config.Default + k
+		}
kafkaArgs.SASL.Enable = env.Get(saslEnableEnv, kv.Get(target.KafkaSASL)) == config.EnableOn
kafkaArgs.SASL.User = env.Get(saslUsernameEnv, kv.Get(target.KafkaSASLUsername))
kafkaArgs.SASL.Password = env.Get(saslPasswordEnv, kv.Get(target.KafkaSASLPassword))
+		kafkaArgs.SASL.Mechanism = env.Get(saslMechanismEnv, kv.Get(target.KafkaSASLMechanism))
if err = kafkaArgs.Validate(); err != nil {
return nil, err
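The saslMechanismEnv block follows the file's per-target override convention: the default target reads the bare environment variable, while a named target reads the variable with the separator and target name appended, and either falls back to the stored config value. A small sketch of that lookup order; the literal variable name and the "_" separator are assumptions inferred from the surrounding convention, not verified constants:

package main

import (
	"fmt"
	"os"
)

// getEnv mirrors env.Get: the environment wins, otherwise the config KVS value is used.
func getEnv(key, configValue string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v
	}
	return configValue
}

func main() {
	const base = "MINIO_NOTIFY_KAFKA_SASL_MECHANISM" // assumed value of target.EnvKafkaSASLMechanism
	const sep = "_"                                  // assumed value of config.Default

	for _, targetName := range []string{"_", "primary"} {
		key := base
		if targetName != "_" { // named targets get a suffixed variable
			key = key + sep + targetName
		}
		fmt.Printf("%s -> %s (%s)\n", targetName, key, getEnv(key, "plain")) // "plain" is the KV default
	}
}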
@@ -687,26 +795,6 @@ var (
Key: target.MySQLFormat,
Value: formatNamespace,
},
-		config.KV{
-			Key:   target.MySQLHost,
-			Value: "",
-		},
-		config.KV{
-			Key:   target.MySQLPort,
-			Value: "",
-		},
-		config.KV{
-			Key:   target.MySQLUsername,
-			Value: "",
-		},
-		config.KV{
-			Key:   target.MySQLPassword,
-			Value: "",
-		},
-		config.KV{
-			Key:   target.MySQLDatabase,
-			Value: "",
-		},
config.KV{
Key: target.MySQLDSNString,
Value: "",
@@ -743,16 +831,6 @@ func GetNotifyMySQL(mysqlKVS map[string]config.KVS) (map[string]target.MySQLArgs
continue
}
-		hostEnv := target.EnvMySQLHost
-		if k != config.Default {
-			hostEnv = hostEnv + config.Default + k
-		}
-		host, err := xnet.ParseURL(env.Get(hostEnv, kv.Get(target.MySQLHost)))
-		if err != nil {
-			return nil, err
-		}
queueLimitEnv := target.EnvMySQLQueueLimit
if k != config.Default {
queueLimitEnv = queueLimitEnv + config.Default + k
@@ -766,30 +844,17 @@ func GetNotifyMySQL(mysqlKVS map[string]config.KVS) (map[string]target.MySQLArgs
if k != config.Default {
formatEnv = formatEnv + config.Default + k
}
dsnStringEnv := target.EnvMySQLDSNString
if k != config.Default {
dsnStringEnv = dsnStringEnv + config.Default + k
}
tableEnv := target.EnvMySQLTable
if k != config.Default {
tableEnv = tableEnv + config.Default + k
}
-		portEnv := target.EnvMySQLPort
-		if k != config.Default {
-			portEnv = portEnv + config.Default + k
-		}
-		usernameEnv := target.EnvMySQLUsername
-		if k != config.Default {
-			usernameEnv = usernameEnv + config.Default + k
-		}
-		passwordEnv := target.EnvMySQLPassword
-		if k != config.Default {
-			passwordEnv = passwordEnv + config.Default + k
-		}
-		databaseEnv := target.EnvMySQLDatabase
-		if k != config.Default {
-			databaseEnv = databaseEnv + config.Default + k
-		}
queueDirEnv := target.EnvMySQLQueueDir
if k != config.Default {
queueDirEnv = queueDirEnv + config.Default + k
@@ -799,11 +864,6 @@ func GetNotifyMySQL(mysqlKVS map[string]config.KVS) (map[string]target.MySQLArgs
Format: env.Get(formatEnv, kv.Get(target.MySQLFormat)),
DSN: env.Get(dsnStringEnv, kv.Get(target.MySQLDSNString)),
Table: env.Get(tableEnv, kv.Get(target.MySQLTable)),
-			Host:       *host,
-			Port:       env.Get(portEnv, kv.Get(target.MySQLPort)),
-			User:       env.Get(usernameEnv, kv.Get(target.MySQLUsername)),
-			Password:   env.Get(passwordEnv, kv.Get(target.MySQLPassword)),
-			Database:   env.Get(databaseEnv, kv.Get(target.MySQLDatabase)),
QueueDir: env.Get(queueDirEnv, kv.Get(target.MySQLQueueDir)),
QueueLimit: queueLimit,
}
@@ -1171,26 +1231,6 @@ var (
Key: target.PostgresTable,
Value: "",
},
-		config.KV{
-			Key:   target.PostgresHost,
-			Value: "",
-		},
-		config.KV{
-			Key:   target.PostgresPort,
-			Value: "",
-		},
-		config.KV{
-			Key:   target.PostgresUsername,
-			Value: "",
-		},
-		config.KV{
-			Key:   target.PostgresPassword,
-			Value: "",
-		},
-		config.KV{
-			Key:   target.PostgresDatabase,
-			Value: "",
-		},
config.KV{
Key: target.PostgresQueueDir,
Value: "",
@@ -1219,16 +1259,6 @@ func GetNotifyPostgres(postgresKVS map[string]config.KVS) (map[string]target.Pos
continue
}
-		hostEnv := target.EnvPostgresHost
-		if k != config.Default {
-			hostEnv = hostEnv + config.Default + k
-		}
-		host, err := xnet.ParseHost(env.Get(hostEnv, kv.Get(target.PostgresHost)))
-		if err != nil {
-			return nil, err
-		}
queueLimitEnv := target.EnvPostgresQueueLimit
if k != config.Default {
queueLimitEnv = queueLimitEnv + config.Default + k
@@ -1254,26 +1284,6 @@ func GetNotifyPostgres(postgresKVS map[string]config.KVS) (map[string]target.Pos
tableEnv = tableEnv + config.Default + k
}
-		portEnv := target.EnvPostgresPort
-		if k != config.Default {
-			portEnv = portEnv + config.Default + k
-		}
-		usernameEnv := target.EnvPostgresUsername
-		if k != config.Default {
-			usernameEnv = usernameEnv + config.Default + k
-		}
-		passwordEnv := target.EnvPostgresPassword
-		if k != config.Default {
-			passwordEnv = passwordEnv + config.Default + k
-		}
-		databaseEnv := target.EnvPostgresDatabase
-		if k != config.Default {
-			databaseEnv = databaseEnv + config.Default + k
-		}
queueDirEnv := target.EnvPostgresQueueDir
if k != config.Default {
queueDirEnv = queueDirEnv + config.Default + k
@@ -1284,11 +1294,6 @@ func GetNotifyPostgres(postgresKVS map[string]config.KVS) (map[string]target.Pos
Format: env.Get(formatEnv, kv.Get(target.PostgresFormat)),
ConnectionString: env.Get(connectionStringEnv, kv.Get(target.PostgresConnectionString)),
Table: env.Get(tableEnv, kv.Get(target.PostgresTable)),
-			Host:             *host,
-			Port:             env.Get(portEnv, kv.Get(target.PostgresPort)),
-			User:             env.Get(usernameEnv, kv.Get(target.PostgresUsername)),
-			Password:         env.Get(passwordEnv, kv.Get(target.PostgresPassword)),
-			Database:         env.Get(databaseEnv, kv.Get(target.PostgresDatabase)),
QueueDir: env.Get(queueDirEnv, kv.Get(target.PostgresQueueDir)),
QueueLimit: uint64(queueLimit),
}


@@ -24,7 +24,6 @@ import (
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/cmd/logger/message/log"
"github.com/minio/minio/cmd/logger/target/console"
"github.com/minio/minio/pkg/madmin"
xnet "github.com/minio/minio/pkg/net"
"github.com/minio/minio/pkg/pubsub"
)
@@ -75,7 +74,7 @@ func (sys *HTTPConsoleLoggerSys) HasLogListeners() bool {
}
// Subscribe starts console logging for this node.
-func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan interface{}, doneCh chan struct{}, node string, last int, logKind string, filter func(entry interface{}) bool) {
+func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan interface{}, doneCh <-chan struct{}, node string, last int, logKind string, filter func(entry interface{}) bool) {
// Enable console logging for remote client.
if !sys.HasLogListeners() {
logger.AddTarget(sys)
@@ -84,17 +83,20 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan interface{}, doneCh chan s
cnt := 0
// by default send all console logs in the ring buffer unless node or limit query parameters
// are set.
-	var lastN []madmin.LogInfo
+	var lastN []log.Info
 	if last > defaultLogBufferCount || last <= 0 {
 		last = defaultLogBufferCount
 	}
-	lastN = make([]madmin.LogInfo, last)
+	lastN = make([]log.Info, last)
 	sys.RLock()
 	sys.logBuf.Do(func(p interface{}) {
-		if p != nil && (p.(madmin.LogInfo)).SendLog(node, logKind) {
-			lastN[cnt%last] = p.(madmin.LogInfo)
-			cnt++
+		if p != nil {
+			lg, ok := p.(log.Info)
+			if ok && lg.SendLog(node, logKind) {
+				lastN[cnt%last] = lg
+				cnt++
+			}
 		}
})
sys.RUnlock()
@@ -102,7 +104,7 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan interface{}, doneCh chan s
if cnt > 0 {
for i := 0; i < last; i++ {
entry := lastN[(cnt+i)%last]
-		if (entry == madmin.LogInfo{}) {
+		if (entry == log.Info{}) {
continue
}
select {
@@ -118,12 +120,12 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan interface{}, doneCh chan s
// Send log message 'e' to console and publish to console
// log pubsub system
func (sys *HTTPConsoleLoggerSys) Send(e interface{}, logKind string) error {
-	var lg madmin.LogInfo
+	var lg log.Info
 	switch e := e.(type) {
 	case log.Entry:
-		lg = madmin.LogInfo{Entry: e, NodeName: sys.nodeName}
+		lg = log.Info{Entry: e, NodeName: sys.nodeName}
 	case string:
-		lg = madmin.LogInfo{ConsoleMsg: e, NodeName: sys.nodeName}
+		lg = log.Info{ConsoleMsg: e, NodeName: sys.nodeName}
}
sys.pubsub.Publish(lg)
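Subscribe replays the ring buffer with a compact bit of bookkeeping: entries are written into lastN at cnt%last, so once the buffer wraps, reading slots in (cnt+i)%last order yields the surviving entries oldest-first. The same logic in isolation, using container/ring as the logger does (a sketch, not the MinIO type):

package main

import (
	"container/ring"
	"fmt"
)

// lastEntries returns up to n of the most recent strings in r, oldest first,
// using the same modulo indexing as HTTPConsoleLoggerSys.Subscribe.
func lastEntries(r *ring.Ring, n int) []string {
	buf := make([]string, n)
	cnt := 0
	r.Do(func(p interface{}) {
		if s, ok := p.(string); ok {
			buf[cnt%n] = s
			cnt++
		}
	})
	var out []string
	for i := 0; i < n; i++ {
		if e := buf[(cnt+i)%n]; e != "" { // skip slots that were never filled
			out = append(out, e)
		}
	}
	return out
}

func main() {
	r := ring.New(8)
	for i := 1; i <= 5; i++ {
		r.Value = fmt.Sprintf("log-%d", i)
		r = r.Next()
	}
	fmt.Println(lastEntries(r, 3)) // [log-3 log-4 log-5]
}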


@@ -30,90 +30,27 @@ const (
bgLifecycleTick = time.Hour
)
-type lifecycleOps struct {
-	LastActivity time.Time
-}
-
-// Register to the daily objects listing
-var globalLifecycleOps = &lifecycleOps{}
-
-func getLocalBgLifecycleOpsStatus() BgLifecycleOpsStatus {
-	return BgLifecycleOpsStatus{
-		LastActivity: globalLifecycleOps.LastActivity,
-	}
-}
-
 // initDailyLifecycle starts the routine that receives the daily
 // listing of all objects and applies any matching bucket lifecycle
 // rules.
-func initDailyLifecycle() {
-	go startDailyLifecycle()
+func initDailyLifecycle(ctx context.Context, objAPI ObjectLayer) {
+	go startDailyLifecycle(ctx, objAPI)
 }
 
-func startDailyLifecycle() {
-	var objAPI ObjectLayer
-	var ctx = context.Background()
-
-	// Wait until the object API is ready
+func startDailyLifecycle(ctx context.Context, objAPI ObjectLayer) {
 	for {
-		objAPI = newObjectLayerWithoutSafeModeFn()
-		if objAPI == nil {
-			time.Sleep(time.Second)
-			continue
-		}
-		break
-	}
-
-	// Calculate the time of the last lifecycle operation in all peers node of the cluster
-	computeLastLifecycleActivity := func(status []BgOpsStatus) time.Time {
-		var lastAct time.Time
-		for _, st := range status {
-			if st.LifecycleOps.LastActivity.After(lastAct) {
-				lastAct = st.LifecycleOps.LastActivity
-			}
-		}
-		return lastAct
-	}
-
-	for {
-		// Check if we should perform lifecycle ops based on the last lifecycle activity, sleep one hour otherwise
-		allLifecycleStatus := []BgOpsStatus{
-			{LifecycleOps: getLocalBgLifecycleOpsStatus()},
-		}
-		if globalIsDistXL {
-			allLifecycleStatus = append(allLifecycleStatus, globalNotificationSys.BackgroundOpsStatus()...)
-		}
-		lastAct := computeLastLifecycleActivity(allLifecycleStatus)
-		if !lastAct.IsZero() && time.Since(lastAct) < bgLifecycleInterval {
-			time.Sleep(bgLifecycleTick)
-		}
-
-		// Perform one lifecycle operation
-		err := lifecycleRound(ctx, objAPI)
-		switch err.(type) {
-		// Unable to hold a lock means there is another
-		// instance doing the lifecycle round round
-		case OperationTimedOut:
-			time.Sleep(bgLifecycleTick)
-		default:
-			logger.LogIf(ctx, err)
-			time.Sleep(time.Minute)
-			continue
+		select {
+		case <-ctx.Done():
+			return
+		case <-time.NewTimer(bgLifecycleInterval).C:
+			// Perform one lifecycle operation
+			logger.LogIf(ctx, lifecycleRound(ctx, objAPI))
 		}
 	}
 }
 
 var lifecycleLockTimeout = newDynamicTimeout(60*time.Second, time.Second)
 
 func lifecycleRound(ctx context.Context, objAPI ObjectLayer) error {
-	// Lock to avoid concurrent lifecycle ops from other nodes
-	sweepLock := objAPI.NewNSLock(ctx, "system", "daily-lifecycle-ops")
-	if err := sweepLock.GetLock(lifecycleLockTimeout); err != nil {
-		return err
-	}
-	defer sweepLock.Unlock()
-
 	buckets, err := objAPI.ListBuckets(ctx)
 	if err != nil {
 		return err
@@ -126,6 +63,8 @@ func lifecycleRound(ctx context.Context, objAPI ObjectLayer) error {
continue
}
+		_, bucketHasLockConfig := globalBucketObjectLockConfig.Get(bucket.Name)
// Calculate the common prefix of all lifecycle rules
var prefixes []string
for _, rule := range l.Rules {
@@ -148,9 +87,11 @@ func lifecycleRound(ctx context.Context, objAPI ObjectLayer) error {
// Reached maximum delete requests, attempt a delete for now.
break
}
// Find the action that need to be executed
if l.ComputeAction(obj.Name, obj.UserTags, obj.ModTime) == lifecycle.DeleteAction {
+				if bucketHasLockConfig && enforceRetentionForDeletion(ctx, obj) {
+					continue
+				}
objects = append(objects, obj.Name)
}
}
@@ -160,7 +101,7 @@ func lifecycleRound(ctx context.Context, objAPI ObjectLayer) error {
break
}
-		waitForLowHTTPReq(int32(globalEndpoints.Nodes()))
+		waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()))
// Deletes a list of objects.
deleteErrs, err := objAPI.DeleteObjects(ctx, bucket.Name, objects)
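The rewritten startDailyLifecycle drops the readiness polling, the peer activity bookkeeping and the per-error sleeps in favour of one select over the context and a timer, so shutdown is immediate and exactly one round runs per interval. The shape in isolation (runPeriodic is an illustrative helper, not a MinIO function):

package main

import (
	"context"
	"fmt"
	"time"
)

// runPeriodic fires round once per interval until ctx is cancelled,
// mirroring the new startDailyLifecycle loop.
func runPeriodic(ctx context.Context, interval time.Duration, round func(context.Context) error) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.NewTimer(interval).C:
			if err := round(ctx); err != nil {
				fmt.Println("round failed:", err)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()
	runPeriodic(ctx, 100*time.Millisecond, func(context.Context) error {
		fmt.Println("lifecycle round")
		return nil
	})
}

Creating the timer inside the select means the next interval starts counting only after the previous round has finished, unlike a free-running time.Ticker.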

cmd/data-update-tracker.go (new file, 611 lines)

@@ -0,0 +1,611 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bufio"
"bytes"
"context"
"encoding/binary"
"errors"
"io"
"io/ioutil"
"os"
"path"
"sort"
"strings"
"sync"
"time"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/color"
"github.com/minio/minio/pkg/env"
"github.com/willf/bloom"
)
const (
// Estimate bloom filter size. With this many items
dataUpdateTrackerEstItems = 1000000
// ... we want this false positive rate:
dataUpdateTrackerFP = 0.99
dataUpdateTrackerQueueSize = 10000
dataUpdateTrackerFilename = dataUsageBucket + SlashSeparator + ".tracker.bin"
dataUpdateTrackerVersion = 1
dataUpdateTrackerSaveInterval = 5 * time.Minute
// Reset bloom filters every n cycle
dataUpdateTrackerResetEvery = 1000
)
var (
objectUpdatedCh chan<- string
intDataUpdateTracker *dataUpdateTracker
)
func init() {
intDataUpdateTracker = newDataUpdateTracker()
objectUpdatedCh = intDataUpdateTracker.input
}
type dataUpdateTracker struct {
mu sync.Mutex
input chan string
save chan struct{}
debug bool
saveExited chan struct{}
Current dataUpdateFilter
History dataUpdateTrackerHistory
Saved time.Time
}
// newDataUpdateTracker returns a dataUpdateTracker with default settings.
func newDataUpdateTracker() *dataUpdateTracker {
d := &dataUpdateTracker{
Current: dataUpdateFilter{
idx: 1,
},
debug: env.Get(envDataUsageCrawlDebug, config.EnableOff) == config.EnableOn,
input: make(chan string, dataUpdateTrackerQueueSize),
save: make(chan struct{}, 1),
saveExited: make(chan struct{}),
}
d.Current.bf = d.newBloomFilter()
return d
}
type dataUpdateTrackerHistory []dataUpdateFilter
type dataUpdateFilter struct {
idx uint64
bf bloomFilter
}
type bloomFilter struct {
*bloom.BloomFilter
}
// emptyBloomFilter returns an empty bloom filter.
func emptyBloomFilter() bloomFilter {
return bloomFilter{BloomFilter: &bloom.BloomFilter{}}
}
// containsDir returns whether the bloom filter contains a directory.
// Note that objects in XL mode are also considered directories.
func (b bloomFilter) containsDir(in string) bool {
split := splitPathDeterministic(path.Clean(in))
if len(split) == 0 {
return false
}
var tmp [dataUsageHashLen]byte
hashPath(path.Join(split...)).bytes(tmp[:])
return b.Test(tmp[:])
}
// bytes returns the bloom filter serialized as a byte slice.
func (b bloomFilter) bytes() []byte {
if b.BloomFilter == nil {
return nil
}
var buf bytes.Buffer
_, err := b.WriteTo(&buf)
if err != nil {
logger.LogIf(GlobalContext, err)
return nil
}
return buf.Bytes()
}
// sort the dataUpdateTrackerHistory, newest first.
// Returns whether the history is complete.
func (d dataUpdateTrackerHistory) sort() bool {
if len(d) == 0 {
return true
}
sort.Slice(d, func(i, j int) bool {
return d[i].idx > d[j].idx
})
return d[0].idx-d[len(d)-1].idx == uint64(len(d))
}
// removeOlderThan will remove entries older than index 'n'.
func (d *dataUpdateTrackerHistory) removeOlderThan(n uint64) {
d.sort()
dd := *d
end := len(dd)
for i := end - 1; i >= 0; i-- {
if dd[i].idx < n {
end = i
}
}
dd = dd[:end]
*d = dd
}
// newBloomFilter returns a new bloom filter with default settings.
func (d *dataUpdateTracker) newBloomFilter() bloomFilter {
return bloomFilter{bloom.NewWithEstimates(dataUpdateTrackerEstItems, dataUpdateTrackerFP)}
}
// current returns the current index.
func (d *dataUpdateTracker) current() uint64 {
d.mu.Lock()
defer d.mu.Unlock()
return d.Current.idx
}
// start will load the current data from the drives, start collecting information and
// start a saver goroutine.
// All of these will exit when the context is canceled.
func (d *dataUpdateTracker) start(ctx context.Context, drives ...string) {
if len(drives) <= 0 {
logger.LogIf(ctx, errors.New("dataUpdateTracker.start: No drives specified"))
return
}
d.load(ctx, drives...)
go d.startCollector(ctx)
go d.startSaver(ctx, dataUpdateTrackerSaveInterval, drives)
}
// load will attempt to load data tracking information from the supplied drives.
// The data will only be loaded if d.Saved is older than the one found on disk.
// The newest working cache will be kept in d.
// If no valid data usage tracker can be found d will remain unchanged.
// If the object is shared, the caller should lock it.
func (d *dataUpdateTracker) load(ctx context.Context, drives ...string) {
if len(drives) <= 0 {
logger.LogIf(ctx, errors.New("dataUpdateTracker.load: No drives specified"))
return
}
for _, drive := range drives {
cacheFormatPath := pathJoin(drive, dataUpdateTrackerFilename)
f, err := os.Open(cacheFormatPath)
if err != nil {
if os.IsNotExist(err) {
continue
}
logger.LogIf(ctx, err)
continue
}
err = d.deserialize(f, d.Saved)
if err != nil && err != io.EOF {
logger.LogIf(ctx, err)
}
f.Close()
}
}
// startSaver will start a saver that will write d to all supplied drives at specific intervals.
// The saver will save and exit when supplied context is closed.
func (d *dataUpdateTracker) startSaver(ctx context.Context, interval time.Duration, drives []string) {
t := time.NewTicker(interval)
defer t.Stop()
var buf bytes.Buffer
d.mu.Lock()
saveNow := d.save
exited := make(chan struct{})
d.saveExited = exited
d.mu.Unlock()
defer close(exited)
for {
var exit bool
select {
case <-ctx.Done():
exit = true
case <-t.C:
case <-saveNow:
}
buf.Reset()
d.mu.Lock()
d.Saved = UTCNow()
err := d.serialize(&buf)
if d.debug {
logger.Info(color.Green("dataUpdateTracker:")+" Saving: %v bytes, Current idx: %v", buf.Len(), d.Current.idx)
}
d.mu.Unlock()
if err != nil {
logger.LogIf(ctx, err, "Error serializing usage tracker data")
if exit {
return
}
continue
}
if buf.Len() == 0 {
logger.LogIf(ctx, errors.New("zero sized output, skipping save"))
continue
}
for _, drive := range drives {
cacheFormatPath := pathJoin(drive, dataUpdateTrackerFilename)
err := ioutil.WriteFile(cacheFormatPath, buf.Bytes(), os.ModePerm)
if err != nil {
if os.IsNotExist(err) {
continue
}
logger.LogIf(ctx, err)
continue
}
}
if exit {
return
}
}
}
// serialize all data in d to dst.
// Caller should hold lock if d is expected to be shared.
// If an error is returned, there will likely be partial data written to dst.
func (d *dataUpdateTracker) serialize(dst io.Writer) (err error) {
ctx := GlobalContext
var tmp [8]byte
o := bufio.NewWriter(dst)
defer func() {
if err == nil {
err = o.Flush()
}
}()
// Version
if err := o.WriteByte(dataUpdateTrackerVersion); err != nil {
if d.debug {
logger.LogIf(ctx, err)
}
return err
}
// Timestamp.
binary.LittleEndian.PutUint64(tmp[:], uint64(d.Saved.Unix()))
if _, err := o.Write(tmp[:]); err != nil {
if d.debug {
logger.LogIf(ctx, err)
}
return err
}
// Current
binary.LittleEndian.PutUint64(tmp[:], d.Current.idx)
if _, err := o.Write(tmp[:]); err != nil {
if d.debug {
logger.LogIf(ctx, err)
}
return err
}
if _, err := d.Current.bf.WriteTo(o); err != nil {
if d.debug {
logger.LogIf(ctx, err)
}
return err
}
// History
binary.LittleEndian.PutUint64(tmp[:], uint64(len(d.History)))
if _, err := o.Write(tmp[:]); err != nil {
if d.debug {
logger.LogIf(ctx, err)
}
return err
}
for _, bf := range d.History {
// Current
binary.LittleEndian.PutUint64(tmp[:], bf.idx)
if _, err := o.Write(tmp[:]); err != nil {
if d.debug {
logger.LogIf(ctx, err)
}
return err
}
if _, err := bf.bf.WriteTo(o); err != nil {
if d.debug {
logger.LogIf(ctx, err)
}
return err
}
}
return nil
}
// deserialize will deserialize the supplied input if the input is newer than the supplied time.
func (d *dataUpdateTracker) deserialize(src io.Reader, newerThan time.Time) error {
ctx := GlobalContext
var dst dataUpdateTracker
var tmp [8]byte
// Version
if _, err := io.ReadFull(src, tmp[:1]); err != nil {
if d.debug {
logger.LogIf(ctx, err)
}
return err
}
switch tmp[0] {
case dataUpdateTrackerVersion:
default:
return errors.New("dataUpdateTracker: Unknown data version")
}
// Timestamp.
if _, err := io.ReadFull(src, tmp[:8]); err != nil {
if d.debug {
logger.LogIf(ctx, err)
}
return err
}
t := time.Unix(int64(binary.LittleEndian.Uint64(tmp[:])), 0)
if !t.After(newerThan) {
return nil
}
// Current
if _, err := io.ReadFull(src, tmp[:8]); err != nil {
if d.debug {
logger.LogIf(ctx, err)
}
return err
}
dst.Current.idx = binary.LittleEndian.Uint64(tmp[:])
dst.Current.bf = emptyBloomFilter()
if _, err := dst.Current.bf.ReadFrom(src); err != nil {
if d.debug {
logger.LogIf(ctx, err)
}
return err
}
// History
if _, err := io.ReadFull(src, tmp[:8]); err != nil {
if d.debug {
logger.LogIf(ctx, err)
}
return err
}
n := binary.LittleEndian.Uint64(tmp[:])
dst.History = make(dataUpdateTrackerHistory, int(n))
for i, e := range dst.History {
if _, err := io.ReadFull(src, tmp[:8]); err != nil {
if d.debug {
logger.LogIf(ctx, err)
}
return err
}
e.idx = binary.LittleEndian.Uint64(tmp[:])
e.bf = emptyBloomFilter()
if _, err := e.bf.ReadFrom(src); err != nil {
if d.debug {
logger.LogIf(ctx, err)
}
return err
}
dst.History[i] = e
}
// Ignore what remains on the stream.
// Update d:
d.Current = dst.Current
d.History = dst.History
d.Saved = dst.Saved
return nil
}
// start a collector that picks up entries from objectUpdatedCh
// and adds them to the current bloom filter.
func (d *dataUpdateTracker) startCollector(ctx context.Context) {
var tmp [dataUsageHashLen]byte
for {
select {
case <-ctx.Done():
return
case in := <-d.input:
bucket, _ := path2BucketObjectWithBasePath("", in)
if bucket == "" {
if d.debug && len(in) > 0 {
logger.Info(color.Green("data-usage:")+" no bucket (%s)", in)
}
continue
}
if isReservedOrInvalidBucket(bucket, false) {
if false && d.debug {
logger.Info(color.Green("data-usage:")+" isReservedOrInvalidBucket: %v, entry: %v", bucket, in)
}
continue
}
split := splitPathDeterministic(in)
// Add all paths until level 3.
d.mu.Lock()
for i := range split {
if d.debug && false {
logger.Info(color.Green("dataUpdateTracker:") + " Marking path dirty: " + color.Blue(path.Join(split[:i+1]...)))
}
hashPath(path.Join(split[:i+1]...)).bytes(tmp[:])
d.Current.bf.Add(tmp[:])
}
d.mu.Unlock()
}
}
}
// find entry with specified index.
// Returns nil if not found.
func (d dataUpdateTrackerHistory) find(idx uint64) *dataUpdateFilter {
for _, f := range d {
if f.idx == idx {
return &f
}
}
return nil
}
// filterFrom will return a combined bloom filter.
func (d *dataUpdateTracker) filterFrom(ctx context.Context, oldest, newest uint64) *bloomFilterResponse {
bf := d.newBloomFilter()
bfr := bloomFilterResponse{
OldestIdx: oldest,
CurrentIdx: d.Current.idx,
Complete: true,
}
// Loop through each index requested.
for idx := oldest; idx <= newest; idx++ {
v := d.History.find(idx)
if v == nil {
if d.Current.idx == idx {
// Merge current.
err := bf.Merge(d.Current.bf.BloomFilter)
logger.LogIf(ctx, err)
if err != nil {
bfr.Complete = false
}
continue
}
bfr.Complete = false
bfr.OldestIdx = idx + 1
continue
}
err := bf.Merge(v.bf.BloomFilter)
if err != nil {
bfr.Complete = false
logger.LogIf(ctx, err)
continue
}
bfr.NewestIdx = idx
}
var dst bytes.Buffer
_, err := bf.WriteTo(&dst)
if err != nil {
logger.LogIf(ctx, err)
return nil
}
bfr.Filter = dst.Bytes()
return &bfr
}
// cycleFilter will cycle the bloom filter to start recording to index 'current' if not already.
// The response will contain a bloom filter starting at index 'oldest' up to,
// but not including, index 'current'.
// If 'current' is 0 the filter is not cycled; the response instead covers everything
// recorded up to and including the current index.
func (d *dataUpdateTracker) cycleFilter(ctx context.Context, oldest, current uint64) (*bloomFilterResponse, error) {
d.mu.Lock()
defer d.mu.Unlock()
if current == 0 {
if len(d.History) == 0 {
return d.filterFrom(ctx, d.Current.idx, d.Current.idx), nil
}
d.History.sort()
return d.filterFrom(ctx, d.History[len(d.History)-1].idx, d.Current.idx), nil
}
// Move current to history if new one requested
if d.Current.idx != current {
if d.debug {
logger.Info(color.Green("dataUpdateTracker:")+" cycle bloom filter: %v -> %v", d.Current.idx, current)
}
d.History = append(d.History, d.Current)
d.Current.idx = current
d.Current.bf = d.newBloomFilter()
select {
case d.save <- struct{}{}:
default:
}
}
d.History.removeOlderThan(oldest)
return d.filterFrom(ctx, oldest, current), nil
}
// splitPathDeterministic will split the provided relative path
// deterministically and return up to the first 3 elements of the path.
// Slash and dot prefixes are removed.
// Trailing slashes are removed.
// Returns 0 length if no parts are found after trimming.
func splitPathDeterministic(in string) []string {
split := strings.Split(in, SlashSeparator)
// Trim empty start/end
for len(split) > 0 {
if len(split[0]) > 0 && split[0] != "." {
break
}
split = split[1:]
}
for len(split) > 0 {
if len(split[len(split)-1]) > 0 {
break
}
split = split[:len(split)-1]
}
// Return up to 3 parts.
if len(split) > 3 {
split = split[:3]
}
return split
}
// bloomFilterRequest requests bloom filters.
// Current index will be updated to Current and entries back to Oldest are returned.
type bloomFilterRequest struct {
Oldest uint64
Current uint64
}
type bloomFilterResponse struct {
// Current index being written to.
CurrentIdx uint64
// Oldest index in the returned bloom filter.
OldestIdx uint64
// Newest Index in the returned bloom filter.
NewestIdx uint64
// Are all indexes between oldest and newest filled?
Complete bool
// Binary data of the bloom filter.
Filter []byte
}
// ObjectPathUpdated indicates a path has been updated.
// The function will never block.
func ObjectPathUpdated(s string) {
select {
case objectUpdatedCh <- s:
default:
}
}
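For scale: bloom.NewWithEstimates(dataUpdateTrackerEstItems, dataUpdateTrackerFP) sizes the filter for one million items at a deliberately high 0.99 false-positive target, which keeps it tiny; a false positive merely means a path is treated as possibly changed and re-crawled. A minimal round-trip with the same github.com/willf/bloom API the tracker relies on:

package main

import (
	"bytes"
	"fmt"

	"github.com/willf/bloom"
)

func main() {
	// Same estimates as the tracker constants above.
	bf := bloom.NewWithEstimates(1000000, 0.99)
	bf.Add([]byte("bucket/prefix/object"))

	fmt.Println(bf.Test([]byte("bucket/prefix/object"))) // true
	fmt.Println(bf.Test([]byte("bucket/other")))         // almost certainly false while the filter is this empty

	// Persist and restore, as startSaver/load do via serialize/deserialize.
	var buf bytes.Buffer
	if _, err := bf.WriteTo(&buf); err != nil {
		panic(err)
	}
	restored := &bloom.BloomFilter{}
	if _, err := restored.ReadFrom(&buf); err != nil {
		panic(err)
	}
	fmt.Println(restored.Test([]byte("bucket/prefix/object"))) // true
}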


@@ -0,0 +1,262 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path"
"path/filepath"
"sync"
"testing"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/cmd/logger/message/log"
)
type testLoggerI interface {
Helper()
Log(args ...interface{})
}
type testingLogger struct {
mu sync.Mutex
t testLoggerI
}
func (t *testingLogger) Send(entry interface{}, errKind string) error {
t.mu.Lock()
defer t.mu.Unlock()
if t.t == nil {
return nil
}
e, ok := entry.(log.Entry)
if !ok {
return fmt.Errorf("unexpected log entry structure %#v", entry)
}
t.t.Helper()
t.t.Log(e.Level, ":", errKind, e.Message)
return nil
}
func addTestingLogging(t testLoggerI) func() {
tl := &testingLogger{t: t}
logger.AddTarget(tl)
return func() {
tl.mu.Lock()
defer tl.mu.Unlock()
tl.t = nil
}
}
func TestDataUpdateTracker(t *testing.T) {
dut := newDataUpdateTracker()
// Change some defaults.
dut.debug = testing.Verbose()
dut.input = make(chan string)
dut.save = make(chan struct{})
defer addTestingLogging(t)()
dut.Current.bf = dut.newBloomFilter()
tmpDir, err := ioutil.TempDir("", "TestDataUpdateTracker")
if err != nil {
t.Fatal(err)
}
err = os.MkdirAll(filepath.Dir(filepath.Join(tmpDir, dataUpdateTrackerFilename)), os.ModePerm)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dut.start(ctx, tmpDir)
var tests = []struct {
in string
check []string // if not empty, check against these instead.
exist bool
}{
{
in: "bucket/directory/file.txt",
check: []string{"bucket", "bucket/", "/bucket", "bucket/directory", "bucket/directory/", "bucket/directory/file.txt", "/bucket/directory/file.txt"},
exist: true,
},
{
// System bucket
in: ".minio.sys/ignoreme/pls",
exist: false,
},
{
// Not a valid bucket
in: "./bucket/okfile.txt",
check: []string{"./bucket/okfile.txt", "/bucket/okfile.txt", "bucket/okfile.txt"},
exist: false,
},
{
// Not a valid bucket
in: "æ/okfile.txt",
check: []string{"æ/okfile.txt", "æ/okfile.txt", "æ"},
exist: false,
},
{
in: "/bucket2/okfile2.txt",
check: []string{"./bucket2/okfile2.txt", "/bucket2/okfile2.txt", "bucket2/okfile2.txt", "bucket2"},
exist: true,
},
{
in: "/bucket3/prefix/okfile2.txt",
check: []string{"./bucket3/prefix/okfile2.txt", "/bucket3/prefix/okfile2.txt", "bucket3/prefix/okfile2.txt", "bucket3/prefix", "bucket3"},
exist: true,
},
}
for _, tt := range tests {
t.Run(tt.in, func(t *testing.T) {
dut.input <- tt.in
dut.input <- "" // Sending empty string ensures the previous is added to filter.
dut.mu.Lock()
defer dut.mu.Unlock()
if len(tt.check) == 0 {
got := dut.Current.bf.containsDir(tt.in)
if got != tt.exist {
// For unlimited tests this could lead to false positives,
// but it should be deterministic.
t.Errorf("entry %q, got: %v, want %v", tt.in, got, tt.exist)
}
return
}
for _, check := range tt.check {
got := dut.Current.bf.containsDir(check)
if got != tt.exist {
// For unlimited tests this could lead to false positives,
// but it should be deterministic.
t.Errorf("entry %q, check: %q, got: %v, want %v", tt.in, check, got, tt.exist)
}
continue
}
})
}
// Cycle to history
_, err = dut.cycleFilter(ctx, 1, 2)
if err != nil {
t.Fatal(err)
}
dut.input <- "cycle2/file.txt"
dut.input <- "" // Sending empty string ensures the previous is added to filter.
tests = append(tests, struct {
in string
check []string
exist bool
}{in: "cycle2/file.txt", exist: true})
// Shut down
cancel()
<-dut.saveExited
if dut.current() != 2 {
t.Fatal("wrong current idx after save. want 2, got:", dut.current())
}
ctx, cancel = context.WithCancel(context.Background())
defer cancel()
// Reload...
dut = newDataUpdateTracker()
dut.start(ctx, tmpDir)
if dut.current() != 2 {
t.Fatal("current idx after load not preserved. want 2, got:", dut.current())
}
bfr2, err := dut.cycleFilter(ctx, 1, 3)
if err != nil {
t.Fatal(err)
}
if !bfr2.Complete {
t.Fatal("Wanted complete, didn't get it")
}
if bfr2.CurrentIdx != 3 {
t.Fatal("wanted index 3, got", bfr2.CurrentIdx)
}
if bfr2.OldestIdx != 1 {
t.Fatal("wanted oldest index 3, got", bfr2.OldestIdx)
}
// Rerun test with returned bfr2
bf := dut.newBloomFilter()
_, err = bf.ReadFrom(bytes.NewBuffer(bfr2.Filter))
if err != nil {
t.Fatal(err)
}
for _, tt := range tests {
t.Run(tt.in+"-reloaded", func(t *testing.T) {
if len(tt.check) == 0 {
got := bf.containsDir(tt.in)
if got != tt.exist {
// For unlimited tests this could lead to false positives,
// but it should be deterministic.
t.Errorf("entry %q, got: %v, want %v", tt.in, got, tt.exist)
}
return
}
for _, check := range tt.check {
got := bf.containsDir(check)
if got != tt.exist {
// For unlimited tests this could lead to false positives,
// but it should be deterministic.
t.Errorf("entry %q, check: %q, got: %v, want %v", tt.in, check, got, tt.exist)
}
continue
}
})
}
}
func BenchmarkDataUpdateTracker(b *testing.B) {
dut := newDataUpdateTracker()
// Change some defaults.
dut.debug = false
dut.input = make(chan string)
dut.save = make(chan struct{})
defer addTestingLogging(b)()
dut.Current.bf = dut.newBloomFilter()
// We do this unbuffered. This will very significantly reduce throughput, so this is a worst case.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go dut.startCollector(ctx)
input := make([]string, 1000)
rng := rand.New(rand.NewSource(0xabad1dea))
tmp := []string{"bucket", "aprefix", "nextprefixlevel", "maybeobjname", "evendeeper", "ok-one-morelevel", "final.object"}
for i := range input {
tmp := tmp[:1+rng.Intn(cap(tmp)-1)]
input[i] = path.Join(tmp...)
}
b.SetBytes(1)
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
dut.input <- input[rng.Intn(len(input))]
}
}
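The dut.input <- "" idiom in TestDataUpdateTracker works because the collector reads from an unbuffered channel: the second send cannot complete until the collector has looped back to its receive, which guarantees the first path has already been added to the filter. The same handshake in isolation:

package main

import "fmt"

func main() {
	in := make(chan string) // unbuffered, like dut.input in the test
	done := make(chan struct{})

	var processed []string
	go func() {
		for s := range in {
			if s != "" {
				processed = append(processed, s)
			}
		}
		close(done)
	}()

	in <- "bucket/object.txt"
	in <- "" // only accepted once the previous item has been consumed
	close(in)
	<-done
	fmt.Println(processed) // [bucket/object.txt]
}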

cmd/data-usage-cache.go (new file, 555 lines)

@@ -0,0 +1,555 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
"io"
"path"
"path/filepath"
"strings"
"time"
"github.com/cespare/xxhash/v2"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/hash"
"github.com/tinylib/msgp/msgp"
)
const dataUsageHashLen = 8
//go:generate msgp -file $GOFILE -unexported
// dataUsageHash is the hash type used.
type dataUsageHash uint64
// sizeHistogram is a size histogram.
type sizeHistogram [dataUsageBucketLen]uint64
//msgp:tuple dataUsageEntry
type dataUsageEntry struct {
// These fields do not include any children.
Size int64
Objects uint64
ObjSizes sizeHistogram
Children dataUsageHashMap
}
//msgp:ignore dataUsageEntryInfo
type dataUsageEntryInfo struct {
Name string
Parent string
Entry dataUsageEntry
}
type dataUsageCacheInfo struct {
// Name of the bucket. Also root element.
Name string
LastUpdate time.Time
NextCycle uint32
BloomFilter []byte `msg:"BloomFilter,omitempty"`
}
// merge other data usage entry into this, excluding children.
func (e *dataUsageEntry) merge(other dataUsageEntry) {
e.Objects += other.Objects
e.Size += other.Size
for i, v := range other.ObjSizes[:] {
e.ObjSizes[i] += v
}
}
// mod returns true if the hash mod cycles == cycle.
func (h dataUsageHash) mod(cycle uint32, cycles uint32) bool {
return uint32(h)%cycles == cycle%cycles
}
// addChildString will add a child based on its name.
// If it already exists it will not be added again.
func (e *dataUsageEntry) addChildString(name string) {
e.addChild(hashPath(name))
}
// addChild will add a child based on its hash.
// If it already exists it will not be added again.
func (e *dataUsageEntry) addChild(hash dataUsageHash) {
if _, ok := e.Children[hash]; ok {
return
}
if e.Children == nil {
e.Children = make(dataUsageHashMap, 1)
}
e.Children[hash] = struct{}{}
}
// find a path in the cache.
// Returns nil if not found.
func (d *dataUsageCache) find(path string) *dataUsageEntry {
due, ok := d.Cache[hashPath(path)]
if !ok {
return nil
}
return &due
}
// dui converts the flattened version of the path to DataUsageInfo.
// As a side effect d will be flattened, use a clone if this is not ok.
func (d *dataUsageCache) dui(path string, buckets []BucketInfo) DataUsageInfo {
e := d.find(path)
if e == nil {
return DataUsageInfo{LastUpdate: UTCNow()}
}
flat := d.flatten(*e)
return DataUsageInfo{
LastUpdate: d.Info.LastUpdate,
ObjectsCount: flat.Objects,
ObjectsTotalSize: uint64(flat.Size),
ObjectsSizesHistogram: flat.ObjSizes.asMap(),
BucketsCount: uint64(len(e.Children)),
BucketsSizes: d.pathSizes(buckets),
}
}
// replace will add or replace an entry in the cache.
// If a parent is specified it will be added to that if not already there.
// If the parent does not exist, it will be added.
func (d *dataUsageCache) replace(path, parent string, e dataUsageEntry) {
hash := hashPath(path)
if d.Cache == nil {
d.Cache = make(map[dataUsageHash]dataUsageEntry, 100)
}
d.Cache[hash] = e
if parent != "" {
phash := hashPath(parent)
p := d.Cache[phash]
p.addChild(hash)
d.Cache[phash] = p
}
}
// replaceHashed adds or replaces an entry in the cache based on its hash.
// If a parent is specified it will be added to that if not already there.
// If the parent does not exist, it will be added.
func (d *dataUsageCache) replaceHashed(hash dataUsageHash, parent *dataUsageHash, e dataUsageEntry) {
if d.Cache == nil {
d.Cache = make(map[dataUsageHash]dataUsageEntry, 100)
}
d.Cache[hash] = e
if parent != nil {
p := d.Cache[*parent]
p.addChild(hash)
d.Cache[*parent] = p
}
}
// copyWithChildren will copy entry with hash from src if it exists along with any children.
// If a parent is specified it will be added to that if not already there.
// If the parent does not exist, it will be added.
func (d *dataUsageCache) copyWithChildren(src *dataUsageCache, hash dataUsageHash, parent *dataUsageHash) {
if d.Cache == nil {
d.Cache = make(map[dataUsageHash]dataUsageEntry, 100)
}
e, ok := src.Cache[hash]
if !ok {
return
}
d.Cache[hash] = e
for ch := range e.Children {
if ch == hash {
logger.LogIf(GlobalContext, errors.New("dataUsageCache.copyWithChildren: Circular reference"))
return
}
d.copyWithChildren(src, ch, &hash)
}
if parent != nil {
p := d.Cache[*parent]
p.addChild(hash)
d.Cache[*parent] = p
}
}
// StringAll returns a detailed string representation of all entries in the cache.
func (d *dataUsageCache) StringAll() string {
s := fmt.Sprintf("info:%+v\n", d.Info)
for k, v := range d.Cache {
s += fmt.Sprintf("\t%v: %+v\n", k, v)
}
return strings.TrimSpace(s)
}
// insert the hash into dst.
// dst must be at least dataUsageHashLen bytes long.
func (h dataUsageHash) bytes(dst []byte) {
binary.LittleEndian.PutUint64(dst, uint64(h))
}
// String returns a human-readable representation of the hash.
func (h dataUsageHash) String() string {
return fmt.Sprintf("%x", uint64(h))
}
// flatten all children of the root into the root element and return it.
func (d *dataUsageCache) flatten(root dataUsageEntry) dataUsageEntry {
for id := range root.Children {
e := d.Cache[id]
if len(e.Children) > 0 {
e = d.flatten(e)
}
root.merge(e)
}
root.Children = nil
return root
}
// add a size to the histogram.
func (h *sizeHistogram) add(size int64) {
// Fetch the histogram interval corresponding
// to the passed object size.
for i, interval := range ObjectsHistogramIntervals {
if size >= interval.start && size <= interval.end {
h[i]++
break
}
}
}
// asMap returns the map as a map[string]uint64.
func (h *sizeHistogram) asMap() map[string]uint64 {
res := make(map[string]uint64, 7)
for i, count := range h {
res[ObjectsHistogramIntervals[i].name] = count
}
return res
}
// pathSizes returns the path sizes as a map.
func (d *dataUsageCache) pathSizes(buckets []BucketInfo) map[string]uint64 {
var dst = make(map[string]uint64, len(buckets))
for _, bucket := range buckets {
e := d.find(bucket.Name)
if e == nil {
continue
}
flat := d.flatten(*e)
dst[bucket.Name] = uint64(flat.Size)
}
return dst
}
// sizeRecursive returns the path as a flattened entry.
func (d *dataUsageCache) sizeRecursive(path string) *dataUsageEntry {
root := d.find(path)
if root == nil || len(root.Children) == 0 {
return root
}
flat := d.flatten(*root)
return &flat
}
// dataUsageCache contains a cache of data usage entries.
//msgp:ignore dataUsageCache
type dataUsageCache struct {
Info dataUsageCacheInfo
Cache map[dataUsageHash]dataUsageEntry
}
// root returns the root of the cache.
func (d *dataUsageCache) root() *dataUsageEntry {
return d.find(d.Info.Name)
}
// rootHash returns the root of the cache.
func (d *dataUsageCache) rootHash() dataUsageHash {
return hashPath(d.Info.Name)
}
// clone returns a copy of the cache with no references to the existing.
func (d *dataUsageCache) clone() dataUsageCache {
clone := dataUsageCache{
Info: d.Info,
Cache: make(map[dataUsageHash]dataUsageEntry, len(d.Cache)),
}
for k, v := range d.Cache {
clone.Cache[k] = v
}
return clone
}
// merge root of other into d.
// children of root will be flattened before being merged.
// Last update time will be set to the last updated.
func (d *dataUsageCache) merge(other dataUsageCache) {
existingRoot := d.root()
otherRoot := other.root()
if existingRoot == nil && otherRoot == nil {
return
}
if otherRoot == nil {
return
}
if existingRoot == nil {
*d = other.clone()
return
}
if other.Info.LastUpdate.After(d.Info.LastUpdate) {
d.Info.LastUpdate = other.Info.LastUpdate
}
existingRoot.merge(*otherRoot)
eHash := d.rootHash()
for key := range otherRoot.Children {
entry := other.Cache[key]
flat := other.flatten(entry)
existing := d.Cache[key]
// If not found, merging simply adds.
existing.merge(flat)
d.replaceHashed(key, &eHash, existing)
}
}
// load the cache content with name from minioMetaBackgroundOpsBucket.
// Only backend errors are returned as errors.
// If the object is not found or unable to deserialize d is cleared and nil error is returned.
func (d *dataUsageCache) load(ctx context.Context, store ObjectLayer, name string) error {
var buf bytes.Buffer
err := store.GetObject(ctx, dataUsageBucket, name, 0, -1, &buf, "", ObjectOptions{})
if err != nil {
if !isErrObjectNotFound(err) && !isErrBucketNotFound(err) {
return toObjectErr(err, dataUsageBucket, name)
}
*d = dataUsageCache{}
return nil
}
err = d.deserialize(buf.Bytes())
if err != nil {
*d = dataUsageCache{}
logger.LogIf(ctx, err)
}
return nil
}
// save the content of the cache to minioMetaBackgroundOpsBucket with the provided name.
func (d *dataUsageCache) save(ctx context.Context, store ObjectLayer, name string) error {
b := d.serialize()
size := int64(len(b))
r, err := hash.NewReader(bytes.NewReader(b), size, "", "", size, false)
if err != nil {
return err
}
_, err = store.PutObject(ctx,
dataUsageBucket,
name,
NewPutObjReader(r, nil, nil),
ObjectOptions{})
if isErrBucketNotFound(err) {
return nil
}
return err
}
// dataUsageCacheVer indicates the cache version.
// Bumping the cache version will drop data from previous versions
// and write new data with the new version.
const dataUsageCacheVer = 1
// serialize the contents of the cache.
func (d *dataUsageCache) serialize() []byte {
// Alloc pessimistically
// dataUsageCacheVer
due := dataUsageEntry{}
msgLen := 1
msgLen += d.Info.Msgsize()
// len(d.Cache)
msgLen += binary.MaxVarintLen64
// Hashes (one for key, assume 1 child/node)
msgLen += len(d.Cache) * dataUsageHashLen * 2
msgLen += len(d.Cache) * due.Msgsize()
// Create destination buffer...
dst := make([]byte, 0, msgLen)
var n int
tmp := make([]byte, 1024)
// byte: version.
dst = append(dst, dataUsageCacheVer)
// Info...
dst, err := d.Info.MarshalMsg(dst)
if err != nil {
panic(err)
}
n = binary.PutUvarint(tmp, uint64(len(d.Cache)))
dst = append(dst, tmp[:n]...)
for k, v := range d.Cache {
// Put key
binary.LittleEndian.PutUint64(tmp[:dataUsageHashLen], uint64(k))
dst = append(dst, tmp[:8]...)
tmp, err = v.MarshalMsg(tmp[:0])
if err != nil {
panic(err)
}
// key, value pairs.
dst = append(dst, tmp...)
}
return dst
}
// deserialize the supplied byte slice into the cache.
func (d *dataUsageCache) deserialize(b []byte) error {
if len(b) < 1 {
return io.ErrUnexpectedEOF
}
switch b[0] {
case 1:
default:
return fmt.Errorf("dataUsageCache: unknown version: %d", int(b[0]))
}
b = b[1:]
// Info...
b, err := d.Info.UnmarshalMsg(b)
if err != nil {
return err
}
cacheLen, n := binary.Uvarint(b)
if n <= 0 {
return fmt.Errorf("dataUsageCache: reading cachelen, n <= 0 ")
}
b = b[n:]
d.Cache = make(map[dataUsageHash]dataUsageEntry, cacheLen)
for i := 0; i < int(cacheLen); i++ {
if len(b) <= dataUsageHashLen {
return io.ErrUnexpectedEOF
}
k := binary.LittleEndian.Uint64(b[:dataUsageHashLen])
b = b[dataUsageHashLen:]
var v dataUsageEntry
b, err = v.UnmarshalMsg(b)
if err != nil {
return err
}
d.Cache[dataUsageHash(k)] = v
}
return nil
}
// Trim this from start+end of hashes.
var hashPathCutSet = dataUsageRoot
func init() {
if dataUsageRoot != string(filepath.Separator) {
hashPathCutSet = dataUsageRoot + string(filepath.Separator)
}
}
// hashPath calculates a hash of the provided string.
func hashPath(data string) dataUsageHash {
if data != dataUsageRoot {
data = strings.Trim(data, hashPathCutSet)
}
data = path.Clean(data)
return dataUsageHash(xxhash.Sum64String(data))
}
//msgp:ignore dataUsageEntryInfo
type dataUsageHashMap map[dataUsageHash]struct{}
// MarshalMsg implements msgp.Marshaler
func (d dataUsageHashMap) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, d.Msgsize())
// Write bin header manually
const mbin32 uint8 = 0xc6
sz := uint32(len(d)) * dataUsageHashLen
o = append(o, mbin32, byte(sz>>24), byte(sz>>16), byte(sz>>8), byte(sz))
var tmp [dataUsageHashLen]byte
for k := range d {
binary.LittleEndian.PutUint64(tmp[:], uint64(k))
o = append(o, tmp[:]...)
}
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (d dataUsageHashMap) Msgsize() (s int) {
s = 5 + len(d)*dataUsageHashLen
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (d *dataUsageHashMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
var hashes []byte
hashes, bts, err = msgp.ReadBytesZC(bts)
if err != nil {
err = msgp.WrapError(err, "dataUsageHashMap")
return
}
var dst = make(dataUsageHashMap, len(hashes)/dataUsageHashLen)
for len(hashes) >= dataUsageHashLen {
dst[dataUsageHash(binary.LittleEndian.Uint64(hashes[:dataUsageHashLen]))] = struct{}{}
hashes = hashes[dataUsageHashLen:]
}
*d = dst
o = bts
return
}
func (d *dataUsageHashMap) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0001 uint32
zb0001, err = dc.ReadBytesHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var dst = make(dataUsageHashMap, zb0001)
var tmp [8]byte
for i := uint32(0); i < zb0001; i++ {
_, err = io.ReadFull(dc, tmp[:])
if err != nil {
err = msgp.WrapError(err, "dataUsageHashMap")
return
}
dst[dataUsageHash(binary.LittleEndian.Uint64(tmp[:]))] = struct{}{}
}
return nil
}
func (d dataUsageHashMap) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteBytesHeader(uint32(len(d)) * dataUsageHashLen)
if err != nil {
err = msgp.WrapError(err)
return
}
var tmp [dataUsageHashLen]byte
for k := range d {
binary.LittleEndian.PutUint64(tmp[:], uint64(k))
_, err = en.Write(tmp[:])
if err != nil {
err = msgp.WrapError(err)
return
}
}
return nil
}
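hashPath trims the data-usage root from both ends, runs path.Clean, and hashes the result with xxHash, so different spellings of the same path collapse to one 64-bit key. A stand-alone equivalent (assuming "/" as the cut set; the real cut set is derived from dataUsageRoot):

package main

import (
	"fmt"
	"path"
	"strings"

	"github.com/cespare/xxhash/v2"
)

// hashPathDemo mirrors hashPath for a "/" root: trim, clean, hash.
func hashPathDemo(data string) uint64 {
	data = path.Clean(strings.Trim(data, "/"))
	return xxhash.Sum64String(data)
}

func main() {
	a := hashPathDemo("/bucket/prefix/object")
	b := hashPathDemo("bucket/prefix/object/")
	fmt.Println(a == b) // true: both normalize to "bucket/prefix/object"
}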

cmd/data-usage-cache_gen.go (new file, 501 lines)

@@ -0,0 +1,501 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *dataUsageCacheInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Name":
z.Name, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "LastUpdate":
z.LastUpdate, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "NextCycle":
z.NextCycle, err = dc.ReadUint32()
if err != nil {
err = msgp.WrapError(err, "NextCycle")
return
}
case "BloomFilter":
z.BloomFilter, err = dc.ReadBytes(z.BloomFilter)
if err != nil {
err = msgp.WrapError(err, "BloomFilter")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *dataUsageCacheInfo) EncodeMsg(en *msgp.Writer) (err error) {
// omitempty: check for empty values
zb0001Len := uint32(4)
var zb0001Mask uint8 /* 4 bits */
if z.BloomFilter == nil {
zb0001Len--
zb0001Mask |= 0x8
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
if zb0001Len == 0 {
return
}
// write "Name"
err = en.Append(0xa4, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Name)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
// write "LastUpdate"
err = en.Append(0xaa, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.LastUpdate)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
// write "NextCycle"
err = en.Append(0xa9, 0x4e, 0x65, 0x78, 0x74, 0x43, 0x79, 0x63, 0x6c, 0x65)
if err != nil {
return
}
err = en.WriteUint32(z.NextCycle)
if err != nil {
err = msgp.WrapError(err, "NextCycle")
return
}
if (zb0001Mask & 0x8) == 0 { // if not empty
// write "BloomFilter"
err = en.Append(0xab, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteBytes(z.BloomFilter)
if err != nil {
err = msgp.WrapError(err, "BloomFilter")
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *dataUsageCacheInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
zb0001Len := uint32(4)
var zb0001Mask uint8 /* 4 bits */
if z.BloomFilter == nil {
zb0001Len--
zb0001Mask |= 0x8
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
if zb0001Len == 0 {
return
}
// string "Name"
o = append(o, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name)
// string "LastUpdate"
o = append(o, 0xaa, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
o = msgp.AppendTime(o, z.LastUpdate)
// string "NextCycle"
o = append(o, 0xa9, 0x4e, 0x65, 0x78, 0x74, 0x43, 0x79, 0x63, 0x6c, 0x65)
o = msgp.AppendUint32(o, z.NextCycle)
if (zb0001Mask & 0x8) == 0 { // if not empty
// string "BloomFilter"
o = append(o, 0xab, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72)
o = msgp.AppendBytes(o, z.BloomFilter)
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *dataUsageCacheInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Name":
z.Name, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "LastUpdate":
z.LastUpdate, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "NextCycle":
z.NextCycle, bts, err = msgp.ReadUint32Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "NextCycle")
return
}
case "BloomFilter":
z.BloomFilter, bts, err = msgp.ReadBytesBytes(bts, z.BloomFilter)
if err != nil {
err = msgp.WrapError(err, "BloomFilter")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *dataUsageCacheInfo) Msgsize() (s int) {
s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 11 + msgp.TimeSize + 10 + msgp.Uint32Size + 12 + msgp.BytesPrefixSize + len(z.BloomFilter)
return
}
// DecodeMsg implements msgp.Decodable
func (z *dataUsageEntry) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0001 uint32
zb0001, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
if zb0001 != 4 {
err = msgp.ArrayError{Wanted: 4, Got: zb0001}
return
}
z.Size, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
z.Objects, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "ObjSizes")
return
}
if zb0002 != uint32(dataUsageBucketLen) {
err = msgp.ArrayError{Wanted: uint32(dataUsageBucketLen), Got: zb0002}
return
}
for za0001 := range z.ObjSizes {
z.ObjSizes[za0001], err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjSizes", za0001)
return
}
}
err = z.Children.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Children")
return
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *dataUsageEntry) EncodeMsg(en *msgp.Writer) (err error) {
// array header, size 4
err = en.Append(0x94)
if err != nil {
return
}
err = en.WriteInt64(z.Size)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
err = en.WriteUint64(z.Objects)
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
err = en.WriteArrayHeader(uint32(dataUsageBucketLen))
if err != nil {
err = msgp.WrapError(err, "ObjSizes")
return
}
for za0001 := range z.ObjSizes {
err = en.WriteUint64(z.ObjSizes[za0001])
if err != nil {
err = msgp.WrapError(err, "ObjSizes", za0001)
return
}
}
err = z.Children.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Children")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *dataUsageEntry) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// array header, size 4
o = append(o, 0x94)
o = msgp.AppendInt64(o, z.Size)
o = msgp.AppendUint64(o, z.Objects)
o = msgp.AppendArrayHeader(o, uint32(dataUsageBucketLen))
for za0001 := range z.ObjSizes {
o = msgp.AppendUint64(o, z.ObjSizes[za0001])
}
o, err = z.Children.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Children")
return
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *dataUsageEntry) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0001 uint32
zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
if zb0001 != 4 {
err = msgp.ArrayError{Wanted: 4, Got: zb0001}
return
}
z.Size, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
z.Objects, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjSizes")
return
}
if zb0002 != uint32(dataUsageBucketLen) {
err = msgp.ArrayError{Wanted: uint32(dataUsageBucketLen), Got: zb0002}
return
}
for za0001 := range z.ObjSizes {
z.ObjSizes[za0001], bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjSizes", za0001)
return
}
}
bts, err = z.Children.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Children")
return
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *dataUsageEntry) Msgsize() (s int) {
s = 1 + msgp.Int64Size + msgp.Uint64Size + msgp.ArrayHeaderSize + (dataUsageBucketLen * (msgp.Uint64Size)) + z.Children.Msgsize()
return
}
// DecodeMsg implements msgp.Decodable
func (z *dataUsageHash) DecodeMsg(dc *msgp.Reader) (err error) {
{
var zb0001 uint64
zb0001, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = dataUsageHash(zb0001)
}
return
}
// EncodeMsg implements msgp.Encodable
func (z dataUsageHash) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteUint64(uint64(z))
if err != nil {
err = msgp.WrapError(err)
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z dataUsageHash) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendUint64(o, uint64(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *dataUsageHash) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 uint64
zb0001, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = dataUsageHash(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z dataUsageHash) Msgsize() (s int) {
s = msgp.Uint64Size
return
}
// DecodeMsg implements msgp.Decodable
func (z *sizeHistogram) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0001 uint32
zb0001, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
if zb0001 != uint32(dataUsageBucketLen) {
err = msgp.ArrayError{Wanted: uint32(dataUsageBucketLen), Got: zb0001}
return
}
for za0001 := range z {
z[za0001], err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, za0001)
return
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *sizeHistogram) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteArrayHeader(uint32(dataUsageBucketLen))
if err != nil {
err = msgp.WrapError(err)
return
}
for za0001 := range z {
err = en.WriteUint64(z[za0001])
if err != nil {
err = msgp.WrapError(err, za0001)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *sizeHistogram) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendArrayHeader(o, uint32(dataUsageBucketLen))
for za0001 := range z {
o = msgp.AppendUint64(o, z[za0001])
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *sizeHistogram) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0001 uint32
zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
if zb0001 != uint32(dataUsageBucketLen) {
err = msgp.ArrayError{Wanted: uint32(dataUsageBucketLen), Got: zb0001}
return
}
for za0001 := range z {
z[za0001], bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, za0001)
return
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *sizeHistogram) Msgsize() (s int) {
s = msgp.ArrayHeaderSize + (dataUsageBucketLen * (msgp.Uint64Size))
return
}
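A quick round-trip sketch of how the generated methods above are typically exercised (a minimal sketch using the types shown here; the field values are placeholders):

// Round-trip sketch for the generated msgp methods above.
// Values are placeholders; error handling mirrors the generated API.
func exampleDataUsageEntryRoundTrip() error {
	var e dataUsageEntry
	e.Size = 1024
	e.Objects = 2
	// MarshalMsg appends the fixed 4-element msgp array to the slice.
	b, err := e.MarshalMsg(nil)
	if err != nil {
		return err
	}
	var out dataUsageEntry
	// UnmarshalMsg returns any unconsumed trailing bytes.
	if _, err = out.UnmarshalMsg(b); err != nil {
		return err
	}
	return nil
}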


@@ -0,0 +1,349 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshaldataUsageCacheInfo(t *testing.T) {
v := dataUsageCacheInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgdataUsageCacheInfo(b *testing.B) {
v := dataUsageCacheInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgdataUsageCacheInfo(b *testing.B) {
v := dataUsageCacheInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshaldataUsageCacheInfo(b *testing.B) {
v := dataUsageCacheInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodedataUsageCacheInfo(t *testing.T) {
v := dataUsageCacheInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodedataUsageCacheInfo Msgsize() is inaccurate")
}
vn := dataUsageCacheInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodedataUsageCacheInfo(b *testing.B) {
v := dataUsageCacheInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodedataUsageCacheInfo(b *testing.B) {
v := dataUsageCacheInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshaldataUsageEntry(t *testing.T) {
v := dataUsageEntry{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgdataUsageEntry(b *testing.B) {
v := dataUsageEntry{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgdataUsageEntry(b *testing.B) {
v := dataUsageEntry{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshaldataUsageEntry(b *testing.B) {
v := dataUsageEntry{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodedataUsageEntry(t *testing.T) {
v := dataUsageEntry{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodedataUsageEntry Msgsize() is inaccurate")
}
vn := dataUsageEntry{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodedataUsageEntry(b *testing.B) {
v := dataUsageEntry{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodedataUsageEntry(b *testing.B) {
v := dataUsageEntry{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalsizeHistogram(t *testing.T) {
v := sizeHistogram{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgsizeHistogram(b *testing.B) {
v := sizeHistogram{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgsizeHistogram(b *testing.B) {
v := sizeHistogram{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalsizeHistogram(b *testing.B) {
v := sizeHistogram{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodesizeHistogram(t *testing.T) {
v := sizeHistogram{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodesizeHistogram Msgsize() is inaccurate")
}
vn := sizeHistogram{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodesizeHistogram(b *testing.B) {
v := sizeHistogram{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodesizeHistogram(b *testing.B) {
v := sizeHistogram{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
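Since these are standard Go tests and benchmarks, the usual tooling applies; for example (paths assume the repository layout), go test -run 'dataUsage' ./cmd exercises the round-trip tests, and go test -run '^$' -bench 'dataUsage' -benchmem ./cmd runs only the benchmarks.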


@@ -19,127 +19,132 @@ package cmd
import (
"bytes"
"context"
"encoding/binary"
"encoding/json"
"errors"
"os"
"path/filepath"
"path"
"strconv"
"time"
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/color"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/hash"
"github.com/willf/bloom"
)
const (
dataUsageObjName = "data-usage"
dataUsageCrawlInterval = 12 * time.Hour
dataUsageCrawlConf = "MINIO_DISK_USAGE_CRAWL"
envDataUsageCrawlConf = "MINIO_DISK_USAGE_CRAWL_ENABLE"
envDataUsageCrawlDelay = "MINIO_DISK_USAGE_CRAWL_DELAY"
envDataUsageCrawlDebug = "MINIO_DISK_USAGE_CRAWL_DEBUG"
dataUsageRoot = SlashSeparator
dataUsageBucket = minioMetaBucket + SlashSeparator + bucketMetaPrefix
dataUsageObjName = ".usage.json"
dataUsageCacheName = ".usage-cache.bin"
dataUsageBloomName = ".bloomcycle.bin"
dataUsageSleepPerFolder = 1 * time.Millisecond
dataUsageSleepDefMult = 10.0
dataUsageUpdateDirCycles = 16
dataUsageStartDelay = 5 * time.Minute // Time to wait on startup and between cycles.
)
func initDataUsageStats() {
dataUsageEnabled, err := config.ParseBool(env.Get(dataUsageCrawlConf, config.EnableOn))
if err == nil && !dataUsageEnabled {
return
// initDataUsageStats will start the crawler unless disabled.
func initDataUsageStats(ctx context.Context, objAPI ObjectLayer) {
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
go runDataUsageInfo(ctx, objAPI)
}
go runDataUsageInfoUpdateRoutine()
}
func runDataUsageInfoUpdateRoutine() {
// Wait until the object layer is ready
var objAPI ObjectLayer
for {
objAPI = newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
time.Sleep(time.Second)
continue
}
break
}
runDataUsageInfo(context.Background(), objAPI, GlobalServiceDoneCh)
}
// timeToCrawl returns the duration until the next crawl should occur;
// this is determined by checking the LastUpdate time.
func timeToCrawl(ctx context.Context, objAPI ObjectLayer) time.Duration {
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objAPI)
func runDataUsageInfo(ctx context.Context, objAPI ObjectLayer) {
// Load current bloom cycle
nextBloomCycle := intDataUpdateTracker.current() + 1
var buf bytes.Buffer
err := objAPI.GetObject(ctx, dataUsageBucket, dataUsageBloomName, 0, -1, &buf, "", ObjectOptions{})
if err != nil {
// Upon an error, wait about 10
// seconds before starting the crawler.
return 10 * time.Second
if !isErrObjectNotFound(err) && !isErrBucketNotFound(err) {
logger.LogIf(ctx, err)
}
} else {
if buf.Len() == 8 {
nextBloomCycle = binary.LittleEndian.Uint64(buf.Bytes())
}
}
// The file doesn't exist when LastUpdate is zero,
// so we have never crawled; start the crawl right away.
if dataUsageInfo.LastUpdate.IsZero() {
return 1 * time.Second
for {
select {
case <-ctx.Done():
return
case <-time.NewTimer(dataUsageStartDelay).C:
// Wait before starting next cycle and wait on startup.
results := make(chan DataUsageInfo, 1)
go storeDataUsageInBackend(ctx, objAPI, results)
bf, err := globalNotificationSys.updateBloomFilter(ctx, nextBloomCycle)
logger.LogIf(ctx, err)
err = objAPI.CrawlAndGetDataUsage(ctx, bf, results)
close(results)
logger.LogIf(ctx, err)
if err == nil {
// Store new cycle...
nextBloomCycle++
if nextBloomCycle%dataUpdateTrackerResetEvery == 0 {
if intDataUpdateTracker.debug {
logger.Info(color.Green("runDataUsageInfo:") + " Resetting bloom filter for next runs.")
}
nextBloomCycle++
}
var tmp [8]byte
binary.LittleEndian.PutUint64(tmp[:], nextBloomCycle)
r, err := hash.NewReader(bytes.NewReader(tmp[:]), int64(len(tmp)), "", "", int64(len(tmp)), false)
if err != nil {
logger.LogIf(ctx, err)
continue
}
_, err = objAPI.PutObject(ctx, dataUsageBucket, dataUsageBloomName, NewPutObjReader(r, nil, nil), ObjectOptions{})
if !isErrBucketNotFound(err) {
logger.LogIf(ctx, err)
}
}
}
}
waitDuration := dataUsageInfo.LastUpdate.Sub(UTCNow())
if waitDuration > dataUsageCrawlInterval {
// Waited long enough; start the crawl in 1 second.
return 1 * time.Second
}
// No crawling needed; ask the routine to wait for
// the crawl interval (12h) minus the time elapsed
// since the last update.
return dataUsageCrawlInterval - waitDuration
}
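runDataUsageInfo above persists the bloom cycle counter as a single 8-byte little-endian object (dataUsageBloomName). A minimal sketch of just that encoding, using only encoding/binary (the object-store reads and writes are elided; the helper names are hypothetical):

// encodeCycle/decodeCycle sketch the 8-byte cycle persistence used by
// runDataUsageInfo; hypothetical helper names for illustration.
func encodeCycle(cycle uint64) []byte {
	var tmp [8]byte
	binary.LittleEndian.PutUint64(tmp[:], cycle)
	return tmp[:]
}

func decodeCycle(b []byte) (uint64, bool) {
	// The caller above only trusts the stored value when exactly
	// 8 bytes come back from the backend.
	if len(b) != 8 {
		return 0, false
	}
	return binary.LittleEndian.Uint64(b), true
}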
var dataUsageLockTimeout = lifecycleLockTimeout
func runDataUsageInfo(ctx context.Context, objAPI ObjectLayer, endCh <-chan struct{}) {
locker := objAPI.NewNSLock(ctx, minioMetaBucket, "leader-data-usage-info")
for {
err := locker.GetLock(dataUsageLockTimeout)
// storeDataUsageInBackend will store all objects sent on the gui channel until closed.
func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, gui <-chan DataUsageInfo) {
for dataUsageInfo := range gui {
dataUsageJSON, err := json.Marshal(dataUsageInfo)
if err != nil {
time.Sleep(5 * time.Minute)
logger.LogIf(ctx, err)
continue
}
size := int64(len(dataUsageJSON))
r, err := hash.NewReader(bytes.NewReader(dataUsageJSON), size, "", "", size, false)
if err != nil {
logger.LogIf(ctx, err)
continue
}
// Break without unlocking, this node will acquire
// data usage calculator role for its lifetime.
break
}
for {
wait := timeToCrawl(ctx, objAPI)
select {
case <-endCh:
locker.Unlock()
return
case <-time.NewTimer(wait).C:
// Crawl only when no previous crawl has occurred,
// or its been too long since last crawl.
err := storeDataUsageInBackend(ctx, objAPI, objAPI.CrawlAndGetDataUsage(ctx, endCh))
_, err = objAPI.PutObject(ctx, dataUsageBucket, dataUsageObjName, NewPutObjReader(r, nil, nil), ObjectOptions{})
if !isErrBucketNotFound(err) {
logger.LogIf(ctx, err)
}
}
}
func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dataUsageInfo DataUsageInfo) error {
dataUsageJSON, err := json.Marshal(dataUsageInfo)
if err != nil {
return err
}
size := int64(len(dataUsageJSON))
r, err := hash.NewReader(bytes.NewReader(dataUsageJSON), size, "", "", size, false)
if err != nil {
return err
}
_, err = objAPI.PutObject(ctx, minioMetaBackgroundOpsBucket, dataUsageObjName, NewPutObjReader(r, nil, nil), ObjectOptions{})
return err
}
func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsageInfo, error) {
var dataUsageInfoJSON bytes.Buffer
err := objAPI.GetObject(ctx, minioMetaBackgroundOpsBucket, dataUsageObjName, 0, -1, &dataUsageInfoJSON, "", ObjectOptions{})
err := objAPI.GetObject(ctx, dataUsageBucket, dataUsageObjName, 0, -1, &dataUsageInfoJSON, "", ObjectOptions{})
if err != nil {
if isErrObjectNotFound(err) {
if isErrObjectNotFound(err) || isErrBucketNotFound(err) {
return DataUsageInfo{}, nil
}
return DataUsageInfo{}, toObjectErr(err, minioMetaBackgroundOpsBucket, dataUsageObjName)
return DataUsageInfo{}, toObjectErr(err, dataUsageBucket, dataUsageObjName)
}
var dataUsageInfo DataUsageInfo
@@ -160,52 +165,345 @@ type Item struct {
type getSizeFn func(item Item) (int64, error)
func updateUsage(basePath string, doneCh <-chan struct{}, waitForLowActiveIO func(), getSize getSizeFn) DataUsageInfo {
var dataUsageInfo = DataUsageInfo{
BucketsSizes: make(map[string]uint64),
ObjectsSizesHistogram: make(map[string]uint64),
type cachedFolder struct {
name string
parent *dataUsageHash
}
type folderScanner struct {
root string
getSize getSizeFn
oldCache dataUsageCache
newCache dataUsageCache
withFilter *bloomFilter
waitForLowActiveIO func()
dataUsageCrawlMult float64
dataUsageCrawlDebug bool
newFolders []cachedFolder
existingFolders []cachedFolder
}
// sleepDuration multiplies the duration d by x and sleeps if the result is more than 100 microseconds.
// The sleep is limited to at most 1 second.
func sleepDuration(d time.Duration, x float64) {
// Don't sleep for a really small amount of time
if d := time.Duration(float64(d) * x); d > time.Microsecond*100 {
if d > time.Second {
d = time.Second
}
time.Sleep(d)
}
}
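A few illustrative data points for sleepDuration's scaling and clamping (not part of the original code):

// sleepDuration examples, assuming the 100µs floor and 1s ceiling above:
//   d = 2ms,   x = 10.0 -> sleeps 20ms (simple scaling)
//   d = 5µs,   x = 10.0 -> 50µs is below the 100µs floor, so no sleep
//   d = 300ms, x = 10.0 -> 3s is clamped to the 1s ceiling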
// scanQueuedLevels will scan the provided folders.
// Files found in the folders will be added to f.newCache.
// If final is set, folders are put into f.newFolders or f.existingFolders.
// If final is not set, the folders found are returned from the function.
func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFolder, final bool) ([]cachedFolder, error) {
var nextFolders []cachedFolder
done := ctx.Done()
for _, folder := range folders {
select {
case <-done:
return nil, ctx.Err()
default:
}
thisHash := hashPath(folder.name)
if _, ok := f.oldCache.Cache[thisHash]; f.withFilter != nil && ok {
// If folder isn't in filter and we have data, skip it completely.
if folder.name != dataUsageRoot && !f.withFilter.containsDir(folder.name) {
f.newCache.copyWithChildren(&f.oldCache, thisHash, folder.parent)
if f.dataUsageCrawlDebug {
logger.Info(color.Green("data-usage:")+" Skipping non-updated folder: %v", folder.name)
}
continue
}
}
f.waitForLowActiveIO()
sleepDuration(dataUsageSleepPerFolder, f.dataUsageCrawlMult)
cache := dataUsageEntry{}
err := readDirFn(path.Join(f.root, folder.name), func(entName string, typ os.FileMode) error {
// Parse
entName = path.Clean(path.Join(folder.name, entName))
bucket, _ := path2BucketObjectWithBasePath(f.root, entName)
if bucket == "" {
if f.dataUsageCrawlDebug {
logger.Info(color.Green("data-usage:")+" no bucket (%s,%s)", f.root, entName)
}
return nil
}
if isReservedOrInvalidBucket(bucket, false) {
if f.dataUsageCrawlDebug {
logger.Info(color.Green("data-usage:")+" invalid bucket: %v, entry: %v", bucket, entName)
}
return nil
}
select {
case <-done:
return ctx.Err()
default:
}
if typ&os.ModeDir != 0 {
h := hashPath(entName)
_, exists := f.oldCache.Cache[h]
cache.addChildString(entName)
this := cachedFolder{name: entName, parent: &thisHash}
cache.addChild(h)
if final {
if exists {
f.existingFolders = append(f.existingFolders, this)
} else {
f.newFolders = append(f.newFolders, this)
}
} else {
nextFolders = append(nextFolders, this)
}
return nil
}
f.waitForLowActiveIO()
// Dynamic time delay.
t := UTCNow()
// Get file size, ignore errors.
size, err := f.getSize(Item{Path: path.Join(f.root, entName), Typ: typ})
sleepDuration(time.Since(t), f.dataUsageCrawlMult)
if err == errSkipFile {
return nil
}
logger.LogIf(ctx, err)
cache.Size += size
cache.Objects++
cache.ObjSizes.add(size)
return nil
})
if err != nil {
return nil, err
}
f.newCache.replaceHashed(thisHash, folder.parent, cache)
}
return nextFolders, nil
}
// deepScanFolder will deep scan a folder and return the size if no error occurs.
func (f *folderScanner) deepScanFolder(ctx context.Context, folder string) (*dataUsageEntry, error) {
var cache dataUsageEntry
done := ctx.Done()
var addDir func(entName string, typ os.FileMode) error
var dirStack = []string{f.root, folder}
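// dirStack holds the path components of the directory currently being
// walked; path.Join(dirStack...) yields the full on-disk path of each entry.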
addDir = func(entName string, typ os.FileMode) error {
select {
case <-done:
return ctx.Err()
default:
}
f.waitForLowActiveIO()
if typ&os.ModeDir != 0 {
dirStack = append(dirStack, entName)
err := readDirFn(path.Join(dirStack...), addDir)
dirStack = dirStack[:len(dirStack)-1]
sleepDuration(dataUsageSleepPerFolder, f.dataUsageCrawlMult)
return err
}
// Dynamic time delay.
t := UTCNow()
// Get file size, ignore errors.
dirStack = append(dirStack, entName)
fileName := path.Join(dirStack...)
dirStack = dirStack[:len(dirStack)-1]
size, err := f.getSize(Item{Path: fileName, Typ: typ})
// Don't sleep for a really small amount of time
sleepDuration(time.Since(t), f.dataUsageCrawlMult)
if err == errSkipFile {
return nil
}
logger.LogIf(ctx, err)
cache.Size += size
cache.Objects++
cache.ObjSizes.add(size)
return nil
}
err := readDirFn(path.Join(dirStack...), addDir)
if err != nil {
return nil, err
}
return &cache, nil
}
// updateUsage will crawl the basepath+cache.Info.Name and return an updated cache.
// The returned cache will always be valid, but may not be updated from the existing.
// Before each operation, waitForLowActiveIO is called, which can be used to temporarily halt the crawler.
// If the supplied context is canceled the function will return at the first chance.
func updateUsage(ctx context.Context, basePath string, cache dataUsageCache, waitForLowActiveIO func(), getSize getSizeFn) (dataUsageCache, error) {
t := UTCNow()
dataUsageDebug := env.Get(envDataUsageCrawlDebug, config.EnableOff) == config.EnableOn
logPrefix := color.Green("data-usage: ")
logSuffix := color.Blue(" - %v + %v", basePath, cache.Info.Name)
if dataUsageDebug {
defer func() {
logger.Info(logPrefix+" Crawl time: %v"+logSuffix, time.Since(t))
}()
}
fastWalk(basePath, 1, doneCh, func(path string, typ os.FileMode) error {
// Wait for I/O to go down.
waitForLowActiveIO()
if cache.Info.Name == "" {
cache.Info.Name = dataUsageRoot
}
bucket, entry := path2BucketObjectWithBasePath(basePath, path)
if bucket == "" {
return nil
}
delayMult, err := strconv.ParseFloat(env.Get(envDataUsageCrawlDelay, "10.0"), 64)
if err != nil {
logger.LogIf(ctx, err)
delayMult = dataUsageSleepDefMult
}
if isReservedOrInvalidBucket(bucket, false) {
return filepath.SkipDir
}
s := folderScanner{
root: basePath,
getSize: getSize,
oldCache: cache,
newCache: dataUsageCache{Info: cache.Info},
waitForLowActiveIO: waitForLowActiveIO,
newFolders: nil,
existingFolders: nil,
dataUsageCrawlMult: delayMult,
dataUsageCrawlDebug: dataUsageDebug,
}
if entry == "" && typ&os.ModeDir != 0 {
dataUsageInfo.BucketsCount++
dataUsageInfo.BucketsSizes[bucket] = 0
return nil
}
if typ&os.ModeDir != 0 {
return nil
}
t := time.Now()
size, err := getSize(Item{path, typ})
// Use the response time of the getSize call to guess system load.
// Sleep equivalent time.
if d := time.Since(t); d > 100*time.Microsecond {
time.Sleep(d)
}
if len(cache.Info.BloomFilter) > 0 {
s.withFilter = &bloomFilter{BloomFilter: &bloom.BloomFilter{}}
_, err := s.withFilter.ReadFrom(bytes.NewBuffer(cache.Info.BloomFilter))
if err != nil {
return errSkipFile
logger.LogIf(ctx, err, logPrefix+"Error reading bloom filter")
s.withFilter = nil
}
}
if s.dataUsageCrawlDebug {
logger.Info(logPrefix+"Start crawling. Bloom filter: %v"+logSuffix, s.withFilter != nil)
}
done := ctx.Done()
var flattenLevels = 3
// If we are scanning inside a bucket reduce depth by 1.
if cache.Info.Name != dataUsageRoot {
flattenLevels--
}
if s.dataUsageCrawlDebug {
logger.Info(logPrefix+"Cycle: %v, Entries: %v"+logSuffix, cache.Info.NextCycle, len(cache.Cache))
}
// Always scan flattenLevels deep. Cache root is level 0.
todo := []cachedFolder{{name: cache.Info.Name}}
for i := 0; i < flattenLevels; i++ {
if s.dataUsageCrawlDebug {
logger.Info(logPrefix+"Level %v, scanning %v directories."+logSuffix, i, len(todo))
}
select {
case <-done:
return cache, ctx.Err()
default:
}
var err error
todo, err = s.scanQueuedLevels(ctx, todo, i == flattenLevels-1)
if err != nil {
// No useful information...
return cache, err
}
}
if s.dataUsageCrawlDebug {
logger.Info(logPrefix+"New folders: %v"+logSuffix, s.newFolders)
}
// Add new folders first
for _, folder := range s.newFolders {
select {
case <-done:
return s.newCache, ctx.Err()
default:
}
du, err := s.deepScanFolder(ctx, folder.name)
if err != nil {
logger.LogIf(ctx, err)
continue
}
if du == nil {
logger.Info(logPrefix + "no disk usage provided" + logSuffix)
continue
}
dataUsageInfo.ObjectsCount++
dataUsageInfo.ObjectsTotalSize += uint64(size)
dataUsageInfo.BucketsSizes[bucket] += uint64(size)
dataUsageInfo.ObjectsSizesHistogram[objSizeToHistoInterval(uint64(size))]++
return nil
})
s.newCache.replace(folder.name, "", *du)
// Add to parent manually
if folder.parent != nil {
parent := s.newCache.Cache[*folder.parent]
parent.addChildString(folder.name)
}
}
return dataUsageInfo
if s.dataUsageCrawlDebug {
logger.Info(logPrefix+"Existing folders: %v"+logSuffix, len(s.existingFolders))
}
// Do selective scanning of existing folders.
for _, folder := range s.existingFolders {
select {
case <-done:
return s.newCache, ctx.Err()
default:
}
h := hashPath(folder.name)
if !h.mod(s.oldCache.Info.NextCycle, dataUsageUpdateDirCycles) {
s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h])
continue
}
if s.withFilter != nil {
// If folder isn't in filter, skip it completely.
if !s.withFilter.containsDir(folder.name) {
if s.dataUsageCrawlDebug {
logger.Info(logPrefix+"Skipping non-updated folder: %v"+logSuffix, folder)
}
s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h])
continue
}
}
// Update on this cycle...
du, err := s.deepScanFolder(ctx, folder.name)
if err != nil {
logger.LogIf(ctx, err)
continue
}
if du == nil {
logger.LogIf(ctx, errors.New("data-usage: no disk usage provided"))
continue
}
s.newCache.replaceHashed(h, folder.parent, *du)
}
if s.dataUsageCrawlDebug {
logger.Info(logPrefix+"Finished crawl, %v entries"+logSuffix, len(s.newCache.Cache))
}
s.newCache.Info.LastUpdate = UTCNow()
s.newCache.Info.NextCycle++
return s.newCache, nil
}
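The selective rescan of existing folders above is driven by the folder hash and the cache cycle: each folder is deep-scanned only once every dataUsageUpdateDirCycles cycles. A sketch of that selection rule (shouldDeepScan is a hypothetical stand-in for dataUsageHash.mod):

// shouldDeepScan sketches the per-cycle folder selection used above.
func shouldDeepScan(folderHash, cycle, everyN uint64) bool {
	// A folder gets a full rescan only when its hash lands on the current
	// cycle slot, spreading deep scans evenly across everyN crawler cycles.
	return folderHash%everyN == cycle%everyN
}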

cmd/data-usage_test.go (new file)

@@ -0,0 +1,651 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"testing"
)
type usageTestFile struct {
name string
size int
}
func TestDataUsageUpdate(t *testing.T) {
base, err := ioutil.TempDir("", "TestDataUsageUpdate")
if err != nil {
t.Skip(err)
}
defer os.RemoveAll(base)
var files = []usageTestFile{
{name: "rootfile", size: 10000},
{name: "rootfile2", size: 10000},
{name: "dir1/d1file", size: 2000},
{name: "dir2/d2file", size: 300},
{name: "dir1/dira/dafile", size: 100000},
{name: "dir1/dira/dbfile", size: 200000},
{name: "dir1/dira/dirasub/dcfile", size: 1000000},
{name: "dir1/dira/dirasub/sublevel3/dccccfile", size: 10},
}
createUsageTestFiles(t, base, files)
getSize := func(item Item) (i int64, err error) {
if item.Typ&os.ModeDir == 0 {
s, err := os.Stat(item.Path)
if err != nil {
return 0, err
}
return s.Size(), nil
}
return 0, nil
}
got, err := updateUsage(context.Background(), base, dataUsageCache{}, func() {}, getSize)
if err != nil {
t.Fatal(err)
}
// Test dirs
var want = []struct {
path string
isNil bool
size, objs int
flatten bool
oSizes sizeHistogram
}{
{
path: "/",
size: 1322310,
flatten: true,
objs: 8,
oSizes: sizeHistogram{0: 2, 1: 6},
},
{
path: "/",
size: 20000,
objs: 2,
oSizes: sizeHistogram{1: 2},
},
{
path: "/dir1",
size: 2000,
objs: 1,
oSizes: sizeHistogram{1: 1},
},
{
path: "/dir1/dira",
flatten: true,
size: 1300010,
objs: 4,
oSizes: sizeHistogram{0: 1, 1: 3},
},
{
path: "/dir1/dira/",
flatten: true,
size: 1300010,
objs: 4,
oSizes: sizeHistogram{0: 1, 1: 3},
},
{
path: "/dir1/dira",
size: 300000,
objs: 2,
oSizes: sizeHistogram{0: 0, 1: 2},
},
{
path: "/dir1/dira/",
size: 300000,
objs: 2,
oSizes: sizeHistogram{0: 0, 1: 2},
},
{
path: "/nonexistying",
isNil: true,
},
}
for _, w := range want {
t.Run(w.path, func(t *testing.T) {
e := got.find(w.path)
if w.isNil {
if e != nil {
t.Error("want nil, got", e)
}
return
}
if e == nil {
t.Fatal("got nil result")
}
if w.flatten {
*e = got.flatten(*e)
}
if e.Size != int64(w.size) {
t.Error("got size", e.Size, "want", w.size)
}
if e.Objects != uint64(w.objs) {
t.Error("got objects", e.Objects, "want", w.objs)
}
if e.ObjSizes != w.oSizes {
t.Error("got histogram", e.ObjSizes, "want", w.oSizes)
}
})
}
files = []usageTestFile{
{
name: "newfolder/afile",
size: 4,
},
{
name: "newfolder/anotherone",
size: 1,
},
{
name: "newfolder/anemptyone",
size: 0,
},
{
name: "dir1/fileindir1",
size: 20000,
},
{
name: "dir1/dirc/fileindirc",
size: 20000,
},
{
name: "rootfile3",
size: 1000,
},
}
createUsageTestFiles(t, base, files)
got, err = updateUsage(context.Background(), base, got, func() {}, getSize)
if err != nil {
t.Fatal(err)
}
want = []struct {
path string
isNil bool
size, objs int
flatten bool
oSizes sizeHistogram
}{
{
path: "/",
size: 1363315,
flatten: true,
objs: 14,
oSizes: sizeHistogram{0: 6, 1: 8},
},
{
path: "/",
size: 21000,
objs: 3,
oSizes: sizeHistogram{0: 1, 1: 2},
},
{
path: "/newfolder",
size: 5,
objs: 3,
oSizes: sizeHistogram{0: 3},
},
{
path: "/dir1/dira",
size: 1300010,
flatten: true,
objs: 4,
oSizes: sizeHistogram{0: 1, 1: 3},
},
{
path: "/nonexistying",
isNil: true,
},
}
for _, w := range want {
t.Run(w.path, func(t *testing.T) {
e := got.find(w.path)
if w.isNil {
if e != nil {
t.Error("want nil, got", e)
}
return
}
if e == nil {
t.Fatal("got nil result")
}
if w.flatten {
*e = got.flatten(*e)
}
if e.Size != int64(w.size) {
t.Error("got size", e.Size, "want", w.size)
}
if e.Objects != uint64(w.objs) {
t.Error("got objects", e.Objects, "want", w.objs)
}
if e.ObjSizes != w.oSizes {
t.Error("got histogram", e.ObjSizes, "want", w.oSizes)
}
})
}
files = []usageTestFile{
{
name: "dir1/dira/dirasub/fileindira2",
size: 200,
},
}
createUsageTestFiles(t, base, files)
err = os.RemoveAll(filepath.Join(base, "dir1/dira/dirasub/dcfile"))
if err != nil {
t.Fatal(err)
}
// Changed dir must be picked up in this many cycles.
for i := 0; i < dataUsageUpdateDirCycles; i++ {
got, err = updateUsage(context.Background(), base, got, func() {}, getSize)
if err != nil {
t.Fatal(err)
}
}
want = []struct {
path string
isNil bool
size, objs int
flatten bool
oSizes sizeHistogram
}{
{
path: "/",
size: 363515,
flatten: true,
objs: 14,
oSizes: sizeHistogram{0: 7, 1: 7},
},
{
path: "/dir1/dira",
size: 300210,
objs: 4,
flatten: true,
oSizes: sizeHistogram{0: 2, 1: 2},
},
}
for _, w := range want {
t.Run(w.path, func(t *testing.T) {
e := got.find(w.path)
if w.isNil {
if e != nil {
t.Error("want nil, got", e)
}
return
}
if e == nil {
t.Fatal("got nil result")
}
if w.flatten {
*e = got.flatten(*e)
}
if e.Size != int64(w.size) {
t.Error("got size", e.Size, "want", w.size)
}
if e.Objects != uint64(w.objs) {
t.Error("got objects", e.Objects, "want", w.objs)
}
if e.ObjSizes != w.oSizes {
t.Error("got histogram", e.ObjSizes, "want", w.oSizes)
}
})
}
}
func TestDataUsageUpdatePrefix(t *testing.T) {
base, err := ioutil.TempDir("", "TestDataUpdateUsagePrefix")
if err != nil {
t.Skip(err)
}
base = filepath.Join(base, "bucket")
defer os.RemoveAll(base)
var files = []usageTestFile{
{name: "bucket/rootfile", size: 10000},
{name: "bucket/rootfile2", size: 10000},
{name: "bucket/dir1/d1file", size: 2000},
{name: "bucket/dir2/d2file", size: 300},
{name: "bucket/dir1/dira/dafile", size: 100000},
{name: "bucket/dir1/dira/dbfile", size: 200000},
{name: "bucket/dir1/dira/dirasub/dcfile", size: 1000000},
{name: "bucket/dir1/dira/dirasub/sublevel3/dccccfile", size: 10},
}
createUsageTestFiles(t, base, files)
getSize := func(item Item) (i int64, err error) {
if item.Typ&os.ModeDir == 0 {
s, err := os.Stat(item.Path)
if err != nil {
return 0, err
}
return s.Size(), nil
}
return 0, nil
}
got, err := updateUsage(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, func() {}, getSize)
if err != nil {
t.Fatal(err)
}
// Test dirs
var want = []struct {
path string
isNil bool
size, objs int
oSizes sizeHistogram
}{
{
path: "flat",
size: 1322310,
objs: 8,
oSizes: sizeHistogram{0: 2, 1: 6},
},
{
path: "bucket/",
size: 20000,
objs: 2,
oSizes: sizeHistogram{1: 2},
},
{
path: "bucket/dir1",
size: 2000,
objs: 1,
oSizes: sizeHistogram{1: 1},
},
{
path: "bucket/dir1/dira",
size: 1300010,
objs: 4,
oSizes: sizeHistogram{0: 1, 1: 3},
},
{
path: "bucket/dir1/dira/",
size: 1300010,
objs: 4,
oSizes: sizeHistogram{0: 1, 1: 3},
},
{
path: "bucket/nonexistying",
isNil: true,
},
}
for _, w := range want {
t.Run(w.path, func(t *testing.T) {
e := got.find(w.path)
if w.path == "flat" {
f := got.flatten(*got.root())
e = &f
}
if w.isNil {
if e != nil {
t.Error("want nil, got", e)
}
return
}
if e == nil {
t.Fatal("got nil result")
}
if e.Size != int64(w.size) {
t.Error("got size", e.Size, "want", w.size)
}
if e.Objects != uint64(w.objs) {
t.Error("got objects", e.Objects, "want", w.objs)
}
if e.ObjSizes != w.oSizes {
t.Error("got histogram", e.ObjSizes, "want", w.oSizes)
}
})
}
files = []usageTestFile{
{
name: "bucket/newfolder/afile",
size: 4,
},
{
name: "bucket/newfolder/anotherone",
size: 1,
},
{
name: "bucket/newfolder/anemptyone",
size: 0,
},
{
name: "bucket/dir1/fileindir1",
size: 20000,
},
{
name: "bucket/dir1/dirc/fileindirc",
size: 20000,
},
{
name: "bucket/rootfile3",
size: 1000,
},
}
createUsageTestFiles(t, base, files)
got, err = updateUsage(context.Background(), base, got, func() {}, getSize)
if err != nil {
t.Fatal(err)
}
want = []struct {
path string
isNil bool
size, objs int
oSizes sizeHistogram
}{
{
path: "flat",
size: 1363315,
objs: 14,
oSizes: sizeHistogram{0: 6, 1: 8},
},
{
path: "bucket/",
size: 21000,
objs: 3,
oSizes: sizeHistogram{0: 1, 1: 2},
},
{
path: "bucket/newfolder",
size: 5,
objs: 3,
oSizes: sizeHistogram{0: 3},
},
{
path: "bucket/dir1/dira",
size: 1300010,
objs: 4,
oSizes: sizeHistogram{0: 1, 1: 3},
},
{
path: "bucket/nonexistying",
isNil: true,
},
}
for _, w := range want {
t.Run(w.path, func(t *testing.T) {
e := got.find(w.path)
if w.path == "flat" {
f := got.flatten(*got.root())
e = &f
}
if w.isNil {
if e != nil {
t.Error("want nil, got", e)
}
return
}
if e == nil {
t.Fatal("got nil result")
}
if e.Size != int64(w.size) {
t.Error("got size", e.Size, "want", w.size)
}
if e.Objects != uint64(w.objs) {
t.Error("got objects", e.Objects, "want", w.objs)
}
if e.ObjSizes != w.oSizes {
t.Error("got histogram", e.ObjSizes, "want", w.oSizes)
}
})
}
files = []usageTestFile{
{
name: "bucket/dir1/dira/dirasub/fileindira2",
size: 200,
},
}
createUsageTestFiles(t, base, files)
err = os.RemoveAll(filepath.Join(base, "bucket/dir1/dira/dirasub/dcfile"))
if err != nil {
t.Fatal(err)
}
// Changed dir must be picked up in this many cycles.
for i := 0; i < dataUsageUpdateDirCycles; i++ {
got, err = updateUsage(context.Background(), base, got, func() {}, getSize)
if err != nil {
t.Fatal(err)
}
}
want = []struct {
path string
isNil bool
size, objs int
oSizes sizeHistogram
}{
{
path: "flat",
size: 363515,
objs: 14,
oSizes: sizeHistogram{0: 7, 1: 7},
},
{
path: "bucket/dir1/dira",
size: 300210,
objs: 4,
oSizes: sizeHistogram{0: 2, 1: 2},
},
}
for _, w := range want {
t.Run(w.path, func(t *testing.T) {
e := got.find(w.path)
if w.path == "flat" {
f := got.flatten(*got.root())
e = &f
}
if w.isNil {
if e != nil {
t.Error("want nil, got", e)
}
return
}
if e == nil {
t.Fatal("got nil result")
}
if e.Size != int64(w.size) {
t.Error("got size", e.Size, "want", w.size)
}
if e.Objects != uint64(w.objs) {
t.Error("got objects", e.Objects, "want", w.objs)
}
if e.ObjSizes != w.oSizes {
t.Error("got histogram", e.ObjSizes, "want", w.oSizes)
}
})
}
}
func createUsageTestFiles(t *testing.T, base string, files []usageTestFile) {
for _, f := range files {
err := os.MkdirAll(filepath.Dir(filepath.Join(base, f.name)), os.ModePerm)
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(filepath.Join(base, f.name), make([]byte, f.size), os.ModePerm)
if err != nil {
t.Fatal(err)
}
}
}
func TestDataUsageCacheSerialize(t *testing.T) {
base, err := ioutil.TempDir("", "TestDataUsageCacheSerialize")
if err != nil {
t.Skip(err)
}
defer os.RemoveAll(base)
var files = []usageTestFile{
{name: "rootfile", size: 10000},
{name: "rootfile2", size: 10000},
{name: "dir1/d1file", size: 2000},
{name: "dir2/d2file", size: 300},
{name: "dir1/dira/dafile", size: 100000},
{name: "dir1/dira/dbfile", size: 200000},
{name: "dir1/dira/dirasub/dcfile", size: 1000000},
{name: "dir1/dira/dirasub/sublevel3/dccccfile", size: 10},
}
createUsageTestFiles(t, base, files)
getSize := func(item Item) (i int64, err error) {
if item.Typ&os.ModeDir == 0 {
s, err := os.Stat(item.Path)
if err != nil {
return 0, err
}
return s.Size(), nil
}
return 0, nil
}
want, err := updateUsage(context.Background(), base, dataUsageCache{}, func() {}, getSize)
if err != nil {
t.Fatal(err)
}
b := want.serialize()
var got dataUsageCache
err = got.deserialize(b)
if err != nil {
t.Fatal(err)
}
if got.Info.LastUpdate.IsZero() {
t.Error("lastupdate not set")
}
if !want.Info.LastUpdate.Equal(got.Info.LastUpdate) {
t.Fatalf("deserialize mismatch\nwant: %+v\ngot: %+v", want, got)
}
}


@@ -1,5 +1,5 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
* MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,10 +21,10 @@ import (
"context"
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
@@ -37,7 +37,6 @@ import (
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/disk"
"github.com/minio/sio"
"github.com/ncw/directio"
"go.uber.org/atomic"
)
@@ -158,7 +157,7 @@ func newDiskCache(dir string, quotaPct, after, lowWatermark, highWatermark int)
onlineMutex: &sync.RWMutex{},
pool: sync.Pool{
New: func() interface{} {
b := directio.AlignedBlock(int(cacheBlkSize))
b := disk.AlignedBlock(int(cacheBlkSize))
return &b
},
},
@@ -179,7 +178,7 @@ func (c *diskCache) diskUsageLow() bool {
di, err := disk.GetInfo(c.dir)
if err != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogIf(ctx, err)
return false
}
@@ -194,7 +193,7 @@ func (c *diskCache) diskUsageHigh() bool {
di, err := disk.GetInfo(c.dir)
if err != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogIf(ctx, err)
return false
}
@@ -208,7 +207,7 @@ func (c *diskCache) diskAvailable(size int64) bool {
di, err := disk.GetInfo(c.dir)
if err != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogIf(ctx, err)
return false
}
@@ -222,15 +221,19 @@ func (c *diskCache) toClear() uint64 {
di, err := disk.GetInfo(c.dir)
if err != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogIf(ctx, err)
return 0
}
return bytesToClear(int64(di.Total), int64(di.Free), uint64(c.quotaPct), uint64(c.lowWatermark))
}
var (
errDoneForNow = errors.New("done for now")
)
// Purge cache entries that were not accessed.
func (c *diskCache) purge(ctx context.Context, doneCh <-chan struct{}) {
func (c *diskCache) purge(ctx context.Context) {
if c.diskUsageLow() {
return
}
@@ -242,7 +245,7 @@ func (c *diskCache) purge(ctx context.Context, doneCh <-chan struct{}) {
// need to be cleaned up.
expiry := UTCNow().Add(-cacheExpiryDays)
// defaulting max hits count to 100
scorer, err := newFileScorer(int64(toFree), time.Now().Unix(), 100)
scorer, err := newFileScorer(toFree, time.Now().Unix(), 100)
if err != nil {
logger.LogIf(ctx, err)
return
@@ -264,25 +267,24 @@ func (c *diskCache) purge(ctx context.Context, doneCh <-chan struct{}) {
}
return fm
}
objDirs, err := ioutil.ReadDir(c.dir)
if err != nil {
log.Fatal(err)
}
for _, obj := range objDirs {
if obj.Name() == minioMetaBucket {
continue
filterFn := func(name string, typ os.FileMode) error {
if name == minioMetaBucket {
// Proceed to next file.
return nil
}
cacheDir := pathJoin(c.dir, obj.Name())
cacheDir := pathJoin(c.dir, name)
meta, _, numHits, err := c.statCachedMeta(ctx, cacheDir)
if err != nil {
// delete any partially filled cache entry left behind.
removeAll(cacheDir)
continue
// Proceed to next file.
return nil
}
// stat all cached file ranges and cacheDataFile.
cachedFiles := fiStatFn(meta.Ranges, cacheDataFile, pathJoin(c.dir, obj.Name()))
cachedFiles := fiStatFn(meta.Ranges, cacheDataFile, pathJoin(c.dir, name))
objInfo := meta.ToObjectInfo("", "")
cc := cacheControlOpts(objInfo)
for fname, fi := range cachedFiles {
@@ -292,9 +294,11 @@ func (c *diskCache) purge(ctx context.Context, doneCh <-chan struct{}) {
logger.LogIf(ctx, err)
}
scorer.adjustSaveBytes(-fi.Size())
// break early if sufficient disk space has been reclaimed.
if c.diskUsageLow() {
return
// if disk usage is already low, return errDoneForNow: filtering is complete.
return errDoneForNow
}
}
continue
@@ -306,12 +310,24 @@ func (c *diskCache) purge(ctx context.Context, doneCh <-chan struct{}) {
if err != nil || (fi.ModTime().Before(expiry) && len(cachedFiles) == 0) {
removeAll(cacheDir)
scorer.adjustSaveBytes(-fi.Size())
continue
// Proceed to next file.
return nil
}
// if disk usage is already low, return errDoneForNow: filtering is complete.
if c.diskUsageLow() {
return
return errDoneForNow
}
// Proceed to next file.
return nil
}
if err := readDirFilterFn(c.dir, filterFn); err != nil {
logger.LogIf(ctx, err)
return
}
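errDoneForNow is a sentinel error used to stop readDirFilterFn early once enough space has been reclaimed. The general pattern in miniature (walkNames, errStop, and drainUntilLow are placeholders, not the actual helpers):

// Sentinel-error early exit, as used by purge above.
var errStop = errors.New("done for now")

func walkNames(names []string, fn func(name string) error) error {
	for _, n := range names {
		if err := fn(n); err != nil {
			return err
		}
	}
	return nil
}

func drainUntilLow(names []string, low func() bool) error {
	err := walkNames(names, func(name string) error {
		if low() {
			return errStop // stop iterating; not a failure
		}
		return nil
	})
	if err == errStop {
		err = nil // the sentinel means we finished early on purpose
	}
	return err
}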
for _, path := range scorer.fileNames() {
removeAll(path)
slashIdx := strings.LastIndex(path, SlashSeparator)
@@ -801,7 +817,7 @@ func (c *diskCache) bitrotReadFromCache(ctx context.Context, filePath string, of
if !bytes.Equal(hashBytes, checksumHash) {
err = fmt.Errorf("hashes do not match expected %s, got %s",
hex.EncodeToString(checksumHash), hex.EncodeToString(hashBytes))
logger.LogIf(context.Background(), err)
logger.LogIf(GlobalContext, err)
return err
}
@@ -848,11 +864,11 @@ func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRang
if HasSuffix(object, SlashSeparator) {
// The lock taken above is released when
// objReader.Close() is called by the caller.
gr, gerr := NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts.CheckCopyPrecondFn, nsUnlocker)
gr, gerr := NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts, nsUnlocker)
return gr, numHits, gerr
}
fn, off, length, nErr := NewGetObjectReader(rs, objInfo, opts.CheckCopyPrecondFn, nsUnlocker)
fn, off, length, nErr := NewGetObjectReader(rs, objInfo, opts, nsUnlocker)
if nErr != nil {
return nil, numHits, nErr
}


@@ -285,7 +285,7 @@ func isMetadataSame(m1, m2 map[string]string) bool {
}
type fileScorer struct {
saveBytes int64
saveBytes uint64
now int64
maxHits int
// 1/size for consistent score.
@@ -294,21 +294,21 @@ type fileScorer struct {
// queue is a linked list of files we want to delete.
// The list is kept sorted according to score, highest at top, lowest at bottom.
queue list.List
queuedBytes int64
queuedBytes uint64
}
type queuedFile struct {
name string
size int64
size uint64
score float64
}
// newFileScorer collects files in order to save a specific number of bytes.
// Each file is assigned a score based on its age, size and number of hits.
// A list of candidate files is maintained.
func newFileScorer(saveBytes int64, now int64, maxHits int) (*fileScorer, error) {
if saveBytes <= 0 {
return nil, errors.New("newFileScorer: saveBytes <= 0")
func newFileScorer(saveBytes uint64, now int64, maxHits int) (*fileScorer, error) {
if saveBytes == 0 {
return nil, errors.New("newFileScorer: saveBytes = 0")
}
if now < 0 {
return nil, errors.New("newFileScorer: now < 0")
@@ -325,7 +325,7 @@ func (f *fileScorer) addFile(name string, lastAccess time.Time, size int64, hits
// Calculate how much we want to delete this object.
file := queuedFile{
name: name,
size: size,
size: uint64(size),
}
score := float64(f.now - lastAccess.Unix())
// Size as fraction of how much we want to save, 0->1.
@@ -353,7 +353,11 @@ func (f *fileScorer) addFile(name string, lastAccess time.Time, size int64, hits
// Returns true if there is still a need to delete files (saveBytes > 0),
// false if no more bytes need to be saved.
func (f *fileScorer) adjustSaveBytes(n int64) bool {
f.saveBytes += n
if n < 0 {
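// For n < 0, ^uint64(n-1) equals uint64(-n) (two's complement), so this
// subtracts the magnitude of n without a lossy signed conversion.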
f.saveBytes -= ^uint64(n - 1)
} else {
f.saveBytes += uint64(n)
}
if f.saveBytes <= 0 {
f.queue.Init()
f.saveBytes = 0


@@ -207,6 +207,7 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string
return cacheReader, nil
}
if cc != nil && cc.noStore {
cacheReader.Close()
c.cacheStats.incMiss()
bReader, err := c.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
bReader.ObjInfo.CacheLookupStatus = CacheHit
@@ -220,9 +221,11 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string
c.incCacheStats(cacheObjSize)
return cacheReader, nil
} else if err != nil {
if cacheErr == nil {
cacheReader.Close()
}
if _, ok := err.(ObjectNotFound); ok {
if cacheErr == nil {
cacheReader.Close()
// Delete cached entry if backend object
// was deleted.
dcache.Delete(ctx, bucket, object)
@@ -233,13 +236,19 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string
}
if !objInfo.IsCacheable() {
if cacheErr == nil {
cacheReader.Close()
}
c.cacheStats.incMiss()
return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
}
// skip cache for objects with locks
objRetention := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
legalHold := objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
if objRetention.Mode != objectlock.Invalid || legalHold.Status != "" {
if objRetention.Mode.Valid() || legalHold.Status.Valid() {
if cacheErr == nil {
cacheReader.Close()
}
c.cacheStats.incMiss()
return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
}
@@ -312,7 +321,7 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string
}()
cleanupBackend := func() { bkReader.Close() }
cleanupPipe := func() { pipeWriter.Close() }
return NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, opts.CheckCopyPrecondFn, cleanupBackend, cleanupPipe)
return NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, opts, cleanupBackend, cleanupPipe)
}
// Returns ObjectInfo from cache if available.
@@ -501,7 +510,7 @@ func (c *cacheObjects) hashIndex(bucket, object string) int {
// or the global env overrides.
func newCache(config cache.Config) ([]*diskCache, bool, error) {
var caches []*diskCache
ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{})
ctx := logger.SetReqInfo(GlobalContext, &logger.ReqInfo{})
formats, migrating, err := loadAndValidateCacheFormat(ctx, config.Drives)
if err != nil {
return nil, false, err
@@ -614,7 +623,7 @@ func (c *cacheObjects) PutObject(ctx context.Context, bucket, object string, r *
// skip cache for objects with locks
objRetention := objectlock.GetObjectRetentionMeta(opts.UserDefined)
legalHold := objectlock.GetObjectLegalHoldMeta(opts.UserDefined)
if objRetention.Mode != objectlock.Invalid || legalHold.Status != "" {
if objRetention.Mode.Valid() || legalHold.Status.Valid() {
dcache.Delete(ctx, bucket, object)
return putObjectFn(ctx, bucket, object, r, opts)
}
@@ -689,17 +698,17 @@ func newServerCacheObjects(ctx context.Context, config cache.Config) (CacheObjec
if migrateSw {
go c.migrateCacheFromV1toV2(ctx)
}
go c.gc(ctx, GlobalServiceDoneCh)
go c.gc(ctx)
return c, nil
}
func (c *cacheObjects) gc(ctx context.Context, doneCh chan struct{}) {
func (c *cacheObjects) gc(ctx context.Context) {
ticker := time.NewTicker(cacheGCInterval)
defer ticker.Stop()
for {
select {
case <-doneCh:
case <-ctx.Done():
return
case <-ticker.C:
if c.migrating {
@@ -714,7 +723,7 @@ func (c *cacheObjects) gc(ctx context.Context, doneCh chan struct{}) {
go func(d *diskCache) {
defer wg.Done()
d.resetGCCounter()
d.purge(ctx, doneCh)
d.purge(ctx)
}(dcache)
}
wg.Wait()


@@ -18,9 +18,7 @@ package cmd
import (
"bytes"
"context"
"io"
"net/http"
"testing"
"github.com/minio/minio/pkg/hash"
@@ -174,84 +172,6 @@ func TestCacheExclusion(t *testing.T) {
}
}
// Test diskCache.
func TestDiskCache(t *testing.T) {
fsDirs, err := getRandomDisks(1)
if err != nil {
t.Fatal(err)
}
d, err := initDiskCaches(fsDirs, 100, 0, 80, 90, t)
if err != nil {
t.Fatal(err)
}
c := cacheObjects{cache: d}
cache := c.cache[0]
ctx := context.Background()
bucketName := "testbucket"
objectName := "testobject"
content := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
etag := "061208c10af71a30c6dcd6cf5d89f0fe"
contentType := "application/zip"
size := len(content)
httpMeta := make(map[string]string)
httpMeta["etag"] = etag
httpMeta["content-type"] = contentType
objInfo := ObjectInfo{}
objInfo.Bucket = bucketName
objInfo.Name = objectName
objInfo.Size = int64(size)
objInfo.ContentType = contentType
objInfo.ETag = etag
objInfo.UserDefined = httpMeta
var opts ObjectOptions
byteReader := bytes.NewReader([]byte(content))
hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size), globalCLIContext.StrictS3Compat)
if err != nil {
t.Fatal(err)
}
err = cache.Put(ctx, bucketName, objectName, hashReader, hashReader.Size(), nil, ObjectOptions{UserDefined: httpMeta}, false)
if err != nil {
t.Fatal(err)
}
cReader, _, err := cache.Get(ctx, bucketName, objectName, nil, http.Header{
"Content-Type": []string{"application/json"},
}, opts)
if err != nil {
t.Fatal(err)
}
cachedObjInfo := cReader.ObjInfo
if !cache.Exists(ctx, bucketName, objectName) {
t.Fatal("Expected object to exist on cache")
}
if cachedObjInfo.ETag != objInfo.ETag {
t.Fatal("Expected ETag to match")
}
if cachedObjInfo.Size != objInfo.Size {
t.Fatal("Size mismatch")
}
if cachedObjInfo.ContentType != objInfo.ContentType {
t.Fatal("Cached content-type does not match")
}
writer := bytes.NewBuffer(nil)
_, err = io.Copy(writer, cReader)
if err != nil {
t.Fatal(err)
}
if ccontent := writer.Bytes(); !bytes.Equal([]byte(content), ccontent) {
t.Errorf("wrong cached file content")
}
cReader.Close()
cache.Delete(ctx, bucketName, objectName)
online := cache.IsOnline()
if !online {
t.Errorf("expected cache drive to be online")
}
}
// Test diskCache with upper bound on max cache use.
func TestDiskCacheMaxUse(t *testing.T) {
fsDirs, err := getRandomDisks(1)
@@ -263,7 +183,7 @@ func TestDiskCacheMaxUse(t *testing.T) {
t.Fatal(err)
}
cache := d[0]
ctx := context.Background()
ctx := GlobalContext
bucketName := "testbucket"
objectName := "testobject"
content := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"


@@ -29,6 +29,7 @@ import (
"net/http"
"path"
"strconv"
"strings"
"github.com/minio/minio-go/v6/pkg/encrypt"
"github.com/minio/minio/cmd/crypto"
@@ -168,39 +169,39 @@ func rotateKey(oldKey []byte, newKey []byte, bucket, object string, metadata map
}
}
func newEncryptMetadata(key []byte, bucket, object string, metadata map[string]string, sseS3 bool) ([]byte, error) {
func newEncryptMetadata(key []byte, bucket, object string, metadata map[string]string, sseS3 bool) (crypto.ObjectKey, error) {
var sealedKey crypto.SealedKey
if sseS3 {
if GlobalKMS == nil {
return nil, errKMSNotConfigured
return crypto.ObjectKey{}, errKMSNotConfigured
}
key, encKey, err := GlobalKMS.GenerateKey(GlobalKMS.KeyID(), crypto.Context{bucket: path.Join(bucket, object)})
if err != nil {
return nil, err
return crypto.ObjectKey{}, err
}
objectKey := crypto.GenerateKey(key, rand.Reader)
sealedKey = objectKey.Seal(key, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object)
crypto.S3.CreateMetadata(metadata, GlobalKMS.KeyID(), encKey, sealedKey)
return objectKey[:], nil
return objectKey, nil
}
var extKey [32]byte
copy(extKey[:], key)
objectKey := crypto.GenerateKey(extKey, rand.Reader)
sealedKey = objectKey.Seal(extKey, crypto.GenerateIV(rand.Reader), crypto.SSEC.String(), bucket, object)
crypto.SSEC.CreateMetadata(metadata, sealedKey)
return objectKey[:], nil
return objectKey, nil
}
func newEncryptReader(content io.Reader, key []byte, bucket, object string, metadata map[string]string, sseS3 bool) (r io.Reader, encKey []byte, err error) {
func newEncryptReader(content io.Reader, key []byte, bucket, object string, metadata map[string]string, sseS3 bool) (io.Reader, crypto.ObjectKey, error) {
objectEncryptionKey, err := newEncryptMetadata(key, bucket, object, metadata, sseS3)
if err != nil {
return nil, encKey, err
return nil, crypto.ObjectKey{}, err
}
reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20})
if err != nil {
return nil, encKey, crypto.ErrInvalidCustomerKey
return nil, crypto.ObjectKey{}, crypto.ErrInvalidCustomerKey
}
return reader, objectEncryptionKey, nil
@@ -225,23 +226,24 @@ func setEncryptionMetadata(r *http.Request, bucket, object string, metadata map[
// EncryptRequest takes the client provided content and encrypts the data
// with the client provided key. It also marks the object as client-side-encrypted
// and sets the correct headers.
func EncryptRequest(content io.Reader, r *http.Request, bucket, object string, metadata map[string]string) (reader io.Reader, objEncKey []byte, err error) {
var key []byte
func EncryptRequest(content io.Reader, r *http.Request, bucket, object string, metadata map[string]string) (io.Reader, crypto.ObjectKey, error) {
if crypto.S3.IsRequested(r.Header) && crypto.SSEC.IsRequested(r.Header) {
return nil, objEncKey, crypto.ErrIncompatibleEncryptionMethod
}
if crypto.SSEC.IsRequested(r.Header) {
key, err = ParseSSECustomerRequest(r)
if err != nil {
return nil, objEncKey, err
}
return nil, crypto.ObjectKey{}, crypto.ErrIncompatibleEncryptionMethod
}
if r.ContentLength > encryptBufferThreshold {
// The encryption reads in blocks of 64KB.
// We add a buffer on bigger files to reduce the number of syscalls upstream.
content = bufio.NewReaderSize(content, encryptBufferSize)
}
var key []byte
if crypto.SSEC.IsRequested(r.Header) {
var err error
key, err = ParseSSECustomerRequest(r)
if err != nil {
return nil, crypto.ObjectKey{}, err
}
}
return newEncryptReader(content, key, bucket, object, metadata, crypto.S3.IsRequested(r.Header))
}
@@ -634,6 +636,47 @@ func (o *ObjectInfo) DecryptedSize() (int64, error) {
return size, nil
}
// DecryptETag decrypts the ETag that is part of given object
// with the given object encryption key.
//
// However, DecryptETag does not try to decrypt the ETag if
// it consists of a 128 bit hex value (32 hex chars) and exactly
// one '-' followed by a 32-bit number.
// This special case addresses randomly generated ETags produced
// by the MinIO server when running in non-compat mode. Such
// random ETags are not encrypted.
//
// Calling DecryptETag with a non-randomly generated ETag will
// fail.
func DecryptETag(key crypto.ObjectKey, object ObjectInfo) (string, error) {
if n := strings.Count(object.ETag, "-"); n > 0 {
if n != 1 {
return "", errObjectTampered
}
i := strings.IndexByte(object.ETag, '-')
if len(object.ETag[:i]) != 32 {
return "", errObjectTampered
}
if _, err := hex.DecodeString(object.ETag[:32]); err != nil {
return "", errObjectTampered
}
if _, err := strconv.ParseInt(object.ETag[i+1:], 10, 32); err != nil {
return "", errObjectTampered
}
return object.ETag, nil
}
etag, err := hex.DecodeString(object.ETag)
if err != nil {
return "", err
}
etag, err = key.UnsealETag(etag)
if err != nil {
return "", err
}
return hex.EncodeToString(etag), nil
}
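The multipart special case above is purely syntactic: 32 hex characters, exactly one '-', then a decimal part count. The same predicate as a self-contained helper (isMultipartStyleETag is a hypothetical name; stdlib only):

// isMultipartStyleETag mirrors the multipart check in DecryptETag.
func isMultipartStyleETag(etag string) bool {
	i := strings.IndexByte(etag, '-')
	// Exactly one '-' and a 32-char hex prefix, e.g.
	// "916516b396f0f4d4f2a0e7177557bec4-738".
	if i != 32 || strings.Count(etag, "-") != 1 {
		return false
	}
	if _, err := hex.DecodeString(etag[:32]); err != nil {
		return false
	}
	_, err := strconv.ParseInt(etag[i+1:], 10, 32)
	return err == nil
}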
// For encrypted objects, the ETag sent by the client, if available,
// is stored in encrypted form in the backend. Decrypt the ETag
// if it was previously encrypted.
@@ -817,7 +860,7 @@ func (o *ObjectInfo) EncryptedSize() int64 {
// This cannot happen since AWS S3 allows parts to be 5GB at most
// sio max. size is 256 TB
reqInfo := (&logger.ReqInfo{}).AppendTags("size", strconv.FormatUint(size, 10))
ctx := logger.SetReqInfo(context.Background(), reqInfo)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.CriticalIf(ctx, err)
}
return int64(size)
@@ -845,7 +888,7 @@ func DecryptCopyObjectInfo(info *ObjectInfo, headers http.Header) (errCode APIEr
}
var err error
if info.Size, err = info.DecryptedSize(); err != nil {
errCode = toAPIErrorCode(context.Background(), err)
errCode = toAPIErrorCode(GlobalContext, err)
}
}
return
@@ -939,6 +982,19 @@ func getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec
encryption encrypt.ServerSide
opts ObjectOptions
)
var partNumber int
var err error
if pn := r.URL.Query().Get("partNumber"); pn != "" {
partNumber, err = strconv.Atoi(pn)
if err != nil {
return opts, err
}
if partNumber < 0 {
return opts, errInvalidArgument
}
}
if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) {
key, err := crypto.SSEC.ParseHTTP(r.Header)
if err != nil {
@@ -947,10 +1003,16 @@ func getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec
derivedKey := deriveClientKey(key, bucket, object)
encryption, err = encrypt.NewSSEC(derivedKey[:])
logger.CriticalIf(ctx, err)
return ObjectOptions{ServerSideEncryption: encryption}, nil
return ObjectOptions{ServerSideEncryption: encryption, PartNumber: partNumber}, nil
}
// default case of passing encryption headers to backend
return getDefaultOpts(r.Header, false, nil)
opts, err = getDefaultOpts(r.Header, false, nil)
if err != nil {
return opts, err
}
opts.PartNumber = partNumber
return opts, nil
}
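The partNumber handling added above follows a simple contract: an absent parameter means zero, and anything else must parse as a non-negative integer. A self-contained sketch, with errInvalidArgument standing in for the package's sentinel error:

package main

import (
	"errors"
	"fmt"
	"net/url"
	"strconv"
)

// errInvalidArgument stands in for the package-level sentinel error.
var errInvalidArgument = errors.New("invalid argument")

// parsePartNumber mirrors the query handling in getOpts: an absent
// partNumber yields 0, and negative values are rejected.
func parsePartNumber(query url.Values) (int, error) {
	pn := query.Get("partNumber")
	if pn == "" {
		return 0, nil
	}
	partNumber, err := strconv.Atoi(pn)
	if err != nil {
		return 0, err
	}
	if partNumber < 0 {
		return 0, errInvalidArgument
	}
	return partNumber, nil
}

func main() {
	q, _ := url.ParseQuery("partNumber=3")
	fmt.Println(parsePartNumber(q)) // 3 <nil>
}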
// get ObjectOptions for PUT calls from encryption headers and metadata


@@ -256,6 +256,78 @@ func TestDecryptObjectInfo(t *testing.T) {
}
}
var decryptETagTests = []struct {
ObjectKey crypto.ObjectKey
ObjectInfo ObjectInfo
ShouldFail bool
ETag string
}{
{
ObjectKey: [32]byte{},
ObjectInfo: ObjectInfo{ETag: "20000f00f27834c9a2654927546df57f9e998187496394d4ee80f3d9978f85f3c7d81f72600cdbe03d80dc5a13d69354"},
ETag: "8ad3fe6b84bf38489e95c701c84355b6",
},
{
ObjectKey: [32]byte{},
ObjectInfo: ObjectInfo{ETag: "20000f00f27834c9a2654927546df57f9e998187496394d4ee80f3d9978f85f3c7d81f72600cdbe03d80dc5a13d6935"},
ETag: "",
ShouldFail: true, // ETag is not a valid hex value
},
{
ObjectKey: [32]byte{},
ObjectInfo: ObjectInfo{ETag: "00000f00f27834c9a2654927546df57f9e998187496394d4ee80f3d9978f85f3c7d81f72600cdbe03d80dc5a13d69354"},
ETag: "",
ShouldFail: true, // modified ETag
},
// Special tests for ETags that end with a '-<number>' style suffix
{
ObjectKey: [32]byte{},
ObjectInfo: ObjectInfo{ETag: "916516b396f0f4d4f2a0e7177557bec4-1"},
ETag: "916516b396f0f4d4f2a0e7177557bec4-1",
},
{
ObjectKey: [32]byte{},
ObjectInfo: ObjectInfo{ETag: "916516b396f0f4d4f2a0e7177557bec4-738"},
ETag: "916516b396f0f4d4f2a0e7177557bec4-738",
},
{
ObjectKey: [32]byte{},
ObjectInfo: ObjectInfo{ETag: "916516b396f0f4d4f2a0e7177557bec4-Q"},
ETag: "",
ShouldFail: true, // Q is not a number
},
{
ObjectKey: [32]byte{},
ObjectInfo: ObjectInfo{ETag: "16516b396f0f4d4f2a0e7177557bec4-1"},
ETag: "",
ShouldFail: true, // ETag prefix is not a 128 bit (32 hex chars) value
},
{
ObjectKey: [32]byte{},
ObjectInfo: ObjectInfo{ETag: "16516b396f0f4d4f2a0e7177557bec4-1-2"},
ETag: "",
ShouldFail: true, // ETag contains multiple '-'
},
}
func TestDecryptETag(t *testing.T) {
for i, test := range decryptETagTests {
etag, err := DecryptETag(test.ObjectKey, test.ObjectInfo)
if err != nil && !test.ShouldFail {
t.Fatalf("Test %d: should succeed but failed: %v", i, err)
}
if err == nil && test.ShouldFail {
t.Fatalf("Test %d: should fail but succeeded", i)
}
if err == nil {
if etag != test.ETag {
t.Fatalf("Test %d: ETag mismatch: got %s - want %s", i, etag, test.ETag)
}
}
}
}
// Tests for an issue reproduced when computing the correct encrypted
// offset of the object.
func TestGetDecryptedRange_Issue50(t *testing.T) {


@@ -1,5 +1,5 @@
/*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
* MinIO Cloud Storage, (C) 2018-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,6 +18,7 @@ package cmd
import (
"fmt"
"sort"
"strconv"
"strings"
@@ -40,7 +41,7 @@ type endpointSet struct {
// Supported set sizes; this is used to find the optimal
// single set size.
var setSizes = []uint64{4, 6, 8, 10, 12, 14, 16}
var setSizes = []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
// getDivisibleSize - returns a greatest common divisor of
// all the ellipses sizes.
@@ -60,14 +61,75 @@ func getDivisibleSize(totalSizes []uint64) (result uint64) {
// isValidSetSize - checks whether given count is a valid set size for erasure coding.
var isValidSetSize = func(count uint64) bool {
return (count >= setSizes[0] && count <= setSizes[len(setSizes)-1] && count%2 == 0)
return (count >= setSizes[0] && count <= setSizes[len(setSizes)-1])
}
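A quick runnable check of the relaxed rule above: with the expanded setSizes list, odd sizes such as 5 now pass, since the count%2 == 0 requirement was dropped:

package main

import "fmt"

var setSizes = []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}

var isValidSetSize = func(count uint64) bool {
	return count >= setSizes[0] && count <= setSizes[len(setSizes)-1]
}

func main() {
	fmt.Println(isValidSetSize(5))  // true (rejected before, when sizes had to be even)
	fmt.Println(isValidSetSize(17)) // false (above the supported maximum)
}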
func commonSetDriveCount(divisibleSize uint64, setCounts []uint64) (setSize uint64) {
// prefers setCounts to be sorted for optimal behavior.
if divisibleSize < setCounts[len(setCounts)-1] {
return divisibleSize
}
// Figure out the largest value of total_drives_in_erasure_set which
// results in the least total_drives/total_drives_in_erasure_set ratio.
prevD := divisibleSize / setCounts[0]
for _, cnt := range setCounts {
if divisibleSize%cnt == 0 {
d := divisibleSize / cnt
if d <= prevD {
prevD = d
setSize = cnt
}
}
}
return setSize
}
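To make the selection rule concrete, a runnable re-implementation sketch (pickSetDriveCount is a stand-in name): among the candidate set sizes that divide the drive count evenly, the largest divisor wins because it yields the fewest erasure sets:

package main

import "fmt"

// pickSetDriveCount mirrors commonSetDriveCount above: scan the sorted
// candidates and keep the one producing the smallest set count.
func pickSetDriveCount(divisibleSize uint64, setCounts []uint64) uint64 {
	if divisibleSize < setCounts[len(setCounts)-1] {
		return divisibleSize
	}
	var setSize uint64
	prevD := divisibleSize / setCounts[0]
	for _, cnt := range setCounts {
		if divisibleSize%cnt == 0 {
			if d := divisibleSize / cnt; d <= prevD {
				prevD = d
				setSize = cnt
			}
		}
	}
	return setSize
}

func main() {
	// 64 drives with candidates 4, 8 and 16: 16 wins, giving only 4 sets.
	fmt.Println(pickSetDriveCount(64, []uint64{4, 8, 16})) // 16
}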
// possibleSetCountsWithSymmetry returns symmetrical setCounts based on the
// input argument patterns. The symmetry calculation ensures that we
// also use a uniform number of drives common across all ellipses patterns.
func possibleSetCountsWithSymmetry(setCounts []uint64, argPatterns []ellipses.ArgPattern) []uint64 {
var newSetCounts = make(map[uint64]struct{})
for _, ss := range setCounts {
var symmetry bool
for _, argPattern := range argPatterns {
for _, p := range argPattern {
if uint64(len(p.Seq)) > ss {
symmetry = uint64(len(p.Seq))%ss == 0
} else {
symmetry = ss%uint64(len(p.Seq)) == 0
}
}
}
// With no arg patterns, the user is expected to know the
// right symmetry, so either ellipses patterns are provided
// (recommended) or none at all.
if _, ok := newSetCounts[ss]; !ok && (symmetry || argPatterns == nil) {
newSetCounts[ss] = struct{}{}
}
}
setCounts = []uint64{}
for setCount := range newSetCounts {
setCounts = append(setCounts, setCount)
}
// Not strictly necessary, but it makes clear to readers
// that we prefer a sorted setCounts slice, which the
// subsequent function relies on to figure out the right
// common divisor without extra looping.
sort.Slice(setCounts, func(i, j int) bool {
return setCounts[i] < setCounts[j]
})
return setCounts
}
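To see the symmetry filter at work, a simplified sketch for a single ellipses pattern (the real function iterates over every pattern in every argument); filterSymmetric is a hypothetical name:

package main

import (
	"fmt"
	"sort"
)

// filterSymmetric keeps candidate set counts that divide, or are divided by,
// the pattern's sequence length, so each set draws drives uniformly.
func filterSymmetric(setCounts []uint64, seqLen uint64) []uint64 {
	var out []uint64
	for _, ss := range setCounts {
		var symmetry bool
		if seqLen > ss {
			symmetry = seqLen%ss == 0
		} else {
			symmetry = ss%seqLen == 0
		}
		if symmetry {
			out = append(out, ss)
		}
	}
	sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
	return out
}

func main() {
	// For a pattern like /data{1...30}, candidates 4 and 8 break symmetry.
	fmt.Println(filterSymmetric([]uint64{4, 5, 6, 8, 10, 15}, 30)) // [5 6 10 15]
}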
// getSetIndexes returns a list of indexes which provides the set size
// at each index; this function also determines the final set size.
// The final set size has an affinity towards choosing smaller
// indexes (total sets).
func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint64) (setIndexes [][]uint64, err error) {
func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint64, argPatterns []ellipses.ArgPattern) (setIndexes [][]uint64, err error) {
if len(totalSizes) == 0 || len(args) == 0 {
return nil, errInvalidArgument
}
@@ -81,24 +143,7 @@ func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint6
}
}
var setSize uint64
commonSize := getDivisibleSize(totalSizes)
if commonSize > setSizes[len(setSizes)-1] {
prevD := commonSize / setSizes[0]
for _, i := range setSizes {
if commonSize%i == 0 {
d := commonSize / i
if d <= prevD {
prevD = d
setSize = i
}
}
}
} else {
setSize = commonSize
}
possibleSetCounts := func(setSize uint64) (ss []uint64) {
for _, s := range setSizes {
if setSize%s == 0 {
@@ -108,20 +153,41 @@ func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint6
return ss
}
setCounts := possibleSetCounts(commonSize)
if len(setCounts) == 0 {
msg := fmt.Sprintf("Incorrect number of endpoints provided %s, number of disks %d is not divisible by any supported erasure set sizes %d", args, commonSize, setSizes)
return nil, config.ErrInvalidNumberOfErasureEndpoints(nil).Msg(msg)
}
var setSize uint64
// Custom set drive count allows overriding the automatic distribution,
// only meant for when you want to further optimize drive distribution.
if customSetDriveCount > 0 {
msg := fmt.Sprintf("Invalid set drive count, leads to non-uniform distribution for the given number of disks. Possible values for custom set count are %d", possibleSetCounts(setSize))
if customSetDriveCount > setSize {
return nil, config.ErrInvalidErasureSetSize(nil).Msg(msg)
}
if setSize%customSetDriveCount != 0 {
msg := fmt.Sprintf("Invalid set drive count. Acceptable values for %d number drives are %d", commonSize, setCounts)
var found bool
for _, ss := range setCounts {
if ss == customSetDriveCount {
found = true
}
}
if !found {
return nil, config.ErrInvalidErasureSetSize(nil).Msg(msg)
}
// No automatic symmetry calculation expected, user is on their own
setSize = customSetDriveCount
globalCustomErasureDriveCount = true
} else {
// Returns possible set counts with symmetry.
setCounts = possibleSetCountsWithSymmetry(setCounts, argPatterns)
// Final set size with all the symmetry accounted for.
setSize = commonSetDriveCount(commonSize, setCounts)
}
// Check whether setSize is within the supported range.
if !isValidSetSize(setSize) {
msg := fmt.Sprintf("Incorrect number of endpoints provided %s", args)
msg := fmt.Sprintf("Incorrect number of endpoints provided %s, number of disks %d is not divisible by any supported erasure set sizes %d", args, commonSize, setSizes)
return nil, config.ErrInvalidNumberOfErasureEndpoints(nil).Msg(msg)
}
@@ -190,7 +256,7 @@ func parseEndpointSet(customSetDriveCount uint64, args ...string) (ep endpointSe
argPatterns[i] = patterns
}
ep.setIndexes, err = getSetIndexes(args, getTotalSizes(argPatterns), customSetDriveCount)
ep.setIndexes, err = getSetIndexes(args, getTotalSizes(argPatterns), customSetDriveCount, argPatterns)
if err != nil {
return endpointSet{}, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
}
@@ -212,7 +278,7 @@ func GetAllSets(customSetDriveCount uint64, args ...string) ([][]string, error)
// Check if we have more than one arg.
if len(args) > 1 {
var err error
setIndexes, err = getSetIndexes(args, []uint64{uint64(len(args))}, customSetDriveCount)
setIndexes, err = getSetIndexes(args, []uint64{uint64(len(args))}, customSetDriveCount, nil)
if err != nil {
return nil, err
}
@@ -246,6 +312,15 @@ func GetAllSets(customSetDriveCount uint64, args ...string) ([][]string, error)
return setArgs, nil
}
// Override set drive count for manual distribution.
const (
EnvErasureSetDriveCount = "MINIO_ERASURE_SET_DRIVE_COUNT"
)
var (
globalCustomErasureDriveCount = false
)
// CreateServerEndpoints - validates and creates new endpoints from input args, supports
// both ellipses and without ellipses transparently.
func createServerEndpoints(serverAddr string, args ...string) (
@@ -256,14 +331,11 @@ func createServerEndpoints(serverAddr string, args ...string) (
return nil, -1, -1, errInvalidArgument
}
if v := env.Get("MINIO_ERASURE_SET_DRIVE_COUNT", ""); v != "" {
if v := env.Get(EnvErasureSetDriveCount, ""); v != "" {
setDriveCount, err = strconv.Atoi(v)
if err != nil {
return nil, -1, -1, config.ErrInvalidErasureSetSize(err)
}
if !isValidSetSize(uint64(setDriveCount)) {
return nil, -1, -1, config.ErrInvalidErasureSetSize(nil)
}
}
if !ellipses.HasEllipses(args...) {
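A minimal sketch of how the MINIO_ERASURE_SET_DRIVE_COUNT override above might be consumed; note that the diff removes the upfront isValidSetSize check on the raw value, deferring validation to getSetIndexes. customSetDriveCount here is a hypothetical wrapper, not the server's actual plumbing:

package main

import (
	"fmt"
	"os"
	"strconv"
)

const EnvErasureSetDriveCount = "MINIO_ERASURE_SET_DRIVE_COUNT"

// customSetDriveCount reads the manual override; 0 means no override and
// the automatic drive distribution applies.
func customSetDriveCount() (int, error) {
	v := os.Getenv(EnvErasureSetDriveCount)
	if v == "" {
		return 0, nil
	}
	return strconv.Atoi(v)
}

func main() {
	os.Setenv(EnvErasureSetDriveCount, "8")
	n, err := customSetDriveCount()
	fmt.Println(n, err) // 8 <nil>
}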

Some files were not shown because too many files have changed in this diff.