Compare commits

...

461 Commits

Author SHA1 Message Date
Harshavardhana
9cac385aec add comment on exported error 2020-05-15 18:17:54 -07:00
Harshavardhana
814ddc0923 add missing admin actions, enhance AccountUsageInfo (#9607) 2020-05-15 18:16:45 -07:00
Harshavardhana
247795dd36 add github workflow for windows (#9611)
bye, bye travis
2020-05-15 15:54:39 -07:00
Anis Elleuch
dfadf70a7f mint: Add more SQL tests (#9540) 2020-05-15 11:20:57 -07:00
Harshavardhana
d348ec0f6c avoid double listObjectParts calls improves performance (#9606)
this PR is to avoid double listObjectParts calls
in these APIs:

- CopyObjectPart
- PutObjectPart
2020-05-15 08:06:45 -07:00
Harshavardhana
b730bd1396 fix: possible race in FS local lockMap (#9598) 2020-05-14 23:59:07 -07:00
Klaus Post
56e0c6adf8 Track if bloom filter is dirty (#9601)
Only save bloom filter on cycles and updates.

Fixes #9600
2020-05-14 21:46:36 -07:00
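As a rough illustration of the dirty-flag pattern this commit describes (a hypothetical sketch, not MinIO's actual types), persistence can be skipped entirely when nothing changed:

```
package sketch

import (
	"hash/fnv"
	"io"
	"sync"
)

// trackedFilter wraps a bit set with a dirty flag: mutations mark it
// dirty, and Save becomes a no-op when nothing changed since the
// last save.
type trackedFilter struct {
	mu    sync.Mutex
	dirty bool
	bits  []byte
}

func newTrackedFilter(sizeBytes int) *trackedFilter {
	return &trackedFilter{bits: make([]byte, sizeBytes)}
}

func (t *trackedFilter) Add(key []byte) {
	h := fnv.New32a()
	h.Write(key)
	idx := h.Sum32() % uint32(len(t.bits)*8)

	t.mu.Lock()
	defer t.mu.Unlock()
	t.bits[idx/8] |= 1 << (idx % 8)
	t.dirty = true
}

// Save writes the filter only when dirty, then clears the flag.
func (t *trackedFilter) Save(w io.Writer) error {
	t.mu.Lock()
	defer t.mu.Unlock()
	if !t.dirty {
		return nil // unchanged since last save: skip the write
	}
	if _, err := w.Write(t.bits); err != nil {
		return err
	}
	t.dirty = false
	return nil
}
```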
Anis Elleuch
f44a960dcd tests: Fix one multi-delete test failure in Windows CI (#9602)
There is a discrepancy in behavior between Linux & Windows in
the returned error when trying to rename a non-existent path.

err := os.Rename("/path/does/not/exist", "/tmp/copy")

Linux:
  isSysErrNotDir(err) = false
  os.IsNotExist(err) = true

Windows:
  isSysErrNotDir(err) = true
  os.IsNotExist(err) = true

ENOTDIR in Linux is returned when the destination path
of the rename call contains a file in one of the middle
segments of the path (e.g. /tmp/file/dst, where /tmp/file
is an actual file not a directory)

However, as shown above, Windows has more scenarios in which
it returns ENOTDIR. For example, when the source path contains
a non-existent directory in its path.

In that case, we want errFileNotFound returned and not
errFileAccessDenied, so this commit adds a further check to close
the discrepancy between Windows & Linux.
2020-05-14 18:09:30 -07:00
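A minimal sketch of the kind of cross-platform check described above; `errFileNotFound`/`errFileAccessDenied` are the sentinels named in the commit text, and `isSysErrNotDir` stands in for the platform-specific ENOTDIR check:

```
package sketch

import (
	"errors"
	"os"
)

var (
	errFileNotFound     = errors.New("file not found")     // sentinels named in the
	errFileAccessDenied = errors.New("file access denied") // commit message above
)

// isSysErrNotDir stands in for the platform-specific ENOTDIR check.
func isSysErrNotDir(err error) bool { return false }

// renameFile checks os.IsNotExist before the ENOTDIR check, so the
// extra Windows ENOTDIR cases map to "file not found" rather than
// "access denied".
func renameFile(src, dst string) error {
	err := os.Rename(src, dst)
	switch {
	case err == nil:
		return nil
	case os.IsNotExist(err):
		return errFileNotFound
	case isSysErrNotDir(err):
		return errFileAccessDenied
	default:
		return err
	}
}
```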
kannappanr
6c1bbf918d do not add quotes around etag, if already present (#9603) 2020-05-14 17:43:54 -07:00
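A sketch of the idea, assuming a hypothetical helper name:

```
package sketch

import "strings"

// canonicalETag adds surrounding quotes only when they are not
// already present, avoiding double quoting.
func canonicalETag(etag string) string {
	if len(etag) >= 2 && strings.HasPrefix(etag, `"`) && strings.HasSuffix(etag, `"`) {
		return etag
	}
	return `"` + etag + `"`
}
```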
Anis Elleuch
48e614b167 honor lifecycle expiration with tag rule (#9604) 2020-05-14 16:21:03 -07:00
poornas
fe8d33452b Allow writes for bucket exceeding FIFO quota (#9575)
the quota will be enforced by
deleting the oldest entries in FIFO manner.
2020-05-14 15:18:24 -07:00
Andreas Auernhammer
c19ece6921 update KMS guide to reflect latest KES changes (#9591)
This commit updates the two client env. variables:
```
KES_CLIENT_TLS_KEY_FILE
KES_CLIENT_TLS_CERT_FILE
```

The KES CLI client expects the client key and certificate
as `KES_CLIENT_KEY` and `KES_CLIENT_CERT`, respectively.
2020-05-14 14:02:43 -07:00
Klaus Post
216fa57b88 merge nested hash readers (#9582)
The `ioutil.NopCloser(reader)` was hiding nested hash readers.

We make it an `io.Closer` so it can be attached without wrapping,
and allow for nesting by merging the requests.
2020-05-14 14:01:31 -07:00
Dzmitry Pasiukevich
a9558ae248 Simplify cast of string to rune slice in wildcard matching (#9577) 2020-05-14 08:20:13 -07:00
Klaus Post
ee9077db7d fix: windows tests for all cases (#9594)
Replaces #9299
2020-05-13 23:55:38 -07:00
Harshavardhana
9c85928740 add formatting message for zones in ordinals (#9596)
Unlike the message
> Formatting 2 zone, 1 set(s), 6 drives per set.

It is more readable as an ordinal
> Formatting 2nd zone, 1 set(s), 6 drives per set.
2020-05-13 20:25:29 -07:00
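A self-contained sketch of such ordinal formatting (illustrative, not necessarily MinIO's helper):

```
package main

import "fmt"

// ordinal returns "1st", "2nd", "3rd", "4th", ... for n >= 1,
// handling the 11th/12th/13th exceptions.
func ordinal(n int) string {
	suffix := "th"
	switch n % 100 {
	case 11, 12, 13:
		// keep "th"
	default:
		switch n % 10 {
		case 1:
			suffix = "st"
		case 2:
			suffix = "nd"
		case 3:
			suffix = "rd"
		}
	}
	return fmt.Sprintf("%d%s", n, suffix)
}

func main() {
	fmt.Printf("Formatting %s zone, 1 set(s), 6 drives per set.\n", ordinal(2))
}
```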
kannappanr
af0309371e mint tests s3cmd remove copy object test (#9595)
the test is failing in the Azure gateway because the mapping from
x-amz-storage-class to an Azure storage tier fails,
and there is no way to avoid sending a storage class in s3cmd.
2020-05-13 19:55:34 -07:00
ebozduman
ead3c186a6 Fixes broken image preview for anon user (#9584) 2020-05-13 13:26:12 -07:00
Harshavardhana
6ac48a65cb fix: use unused cacheMetrics code in prometheus (#9588)
remove all other unused/dead code
2020-05-13 08:15:26 -07:00
poornas
2ecf5ba1de fix panic when checking if es/nats event target is active (#9587) 2020-05-13 06:37:22 -07:00
Krishna Srinivas
94f1a1dea3 add option for O_SYNC writes for standalone FS backend (#9581) 2020-05-12 19:24:59 -07:00
Anis Elleuch
c045ae15e7 fix: avoid undoing bucket creation and return the first err instead (#9578) 2020-05-12 15:20:42 -07:00
Harshavardhana
1756b7c6ff fix: LDAP derivative accounts parentUser validation is not needed (#9573)
* fix: LDAP derivative accounts parentUser validation is not needed

fixes #9435

* Update cmd/iam.go

Co-authored-by: Lenin Alevski <alevsk.8772@gmail.com>

Co-authored-by: Lenin Alevski <alevsk.8772@gmail.com>
2020-05-12 09:21:08 -07:00
Klaus Post
e25ace2151 Forward RPC errors from crawler (#9569)
The `keepHTTPResponseAlive` would cause errors to be 
returned with status OK.

- Add '32' as a filler byte until a response is ready
- '0' to indicate the response is ready to be consumed
- '1' to indicate response has an error which needs
to be returned to the caller

Clear out 'file not found' errors from dir walker, since it may be 
in a folder that has been deleted since it was scanned.
2020-05-11 20:41:38 -07:00
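A hypothetical reader-side sketch of the byte protocol described above (marker values taken from the commit text; names are illustrative):

```
package sketch

import (
	"errors"
	"io"
)

// Marker bytes taken from the commit text: 32 (' ') is filler while
// the response is being prepared, 0 means the response body follows,
// 1 means an error message follows.
func readKeptAliveResponse(r io.Reader) (io.Reader, error) {
	buf := make([]byte, 1)
	for {
		if _, err := io.ReadFull(r, buf); err != nil {
			return nil, err
		}
		switch buf[0] {
		case 32: // filler: peer is still working, keep reading
			continue
		case 0: // ready: the rest of the stream is the response
			return r, nil
		case 1: // error: the rest of the stream is the error text
			msg, err := io.ReadAll(r)
			if err != nil {
				return nil, err
			}
			return nil, errors.New(string(msg))
		default:
			return nil, errors.New("unexpected marker byte")
		}
	}
}
```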
poornas
a8e5a86fa0 Remove brittle tests for cache (#9570) 2020-05-11 15:41:10 -07:00
Harshavardhana
f8edc233ab support multiple policies for temporary users (#9550) 2020-05-11 13:04:11 -07:00
Harshavardhana
337c2a7cb4 add audit logging for all admin calls (#9568)
- add ServiceRestart/ServiceStop actions
- audit log appropriately in all admin handlers

fixes #9522
2020-05-11 10:34:08 -07:00
Harshavardhana
2d735144b9 fix: distributed docs image path 2020-05-11 09:33:55 -07:00
Harshavardhana
0113035237 update distributed setup guide (#9566) 2020-05-11 09:19:10 -07:00
Anis Elleuch
52a1d248b2 policy: Do not return an error for invalid value during parsing (#9442)
s3:HardwareInfo was removed recently. Users having that admin action
stored in the backend will have an issue starting the server.

To fix this, we need to avoid returning an error in Marshal/Unmarshal
when they encounter an invalid action, and validate only in a
specific location.

Currently the validation is done in ParseConfig().
2020-05-10 10:55:28 -07:00
Harshavardhana
b5ed42c845 ignore policy/group missing errors appropriately (#9559) 2020-05-09 13:59:12 -07:00
Klaus Post
d9e7cadacf Update reed+solomon (#9562)
Only create encoder when strictly needed.
2020-05-09 09:54:20 -07:00
Harshavardhana
36e88cbd50 fix mint tests in awscli to ignore NotImplemented properly (#9561) 2020-05-08 22:32:10 -07:00
Anis Elleuch
6d76efb9bb Add support of TCP fast open in internode calls (#9486) 2020-05-08 14:33:23 -07:00
Harshavardhana
a1de9cec58 cleanup object-lock/bucket tagging for gateways (#9548)
This PR is to ensure that we call the relevant object
layer APIs for the necessary S3 API level functionality,
allowing gateway implementations to return proper
errors as NotImplemented{}.

This allows all our tests in mint to behave
appropriately and be handled appropriately as
well.
2020-05-08 13:44:44 -07:00
Anis Elleuch
6885c72f32 disable check for DirectIO in standalone FS mode (#9558) 2020-05-08 12:07:51 -07:00
poornas
0f1389e992 Fix azure gateway handling of ETag for CopyObject (#9544)
fixes #9428
2020-05-08 11:30:35 -07:00
Minio Trusted
5bf3eeaa77 Update yaml files to latest version RELEASE.2020-05-08T02-40-49Z 2020-05-08 02:49:15 +00:00
Harshavardhana
9dda1fd624 Remove B2 gateway implementation (#9547)
S3 is now natively supported by the B2 cloud storage provider;
there is no reason to use the specialized gateway for B2 anymore,
as our current S3 gateway with caching works with B2.

Resolves #8584
2020-05-07 19:00:30 -07:00
Harshavardhana
2dc46cb153 Report correct error when O_DIRECT is not supported (#9545)
fixes #9537
2020-05-07 16:12:16 -07:00
remche
0674c0075e add LDAP StartTLS support (#9472) 2020-05-07 15:08:33 -07:00
Harshavardhana
518ef670da update browser assets with new changes (#9543)
- f7c91eff5 - Share button for public objects (#9162) (5 days ago) <Egor Rudinsky>
- 60d415bb8 - deprecate/remove global WORM mode (#9436) (13 days ago) <Harshavardhana>
2020-05-07 13:21:05 -07:00
Harshavardhana
0dd626ec67 fix: requests without bucket should route to the original router (#9541)
requests in federated setups for STS type calls which are
performed at the '/' resource should be routed by the muxer;
the assumption is simply that requests without a bucket
in a federated setup cannot be proxied, so serve them at the
current server.
2020-05-07 11:49:04 -07:00
Harshavardhana
53f4c0fdc0 fix: deprecate old settings and elaborate on tuning (#9542)
fixes #9538
2020-05-07 11:08:57 -07:00
P R
7e3ea77fdf Checking for access denied in web browser request. (#9523)
Fixes #9485
2020-05-06 21:31:44 -07:00
Harshavardhana
7290d23b26 Apply partNumber checks only on multipart objects (#9528) 2020-05-06 16:58:09 -07:00
Minio Trusted
24f20eb1bd Update yaml files to latest version RELEASE.2020-05-06T23-23-25Z 2020-05-06 23:32:41 +00:00
Harshavardhana
4c9de098b0 heal buckets during init and make sure to wait on quorum (#9526)
heal buckets properly during expansion, and make sure
to wait for the quorum properly such that healing can
be retried.
2020-05-06 14:25:05 -07:00
Harshavardhana
a2ccba69e5 add kes retries upto two times with jitter backoff (#9527)
KES calls were not retried; under certain situations,
when KES is under high load, the request should be
retried automatically.
2020-05-06 11:44:06 -07:00
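A generic sketch of retry-with-jitter (illustrative names, not the actual KES client code):

```
package sketch

import (
	"math/rand"
	"time"
)

// retryWithJitter retries op a bounded number of times, sleeping the
// base delay plus a random jitter between attempts.
func retryWithJitter(attempts int, base time.Duration, op func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = op(); err == nil {
			return nil
		}
		time.Sleep(base + time.Duration(rand.Int63n(int64(base))))
	}
	return err
}
```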
Harshavardhana
8eb99d3a87 fix: complete multipart upload respond with ETag quoted (#9525)
Fixes #9517
2020-05-05 17:47:54 -07:00
Bala FA
3773874cd3 add bucket tagging support (#9389)
This patch also simplifies object tagging support
2020-05-05 14:18:13 -07:00
Harshavardhana
6c62b1a2ea fix broken retry tests 2020-05-04 22:01:39 -07:00
Harshavardhana
b768645fde fix: unexpected logging with bucket metadata conversions (#9519) 2020-05-04 20:04:06 -07:00
Harshavardhana
7b58dcb28c fix: return context error from context reader (#9507) 2020-05-04 14:33:49 -07:00
Harshavardhana
fea4a1e68e fix logical error in path length handling for windows (#9520)
fixes #9515
2020-05-04 13:11:56 -07:00
Andreas Auernhammer
a9e83dd42c crypto: remove dead code (#9516)
This commit removes some crypto-related code
that is not used anywhere anymore.
2020-05-04 11:41:18 -07:00
Andreas Auernhammer
145f501a21 use HTTP/2 when connecting to KES (#9514)
This commit makes the KES client use HTTP/2
when establishing a connection to the KES server.

This is necessary since the next KES server release
will require HTTP/2.
2020-05-04 10:17:13 -07:00
Harshavardhana
9b3b04ecec allow retries for bucket encryption/policy quorum reloads (#9513)
We should allow quorum errors to be sent upwards
such that the caller can retry while reading bucket
encryption/policy configs when the server is starting
up; this allows distributed setups to load the
configuration properly.

The current code didn't facilitate this and would have
never loaded the actual configs during rolling
server restarts.
2020-05-04 09:42:58 -07:00
Anis Elleuch
3e063cca5c Show the cause error in startup when directio is not supported (#9497)
This commit tries to create a file using direct I/O at startup
so the server returns an error quickly and avoids other cryptic errors.
2020-05-04 08:48:03 -07:00
Harshavardhana
27d716c663 simplify usage of mutexes and atomic constants (#9501) 2020-05-03 22:35:40 -07:00
ebozduman
fbd15cb7b7 Fixes browser delete issue for anon and authorized users (#9440) 2020-05-03 14:01:28 -07:00
Egor Rudinsky
f7c91eff54 Share button for public objects (#9162) 2020-05-01 23:55:53 -07:00
Dmitry Gadeev
a6bdc086a2 fix: use source scheme retrieved from X-Forwarded headers (#9483) 2020-05-01 23:53:01 -07:00
Minio Trusted
1242dd951a Update yaml files to latest version RELEASE.2020-05-01T22-19-14Z 2020-05-01 22:28:00 +00:00
Andreas Auernhammer
d1c8e9f31b update KMS guide to work with latest KES changes (#9498)
This commit updates the KMS guide to reflect the
latest changes in KES. Based on internal design
meetings we made some adjustments to the overall
KES configuration.
This commit ensures that the KMS guide contains
a working KES demo-setup with Vault.
2020-05-01 12:36:30 -07:00
Bala FA
83ccae6c8b Store bucket created time as a metadata (#9465)
Fixes #9459
2020-05-01 09:53:14 -07:00
Frank Wessels
086be07bf5 Fix ndjson unsupported (#9500) 2020-05-01 08:06:29 -07:00
Harshavardhana
28f9c477a8 fix: assume parentUser correctly for serviceAccounts (#9504)
ListServiceAccounts/DeleteServiceAccount didn't work properly
with STS credentials yet, due to an incorrect parent user.
2020-05-01 08:05:14 -07:00
Harshavardhana
09571d03a5 avoid unnecessary logging in IAM (#9502) 2020-05-01 18:11:17 +05:30
Harshavardhana
71ce63f79c fix: background heal to call HealFormat only if needed (#9491)
In large setups this avoids unnecessary data transfer
across nodes and potential locks.

This PR also optimizes the heal result channel, which should
not be recreated for each queueHealTask, as it is expensive
to create/close channels for a large number of objects.
2020-04-30 20:23:00 -07:00
Harshavardhana
5205c9591f print proper certinfo on console when starting up (#9479)
also potentially fix a race in certs.go implementation
while accessing tls.Certificate concurrently.
2020-04-30 16:15:29 -07:00
poornas
9a547dcbfb Add API's for managing bucket quota (#9379)
This PR allows setting a "hard" or "fifo" quota
restriction at the bucket level. Buckets that
have reached the configured FIFO quota will
automatically be cleaned up in FIFO manner until
bucket usage drops to the configured quota.
If a bucket is configured with a "hard" quota
ceiling, all further writes are disallowed.
2020-04-30 15:55:54 -07:00
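A sketch of the admission logic implied above, with hypothetical names: a "hard" quota rejects writes at the ceiling, while a "fifo" quota admits them and relies on background FIFO deletion:

```
package sketch

import "errors"

var errBucketQuotaExceeded = errors.New("bucket quota exceeded") // illustrative sentinel

type quotaType string

const (
	hardQuota quotaType = "hard"
	fifoQuota quotaType = "fifo"
)

// checkPutAllowed rejects a write only under a "hard" quota; under a
// "fifo" quota the write is admitted and the oldest entries are
// deleted asynchronously until usage drops below the limit.
func checkPutAllowed(qt quotaType, limit, usage, incoming uint64) error {
	if qt == hardQuota && usage+incoming > limit {
		return errBucketQuotaExceeded
	}
	return nil
}
```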
Anis Elleuch
27632ca6ec audit: Merge ResponseWriter with RecordAPIStats (#9496)
ResponseWriter & RecordAPIStats have similar roles; merge them.

This commit also fixes wrong auditing for STS, Web, and others,
since they were using ResponseWriter instead of RecordAPIStats.
2020-04-30 11:27:19 -07:00
Harshavardhana
c7470e6e6e fix: go mod tidy 2020-04-29 22:31:34 -07:00
Anis Elleuch
d090a17ed0 fix: Audit tests on the correct response writer type (#9445) 2020-04-29 22:17:36 -07:00
Harshavardhana
c2529260e7 fix: crash observed when position of drives different (#9490)
allocate the disk slice properly before populating
disk by its ID and its position.

Fixes #9416
2020-04-29 13:42:37 -07:00
Arthur Lutz
da87188ff8 fix: tls doc markdown title (#9487) 2020-04-29 12:28:45 -07:00
Harshavardhana
d099039f5d Add new github workflow (#9480) 2020-04-29 09:17:32 -07:00
P R
5dd9cf4398 fix: CopyObject with REPLACE directive deletes existing tags (#9478)
Fixes #9477
2020-04-29 10:26:37 +05:30
Harshavardhana
ab77b216d1 fix: remove restrictions on windows for NAME_MAX (#9469)
Fixes #9393
2020-04-28 17:32:46 -07:00
Minio Trusted
b37a02cddf Update yaml files to latest version RELEASE.2020-04-28T23-56-56Z 2020-04-29 00:05:12 +00:00
Anis Elleuch
c3c3e9087b config: More fixes in parsing Audit & Logger env variables (#9474)
- Add support of missed legacy Logger webhook
- Do not enable Audit or Logger if `_ENABLE`
  is not explicitly set to "on".
2020-04-28 15:20:40 -07:00
Anis Elleuch
7ad6bc955f show a notice when mixed rootfs & mounted disks is detected (#9471)
A user can incorrectly mount a fresh disk. MinIO will detect
that it is writing to a rootfs disk and will mark it down. However,
it is hard for the user to understand what's going on.

This commit just prints a notice so it will be easy to spot
such a use case.
2020-04-28 14:55:01 -07:00
Harshavardhana
7a5271ad96 fix: re-use connections in webhook/elasticsearch (#9461)
- the elasticsearch client should rely on the SDK helpers
  instead of pure HTTP calls.
- webhook shouldn't need to check IsActive() for
  all notifications; failure should be delayed.
- Remove DialHTTP as it's never used properly

Fixes #9460
2020-04-28 13:57:56 -07:00
Harshavardhana
1b122526aa fix: add service account support for AssumeRole/LDAPIdentity creds (#9451)
allow generating service accounts for temporary credentials
which have a designated parent; currently OpenID is not yet
supported.

added checks to ensure that a service account cannot generate
further service accounts for itself; service accounts can
never be a parent to any credential.
2020-04-28 12:49:56 -07:00
Anis Elleuch
a3b266761e Fix audit loading from the env and consider enable env variable (#9467)
Audit was not working properly when enabled from the environment,
due to a typo in the code.

This commit fixes that but also considers the following variables:
  `MINIO_LOGGER_WEBHOOK_ENABLE_*` and
`MINIO_AUDIT_WEBHOOK_ENABLE_*`, so the user can use
the latter to temporarily disable a logger or audit configuration.
2020-04-28 16:10:51 +05:30
Harshavardhana
498389123e avoid unnecessary logging on fresh/newly replaced drives (#9470)
the data usage tracker and crawler seem to be logging
non-actionable information on the console, which is not
useful and is fixed on its own in almost all deployments;
let's keep this logging to a minimum.
2020-04-28 01:16:57 -07:00
Harshavardhana
bc61417284 calculate automatic node based symmetry (#9446)
it is possible in many scenarios that even
if the divisible value is optimal, we may
end up with uneven distribution due to the number
of nodes present in the configuration.

added code to allow for affinity towards various
ellipses to figure out the optimal value across
ellipses such that we can always reach a
symmetric value automatically.

Fixes #9416
2020-04-27 14:39:57 -07:00
Harshavardhana
97d952e61c fix: ensure buckets are preserved if one set returns error (#9468)
the bucket should be deleted only if it can be successfully
deleted on all sets; if not, we should ensure
those buckets are restored properly.
2020-04-27 14:18:02 -07:00
Klaus Post
073aac3d92 add data update tracking using bloom filter (#9208)
By monitoring PUT/DELETE and heal operations it is possible
to track changed paths and keep a bloom filter for this data. 

This can help prioritize paths to scan. The bloom filter can identify
paths that have not changed, and the few collisions will only result
in a marginal extra workload. This can be implemented at a
bucket+(1 prefix level) with reasonable performance.

The bloom filter is set to have a false positive rate at 1% at 1M 
entries. A bloom table of this size is about ~2500 bytes when serialized.

To not force a full scan of all paths that have changed, "cycle"
bloom filters need to be kept, so we can guarantee that dirty paths
have been scanned within the last "cycle" runs. Until "cycle" bloom
filters have been collected, all paths are considered dirty.
2020-04-27 10:06:21 -07:00
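For intuition, a minimal bloom filter with double hashing looks roughly like this (a generic sketch, not the implementation from this PR):

```
package sketch

import "hash/fnv"

// bloom is a minimal filter using double hashing: bit indexes are
// derived as h1 + i*h2 for i in [0, k).
type bloom struct {
	bits []uint64
	k    uint64
}

func newBloom(mBits, k uint64) *bloom {
	return &bloom{bits: make([]uint64, (mBits+63)/64), k: k}
}

func (b *bloom) hashes(path string) (h1, h2 uint64) {
	h := fnv.New64a()
	h.Write([]byte(path))
	h1 = h.Sum64()
	h.Write([]byte{0}) // cheap way to derive a second hash
	h2 = h.Sum64() | 1 // force odd so indexes spread over the table
	return
}

// Add records a changed path.
func (b *bloom) Add(path string) {
	h1, h2 := b.hashes(path)
	m := uint64(len(b.bits)) * 64
	for i := uint64(0); i < b.k; i++ {
		idx := (h1 + i*h2) % m
		b.bits[idx/64] |= 1 << (idx % 64)
	}
}

// MayContain returns false only if path definitely did not change,
// so unchanged paths can be skipped by the crawler.
func (b *bloom) MayContain(path string) bool {
	h1, h2 := b.hashes(path)
	m := uint64(len(b.bits)) * 64
	for i := uint64(0); i < b.k; i++ {
		idx := (h1 + i*h2) % m
		if b.bits[idx/64]&(1<<(idx%64)) == 0 {
			return false
		}
	}
	return true
}
```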
Harshavardhana
eff4127efd Revert "Write files in O_SYNC for fs backend to protect against machine crashes (#9434)"
This reverts commit 4843affd0e.
2020-04-27 09:22:05 -07:00
Harshavardhana
b1c0c32ba6 fix: ignore symlinks in backend filesystems (#9457)
fixes #9419
2020-04-27 06:30:12 -07:00
Harshavardhana
f14bf25cb9 optimize Listen bucket notification implementation (#9444)
this commit avoids lots of tiny allocations and repeated
channel creations which are performed when filtering
the incoming events, and unescaping a key just for matching.

also remove deprecated code which is not needed
anymore; this avoids unexpected data structure transformations
from the map to slice.
2020-04-27 06:25:05 -07:00
Harshavardhana
f216670814 use context specific to the etcd call (#9458) 2020-04-26 21:42:41 -07:00
Harshavardhana
6ecc98fddb fix: crash in metrics handler when some disks are offline (#9450)
Fixes #9449
2020-04-25 19:48:07 -07:00
Krishna Srinivas
4843affd0e Write files in O_SYNC for fs backend to protect against machine crashes (#9434) 2020-04-25 01:18:54 -07:00
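The O_SYNC flag used here is a standard `os` package flag; opening a file this way makes each write reach stable storage before returning, trading throughput for crash safety. A minimal sketch:

```
package sketch

import "os"

// createSync opens a file so that every write is flushed to stable
// storage before returning, at the cost of throughput.
func createSync(path string) (*os.File, error) {
	return os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_SYNC, 0666)
}
```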
Harshavardhana
558785a4bb fix: config Set/Get decrypt/encrypt using authenticated credentials (#9447)
we have a policy available for sub-admin users to set/get/delete
config, but we incorrectly decrypt the content using the admin secret
key, which in fact should be the credential authenticating the
request.
2020-04-24 22:36:48 -07:00
Harshavardhana
60d415bb8a deprecate/remove global WORM mode (#9436)
global WORM mode is a complex piece whose
time has passed; with the advent of S3-compatible
object locking and retention implementation, global
WORM is sort of deprecated. This has been mentioned
in our documentation for some time; now the time
has come for this to go.
2020-04-24 16:37:05 -07:00
BigUstad
45e22cf8aa fix: selectObject to return error when object does not exist (#9423) 2020-04-24 13:51:48 -07:00
Klaus Post
e4900b99d7 s3 select: Infer types for comparison (#9438) 2020-04-24 13:02:59 -07:00
Anis Elleuch
20766069a8 add list/delete API service accounts admin API (#9402) 2020-04-24 12:10:09 -07:00
Tim Hughes
e8160c9fae fix: same endpoint for NewLDAPIdentity & NewWithCredentials (#9433)
Also enables use of https endpoints

Fixes  #9431
2020-04-24 10:44:45 +05:30
Harshavardhana
957ecb1b64 use optimal memory while purging cache (#9426)
re-implement the cache purging routine to
avoid using ioutil.ReadDir, which can lead
to high allocations when there are cache
directories with lots of content, or
when the cache is installed in memory-constrained
environments.

Instead rely on a callback function where we
use no more than 8KiB of memory per
cycle.

For the precursor to this change refer to #9425; original
issue pointed out by Caleb Case <caleb@storj.io>
2020-04-23 12:26:13 -07:00
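A sketch of the batched-readdir approach described above, using the standard library's `Readdirnames(n)` so at most a small batch of names is held in memory at a time (names are illustrative):

```
package sketch

import (
	"io"
	"os"
	"path/filepath"
)

// walkDirNames reads directory entries in small batches instead of
// loading the entire listing into memory, invoking fn per entry.
func walkDirNames(dir string, batch int, fn func(path string) error) error {
	f, err := os.Open(dir)
	if err != nil {
		return err
	}
	defer f.Close()
	for {
		names, rerr := f.Readdirnames(batch)
		for _, name := range names {
			if err := fn(filepath.Join(dir, name)); err != nil {
				return err
			}
		}
		if rerr == io.EOF {
			return nil
		}
		if rerr != nil {
			return rerr
		}
	}
}
```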
Boaz
ac5061df2c fix: make azure gateway chunk size configurable (#9292) 2020-04-23 02:04:13 -07:00
Tim Hughes
cddb2714ef documentation: fix group search filter (#9420) 2020-04-22 22:29:17 -07:00
Minio Trusted
d7d9cac20b Update yaml files to latest version RELEASE.2020-04-23T00-58-49Z 2020-04-23 01:07:52 +00:00
Harshavardhana
6817c5ea58 migrate mint tests to latest versions (#9424) 2020-04-22 16:06:58 -07:00
Anis Elleuch
4cd6ca02c7 fix: Add missing return in admin requests auth (#9422) 2020-04-22 13:42:01 -07:00
Egon Elbre
a5efcbab51 fix: cacheReader.Close in all paths that don't return it. (#9418) 2020-04-22 12:13:57 -07:00
Egon Elbre
85be7b39ac Call cleanup funcs when skip fails (#9417) 2020-04-22 10:06:56 -07:00
Nitish Tiwari
ebf3dda449 Update server startup example to showcase local erasure code (#9407) 2020-04-21 23:59:13 -07:00
poornas
582953260b Increase response header timeout for gateway (#9400)
fixes: #9295
2020-04-21 19:21:27 -07:00
Minio Trusted
2d1ea86fc6 Update yaml files to latest version RELEASE.2020-04-22T00-11-12Z 2020-04-22 00:19:12 +00:00
Praveen raj Mani
322385f1b6 fix: only show active/available ARNs in server startup banner (#9392) 2020-04-21 09:38:32 -07:00
Anis Elleuch
1b38aed05f fix: Correct typo when registering peer Delete User API (#9403) 2020-04-21 09:31:51 -07:00
Anis Elleuch
a69c98e394 fix: Correct typo when registering peer Delete User API (#9403) 2020-04-21 08:35:19 -07:00
Harshavardhana
282c9f790a fix: validate partNumber in queryParam as part of preConditions (#9386) 2020-04-20 22:01:59 -07:00
Anis Elleuch
2eeb0e6a0b heal: Fix heal buckets result reporting (#9397)
healBucket() was not properly collecting results after healing
buckets. This commit adds the "After" drives information correctly.
2020-04-20 13:48:54 -07:00
Harshavardhana
3ff5bf2369 fix: convert storage class into azure tiers (#9381) 2020-04-19 13:42:56 -07:00
Harshavardhana
69ee28a082 remove OSS gateway due to lack of licensing (#9390)
The OSS Go SDK lacks licensing terms in its
repository, and there has been no activity
on the issue here: https://github.com/aliyun/aliyun-oss-go-sdk/issues/245

This PR is to ensure we remove any dependency code which
lacks an explicit license file in its repo.
2020-04-18 22:12:51 -07:00
sreenivas alapati
d02deff3d7 fixed typo in KMS documentation (#9384) 2020-04-18 18:09:25 -07:00
Sidhartha Mani
3e78ea8acc improve obd tests and optimize network (#9378)
- keep long running obd network tests alive
- fix error - wrong number of parents in process OBD info
- ensure that osinfo does not error out when inside containers
- remove limit on max number of connections per client transport

The generic client transport uses a default limit of 64 conns per transport.
This could end up limiting and throttling usage, and artificially slowing
down the performance of MinIO even on hardware capable of doing better.
2020-04-18 11:06:11 -07:00
Harshavardhana
b54c0f0ef3 Add stale/lock bot for issues (#9387) 2020-04-18 11:03:03 -07:00
Praveen raj Mani
c79358c67e notification queue limit has no maxLimit (#9380)
The value defaults to 100K events,
but users can tune this value up to any value
they deem necessary.

* increase the limit to maxint64 while validating
2020-04-18 01:20:56 -07:00
Harshavardhana
75107d7698 fix: remove any duplicate statements in policy input (#9385)
Add support for removing duplicate statements automatically
2020-04-17 21:26:42 -07:00
Klaus Post
c4464e36c8 fix: limit HTTP transport tunables to affordable values (#9383)
Close connections pro-actively in transient calls
2020-04-17 11:20:56 -07:00
Harshavardhana
d92db198d1 Add target parsing code for config (#9375)
This code is helper for mcs project
2020-04-16 17:43:14 -07:00
Harshavardhana
8bae956df6 allow copyObject to rotate storageClass of objects (#9362)
Added additional mint tests as well to verify this
functionality.

Fixes #9357
2020-04-16 17:42:44 -07:00
Eco
7758524703 Add documentation for using MinIO with Veeam (#9355) 2020-04-16 17:36:14 -07:00
Harshavardhana
c82fa2c829 fix: load LDAP users appropriately (#9360)
This PR also fixes related issues:

deletePolicy and deleteUser are idempotent, so they can lead to
issues when the client prematurely times out; a retry
call's error response should be ignored when the call returns
http.StatusNotFound

Fixes #9347
2020-04-16 16:22:34 -07:00
Harshavardhana
a51280fd20 allow config help in gateway mode (#9356)
allow `mc admin config set mygateway/ audit_webhook --env`
to fetch the documentation as needed; this is just to
ensure that our users can still access the relevant
ENV docs while running in gateway mode.
2020-04-16 14:49:12 -07:00
Klaus Post
bd437c1c17 set server base context on gateway http server (#9365) 2020-04-16 11:54:12 -07:00
Harshavardhana
69fb68ef0b fix simplify code to start using context (#9350) 2020-04-16 10:56:18 -07:00
Nitish Tiwari
787dbaff36 Enforce issue templates for new issues (#9364) 2020-04-16 10:54:59 -07:00
Minio Trusted
c50ae1fdbe Update yaml files to latest version RELEASE.2020-04-15T19-42-18Z 2020-04-15 20:00:16 +00:00
Harshavardhana
bde0f444db fix support OBDAdminAction is valid action (#9354) 2020-04-15 12:16:40 -07:00
Klaus Post
6a8298b137 Reduce Mutex test runs (#9345)
Some tests take a long time on CI:

* `--- PASS: TestRWMutex (226.49s)`
* ` --- PASS: TestRWMutex (7.13s)`

Reduce the number of runs.

Before/after locally:

```
--- PASS: TestRWMutex (20.95s)
--- PASS: TestRWMutex (7.13s)

--- PASS: TestMutex (3.01s)
--- PASS: TestMutex (1.65s)
```
2020-04-14 18:39:03 -07:00
Klaus Post
f19cbfad5c fix: use per test context (#9343)
Instead of GlobalContext use a local context for tests.
Most notably this allows stuff created to be shut down
when the tests using it are done. After PRs #9345 and #9331, CI is
often running out of memory/time.
2020-04-14 17:52:38 -07:00
Minio Trusted
78f2183e70 Update yaml files to latest version RELEASE.2020-04-15T00-39-01Z 2020-04-15 00:46:50 +00:00
Harshavardhana
5c11a46412 update minio-go/parquet-go to latest 2020-04-14 16:53:29 -07:00
Anis Elleuch
8a94aebdb8 config: Add api requests max & deadline configs (#9273)
Add two new configuration entries, api.requests-max and
api.requests-deadline which have the same role of
MINIO_API_REQUESTS_MAX and MINIO_API_REQUESTS_DEADLINE.
2020-04-14 12:46:37 -07:00
Sidhartha Mani
ec11e99667 implement configurable timeout for OBD tests (#9324) 2020-04-14 11:48:32 -07:00
Harshavardhana
37d066b563 fix: deprecate requirement of session token for service accounts (#9320)
This PR fixes a couple of behaviors with service accounts

- no need to have a session token for service accounts
- service accounts can be generated by any user for themselves
  implicitly, with a valid signature.
- policy input for the AddNewServiceAccount API is now fully typed,
  allowing for validation before it is sent to the server.
- also bring in additional context for admin API errors, if any,
  when replying back to the client.
- deprecate the GetServiceAccount API as we do not need to reply
  back session tokens
2020-04-14 11:28:56 -07:00
Praveen raj Mani
bfec5fe200 fix: fetchLambdaInfo should return consistent results (#9332)
- Introduced a function `FetchRegisteredTargets` which will return
  a complete set of registered targets irrespective of their states,
  if the `returnOnTargetError` flag is set to `False`
- Refactor NewTarget functions to return non-nil targets
- Refactor GetARNList() to return a complete list of configured targets
2020-04-14 11:19:25 -07:00
Bala FA
525287f4b6 remove queue only if index is within the range (#9341)
Fixes minio/mc#3155
2020-04-14 11:06:23 -07:00
Harshavardhana
9054ce73b2 fix: deprecate skyring/uuid and use maintained google/uuid (#9340) 2020-04-14 02:40:05 -07:00
Harshavardhana
d079adc167 fix: remove initGlobalContext writes in tests (#9331)
since we do not close GlobalContext, we do not
need to reinitialize it inside test code
2020-04-13 23:21:01 -07:00
Harshavardhana
a9d401ac10 fix: update docs to mention erasure guide (#9339) 2020-04-14 11:38:14 +05:30
kannappanr
1fa65c7f2f fix: object lock behavior when default lock config is enabled (#9305) 2020-04-13 14:03:23 -07:00
Harshavardhana
cc9b63eb51 add deprecation docs for PostgresSQL/MySQL targets (#9333) 2020-04-13 12:13:33 -07:00
Harshavardhana
7e12eab3ad fix: cleanup madmin docs (#9330) 2020-04-13 10:30:41 +05:30
Roland Huß
fa685d7d9c Make multistage Dockerfile self-contained (#9323)
Picking up all support files from the builder image has the advantage
that the Dockerfile is now fully self-contained and can also be
run standalone.

This also allows cross-compilation and pushing with the proper manifests
using Docker Buildkit:

```
docker buildx create --name xbuilder
docker buildx use xbuilder

docker buildx build -f Dockerfile.minio --platform linux/arm/v7,linux/amd64 --progress plain --push -t minio/minio .
```

which also has the advantage that the Dockerfile is the same
for all platforms.

Co-authored-by: Harshavardhana <harsha@minio.io>
2020-04-12 20:03:02 -07:00
Harshavardhana
4314ee1670 fix: remove unusued PerfInfoHandler code (#9328)
- Removes the PerfInfo admin API as it's not OBDInfo
- Keep the drive path without the metaBucket in the OBD
  global latency map.
- Remove all the unused code related to the PerfInfo API
- Do not redefine global mib,gib constants; use
  humanize.MiByte and humanize.GiByte instead
2020-04-12 19:37:09 -07:00
Harshavardhana
7d636a7c13 enable --compat flag by default (#9326)
if needed, use --no-compat to disable md5sum while
verifying any performance numbers.

bring back --compat behavior as the default to avoid
additional documentation and confusing behavior;
as we are working towards making md5sum
faster on AVX instructions, enabling this
should hardly be a problem in future versions
of MinIO.

fixes #8012
fixes #7859
fixes #7642
2020-04-12 18:08:27 -07:00
Harshavardhana
bf9d51cf14 fix: add missing copyright headers in some files (#9321) 2020-04-12 13:55:22 -07:00
Harshavardhana
29e0727b58 fix: regression in CopyObject not preserving ETag in --compat (#9322)
issue found after `git bisect` to commit db41953618
2020-04-11 20:20:30 -07:00
Anis Elleuch
c434dff0a4 posix: Add missing error return in RenameFile() (#9319)
Although it should not happen in most cases.
2020-04-11 11:15:30 -07:00
Taras Parkhomenko
b2a8cb4aba Add SHA-3 support (#9308) 2020-04-10 14:59:52 -07:00
Harshavardhana
b412a222ae Add missing comment key from key list (#9313)
Continuing from previous PR #9304: comment
is a special key that is not present in the
default KV list. Add it explicitly when
tokenizing fields, as it is possible that
some clients might try to set comments.
2020-04-10 11:44:28 -07:00
Harshavardhana
79bcb705bf update CREDITS file to reflect new deps (#9311) 2020-04-10 00:16:38 -07:00
Sidhartha Mani
9f81d014f1 fix: drive names in output of parallel obd test (#9312) 2020-04-09 22:44:17 -07:00
Harshavardhana
3184205519 fix: config to support keys with special values (#9304)
This PR adds context-based `k=v` splits based
on the sub-system which was obtained; if the
keys are not provided an error will be thrown
during parsing, and if keys are provided with wrong
values an error will be thrown. Keys can now
have values which are of a much more complex
form, such as `k="v=v"` or `k=" v = v"`
and other variations.

additionally, deprecate unnecessary postgres/mysql
configuration styles, support only

- connection_string for Postgres
- dsn_string for MySQL

All other parameters are removed.
2020-04-09 21:45:17 -07:00
Minio Trusted
7c919329e8 Update yaml files to latest version RELEASE.2020-04-10T03-34-42Z 2020-04-10 03:47:00 +00:00
Andreas Auernhammer
db41953618 avoid unnecessary KMS requests during single-part PUT (#9220)
This commit fixes a performance issue caused
by too many calls to the external KMS - i.e.
for single-part PUT requests.

In general, the issue is caused by a sub-optimal
code structure. In particular, when the server
encrypts an object it requests a new data encryption
key from the KMS. With this key it does some key
derivation and encrypts the object content and
ETag.

However, to behave S3-compatibly the MinIO server
has to return the plaintext ETag to the client
in the case of SSE-S3.
Therefore, the server code used to decrypt the
(previously encrypted) ETag again by requesting
the data encryption key (KMS decrypt API) from
the KMS.

This leads to 2 KMS API calls (1 generate key and
1 decrypt key) per PUT operation - while only
one KMS call is necessary.

This commit fixes this by fetching a data key only
once from the KMS and keeping the derived object
encryption key around (for the lifetime of the request).

This leads to a significant performance improvement
w.r.t. to PUT workloads:
```
Operation: PUT
Operations: 161 -> 239
Duration: 28s -> 29s
* Average: +47.56% (+25.8 MiB/s) throughput, +47.56% (+2.6) obj/s
* Fastest: +55.49% (+34.5 MiB/s) throughput, +55.49% (+3.5) obj/s
* 50% Median: +58.24% (+32.8 MiB/s) throughput, +58.24% (+3.3) obj/s
* Slowest: +1.83% (+0.6 MiB/s) throughput, +1.83% (+0.1) obj/s
```
2020-04-09 17:01:45 -07:00
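A sketch of the flow after the fix, using hypothetical types: one KMS call produces the data key, and the derived object key is reused for both content and ETag, so no decrypt call is needed for the response:

```
package sketch

// kmsClient models the single KMS operation needed on the PUT path.
type kmsClient interface {
	// GenerateDataKey returns the plaintext data key and its sealed
	// (encrypted) form in one round-trip.
	GenerateDataKey(keyID string) (plaintext, sealed []byte, err error)
}

type objectKeys struct {
	objectKey []byte // derived object encryption key, kept for the request lifetime
	sealedKey []byte // stored in the object metadata
}

// keysForPut performs exactly one KMS call; the caller encrypts both
// the content and the ETag with objectKey, and can decrypt the ETag
// for the response without a second KMS round-trip.
func keysForPut(kms kmsClient, keyID string, derive func(dataKey []byte) []byte) (*objectKeys, error) {
	plain, sealed, err := kms.GenerateDataKey(keyID) // the single KMS call
	if err != nil {
		return nil, err
	}
	return &objectKeys{objectKey: derive(plain), sealedKey: sealed}, nil
}
```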
Harshavardhana
cea078a593 update browser assets for image-preview feature 2020-04-09 14:34:37 -07:00
Harshavardhana
f44cfb2863 use GlobalContext whenever possible (#9280)
This change is throughout the codebase to
ensure that all codepaths honor GlobalContext
2020-04-09 09:30:02 -07:00
Anis Elleuch
1b45be0d60 lifecycle: Disallow delete when the object is locked (#9272) 2020-04-09 09:28:57 -07:00
Aditya Manthramurthy
6bb693488c Fix policy setting error in LDAP setups (#9303)
Fixes #8667

In addition to the above, if the user is mapped to a policy or 
belongs in a group, the user-info API returns this information, 
but otherwise, the API will now return a non-existent user error.
2020-04-09 01:04:08 -07:00
Harshavardhana
e20e08d700 fix: remove the sleep from listing operations (#9287)
make the rest of the Walk() function more predictable;
it was observed that in nominal deployments, even
without much workload, the drives are generally
slow to respond to readdir operations. With a
sleepDuration factor of 10 this can cause
unexpected slowness in Listing calls; while it is good
for all other I/O, it may simply slow
down Listing immensely, which is not useful.

fixes #9261
2020-04-08 19:42:57 -07:00
Harshavardhana
ac07df2985 start watcher after all creds have been loaded (#9301)
start watcher after all creds have been loaded
to avoid any conflicting locks that might get
deadlocked.

Deprecate unused peer calls for LoadUsers()
2020-04-08 19:00:39 -07:00
Praveen raj Mani
2054ca5c9a fix: honor token based authentication in NATS streaming (#9296)
fixes #9148
2020-04-08 12:45:24 -07:00
Anis Elleuch
e51e465543 delete: Use physical Dir() for proper prefix cleanup in Windows (#9297)
In FS mode under Windows, removing an object will not automatically
remove parent empty prefixes.

The reason is that path.Dir() was used; however, filepath.Dir() is
more appropriate, since filepath is physical (meaning it operates
on OS filesystem paths).

This was not caught because failures in Windows CI are not caught.
2020-04-08 11:32:58 -07:00
tweigel-dev
2bbc6a83e8 feature preview of image-objects (#9239) 2020-04-08 10:47:47 -07:00
ebozduman
a78731a3ba Adds info on policy for STS authentication using web-id (#9289) 2020-04-08 10:34:43 -07:00
kumy
f4e779c964 Fix typo in LDAP STS guide (#9294) 2020-04-08 08:58:03 -07:00
Pontus Leitzler
a973402821 add object api check in fs-v1 before returning ready (#9285)
fs-v1 in server mode only checks whether the path exists, so it
reports ready before it is indeed ready.

This change adds a check to ensure that the global object API is
available too before reporting ready.

Fixes #9283
2020-04-08 08:53:20 -07:00
Sidhartha Mani
44decbeae0 increase drive OBD blocksize to 4MB (#9258) 2020-04-08 06:04:27 -07:00
César Nieto
3ea1be3c52 allow delete of a group with no policy set (#9288) 2020-04-08 06:03:57 -07:00
Harshavardhana
2642e12d14 fix: change policies API to return and take struct (#9181)
This allows for order guarantees in the returned values,
which can be consumed safely by the caller to avoid any
additional parsing and validation.

Fixes #9171
2020-04-07 19:30:59 -07:00
Harshavardhana
e7276b7b9b fix: make single locks for both IAM and object-store (#9279)
Additionally add context support for IAM sub-system
2020-04-07 14:26:39 -07:00
Harshavardhana
e375341c33 fix: allow any 127.0.0.x as bind IPs (#9281)
It is sometimes common and convenient to use
just local IPs for testing purposes; 127.0.0.x
are special IPs that, regardless of being available on
an interface, can be bound to on all operating
systems.

Allow this behavior to work for the minio server

fixes #9274
2020-04-07 09:40:20 -07:00
Harshavardhana
2c20716f37 fix: Avoid force delete in compliance/worm mode (#9276)
also, bring in an additional policy to ensure that
force-deleting a bucket is only allowed with the right
policy for the user; just the DeleteBucketAction
policy action is not enough.
2020-04-06 17:51:05 -07:00
Harshavardhana
928f5b0564 fix: Quit when the context is canceled in madmin (#9264) 2020-04-06 17:50:14 -07:00
Harshavardhana
91f21ddc47 fix: ignore lost+found properly while reading disks (#9278)
Fixes #9277
2020-04-06 16:51:18 -07:00
Harshavardhana
43a3778b45 fix: support object-remaining-retention-days policy condition (#9259)
This PR also tries to simplify the approach taken in
the object-locking implementation, by giving preferential
treatment towards full validation.

This in turn has fixed a couple of bugs related to
how policy should have been honored when ByPassGovernance
is provided.

Simplifies the code a bit, but also duplicates code intentionally
for clarity, due to the complex nature of the object locking
implementation.
2020-04-06 13:44:16 -07:00
Bitworks LLC
b9b1bfefe7 Added a function which allows passing the UID/GID for suexec from the outside. (#9251) 2020-04-06 13:28:23 -07:00
Minio Trusted
05cda35b14 Update yaml files to latest version RELEASE.2020-04-04T05-39-31Z 2020-04-04 05:48:22 +00:00
Harshavardhana
2155e74951 update minio-go to latest v6.0.52 2020-04-03 18:06:50 -07:00
Harshavardhana
4714958e99 fix: possible connection leaks in sets init, heal (#9263) 2020-04-03 18:06:31 -07:00
Minio Trusted
c6e62b9175 Update yaml files to latest version RELEASE.2020-04-02T21-34-49Z 2020-04-02 21:44:04 +00:00
Harshavardhana
ab66b23194 fix: allow listBuckets with listBuckets permission (#9253) 2020-04-02 12:35:22 -07:00
Harshavardhana
73f9d8a636 set default storage class always (#9250)
gateway implementations might not respond
back with the right storage class, which is
an AWS S3 concept; add the default storage
class if it's empty.
2020-04-02 00:23:09 -07:00
Krishna Srinivas
541a778d7b fix: do not exit on bootstrap Verify() to allow for rolling upgrades (#9235) 2020-04-01 21:40:03 -07:00
Harshavardhana
d49f2ec19c fix: use specified authToken for audit/logger HTTP targets (#9249)
We were not using the auth token specified
even when config supports it.
2020-04-01 20:53:07 -07:00
ebozduman
8dd63a462f fix: ETag returned by OSS endpoint (#9243) 2020-04-01 19:51:12 -07:00
Anis Elleuch
9902c9baaa sql: Add support of escape quote in CSV (#9231)
This commit modifies csv parser, a fork of golang csv
parser to support a custom quote escape character.

The quote escape character is used to escape the quote
character when a csv field contains a quote character
as part of data.
2020-04-01 15:39:34 -07:00
Harshavardhana
7de29e6e6b Add rotating token support for admin API (#9244)
Use the *credentials.Credentials implementation method *Get*

```
func (c *Credentials) Get() (Value, error) {
```

which also handles auto-refresh, this allows for chaining
of various implementations together if necessary or simply
initialize with credentials.NewStaticV4(access, secret, token)

Co-authored-by: Klaus Post <klauspost@gmail.com>
2020-04-01 13:34:20 -07:00
poornas
336460f67e fix: gateway_s3_bytes_sent metric for all API methods (#9242)
Co-authored-by: Harshavardhana <harsha@minio.io>
2020-04-01 12:52:31 -07:00
Bala FA
95e89f1712 proactive deep heal object when a bitrot is detected (#9192) 2020-04-01 12:14:00 -07:00
Harshavardhana
886ae15464 trimpaths when building minio binaries (#9246) 2020-04-01 10:45:11 -07:00
Harshavardhana
d8af244708 Add numeric/date policy conditions (#9233)
add new policy conditions

- NumericEquals
- NumericNotEquals
- NumericLessThan
- NumericLessThanEquals
- NumericGreaterThan
- NumericGreaterThanEquals
- DateEquals
- DateNotEquals
- DateLessThan
- DateLessThanEquals
- DateGreaterThan
- DateGreaterThanEquals
2020-04-01 00:04:25 -07:00
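For illustration, a DateLessThan-style comparison could be evaluated like this (a hedged sketch; MinIO's actual condition types are richer):

```
package sketch

import "time"

// dateLessThan evaluates a DateLessThan-style condition by comparing
// a request-supplied RFC 3339 timestamp against the policy value.
func dateLessThan(requestValue, policyValue string) (bool, error) {
	rv, err := time.Parse(time.RFC3339, requestValue)
	if err != nil {
		return false, err
	}
	pv, err := time.Parse(time.RFC3339, policyValue)
	if err != nil {
		return false, err
	}
	return rv.Before(pv), nil
}
```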
Sidhartha Mani
c8243706b4 Add Parallel NetOBD tests to saturate all nodes at once (#9241) 2020-03-31 17:08:28 -07:00
Harshavardhana
30707659b5 [feature] allow for an odd number of erasure packs (#9221)
Too many deployments come up with an odd number
of hosts or drives; to facilitate even distribution
among those setups, allow for packs based on odd
and prime numbers.
2020-03-31 09:32:16 -07:00
poornas
90c365a174 fix: allow overwriting objects under lock after retention period (#9232)
fixes #9230
2020-03-31 09:15:42 -07:00
Sidhartha Mani
7b732b566f [Bugfix] Fix Net tests being omitted (#9234) 2020-03-31 01:15:21 -07:00
Harshavardhana
ba52a925f9 fix: delete dangling directories properly (#9222) 2020-03-30 09:48:24 -07:00
ebozduman
fdda5f98c6 Makes mandatory dsn_string parameter optional (#8931) 2020-03-28 22:20:02 -07:00
Ingmar Runge
fa4d627b57 B2 gateway S3 compat: return MD5 hash as ETag from PutObject (#9183)
- B2 does actually return an MD5 hash for newly uploaded objects
  so we can use it to provide better compatibility with S3 client
  libraries that assume the ETag is the MD5 hash such as boto.
- depends on change in blazer library.
- new behaviour is only enabled if MinIO's --compat mode is active.
- behaviour for multipart uploads is unchanged (works fine as is).
2020-03-28 13:59:55 -07:00
Bala FA
2c3e34f001 add force delete option of non-empty bucket (#9166)
passing HTTP header `x-minio-force-delete: true` would 
allow standard S3 API DeleteBucket to delete a non-empty
bucket forcefully.
2020-03-27 21:52:59 -07:00
Anis Elleuch
7f8f1ad4e3 fix: cleanup lifecycle unused code (#9219) 2020-03-27 18:57:50 -07:00
Harshavardhana
6f992134a2 fix: startup load time by reusing storageDisks (#9210) 2020-03-27 14:48:30 -07:00
Sidhartha Mani
0c80bf45d0 Implement on-board diagnostics admin API (#9024)
- Implement a graph algorithm to test network bandwidth from every 
  node to every other node
- Saturate any network bandwidth adaptively, accounting for slow 
  and fast network capacity
- Implement parallel drive OBD tests
- Implement a paging mechanism for OBD test to provide periodic updates to client
- Implement Sys, Process, Host, Mem OBD Infos
2020-03-26 21:07:39 -07:00
Robert Thomas
2777956581 Improve YAML download links listed in K8s doc (#9213) 2020-03-26 11:17:00 -07:00
Anis Elleuch
b207520d98 Fix lifecycle GET: AWS SDK complaints on empty config (#9201) 2020-03-25 21:06:03 -07:00
Minio Trusted
2196fd9cd5 Update yaml files to latest version RELEASE.2020-03-25T07-03-04Z 2020-03-25 07:11:33 +00:00
Krishna Srinivas
ef6304c5c2 Improve connectDisks() performance (#9203) 2020-03-24 23:26:13 -07:00
Nitish Tiwari
6b984410d5 Add support for self-healing related metrics in Prometheus (#9079)
Fixes #8988

Co-authored-by: Anis Elleuch <vadmeste@users.noreply.github.com>
Co-authored-by: Harshavardhana <harsha@minio.io>
2020-03-24 22:40:45 -07:00
Harshavardhana
813e0fc1a8 fix: optimize isConnected to avoid url.String() conversions (#9202)
Stringifying in a loop can tax the system; avoid this by
converting the endpoints to strings early on and
remembering them for the lifetime of the server.
2020-03-24 18:53:24 -07:00
Harshavardhana
38cf263409 fix: docs remove goreportcard, its deprecated 2020-03-24 14:51:06 -07:00
Harshavardhana
6f6a2214fc Add rate limiter for S3 API layer (#9196)
- total number of S3 API calls per server
- maximum wait duration for any S3 API call

This implementation is primarily meant for situations
where HDDs are not capable enough to handle the incoming
workload and there is no way to throttle the client.

This feature allows MinIO server to throttle itself
such that we do not overwhelm the HDDs.
2020-03-24 12:43:40 -07:00
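A common way to implement such throttling is a semaphore with a wait deadline; the sketch below is illustrative, not MinIO's exact handler:

```
package sketch

import (
	"net/http"
	"time"
)

// maxClients limits in-flight requests to n; a waiter that cannot
// acquire a slot before the deadline receives 503 instead of hanging.
func maxClients(next http.Handler, n int, deadline time.Duration) http.Handler {
	sem := make(chan struct{}, n)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		select {
		case sem <- struct{}{}:
			defer func() { <-sem }()
			next.ServeHTTP(w, r)
		case <-time.After(deadline):
			http.Error(w, "too many requests, try again later", http.StatusServiceUnavailable)
		}
	})
}
```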
Anis Elleuch
791821d590 sa: Allow empty policy to indicate parent user's policy is inherited (#9185) 2020-03-23 14:17:18 -07:00
Harshavardhana
9a951da881 honor the credentials of user admin for encrypt/decrypt (#9194)
Fixes #9193
2020-03-23 14:06:00 -07:00
Praveen raj Mani
e7a0be5bd3 fix: throttling of events during their replay (#9188) 2020-03-23 12:34:39 -07:00
Harshavardhana
ff932ca2a0 fix: log only catastrophic errors in prepare storage (#9189) 2020-03-23 07:32:18 -07:00
poornas
818d3bcaf5 fix: deprecate TestDiskCache test from unit tests (#9187) 2020-03-22 23:46:36 -07:00
Krishna Srinivas
45b1c66195 fix: implement splunk specific listObjects when delimiter=guidSplunk (#9186) 2020-03-22 19:23:47 -07:00
Harshavardhana
da04cb91ce optimize listObjects to list only from 3 random disks (#9184) 2020-03-22 16:33:49 -07:00
Harshavardhana
cfc9cfd84a fix: various optimizations, idiomatic changes (#9179)
- acquire a leader lock for all background operations
  - healing, crawling and applying lifecycle policies.

- simplify lifecycle to avoid network calls, which was a
  bug in the implementation - we should hold a leader and
  do everything from there; we have access to the entire
  name space.

- make listing and walking not interfere, by slowing themselves
  down like the crawler.

- effectively use the global context everywhere to ensure
  proper shutdown, in cache, lifecycle, healing

- don't read `format.json` for prometheus metrics in the
  StorageInfo() call.
2020-03-22 12:16:36 -07:00
Harshavardhana
ea18e51f4d Support multiple LDAP OU's, sAMAccountName support (#9139)
Fixes #8532
2020-03-21 22:47:26 -07:00
Harshavardhana
3d3beb6a9d Add response header timeouts (#9170)
- Add conservative timeouts up to 3 minutes
  for internode communication
- Add aggressive timeouts of 30 seconds
  for gateway communication

Fixes #9105
Fixes #8732
Fixes #8881
Fixes #8376
Fixes #9028
2020-03-21 22:10:13 -07:00
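The knob involved is the standard library's `http.Transport.ResponseHeaderTimeout`; a minimal sketch (the timeout values from the commit text appear only as a comment):

```
package sketch

import (
	"net/http"
	"time"
)

// newTransport returns a transport that fails a request when the
// backend has not started responding within headerTimeout, instead
// of hanging forever.
func newTransport(headerTimeout time.Duration) *http.Transport {
	return &http.Transport{
		// e.g. up to 3 minutes internode, 30 seconds for gateways
		ResponseHeaderTimeout: headerTimeout,
	}
}
```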
poornas
27b8f18cce Fix storage info message on startup (#9177) 2020-03-21 10:02:20 -07:00
Harshavardhana
bf545dc320 migrate to new minio-go with latest changes (#9176)
- extract userTags from Get/Head request (#1249)
- fix: Context cancellation not handled (#1250)
- Check for correct http status in remove object tagging (#1248)
- simplify extracting metadata in Head/Get object (#1245)
- fix: close and remove .minio.part file on errors (#1243)
2020-03-20 17:28:36 -07:00
stefan-work
f001e99fcd create the final file with mode 0666 for multipart-uploads (#9173)
The NAS gateway creates non-multipart uploads with mode 0666,
but multipart uploads are created with a differing mode of 0644.

Both modes should be equal! Otherwise it leads to files with different
permissions based on file size. This patch solves that by
using 0666 for both cases.
2020-03-20 15:32:15 -07:00
Harshavardhana
b4bfdc92cc fix: admin console logger changes to log.Info 2020-03-20 15:14:14 -07:00
Harshavardhana
ae654831aa Add madmin package context support (#9172)
This is to improve responsiveness for all
admin API operations and allowing callers
to cancel any on-going admin operations,
if they happen to be waiting too long.
2020-03-20 15:00:44 -07:00
Stephen N
1ffa983a9d added support for SASL/SCRAM on Kafka bucket notifications. (#9168)
fixes #9167
2020-03-20 11:10:27 -07:00
Nitish Tiwari
ecf1566266 Add an option to allow plaintext connection to LDAP/AD Server (#9151) 2020-03-19 19:20:51 -07:00
Minio Trusted
c5b87f93dd Update yaml files to latest version RELEASE.2020-03-19T21-49-00Z 2020-03-19 21:57:16 +00:00
Harshavardhana
b1a2169dcc fix: data usage crawler env handling, usage-cache.bin location (#9163)
canonicalize the ENVs such that we can bring these ENVs 
as part of the config values, as a subsequent change.

- fix location of per bucket usage to `.minio.sys/buckets/<bucket_name>/usage-cache.bin`
- fix location of the overall usage in `json` at `.minio.sys/buckets/.usage.json`
  (avoid conflicts with a bucket named `usage.json` )
- fix location of the overall usage in `msgp` at `.minio.sys/buckets/.usage.bin`
  (avoid conflicts with a bucket named `usage.bin`)
2020-03-19 09:47:47 -07:00
Harshavardhana
d45a1808f2 fix: Walk() should require quorum number of disks only (#9164) 2020-03-18 20:56:07 -07:00
Anis Elleuch
db2155551a heal: Pass scan mode to HealObjects to deep scan full quorum objects (#9159)
As an optimization of healing, HealObjects() avoids sending an
object to the background healing subsystem when the object is
present in all disks.

However, HealObjects() should have checked the scan type: if it is
deep, always pass the object to the healing subsystem.
2020-03-18 17:50:00 -07:00
Harshavardhana
09d35d3b4c fix: sts to return appropriate errors (#9161) 2020-03-18 17:25:45 -07:00
Anis Elleuch
5b9342d35c xl: Tree walking should not quit when one disk returns empty (#9160)
Currently, a tree walk, needed to list objects in a specific
set, quits listing as soon as it finds no entries on a disk, which
is wrong.

This affected background healing, because the latter is using
the tree walk directly. If an object does not exist on the first
disk, for example, it will seem like the object does not
exist at all and no healing work is needed.

This commit fixes the behavior.
2020-03-18 16:58:05 -07:00
Klaus Post
8d98662633 re-implement data usage crawler to be more efficient (#9075)
Implementation overview: 

https://gist.github.com/klauspost/1801c858d5e0df391114436fdad6987b
2020-03-18 16:19:29 -07:00
Anis Elleuch
7fdeb44372 info: Initialize boot time early so uptime will always be correct (#9154) 2020-03-17 16:37:28 -07:00
poornas
59dced8237 Print node status even in --quiet mode (#9149) 2020-03-17 15:25:00 -07:00
Anis Elleuch
496f4a7dc7 Add service account type in IAM (#9029) 2020-03-17 10:36:13 -07:00
kannappanr
8b880a246a fix: deleteObjectTagging should 204 on success (#9150) 2020-03-16 23:21:24 -07:00
Klaus Post
eeb5942b6b fix: remote profile names and extension (#9145)
Remote profiles are not formatted correctly:

```
profile-172.31.91.126_9000-cpu.pprof
profile-172.31.91.126_9000-goroutines-before.txt
profile-172.31.91.126_9000-goroutines.txt
profiling-172.31.80.49_9000-cpu.pprof.pprof
profiling-172.31.80.49_9000-goroutines-before.txt.pprof
profiling-172.31.80.49_9000-goroutines.txt.pprof
profiling-172.31.86.101_9000-cpu.pprof.pprof
profiling-172.31.86.101_9000-goroutines-before.txt.pprof
profiling-172.31.86.101_9000-goroutines.txt.pprof
profiling-172.31.91.191_9000-cpu.pprof.pprof
profiling-172.31.91.191_9000-goroutines-before.txt.pprof
profiling-172.31.91.191_9000-goroutines.txt.pprof
```

`profiling` -> `profile`, remove extra extension.
2020-03-16 11:39:53 -07:00
yeungc
7ec904d67b fix: wording and update content of chinese docs (#9140) 2020-03-16 10:04:16 -07:00
Harshavardhana
c9212819af fix: lock maintenance should honor quorum (#9138)
The staleness of a lock should be determined by
a quorum number of entries returning stale;
this allows for situations when locks are held
while nodes are down - we don't accidentally
clear locks unintentionally when they are valid
and correct.

Also, lock maintenance should be run by all servers,
not one server; stale-lock cleanup needs to run outside
the requirement for holding distributed locks.

Thanks @klauspost for reproducing this issue
2020-03-15 11:55:52 -07:00
poornas
10fd53d6bb Fix: admin config set API for notifications (#9085)
Filter out targets set via env when
validating incoming config change against
configured notification targets

Fixes #9066
2020-03-14 00:01:15 -07:00
gzur
3fea1d5e35 Align STS web-identity code snippet to documentation (minio#9114) (#9130) 2020-03-13 22:58:53 -07:00
Anis Elleuch
35ecc04223 Support configurable quote character parameter in Select (#8955) 2020-03-13 22:09:34 -07:00
Harshavardhana
3ca9f5ffa3 Update yaml files to latest version RELEASE.2020-03-14T02-21-58Z 2020-03-13 20:05:27 -07:00
Krishna Srinivas
2e9fed1a14 non-empty dirs should not be listed as objects (#9129) 2020-03-13 17:43:00 -07:00
Nitish Tiwari
6b92f3fd99 Add docker files for ARM32 and ARM64 builds (#9132) 2020-03-13 13:37:39 -07:00
Kody A Kantor
06e30b5aa1 Skip building directio on platforms that don't support Direct IO (#9059) 2020-03-12 18:57:41 -07:00
Harshavardhana
603cf2a8bb fix: broken gzip handling with Select API (#9128)
This PR fixes a regression introduced in a1c7c9ea73
2020-03-12 15:34:11 -07:00
Harshavardhana
a54cdb9587 fix: Send x-amz-mp-parts-count for multiparted objects (#9116)
Some AWS SDKs latently rely on this value at times
to calculate the right number of parts during a parallel
GetObject request; this is a feature used along with
content-range, so we should support this as well.
2020-03-12 12:37:27 -07:00
Andreas Auernhammer
ed4bd20a7c change ca path env. var in KMS guide (#9125)
This commit fixes the env. variable in the
KMS guide used to specify the CA certificates
for the KES server.

Before, the env. variable `MINIO_KMS_KES_CAPATH` was
used - which works in non-containerized environments
due to how MinIO merges the config file and environment
variables. In containerized environments (e.g. docker)
this does not work, and trying to specify `MINIO_KMS_KES_CAPATH`
instead of `MINIO_KMS_KES_CA_PATH` eventually leads to MinIO not
trusting the certificate presented by the KES server.

See: cfd12914e1/cmd/crypto/config.go (L186)
2020-03-12 07:47:40 -07:00
Harshavardhana
cfd12914e1 fix: crash in serverInfo handler when ldap is configured (#9123) 2020-03-11 23:13:32 -07:00
Klaus Post
c55aeaf814 Update compression package (#9120)
Fix a potential problem on non-AMD64 platforms and very small files

https://github.com/klauspost/compress/pull/244
2020-03-11 23:02:15 -07:00
Anis Elleuch
fdf65aa9b9 heal: Add info about the next background healing round (#9122)
- avoid setting last heal activity when starting self-healing

This can be confusing to users, who may think that the self-healing
cycle was already performed.

- add info about the next background healing round
2020-03-11 23:00:31 -07:00
Harshavardhana
69b2aacf5a fix return proper error for OperationTimedout (#9117)
An OperationTimedout error occurs when locking
times out while trying to acquire a lock. This
error should be returned appropriately to
the client with HTTP status "408" (request timed out).

This translation was broken; fix it.
2020-03-11 14:11:04 -07:00
Anis Elleuch
0af62d35a0 xl: Implement posix.DeletePrefixes to enhance delete perf (#9100)
The Bulk delete API was using cleanupObjectsBulk(), which calls the posix
listing and delete APIs to remove objects' internal files in the
backend (xl.json and parts) one by one.

Add DeletePrefixes in the storage API to remove the content
of a directory in a single call.

Also use a remove goroutine for each disk to accelerate removal.
2020-03-11 08:56:36 -07:00
Nitish Tiwari
7c32f3f554 Fix the URL for MinIO update when using custom download server (#9111)
Co-authored-by: Nitish Tiwari <nitish@minio.io>
Co-authored-by: Harshavardhana <harsha@minio.io>
2020-03-11 20:09:20 +05:30
Aditya Manthramurthy
cec8cdb35e S3Select: Handle array selection in from clause (#9076) 2020-03-10 22:34:58 -07:00
Harshavardhana
5ab9cc029d fix: crash observed for anonymous deletes from UI (#9107) 2020-03-09 21:21:35 -07:00
Minio Trusted
667f42515a Update yaml files to latest version RELEASE.2020-03-09T18-26-53Z 2020-03-09 18:38:07 +00:00
Harshavardhana
3614cb7a8b update minio-go library to fix couple of issues (#9099)
- Add PutObjectOptions.PartSize docs (#1239) (03/06/20) <Harshavardhana>
- list: Check EncodingType in list resp before decoding object names (#1238) (03/04/20) <Harshavardhana>
- Add Support for Legal-Hold (#1233) (03/04/20) <kannappanr>
- Add LegalHold API Support (#1226) (02/19/20) <Nitish Tiwari>
- extract userMetadata from event response (#1229) (02/18/20) <Harshavardhana>
- fix: ignore AWS elb endpoints in region extraction (#1228) (02/14/20) <Harshavardhana>
- Bucket and object name length error message fixes (#1227) (02/12/20) <Ville Skyttä>
- Add BucketEncryption apis (#1217) (02/01/20) <ebozduman>
- Set IAM endpoint to default value if unspecified (#1224) (02/01/20) <radix-aw>
- fix testListObjects functional test (#1222) (01/28/20) <poornas>
- fix: retry AccessDenied only if Region is present (#1221) (01/24/20) <Harshavardhana>
- Add new Amazon S3 endpoints (#1220) (01/23/20) <kannappanr>
2020-03-09 12:27:25 +05:30
kumy
b809c84338 fix: notifications doc elaborate env values for targets (#9103) 2020-03-08 18:33:43 -07:00
kannappanr
33edb072a3 Add TopLocksAdminAction to diagnostics canned policy (#9104) 2020-03-08 18:32:39 -07:00
Harshavardhana
6a00eb10bf fix: allow set drive count of proper divisible values (#9101)
Currently the code assumed some orthogonal requirements
which led to situations where, in a setup with for
example 168 drives, the final set_drive_count chosen
was 14. 168 drives are indeed divisible by 12, but this
wasn't allowed due to an unexpected requirement that 12
be a perfect modulo of 14, which is not possible. This
assumption was incorrect.

This PR fixes this old assumption properly, and also adds
a few tests, including some negative tests. Error messages
have been improved as well. A sketch of the corrected
divisor-based selection follows.
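For illustration, a valid set drive count is simply any divisor of the total drive count within the supported range (a sketch; the bounds 4 and 16 are assumptions, not the exact constants used by the server):

```
package main

import "fmt"

// possibleSetCounts returns every candidate set drive count for a
// given total: divisors of the drive count within [min, max].
// No candidate needs to divide any other candidate.
func possibleSetCounts(drives, min, max int) []int {
	var counts []int
	for n := min; n <= max; n++ {
		if drives%n == 0 {
			counts = append(counts, n)
		}
	}
	return counts
}

func main() {
	fmt.Println(possibleSetCounts(168, 4, 16)) // [4 6 7 8 12 14]
}
```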
2020-03-08 13:30:25 -07:00
Harshavardhana
792ee48d2c add additional logging during server formatting (#9102) 2020-03-08 12:12:07 -07:00
Minio Trusted
52873ac3a3 Update yaml files to latest version RELEASE.2020-03-06T22-23-56Z 2020-03-06 22:32:45 +00:00
Harshavardhana
88ae0f1196 Improve delete performance by reducing the number of calls (#9092)
- Remove the requirement to honor storage class for deletes
- Improve `posix.DeleteFileBulk` code to Stat the volumeDir
  only once per call, rather than for all object paths.
2020-03-06 13:44:24 -08:00
Anis Elleuch
23a0415eb7 profiling: Fix crash when enabling goroutines profiling (#9097)
This commit replaces 'goroutines' with 'goroutine' when passing it
to the pprof library while activating goroutine-type profiling.
2020-03-06 13:22:47 -08:00
Anis Elleuch
75a0661213 data-usage: Fix the calculation of the next crawling round (#9096)
This commit fixes a simple typo that miscalculated the waiting time
until the next round of data crawling to compute the data usage.
2020-03-06 11:34:12 -08:00
ebozduman
a1c7c9ea73 Matches s3 invalid compression format error for 'mc sql' (#9067) 2020-03-05 19:34:04 -08:00
Harshavardhana
7f19a9a617 Add CREDITS file in official MinIO Docker release image (#9091) 2020-03-06 00:22:45 +05:30
kannappanr
2f2c7d91a8 Add new extended list of JWT keys from OpenID group (#9087)
https://www.iana.org/assignments/jwt/jwt.xhtml#claims
2020-03-05 05:05:36 -08:00
Minio Trusted
9ad1c2d07d Update yaml files to latest version RELEASE.2020-03-05T01-04-19Z 2020-03-05 01:10:15 +00:00
kannappanr
07a7f329e7 xl: Fix counting offline disks in StorageInfo (#9082)
A recent modification in the code led to incorrect calculation
of offline disks.

This commit saves the endpoint list in xlObjects so that we know
the name of each disk.
2020-03-04 16:18:32 -08:00
kannappanr
c7ca791c58 fix: lock expiry on zoned setups (#9084)
Lock ownership was limited to endpoints on the first zone,
as we do not hold locks on other zones in an expanded
setup. The current code unintentionally expired active locks
when it couldn't see ownership from the secondary zone,
which leads to unexpected bugs as locking fails to work
as expected.
2020-03-04 16:06:17 -08:00
kannappanr
d9be8bc693 Add env. variable to disable data usage crawling (#9086) 2020-03-04 15:51:03 -08:00
poornas
9fc7537f2a Enforce md5sum checks for object retention APIs (#9030)
this PR enforces md5sum verification for the following
APIs to be compatible with the AWS S3 spec (see the
sketch after this entry):
 - PutObjectRetention
 - PutObjectLegalHold

Co-authored-by: Harshavardhana <harsha@minio.io>
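For illustration, the enforcement amounts to rejecting these requests when no `Content-MD5` header is present (a sketch with hypothetical names, not the server's exact code):

```
package cmd // illustrative only

import (
	"errors"
	"net/http"
)

// errMissingContentMD5 is a hypothetical error value for this sketch.
var errMissingContentMD5 = errors.New("Content-MD5 HTTP header is required")

// requireContentMD5 sketches the check: retention and legal-hold PUTs
// must carry a Content-MD5 header so the body can be verified.
func requireContentMD5(r *http.Request) error {
	if r.Header.Get("Content-MD5") == "" {
		return errMissingContentMD5
	}
	return nil
}
```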
2020-03-04 07:04:12 -08:00
Klaus Post
f1b2462193 Add goroutine profiles (#9078)
Allow downloading goroutine dump to help detect leaks
or overuse of goroutines.

Extensions are now type dependent.

Change the `profiling` prefix to `profile`, since that is what
they are, not the abstract concept.
2020-03-04 06:58:12 -08:00
Harshavardhana
8fbf2b0b2a enable compilation on Linux arm/386 (#9077) 2020-03-03 22:27:47 +03:00
poornas
c93157019f Allow gc to run in parallel on cache drives (#9051) 2020-03-03 06:42:26 +03:00
Harshavardhana
e3b44c3829 Remove partName, partETag requirement (#9044)
This is a precursor change before versioning; it
removes/deprecates the requirement of remembering
partName and partETag, which are not useful after
a multipart transaction has finished.

This PR reduces the overall size of the backend
JSON for large file uploads.
2020-03-03 03:29:30 +03:00
poornas
978bd4e2c4 check cacheControl not nil before access (#9055)
Fixes: #9053
2020-02-27 10:57:00 -08:00
Minio Trusted
bb942c7311 Update yaml files to latest version RELEASE.2020-02-27T00-23-05Z 2020-02-27 00:32:47 +00:00
poornas
5d25b10f72 Fix panic in StorageInfo call (#9050) 2020-02-26 15:29:50 -08:00
poornas
eac02c04f7 Fix sporadic failure in TestDiskCacheMaxUse (#9049) 2020-02-26 13:31:15 -08:00
Harshavardhana
1330e59307 accessKeyId missing should return appropriate error in AssumeRole (#9048)
For a non-existent user the server would return "STS not initialized"
```
aws --profile harsha --endpoint-url http://localhost:9000 \
      sts assume-role \
      --role-arn arn:xxx:xxx:xxx:xxxx \
      --role-session-name anything
```

instead return an appropriate error as expected by the STS API.

Additionally, format the `trace` output for STS APIs.
2020-02-26 12:26:47 -08:00
Harshavardhana
2dd14c0b89 print version with proper indentation (#9047)
currently version is printed as

> VERSION:
> DEVELOPMENT.2020-02-26T14-30-02Z

this is what we want

> VERSION:
>   DEVELOPMENT.2020-02-26T14-30-02Z
>
2020-02-26 23:09:08 +05:30
Hamid
5b8975bf4b Simplify redis access event format to facilitate parsing (#9046) 2020-02-26 09:23:32 -08:00
Harshavardhana
6f66f1a910 close channel upon error in Walk()'er (#9042) 2020-02-25 19:58:58 -08:00
Kody A Kantor
deb3911f5e cpu package fails to build on illumos (#9036) 2020-02-25 10:58:18 -08:00
Harshavardhana
23a8411732 Add a generic Walk()'er to list a bucket, optionally a prefix (#9026)
This generic Walk() is used by the likes of Lifecycle, or
KMS to rotate keys, or any other functionality which
relies on this capability.
2020-02-25 21:22:28 +05:30
Harshavardhana
ece0d4ac53 simplify recordAPIStats wrapper for ResponseWriters (#9034) 2020-02-24 09:45:32 -08:00
Harshavardhana
4c92bec619 allow rolling upgrades, remove same MinIO version requirement (#9033)
Upgrades between releases were failing due to a strict
rule that disallowed rolling upgrades; it is enough to
bump up APIs between versions to allow for quorum
failure and wait times. Authentication failures are
catastrophic in nature, leaving the server unable
to upgrade properly.

Fixes #9021
Fixes #8968
2020-02-24 10:32:30 +05:30
Harshavardhana
dcd63b4146 fix: avoid double ListBuckets() loading object lock (#9031) 2020-02-24 06:39:11 +05:30
poornas
224b4f13b8 Add cache eviction low and high watermarks (#8958)
To allow better control of the cache eviction process.

Introduce MINIO_CACHE_WATERMARK_LOW and 
MINIO_CACHE_WATERMARK_HIGH env. variables to specify 
when to stop/start cache eviction process. 

Deprecate MINIO_CACHE_EXPIRY environment variable. Cache 
gc sweeps at 30 minute intervals whenever high watermark is
reached to clear least recently accessed entries in the cache
until sufficient space is cleared to reach the low watermark.

Garbage collection uses an adaptive file scoring approach based
on last access time, with greater weights assigned to larger
objects and those with more hits to find the candidates for eviction.

Thanks to @klauspost for this file scoring algorithm

Co-authored-by: Klaus Post <klauspost@minio.io>
2020-02-23 19:03:39 +05:30
Harshavardhana
51a9d1bdb7 Avoid unnecessary allocations for XML parsing (#9017) 2020-02-23 09:06:46 +05:30
Klaus Post
b2db1e96e2 Remove crawler concurrency (#9023)
Only have one crawler per disk. Removes locking, but keeps
fastwalk itself able to run concurrently.
2020-02-21 20:50:16 +05:30
Harshavardhana
ab7d3cd508 fix: Speed up multi-object delete by taking bulk locks (#8974)
Change distributed locking to allow taking bulk locks
across objects, reducing what is usually 1000 calls to 1.

This also allows for situations where multiple clients send
delete requests for objects with the following names

```
{1,2,3,4,5}
```

```
{5,4,3,2,1}
```

to block on each other and ensure that we do not
fail the requests.
2020-02-21 11:29:57 +05:30
Harshavardhana
852fb320f7 Add all supported scopes from discovery doc (#9015)
Fixes #9010
2020-02-21 08:06:05 +05:30
Minio Trusted
8fb37a8417 Update yaml files to latest version RELEASE.2020-02-20T22-51-23Z 2020-02-20 23:00:50 +00:00
Anis Elleuch
d4dcf1d722 metrics: Use StorageInfo() instead to have consistent info (#9006)
Metrics used to have its own code to calculate offline disks.
StorageInfo() was avoided because it is an expensive operation,
sending calls to all nodes.

To make metrics & server info share the same code, a new
argument `local` is added to StorageInfo() so it will only
query local disks when needed.

Metrics now calls StorageInfo() as the server info handler does,
but with the local flag set to false.

Co-authored-by: Praveen raj Mani <praveen@minio.io>
Co-authored-by: Harshavardhana <harsha@minio.io>
2020-02-20 09:21:33 +05:30
poornas
02a59a04d1 Fix error messages returned by (Put)GetObjectLegalHold (#9013)
fixing some minor discrepancies between AWS S3 responses
and MinIO server responses
2020-02-19 08:15:48 +05:30
Harshavardhana
16a6e68d7b fix: indicate PutBucketEncryption as a valid policy action (#9009) 2020-02-18 10:32:53 -08:00
Praveen raj Mani
1b427ddb69 Support for Kafka version in the config (#9001)
Add a field for the Kafka version in the config. The user can explicitly 
set the version of the Kafka cluster.

Fixes #8768
2020-02-17 07:56:33 +05:30
Harshavardhana
02acff7fac fix: cross platform builds update simdjson-go (#9005)
Fixes #9003
2020-02-16 08:37:27 -08:00
Harshavardhana
712e82344c acl: Support PUT calls with success for 'private' ACL's (#9000)
Add dummy calls which respond with success when ACLs
are set to 'private' and fail if the user tries
to change them from their default 'private'.

Some applications such as nuxeo may have an
unnecessary requirement for this operation;
we support it anyway so that we don't have
to fully implement the functionality, just
enough that we can respond with success for
default ACLs.
2020-02-16 11:37:52 +05:30
Krishnan Parthasarathi
9f298d2311 Omit empty algorithm tags in bucket encryption XML (#8987)
- Bucket encryption config returned by MinIO would always have the xml namespace
set
- Make unit tests in pkg/bucket/encryption more robust
2020-02-13 22:12:42 -08:00
poornas
716a52f261 Fix hang in cache copyobject call (#8993)
Avoid the GetObjectNInfo call from cache in CopyObjectHandler
- in the case of server-side copy with metadata replacement,
the reader returned from cache is never consumed, but the net
effect of GetObjectNInfo from the cache layer is the cache holding a
write lock to fill the cache. The subsequent stat operation on cache in
CopyObject is not able to acquire a read lock, thus causing the hang.

Fixes #8991
2020-02-13 15:32:26 -08:00
Klaus Post
e4020fb41f SIMDJSON S3 select input (#8401) 2020-02-13 14:03:52 -08:00
Harshavardhana
d1144c2c7e reference format obtained doesn't need further validation (#8964)
we don't need to validateFormats again once we have obtained
the reference format, because it is possible that at this stage
another server is doing a disk heal during startup; once
in a while, due to delays, we get false positives and our
server doesn't start.

The format in quorum, as the reference format, can be assumed
valid, and we proceed further until and unless HealFormat
re-initializes the disks after a successful heal.

Also use a separate port for healing tests to avoid any
conflicts with regular build testing.

Fixes #8884
2020-02-13 14:01:41 -08:00
Harshavardhana
78125ee853 enable minio-java mint tests (#8990) 2020-02-13 11:46:42 -08:00
Harshavardhana
9ecd66007f fix: reduce the load on CPU when loading users/policies (#8984)
Trying to be conservative by slowing ourselves down
on a regular basis.
2020-02-13 06:36:23 -08:00
Nitish Tiwari
64ec17b463 Fix Legal Hold request parsing (#8981)
AWS S3 doesn't enforce the URL in XMLNS; accordingly, remove the
URL in XMLNS for ObjectLegalHold.

This was found while testing https://github.com/minio/minio-go/pull/1226
2020-02-13 01:18:29 -08:00
Anis Elleuch
6b9805e891 fix: Avoid crash when there is an error testing a target notif (#8986)
RegisterNotificationTargets() cleans up all connections
that it makes to notification targets when an error occurs
during its execution.

However, there is a typo in the code that makes the function always
try to access a nil pointer in the defer code, since the function
in question will always return nil in the case of any error.

This commit fixes the typo in the code.
2020-02-13 11:26:23 +05:30
poornas
013773065c Save metadata correctly in cache.json on PUT (#8985)
fixes #8979
2020-02-13 08:49:32 +05:30
Anis Elleuch
7d6766adc6 fix: erroneous high value for gateway received bytes metrics (#8978)
http.Request.ContentLength can be negative, which affects
the gateway_s3_bytes_received value in Prometheus output.

The commit only increases the value of the total received bytes
in gateway mode when r.ContentLength is greater than zero.
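For illustration, the guard boils down to the following (a sketch; the counter name is hypothetical):

```
package cmd // illustrative only

import (
	"net/http"
	"sync/atomic"
)

// recordReceivedBytes counts the request body size only when it is
// known and positive; http.Request.ContentLength is -1 when unknown.
func recordReceivedBytes(r *http.Request, totalRx *int64) {
	if r.ContentLength > 0 {
		atomic.AddInt64(totalRx, r.ContentLength)
	}
}
```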
2020-02-12 10:15:00 +05:30
Harshavardhana
c56c2f5fd3 fix routing issue for esoteric characters in gorilla/mux (#8967)
The first step is to ensure that the Path component is not decoded
by gorilla/mux, to avoid routing issues while handling
certain characters during uploads through PutObject().

Delay the decoding and use PathUnescape() to unescape
the `object` path component, as sketched below.

Thanks to @buengese and @ncw for neat test cases for us
to test with.

Fixes #8950
Fixes #8647
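A minimal sketch of the pattern, assuming a gorilla/mux router (not the server's exact routing code):

```
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url"

	"github.com/gorilla/mux"
)

func main() {
	// UseEncodedPath matches routes against the raw (still-escaped)
	// path, so characters like %2F in object names cannot break routing.
	router := mux.NewRouter().UseEncodedPath()
	router.HandleFunc("/{bucket}/{object:.+}", func(w http.ResponseWriter, r *http.Request) {
		// Decode only after the route has matched.
		object, err := url.PathUnescape(mux.Vars(r)["object"])
		if err != nil {
			http.Error(w, "invalid object name", http.StatusBadRequest)
			return
		}
		fmt.Fprintln(w, object)
	})
	log.Fatal(http.ListenAndServe(":8080", router))
}
```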
2020-02-12 09:08:02 +05:30
Nitish Tiwari
7e819d00ea Fix Error Code for ObjectTagging Parsing (#8971)
Also add Mint tests
2020-02-11 17:42:28 -08:00
Nitish Tiwari
33767266e7 Fix Tagging XML Unmarshalling (#8977)
AWS S3 doesn't strictly enforce the presence of URL in tagging XML.
This PR updates MinIO to behave similarly.

Fixes #8976
2020-02-11 14:35:33 -08:00
Nitish Tiwari
63be4709b7 Add metrics support for Azure & GCS Gateway (#8954)
We added support for caching and S3 related metrics in #8591. As
a continuation, it would be helpful to add support for Azure & GCS
gateway related metrics as well.
2020-02-11 21:08:01 +05:30
astorath
6b1f2fc133 fix: using correct response on get_bucket_lifecycle_configuration (#8962) 2020-02-08 16:46:59 +05:30
Minio Trusted
f749a9bf0e Update yaml files to latest version RELEASE.2020-02-07T23-28-16Z 2020-02-07 23:35:04 +00:00
poornas
9b4d46a6ed evict cached entry for server side copy (#8947)
Fixes #8942
2020-02-07 14:36:46 -08:00
Anis Elleuch
502e652b7a fix: Avoid closing target in RegisterNotificationTargets (#8966)
This will prevent a double target Close() call when fetchLambdaInfo()
is executed (mc admin info)

This fixes a crash when mc admin info is called.
2020-02-07 14:35:56 -08:00
Anis Elleuch
de924605a1 Import CSV parser library (#8927)
The CSV library code is imported from Go 1.13.6
2020-02-07 16:25:36 +05:30
Nitish Tiwari
15e2ea2c96 Fix an issue where MinIO was logging every error twice (#8953)
The logging subsystem was initialized under the init() method in
both gateway-main.go and server-main.go, which are part of
the same package. This created two logging targets, and hence
errors were logged twice. This PR moves the init() method
to common-main.go.
2020-02-07 13:48:07 +05:30
Minio Trusted
07edb7c7f8 Update yaml files to latest version RELEASE.2020-02-07T04-56-50Z 2020-02-07 05:03:28 +00:00
Klaus Post
d0cea7adea Fix stream read IO count (#8961)
Streams return a ReadCloser, and returning would
decrement the I/O count instantly; fix it.

Also change maxActiveIOCount to 3, meaning crawling
is paused while 3 operations are running.
2020-02-07 09:43:55 +05:30
Klaus Post
2165d45d3f Time getSize and use to estimate latency (#8959)
Remove the random sleep. This is running in 4 goroutines,
so it was mostly doing nothing.

We use the getSize latency to estimate system load,
meaning when there is little load on the system and
we get the result fast, we sleep a little.

If it took a long time, we have high load and back
off for longer.

We are sleeping inside the mutex, so this affects all
goroutines doing IO.
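For illustration, the adaptive pause amounts to sleeping in proportion to how long the measured operation took (a sketch, not the crawler's exact tuning):

```
package cmd // illustrative only

import "time"

// sleepByLatency runs op and then pauses in proportion to how long
// it took: fast results (light load) pause briefly, slow results
// (heavy load) back off for longer.
func sleepByLatency(op func(), factor float64) {
	start := time.Now()
	op()
	time.Sleep(time.Duration(factor * float64(time.Since(start))))
}
```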
2020-02-07 09:05:55 +05:30
Anis Elleuch
6d5d77f62c usage typo: Fix creating .minio.sys/background-ops bucket (#8957)
Due to a typo in the code, a cluster would not correctly create
`background-ops` on all disks, and nodes printed the following error:

minio3_1  | API: SYSTEM()
minio3_1  | Time: 19:32:45 UTC 02/06/2020
minio3_1  | DeploymentID: d67c20fa-4a1e-41f5-b319-7e3e90f425d8
minio3_1  | Error: Bucket not found: .minio.sys/background-ops
minio3_1  |        2: cmd/data-usage.go:109:cmd.runDataUsageInfo()
minio3_1  |        1: cmd/data-usage.go:56:cmd.runDataUsageInfoUpdateRoutine()

This commit fixes the typo.
2020-02-06 13:12:36 -08:00
Harshavardhana
49df290270 Add metadata parsing to be inside mutex to slow down (#8952)
Adding a mutex slows down the crawler to avoid large
spikes in CPU; also add millisecond-interval jitter
to the calculation of disk usage to smooth out the
spikes further.
2020-02-06 00:22:11 -08:00
Nesty92
b1bfd75fcf Aws SDK go functional test: add SSE-C CopyObject test (#8938) 2020-02-06 00:21:03 -08:00
Nitish Tiwari
e5951e30d0 Add support for Object Tagging in LifeCycle configuration (#8880)
Fixes #8870

Co-Authored-By: Krishnan Parthasarathi <krisis@users.noreply.github.com>
2020-02-06 13:20:10 +05:30
poornas
45d725c0a3 Fix docs link for vault legacy (#8948) 2020-02-06 10:15:58 +05:30
Harshavardhana
c2c5b09bb1 Avoid object names with '//' to avoid hash inconsistencies (#8946)
This is to fix a situation where an object name is incorrectly
sent with '//' in its path hierarchy. We should reject
such object names because they may be hashed to a set where
the object does not originally belong; this can
cause situations where, once an object is uploaded, we cannot
delete it anymore. A sketch of the rejection follows.

Fixes #8873
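For illustration, the rejection boils down to a check for empty path segments (a sketch with a hypothetical name, not the server's exact code):

```
package cmd // illustrative only

import "strings"

// hasEmptyPathSegment reports whether an object name contains '//',
// i.e. an empty path segment that would hash to the wrong set.
func hasEmptyPathSegment(object string) bool {
	return strings.Contains(object, "//")
}
```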
2020-02-06 08:29:38 +05:30
Andreas Auernhammer
086fbb745e fix and improve KMS server info (#8944)
This commit fixes typos in the displayed server info
w.r.t. the KMS and removes the update status.

For more information about why the update status
is removed see: PR #8943
2020-02-06 06:18:34 +05:30
Andreas Auernhammer
4f37c8ccf2 refine the KMS admin API (#8943)
This commit removes the `Update` functionality
from the admin API. While this is technically
a breaking change I think this will not cause
any harm because:
 - The KMS admin API is not complete, yet.
   At the moment only the status can be fetched.
 - The `mc` integration hasn't been merged yet.
   So no `mc` client could have used this API
   in the past.

The `Update`/`Rewrap` status is not useful anymore.
It provided a way to migrate from one master key version
to another. However, KES does not support the concept of
key versions. Instead, key migration should be implemented
as migration from one master key to another.

Basically, the `Update` functionality has been implemented just
for Vault.
2020-02-05 22:47:35 +05:30
Krishnan Parthasarathi
026265f8f7 Add support for bucket encryption feature (#8890)
- pkg/bucket/encryption provides support for handling bucket 
  encryption configuration
- changes under cmd/ provide support for AES256 algorithm only

Co-Authored-By: Poorna  <poornas@users.noreply.github.com>
Co-authored-by: Harshavardhana <harsha@minio.io>
2020-02-05 15:12:34 +05:30
Andreas Auernhammer
f91c072f61 re-write the KMS get started guide (#8936)
This commit updates the KMS getting started guide
and replaces the legacy MinIO<-->Vault setup with a
MinIO<-->KES<-->Vault setup.

Therefore, add some architecture ASCII diagrams and
provide a step-by-step guide to setup Vault, KES and
MinIO such that MinIO can encrypt objects with KES +
Vault.

The legacy Vault guide has been moved to `./vault-legacy.md`.

Co-authored-by: Harshavardhana <harsha@minio.io>
2020-02-05 12:38:47 +05:30
Anis Elleuch
52bdbcd046 Add new admin API to return Accounting Usage (#8689) 2020-02-04 18:20:39 -08:00
poornas
301c50b721 Add canned diagnostics policy for admin users (#8937) 2020-02-04 17:58:38 -08:00
Harshavardhana
e9c111c8d0 Avoid unnecessary statPart() calls in PutObjectPart (#8905)
Assume `xl.json` as the source of truth for all operations.
2020-02-04 10:04:37 +05:30
poornas
278a165674 Allow caching based on a configurable number of hits. (#8891)
Co-authored-by: Harshavardhana <harsha@minio.io>
2020-02-04 09:10:01 +05:30
ebozduman
a05af9bb98 Fixes several typos and clarifies some explanations in MySQL doc. (#8908) 2020-02-03 18:55:05 -08:00
Anis Elleuch
e934c3e2a2 usage: Fix buckets count calculation when no object is present (#8929)
XL crawling wrongly returned a zero bucket count when
no objects had been uploaded to the server yet. The reason is
that the posix crawler returns an invalid result when all
disks hold zero objects.

A simple fix is to stop always picking the crawling result of
the first disk and instead choose the result of the disk which
has the most objects in it.
2020-02-04 06:57:47 +05:30
Harshavardhana
2d295a31de Avoid select inside a recursive function to avoid CPU spikes (#8923)
Additionally, allow a configurable number of goroutines.
2020-02-03 16:45:59 -08:00
Harshavardhana
9bbf5cb74f fix: Avoid re-reading bucket names from etcd (#8924)
This helps improve performance when there are
1000+ bucket entries on etcd, and improves the
startup time significantly.
2020-02-03 13:54:20 +05:30
Harshavardhana
680e493065 fix a crash in base64 buffer pool (#8925)
Looks like a buffer size of 1024 is not enough in
all situations; use 8192 instead, which
can satisfy all the rare situations that
may arise in base64 decoding.
2020-02-03 08:42:32 +05:30
poornas
1ea2449269 NAS gateway: fix notification initialization (#8920)
Co-authored-by: Harshavardhana <harsha@minio.io>
2020-02-02 15:22:07 +05:30
Harshavardhana
7ce63b3078 fix: multi-delete API write quorum failures (#8926)
The multi-delete API failed with write quorum errors
in the following situations:

- the list of files requested for delete no longer
  exists, which can lead to quorum errors and failure
- due to the use of a query param for paths, for really
  long paths the MinIO server unexpectedly rejects these
  requests as malformed.

This was reproduced with warp
2020-02-01 18:11:29 -08:00
Anis Elleuch
7432b5c9b2 Use user CAs in checkEndpoint() call (#8911)
The server info handler makes an http connection to other
nodes to check if they are up, but does not load the custom
CAs in ~/.minio/certs/CAs.

This commit fixes it, as sketched below.

Co-authored-by: Harshavardhana <harsha@minio.io>
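For illustration, loading a directory of custom CAs into the client transport looks roughly like this (a sketch, assuming PEM files under the CAs directory):

```
package cmd // illustrative only

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"net/http"
	"path/filepath"
)

// transportWithCAs extends the system cert pool with every PEM file
// found in caDir (e.g. ~/.minio/certs/CAs) before dialing peers.
func transportWithCAs(caDir string) (*http.Transport, error) {
	pool, err := x509.SystemCertPool()
	if err != nil {
		pool = x509.NewCertPool()
	}
	files, err := ioutil.ReadDir(caDir)
	if err != nil {
		return nil, err
	}
	for _, f := range files {
		pem, err := ioutil.ReadFile(filepath.Join(caDir, f.Name()))
		if err != nil {
			continue // skip unreadable entries
		}
		pool.AppendCertsFromPEM(pem)
	}
	return &http.Transport{TLSClientConfig: &tls.Config{RootCAs: pool}}, nil
}
```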
2020-02-02 07:15:29 +05:30
Harshavardhana
d76160c245 Initialize only one retry timer for all sub-systems (#8913)
Also make sure that we create buckets on all zones
successfully, and do not run quick-heal on buckets when not
running with expansion.
2020-02-02 06:37:43 +05:30
poornas
5d838edcef Fix panic in ServerInfoHandler when (#8915)
Co-authored-by: Harshavardhana <harsha@minio.io>
2020-02-01 17:50:04 +05:30
poornas
c9116e6bd7 trace - log request body (#8917) 2020-02-01 02:39:49 -08:00
Harshavardhana
d7dc9aaf52 fix: remove response header timeout (#8919)
Adding a response header timeout seems to have
premature-timeout-like consequences, which
lead to potential disconnections.
2020-02-01 08:31:55 +05:30
Harshavardhana
bfe8a9bccc jwt: Simplify JWT parsing (#8802)
JWT parsing is simplified by using a custom claim
data structure such as MapClaims{}; this also adds
a custom unmarshaller for faster unmarshalling.

- Avoid reflection as much as possible
- Provide the right types for functions as much
  as possible
- Avoid strings.Join and strings.Split to reduce
  allocations; rely on indexes directly.
2020-01-31 08:29:22 +05:30
Klaus Post
9990464cd5 Fix recursive deep scan of buckets (#8900) 2020-01-30 17:20:07 +05:30
poornas
881e983ed9 Fix Retention, ObjectLock, LegalHold struct namespaces correctly. (#8909)
Reverts #8903 to allow structs to be unmarshalled 
even if the namespace is missing.
2020-01-30 09:58:05 +05:30
Nesty92
f6a7d4d29b Add CreateBucketError, ListMultipartUploads functional test (#8886) 2020-01-29 07:10:03 -08:00
Harshavardhana
f98616dce7 heal: Optimize heal listing by avoiding batches (#8901)
Also limit the heal per object when there are incoming
requests, by suspending heal for longer periods of time.
2020-01-29 12:05:44 +05:30
Ashish Kumar Sinha
5bd0e95eef Set default namespace for necessary structs (#8903) 2020-01-29 10:19:38 +05:30
Li Yi
0414f01b6e doc: match code on sysctl -w vm.swappiness=1 (#8894) 2020-01-27 16:19:15 -08:00
Harshavardhana
0cbebf0f57 Rename pkg/{tagging,lifecycle} to pkg/bucket sub-directory (#8892)
Rename to allow more such features to come in a
proper hierarchical manner.
2020-01-27 14:12:34 -08:00
Harshavardhana
4cb6ebcfa2 test: print more relevant info in healing failure (#8895) 2020-01-27 14:56:36 +05:30
poornas
2232e095d5 Make admin permissions more granular for admin handlers. (#8888) 2020-01-26 20:47:52 -06:00
Minio Trusted
aae337f5b5 Update yaml files to latest version RELEASE.2020-01-25T02-50-51Z 2020-01-25 02:57:23 +00:00
poornas
a78e5d4763 Add missing error check in cache GetObjectNInfo (#8889) 2020-01-24 15:49:16 -08:00
Harshavardhana
cf37c7997e Heal bucket only on missing drives in quorum (#8883)
MakeVol shouldn't be called in heal bucket
when the bucket doesn't really exist in quorum.
2020-01-24 15:38:07 -08:00
Harshavardhana
1ffbb5c24c fix racy tests when editing xl.getDisks (#8879) 2020-01-23 11:50:09 -08:00
Harshavardhana
b9c48e0ab0 fix return appropriate error for MakeBucket in federation (#8878) 2020-01-22 08:25:28 -08:00
Harshavardhana
fe5d599802 fix: STS creds without "aud" should be honored with STS checks (#8868)
Fixes #8865
2020-01-22 15:09:46 +05:30
Aditya Manthramurthy
55063906b5 Fix group add/remove membership bug (#8877) 2020-01-21 19:00:41 -08:00
Klaus Post
c7178d2066 Profiling: Add base, fix memory profiling (#8850)
For 'snapshot' type profiles, record a 'before' profile that can be used 
as `go tool pprof -base=before ...` to compare before and after.

"Before" profiles are included in the zipped package.

[`runtime.MemProfileRate`](https://golang.org/pkg/runtime/#pkg-variables) 
should not be updated while the application is running, so we set it at startup.

Co-authored-by: Harshavardhana <harsha@minio.io>
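For illustration, setting the rate once at startup looks like this (a sketch; the value 4096 is an assumption, not MinIO's constant):

```
package cmd // illustrative only

import "runtime"

func init() {
	// Must be set before any allocations we want sampled; changing it
	// while the program runs skews the profile accounting.
	runtime.MemProfileRate = 4096
}
```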
2020-01-21 15:49:25 -08:00
Harshavardhana
f14f60a487 fix: Avoid double usage calculation on every restart (#8856)
On every restart of the server, usage was being
calculated, which is not useful; instead, wait for
sufficient time before starting the crawling routine.

This PR also avoids lots of double allocations
through strings, optimizes usage of string builders
and also avoids crawling through symbolic links.

Fixes #8844
2020-01-21 14:07:49 -08:00
Harshavardhana
e2b3c083aa fix: close and drain the response body always (#8847) 2020-01-21 02:46:58 -08:00
Anis Elleuch
3011692d93 doc: Add missing ``` in compression documentation (#8872) 2020-01-21 15:50:54 +05:30
Harshavardhana
86252ec7e1 fix: document _ENABLE for all notification targets (#8864)
Fixes #8863
2020-01-20 16:48:19 -08:00
Harshavardhana
ef1aa870c5 cleanup unneeded files, update credits (#8858)
additionally add code of conduct
2020-01-20 10:38:58 -08:00
bijialin
c260182412 zh_CN docs: add Expanding existing distributed setup (#8862) 2020-01-20 08:51:09 -08:00
Nitish Tiwari
61c17c8933 Add ObjectTagging Support (#8754)
This PR adds support for AWS S3 ObjectTagging API as explained here
https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html
2020-01-20 08:45:59 -08:00
Forest Lovewood
dd93eee1e3 Implement bucket caching for b2 gateway (#8820)
fixes #8739 #6806
2020-01-20 22:13:38 +05:30
Harshavardhana
7a400542ae Remove usage of GOPROXY, let 'go' decide the defaults (#8855) 2020-01-19 09:17:42 -08:00
Harshavardhana
88286cf8d0 fix: support pre-sign signature for STS tokens (#8826)
Fixes #8391
2020-01-18 17:04:50 -08:00
Klaus Post
8cb6184f1d Fix erasure block allocation (#8851)
Small blocks are undersized when the file size isn't divisible by the
shard size, which could lead to allocation in *reedsolomon.Split()*.
2020-01-18 14:21:58 -08:00
Zhongpeng Lin
b849fd7a75 remove gorilla/rpc replacement (#8852) 2020-01-18 14:21:13 -08:00
Harshavardhana
09ee145e9c gw/hdfs: indicate hdfs gateway is production ready (#8848) 2020-01-18 07:25:03 -08:00
Harshavardhana
04dfcbfdd7 update minio-go to latest master (#8849) 2020-01-18 04:05:17 -08:00
Harshavardhana
23e46f9dba log formatting only the first time (#8846) 2020-01-17 15:39:07 -08:00
Harshavardhana
fc5213258e posix: Do not take disk offline on I/O errors (#8836)
Choosing maxAllowedIOError is arbitrary and
prone to errors, as drives might be perfectly
capable of taking I/O while only a few locations
return I/O errors. This is a hindrance of sorts,
since backend filesystems like ZFS can automatically
fix and handle these scenarios.

The added problem with the current approach is that we
take the drive offline, making it virtually impossible
to bring it online without restarting the server, which
is not desirable on a busy cluster. Remove this state,
let the backend return errors appropriately to the
caller, and let the caller decide what to do with
the error.
2020-01-17 13:34:43 -08:00
kannappanr
005ebbb9b2 Typo: Add missing '<' character (#8845)
Fixes #8833
2020-01-17 13:31:46 -08:00
Anis Elleuch
017067e11f data-usage: Avoid crawling duplicated call (#8843)
This fix also picks 3, and not 4, disks from a single erasure set.
2020-01-17 09:59:37 -08:00
Harshavardhana
2bb69033e5 http: fail appropriately and return standard Go error (#8837)
Return http.ErrServerClosed with a proper body when the
server is shutting down, giving more context instead
of just returning '503', which doesn't mean the same
thing.
2020-01-17 05:48:39 -08:00
Harshavardhana
fca4ee84c9 gw/hdfs: listing should list directories properly (#8827)
Fixes #8822
2020-01-16 17:11:25 -08:00
poornas
60e60f68dd Add support for object locking with legal hold. (#8634) 2020-01-16 15:41:56 -08:00
Minio Trusted
ba758361b3 Update yaml files to latest version RELEASE.2020-01-16T22-40-29Z 2020-01-16 22:47:41 +00:00
Harshavardhana
c6b218e5df fix: readiness should return 200 OK with first zone online (#8834) 2020-01-16 13:49:25 -08:00
Anis Elleuch
c18fbdb29a posix: Remove a non needed nil check in DiskInfo() (#8830)
posix.DiskInfo() returned errFaultyDisk when posix is nil,
but there is no way that this can ever happen; therefore,
remove the unneeded code.
2020-01-16 11:27:50 -08:00
Harshavardhana
b1ad99edbf fix: avoid crash copy map before reading (#8825)
Code of this form is always racy when the map
itself is being written to concurrently:

```
func (r Map) retMap() map[string]string {
     .. lock ..
     return r.internalMap
}

func (r Map) addMap(k, v string) {
     .. lock ..
     r.internalMap[k] = v
}
```

Anyone reading from `retMap()` is not protected
by the locking, so we need to make sure
to avoid code written in this manner. It is always
safe to copy the map and return the copy, as sketched below.
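For illustration, the safe variant copies under the lock (a sketch, assuming a sync.Mutex guards the map):

```
package cmd // illustrative only

import "sync"

type Map struct {
	mu          sync.Mutex
	internalMap map[string]string
}

// retMap returns a copy made while holding the lock, so callers can
// read it freely while writers keep mutating the internal map.
func (r *Map) retMap() map[string]string {
	r.mu.Lock()
	defer r.mu.Unlock()
	copied := make(map[string]string, len(r.internalMap))
	for k, v := range r.internalMap {
		copied[k] = v
	}
	return copied
}
```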
2020-01-16 01:35:30 -08:00
Minio Trusted
080e0c2323 Update yaml files to latest version RELEASE.2020-01-16T03-05-44Z 2020-01-16 03:11:40 +00:00
Anis Elleuch
935546d5ca xl: Implement MRF healing (#8470) 2020-01-15 18:30:32 -08:00
Harshavardhana
64fde1ab95 xl/zones: return errNoHealRequired when no heal is required (#8821)
The zone abstraction of the object layer was incorrectly
returning `nil` in situations where disk healing is
not required. Returning `nil` is treated as a
successful heal, which leads to unexpected ReloadFormat()
peer notification calls during startup.

This PR fixes this behavior properly for zones.
2020-01-15 17:19:13 -08:00
Anis Elleuch
169e8742fc mint: Use https for access to maven repository (#8824)
The Maven repository requires HTTPS now. This led to issues
building the mint image in aws-sdk-java & minio-java.

The PR fixes the issue and also bumps the AWS SDK version in
aws-sdk-java to the latest.
2020-01-15 15:48:49 -08:00
Anis Elleuch
069876e262 xl: All nodes create meta volumes in its local disks (#8786)
Meta volume directories, tmp/, background-ops/, etc.
under .minio.sys are created when disks are formatted,
but also when the cluster is started.

However, using MakeVolBulk() is not appropriate in the
case of a user migrating from a version which does not
have .minio.sys/background-ops/. The reason is that
MakeVolBulk() exits early when an error occurs:
errVolumeExists in this case, which is expected since
some directories such as tmp/ already exist.

This commit avoids MakeVolBulk and uses MakeVol
instead.

Also, the PR makes each node create meta volumes
on its local disks, and stops relying on the first disk
since the first node could be offline.
2020-01-15 12:36:52 -08:00
Harshavardhana
442e1698cb heal: Avoid spinning up object healing during startup (#8819)
Auto-heal disks, metadata and buckets in the background, but
not objects; let auto-heal kick in for objects after
the cluster has been up for a while.
2020-01-15 01:08:39 -08:00
poornas
d76518eeb9 Remove TestPutObjectPartDiskNotFound unit test (#8815) 2020-01-14 18:46:33 -08:00
Harshavardhana
0879a4f743 rest/storage: Remove racy LastError usage (#8817)
instead, perform a liveness check call to
verify if the server is online and print relevant
errors.

Also introduce a StorageErr string error type
instead of errors.New(), and deprecate the usage of
VerifyFileError and DeleteFileError for gob; the
change in data structure also requires a bump of the
storage REST version to v13.

Fixes #8811
2020-01-14 18:45:17 -08:00
Harshavardhana
9be7066715 fix: Hold locks before closing all drives (#8818)
Fixes #8813
2020-01-14 17:13:58 -08:00
Klaus Post
d8660b30cc Reduce MemProfileRate (#8814)
Enabling the memory profiling has a significant impact on performance.

Reduce the profiling rate by 2 orders of magnitude. It is still 128x smaller than the default, so it should be plenty.
2020-01-14 16:18:45 -08:00
poornas
30922148fb Fix bug preventing overwrite of object if (#8796)
object lock config is enabled for a bucket.

Creating a bucket with object lock configuration
enabled does not automatically cause WORM protection
to be applied. PUT operation needs to specifically
request object locking or bucket has to have default
retention settings configured.

Fixes regression introduced in #8657
2020-01-13 17:29:31 -08:00
Klaus Post
37b32199e3 Validate XL sets on format (#8779)
When formatting a set, validate whether a host failure would likely lead to data loss.

While we don't know what config will be set in the future,
evaluate to the best of our knowledge, assuming default settings.
2020-01-13 13:09:10 -08:00
ebozduman
d74818b227 Typo in MySQL notification doc (#8807) 2020-01-13 13:06:42 -08:00
Klaus Post
627fdfeab7 Fix Windows console printing (#8805)
Print to the console, which does translation, rather than directly to stdout.

Fixes #8804
2020-01-13 13:05:51 -08:00
Harshavardhana
3320878dfb Add missing SSE_MASTER_KEY_FILE entry (#8800)
Fixes #8757
2020-01-13 12:31:18 +05:30
Harshavardhana
3f20011862 docs: update how to query the keys and args (#8797)
remove link to missing config.json.sample
2020-01-11 15:13:13 -08:00
Etienne
5f34b5e6a5 Update README.md (#8788) 2020-01-10 21:08:09 -08:00
poornas
9199033db7 Set X-Cache and X-Cache-Lookup headers for cache (#8794)
X-Cache reports a cache status of HIT if the object is
served from the disk cache, or MISS otherwise.
X-Cache-Lookup is set to HIT if the object was found
in the cache even if not served (e.g. if the cache
entry was invalidated by ETag verification).
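For illustration, a client can inspect these headers on any GET response (a sketch; the endpoint and bucket are hypothetical and assumed anonymously readable):

```
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:9000/mybucket/myobject")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("X-Cache:", resp.Header.Get("X-Cache"))               // HIT or MISS
	fmt.Println("X-Cache-Lookup:", resp.Header.Get("X-Cache-Lookup")) // HIT if found in cache
}
```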
2020-01-10 20:21:13 -08:00
Klaus Post
2bf6cf0e15 Enable multiple concurrent profile types (#8792) 2020-01-10 17:19:58 -08:00
Harshavardhana
686d4656de fix: set appropriate defaults when new keys added (#8795)
A new key was added to identity_openid recently
which the client must explicitly set; otherwise the
optional value would be empty. Handle this
appropriately.

Fixes #8787
2020-01-10 16:57:18 -08:00
Harshavardhana
5aa5dcdc6d lock: improve locker initialization at init (#8776)
Use the reference format to initialize lockers
during startup, also handle `nil` for NetLocker
in dsync and remove the *errorLocker* implementation.

Add further tuning parameters such as:

 - DialTimeout is now 15 seconds, down from 30 seconds
 - KeepAliveTimeout is now 20 seconds, 5 seconds
   more than the default of 15 seconds
 - ResponseHeaderTimeout is 10 seconds
 - ExpectContinueTimeout is reduced to 3 seconds
 - DualStack is enabled by default, so remove setting
   it to `true`
 - Reduce IdleConnTimeout to 30 seconds from
   1 minute to avoid idle connection build-up

Fixes #8773
2020-01-10 02:35:06 -08:00
Harshavardhana
0a70bc24ac Disallow only policy statements which are exactly same (#8785) 2020-01-09 19:29:57 -08:00
Kevin Humphreys
656146b699 doc: Prometheus metrics name fix (#8774)
changed docs to reflect proper Prometheus metrics
2020-01-09 18:36:58 -08:00
Harshavardhana
5e40b9a563 fix: docs for live/ready check implementation details 2020-01-09 18:29:24 -08:00
Joe Adams
89d1221217 Fix typo in prometheus monitoring docs (#8780) 2020-01-09 09:08:41 -08:00
Praveen raj Mani
4cd1bbb50a This PR fixes two things (#8772)
- Stop spawning store replay routines when testing the notification targets
- Properly honor the target.Close() to clean the resources used

Fixes #8707

Co-authored-by: Harshavardhana <harsha@minio.io>
2020-01-09 19:45:44 +05:30
Harshavardhana
c2cde6beb5 policy: Allow duplicate statements with different effects (#8775)
This allows "Allow" and "Deny" conflicting statements,
where we evaluate to implicit "Deny".
2020-01-08 23:00:54 -08:00
Harshavardhana
abc1c1070a Add custom policy claim name (#8764)
In certain organizations policy claim names
can be not just 'policy' but also things like
'roles', and the value of this field might be a
*string* or a *[]string*; support this as well.

In this PR we are still not supporting multiple
policies per STS account, which will require a
more comprehensive change.
2020-01-08 17:21:58 -08:00
poornas
fd56aa42a6 Fix error message wording for PutObjectLockConfig (#8759)
Co-Authored-By: kannappanr <30541348+kannappanr@users.noreply.github.com>
Co-authored-by: Harshavardhana <harsha@minio.io>
2020-01-08 15:36:23 -08:00
Klaus Post
3d318bae76 init: Use constant time retries (#8769)
Exponential backoff does not seem like a good fit for
this function, since we can expect a few roundtrips on
initial startup.

This retry loop gets slow pretty quickly, with the initial
wait being 1 second and each try doubling the
wait until 30 seconds is reached.

Instead, simply try twice per second, as sketched below.
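For illustration, constant-interval retries reduce to a fixed ticker (a sketch, not the init code itself):

```
package cmd // illustrative only

import "time"

// retryConstant calls fn twice per second until it succeeds or the
// timeout elapses; the wait never grows.
func retryConstant(timeout time.Duration, fn func() error) error {
	deadline := time.Now().Add(timeout)
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		err := fn()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return err
		}
		<-ticker.C
	}
}
```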
2020-01-08 13:37:34 -08:00
Harshavardhana
aa2e89bfe3 Use jsoniter whenever applicable instead of encoding/json (#8766)
This PR adds the jsoniter package to replace encoding/json
in places where faster JSON unmarshalling is necessary,
whenever the input JSON is large enough.

Some benchmarking comparison between jsoniter and encoding/json:

benchmark                            old MB/s     new MB/s     speedup
BenchmarkParseUnmarshal/N10-4        110.02       331.17       3.01x
BenchmarkParseUnmarshal/N100-4       125.74       524.09       4.17x
BenchmarkParseUnmarshal/N500-4       131.68       542.60       4.12x
BenchmarkParseUnmarshal/N1000-4      133.93       514.88       3.84x
BenchmarkParseUnmarshal/N5000-4      122.10       415.36       3.40x
BenchmarkParseUnmarshal/N10000-4     132.13       403.90       3.06x
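For illustration, jsoniter is a near drop-in replacement for encoding/json (a sketch):

```
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// ConfigCompatibleWithStandardLibrary behaves like encoding/json,
// so existing Marshal/Unmarshal call sites keep working unchanged.
var json = jsoniter.ConfigCompatibleWithStandardLibrary

func main() {
	var v map[string]interface{}
	if err := json.Unmarshal([]byte(`{"bucket":"test","count":3}`), &v); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(v["bucket"], v["count"])
}
```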
2020-01-08 17:01:42 +05:30
Harshavardhana
60813bef29 Allow proper setCount SLAs across zones (#8752)
Fixes a scenario where zones were not appropriately
handled, along with support for overriding the set
count. The new fix also ensures that we handle
the various setup types properly.

Update documentation to properly indicate the
behavior.

Fixes #8750

Co-authored-by: Nitish Tiwari <nitish@minio.io>
2020-01-07 09:13:44 -08:00
Harshavardhana
b123be5612 fix: browser should listBuckets from etcd in global federation (#8760) 2020-01-07 09:03:00 +05:30
Harshavardhana
933c60bc3a Add crypto context errors (#8740)
Currently, when connections to Vault fail, the client
perpetually retries; this leads to assumptions that
the server has issues and masks the problem.

Re-purpose *crypto.Error* type to send appropriate
errors back to the client.
2020-01-06 16:15:22 -08:00
ebozduman
796cca4166 Creates zipped files with correct mod times for objects (#8693) 2020-01-06 12:43:00 -08:00
Klaus Post
fe379f9428 Copy metadata on update (#8755)
Fixes #8706

Co-authored-by: Harshavardhana <harsha@minio.io>
2020-01-06 10:15:44 -08:00
Harshavardhana
ae0b165431 fix: --anonymous flag shouldn't print any keys (#8753)
Fixes #8744
2020-01-06 22:12:47 +05:30
Harshavardhana
7a06e158f1 Add docs for sysctl tuning parameters (#8749) 2020-01-06 00:30:04 -08:00
Harshavardhana
5eab3db344 Fix doc reference for prometheus (#8748) 2020-01-05 13:44:39 -08:00
Brian Candler
9f44fcd540 Clarify behaviour of erasure coding sets (#8745) 2020-01-05 13:00:11 -08:00
Harshavardhana
b9b2b37366 Fix SECURITY.md format and newlines 2020-01-03 17:49:34 -08:00
George Xie
7f31d933a8 fixes some typos, for CREDITS change (#8743) 2020-01-03 17:49:01 -08:00
Harshavardhana
6695fd6a61 Add more context aware error for policy parsing errors (#8726)
In the existing functionality we simply return a generic
error such as "MalformedPolicy", which carries just
a generic string "invalid resource"; this is not very
meaningful when there might be multiple types of errors
during policy parsing. This PR ensures that we send
these errors back to the client to indicate the actual
error, and brings in two concrete types:

 - iampolicy.Error
 - policy.Error

Refer #8202
2020-01-03 11:28:52 -08:00
Minio Trusted
84e55e2e6f Update yaml files to latest version RELEASE.2020-01-03T19-12-21Z 2020-01-03 19:20:28 +00:00
Harshavardhana
b00cda8ad4 Avoid running lock maintenance from all nodes (#8737)
Co-Authored-By: Krishnan Parthasarathi <krisis@users.noreply.github.com>
2020-01-03 23:11:07 +05:30
Anis Elleuch
d861edfc00 xl: Print the correct err msg when access to the backend is forbidden (#8735)
minio server /data{1..4} showed an error about inability to bind a port, though
the real problem is that /data{1..4} cannot be created because of a lack of
permissions.

This commit fixes the behavior.
2020-01-03 21:15:26 +05:30
Harshavardhana
dd311623df Update design doc with zone implementation details (#8738)
Fixes #8719
2020-01-02 16:46:16 -08:00
Harshavardhana
cb935980a5 Fix version to be release-tag (#8730) 2020-01-02 20:18:32 +05:30
Praveen raj Mani
157721f694 Fix readiness to return 200 for read-only mode (#8728)
- We should declare a cluster ready even if only read quorum is achieved (at least n/2 disks are online).
- As such, all zones should have enough read quorum, thus making the cluster ready for reads.
2020-01-02 05:05:01 -08:00
Nitish Tiwari
97d799b9f0 doc: Update Kubernetes examples to use Readiness Checks (#8727) 2020-01-01 08:25:09 -08:00
Harshavardhana
0b7bd024fb Fix dependencies graph for minio source compilation (#8717)
We had a messy cyclical dependency problem with `mc`
due to dependencies in pkg/console; moved pkg/console
into minio for more control, and also to avoid any further
cyclical dependencies of `mc` clobbering up the
dependencies on the server.

Fixes #8659
2019-12-31 09:36:13 +05:30
Harshavardhana
3af70b36fd Disallow creating buckets even with different domains (#8716)
If two distinct clusters were started with different domains
along with a single common domain, this situation was leading
to conflicting buckets getting created on different clusters.

To avoid this, do not prematurely error out if the key has no
entries; let the caller decide which entry matches and
which entry is valid. This allows support for MINIO_DOMAIN
with one common domain, while each cluster may have its own
domains.

Fixes #8705
2019-12-30 17:11:47 -08:00
Minio Trusted
8eba97da74 Update yaml files to latest version RELEASE.2019-12-30T05-45-39Z 2019-12-30 05:56:46 +00:00
660 changed files with 39060 additions and 23849 deletions

.github/ISSUE_TEMPLATE/config.yml vendored Normal file (+8)

@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
- name: MinIO Community Support
url: https://slack.min.io
about: Please ask and answer questions here.
- name: MinIO SUBNET Support
url: https://min.io/pricing
about: Join this for Enterprise Support.

.github/lock.yml vendored Normal file (+39)

@@ -0,0 +1,39 @@
# Configuration for Lock Threads - https://github.com/dessant/lock-threads-app
# Number of days of inactivity before a closed issue or pull request is locked
daysUntilLock: 365
# Skip issues and pull requests created before a given timestamp. Timestamp must
# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
skipCreatedBefore: false
# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
exemptLabels: []
# Label to add before locking, such as `outdated`. Set to `false` to disable
lockLabel: false
# Comment to post before locking. Set to `false` to disable
lockComment: >-
This thread has been automatically locked since there has not been
any recent activity after it was closed. Please open a new issue for
related bugs.
# Assign `resolved` as the reason for locking. Set to `false` to disable
setLockReason: true
# Limit to only `issues` or `pulls`
only: issues
# Optionally, specify configuration settings just for `issues` or `pulls`
# issues:
# exemptLabels:
# - help-wanted
# lockLabel: outdated
# pulls:
# daysUntilLock: 30
# Repository to extend settings from
# _extends: repo

.github/stale.yml vendored Normal file (+59)

@@ -0,0 +1,59 @@
# Configuration for probot-stale - https://github.com/probot/stale
# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 90
# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: 30
# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
onlyLabels: []
# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
- "security"
- "pending discussion"
# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false
# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: false
# Set to true to ignore issues with an assignee (defaults to false)
exemptAssignees: false
# Label to use when marking as stale
staleLabel: stale
# Comment to post when marking as stale. Set to `false` to disable
markComment: >-
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed after 21 days if no further activity
occurs. Thank you for your contributions.
# Comment to post when removing the stale label.
# unmarkComment: >
# Your comment here.
# Comment to post when closing a stale Issue or Pull Request.
# closeComment: >
# Your comment here.
# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 1
# Limit to only `issues` or `pulls`
# only: issues
# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
# pulls:
# daysUntilStale: 30
# markComment: >
# This pull request has been automatically marked as stale because it has not had
# recent activity. It will be closed if no further activity occurs. Thank you
# for your contributions.
# issues:
# exemptLabels:
# - confirmed

.github/workflows/go.yml vendored Normal file (+52)

@@ -0,0 +1,52 @@
name: Go
on:
pull_request:
branches:
- master
push:
branches:
- master
jobs:
build:
name: Test on Go ${{ matrix.go-version }} and ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.13.x]
os: [ubuntu-latest, windows-latest]
steps:
- uses: actions/checkout@v2
- uses: actions/setup-node@v1
with:
node-version: '12'
- uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Build on ${{ matrix.os }}
if: matrix.os == 'windows-latest'
env:
CGO_ENABLED: 0
GO111MODULE: on
SIMPLE_CI: 1
run: |
go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
go test -v --timeout 30m ./...
- name: Build on ${{ matrix.os }}
if: matrix.os == 'ubuntu-latest'
env:
CGO_ENABLED: 0
GO111MODULE: on
SIMPLE_CI: 1
run: |
sudo apt-get install devscripts shellcheck
make
diff -au <(gofmt -s -d cmd) <(printf "")
diff -au <(gofmt -s -d pkg) <(printf "")
make test-race
make crosscompile
make verify
make verify-healing
cd browser && npm install && npm run test && cd ..
bash -c 'shopt -s globstar; shellcheck mint/**/*.sh'


@@ -1,4 +0,0 @@
{
"asi": true,
"esnext": true
}


@@ -1,4 +0,0 @@
{
"numFilesToCheck": 10,
"requiredOrgs": ["minio"]
}


@@ -1,59 +0,0 @@
go_import_path: github.com/minio/minio
language: go
addons:
apt:
packages:
- shellcheck
services:
- docker
# this ensures PRs based on a local branch are not built twice
# the downside is that a PR targeting a different branch is not built
# but as a workaround you can add the branch to this list
branches:
only:
- master
matrix:
include:
- os: linux
dist: trusty
sudo: required
env:
- ARCH=x86_64
- CGO_ENABLED=0
- GO111MODULE=on
- SIMPLE_CI=1
go: 1.13.x
script:
- make
- diff -au <(gofmt -s -d cmd) <(printf "")
- diff -au <(gofmt -s -d pkg) <(printf "")
- for d in $(go list ./... | grep -v browser); do CGO_ENABLED=1 go test -v -race --timeout 20m "$d"; done
- make verifiers
- make crosscompile
- make verify
- cd browser && npm install && npm run test && cd ..
- bash -c 'shopt -s globstar; shellcheck mint/**/*.sh'
- os: windows
env:
- ARCH=x86_64
- CGO_ENABLED=0
- GO111MODULE=on
- SIMPLE_CI=1
go: 1.13.x
script:
- go build --ldflags="$(go run buildscripts/gen-ldflags.go)" -o %GOPATH%\bin\minio.exe
- for d in $(go list ./... | grep -v browser); do go test -v --timeout 20m "$d"; done
before_script:
# Add an IPv6 config - see the corresponding Travis issue
# https://github.com/travis-ci/travis-ci/issues/8361
- if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6'; fi
before_install:
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then nvm install stable ; fi


@@ -1,4 +1,4 @@
# MinIO Contribution Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Go Report Card](https://goreportcard.com/badge/minio/minio)](https://goreportcard.com/report/minio/minio) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)
# MinIO Contribution Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)
``MinIO`` community welcomes your contribution. To make the process as seamless as possible, we recommend you read this contribution guide.

CREDITS (5544)

File diff suppressed because it is too large


@@ -5,24 +5,25 @@ LABEL maintainer="MinIO Inc <dev@min.io>"
ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on
ENV GOPROXY https://proxy.golang.org
RUN \
apk add --no-cache git && \
git clone https://github.com/minio/minio && cd minio && \
go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"
FROM alpine:3.9
FROM alpine:3.10
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
EXPOSE 9000
COPY --from=0 /go/bin/minio /usr/bin/minio
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY --from=0 /go/minio/CREDITS /third_party/
COPY --from=0 /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \

Dockerfile.arm.release Normal file (+41)

@@ -0,0 +1,41 @@
FROM golang:1.13-alpine as builder
WORKDIR /home
ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on
RUN \
apk add --no-cache git 'curl>7.61.0' && \
git clone https://github.com/minio/minio && \
curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
FROM arm32v7/alpine:3.10
LABEL maintainer="MinIO Inc <dev@min.io>"
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY CREDITS /third_party/
COPY --from=builder /home/qemu-arm-static /usr/bin/qemu-arm-static
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl https://dl.min.io/server/minio/release/linux-arm/minio > /usr/bin/minio && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh
EXPOSE 9000
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
CMD ["minio"]

Dockerfile.arm64.release Normal file (+41)

@@ -0,0 +1,41 @@
FROM golang:1.13-alpine as builder
WORKDIR /home
ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on
RUN \
apk add --no-cache git 'curl>7.61.0' && \
git clone https://github.com/minio/minio && \
curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
FROM arm64v8/alpine:3.10
LABEL maintainer="MinIO Inc <dev@min.io>"
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY CREDITS /third_party/
COPY --from=builder /home/qemu-arm-static /usr/bin/qemu-arm-static
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl https://dl.min.io/server/minio/release/linux-arm64/minio > /usr/bin/minio && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh
EXPOSE 9000
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
CMD ["minio"]


@@ -1,14 +1,16 @@
FROM alpine:3.9
FROM alpine:3.10
LABEL maintainer="MinIO Inc <dev@min.io>"
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY minio /usr/bin/
COPY CREDITS /third_party/
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \


@@ -1,17 +1,11 @@
FROM ubuntu:16.04
FROM ubuntu:18.04
ENV DEBIAN_FRONTEND noninteractive
ENV LANG C.UTF-8
ENV GOROOT /usr/local/go
ENV GOPATH /usr/local
ENV GOPATH /usr/local/gopath
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH
ENV MINT_ROOT_DIR /mint
COPY mint /mint
RUN apt-get --yes update && apt-get --yes upgrade && \


@@ -8,16 +8,18 @@ RUN \
apk add --no-cache git && \
git clone https://github.com/minio/minio
FROM alpine:3.9
FROM alpine:3.10
LABEL maintainer="MinIO Inc <dev@min.io>"
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY CREDITS /third_party/
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \


@@ -34,11 +34,13 @@ USER ci
RUN make
RUN bash -c 'diff -au <(gofmt -s -d cmd) <(printf "")'
RUN bash -c 'diff -au <(gofmt -s -d pkg) <(printf "")'
RUN for d in $(go list ./... | grep -v browser); do go test -v -race --timeout 20m "$d"; done
RUN make verifiers
RUN make test-race
RUN make crosscompile
RUN make verify
## -- add healing tests
RUN make verify-healing
#-------------------------------------------------------------
# Stage 2: Test Frontend
#-------------------------------------------------------------
@@ -55,7 +57,7 @@ RUN yarn test
#-------------------------------------------------------------
# Stage 3: Run Gateway Tests
#-------------------------------------------------------------
FROM ubuntu:16.04
FROM ubuntu:18.04
COPY --from=0 /go/src/github.com/minio/minio/minio /usr/bin/minio
COPY buildscripts/gateway-tests.sh /usr/bin/gateway-tests.sh
@@ -64,7 +66,7 @@ COPY mint /mint
ENV DEBIAN_FRONTEND noninteractive
ENV LANG C.UTF-8
ENV GOROOT /usr/local/go
ENV GOPATH /usr/local
ENV GOPATH /usr/local/gopath
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH
ENV SIMPLE_CI 1
ENV MINT_ROOT_DIR /mint


@@ -22,7 +22,7 @@ getdeps:
ifeq ($(GOARCH),s390x)
@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && GO111MODULE=off go get honnef.co/go/tools/cmd/staticcheck)
else
@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && wget --quiet https://github.com/dominikh/go-tools/releases/download/2019.2.3/staticcheck_${GOOS}_${GOARCH}.tar.gz && tar xf staticcheck_${GOOS}_${GOARCH}.tar.gz && mv staticcheck/staticcheck ${GOPATH}/bin/staticcheck && chmod +x ${GOPATH}/bin/staticcheck && rm -f staticcheck_${GOOS}_${GOARCH}.tar.gz && rm -rf staticcheck)
@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && wget --quiet https://github.com/dominikh/go-tools/releases/download/2020.1.3/staticcheck_${GOOS}_${GOARCH}.tar.gz && tar xf staticcheck_${GOOS}_${GOARCH}.tar.gz && mv staticcheck/staticcheck ${GOPATH}/bin/staticcheck && chmod +x ${GOPATH}/bin/staticcheck && rm -f staticcheck_${GOOS}_${GOARCH}.tar.gz && rm -rf staticcheck)
endif
@which misspell 1>/dev/null || (echo "Installing misspell" && GO111MODULE=off go get -u github.com/client9/misspell/cmd/misspell)
@@ -64,20 +64,26 @@ test: verifiers build
@echo "Running unit tests"
@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
test-race: verifiers build
@echo "Running unit tests under -race"
@(env bash $(PWD)/buildscripts/race.sh)
# Verify minio binary
verify:
@echo "Verifying build with race"
@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-build.sh)
coverage: build
@echo "Running all coverage for minio"
@(env bash $(PWD)/buildscripts/go-coverage.sh)
# Verify healing of disks with minio binary
verify-healing:
@echo "Verify healing build with race"
@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-healing.sh)
# Builds minio locally.
build: checks
@echo "Building minio binary to './minio'"
@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
docker: build
@docker build -t $(TAG) . -f Dockerfile.dev
@@ -95,3 +101,4 @@ clean:
@rm -rvf minio
@rm -rvf build
@rm -rvf release
@rm -rvf .verify*
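Taken together, the new Makefile targets split the old single test loop into discrete stages that mirror the Docker.simpleci Stage 1 steps above. A sketch of running the same sequence locally (target names come from the hunks above; the ordering is an assumption):
```sh
make verifiers       # staticcheck, misspell, gofmt checks
make test-race       # unit tests under the race detector (buildscripts/race.sh)
make crosscompile    # cross-compile smoke test for supported OS/arch pairs
make verify          # -race build plus buildscripts/verify-build.sh
make verify-healing  # -race build plus buildscripts/verify-healing.sh
```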

NOTICE

@@ -1,4 +1,4 @@
MinIO Cloud Storage, (C) 2014,2015 MinIO, Inc.
MinIO Cloud Storage, (C) 2014-2020 MinIO, Inc.
This product includes software developed at MinIO, Inc.
(https://min.io/).
@@ -6,4 +6,4 @@ This product includes software developed at MinIO, Inc.
The MinIO project contains unmodified/modified subcomponents too with
separate copyright notices and license terms. Your use of the source
code for these subcomponents is subject to the terms and conditions
of the following licenses.
of Apache License Version 2.0


@@ -1,5 +1,5 @@
# MinIO Quickstart Guide
[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Go Report Card](https://goreportcard.com/badge/minio/minio)](https://goreportcard.com/report/minio/minio) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)
[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)
[![MinIO](https://raw.githubusercontent.com/minio/minio/master/.github/logo.svg?sanitize=true)](https://min.io)
@@ -21,7 +21,7 @@ docker run -p 9000:9000 minio/minio:edge server /data
> NOTE: Docker will not display the default keys unless you start the container with the `-it`(interactive TTY) argument. Generally, it is not recommended to use default keys with containers. Please visit MinIO Docker quickstart guide for more information [here](https://docs.min.io/docs/minio-docker-quickstart-guide)
## macOS
### Homebrew
### Homebrew (recommended)
Install minio packages using [Homebrew](http://brew.sh/)
```sh
brew install minio/stable/minio
@@ -74,7 +74,7 @@ minio.exe server D:\Photos
## FreeBSD
### Port
Install minio packages using [pkg](https://github.com/freebsd/pkg)
Install minio packages using [pkg](https://github.com/freebsd/pkg). MinIO doesn't officially build FreeBSD binaries; the port is maintained by FreeBSD upstream [here](https://www.freshports.org/www/minio).
```sh
pkg install minio
@@ -185,11 +185,5 @@ mc admin update <minio alias, e.g., myminio>
## Contribute to MinIO Project
Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)
## Caveats
MinIO in its default mode doesn't use MD5Sum checksums of incoming streams unless requested by the client in the `Content-Md5` header for validation. This may lead to incompatibility with rare S3 clients like `s3ql` which unfortunately do not set `Content-Md5` but depend on the hex MD5Sum of the stream being calculated by the server. MinIO considers this a bug in `s3ql` that should be fixed on the client side, because MD5Sum is a poor way to checksum and validate the authenticity of objects. MinIO nevertheless provides a workaround until such client applications are fixed: start the server with the `--compat` option instead.
```sh
./minio --compat server /data
```
## License
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fminio%2Fminio.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fminio%2Fminio?ref=badge_large)


@@ -1,4 +1,4 @@
# MinIO Quickstart Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Go Report Card](https://goreportcard.com/badge/minio/minio)](https://goreportcard.com/report/minio/minio) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)
# MinIO Quickstart Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)
MinIO is an object storage server released under the Apache License v2.0. It is compatible with the Amazon S3 cloud storage service interface and is well suited for storing large volumes of unstructured data such as images, videos, log files, backup data, and container/VM images. A single object can be of any size, from a few KB up to a maximum of 5 TB.


@@ -8,12 +8,12 @@ Whenever there is a security update you just need to upgrade to the latest versi
## Reporting a Vulnerability
All security bugs in [minio/minio](https://github.com/minio/minio) (or other minio/* repositories)
should be reported by email to security@min.io. Your email will be acknowledged within 48 hours,
and you'll receive a more detailed response to your email within 72 hours indicating the next steps
in handling your report.
should be reported by email to security@min.io. Your email will be acknowledged within 48 hours,
and you'll receive a more detailed response to your email within 72 hours indicating the next steps
in handling your report.
Please, provide a detailed explanation of the issue. In particular, outline the type of the security
issue (DoS, authentication bypass, information disclouse, ...) and the assumptions you're making (e.g. do
Please, provide a detailed explanation of the issue. In particular, outline the type of the security
issue (DoS, authentication bypass, information disclosure, ...) and the assumptions you're making (e.g. do
you need access credentials for a successful exploit).
If you have not received a reply to your email within 48 hours or you have not heard from the security team
@@ -28,14 +28,14 @@ MinIO uses the following disclosure process:
1. Once the security report is received one member of the security team tries to verify and reproduce
the issue and determines the impact it has.
2. A member of the security team will respond and either confrim or reject the security report.
If the report is rejected the repsonse explains why.
2. A member of the security team will respond and either confirm or reject the security report.
If the report is rejected the response explains why.
3. Code is audited to find any potential similar problems.
4. Fixes are prepared for the latest release.
4. Fixes are prepared for the latest release.
5. On the date that the fixes are applied a security advisory will be published on https://blog.min.io.
Please inform us in your report email whether MinIO should mention your contribution w.r.t. fixing
the security issue. By default MinIO will **not** publish this information to protect your privacy.
This process can take some time, especially when coordination is required with maintainers of other projects.
Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we
follow the process described above to ensure that disclosures are handled consistently.
Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we
follow the process described above to ensure that disclosures are handled consistently.


@@ -88,11 +88,6 @@ export class ChangePasswordModal extends React.Component {
canChangePassword() {
const { serverInfo } = this.props
// Password change is not allowed in WORM mode
if (serverInfo.info.isWorm) {
return false
}
// Password change is not allowed for temporary users(STS)
if(serverInfo.userInfo.isTempUser) {
return false


@@ -148,7 +148,8 @@ export class Login extends React.Component {
<OpenIDLoginButton
className="btn openid-btn"
clientId={this.state.clientId}
authorizationEndpoint={this.state.discoveryDoc.authorization_endpoint}
authEp={this.state.discoveryDoc.authorization_endpoint}
authScopes={this.state.discoveryDoc.scopes_supported}
>
Log in with OpenID
</OpenIDLoginButton>


@@ -66,6 +66,7 @@ export class OpenIDLogin extends React.Component {
const authURL = buildOpenIDAuthURL(
this.state.discoveryDoc.authorization_endpoint,
this.state.discoveryDoc.scopes_supported,
redirectURI,
this.state.clientID,
nonce


@@ -27,7 +27,7 @@ export class OpenIDLoginButton extends React.Component {
handleClick(event) {
event.stopPropagation()
const { authorizationEndpoint, clientId } = this.props
const { authEp, authScopes, clientId } = this.props
let redirectURI = window.location.href.split("#")[0]
if (redirectURI.endsWith('/')) {
@@ -40,7 +40,7 @@ export class OpenIDLoginButton extends React.Component {
const nonce = getRandomString(16)
storage.setItem(OPEN_ID_NONCE_KEY, nonce)
const authURL = buildOpenIDAuthURL(authorizationEndpoint, redirectURI, clientId, nonce)
const authURL = buildOpenIDAuthURL(authEp, authScopes, redirectURI, clientId, nonce)
window.location = authURL
}


@@ -64,17 +64,6 @@ describe("ChangePasswordModal", () => {
shallow(<ChangePasswordModal serverInfo={serverInfo} />)
})
it("should not allow changing password when isWorm is true", () => {
const newServerInfo = { ...serverInfo, info: { isWorm: true } }
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
expect(
wrapper
.find("ModalBody")
.childAt(0)
.text()
).toBe("Credentials of this user cannot be updated through MinIO Browser.")
})
it("should not allow changing password when not IAM user", () => {
const newServerInfo = {
...serverInfo,


@@ -0,0 +1,24 @@
/*
* MinIO Cloud Storage (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { createSelector } from "reselect"
export const getServerInfo = state => state.browser.serverInfo
export const hasServerPublicDomain = createSelector(
getServerInfo,
serverInfo => Boolean(serverInfo.info && serverInfo.info.domains && serverInfo.info.domains.length),
)


@@ -16,14 +16,13 @@
export const OPEN_ID_NONCE_KEY = 'openIDKey'
export const buildOpenIDAuthURL = (authorizationEndpoint, redirectURI, clientID, nonce) => {
export const buildOpenIDAuthURL = (authEp, authScopes, redirectURI, clientID, nonce) => {
const params = new URLSearchParams()
params.set("response_type", "id_token")
params.set("scope", "openid")
params.set("scope", authScopes.join(" "))
params.set("client_id", clientID)
params.set("redirect_uri", redirectURI)
params.set("nonce", nonce)
return `${authorizationEndpoint}?${params.toString()}`
return `${authEp}?${params.toString()}`
}
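To make the scope change concrete, here is a sketch of the URL shape the updated helper now produces, with hypothetical endpoint, client id, redirect URI, and nonce values (none of them are from this diff):
```sh
# authEp and the space-joined authScopes are hypothetical; note that
# URLSearchParams encodes the joined scopes as "openid+email".
AUTH_EP="https://idp.example.com/authorize"
echo "${AUTH_EP}?response_type=id_token&scope=openid+email&client_id=abc123&redirect_uri=https%3A%2F%2Fminio.example.com%2F&nonce=r4nd0m"
```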


@@ -14,30 +14,67 @@
* limitations under the License.
*/
import mimedb from 'mime-types'
import mimedb from "mime-types"
const isFolder = (name, contentType) => {
if (name.endsWith('/')) return true
if (name.endsWith("/")) return true
return false
}
const isPdf = (name, contentType) => {
if (contentType === 'application/pdf') return true
if (contentType === "application/pdf") return true
return false
}
const isImage = (name, contentType) => {
if (
contentType === "image/jpeg" ||
contentType === "image/gif" ||
contentType === "image/x-icon" ||
contentType === "image/png" ||
contentType === "image/svg+xml" ||
contentType === "image/tiff" ||
contentType === "image/webp"
)
return true
return false
}
const isZip = (name, contentType) => {
if (!contentType || !contentType.includes('/')) return false
if (contentType.split('/')[1].includes('zip')) return true
if (!contentType || !contentType.includes("/")) return false
if (contentType.split("/")[1].includes("zip")) return true
return false
}
const isCode = (name, contentType) => {
const codeExt = ['c', 'cpp', 'go', 'py', 'java', 'rb', 'js', 'pl', 'fs',
'php', 'css', 'less', 'scss', 'coffee', 'net', 'html',
'rs', 'exs', 'scala', 'hs', 'clj', 'el', 'scm', 'lisp',
'asp', 'aspx']
const ext = name.split('.').reverse()[0]
const codeExt = [
"c",
"cpp",
"go",
"py",
"java",
"rb",
"js",
"pl",
"fs",
"php",
"css",
"less",
"scss",
"coffee",
"net",
"html",
"rs",
"exs",
"scala",
"hs",
"clj",
"el",
"scm",
"lisp",
"asp",
"aspx",
]
const ext = name.split(".").reverse()[0]
for (var i in codeExt) {
if (ext === codeExt[i]) return true
}
@@ -45,9 +82,9 @@ const isCode = (name, contentType) => {
}
const isExcel = (name, contentType) => {
if (!contentType || !contentType.includes('/')) return false
const types = ['excel', 'spreadsheet']
const subType = contentType.split('/')[1]
if (!contentType || !contentType.includes("/")) return false
const types = ["excel", "spreadsheet"]
const subType = contentType.split("/")[1]
for (var i in types) {
if (subType.includes(types[i])) return true
}
@@ -55,9 +92,9 @@ const isExcel = (name, contentType) => {
}
const isDoc = (name, contentType) => {
if (!contentType || !contentType.includes('/')) return false
const types = ['word', '.document']
const subType = contentType.split('/')[1]
if (!contentType || !contentType.includes("/")) return false
const types = ["word", ".document"]
const subType = contentType.split("/")[1]
for (var i in types) {
if (subType.includes(types[i])) return true
}
@@ -65,9 +102,9 @@ const isDoc = (name, contentType) => {
}
const isPresentation = (name, contentType) => {
if (!contentType || !contentType.includes('/')) return false
var types = ['powerpoint', 'presentation']
const subType = contentType.split('/')[1]
if (!contentType || !contentType.includes("/")) return false
var types = ["powerpoint", "presentation"]
const subType = contentType.split("/")[1]
for (var i in types) {
if (subType.includes(types[i])) return true
}
@@ -76,31 +113,32 @@ const isPresentation = (name, contentType) => {
const typeToIcon = (type) => {
return (name, contentType) => {
if (!contentType || !contentType.includes('/')) return false
if (contentType.split('/')[0] === type) return true
if (!contentType || !contentType.includes("/")) return false
if (contentType.split("/")[0] === type) return true
return false
}
}
export const getDataType = (name, contentType) => {
if (contentType === "") {
contentType = mimedb.lookup(name) || 'application/octet-stream'
contentType = mimedb.lookup(name) || "application/octet-stream"
}
const check = [
['folder', isFolder],
['code', isCode],
['audio', typeToIcon('audio')],
['image', typeToIcon('image')],
['video', typeToIcon('video')],
['text', typeToIcon('text')],
['pdf', isPdf],
['zip', isZip],
['excel', isExcel],
['doc', isDoc],
['presentation', isPresentation]
["folder", isFolder],
["code", isCode],
["audio", typeToIcon("audio")],
["image", typeToIcon("image")],
["video", typeToIcon("video")],
["text", typeToIcon("text")],
["pdf", isPdf],
["image", isImage],
["zip", isZip],
["excel", isExcel],
["doc", isDoc],
["presentation", isPresentation],
]
for (var i in check) {
if (check[i][1](name, contentType)) return check[i][0]
}
return 'other'
return "other"
}


@@ -19,18 +19,22 @@ import { connect } from "react-redux"
import { Dropdown } from "react-bootstrap"
import ShareObjectModal from "./ShareObjectModal"
import DeleteObjectConfirmModal from "./DeleteObjectConfirmModal"
import PreviewObjectModal from "./PreviewObjectModal"
import * as objectsActions from "./actions"
import { getDataType } from "../mime.js"
import {
SHARE_OBJECT_EXPIRY_DAYS,
SHARE_OBJECT_EXPIRY_HOURS,
SHARE_OBJECT_EXPIRY_MINUTES
SHARE_OBJECT_EXPIRY_MINUTES,
} from "../constants"
export class ObjectActions extends React.Component {
constructor(props) {
super(props)
this.state = {
showDeleteConfirmation: false
showDeleteConfirmation: false,
showPreview: false,
}
}
shareObject(e) {
@@ -53,7 +57,20 @@ export class ObjectActions extends React.Component {
}
hideDeleteConfirmModal() {
this.setState({
showDeleteConfirmation: false
showDeleteConfirmation: false,
})
}
getObjectURL(objectname, callback) {
const { getObjectURL } = this.props
getObjectURL(objectname, callback)
}
showPreviewModal(e) {
e.preventDefault()
this.setState({ showPreview: true })
}
hidePreviewModal() {
this.setState({
showPreview: false,
})
}
render() {
@@ -69,6 +86,15 @@ export class ObjectActions extends React.Component {
>
<i className="fas fa-share-alt" />
</a>
{getDataType(object.name, object.contentType) == "image" && (
<a
href=""
className="fiad-action"
onClick={this.showPreviewModal.bind(this)}
>
<i className="far fa-file-image" />
</a>
)}
<a
href=""
className="fiad-action"
@@ -77,14 +103,22 @@ export class ObjectActions extends React.Component {
<i className="fas fa-trash-alt" />
</a>
</Dropdown.Menu>
{(showShareObjectModal && shareObjectName === object.name) &&
<ShareObjectModal object={object} />}
{showShareObjectModal && shareObjectName === object.name && (
<ShareObjectModal object={object} />
)}
{this.state.showDeleteConfirmation && (
<DeleteObjectConfirmModal
deleteObject={this.deleteObject.bind(this)}
hideDeleteConfirmModal={this.hideDeleteConfirmModal.bind(this)}
/>
)}
{this.state.showPreview && (
<PreviewObjectModal
object={object}
hidePreviewModal={this.hidePreviewModal.bind(this)}
getObjectURL={this.getObjectURL.bind(this)}
/>
)}
</Dropdown>
)
}
@@ -94,15 +128,17 @@ const mapStateToProps = (state, ownProps) => {
return {
object: ownProps.object,
showShareObjectModal: state.objects.shareObject.show,
shareObjectName: state.objects.shareObject.object
shareObjectName: state.objects.shareObject.object,
}
}
const mapDispatchToProps = dispatch => {
const mapDispatchToProps = (dispatch) => {
return {
shareObject: (object, days, hours, minutes) =>
dispatch(objectsActions.shareObject(object, days, hours, minutes)),
deleteObject: object => dispatch(objectsActions.deleteObject(object))
deleteObject: (object) => dispatch(objectsActions.deleteObject(object)),
getObjectURL: (object, callback) =>
dispatch(objectsActions.getObjectURL(object, callback)),
}
}


@@ -0,0 +1,68 @@
/*
* MinIO Cloud Storage (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from "react"
import { Modal, ModalHeader, ModalBody } from "react-bootstrap"
class PreviewObjectModal extends React.Component {
constructor(props) {
super(props)
this.state = {
url: "",
}
}
componentDidMount() {
this.props.getObjectURL(this.props.object.name, (url) => {
this.setState({
url: url,
})
})
}
render() {
const { hidePreviewModal } = this.props
return (
<Modal
show={true}
animation={false}
onHide={hidePreviewModal}
bsSize="large"
>
<ModalHeader>Preview</ModalHeader>
<ModalBody>
<div className="input-group">
{this.state.url && (
<img
alt="Image broken"
src={this.state.url}
style={{ display: "block", width: "100%" }}
/>
)}
</div>
</ModalBody>
<div className="modal-footer">
{
<button className="btn btn-link" onClick={hidePreviewModal}>
Cancel
</button>
}
</div>
</Modal>
)
}
}
export default PreviewObjectModal


@@ -77,7 +77,8 @@ export class ShareObjectModal extends React.Component {
hideShareObject()
}
render() {
const { shareObjectDetails, shareObject, hideShareObject } = this.props
const { shareObjectDetails, hideShareObject } = this.props
const url = `${window.location.protocol}//${shareObjectDetails.url}`
return (
<Modal
show={true}
@@ -93,11 +94,12 @@ export class ShareObjectModal extends React.Component {
type="text"
ref={node => (this.copyTextInput = node)}
readOnly="readOnly"
value={window.location.protocol + "//" + shareObjectDetails.url}
value={url}
onClick={() => this.copyTextInput.select()}
/>
</div>
<div
{shareObjectDetails.showExpiryDate && (
<div
className="input-group"
style={{ display: web.LoggedIn() ? "block" : "none" }}
>
@@ -174,10 +176,11 @@ export class ShareObjectModal extends React.Component {
</div>
</div>
</div>
)}
</ModalBody>
<div className="modal-footer">
<CopyToClipboard
text={window.location.protocol + "//" + shareObjectDetails.url}
text={url}
onCopy={this.onUrlCopied.bind(this)}
>
<button className="btn btn-success">Copy Link</button>


@@ -66,6 +66,49 @@ describe("ObjectActions", () => {
expect(deleteObject).toHaveBeenCalledWith("obj1")
})
it("should show PreviewObjectModal when preview action is clicked", () => {
const wrapper = shallow(
<ObjectActions
object={{ name: "obj1", contentType: "image/jpeg"}}
currentPrefix={"pre1/"} />
)
wrapper
.find("a")
.at(1)
.simulate("click", { preventDefault: jest.fn() })
expect(wrapper.state("showPreview")).toBeTruthy()
expect(wrapper.find("PreviewObjectModal").length).toBe(1)
})
it("should hide PreviewObjectModal when cancel button is clicked", () => {
const wrapper = shallow(
<ObjectActions
object={{ name: "obj1" , contentType: "image/jpeg"}}
currentPrefix={"pre1/"} />
)
wrapper
.find("a")
.at(1)
.simulate("click", { preventDefault: jest.fn() })
wrapper.find("PreviewObjectModal").prop("hidePreviewModal")()
wrapper.update()
expect(wrapper.state("showPreview")).toBeFalsy()
expect(wrapper.find("PreviewObjectModal").length).toBe(0)
})
it("should not show PreviewObjectModal when preview action is clicked if object is not an image", () => {
const wrapper = shallow(
<ObjectActions
object={{ name: "obj1"}}
currentPrefix={"pre1/"} />
)
expect(wrapper
.find("a")
.length).toBe(2) // find only the other 2
})
it("should call shareObject with object and expiry", () => {
const shareObject = jest.fn()
const wrapper = shallow(


@@ -34,7 +34,7 @@ describe("ShareObjectModal", () => {
shallow(
<ShareObjectModal
object={{ name: "obj1" }}
shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
/>
)
})
@@ -44,7 +44,7 @@ describe("ShareObjectModal", () => {
const wrapper = shallow(
<ShareObjectModal
object={{ name: "obj1" }}
shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
hideShareObject={hideShareObject}
/>
)
@@ -59,7 +59,7 @@ describe("ShareObjectModal", () => {
const wrapper = shallow(
<ShareObjectModal
object={{ name: "obj1" }}
shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
/>
)
expect(
@@ -76,7 +76,7 @@ describe("ShareObjectModal", () => {
const wrapper = shallow(
<ShareObjectModal
object={{ name: "obj1" }}
shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
hideShareObject={hideShareObject}
showCopyAlert={showCopyAlert}
/>
@@ -89,8 +89,15 @@ describe("ShareObjectModal", () => {
describe("Update expiry values", () => {
const props = {
object: { name: "obj1" },
shareObjectDetails: { show: true, object: "obj1", url: "test" }
shareObjectDetails: { show: true, object: "obj1", url: "test", showExpiryDate: true }
}
it("should not show expiry values if shared with public link", () => {
const shareObjectDetails = { show: true, object: "obj1", url: "test", showExpiryDate: false }
const wrapper = shallow(<ShareObjectModal {...props} shareObjectDetails={shareObjectDetails} />)
expect(wrapper.find('.set-expire').exists()).toEqual(false)
})
it("should have default expiry values", () => {
const wrapper = shallow(<ShareObjectModal {...props} />)
expect(wrapper.state("expiry")).toEqual({


@@ -34,6 +34,7 @@ jest.mock("../../web", () => ({
.mockReturnValueOnce(false)
.mockReturnValueOnce(true)
.mockReturnValueOnce(true)
.mockReturnValueOnce(true)
.mockReturnValueOnce(false),
ListObjects: jest.fn(({ bucketName }) => {
if (bucketName === "test-deny") {
@@ -69,7 +70,14 @@ jest.mock("../../web", () => ({
})
.mockImplementationOnce(() => {
return Promise.resolve({ token: "test" })
})
}),
GetBucketPolicy: jest.fn(({ bucketName, prefix }) => {
if (!bucketName) {
return Promise.reject({ message: "Invalid bucket" })
}
if (bucketName === 'test-public') return Promise.resolve({ policy: 'readonly' })
return Promise.resolve({})
})
}))
const middlewares = [thunk]
@@ -295,7 +303,8 @@ describe("Objects actions", () => {
type: "objects/SET_SHARE_OBJECT",
show: true,
object: "b.txt",
url: "test"
url: "test",
showExpiryDate: true
}
]
store.dispatch(actionsObjects.showShareObject("b.txt", "test"))
@@ -321,14 +330,16 @@ describe("Objects actions", () => {
it("creates objects/SET_SHARE_OBJECT when object is shared", () => {
const store = mockStore({
buckets: { currentBucket: "bk1" },
objects: { currentPrefix: "pre1/" }
objects: { currentPrefix: "pre1/" },
browser: { serverInfo: {} },
})
const expectedActions = [
{
type: "objects/SET_SHARE_OBJECT",
show: true,
object: "a.txt",
url: "https://test.com/bk1/pre1/b.txt"
url: "https://test.com/bk1/pre1/b.txt",
showExpiryDate: true
},
{
type: "alert/SET",
@@ -347,10 +358,42 @@ describe("Objects actions", () => {
})
})
it("creates objects/SET_SHARE_OBJECT when object is shared with public link", () => {
const store = mockStore({
buckets: { currentBucket: "test-public" },
objects: { currentPrefix: "pre1/" },
browser: { serverInfo: { info: { domains: ['public.com'] }} },
})
const expectedActions = [
{
type: "objects/SET_SHARE_OBJECT",
show: true,
object: "a.txt",
url: "public.com/test-public/pre1/a.txt",
showExpiryDate: false
},
{
type: "alert/SET",
alert: {
type: "success",
message: "Object shared.",
id: alertActions.alertId
}
}
]
return store
.dispatch(actionsObjects.shareObject("a.txt", 1, 0, 0))
.then(() => {
const actions = store.getActions()
expect(actions).toEqual(expectedActions)
})
})
it("creates alert/SET when shareObject is failed", () => {
const store = mockStore({
buckets: { currentBucket: "" },
objects: { currentPrefix: "pre1/" }
objects: { currentPrefix: "pre1/" },
browser: { serverInfo: {} },
})
const expectedActions = [
{


@@ -19,20 +19,20 @@ import history from "../history"
import {
sortObjectsByName,
sortObjectsBySize,
sortObjectsByDate
sortObjectsByDate,
} from "../utils"
import { getCurrentBucket } from "../buckets/selectors"
import { getCurrentPrefix, getCheckedList } from "./selectors"
import * as alertActions from "../alert/actions"
import * as bucketActions from "../buckets/actions"
import {
minioBrowserPrefix,
SORT_BY_NAME,
SORT_BY_SIZE,
SORT_BY_LAST_MODIFIED,
SORT_ORDER_ASC,
SORT_ORDER_DESC
SORT_ORDER_DESC,
} from "../constants"
import { getServerInfo, hasServerPublicDomain } from '../browser/selectors'
export const SET_LIST = "objects/SET_LIST"
export const RESET_LIST = "objects/RESET_LIST"
@@ -48,35 +48,35 @@ export const CHECKED_LIST_REMOVE = "objects/CHECKED_LIST_REMOVE"
export const CHECKED_LIST_RESET = "objects/CHECKED_LIST_RESET"
export const SET_LIST_LOADING = "objects/SET_LIST_LOADING"
export const setList = objects => ({
export const setList = (objects) => ({
type: SET_LIST,
objects
objects,
})
export const resetList = () => ({
type: RESET_LIST
type: RESET_LIST,
})
export const setListLoading = listLoading => ({
export const setListLoading = (listLoading) => ({
type: SET_LIST_LOADING,
listLoading
listLoading,
})
export const fetchObjects = () => {
return function(dispatch, getState) {
return function (dispatch, getState) {
dispatch(resetList())
const {
buckets: { currentBucket },
objects: { currentPrefix }
objects: { currentPrefix },
} = getState()
if (currentBucket) {
dispatch(setListLoading(true))
return web
.ListObjects({
bucketName: currentBucket,
prefix: currentPrefix
prefix: currentPrefix,
})
.then(res => {
.then((res) => {
// we need to check if the bucket name and prefix are the same as
// when the request was made before updating the displayed objects
if (
@@ -85,10 +85,10 @@ export const fetchObjects = () => {
) {
let objects = []
if (res.objects) {
objects = res.objects.map(object => {
objects = res.objects.map((object) => {
return {
...object,
name: object.name.replace(currentPrefix, "")
name: object.name.replace(currentPrefix, ""),
}
})
}
@@ -104,13 +104,13 @@ export const fetchObjects = () => {
dispatch(setListLoading(false))
}
})
.catch(err => {
.catch((err) => {
if (web.LoggedIn()) {
dispatch(
alertActions.set({
type: "danger",
message: err.message,
autoClear: true
autoClear: true,
})
)
dispatch(resetList())
@@ -123,8 +123,8 @@ export const fetchObjects = () => {
}
}
export const sortObjects = sortBy => {
return function(dispatch, getState) {
export const sortObjects = (sortBy) => {
return function (dispatch, getState) {
const { objects } = getState()
let sortOrder = SORT_ORDER_ASC
// Reverse sort order if the list is already sorted on same field
@@ -149,18 +149,18 @@ const sortObjectsList = (list, sortBy, sortOrder) => {
}
}
export const setSortBy = sortBy => ({
export const setSortBy = (sortBy) => ({
type: SET_SORT_BY,
sortBy
sortBy,
})
export const setSortOrder = sortOrder => ({
export const setSortOrder = (sortOrder) => ({
type: SET_SORT_ORDER,
sortOrder
sortOrder,
})
export const selectPrefix = prefix => {
return function(dispatch, getState) {
export const selectPrefix = (prefix) => {
return function (dispatch, getState) {
dispatch(setCurrentPrefix(prefix))
dispatch(fetchObjects())
dispatch(resetCheckedList())
@@ -169,49 +169,49 @@ export const selectPrefix = prefix => {
}
}
export const setCurrentPrefix = prefix => {
export const setCurrentPrefix = (prefix) => {
return {
type: SET_CURRENT_PREFIX,
prefix
prefix,
}
}
export const setPrefixWritable = prefixWritable => ({
export const setPrefixWritable = (prefixWritable) => ({
type: SET_PREFIX_WRITABLE,
prefixWritable
prefixWritable,
})
export const deleteObject = object => {
return function(dispatch, getState) {
export const deleteObject = (object) => {
return function (dispatch, getState) {
const currentBucket = getCurrentBucket(getState())
const currentPrefix = getCurrentPrefix(getState())
const objectName = `${currentPrefix}${object}`
return web
.RemoveObject({
bucketName: currentBucket,
objects: [objectName]
objects: [objectName],
})
.then(() => {
dispatch(removeObject(object))
})
.catch(e => {
.catch((e) => {
dispatch(
alertActions.set({
type: "danger",
message: e.message
message: e.message,
})
)
})
}
}
export const removeObject = object => ({
export const removeObject = (object) => ({
type: REMOVE,
object
object,
})
export const deleteCheckedObjects = () => {
return function(dispatch, getState) {
return function (dispatch, getState) {
const checkedObjects = getCheckedList(getState())
for (let i = 0; i < checkedObjects.length; i++) {
dispatch(deleteObject(checkedObjects[i]))
@@ -221,33 +221,52 @@ export const deleteCheckedObjects = () => {
}
export const shareObject = (object, days, hours, minutes) => {
return function(dispatch, getState) {
return function (dispatch, getState) {
const hasServerDomain = hasServerPublicDomain(getState())
const currentBucket = getCurrentBucket(getState())
const currentPrefix = getCurrentPrefix(getState())
const objectName = `${currentPrefix}${object}`
const expiry = days * 24 * 60 * 60 + hours * 60 * 60 + minutes * 60
if (web.LoggedIn()) {
return web
.PresignedGet({
host: location.host,
bucket: currentBucket,
object: objectName,
expiry: expiry
.GetBucketPolicy({ bucketName: currentBucket, prefix: currentPrefix })
.catch(() => ({ policy: null }))
.then(({ policy }) => {
if (hasServerDomain && ['readonly', 'readwrite'].includes(policy)) {
const domain = getServerInfo(getState()).info.domains[0]
const url = `${domain}/${currentBucket}/${encodeURI(objectName)}`
dispatch(showShareObject(object, url, false))
dispatch(
alertActions.set({
type: "success",
message: "Object shared."
})
)
} else {
return web
.PresignedGet({
host: location.host,
bucket: currentBucket,
object: objectName,
expiry: expiry
})
}
})
.then(obj => {
.then((obj) => {
if (!obj) return
dispatch(showShareObject(object, obj.url))
dispatch(
alertActions.set({
type: "success",
message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`
message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`,
})
)
})
.catch(err => {
.catch((err) => {
dispatch(
alertActions.set({
type: "danger",
message: err.message
message: err.message,
})
)
})
@@ -265,29 +284,29 @@ export const shareObject = (object, days, hours, minutes) => {
dispatch(
alertActions.set({
type: "success",
message: `Object shared.`
message: `Object shared.`,
})
)
}
}
}
export const showShareObject = (object, url) => ({
export const showShareObject = (object, url, showExpiryDate = true) => ({
type: SET_SHARE_OBJECT,
show: true,
object,
url
url,
showExpiryDate,
})
export const hideShareObject = (object, url) => ({
type: SET_SHARE_OBJECT,
show: false,
object: "",
url: ""
url: "",
})
export const downloadObject = object => {
return function(dispatch, getState) {
export const getObjectURL = (object, callback) => {
return function (dispatch, getState) {
const currentBucket = getCurrentBucket(getState())
const currentPrefix = getCurrentPrefix(getState())
const objectName = `${currentPrefix}${object}`
@@ -295,52 +314,73 @@ export const downloadObject = object => {
if (web.LoggedIn()) {
return web
.CreateURLToken()
.then(res => {
const url = `${
window.location.origin
}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${
res.token
}`
window.location = url
.then((res) => {
const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${res.token}`
callback(url)
})
.catch(err => {
.catch((err) => {
dispatch(
alertActions.set({
type: "danger",
message: err.message
message: err.message,
})
)
})
} else {
const url = `${
window.location.origin
}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
callback(url)
}
}
}
export const downloadObject = (object) => {
return function (dispatch, getState) {
const currentBucket = getCurrentBucket(getState())
const currentPrefix = getCurrentPrefix(getState())
const objectName = `${currentPrefix}${object}`
const encObjectName = encodeURI(objectName)
if (web.LoggedIn()) {
return web
.CreateURLToken()
.then((res) => {
const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${res.token}`
window.location = url
})
.catch((err) => {
dispatch(
alertActions.set({
type: "danger",
message: err.message,
})
)
})
} else {
const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
window.location = url
}
}
}
export const checkObject = object => ({
export const checkObject = (object) => ({
type: CHECKED_LIST_ADD,
object
object,
})
export const uncheckObject = object => ({
export const uncheckObject = (object) => ({
type: CHECKED_LIST_REMOVE,
object
object,
})
export const resetCheckedList = () => ({
type: CHECKED_LIST_RESET
type: CHECKED_LIST_RESET,
})
export const downloadCheckedObjects = () => {
return function(dispatch, getState) {
return function (dispatch, getState) {
const state = getState()
const req = {
bucketName: getCurrentBucket(state),
prefix: getCurrentPrefix(state),
objects: getCheckedList(state)
objects: getCheckedList(state),
}
if (!web.LoggedIn()) {
const requestUrl = location.origin + "/minio/zip?token="
@@ -348,17 +388,15 @@ export const downloadCheckedObjects = () => {
} else {
return web
.CreateURLToken()
.then(res => {
const requestUrl = `${
location.origin
}${minioBrowserPrefix}/zip?token=${res.token}`
.then((res) => {
const requestUrl = `${location.origin}${minioBrowserPrefix}/zip?token=${res.token}`
downloadZip(requestUrl, req, dispatch)
})
.catch(err =>
.catch((err) =>
dispatch(
alertActions.set({
type: "danger",
message: err.message
message: err.message,
})
)
)
@@ -374,11 +412,11 @@ const downloadZip = (url, req, dispatch) => {
xhr.open("POST", url, true)
xhr.responseType = "blob"
xhr.onload = function(e) {
xhr.onload = function (e) {
if (this.status == 200) {
dispatch(resetCheckedList())
var blob = new Blob([this.response], {
type: "octet/stream"
type: "octet/stream",
})
var blobUrl = window.URL.createObjectURL(blob)
var separator = req.prefix.length > 1 ? "-" : ""
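The net effect of the shareObject change above: when the server advertises a public domain and the bucket policy makes the object readable, the browser hands out a permanent unsigned link instead of a presigned, expiring URL. A sketch of a setup that exercises the new path (domain, alias, and bucket names are illustrative):
```sh
# Serve buckets under a public domain (value is illustrative).
export MINIO_DOMAIN=public.example.com
minio server /data &

# Make the bucket world-readable; the browser's Share action now
# returns https://public.example.com/test-public/<object> with no
# expiry instead of a presigned URL.
mc policy set download myminio/test-public
```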


@@ -89,7 +89,8 @@ export default (
shareObject: {
show: action.show,
object: action.object,
url: action.url
url: action.url,
showExpiryDate: action.showExpiryDate
}
}
case actionsObjects.CHECKED_LIST_ADD:

browser/staticcheck.conf Normal file

@@ -0,0 +1 @@
checks = ["all", "-ST1005", "-ST1000", "-SA4000", "-SA9004", "-SA1019", "-SA1008", "-U1000", "-ST1003", "-ST1018"]

File diff suppressed because one or more lines are too long


@@ -1,67 +0,0 @@
#!/bin/bash
# usage: ./benchcmp.sh <commit-sha1> <commit-sha2>
# Exit on any non zero return value on execution of a command.
set -e
# path of benchcmp.
benchcmp=${GOPATH}/bin/benchcmp
# function which runs the benchmark comparison.
RunBenchCmp () {
# Path for storing output of benchmark at commit 1.
commit1Bench=/tmp/minio-$1.bench
# Path for storing output of benchmark at commit 2.
commit2Bench=/tmp/minio-$2.bench
# switch to commit $1.
git checkout $1
# Check if the benchmark results for given commit 1 already exists.
# Benchmarks are time/resource consuming operations, run only if the the results doesn't exist.
if [[ ! -f $commit1Bench ]]
then
echo "Running benchmarks at $1"
go test -run=NONE -bench=. | tee $commit1Bench
fi
# get back to the commit from which it was started.
git checkout -
echo "Checking into commit $2"
# switch to commit $2
git checkout $2
# Check if the benchmark results for given commit 2 already exists.
# Benchmarks are time/resource consuming operations, run only if the the results doesn't exist.
if [[ ! -f $commit2Bench ]]
then
# Running benchmarks at $2.
echo "Running benchmarks at $2"
go test -run=NONE -bench=. | tee $commit2Bench
fi
# get back to the commit from which it was started.
git checkout -
# Comparing the benchmarks.
echo "Running benchmark comparison between $1 and $2 ..."
$benchcmp $commit1Bench $commit2Bench
echo "Done."
}
# check if 2 commit SHA's of snapshots of code for which benchmp has to be done is provided.
if [ ! $# -eq 2 ]
then
# exit if commit SHA's are not provided.
echo $#
echo "Need Commit SHA's of 2 snapshots to be supplied to run benchmark comparison."
exit 1
fi
# check if benchcmp exists.
if [[ -x "$benchcmp" ]]
then
RunBenchCmp $1 $2
else
# install benchcmp if doesnt't exist.
echo "fetching Benchcmp..."
go get -u golang.org/x/tools/cmd/benchcmp
echo "Done."
RunBenchCmp $1 $2
fi


@@ -1,14 +1,15 @@
#!/bin/bash
set -e
# Enable tracing if set.
[ -n "$BASH_XTRACEFD" ] && set -ex
[ -n "$BASH_XTRACEFD" ] && set -x
function _init() {
## All binaries are static make sure to disable CGO.
export CGO_ENABLED=0
## List of architectures and OS to test cross compilation.
SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64"
SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386"
}
function _build() {
@@ -19,11 +20,11 @@ function _build() {
package=$(go list -f '{{.ImportPath}}')
printf -- "--> %15s:%s\n" "${osarch}" "${package}"
# Go build to build the binary.
# go build -trimpath to build the binary.
export GOOS=$os
export GOARCH=$arch
export GO111MODULE=on
go build -tags kqueue -o /dev/null
go build -trimpath -tags kqueue -o /dev/null
}
function main() {
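For reference, each iteration of the _build loop amounts to the one-liner below; running it by hand is a quick way to reproduce a single failing OS/arch pair (the pair shown is one of the newly added entries):
```sh
GO111MODULE=on CGO_ENABLED=0 GOOS=windows GOARCH=amd64 \
    go build -trimpath -tags kqueue -o /dev/null
```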


@@ -46,7 +46,10 @@ function main()
gw_pid="$(start_minio_gateway_s3)"
SERVER_ENDPOINT=127.0.0.1:24240 ENABLE_HTTPS=0 ACCESS_KEY=minio \
SECRET_KEY=minio123 MINT_MODE="full" /mint/entrypoint.sh
SECRET_KEY=minio123 MINT_MODE="full" /mint/entrypoint.sh \
aws-sdk-go aws-sdk-java aws-sdk-php aws-sdk-ruby awscli \
healthcheck mc minio-dotnet minio-js \
minio-py s3cmd s3select security
rv=$?
kill "$sr_pid"


@@ -1,5 +0,0 @@
#!/usr/bin/env bash
set -e
GO111MODULE=on CGO_ENABLED=0 go test -v -coverprofile=coverage.txt -covermode=atomic ./...

buildscripts/race.sh Executable file

@@ -0,0 +1,7 @@
#!/usr/bin/env bash
set -e
for d in $(go list ./... | grep -v browser); do
CGO_ENABLED=1 go test -v -race --timeout 50m "$d"
done
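The new script iterates over every Go package except the generated browser assets and runs its tests under the race detector, which is why cgo is enabled. It can be invoked directly or through the Makefile:
```sh
env bash buildscripts/race.sh   # equivalent to `make test-race`
```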


@@ -154,34 +154,6 @@ function run_test_erasure_sets() {
return "$rv"
}
function run_test_dist_erasure_sets_ipv6()
{
minio_pids=( $(start_minio_dist_erasure_sets_ipv6) )
export SERVER_ENDPOINT="[::1]:9000"
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
for pid in "${minio_pids[@]}"; do
kill "$pid"
done
sleep 3
if [ "$rv" -ne 0 ]; then
for i in $(seq 0 9); do
echo "server$i log:"
cat "$WORK_DIR/dist-minio-v6-900$i.log"
done
fi
for i in $(seq 0 9); do
rm -f "$WORK_DIR/dist-minio-v6-900$i.log"
done
return "$rv"
}
function run_test_zone_erasure_sets()
{
minio_pids=( $(start_minio_zone_erasure_sets) )

buildscripts/verify-healing.sh Executable file

@@ -0,0 +1,126 @@
#!/bin/bash
#
# MinIO Cloud Storage, (C) 2020 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e
set -E
set -o pipefail
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )
function start_minio_3_node() {
declare -a minio_pids
declare -a ARGS
export MINIO_ACCESS_KEY=minio
export MINIO_SECRET_KEY=minio123
start_port=$(shuf -i 10000-65000 -n 1)
for i in $(seq 1 3); do
ARGS+=("http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/")
done
"${MINIO[@]}" --address ":$[$start_port+1]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
minio_pids[0]=$!
disown "${minio_pids[0]}"
"${MINIO[@]}" --address ":$[$start_port+2]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
minio_pids[1]=$!
disown "${minio_pids[1]}"
"${MINIO[@]}" --address ":$[$start_port+3]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
minio_pids[2]=$!
disown "${minio_pids[2]}"
sleep "$1"
for pid in "${minio_pids[@]}"; do
if ! kill "$pid"; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
# forcibly killing, to proceed further properly.
kill -9 "$pid"
sleep 1 # wait 1sec per pid
done
}
function check_online() {
if grep -q 'Server switching to safe mode' ${WORK_DIR}/dist-minio-*.log; then
echo "1"
fi
}
function purge()
{
rm -rf "$1"
}
function __init__()
{
echo "Initializing environment"
mkdir -p "$WORK_DIR"
mkdir -p "$MINIO_CONFIG_DIR"
## version is purposefully set to '3' for minio to migrate configuration file
echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' > "$MINIO_CONFIG_DIR/config.json"
}
function perform_test() {
start_minio_3_node 60
echo "Testing Distributed Erasure setup healing of drives"
echo "Remove the contents of the disks belonging to '${1}' erasure set"
rm -rf ${WORK_DIR}/${1}/*/
start_minio_3_node 60
rv=$(check_online)
if [ "$rv" == "1" ]; then
pkill -9 minio
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
}
function main()
{
perform_test "2"
perform_test "1"
perform_test "3"
}
( __init__ "$@" && main "$@" )
rv=$?
purge "$WORK_DIR"
exit "$rv"


@@ -1,5 +1,5 @@
/*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
* MinIO Cloud Storage, (C) 2018-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,11 +18,14 @@ package cmd
import (
"encoding/xml"
"io"
"net/http"
"net/url"
"github.com/gorilla/mux"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/policy"
"github.com/minio/minio/pkg/bucket/policy"
)
// Data types used for returning dummy access control
@@ -50,6 +53,71 @@ type accessControlPolicy struct {
} `xml:"AccessControlList"`
}
// PutBucketACLHandler - PUT Bucket ACL
// -----------------
// This operation uses the ACL subresource
// to set ACL for a bucket, this is a dummy call
// only responds success if the ACL is private.
func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketACL")
defer logger.AuditLog(w, r, "PutBucketACL", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
// Allow putBucketACL if policy action is set, since this is a dummy call
// we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Before proceeding validate if bucket exists.
_, err := objAPI.GetBucketInfo(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
aclHeader := r.Header.Get(xhttp.AmzACL)
if aclHeader == "" {
acl := &accessControlPolicy{}
if err = xmlDecoder(r.Body, acl, r.ContentLength); err != nil {
if err == io.EOF {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingSecurityHeader),
r.URL, guessIsBrowserReq(r))
return
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if len(acl.AccessControlList.Grants) == 0 {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
return
}
if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
return
}
}
if aclHeader != "" && aclHeader != "private" {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
return
}
w.(http.Flusher).Flush()
}
// GetBucketACLHandler - GET Bucket ACL
// -----------------
// This operation uses the ACL
@@ -99,6 +167,71 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
w.(http.Flusher).Flush()
}
// PutObjectACLHandler - PUT Object ACL
// -----------------
// This operation uses the ACL subresource
// to set ACL for a bucket, this is a dummy call
// only responds success if the ACL is private.
func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutObjectACL")
defer logger.AuditLog(w, r, "PutObjectACL", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := url.PathUnescape(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
// Allow putObjectACL if policy action is set, since this is a dummy call
// we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Before proceeding validate if object exists.
_, err = objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
aclHeader := r.Header.Get(xhttp.AmzACL)
if aclHeader == "" {
acl := &accessControlPolicy{}
if err = xmlDecoder(r.Body, acl, r.ContentLength); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if len(acl.AccessControlList.Grants) == 0 {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
return
}
if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
return
}
}
if aclHeader != "" && aclHeader != "private" {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
return
}
w.(http.Flusher).Flush()
}
// GetObjectACLHandler - GET Object ACL
// -----------------
// This operation uses the ACL
@@ -110,7 +243,11 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.
vars := mux.Vars(r)
bucket := vars["bucket"]
object := vars["object"]
object, err := url.PathUnescape(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
objAPI := api.ObjectAPI()
if objAPI == nil {
@@ -126,7 +263,7 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.
}
// Before proceeding validate if object exists.
_, err := objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
_, err = objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
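Seen from a client, the dummy handlers accept only a private canned ACL and reject everything else. A hedged example with the AWS CLI (endpoint, bucket name, and configured credentials are illustrative):
```sh
# Accepted: the only ACL the dummy handler treats as valid.
aws --endpoint-url http://127.0.0.1:9000 s3api put-bucket-acl \
    --bucket mybucket --acl private

# Rejected with an invalid-argument error: any non-private ACL.
aws --endpoint-url http://127.0.0.1:9000 s3api put-bucket-acl \
    --bucket mybucket --acl public-read
```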


@@ -35,50 +35,47 @@ import (
"github.com/minio/minio/cmd/config/storageclass"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/madmin"
)
func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) ObjectLayer {
func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) (auth.Credentials, ObjectLayer) {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return nil
return auth.Credentials{}, nil
}
// Validate request signature.
_, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
cred, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return nil
return cred, nil
}
return objectAPI
return cred, objectAPI
}
// DelConfigKVHandler - DELETE /minio/admin/v2/del-config-kv
// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DelConfigKVHandler")
ctx := newContext(r, w, "DeleteConfigKV")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "DeleteConfigKV", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
password := globalActiveCred.SecretKey
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
@@ -103,28 +100,24 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
}
}
// SetConfigKVHandler - PUT /minio/admin/v2/set-config-kv
// SetConfigKVHandler - PUT /minio/admin/v3/set-config-kv
func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetConfigKVHandler")
ctx := newContext(r, w, "SetConfigKV")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "SetConfigKV", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
password := globalActiveCred.SecretKey
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
@@ -162,17 +155,19 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
// Make sure to write backend is encrypted
if globalConfigEncrypted {
saveConfig(context.Background(), objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
}
writeSuccessResponseHeadersOnly(w)
}
// GetConfigKVHandler - GET /minio/admin/v2/get-config-kv?key={key}
// GetConfigKVHandler - GET /minio/admin/v3/get-config-kv?key={key}
func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetConfigKVHandler")
ctx := newContext(r, w, "GetConfigKV")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "GetConfigKV", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
@@ -195,7 +190,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
return
}
- password := globalActiveCred.SecretKey
+ password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, buf.Bytes())
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -206,9 +201,11 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
}
func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ClearConfigHistoryKVHandler")
ctx := newContext(r, w, "ClearConfigHistoryKV")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "ClearConfigHistoryKV", mustGetClaimsFromToken(r))
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
@@ -241,9 +238,11 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *
// RestoreConfigHistoryKVHandler - restores a config with KV settings for the given KV id.
func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RestoreConfigHistoryKVHandler")
ctx := newContext(r, w, "RestoreConfigHistoryKV")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "RestoreConfigHistoryKV", mustGetClaimsFromToken(r))
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
@@ -287,9 +286,11 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
// ListConfigHistoryKVHandler - lists all the KV ids.
func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListConfigHistoryKVHandler")
ctx := newContext(r, w, "ListConfigHistoryKV")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "ListConfigHistoryKV", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
@@ -313,7 +314,7 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h
return
}
- password := globalActiveCred.SecretKey
+ password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -323,11 +324,13 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h
writeSuccessResponseJSON(w, econfigData)
}
- // HelpConfigKVHandler - GET /minio/admin/v2/help-config-kv?subSys={subSys}&key={key}
+ // HelpConfigKVHandler - GET /minio/admin/v3/help-config-kv?subSys={subSys}&key={key}
func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "HelpConfigKVHandler")
ctx := newContext(r, w, "HelpConfigKV")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "HelpHistoryKV", mustGetClaimsFromToken(r))
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
@@ -349,28 +352,24 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req
w.(http.Flusher).Flush()
}
- // SetConfigHandler - PUT /minio/admin/v2/config
+ // SetConfigHandler - PUT /minio/admin/v3/config
func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetConfigHandler")
ctx := newContext(r, w, "SetConfig")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "SetConfig", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
- // Deny if WORM is enabled
- if globalWORMEnabled {
- writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
- return
- }
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
- password := globalActiveCred.SecretKey
+ password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
@@ -403,18 +402,20 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
// Make sure the write to the backend is encrypted
if globalConfigEncrypted {
- saveConfig(context.Background(), objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
+ saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
}
writeSuccessResponseHeadersOnly(w)
}
- // GetConfigHandler - GET /minio/admin/v2/config
+ // GetConfigHandler - GET /minio/admin/v3/config
// Get config.json of this minio setup.
func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetConfigHandler")
ctx := newContext(r, w, "GetConfig")
objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "GetConfig", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
@@ -471,7 +472,7 @@ func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Reques
}
}
- password := globalActiveCred.SecretKey
+ password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, []byte(s.String()))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

cmd/admin-handlers-quota.go (new file, 159 lines)

@@ -0,0 +1,159 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"io/ioutil"
"net/http"
"path"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/env"
iampolicy "github.com/minio/minio/pkg/iam/policy"
)
const (
bucketQuotaConfigFile = "quota.json"
)
// PutBucketQuotaConfigHandler - PUT Bucket quota configuration.
// ----------
// Places a quota configuration on the specified bucket. The quota
// specified in the quota configuration will be applied by default
// to enforce total quota for the specified bucket.
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketQuotaConfig")
defer logger.AuditLog(w, r, "PutBucketQuotaConfig", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
// Turn off quota commands if data usage info is unavailable.
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminBucketQuotaDisabled), r.URL)
return
}
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
defer r.Body.Close()
data, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
quotaCfg, err := parseBucketQuota(data)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configFile := path.Join(bucketConfigPrefix, bucket, bucketQuotaConfigFile)
if err = saveConfig(ctx, objectAPI, configFile, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if quotaCfg.Quota > 0 {
globalBucketQuotaSys.Set(bucket, quotaCfg)
globalNotificationSys.PutBucketQuotaConfig(ctx, bucket, quotaCfg)
} else {
globalBucketQuotaSys.Remove(bucket)
globalNotificationSys.RemoveBucketQuotaConfig(ctx, bucket)
}
// Write success response.
writeSuccessResponseHeadersOnly(w)
}
// GetBucketQuotaConfigHandler - gets bucket quota configuration
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketQuotaConfig")
defer logger.AuditLog(w, r, "GetBucketQuotaConfig", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
configFile := path.Join(bucketConfigPrefix, bucket, bucketQuotaConfigFile)
configData, err := readConfig(ctx, objectAPI, configFile)
if err != nil {
if err != errConfigNotFound {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, BucketQuotaConfigNotFound{Bucket: bucket}), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, configData)
}
// RemoveBucketQuotaConfigHandler - removes Bucket quota configuration.
// ----------
// Removes quota configuration on the specified bucket.
func (a adminAPIHandlers) RemoveBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveBucketQuotaConfig")
defer logger.AuditLog(w, r, "RemoveBucketQuotaConfig", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
configFile := path.Join(bucketConfigPrefix, bucket, bucketQuotaConfigFile)
if err := deleteConfig(ctx, objectAPI, configFile); err != nil {
if err != errConfigNotFound {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, BucketQuotaConfigNotFound{Bucket: bucket}), r.URL)
return
}
globalBucketQuotaSys.Remove(bucket)
globalNotificationSys.RemoveBucketQuotaConfig(ctx, bucket)
// Write success response.
writeSuccessNoContent(w)
}


@@ -1,5 +1,5 @@
/*
- * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -51,21 +51,17 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
return objectAPI, cred
}
- // RemoveUser - DELETE /minio/admin/v2/remove-user?accessKey=<access_key>
+ // RemoveUser - DELETE /minio/admin/v3/remove-user?accessKey=<access_key>
func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveUser")
defer logger.AuditLog(w, r, "RemoveUser", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeleteUserAdminAction)
if objectAPI == nil {
return
}
- // Deny if WORM is enabled
- if globalWORMEnabled {
- writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
- return
- }
vars := mux.Vars(r)
accessKey := vars["accessKey"]
@@ -93,15 +89,19 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
}
}
- // ListUsers - GET /minio/admin/v2/list-users
+ // ListUsers - GET /minio/admin/v3/list-users
func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListUsers")
- objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
+ defer logger.AuditLog(w, r, "ListUsers", mustGetClaimsFromToken(r))
+ objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
if objectAPI == nil {
return
}
+ password := cred.SecretKey
allCredentials, err := globalIAMSys.ListUsers()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -114,7 +114,6 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
return
}
- password := globalActiveCred.SecretKey
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -124,10 +123,12 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseJSON(w, econfigData)
}
- // GetUserInfo - GET /minio/admin/v2/user-info
+ // GetUserInfo - GET /minio/admin/v3/user-info
func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetUserInfo")
defer logger.AuditLog(w, r, "GetUserInfo", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetUserAdminAction)
if objectAPI == nil {
return
@@ -151,10 +152,12 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseJSON(w, data)
}
- // UpdateGroupMembers - PUT /minio/admin/v2/update-group-members
+ // UpdateGroupMembers - PUT /minio/admin/v3/update-group-members
func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "UpdateGroupMembers")
defer logger.AuditLog(w, r, "UpdateGroupMembers", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AddUserToGroupAdminAction)
if objectAPI == nil {
return
@@ -194,10 +197,12 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
}
}
- // GetGroup - /minio/admin/v2/group?group=mygroup1
+ // GetGroup - /minio/admin/v3/group?group=mygroup1
func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetGroup")
defer logger.AuditLog(w, r, "GetGroup", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetGroupAdminAction)
if objectAPI == nil {
return
@@ -221,10 +226,12 @@ func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseJSON(w, body)
}
- // ListGroups - GET /minio/admin/v2/groups
+ // ListGroups - GET /minio/admin/v3/groups
func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListGroups")
defer logger.AuditLog(w, r, "ListGroups", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListGroupsAdminAction)
if objectAPI == nil {
return
@@ -245,10 +252,12 @@ func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseJSON(w, body)
}
- // SetGroupStatus - PUT /minio/admin/v2/set-group-status?group=mygroup1&status=enabled
+ // SetGroupStatus - PUT /minio/admin/v3/set-group-status?group=mygroup1&status=enabled
func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetGroupStatus")
defer logger.AuditLog(w, r, "SetGroupStatus", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableGroupAdminAction)
if objectAPI == nil {
return
@@ -280,21 +289,17 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
}
}
- // SetUserStatus - PUT /minio/admin/v2/set-user-status?accessKey=<access_key>&status=[enabled|disabled]
+ // SetUserStatus - PUT /minio/admin/v3/set-user-status?accessKey=<access_key>&status=[enabled|disabled]
func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetUserStatus")
defer logger.AuditLog(w, r, "SetUserStatus", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableUserAdminAction)
if objectAPI == nil {
return
}
- // Deny if WORM is enabled
- if globalWORMEnabled {
- writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
- return
- }
vars := mux.Vars(r)
accessKey := vars["accessKey"]
status := vars["status"]
@@ -319,21 +324,17 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
}
}
- // AddUser - PUT /minio/admin/v2/add-user?accessKey=<access_key>
+ // AddUser - PUT /minio/admin/v3/add-user?accessKey=<access_key>
func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddUser")
defer logger.AuditLog(w, r, "AddUser", mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.CreateUserAdminAction)
if objectAPI == nil {
return
}
- // Deny if WORM is enabled
- if globalWORMEnabled {
- writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
- return
- }
vars := mux.Vars(r)
accessKey := vars["accessKey"]
@@ -378,16 +379,322 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
}
}
- // InfoCannedPolicy - GET /minio/admin/v2/info-canned-policy?name={policyName}
- func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
- ctx := newContext(r, w, "InfoCannedPolicy")
// AddServiceAccount - PUT /minio/admin/v3/add-service-account
func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddServiceAccount")
defer logger.AuditLog(w, r, "AddServiceAccount", mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
var createReq madmin.AddServiceAccountReq
if err = json.Unmarshal(reqBytes, &createReq); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, createReq.Policy)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to reload the new service account
for _, nerr := range globalNotificationSys.LoadServiceAccount(newCred.AccessKey) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
var createResp = madmin.AddServiceAccountResp{
Credentials: auth.Credentials{
AccessKey: newCred.AccessKey,
SecretKey: newCred.SecretKey,
},
}
data, err := json.Marshal(createResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
// ListServiceAccounts - GET /minio/admin/v3/list-service-accounts
func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListServiceAccounts")
defer logger.AuditLog(w, r, "ListServiceAccounts", mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Disallow listing service accounts by the root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, parentUser)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var listResp = madmin.ListServiceAccountsResp{
Accounts: serviceAccounts,
}
data, err := json.Marshal(listResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
// DeleteServiceAccount - DELETE /minio/admin/v3/delete-service-account
func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteServiceAccount")
defer logger.AuditLog(w, r, "DeleteServiceAccount", mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Disallow deleting service accounts by the root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
serviceAccount := mux.Vars(r)["accessKey"]
if serviceAccount == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
return
}
user, err := globalIAMSys.GetServiceAccountParent(ctx, serviceAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
if parentUser != user || user == "" {
// The service account either does not exist or belongs to a
// different user; return a not-found error in both cases to
// mitigate brute-force enumeration of access keys.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServiceAccountNotFound), r.URL)
return
}
err = globalIAMSys.DeleteServiceAccount(ctx, serviceAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessNoContent(w)
}
// AccountUsageInfoHandler returns per-bucket usage and access information for the requesting account
func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AccountUsageInfo")
defer logger.AuditLog(w, r, "AccountUsageInfo", mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Set prefix value for "s3:prefix" policy conditionals.
r.Header.Set("prefix", "")
// Set delimiter value for "s3:delimiter" policy conditionals.
r.Header.Set("delimiter", SlashSeparator)
isAllowedAccess := func(bucketName string) (rd, wr bool) {
// Use the following trick to filter in place
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
rd = true
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.PutObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
wr = true
}
return rd, wr
}
buckets, err := objectAPI.ListBuckets(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Load the latest calculated data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil {
logger.LogIf(ctx, err)
}
accountName := cred.AccessKey
if cred.ParentUser != "" {
accountName = cred.ParentUser
}
acctInfo := madmin.AccountUsageInfo{
AccountName: accountName,
}
for _, bucket := range buckets {
rd, wr := isAllowedAccess(bucket.Name)
if rd || wr {
var size uint64
// Fetch the data usage of the current bucket
if !dataUsageInfo.LastUpdate.IsZero() && len(dataUsageInfo.BucketsSizes) > 0 {
size = dataUsageInfo.BucketsSizes[bucket.Name]
}
acctInfo.Buckets = append(acctInfo.Buckets, madmin.BucketUsageInfo{
Name: bucket.Name,
Created: bucket.Created,
Size: size,
Access: madmin.AccountAccess{
Read: rd,
Write: wr,
},
})
}
}
usageInfoJSON, err := json.Marshal(acctInfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, usageInfoJSON)
}
// InfoCannedPolicyV2 - GET /minio/admin/v2/info-canned-policy?name={policyName}
func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicyV2")
defer logger.AuditLog(w, r, "InfoCannedPolicyV2", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
return
}
- data, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
+ policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
data, err := json.Marshal(policy)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -397,9 +704,32 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
w.(http.Flusher).Flush()
}
- // ListCannedPolicies - GET /minio/admin/v2/list-canned-policies
- func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
- ctx := newContext(r, w, "ListCannedPolicies")
// InfoCannedPolicy - GET /minio/admin/v3/info-canned-policy?name={policyName}
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicy")
defer logger.AuditLog(w, r, "InfoCannedPolicy", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
return
}
policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
json.NewEncoder(w).Encode(policy)
w.(http.Flusher).Flush()
}
// ListCannedPoliciesV2 - GET /minio/admin/v2/list-canned-policies
func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPoliciesV2")
defer logger.AuditLog(w, r, "ListCannedPoliciesV2", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
@@ -412,7 +742,16 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
return
}
- if err = json.NewEncoder(w).Encode(policies); err != nil {
+ policyMap := make(map[string][]byte, len(policies))
+ for k, p := range policies {
+ var err error
+ policyMap[k], err = json.Marshal(p)
+ if err != nil {
+ logger.LogIf(ctx, err)
+ continue
+ }
+ }
+ if err = json.NewEncoder(w).Encode(policyMap); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -420,10 +759,46 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
w.(http.Flusher).Flush()
}
- // RemoveCannedPolicy - DELETE /minio/admin/v2/remove-canned-policy?name=<policy_name>
+ // ListCannedPolicies - GET /minio/admin/v3/list-canned-policies
func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPolicies")
defer logger.AuditLog(w, r, "ListCannedPolicies", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
}
policies, err := globalIAMSys.ListPolicies()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var newPolicies = make(map[string]iampolicy.Policy)
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {
logger.LogIf(ctx, err)
continue
}
newPolicies[name] = p
}
if err = json.NewEncoder(w).Encode(newPolicies); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
}
// RemoveCannedPolicy - DELETE /minio/admin/v3/remove-canned-policy?name=<policy_name>
func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveCannedPolicy")
defer logger.AuditLog(w, r, "RemoveCannedPolicy", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeletePolicyAdminAction)
if objectAPI == nil {
return
@@ -432,12 +807,6 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
vars := mux.Vars(r)
policyName := vars["name"]
- // Deny if WORM is enabled
- if globalWORMEnabled {
- writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
- return
- }
if err := globalIAMSys.DeletePolicy(policyName); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -452,10 +821,12 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
}
}
- // AddCannedPolicy - PUT /minio/admin/v2/add-canned-policy?name=<policy_name>
+ // AddCannedPolicy - PUT /minio/admin/v3/add-canned-policy?name=<policy_name>
func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddCannedPolicy")
defer logger.AuditLog(w, r, "AddCannedPolicy", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.CreatePolicyAdminAction)
if objectAPI == nil {
return
@@ -464,12 +835,6 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
vars := mux.Vars(r)
policyName := vars["name"]
- // Deny if WORM is enabled
- if globalWORMEnabled {
- writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
- return
- }
// Error out if Content-Length is missing.
if r.ContentLength <= 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
@@ -484,7 +849,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
iamPolicy, err := iampolicy.ParseConfig(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
- writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL)
+ writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -508,10 +873,12 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
}
}
- // SetPolicyForUserOrGroup - PUT /minio/admin/v2/set-policy?policy=xxx&user-or-group=?[&is-group]
+ // SetPolicyForUserOrGroup - PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetPolicyForUserOrGroup")
defer logger.AuditLog(w, r, "SetPolicyForUserOrGroup", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AttachPolicyAdminAction)
if objectAPI == nil {
return
@@ -522,15 +889,9 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
entityName := vars["userOrGroup"]
isGroup := vars["isGroup"] == "true"
- // Deny if WORM is enabled
- if globalWORMEnabled {
- writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
- return
- }
if !isGroup {
ok, err := globalIAMSys.IsTempUser(entityName)
- if err != nil {
+ if err != nil && err != errNoSuchUser {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

(one file diff suppressed because it is too large to display)

@@ -43,12 +43,17 @@ type adminXLTestBed struct {
// prepareAdminXLTestBed - helper function that setups a single-node
// XL backend for admin-handler tests.
- func prepareAdminXLTestBed() (*adminXLTestBed, error) {
+ func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) {
// reset global variables to start afresh.
resetTestGlobals()
// Set globalIsXL to indicate that the setup uses an erasure
// code backend.
globalIsXL = true
// Initializing objectLayer for HealFormatHandler.
- objLayer, xlDirs, xlErr := initTestXLObjLayer()
+ objLayer, xlDirs, xlErr := initTestXLObjLayer(ctx)
if xlErr != nil {
return nil, xlErr
}
@@ -63,21 +68,12 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
globalEndpoints = mustGetZoneEndpoints(xlDirs...)
- // Set globalIsXL to indicate that the setup uses an erasure
- // code backend.
- globalIsXL = true
- // Init global heal state
- if globalIsXL {
- globalAllHealState = initHealState()
- }
globalConfigSys = NewConfigSys()
globalIAMSys = NewIAMSys()
- globalIAMSys.Init(objLayer)
+ globalIAMSys.Init(ctx, objLayer)
- buckets, err := objLayer.ListBuckets(context.Background())
+ buckets, err := objLayer.ListBuckets(ctx)
if err != nil {
return nil, err
}
@@ -90,7 +86,7 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
// Setup admin mgmt REST API handlers.
adminRouter := mux.NewRouter()
- registerAdminRouter(adminRouter, true, true)
+ registerAdminRouter(adminRouter, true, true, false)
return &adminXLTestBed{
xlDirs: xlDirs,
@@ -108,20 +104,20 @@ func (atb *adminXLTestBed) TearDown() {
// initTestObjLayer - Helper function to initialize an XL-based object
// layer and set globalObjectAPI.
- func initTestXLObjLayer() (ObjectLayer, []string, error) {
+ func initTestXLObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
xlDirs, err := getRandomDisks(16)
if err != nil {
return nil, nil, err
}
endpoints := mustGetNewEndpoints(xlDirs...)
- format, err := waitForFormatXL(true, endpoints, 1, 16, "")
+ storageDisks, format, err := waitForFormatXL(true, endpoints, 1, 1, 16, "")
if err != nil {
removeRoots(xlDirs)
return nil, nil, err
}
globalPolicySys = NewPolicySys()
- objLayer, err := newXLSets(endpoints, format, 1, 16)
+ objLayer, err := newXLSets(ctx, endpoints, storageDisks, format)
if err != nil {
return nil, nil, err
}
@@ -196,7 +192,10 @@ func getServiceCmdRequest(cmd cmdType, cred auth.Credentials) (*http.Request, er
// testServicesCmdHandler - parametrizes service subcommand tests on
// cmdType value.
func testServicesCmdHandler(cmd cmdType, t *testing.T) {
- adminTestBed, err := prepareAdminXLTestBed()
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ adminTestBed, err := prepareAdminXLTestBed(ctx)
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
@@ -264,10 +263,14 @@ func buildAdminRequest(queryVal url.Values, method, path string,
}
func TestAdminServerInfo(t *testing.T) {
- adminTestBed, err := prepareAdminXLTestBed()
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ adminTestBed, err := prepareAdminXLTestBed(ctx)
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
defer adminTestBed.TearDown()
// Initialize admin peers to make admin RPC calls.
@@ -318,12 +321,12 @@ func TestToAdminAPIErrCode(t *testing.T) {
// 3. Non-admin API specific error.
{
err: errDiskNotFound,
- expectedAPIErr: toAPIErrorCode(context.Background(), errDiskNotFound),
+ expectedAPIErr: toAPIErrorCode(GlobalContext, errDiskNotFound),
},
}
for i, test := range testCases {
- actualErr := toAdminAPIErrCode(context.Background(), test.err)
+ actualErr := toAdminAPIErrCode(GlobalContext, test.err)
if actualErr != test.expectedAPIErr {
t.Errorf("Test %d: Expected %v but received %v",
i+1, test.expectedAPIErr, actualErr)


@@ -60,9 +60,8 @@ const (
)
var (
- errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long")
- errHealPushStopNDiscard = fmt.Errorf("heal push stopped due to heal stop signal")
- errHealStopSignalled = fmt.Errorf("heal stop signaled")
+ errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long")
+ errHealStopSignalled = fmt.Errorf("heal stop signaled")
errFnHealFromAPIErr = func(ctx context.Context, err error) error {
apiErr := toAPIError(ctx, err)
@@ -73,10 +72,6 @@ var (
// healSequenceStatus - accumulated status of the heal sequence
type healSequenceStatus struct {
- // lock to update this structure as it is concurrently
- // accessed
- updateLock *sync.RWMutex
// summary and detail for failures
Summary healStatusSummary `json:"Summary"`
FailureDetail string `json:"Detail,omitempty"`
@@ -106,19 +101,17 @@ func initHealState() *allHealState {
healSeqMap: make(map[string]*healSequence),
}
- go healState.periodicHealSeqsClean()
+ go healState.periodicHealSeqsClean(GlobalContext)
return healState
}
- func (ahs *allHealState) periodicHealSeqsClean() {
+ func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
// Launch clean-up routine to remove this heal sequence (after
// it ends) from the global state after timeout has elapsed.
- ticker := time.NewTicker(time.Minute * 5)
- defer ticker.Stop()
for {
select {
- case <-ticker.C:
+ case <-time.After(time.Minute * 5):
now := UTCNow()
ahs.Lock()
for path, h := range ahs.healSeqMap {
@@ -127,7 +120,7 @@ func (ahs *allHealState) periodicHealSeqsClean() {
}
}
ahs.Unlock()
- case <-GlobalServiceDoneCh:
+ case <-ctx.Done():
// server could be restarting - need
// to exit immediately
return
@@ -183,7 +176,7 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
}
b, err := json.Marshal(&hsp)
- return b, toAdminAPIErr(context.Background(), err)
+ return b, toAdminAPIErr(GlobalContext, err)
}
// LaunchNewHealSequence - launches a background routine that performs
@@ -202,9 +195,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
existsAndLive := false
he, exists := ahs.getHealSequence(h.path)
if exists {
- if !he.hasEnded() || len(he.currentStatus.Items) > 0 {
- existsAndLive = true
- }
+ existsAndLive = !he.hasEnded()
}
if existsAndLive {
@@ -277,8 +268,8 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
}
// Take lock to access and update the heal-sequence
- h.currentStatus.updateLock.Lock()
- defer h.currentStatus.updateLock.Unlock()
+ h.mutex.Lock()
+ defer h.mutex.Unlock()
numItems := len(h.currentStatus.Items)
@@ -289,23 +280,27 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
lastResultIndex = h.currentStatus.Items[numItems-1].ResultIndex
}
- // After sending status to client, and before relinquishing
- // the updateLock, reset Item to nil and record the result
- // index sent to the client.
- defer func(i int64) {
- h.lastSentResultIndex = i
- h.currentStatus.Items = nil
- }(lastResultIndex)
+ h.lastSentResultIndex = lastResultIndex
jbytes, err := json.Marshal(h.currentStatus)
if err != nil {
h.currentStatus.Items = nil
logger.LogIf(h.ctx, err)
return nil, ErrInternalError
}
h.currentStatus.Items = nil
return jbytes, ErrNone
}
// healSource denotes single entity and heal option.
type healSource struct {
path string // entity path (format, buckets, objects) to heal
opts madmin.HealOpts // optional heal option overrides default setting
}
// healSequence - state for each heal sequence initiated on the
// server.
type healSequence struct {
@@ -315,12 +310,13 @@ type healSequence struct {
// path is just pathJoin(bucket, objPrefix)
path string
- // List of entities (format, buckets, objects) to heal
- sourceCh chan string
+ // A channel of entities (format, buckets, objects) to heal
+ sourceCh chan healSource
- // Report healing progress, false if this is a background
- // healing since currently there is no entity which will
- // receive realtime healing status
+ // A channel of entities with heal result
+ respCh chan healResult
+ // Report healing progress
reportProgress bool
// time at which heal sequence was started
@@ -345,33 +341,42 @@ type healSequence struct {
// completed
traverseAndHealDoneCh chan error
- // channel to signal heal sequence to stop (e.g. from the
- // heal-stop API)
- stopSignalCh chan struct{}
+ // canceler to cancel heal sequence.
+ cancelCtx context.CancelFunc
// the last result index sent to client
lastSentResultIndex int64
- // Number of total items scanned
- scannedItemsCount int64
+ // Number of total items scanned against item type
+ scannedItemsMap map[madmin.HealItemType]int64
+ // Number of total items healed against item type
+ healedItemsMap map[madmin.HealItemType]int64
+ // Number of total items where healing failed against endpoint and drive state
+ healFailedItemsMap map[string]int64
// The time of the last scan/heal activity
lastHealActivity time.Time
// Holds the request-info for logging
ctx context.Context
// used to lock this structure as it is concurrently accessed
mutex sync.RWMutex
}
// NewHealSequence - creates healSettings, assumes bucket and
// objPrefix are already validated.
- func newHealSequence(bucket, objPrefix, clientAddr string,
+ func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
numDisks int, hs madmin.HealOpts, forceStart bool) *healSequence {
reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
reqInfo.AppendTags("prefix", objPrefix)
- ctx := logger.SetReqInfo(context.Background(), reqInfo)
+ ctx, cancel := context.WithCancel(logger.SetReqInfo(ctx, reqInfo))
return &healSequence{
respCh: make(chan healResult),
bucket: bucket,
objPrefix: objPrefix,
path: pathJoin(bucket, objPrefix),
@@ -385,19 +390,92 @@ func newHealSequence(bucket, objPrefix, clientAddr string,
Summary: healNotStartedStatus,
HealSettings: hs,
NumDisks: numDisks,
- updateLock: &sync.RWMutex{},
},
traverseAndHealDoneCh: make(chan error),
- stopSignalCh: make(chan struct{}),
cancelCtx: cancel,
ctx: ctx,
scannedItemsMap: make(map[madmin.HealItemType]int64),
healedItemsMap: make(map[madmin.HealItemType]int64),
healFailedItemsMap: make(map[string]int64),
}
}
// resetHealStatusCounters - reset the healSequence status counters between
// each monthly background heal scanning activity.
// This is used only in case of Background healing scenario, where
// we use a single long running healSequence which reactively heals
// objects passed to the SourceCh.
func (h *healSequence) resetHealStatusCounters() {
h.mutex.Lock()
defer h.mutex.Unlock()
h.currentStatus.Items = []madmin.HealResultItem{}
h.lastSentResultIndex = 0
h.scannedItemsMap = make(map[madmin.HealItemType]int64)
h.healedItemsMap = make(map[madmin.HealItemType]int64)
h.healFailedItemsMap = make(map[string]int64)
}
// getScannedItemsCount - returns a count of all scanned items
func (h *healSequence) getScannedItemsCount() int64 {
var count int64
h.mutex.RLock()
defer h.mutex.RUnlock()
for _, v := range h.scannedItemsMap {
count = count + v
}
return count
}
// getScannedItemsMap - returns map of all scanned items against type
func (h *healSequence) getScannedItemsMap() map[madmin.HealItemType]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()
// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.scannedItemsMap))
for k, v := range h.scannedItemsMap {
retMap[k] = v
}
return retMap
}
// getHealedItemsMap - returns the map of all healed items against type
func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()
// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.healedItemsMap))
for k, v := range h.healedItemsMap {
retMap[k] = v
}
return retMap
}
// gethealFailedItemsMap - returns map of all items where heal failed against
// drive endpoint and status
func (h *healSequence) gethealFailedItemsMap() map[string]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()
// Make a copy before returning the value
retMap := make(map[string]int64, len(h.healFailedItemsMap))
for k, v := range h.healFailedItemsMap {
retMap[k] = v
}
return retMap
}
// isQuitting - determines if the heal sequence is quitting (due to an
// external signal)
func (h *healSequence) isQuitting() bool {
select {
- case <-h.stopSignalCh:
+ case <-h.ctx.Done():
return true
default:
return false
@@ -406,19 +484,15 @@ func (h *healSequence) isQuitting() bool {
// check if the heal sequence has ended
func (h *healSequence) hasEnded() bool {
- h.currentStatus.updateLock.RLock()
- summary := h.currentStatus.Summary
- h.currentStatus.updateLock.RUnlock()
- return summary == healStoppedStatus || summary == healFinishedStatus
+ h.mutex.RLock()
+ ended := len(h.currentStatus.Items) == 0 || h.currentStatus.Summary == healStoppedStatus || h.currentStatus.Summary == healFinishedStatus
+ h.mutex.RUnlock()
+ return ended
}
// stops the heal sequence - safe to call multiple times.
func (h *healSequence) stop() {
- select {
- case <-h.stopSignalCh:
- default:
- close(h.stopSignalCh)
- }
+ h.cancelCtx()
}
// pushHealResultItem - pushes a heal result item for consumption in
@@ -445,29 +519,27 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
var itemsLen int
for {
- h.currentStatus.updateLock.Lock()
+ h.mutex.Lock()
itemsLen = len(h.currentStatus.Items)
if itemsLen == maxUnconsumedHealResultItems {
- // unlock and wait to check again if we can push
- h.currentStatus.updateLock.Unlock()
- // wait for a second, or quit if an external
- // stop signal is received or the
- // unconsumedTimer fires.
select {
+ // Check after a second
case <-time.After(time.Second):
+ h.mutex.Unlock()
continue
- case <-h.stopSignalCh:
+ case <-h.ctx.Done():
+ h.mutex.Unlock()
// discard result and return.
- return errHealPushStopNDiscard
+ return errHealStopSignalled
- // Timeout if no results consumed for too
- // long.
+ // Timeout if no results consumed for too long.
case <-unconsumedTimer.C:
+ h.mutex.Unlock()
return errHealIdleTimeout
}
}
break
@@ -484,13 +556,7 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
h.currentStatus.Items = append(h.currentStatus.Items, r)
// release lock
- h.currentStatus.updateLock.Unlock()
- // This is a "safe" point for the heal sequence to quit if
- // signaled externally.
- if h.isQuitting() {
- return errHealStopSignalled
- }
+ h.mutex.Unlock()
return nil
}
@@ -504,10 +570,10 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
// the heal-sequence.
func (h *healSequence) healSequenceStart() {
// Set status as running
- h.currentStatus.updateLock.Lock()
+ h.mutex.Lock()
h.currentStatus.Summary = healRunningStatus
h.currentStatus.StartTime = UTCNow()
- h.currentStatus.updateLock.Unlock()
+ h.mutex.Unlock()
if h.sourceCh == nil {
go h.traverseAndHeal()
@@ -517,25 +583,27 @@ func (h *healSequence) healSequenceStart() {
select {
case err, ok := <-h.traverseAndHealDoneCh:
+ if !ok {
+ return
+ }
+ h.mutex.Lock()
h.endTime = UTCNow()
- h.currentStatus.updateLock.Lock()
- defer h.currentStatus.updateLock.Unlock()
// Heal traversal is complete.
- if ok {
+ if err == nil {
+ // heal traversal succeeded.
+ h.currentStatus.Summary = healFinishedStatus
+ } else {
// heal traversal had an error.
h.currentStatus.Summary = healStoppedStatus
h.currentStatus.FailureDetail = err.Error()
- } else {
- // heal traversal succeeded.
- h.currentStatus.Summary = healFinishedStatus
}
- case <-h.stopSignalCh:
+ h.mutex.Unlock()
+ case <-h.ctx.Done():
+ h.mutex.Lock()
h.endTime = UTCNow()
- h.currentStatus.updateLock.Lock()
h.currentStatus.Summary = healStoppedStatus
h.currentStatus.FailureDetail = errHealStopSignalled.Error()
- h.currentStatus.updateLock.Unlock()
+ h.mutex.Unlock()
// drain traverse channel so the traversal
// go-routine does not leak.
@@ -548,81 +616,108 @@ func (h *healSequence) healSequenceStart() {
}
}
- func (h *healSequence) queueHealTask(path string, healType madmin.HealItemType) error {
- var respCh = make(chan healResult)
- defer close(respCh)
+ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
// Send heal request
- globalBackgroundHealRoutine.queueHealTask(healTask{path: path, responseCh: respCh, opts: h.settings})
- // Wait for answer and push result to the client
- res := <-respCh
- if !h.reportProgress {
- return nil
+ task := healTask{
+ path: source.path,
+ opts: h.settings,
+ responseCh: h.respCh,
+ }
- res.result.Type = healType
- if res.err != nil {
- // Object might have been deleted, by the time heal
- // was attempted, we should ignore this object and return success.
- if isErrObjectNotFound(res.err) {
- return nil
- }
- // Only report object error
- if healType != madmin.HealItemObject {
+ if !source.opts.Equal(h.settings) {
+ task.opts = source.opts
+ }
+ globalBackgroundHealRoutine.queueHealTask(task)
+ select {
+ case res := <-h.respCh:
+ if !h.reportProgress {
+ // Object might have been deleted, by the time heal
+ // was attempted, we should ignore this object and
+ // return success.
+ if isErrObjectNotFound(res.err) {
+ return nil
+ }
+ h.mutex.Lock()
+ defer h.mutex.Unlock()
+ // Progress is not reported in case of background heal processing.
+ // Instead we increment relevant counter based on the heal result
+ // for prometheus reporting.
+ if res.err != nil {
+ for _, d := range res.result.After.Drives {
+ // For failed items we report the endpoint and drive state
+ // This will help users take corrective actions for drives
+ h.healFailedItemsMap[d.Endpoint+","+d.State]++
+ }
+ } else {
+ // Only object type reported for successful healing
+ h.healedItemsMap[res.result.Type]++
+ }
+ // Report caller of any failure
+ return res.err
+ }
- res.result.Detail = res.err.Error()
+ res.result.Type = healType
+ if res.err != nil {
+ // Object might have been deleted, by the time heal
+ // was attempted, we should ignore this object and return success.
+ if isErrObjectNotFound(res.err) {
+ return nil
+ }
+ // Only report object error
+ if healType != madmin.HealItemObject {
+ return res.err
+ }
+ res.result.Detail = res.err.Error()
+ }
+ return h.pushHealResultItem(res.result)
+ case <-h.ctx.Done():
+ return nil
+ }
- return h.pushHealResultItem(res.result)
}
func (h *healSequence) healItemsFromSourceCh() error {
- h.lastHealActivity = UTCNow()
- // Start healing the config prefix.
- if err := h.healMinioSysMeta(minioConfigPrefix)(); err != nil {
+ bucketsOnly := true // heal buckets only, not objects.
+ if err := h.healItems(bucketsOnly); err != nil {
logger.LogIf(h.ctx, err)
}
- // Start healing the bucket config prefix.
- if err := h.healMinioSysMeta(bucketConfigPrefix)(); err != nil {
- logger.LogIf(h.ctx, err)
- }
+ for {
+ select {
+ case source, ok := <-h.sourceCh:
+ if !ok {
+ return nil
+ }
+ var itemType madmin.HealItemType
+ switch {
+ case source.path == nopHeal:
+ continue
+ case source.path == SlashSeparator:
+ itemType = madmin.HealItemMetadata
+ case !strings.Contains(source.path, SlashSeparator):
+ itemType = madmin.HealItemBucket
+ default:
+ itemType = madmin.HealItemObject
+ }
- // Start healing the background ops prefix.
- if err := h.healMinioSysMeta(backgroundOpsMetaPrefix)(); err != nil {
- logger.LogIf(h.ctx, err)
- }
+ if err := h.queueHealTask(source, itemType); err != nil {
+ logger.LogIf(h.ctx, err)
+ }
- for path := range h.sourceCh {
- var itemType madmin.HealItemType
- switch {
- case path == nopHeal:
- continue
- case path == SlashSeparator:
- itemType = madmin.HealItemMetadata
- case !strings.Contains(path, SlashSeparator):
- itemType = madmin.HealItemBucket
- default:
- itemType = madmin.HealItemObject
+ h.scannedItemsMap[itemType]++
+ h.lastHealActivity = UTCNow()
+ case <-h.ctx.Done():
+ return nil
}
- if err := h.queueHealTask(path, itemType); err != nil {
- logger.LogIf(h.ctx, err)
- }
- h.scannedItemsCount++
- h.lastHealActivity = UTCNow()
}
- return nil
}
func (h *healSequence) healFromSourceCh() {
h.healItemsFromSourceCh()
close(h.traverseAndHealDoneCh)
}
- func (h *healSequence) healItems() error {
+ func (h *healSequence) healItems(bucketsOnly bool) error {
// Start with format healing
if err := h.healDiskFormat(); err != nil {
return err
@@ -638,13 +733,8 @@ func (h *healSequence) healItems() error {
return err
}
- // Start healing the background ops prefix.
- if err := h.healMinioSysMeta(backgroundOpsMetaPrefix)(); err != nil {
- logger.LogIf(h.ctx, err)
- }
// Heal buckets and objects
- return h.healBuckets()
+ return h.healBuckets(bucketsOnly)
}
// traverseAndHeal - traverses on-disk data and performs healing
@@ -655,13 +745,8 @@ func (h *healSequence) healItems() error {
// has to wait until a safe point is reached, such as between scanning
// two objects.
func (h *healSequence) traverseAndHeal() {
- if err := h.healItems(); err != nil {
- if h.isQuitting() {
- err = errHealStopSignalled
- }
- h.traverseAndHealDoneCh <- err
- }
+ bucketsOnly := false // Heals buckets and objects also.
+ h.traverseAndHealDoneCh <- h.healItems(bucketsOnly)
close(h.traverseAndHealDoneCh)
}
@@ -678,12 +763,12 @@ func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
// NOTE: Healing on meta is run regardless
// of any bucket being selected, this is to ensure that
// meta are always upto date and correct.
- return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, func(bucket string, object string) error {
+ return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket string, object string) error {
if h.isQuitting() {
return errHealStopSignalled
}
- herr := h.queueHealTask(pathJoin(bucket, object), madmin.HealItemBucketMetadata)
+ herr := h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemBucketMetadata)
// Object might have been deleted, by the time heal
// was attempted; we ignore this object and move on.
if isErrObjectNotFound(herr) {
@@ -707,18 +792,18 @@ func (h *healSequence) healDiskFormat() error {
return errServerNotInitialized
}
- return h.queueHealTask(SlashSeparator, madmin.HealItemMetadata)
+ return h.queueHealTask(healSource{path: SlashSeparator}, madmin.HealItemMetadata)
}
// healBuckets - check for all buckets heal or just particular bucket.
- func (h *healSequence) healBuckets() error {
+ func (h *healSequence) healBuckets(bucketsOnly bool) error {
if h.isQuitting() {
return errHealStopSignalled
}
// 1. If a bucket was specified, heal only the bucket.
if h.bucket != "" {
- return h.healBucket(h.bucket)
+ return h.healBucket(h.bucket, bucketsOnly)
}
// Get current object layer instance.
@@ -733,7 +818,7 @@ func (h *healSequence) healBuckets() error {
}
for _, bucket := range buckets {
- if err = h.healBucket(bucket.Name); err != nil {
+ if err = h.healBucket(bucket.Name, bucketsOnly); err != nil {
return err
}
}
@@ -742,17 +827,21 @@ func (h *healSequence) healBuckets() error {
}
// healBucket - traverses and heals given bucket
- func (h *healSequence) healBucket(bucket string) error {
+ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}
- if err := h.queueHealTask(bucket, madmin.HealItemBucket); err != nil {
+ if err := h.queueHealTask(healSource{path: bucket}, madmin.HealItemBucket); err != nil {
return err
}
if bucketsOnly {
return nil
}
if !h.settings.Recursive {
if h.objPrefix != "" {
// Check if an object named as the objPrefix exists,
@@ -768,7 +857,7 @@ func (h *healSequence) healBucket(bucket string) error {
return nil
}
- if err := objectAPI.HealObjects(h.ctx, bucket, h.objPrefix, h.healObject); err != nil {
+ if err := objectAPI.HealObjects(h.ctx, bucket, h.objPrefix, h.settings, h.healObject); err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}
return nil
@@ -776,15 +865,15 @@ func (h *healSequence) healBucket(bucket string) error {
// healObject - heal the given object and record result
func (h *healSequence) healObject(bucket, object string) error {
- if h.isQuitting() {
- return errHealStopSignalled
- }
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}
- return h.queueHealTask(pathJoin(bucket, object), madmin.HealItemObject)
+ if h.isQuitting() {
+ return errHealStopSignalled
+ }
+ return h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemObject)
}


@@ -24,16 +24,18 @@ import (
)
const (
- adminPathPrefix = minioReservedBucketPath + "/admin"
- adminAPIVersion = madmin.AdminAPIVersion
- adminAPIVersionPrefix = SlashSeparator + madmin.AdminAPIVersion
+ adminPathPrefix = minioReservedBucketPath + "/admin"
+ adminAPIVersionV2 = madmin.AdminAPIVersionV2
+ adminAPIVersion = madmin.AdminAPIVersion
+ adminAPIVersionPrefix = SlashSeparator + adminAPIVersion
+ adminAPIVersionV2Prefix = SlashSeparator + adminAPIVersionV2
)
// adminAPIHandlers provides HTTP handlers for MinIO admin API.
type adminAPIHandlers struct{}
// registerAdminRouter - Add handler functions for each service REST API routes.
func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) {
func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps, enableBucketQuotaOps bool) {
adminAPI := adminAPIHandlers{}
// Admin router
@@ -41,125 +43,178 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
/// Service operations
// Restart and stop MinIO service.
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix+"/service").HandlerFunc(httpTraceAll(adminAPI.ServiceActionHandler)).Queries("action", "{action:.*}")
// Update MinIO servers.
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix+"/update").HandlerFunc(httpTraceAll(adminAPI.ServerUpdateHandler)).Queries("updateURL", "{updateURL:.*}")
// Info operations
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
// Hardware Info operations
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/hardware").HandlerFunc(httpTraceAll(adminAPI.ServerHardwareInfoHandler)).Queries("hwType", "{hwType:.*}")
// StorageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/storageinfo").HandlerFunc(httpTraceAll(adminAPI.StorageInfoHandler))
// DataUsageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/datausageinfo").HandlerFunc(httpTraceAll(adminAPI.DataUsageInfoHandler))
if globalIsDistXL || globalIsXL {
/// Heal operations
// Heal processing endpoint.
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/background-heal/status").HandlerFunc(httpTraceAll(adminAPI.BackgroundHealStatusHandler))
/// Health operations
}
// Performance command - return performance details based on input type
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/performance").HandlerFunc(httpTraceAll(adminAPI.PerfInfoHandler)).Queries("perfType", "{perfType:.*}")
// Profiling operations
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix+"/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
Queries("profilerType", "{profilerType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))
// Config KV operations.
if enableConfigOps {
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/get-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKVHandler)).Queries("key", "{key:.*}")
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix + "/set-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKVHandler))
adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix + "/del-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.DelConfigKVHandler))
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/help-config-kv").HandlerFunc(httpTraceAll(adminAPI.HelpConfigKVHandler)).Queries("subSys", "{subSys:.*}", "key", "{key:.*}")
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/list-config-history-kv").HandlerFunc(httpTraceAll(adminAPI.ListConfigHistoryKVHandler)).Queries("count", "{count:[0-9]+}")
adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix+"/clear-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.ClearConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/restore-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
adminVersions := []string{
adminAPIVersionPrefix,
adminAPIVersionV2Prefix,
}
/// Config operations
if enableConfigOps {
// Get config
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
// Set config
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix + "/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
for _, adminVersion := range adminVersions {
// Restart and stop MinIO service.
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/service").HandlerFunc(httpTraceAll(adminAPI.ServiceHandler)).Queries("action", "{action:.*}")
// Update MinIO servers.
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update").HandlerFunc(httpTraceAll(adminAPI.ServerUpdateHandler)).Queries("updateURL", "{updateURL:.*}")
// Info operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
// StorageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(httpTraceAll(adminAPI.StorageInfoHandler))
// DataUsageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(httpTraceAll(adminAPI.DataUsageInfoHandler))
if globalIsDistXL || globalIsXL {
/// Heal operations
// Heal processing endpoint.
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(httpTraceAll(adminAPI.BackgroundHealStatusHandler))
/// Health operations
}
// Profiling operations
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
Queries("profilerType", "{profilerType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))
// Config KV operations.
if enableConfigOps {
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKVHandler)).Queries("key", "{key:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/set-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKVHandler))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/del-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.DelConfigKVHandler))
}
// Enable config help in all modes.
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/help-config-kv").HandlerFunc(httpTraceAll(adminAPI.HelpConfigKVHandler)).Queries("subSys", "{subSys:.*}", "key", "{key:.*}")
// Config KV history operations.
if enableConfigOps {
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-config-history-kv").HandlerFunc(httpTraceAll(adminAPI.ListConfigHistoryKVHandler)).Queries("count", "{count:[0-9]+}")
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/clear-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.ClearConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/restore-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
}
/// Config import/export bulk operations
if enableConfigOps {
// Get config
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
// Set config
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
}
if enableIAMOps {
// -- IAM APIs --
// Add policy IAM
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")
// Add user IAM
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountusageinfo").HandlerFunc(httpTraceAll(adminAPI.AccountUsageInfoHandler))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
// Service accounts ops
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/add-service-account").HandlerFunc(httpTraceHdrs(adminAPI.AddServiceAccount))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-service-accounts").HandlerFunc(httpTraceHdrs(adminAPI.ListServiceAccounts))
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/delete-service-account").HandlerFunc(httpTraceHdrs(adminAPI.DeleteServiceAccount)).Queries("accessKey", "{accessKey:.*}")
if adminVersion == adminAPIVersionV2Prefix {
// Info policy IAM v2
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicyV2)).Queries("name", "{name:.*}")
// List policies v2
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPoliciesV2))
} else {
// Info policy IAM latest
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
// List policies latest
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
}
// Remove policy IAM
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
// Set user or group policy
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-or-group-policy").
HandlerFunc(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup)).
Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")
// Remove user IAM
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
// List users
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))
// User info
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/user-info").HandlerFunc(httpTraceHdrs(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")
// Add/Remove members from group
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers))
// Get Group
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}")
// List Groups
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups))
// Set Group Status
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
}
// Quota operations
if enableConfigOps && enableBucketQuotaOps {
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// RemoveBucketQuotaConfig
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.RemoveBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
}
// -- Top APIs --
// Top locks
if globalIsDistXL {
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
}
// HTTP Trace
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/trace").HandlerFunc(adminAPI.TraceHandler)
// Console Logs
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/log").HandlerFunc(httpTraceAll(adminAPI.ConsoleLogHandler))
// -- KMS APIs --
//
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))
if !globalIsGateway {
// -- OBD API --
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/obdinfo").
HandlerFunc(httpTraceHdrs(adminAPI.OBDInfoHandler)).
Queries("perfdrive", "{perfdrive:true|false}",
"perfnet", "{perfnet:true|false}",
"minioinfo", "{minioinfo:true|false}",
"minioconfig", "{minioconfig:true|false}",
"syscpu", "{syscpu:true|false}",
"sysdiskhw", "{sysdiskhw:true|false}",
"sysosinfo", "{sysosinfo:true|false}",
"sysmem", "{sysmem:true|false}",
"sysprocess", "{sysprocess:true|false}",
)
}
}
if enableIAMOps {
// -- IAM APIs --
// Add policy IAM
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name",
"{name:.*}")
// Add user IAM
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).
Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
// Info policy IAM
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
// Remove policy IAM
adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix+"/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
// Set user or group policy
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/set-user-or-group-policy").
HandlerFunc(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup)).
Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")
// Remove user IAM
adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix+"/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
// List users
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))
// User info
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/user-info").HandlerFunc(httpTraceHdrs(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")
// Add/Remove members from group
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix + "/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers))
// Get Group
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}")
// List Groups
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups))
// Set Group Status
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
// List policies
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
}
// -- Top APIs --
// Top locks
if globalIsDistXL {
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
}
// HTTP Trace
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/trace").HandlerFunc(adminAPI.TraceHandler)
// Console Logs
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/log").HandlerFunc(httpTraceAll(adminAPI.ConsoleLogHandler))
// -- KMS APIs --
//
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))
// If none of the routes match add default error handler routes
adminRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
adminRouter.MethodNotAllowedHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
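
The rewrite above registers each admin route once per supported version prefix instead of maintaining a duplicate route table for v2. A minimal sketch of the same pattern, assuming gorilla/mux as used in this codebase; the prefixes, route, and handler below are placeholders, not MinIO's:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	router := mux.NewRouter()
	info := func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "info")
	}
	// Registering inside a loop keeps old-version clients working after
	// a new version ships, at the cost of one route entry per version.
	for _, version := range []string{"/v1", "/v2"} {
		router.Methods(http.MethodGet).Path(version + "/info").HandlerFunc(info)
	}
	log.Fatal(http.ListenAndServe(":8080", router))
}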

View File

@@ -17,224 +17,59 @@
package cmd
import (
"net"
"net/http"
"os"
"strings"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/pkg/cpu"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/mem"
cpuhw "github.com/shirou/gopsutil/cpu"
)
// getLocalMemUsage - returns ServerMemUsageInfo for all zones, endpoints.
func getLocalMemUsage(endpointZones EndpointZones, r *http.Request) ServerMemUsageInfo {
var memUsages []mem.Usage
var historicUsages []mem.Usage
seenHosts := set.NewStringSet()
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
if seenHosts.Contains(endpoint.Host) {
continue
}
seenHosts.Add(endpoint.Host)
// Only proceed for local endpoints
if endpoint.IsLocal {
memUsages = append(memUsages, mem.GetUsage())
historicUsages = append(historicUsages, mem.GetHistoricUsage())
}
}
}
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}
return ServerMemUsageInfo{
Addr: addr,
Usage: memUsages,
HistoricUsage: historicUsages,
}
}
// getLocalCPULoad - returns ServerCPULoadInfo for all zones, endpoints.
func getLocalCPULoad(endpointZones EndpointZones, r *http.Request) ServerCPULoadInfo {
var cpuLoads []cpu.Load
var historicLoads []cpu.Load
seenHosts := set.NewStringSet()
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
if seenHosts.Contains(endpoint.Host) {
continue
}
seenHosts.Add(endpoint.Host)
// Only proceed for local endpoints
if endpoint.IsLocal {
cpuLoads = append(cpuLoads, cpu.GetLoad())
historicLoads = append(historicLoads, cpu.GetHistoricLoad())
}
}
}
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}
return ServerCPULoadInfo{
Addr: addr,
Load: cpuLoads,
HistoricLoad: historicLoads,
}
}
// getLocalDrivesPerf - returns ServerDrivesPerfInfo for all zones, endpoints.
func getLocalDrivesPerf(endpointZones EndpointZones, size int64, r *http.Request) madmin.ServerDrivesPerfInfo {
var dps []disk.Performance
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
// Only proceed for local endpoints
if endpoint.IsLocal {
if _, err := os.Stat(endpoint.Path); err != nil {
// Since this drive is not available, add relevant details and proceed
dps = append(dps, disk.Performance{Path: endpoint.Path, Error: err.Error()})
continue
}
dp := disk.GetPerformance(pathJoin(endpoint.Path, minioMetaTmpBucket, mustGetUUID()), size)
dp.Path = endpoint.Path
dps = append(dps, dp)
}
}
}
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}
return madmin.ServerDrivesPerfInfo{
Addr: addr,
Perf: dps,
}
}
// getLocalCPUInfo - returns ServerCPUHardwareInfo for all zones, endpoints.
func getLocalCPUInfo(endpointZones EndpointZones, r *http.Request) madmin.ServerCPUHardwareInfo {
var cpuHardwares []cpuhw.InfoStat
seenHosts := set.NewStringSet()
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
if seenHosts.Contains(endpoint.Host) {
continue
}
// Add to the list of visited hosts
seenHosts.Add(endpoint.Host)
// Only proceed for local endpoints
if endpoint.IsLocal {
cpuHardware, err := cpuhw.Info()
if err != nil {
return madmin.ServerCPUHardwareInfo{
Error: err.Error(),
}
}
cpuHardwares = append(cpuHardwares, cpuHardware...)
}
}
}
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}
return madmin.ServerCPUHardwareInfo{
Addr: addr,
CPUInfo: cpuHardwares,
}
}
// getLocalNetworkInfo - returns ServerNetworkHardwareInfo for all zones, endpoints.
func getLocalNetworkInfo(endpointZones EndpointZones, r *http.Request) madmin.ServerNetworkHardwareInfo {
var networkHardwares []net.Interface
seenHosts := set.NewStringSet()
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
if seenHosts.Contains(endpoint.Host) {
continue
}
// Add to the list of visited hosts
seenHosts.Add(endpoint.Host)
// Only proceed for local endpoints
if endpoint.IsLocal {
networkHardware, err := net.Interfaces()
if err != nil {
return madmin.ServerNetworkHardwareInfo{
Error: err.Error(),
}
}
networkHardwares = append(networkHardwares, networkHardware...)
}
}
}
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}
return madmin.ServerNetworkHardwareInfo{
Addr: addr,
NetworkInfo: networkHardwares,
}
}
// getLocalServerProperty - returns ServerDrivesPerfInfo for only the
// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints from given list of endpoints
func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin.ServerProperties {
var di madmin.Disk
var disks []madmin.Disk
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}
network := make(map[string]string)
hosts := set.NewStringSet()
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
url := strings.Replace(endpoint.URL.String(), endpoint.Path, "", -1)
if url == "" {
url = r.Host
nodeName := endpoint.Host
if nodeName == "" {
nodeName = r.Host
}
hosts.Add(url)
// Only proceed for local endpoints
if endpoint.IsLocal {
url = fetchAddress(url)
network[url] = "online"
if _, err := os.Stat(endpoint.Path); err != nil {
continue
// Only proceed for local endpoints
network[nodeName] = "online"
var di = madmin.Disk{
DrivePath: endpoint.Path,
}
diInfo, err := disk.GetInfo(endpoint.Path)
if err != nil {
if os.IsNotExist(err) || isSysErrPathNotFound(err) {
di.State = madmin.DriveStateMissing
} else {
di.State = madmin.DriveStateCorrupt
}
} else {
di.State = madmin.DriveStateOk
di.DrivePath = endpoint.Path
di.TotalSpace = diInfo.Total
di.UsedSpace = diInfo.Total - diInfo.Free
di.Utilization = float64((diInfo.Total - diInfo.Free) / diInfo.Total * 100)
}
diInfo, _ := disk.GetInfo(endpoint.Path)
di.State = "ok"
di.DrivePath = endpoint.Path
di.TotalSpace = diInfo.Total
di.UsedSpace = diInfo.Total - diInfo.Free
di.Utilization = float64((diInfo.Total - diInfo.Free) / diInfo.Total * 100)
disks = append(disks, di)
}
}
}
for host := range hosts {
_, present := network[host]
if !present {
err := checkConnection(host)
host = fetchAddress(host)
if err != nil {
network[host] = "offline"
} else {
network[host] = "online"
_, present := network[nodeName]
if !present {
err := IsServerResolvable(endpoint)
if err == nil {
network[nodeName] = "online"
} else {
network[nodeName] = "offline"
}
}
}
}
}
@@ -249,13 +84,3 @@ func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin
Disks: disks,
}
}
// Replaces http and https from address
func fetchAddress(address string) string {
if strings.Contains(address, "http://") {
address = strings.Replace(address, "http://", "", -1)
} else if strings.Contains(address, "https://") {
address = strings.Replace(address, "https://", "", -1)
}
return address
}
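
The new drive reporting above maps disk.GetInfo failures onto madmin drive states (missing vs. corrupt vs. ok). A simplified sketch of that classification, with hypothetical stand-ins for the disk and madmin packages; note the sketch converts to float64 before dividing, which avoids the truncation a pure uint64 division would cause:

package main

import (
	"fmt"
	"os"
)

type diskInfo struct{ Total, Free uint64 }

// getInfo is a hypothetical stand-in for a statfs-style disk.GetInfo.
func getInfo(path string) (diskInfo, error) {
	if _, err := os.Stat(path); err != nil {
		return diskInfo{}, err
	}
	return diskInfo{Total: 100 << 30, Free: 40 << 30}, nil
}

func classify(path string) (state string, utilization float64) {
	info, err := getInfo(path)
	switch {
	case err != nil && os.IsNotExist(err):
		return "missing", 0 // drive path is gone
	case err != nil:
		return "corrupt", 0 // path exists but stats are unreadable
	}
	used := info.Total - info.Free
	// Convert before dividing: integer division would truncate the
	// ratio to zero for any drive that is not completely full.
	return "ok", float64(used) / float64(info.Total) * 100
}

func main() {
	state, util := classify(os.TempDir())
	fmt.Printf("%s %.1f%%\n", state, util)
}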

View File

@@ -21,17 +21,21 @@ import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"google.golang.org/api/googleapi"
minio "github.com/minio/minio-go/v6"
"github.com/minio/minio-go/v6/pkg/tags"
"github.com/minio/minio/cmd/config/etcd/dns"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/bucket/lifecycle"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/hash"
)
@@ -90,9 +94,12 @@ const (
ErrMissingContentLength
ErrMissingContentMD5
ErrMissingRequestBodyError
ErrMissingSecurityHeader
ErrNoSuchBucket
ErrNoSuchBucketPolicy
ErrNoSuchBucketLifecycle
ErrNoSuchLifecycleConfiguration
ErrNoSuchBucketSSEConfig
ErrNoSuchKey
ErrNoSuchUpload
ErrNoSuchVersion
@@ -114,7 +121,8 @@ const (
ErrMissingCredTag
ErrCredMalformed
ErrInvalidRegion
ErrInvalidService
ErrInvalidServiceS3
ErrInvalidServiceSTS
ErrInvalidRequestVersion
ErrMissingSignTag
ErrMissingSignHeadersTag
@@ -142,11 +150,16 @@ const (
ErrBadRequest
ErrKeyTooLongError
ErrInvalidBucketObjectLockConfiguration
ErrObjectLockConfigurationNotFound
ErrObjectLockConfigurationNotAllowed
ErrNoSuchObjectLockConfiguration
ErrObjectLocked
ErrInvalidRetentionDate
ErrPastObjectLockRetainDate
ErrUnknownWORMModeDirective
ErrBucketTaggingNotFound
ErrObjectLockInvalidHeaders
ErrInvalidTagDirective
// Add new error codes here.
// SSE-S3 related API errors
@@ -199,6 +212,7 @@ const (
ErrInvalidResourceName
ErrServerNotInitialized
ErrOperationTimedOut
ErrOperationMaxedOut
ErrInvalidRequest
// MinIO storage class error codes
ErrInvalidStorageClass
@@ -222,6 +236,10 @@ const (
ErrAdminCredentialsMismatch
ErrInsecureClientRequest
ErrObjectTampered
// Bucket Quota error codes
ErrAdminBucketQuotaExceeded
ErrAdminNoSuchQuotaConfiguration
ErrAdminBucketQuotaDisabled
ErrHealNotImplemented
ErrHealNoSuchProcess
@@ -321,19 +339,28 @@ const (
ErrAdminProfilerNotEnabled
ErrInvalidDecompressedSize
ErrAddUserInvalidArgument
ErrAdminAccountNotEligible
ErrServiceAccountNotFound
ErrPostPolicyConditionInvalidFormat
)
type errorCodeMap map[APIErrorCode]APIError
func (e errorCodeMap) ToAPIErr(errCode APIErrorCode) APIError {
func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError {
apiErr, ok := e[errCode]
if !ok {
return e[ErrInternalError]
apiErr = e[ErrInternalError]
}
if err != nil {
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
}
return apiErr
}
func (e errorCodeMap) ToAPIErr(errCode APIErrorCode) APIError {
return e.ToAPIErrWithErr(errCode, nil)
}
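
One detail worth noting in ToAPIErrWithErr above: apiErr is a value copied out of the map, so appending the underlying error to its Description never mutates the shared errorCodes table, and ToAPIErr stays a thin wrapper for call sites with no cause to attach. A small self-contained sketch of the same pattern:

package main

import "fmt"

type apiError struct {
	Code        string
	Description string
}

type codeMap map[int]apiError

func (m codeMap) toAPIErrWithErr(code int, err error) apiError {
	e, ok := m[code]
	if !ok {
		e = m[0] // fall back to a generic internal-error entry
	}
	if err != nil {
		// e is a copy; the map entry keeps its original description.
		e.Description = fmt.Sprintf("%s (%s)", e.Description, err)
	}
	return e
}

func main() {
	codes := codeMap{0: {Code: "InternalError", Description: "internal error"}}
	fmt.Println(codes.toAPIErrWithErr(7, fmt.Errorf("disk offline")))
	fmt.Println(codes[0].Description) // still "internal error"
}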
// error code to APIError structure, these fields carry respective
// descriptions for all the error responses.
var errorCodes = errorCodeMap{
@@ -462,6 +489,11 @@ var errorCodes = errorCodeMap{
Description: "Missing required header for this request: Content-Md5.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingSecurityHeader: {
Code: "MissingSecurityHeader",
Description: "Your request was missing a required header",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingRequestBodyError: {
Code: "MissingRequestBodyError",
Description: "Request body is empty.",
@@ -482,6 +514,16 @@ var errorCodes = errorCodeMap{
Description: "The bucket lifecycle configuration does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrNoSuchLifecycleConfiguration: {
Code: "NoSuchLifecycleConfiguration",
Description: "The lifecycle configuration does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrNoSuchBucketSSEConfig: {
Code: "ServerSideEncryptionConfigurationNotFoundError",
Description: "The server side encryption configuration was not found",
HTTPStatusCode: http.StatusNotFound,
},
ErrNoSuchKey: {
Code: "NoSuchKey",
Description: "The specified key does not exist.",
@@ -626,9 +668,14 @@ var errorCodes = errorCodeMap{
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// right Description: "Error parsing the X-Amz-Credential parameter; incorrect service \"s4\". This endpoint belongs to \"s3\".".
// Need changes to make sure variable messages can be constructed.
ErrInvalidService: {
Code: "AuthorizationQueryParametersError",
Description: "Error parsing the X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",
ErrInvalidServiceS3: {
Code: "AuthorizationParametersError",
Description: "Error parsing the Credential/X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidServiceSTS: {
Code: "AuthorizationParametersError",
Description: "Error parsing the Credential parameter; incorrect service. This endpoint belongs to \"sts\".",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
@@ -731,6 +778,26 @@ var errorCodes = errorCodeMap{
Description: "Bucket is missing ObjectLockConfiguration",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketTaggingNotFound: {
Code: "NoSuchTagSet",
Description: "The TagSet does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrObjectLockConfigurationNotFound: {
Code: "ObjectLockConfigurationNotFoundError",
Description: "Object Lock configuration does not exist for this bucket",
HTTPStatusCode: http.StatusNotFound,
},
ErrObjectLockConfigurationNotAllowed: {
Code: "InvalidBucketState",
Description: "Object Lock configuration cannot be enabled on existing buckets",
HTTPStatusCode: http.StatusConflict,
},
ErrNoSuchObjectLockConfiguration: {
Code: "NoSuchObjectLockConfiguration",
Description: "The specified object does not have a ObjectLock configuration",
HTTPStatusCode: http.StatusBadRequest,
},
ErrObjectLocked: {
Code: "InvalidRequest",
Description: "Object is WORM protected and cannot be overwritten",
@@ -822,6 +889,11 @@ var errorCodes = errorCodeMap{
Description: "Your metadata headers exceed the maximum allowed metadata size.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidTagDirective: {
Code: "InvalidArgument",
Description: "Unknown tag directive.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidEncryptionMethod: {
Code: "InvalidRequest",
Description: "The encryption method specified is not supported",
@@ -1027,6 +1099,21 @@ var errorCodes = errorCodeMap{
Description: "Credentials in config mismatch with server environment variables",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrAdminBucketQuotaExceeded: {
Code: "XMinioAdminBucketQuotaExceeded",
Description: "Bucket quota exceeded",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminNoSuchQuotaConfiguration: {
Code: "XMinioAdminNoSuchQuotaConfiguration",
Description: "The quota configuration does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminBucketQuotaDisabled: {
Code: "XMinioAdminBucketQuotaDisabled",
Description: "Quota specified but disk usage crawl is disabled on MinIO server",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInsecureClientRequest: {
Code: "XMinioInsecureClientRequest",
Description: "Cannot respond to plain-text request from TLS-encrypted server",
@@ -1037,6 +1124,11 @@ var errorCodes = errorCodeMap{
Description: "A timeout occurred while trying to lock a resource",
HTTPStatusCode: http.StatusRequestTimeout,
},
ErrOperationMaxedOut: {
Code: "XMinioServerTimedOut",
Description: "A timeout exceeded while waiting to proceed with the request",
HTTPStatusCode: http.StatusRequestTimeout,
},
ErrUnsupportedMetadata: {
Code: "InvalidArgument",
Description: "Your metadata headers are not supported.",
@@ -1532,6 +1624,16 @@ var errorCodes = errorCodeMap{
Description: "User is not allowed to be same as admin access key",
HTTPStatusCode: http.StatusConflict,
},
ErrAdminAccountNotEligible: {
Code: "XMinioInvalidIAMCredentials",
Description: "The administrator key is not eligible for this operation",
HTTPStatusCode: http.StatusConflict,
},
ErrServiceAccountNotFound: {
Code: "XMinioInvalidIAMCredentials",
Description: "The specified service account is not found",
HTTPStatusCode: http.StatusNotFound,
},
ErrPostPolicyConditionInvalidFormat: {
Code: "PostPolicyInvalidKeyName",
Description: "Invalid according to Policy: Policy Condition failed",
@@ -1547,7 +1649,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
if err == nil {
return ErrNone
}
// Verify if the underlying error is signature mismatch.
switch err {
case errInvalidArgument:
apiErr = ErrAdminInvalidArgument
@@ -1600,18 +1702,20 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrKMSNotConfigured
case crypto.ErrKMSAuthLogin:
apiErr = ErrKMSAuthFailure
case errOperationTimedOut, context.Canceled, context.DeadlineExceeded:
case context.Canceled, context.DeadlineExceeded:
apiErr = ErrOperationTimedOut
case errDiskNotFound:
apiErr = ErrSlowDown
case errInvalidRetentionDate:
case objectlock.ErrInvalidRetentionDate:
apiErr = ErrInvalidRetentionDate
case errPastObjectLockRetainDate:
case objectlock.ErrPastObjectLockRetainDate:
apiErr = ErrPastObjectLockRetainDate
case errUnknownWORMModeDirective:
case objectlock.ErrUnknownWORMModeDirective:
apiErr = ErrUnknownWORMModeDirective
case errObjectLockInvalidHeaders:
case objectlock.ErrObjectLockInvalidHeaders:
apiErr = ErrObjectLockInvalidHeaders
case objectlock.ErrMalformedXML:
apiErr = ErrMalformedXML
}
// Compression errors
@@ -1701,7 +1805,17 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
case BucketPolicyNotFound:
apiErr = ErrNoSuchBucketPolicy
case BucketLifecycleNotFound:
apiErr = ErrNoSuchBucketLifecycle
apiErr = ErrNoSuchLifecycleConfiguration
case BucketSSEConfigNotFound:
apiErr = ErrNoSuchBucketSSEConfig
case BucketTaggingNotFound:
apiErr = ErrBucketTaggingNotFound
case BucketObjectLockConfigNotFound:
apiErr = ErrObjectLockConfigurationNotFound
case BucketQuotaConfigNotFound:
apiErr = ErrAdminNoSuchQuotaConfiguration
case BucketQuotaExceeded:
apiErr = ErrAdminBucketQuotaExceeded
case *event.ErrInvalidEventName:
apiErr = ErrEventNotification
case *event.ErrInvalidARN:
@@ -1724,10 +1838,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrOverlappingFilterNotification
case *event.ErrUnsupportedConfiguration:
apiErr = ErrUnsupportedNotification
case OperationTimedOut:
apiErr = ErrOperationTimedOut
case BackendDown:
apiErr = ErrBackendDown
case crypto.Error:
apiErr = ErrObjectTampered
case ObjectNameTooLong:
apiErr = ErrKeyTooLongError
default:
@@ -1772,9 +1886,41 @@ func toAPIError(ctx context.Context, err error) APIError {
// their internal error types. This code is only
// useful with gateway implementations.
switch e := err.(type) {
case *xml.SyntaxError:
apiErr = APIError{
Code: "MalformedXML",
Description: fmt.Sprintf("%s (%s)", errorCodes[ErrMalformedXML].Description,
e.Error()),
HTTPStatusCode: errorCodes[ErrMalformedXML].HTTPStatusCode,
}
case url.EscapeError:
apiErr = APIError{
Code: "XMinioInvalidObjectName",
Description: fmt.Sprintf("%s (%s)", errorCodes[ErrInvalidObjectName].Description,
e.Error()),
HTTPStatusCode: http.StatusBadRequest,
}
case lifecycle.Error:
apiErr = APIError{
Code: "InvalidRequest",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case tags.Error:
apiErr = APIError{
Code: e.Code(),
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case policy.Error:
apiErr = APIError{
Code: "MalformedPolicy",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case crypto.Error:
apiErr = APIError{
Code: "XKMSInternalError",
Code: "XMinIOEncryptionError",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
@@ -1802,12 +1948,6 @@ func toAPIError(ctx context.Context, err error) APIError {
Description: e.Error(),
HTTPStatusCode: e.Response().StatusCode,
}
case oss.ServiceError:
apiErr = APIError{
Code: e.Code,
Description: e.Message,
HTTPStatusCode: e.StatusCode,
}
// Add more Gateway SDKs here if any in future.
}
}

View File

@@ -22,7 +22,9 @@ import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/minio/minio/cmd/crypto"
@@ -80,6 +82,10 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
w.Header()[xhttp.ETag] = []string{"\"" + objInfo.ETag + "\""}
}
if strings.Contains(objInfo.ETag, "-") && len(objInfo.Parts) > 0 {
w.Header().Set(xhttp.AmzMpPartsCount, strconv.Itoa(len(objInfo.Parts)))
}
if objInfo.ContentType != "" {
w.Header().Set(xhttp.ContentType, objInfo.ContentType)
}
@@ -92,6 +98,18 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
w.Header().Set(xhttp.Expires, objInfo.Expires.UTC().Format(http.TimeFormat))
}
if globalCacheConfig.Enabled {
w.Header().Set(xhttp.XCache, objInfo.CacheStatus.String())
w.Header().Set(xhttp.XCacheLookup, objInfo.CacheLookupStatus.String())
}
// Set tag count if object has tags
tags, _ := url.ParseQuery(objInfo.UserTags)
tagCount := len(tags)
if tagCount != 0 {
w.Header().Set(xhttp.AmzTagCount, strconv.Itoa(tagCount))
}
// Set all other user defined metadata.
for k, v := range objInfo.UserDefined {
if HasPrefix(k, ReservedMetadataPrefix) {
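
The tag-count header added above is derived by parsing UserTags, which travels as a URL-encoded string. A minimal sketch of that step, assuming the "k1=v1&k2=v2" encoding implied by the url.ParseQuery call and assuming x-amz-tagging-count as the value behind the xhttp.AmzTagCount constant:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
	"strconv"
)

func setTagCountHeader(w http.ResponseWriter, userTags string) {
	// ParseQuery tolerates an empty string and returns an empty map,
	// so untagged objects simply get no tag-count header.
	tags, _ := url.ParseQuery(userTags)
	if len(tags) != 0 {
		w.Header().Set("x-amz-tagging-count", strconv.Itoa(len(tags)))
	}
}

func main() {
	rec := httptest.NewRecorder()
	setTagCountHeader(rec, "project=minio&tier=hot")
	fmt.Println(rec.Header().Get("x-amz-tagging-count")) // 2
}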

View File

@@ -429,7 +429,12 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp
content.ETag = "\"" + object.ETag + "\""
}
content.Size = object.Size
content.StorageClass = object.StorageClass
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
content.Owner = owner
content.VersionID = "null"
content.IsLatest = true
@@ -475,7 +480,11 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
content.ETag = "\"" + object.ETag + "\""
}
content.Size = object.Size
content.StorageClass = object.StorageClass
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
content.Owner = owner
contents = append(contents, content)
}
@@ -521,7 +530,11 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
content.ETag = "\"" + object.ETag + "\""
}
content.Size = object.Size
content.StorageClass = object.StorageClass
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
content.Owner = owner
if metadata {
content.UserMetadata = make(StringMap)
@@ -588,7 +601,8 @@ func generateCompleteMultpartUploadResponse(bucket, key, location, etag string)
Location: location,
Bucket: bucket,
Key: key,
ETag: etag,
// AWS S3 quotes the ETag in XML; make sure we are compatible here.
ETag: "\"" + etag + "\"",
}
}
@@ -753,17 +767,6 @@ func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIE
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}
// writeVersionMismatchResponse - writes custom error responses for version mismatches.
func writeVersionMismatchResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL, isJSON bool) {
if isJSON {
// Generate error response.
errorResponse := getAPIErrorResponse(ctx, err, reqURL.String(), w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
writeResponse(w, err.HTTPStatusCode, encodeResponseJSON(errorResponse), mimeJSON)
} else {
writeResponse(w, err.HTTPStatusCode, []byte(err.Description), mimeNone)
}
}
// writeCustomErrorResponseJSON - similar to writeErrorResponseJSON,
// but accepts the error message directly (this allows messages to be
// dynamically generated.)
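
On the ETag change above: S3 clients treat the surrounding double quotes as part of the ETag value itself, in the XML body just as in the HTTP header. A quick sketch showing the quoted value surviving XML marshalling; the struct here is a simplified stand-in for the real response type:

package main

import (
	"encoding/xml"
	"fmt"
)

type completeMultipartUploadResponse struct {
	XMLName xml.Name `xml:"CompleteMultipartUploadResult"`
	Bucket  string   `xml:"Bucket"`
	Key     string   `xml:"Key"`
	ETag    string   `xml:"ETag"`
}

func main() {
	resp := completeMultipartUploadResponse{
		Bucket: "mybucket",
		Key:    "mykey",
		// The quotes are part of the wire value, matching AWS S3.
		ETag: "\"" + "3858f62230ac3c915f300c664312c63f-2" + "\"",
	}
	out, _ := xml.MarshalIndent(resp, "", "  ")
	fmt.Println(string(out))
}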

View File

@@ -1,5 +1,5 @@
/*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc.
* MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -90,123 +90,200 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
for _, bucket := range routers {
// Object operations
// HeadObject
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler)))
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler))))
// CopyObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// ListObjectParts
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("listobjectparts", httpTraceAll(api.ListObjectPartsHandler))).Queries("uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("listobjectparts", httpTraceAll(api.ListObjectPartsHandler)))).Queries("uploadId", "{uploadId:.*}")
// CompleteMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("completemutipartupload", httpTraceAll(api.CompleteMultipartUploadHandler))).Queries("uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("completemutipartupload", httpTraceAll(api.CompleteMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("newmultipartupload", httpTraceAll(api.NewMultipartUploadHandler))).Queries("uploads", "")
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("newmultipartupload", httpTraceAll(api.NewMultipartUploadHandler)))).Queries("uploads", "")
// AbortMultipartUpload
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(collectAPIStats("abortmultipartupload", httpTraceAll(api.AbortMultipartUploadHandler))).Queries("uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("abortmultipartupload", httpTraceAll(api.AbortMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
// GetObjectACL - this is a dummy call.
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectacl", httpTraceHdrs(api.GetObjectACLHandler))).Queries("acl", "")
// GetObjectTagging - this is a dummy call.
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler))).Queries("tagging", "")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectacl", httpTraceHdrs(api.GetObjectACLHandler)))).Queries("acl", "")
// PutObjectACL - this is a dummy call.
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectacl", httpTraceHdrs(api.PutObjectACLHandler)))).Queries("acl", "")
// GetObjectTagging
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler)))).Queries("tagging", "")
// PutObjectTagging
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjecttagging", httpTraceHdrs(api.PutObjectTaggingHandler)))).Queries("tagging", "")
// DeleteObjectTagging
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("deleteobjecttagging", httpTraceHdrs(api.DeleteObjectTaggingHandler)))).Queries("tagging", "")
// SelectObjectContent
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler))).Queries("select", "").Queries("select-type", "2")
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler)))).Queries("select", "").Queries("select-type", "2")
// GetObjectRetention
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectretention", httpTraceHdrs(api.GetObjectRetentionHandler))).Queries("retention", "")
// GetObject
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler)))
// CopyObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler)))
// PutObjectRetention
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectretention", httpTraceHdrs(api.PutObjectRetentionHandler))).Queries("retention", "")
// PutObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler)))
// DeleteObject
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler)))
// PutObjectLegalHold
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectlegalhold", httpTraceHdrs(api.PutObjectLegalHoldHandler))).Queries("legal-hold", "")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectretention", httpTraceAll(api.GetObjectRetentionHandler)))).Queries("retention", "")
// GetObjectLegalHold
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectlegalhold", httpTraceHdrs(api.GetObjectLegalHoldHandler))).Queries("legal-hold", "")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectlegalhold", httpTraceAll(api.GetObjectLegalHoldHandler)))).Queries("legal-hold", "")
// GetObject
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler))))
// CopyObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
// PutObjectRetention
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
// PutObjectLegalHold
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectlegalhold", httpTraceAll(api.PutObjectLegalHoldHandler)))).Queries("legal-hold", "")
// PutObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler))))
// DeleteObject
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler))))
/// Bucket operations
// GetBucketLocation
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlocation", httpTraceAll(api.GetBucketLocationHandler))).Queries("location", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlocation", httpTraceAll(api.GetBucketLocationHandler)))).Queries("location", "")
// GetBucketPolicy
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler))).Queries("policy", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler)))).Queries("policy", "")
// GetBucketLifecycle
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler))).Queries("lifecycle", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
// GetBucketEncryption
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketencryption", httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")
// Dummy Bucket Calls
// GetBucketACL -- this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketacl", httpTraceAll(api.GetBucketACLHandler))).Queries("acl", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketacl", httpTraceAll(api.GetBucketACLHandler)))).Queries("acl", "")
// PutBucketACL -- this is a dummy call.
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketacl", httpTraceAll(api.PutBucketACLHandler)))).Queries("acl", "")
// GetBucketCors - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketcors", httpTraceAll(api.GetBucketCorsHandler))).Queries("cors", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketcors", httpTraceAll(api.GetBucketCorsHandler)))).Queries("cors", "")
// GetBucketWebsiteHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketwebsite", httpTraceAll(api.GetBucketWebsiteHandler))).Queries("website", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketwebsite", httpTraceAll(api.GetBucketWebsiteHandler)))).Queries("website", "")
// GetBucketAccelerateHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketaccelerate", httpTraceAll(api.GetBucketAccelerateHandler))).Queries("accelerate", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketaccelerate", httpTraceAll(api.GetBucketAccelerateHandler)))).Queries("accelerate", "")
// GetBucketRequestPaymentHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketrequestpayment", httpTraceAll(api.GetBucketRequestPaymentHandler))).Queries("requestPayment", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketrequestpayment", httpTraceAll(api.GetBucketRequestPaymentHandler)))).Queries("requestPayment", "")
// GetBucketLoggingHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlogging", httpTraceAll(api.GetBucketLoggingHandler))).Queries("logging", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlogging", httpTraceAll(api.GetBucketLoggingHandler)))).Queries("logging", "")
// GetBucketLifecycleHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler))).Queries("lifecycle", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
// GetBucketReplicationHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketreplication", httpTraceAll(api.GetBucketReplicationHandler))).Queries("replication", "")
// GetBucketTaggingHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler))).Queries("tagging", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketreplication", httpTraceAll(api.GetBucketReplicationHandler)))).Queries("replication", "")
// GetBucketTaggingHandler
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
//DeleteBucketWebsiteHandler
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketwebsite", httpTraceAll(api.DeleteBucketWebsiteHandler))).Queries("website", "")
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketwebsite", httpTraceAll(api.DeleteBucketWebsiteHandler)))).Queries("website", "")
// DeleteBucketTaggingHandler
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler))).Queries("tagging", "")
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")
// GetBucketObjectLockConfig
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler))).Queries("object-lock", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// GetBucketVersioning
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler))).Queries("versioning", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
// GetBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler))).Queries("notification", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
// ListenBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listenbucketnotification", httpTraceAll(api.ListenBucketNotificationHandler))).Queries("events", "{events:.*}")
// ListMultipartUploads
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler))).Queries("uploads", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")
// ListObjectsV2M
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv2M", httpTraceAll(api.ListObjectsV2MHandler))).Queries("list-type", "2", "metadata", "true")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv2M", httpTraceAll(api.ListObjectsV2MHandler)))).Queries("list-type", "2", "metadata", "true")
// ListObjectsV2
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler))).Queries("list-type", "2")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
// ListBucketVersions
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listbucketversions", httpTraceAll(api.ListBucketObjectVersionsHandler))).Queries("versions", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listbucketversions", httpTraceAll(api.ListBucketObjectVersionsHandler)))).Queries("versions", "")
// ListObjectsV1 (Legacy)
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler)))
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler))))
// PutBucketLifecycle
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler))).Queries("lifecycle", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
// PutBucketEncryption
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketencryption", httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")
// PutBucketPolicy
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler))).Queries("policy", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler)))).Queries("policy", "")
// PutBucketObjectLockConfig
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler))).Queries("object-lock", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// PutBucketTaggingHandler
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbuckettagging", httpTraceAll(api.PutBucketTaggingHandler)))).Queries("tagging", "")
// PutBucketVersioning
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler))).Queries("versioning", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
// PutBucketNotification
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler))).Queries("notification", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler)))).Queries("notification", "")
// PutBucket
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucket", httpTraceAll(api.PutBucketHandler)))
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucket", httpTraceAll(api.PutBucketHandler))))
// HeadBucket
bucket.Methods(http.MethodHead).HandlerFunc(collectAPIStats("headbucket", httpTraceAll(api.HeadBucketHandler)))
bucket.Methods(http.MethodHead).HandlerFunc(
maxClients(collectAPIStats("headbucket", httpTraceAll(api.HeadBucketHandler))))
// PostPolicy
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(collectAPIStats("postpolicybucket", httpTraceHdrs(api.PostPolicyBucketHandler)))
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(
maxClients(collectAPIStats("postpolicybucket", httpTraceHdrs(api.PostPolicyBucketHandler))))
// DeleteMultipleObjects
bucket.Methods(http.MethodPost).HandlerFunc(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler))).Queries("delete", "")
bucket.Methods(http.MethodPost).HandlerFunc(
maxClients(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler)))).Queries("delete", "")
// DeleteBucketPolicy
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler))).Queries("policy", "")
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
// DeleteBucketLifecycle
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler))).Queries("lifecycle", "")
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
// DeleteBucketEncryption
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketencryption", httpTraceAll(api.DeleteBucketEncryptionHandler)))).Queries("encryption", "")
// DeleteBucket
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler)))
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler))))
}
/// Root operation
// ListBuckets
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler)))
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
// If none of the routes match, add default error handler routes
apiRouter.NotFoundHandler = http.HandlerFunc(collectAPIStats("notfound", httpTraceAll(errorResponseHandler)))
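Every route above appears twice in this flattened diff: the old registration, and the new one wrapped in maxClients, which throttles concurrent API calls before the stats and trace middleware run. As a rough illustration only (not MinIO's actual implementation), a limiter of that shape can be built from a buffered channel used as a counting semaphore; the names and capacity below are assumptions:

package main

import (
	"fmt"
	"net/http"
)

// limitClients rejects requests once `capacity` handlers are in flight.
// Hypothetical stand-in for the maxClients wrapper used in the routes above.
func limitClients(h http.HandlerFunc, capacity int) http.HandlerFunc {
	sem := make(chan struct{}, capacity) // buffered channel as a counting semaphore
	return func(w http.ResponseWriter, r *http.Request) {
		select {
		case sem <- struct{}{}: // acquire a slot
			defer func() { <-sem }() // release it when the handler returns
			h(w, r)
		default: // all slots busy, shed load
			http.Error(w, "too many concurrent requests", http.StatusTooManyRequests)
		}
	}
}

func main() {
	hello := func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "hello") }
	http.Handle("/", limitClients(hello, 100))
	// http.ListenAndServe(":8080", nil) would start serving with the limiter in place.
}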


@@ -26,15 +26,18 @@ import (
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
jwtgo "github.com/dgrijalva/jwt-go"
xhttp "github.com/minio/minio/cmd/http"
xjwt "github.com/minio/minio/cmd/jwt"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/hash"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/policy"
)
// Verify if request has JWT.
@@ -118,9 +121,7 @@ func getRequestAuthType(r *http.Request) authType {
return authTypeUnknown
}
// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
// It does not accept presigned or JWT or anonymous requests.
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
func validateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, map[string]interface{}, bool, APIErrorCode) {
var cred auth.Credentials
var owner bool
s3Err := ErrAccessDenied
@@ -129,7 +130,7 @@ func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iamp
// We only support admin credentials to access admin APIs.
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
if s3Err != ErrNone {
return cred, s3Err
return cred, nil, owner, s3Err
}
// we only support V4 (no presign) with auth body
@@ -139,14 +140,24 @@ func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iamp
reqInfo := (&logger.ReqInfo{}).AppendTags("requestHeaders", dumpRequest(r))
ctx := logger.SetReqInfo(ctx, reqInfo)
logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description), logger.Application)
return cred, nil, owner, s3Err
}
var claims map[string]interface{}
claims, s3Err = checkClaimsFromToken(r, cred)
claims, s3Err := checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return cred, nil, owner, s3Err
}
return cred, claims, owner, ErrNone
}
// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
// It does not accept presigned or JWT or anonymous requests.
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, region)
if s3Err != ErrNone {
return cred, s3Err
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.Action(action),
@@ -173,18 +184,18 @@ func getSessionToken(r *http.Request) (token string) {
// Fetch claims in the security token returned by the client; does not return
// errors - upon an error the returned claims map will simply be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
claims, _ := getClaimsFromToken(r)
claims, _ := getClaimsFromToken(r, getSessionToken(r))
return claims
}
// Fetch claims in the security token returned by the client.
func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
claims := make(map[string]interface{})
token := getSessionToken(r)
func getClaimsFromToken(r *http.Request, token string) (map[string]interface{}, error) {
claims := xjwt.NewMapClaims()
if token == "" {
return claims, nil
return claims.Map(), nil
}
stsTokenCallback := func(jwtToken *jwtgo.Token) (interface{}, error) {
stsTokenCallback := func(claims *xjwt.MapClaims) ([]byte, error) {
// JWT token for x-amz-security-token is signed with admin
// secret key, temporary credentials become invalid if
// server admin credentials change. This is done to ensure
@@ -195,66 +206,43 @@ func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
// on the client side and is treated like an opaque value.
return []byte(globalActiveCred.SecretKey), nil
}
p := &jwtgo.Parser{
ValidMethods: []string{
jwtgo.SigningMethodHS256.Alg(),
jwtgo.SigningMethodHS512.Alg(),
},
}
jtoken, err := p.ParseWithClaims(token, jwtgo.MapClaims(claims), stsTokenCallback)
if err != nil {
return nil, err
}
if !jtoken.Valid {
if err := xjwt.ParseWithClaims(token, claims, stsTokenCallback); err != nil {
return nil, errAuthentication
}
v, ok := claims["accessKey"]
if !ok {
return nil, errInvalidAccessKeyID
}
if _, ok = v.(string); !ok {
return nil, errInvalidAccessKeyID
}
if globalPolicyOPA == nil {
// If OPA is not set and if ldap claim key is set,
// allow the claim.
if _, ok := claims[ldapUser]; ok {
return claims, nil
// If OPA is not set and if ldap claim key is set, allow the claim.
if _, ok := claims.Lookup(ldapUser); ok {
return claims.Map(), nil
}
// If OPA is not set, the session token must carry a policy
// claim; it is mandatory, so reject requests without one.
p, pok := claims[iamPolicyName()]
if !pok {
_, pokOpenID := claims.Lookup(iamPolicyClaimNameOpenID())
_, pokSA := claims.Lookup(iamPolicyClaimNameSA())
if !pokOpenID && !pokSA {
return nil, errAuthentication
}
if _, pok = p.(string); !pok {
return nil, errAuthentication
}
sp, spok := claims[iampolicy.SessionPolicyName]
// Sub policy is optional, if not set return success.
sp, spok := claims.Lookup(iampolicy.SessionPolicyName)
if !spok {
return claims, nil
}
// Sub policy is set but it's not a string, reject such requests
spStr, spok := sp.(string)
if !spok {
return nil, errAuthentication
return claims.Map(), nil
}
// Looks like the subpolicy is set and is a string; if set, it is
// base64 encoded, so decode it. If decoding fails, reject the request.
spBytes, err := base64.StdEncoding.DecodeString(spStr)
spBytes, err := base64.StdEncoding.DecodeString(sp)
if err != nil {
// Base64 decoding fails, we should log to indicate
// something is malforming the request sent by client.
logger.LogIf(context.Background(), err, logger.Application)
logger.LogIf(r.Context(), err, logger.Application)
return nil, errAuthentication
}
claims[iampolicy.SessionPolicyName] = string(spBytes)
claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
}
return claims, nil
return claims.Map(), nil
}
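The sub-policy handling above relies on the claim travelling as base64-encoded JSON inside the JWT. A minimal, self-contained sketch of that round trip (the policy document here is illustrative):

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// A session policy is carried base64-encoded inside a JWT claim;
	// decoding restores the JSON policy document.
	policy := `{"Version":"2012-10-17","Statement":[]}`
	encoded := base64.StdEncoding.EncodeToString([]byte(policy))

	decoded, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		// A decoding failure indicates a malformed request and is rejected.
		fmt.Println("malformed session policy:", err)
		return
	}
	fmt.Println(string(decoded))
}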
// Fetch claims in the security token returned by the client and validate the token.
@@ -263,12 +251,15 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
if token != "" && cred.AccessKey == "" {
return nil, ErrNoAccessKey
}
if cred.IsServiceAccount() && token == "" {
token = cred.SessionToken
}
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
return nil, ErrInvalidToken
}
claims, err := getClaimsFromToken(r)
claims, err := getClaimsFromToken(r, token)
if err != nil {
return nil, toAPIErrorCode(context.Background(), err)
return nil, toAPIErrorCode(r.Context(), err)
}
return claims, ErrNone
}
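checkClaimsFromToken compares the presented token against the credential's session token with subtle.ConstantTimeCompare rather than ==. A small standard-library sketch of why (values illustrative):

package main

import (
	"crypto/subtle"
	"fmt"
)

// tokensMatch runs in time that depends only on the input length, not on how
// many leading bytes happen to match, which avoids a timing side channel.
func tokensMatch(a, b string) bool {
	return subtle.ConstantTimeCompare([]byte(a), []byte(b)) == 1
}

func main() {
	fmt.Println(tokensMatch("session-token", "session-token")) // true
	fmt.Println(tokensMatch("session-token", "other-token"))   // false
}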
@@ -293,7 +284,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
var cred auth.Credentials
switch getRequestAuthType(r) {
case authTypeUnknown, authTypeStreamingSigned:
return accessKey, owner, ErrAccessDenied
return accessKey, owner, ErrSignatureVersionNotSupported
case authTypePresignedV2, authTypeSignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return accessKey, owner, s3Err
@@ -355,7 +346,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
return accessKey, owner, ErrAccessDenied
return cred.AccessKey, owner, ErrAccessDenied
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
@@ -369,7 +360,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
return accessKey, owner, ErrAccessDenied
return cred.AccessKey, owner, ErrAccessDenied
}
// Verify if request has valid AWS Signature Version '2'.
@@ -402,12 +393,11 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
err error
contentMD5, contentSHA256 []byte
)
// Extract 'Content-Md5' if present.
if _, ok := r.Header[xhttp.ContentMD5]; ok {
contentMD5, err = base64.StdEncoding.Strict().DecodeString(r.Header.Get(xhttp.ContentMD5))
if err != nil || len(contentMD5) == 0 {
return ErrInvalidDigest
}
contentMD5, err = checkValidMD5(r.Header)
if err != nil {
return ErrInvalidDigest
}
// Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned)
@@ -433,7 +423,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
if err != nil {
return toAPIErrorCode(ctx, err)
}
r.Body = ioutil.NopCloser(reader)
r.Body = reader
return ErrNone
}
@@ -483,7 +473,107 @@ func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
a.handler.ServeHTTP(w, r)
return
}
writeErrorResponse(context.Background(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
}
func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, map[string]interface{}, APIErrorCode) {
var cred auth.Credentials
var owner bool
var s3Err APIErrorCode
switch atype {
case authTypeUnknown, authTypeStreamingSigned:
return cred, owner, nil, ErrSignatureVersionNotSupported
case authTypeSignedV2, authTypePresignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return cred, owner, nil, s3Err
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypePresigned, authTypeSigned:
region := globalServerRegion
if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
return cred, owner, nil, s3Err
}
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return cred, owner, nil, s3Err
}
claims, s3Err := checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return cred, owner, nil, s3Err
}
return cred, owner, claims, ErrNone
}
func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate time.Time, retMode objectlock.RetMode, byPassSet bool, r *http.Request, cred auth.Credentials, owner bool, claims map[string]interface{}) (s3Err APIErrorCode) {
var retSet bool
if cred.AccessKey == "" {
conditions := getConditionValues(r, "", "", nil)
conditions["object-lock-mode"] = []string{string(retMode)}
conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
if retDays > 0 {
conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
}
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.Action(policy.BypassGovernanceRetentionAction),
BucketName: bucketName,
ConditionValues: conditions,
IsOwner: false,
ObjectName: objectName,
})
}
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.Action(policy.PutObjectRetentionAction),
BucketName: bucketName,
ConditionValues: conditions,
IsOwner: false,
ObjectName: objectName,
}) {
retSet = true
}
if byPassSet || retSet {
return ErrNone
}
return ErrAccessDenied
}
conditions := getConditionValues(r, "", cred.AccessKey, claims)
conditions["object-lock-mode"] = []string{string(retMode)}
conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
if retDays > 0 {
conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
}
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: policy.BypassGovernanceRetentionAction,
BucketName: bucketName,
ObjectName: objectName,
ConditionValues: conditions,
IsOwner: owner,
Claims: claims,
})
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: policy.PutObjectRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
ObjectName: objectName,
IsOwner: owner,
Claims: claims,
}) {
retSet = true
}
if byPassSet || retSet {
return ErrNone
}
return ErrAccessDenied
}
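isPutRetentionAllowed feeds the requested lock mode, retain-until date, and remaining retention days into the policy engine as condition values. A rough, standalone illustration of the map it builds (the keys mirror the code above; the values are made up):

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	retDays := 30
	retDate := time.Now().AddDate(0, 0, retDays)

	// Condition values consulted by policy statements such as
	// s3:object-lock-remaining-retention-days.
	conditions := map[string][]string{
		"object-lock-mode":                     {"GOVERNANCE"},
		"object-lock-retain-until-date":        {retDate.Format(time.RFC3339)},
		"object-lock-remaining-retention-days": {strconv.Itoa(retDays)},
	}
	fmt.Println(conditions)
}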
// isPutActionAllowed - check if PUT operation is allowed on the resource, this
@@ -494,7 +584,7 @@ func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.R
var owner bool
switch atype {
case authTypeUnknown:
return ErrAccessDenied
return ErrSignatureVersionNotSupported
case authTypeSignedV2, authTypePresignedV2:
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
@@ -510,10 +600,19 @@ func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.R
return s3Err
}
// Do not check for PutObjectRetentionAction permission,
// if mode and retain until date are not set.
// Can happen when bucket has default lock config set
if action == iampolicy.PutObjectRetentionAction &&
r.Header.Get(xhttp.AmzObjectLockMode) == "" &&
r.Header.Get(xhttp.AmzObjectLockRetainUntilDate) == "" {
return ErrNone
}
if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.PutObjectAction,
Action: policy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", "", nil),
IsOwner: false,


@@ -391,6 +391,7 @@ func TestIsReqAuthenticated(t *testing.T) {
}
}
}
func TestCheckAdminRequestAuthType(t *testing.T) {
objLayer, fsDir, err := prepareFS()
if err != nil {
@@ -425,3 +426,48 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
}
}
}
func TestValidateAdminSignature(t *testing.T) {
ctx := context.Background()
objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
t.Fatalf("unable initialize config file, %s", err)
}
creds, err := auth.CreateCredentials("admin", "mypassword")
if err != nil {
t.Fatalf("unable create credential, %s", err)
}
globalActiveCred = creds
testCases := []struct {
AccessKey string
SecretKey string
ErrCode APIErrorCode
}{
{"", "", ErrInvalidAccessKeyID},
{"admin", "", ErrSignatureDoesNotMatch},
{"admin", "wrongpassword", ErrSignatureDoesNotMatch},
{"wronguser", "mypassword", ErrInvalidAccessKeyID},
{"", "mypassword", ErrInvalidAccessKeyID},
{"admin", "mypassword", ErrNone},
}
for i, testCase := range testCases {
req := mustNewRequest("GET", "http://localhost:9000/", 0, nil, t)
if err := signRequestV4(req, testCase.AccessKey, testCase.SecretKey); err != nil {
t.Fatalf("Unable to inititalized new signed http request %s", err)
}
_, _, _, s3Error := validateAdminSignature(ctx, req, globalMinioDefaultRegion)
if s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i+1, testCase.ErrCode, s3Error)
}
}
}


@@ -52,43 +52,49 @@ func (h *healRoutine) queueHealTask(task healTask) {
h.tasks <- task
}
func waitForLowHTTPReq(tolerance int32) {
if httpServer := newHTTPServerFn(); httpServer != nil {
// Wait at most 10 minutes for an in-progress request before proceeding to heal
waitCount := 600
// If any requests are in progress, delay the heal.
for (httpServer.GetRequestCount() >= tolerance) &&
waitCount > 0 {
waitCount--
time.Sleep(1 * time.Second)
}
}
}
// Wait for heal requests and process them
func (h *healRoutine) run() {
ctx := context.Background()
func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
for {
select {
case task, ok := <-h.tasks:
if !ok {
break
}
if httpServer := newHTTPServerFn(); httpServer != nil {
// Wait at most 10 minutes for an in-progress request before proceeding to heal
waitCount := 600
// If any requests are in progress, delay the heal.
for (httpServer.GetRequestCount() >= int32(globalEndpoints.Nodes())) &&
waitCount > 0 {
waitCount--
time.Sleep(1 * time.Second)
}
}
// Wait and proceed if there are active requests
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()))
var res madmin.HealResultItem
var err error
bucket, object := urlPath2BucketObjectName(task.path)
bucket, object := path2BucketObject(task.path)
switch {
case bucket == "" && object == "":
res, err = bgHealDiskFormat(ctx, task.opts)
res, err = healDiskFormat(ctx, objAPI, task.opts)
case bucket != "" && object == "":
res, err = bgHealBucket(ctx, bucket, task.opts)
res, err = objAPI.HealBucket(ctx, bucket, task.opts.DryRun, task.opts.Remove)
case bucket != "" && object != "":
res, err = bgHealObject(ctx, bucket, object, task.opts)
res, err = objAPI.HealObject(ctx, bucket, object, task.opts)
}
if task.responseCh != nil {
task.responseCh <- healResult{result: res, err: err}
if task.path != slashSeparator && task.path != nopHeal {
ObjectPathUpdated(task.path)
}
task.responseCh <- healResult{result: res, err: err}
case <-h.doneCh:
return
case <-GlobalServiceDoneCh:
case <-ctx.Done():
return
}
}
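The heal routine above is a single goroutine draining a task channel and answering on a per-task response channel. A generic, self-contained sketch of that request/response worker pattern (the types and names are illustrative, not MinIO's):

package main

import "fmt"

// task models one unit of background work carrying its own reply channel.
type task struct {
	path       string
	responseCh chan string
}

// run drains tasks until the channel closes or done fires.
func run(tasks <-chan task, done <-chan struct{}) {
	for {
		select {
		case t, ok := <-tasks:
			if !ok {
				return
			}
			t.responseCh <- "healed " + t.path
		case <-done:
			return
		}
	}
}

func main() {
	tasks := make(chan task)
	done := make(chan struct{})
	go run(tasks, done)

	resp := make(chan string)
	tasks <- task{path: "bucket/object", responseCh: resp}
	fmt.Println(<-resp) // healed bucket/object
	close(done)
}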
@@ -102,45 +108,27 @@ func initHealRoutine() *healRoutine {
}
func startBackgroundHealing() {
ctx := context.Background()
var objAPI ObjectLayer
for {
objAPI = newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
time.Sleep(time.Second)
continue
}
break
}
func startBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
// Run the background healer
globalBackgroundHealRoutine = initHealRoutine()
go globalBackgroundHealRoutine.run()
go globalBackgroundHealRoutine.run(ctx, objAPI)
// Launch the background healer sequence to track
// background healing operations
info := objAPI.StorageInfo(ctx)
info := objAPI.StorageInfo(ctx, false)
numDisks := info.Backend.OnlineDisks.Sum() + info.Backend.OfflineDisks.Sum()
nh := newBgHealSequence(numDisks)
globalBackgroundHealState.LaunchNewHealSequence(nh)
}
func initBackgroundHealing() {
go startBackgroundHealing()
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
go startBackgroundHealing(ctx, objAPI)
}
// bgHealDiskFormat - heals format.json, return value indicates if a
// healDiskFormat - heals format.json, return value indicates if a
// failure error occurred.
func bgHealDiskFormat(ctx context.Context, opts madmin.HealOpts) (madmin.HealResultItem, error) {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return madmin.HealResultItem{}, errServerNotInitialized
}
res, err := objectAPI.HealFormat(ctx, opts.DryRun)
func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpts) (madmin.HealResultItem, error) {
res, err := objAPI.HealFormat(ctx, opts.DryRun)
// return any error, ignore error returned when disks have
// already healed.
@@ -161,24 +149,3 @@ func bgHealDiskFormat(ctx context.Context, opts madmin.HealOpts) (madmin.HealRes
return res, nil
}
// bgHealBucket - traverses and heals the given bucket
func bgHealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return madmin.HealResultItem{}, errServerNotInitialized
}
return objectAPI.HealBucket(ctx, bucket, opts.DryRun, opts.Remove)
}
// bgHealObject - heal the given object and record result
func bgHealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return madmin.HealResultItem{}, errServerNotInitialized
}
return objectAPI.HealObject(ctx, bucket, object, opts.DryRun, opts.Remove, opts.ScanMode)
}


@@ -25,32 +25,19 @@ import (
const defaultMonitorNewDiskInterval = time.Minute * 10
func initLocalDisksAutoHeal() {
go monitorLocalDisksAndHeal()
func initLocalDisksAutoHeal(ctx context.Context, objAPI ObjectLayer) {
go monitorLocalDisksAndHeal(ctx, objAPI)
}
// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible to perform the heal
func monitorLocalDisksAndHeal() {
// Wait until the object layer is ready
var objAPI ObjectLayer
for {
objAPI = newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
time.Sleep(time.Second)
continue
}
break
}
func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
z, ok := objAPI.(*xlZones)
if !ok {
return
}
ctx := context.Background()
var bgSeq *healSequence
var found bool
@@ -62,64 +49,76 @@ func monitorLocalDisksAndHeal() {
time.Sleep(time.Second)
}
// Perform automatic disk healing when a new one is inserted
// Perform automatic disk healing when a disk is replaced locally.
for {
time.Sleep(defaultMonitorNewDiskInterval)
localDisksInZoneHeal := make([]Endpoints, len(z.zones))
for i, ep := range globalEndpoints {
localDisksToHeal := Endpoints{}
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
select {
case <-ctx.Done():
return
case <-time.After(defaultMonitorNewDiskInterval):
// Attempt a heal as the server starts up first.
localDisksInZoneHeal := make([]Endpoints, len(z.zones))
var healNewDisks bool
for i, ep := range globalEndpoints {
localDisksToHeal := Endpoints{}
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
_, _, err := connectEndpoint(endpoint)
if err == errUnformattedDisk {
localDisksToHeal = append(localDisksToHeal, endpoint)
}
}
if len(localDisksToHeal) == 0 {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
_, _, err := connectEndpoint(endpoint)
if err == errUnformattedDisk {
localDisksToHeal = append(localDisksToHeal, endpoint)
}
localDisksInZoneHeal[i] = localDisksToHeal
healNewDisks = true
}
if len(localDisksToHeal) == 0 {
// Reformat disks only if needed.
if !healNewDisks {
continue
}
localDisksInZoneHeal[i] = localDisksToHeal
}
// Reformat disks
bgSeq.sourceCh <- SlashSeparator
// Ensure that reformatting disks is finished
bgSeq.sourceCh <- nopHeal
// Reformat disks
bgSeq.sourceCh <- healSource{path: SlashSeparator}
var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
// Compute the list of erasure sets to heal
for i, localDisksToHeal := range localDisksInZoneHeal {
var erasureSetToHeal []int
for _, endpoint := range localDisksToHeal {
// Load the new format of this passed endpoint
_, format, err := connectEndpoint(endpoint)
if err != nil {
logger.LogIf(ctx, err)
continue
// Ensure that reformatting disks is finished
bgSeq.sourceCh <- healSource{path: nopHeal}
var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
// Compute the list of erasure sets to heal
for i, localDisksToHeal := range localDisksInZoneHeal {
var erasureSetToHeal []int
for _, endpoint := range localDisksToHeal {
// Load the new format of this passed endpoint
_, format, err := connectEndpoint(endpoint)
if err != nil {
logger.LogIf(ctx, err)
continue
}
// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[i].format, format)
if err != nil {
logger.LogIf(ctx, err)
continue
}
erasureSetToHeal = append(erasureSetToHeal, setIndex)
}
// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[i].format, format)
if err != nil {
logger.LogIf(ctx, err)
continue
}
erasureSetToHeal = append(erasureSetToHeal, setIndex)
erasureSetInZoneToHeal[i] = erasureSetToHeal
}
erasureSetInZoneToHeal[i] = erasureSetToHeal
}
// Heal all erasure sets that need healing
for i, erasureSetToHeal := range erasureSetInZoneToHeal {
for _, setIndex := range erasureSetToHeal {
err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex])
if err != nil {
logger.LogIf(ctx, err)
// Heal all erasure sets that need healing
for i, erasureSetToHeal := range erasureSetInZoneToHeal {
for _, setIndex := range erasureSetToHeal {
err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex])
if err != nil {
logger.LogIf(ctx, err)
}
}
}
}
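The rewritten loop replaces a bare time.Sleep with a select over ctx.Done() and time.After, so the monitor exits promptly on shutdown instead of finishing its sleep first. A stripped-down sketch of the pattern (interval and work are placeholders):

package main

import (
	"context"
	"fmt"
	"time"
)

// monitor runs work every interval until the context is cancelled.
func monitor(ctx context.Context, interval time.Duration, work func()) {
	for {
		select {
		case <-ctx.Done(): // shutdown requested, stop immediately
			return
		case <-time.After(interval):
			work()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()
	monitor(ctx, 100*time.Millisecond, func() { fmt.Println("checking local disks") })
}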


@@ -28,11 +28,6 @@ import (
humanize "github.com/dustin/go-humanize"
)
// Prepare XL/FS backend for benchmark.
func prepareBenchmarkBackend(instanceType string) (ObjectLayer, []string, error) {
return prepareTestBackend(instanceType)
}
// Benchmark utility functions for ObjectLayer.PutObject().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
@@ -40,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
@@ -81,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
object := getRandomObjectName()
// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
@@ -135,7 +130,9 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -149,7 +146,9 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -163,7 +162,9 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -180,7 +181,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
@@ -242,7 +243,9 @@ func generateBytesData(size int) []byte {
// creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -256,7 +259,9 @@ func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -273,7 +278,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
@@ -317,7 +322,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
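The benchmark changes thread a cancellable context into backend setup and drop the prepareBenchmarkBackend indirection. For orientation, a skeletal Go benchmark in the same shape; prepareBackend here is a hypothetical stand-in for prepareTestBackend:

package cmd

import (
	"context"
	"testing"
)

// prepareBackend is a hypothetical stand-in for prepareTestBackend.
func prepareBackend(ctx context.Context) (cleanup func(), err error) {
	return func() {}, nil
}

func BenchmarkPutObjectSketch(b *testing.B) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // tears down the backend when the benchmark ends
	cleanup, err := prepareBackend(ctx)
	if err != nil {
		b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
	}
	defer cleanup()

	b.ResetTimer() // exclude setup cost from the measurement
	for i := 0; i < b.N; i++ {
		// The PutObject call under test would go here.
	}
}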


@@ -18,7 +18,6 @@ package cmd
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"hash"
@@ -27,6 +26,14 @@ import (
"github.com/minio/minio/cmd/logger"
)
type errHashMismatch struct {
message string
}
func (err *errHashMismatch) Error() string {
return err.message
}
// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow *io.PipeWriter
@@ -132,9 +139,9 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
b.h.Write(buf)
if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
err = fmt.Errorf("hashes do not match expected %s, got %s",
hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))
logger.LogIf(context.Background(), err)
err := &errHashMismatch{fmt.Sprintf("hashes do not match expected %s, got %s",
hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
logger.LogIf(GlobalContext, err)
return 0, err
}
b.currOffset += int64(len(buf))
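The streaming reader re-hashes every chunk it reads and fails the read when the sum diverges from the one recorded at write time. A self-contained sketch of that check, with sha256 standing in for the HighwayHash algorithm MinIO uses:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// verifyChunk recomputes the chunk hash and rejects the read on mismatch.
func verifyChunk(chunk, expectedSum []byte) error {
	sum := sha256.Sum256(chunk)
	if !bytes.Equal(sum[:], expectedSum) {
		return fmt.Errorf("hashes do not match expected %x, got %x", expectedSum, sum)
	}
	return nil
}

func main() {
	chunk := []byte("object data chunk")
	sum := sha256.Sum256(chunk)
	fmt.Println(verifyChunk(chunk, sum[:])) // <nil>

	chunk[0] ^= 0xff // simulate a flipped bit on disk
	fmt.Println(verifyChunk(chunk, sum[:])) // hashes do not match ...
}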


@@ -17,7 +17,6 @@
package cmd
import (
"context"
"hash"
"io"
@@ -36,12 +35,12 @@ type wholeBitrotWriter struct {
func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
err := b.disk.AppendFile(b.volume, b.filePath, p)
if err != nil {
logger.LogIf(context.Background(), err)
logger.LogIf(GlobalContext, err)
return 0, err
}
_, err = b.Hash.Write(p)
if err != nil {
logger.LogIf(context.Background(), err)
logger.LogIf(GlobalContext, err)
return 0, err
}
return len(p), nil
@@ -70,14 +69,14 @@ func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error)
if b.buf == nil {
b.buf = make([]byte, b.tillOffset-offset)
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
ctx := context.Background()
ctx := GlobalContext
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
logger.LogIf(ctx, err)
return 0, err
}
}
if len(b.buf) < len(buf) {
logger.LogIf(context.Background(), errLessData)
logger.LogIf(GlobalContext, errLessData)
return 0, errLessData
}
n = copy(buf, b.buf)


@@ -17,7 +17,6 @@
package cmd
import (
"context"
"errors"
"hash"
"io"
@@ -72,7 +71,7 @@ func (a BitrotAlgorithm) New() hash.Hash {
hh, _ := highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
return hh
default:
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
logger.CriticalIf(GlobalContext, errors.New("Unsupported bitrot algorithm"))
return nil
}
}
@@ -88,7 +87,7 @@ func (a BitrotAlgorithm) Available() bool {
func (a BitrotAlgorithm) String() string {
name, ok := bitrotAlgorithms[a]
if !ok {
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
logger.CriticalIf(GlobalContext, errors.New("Unsupported bitrot algorithm"))
}
return name
}


@@ -29,7 +29,7 @@ import (
"time"
"github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/cmd/rest"
@@ -62,9 +62,9 @@ func (s1 ServerSystemConfig) Diff(s2 ServerSystemConfig) error {
return fmt.Errorf("Expected platform '%s', found to be running '%s'",
s1.MinioPlatform, s2.MinioPlatform)
}
if s1.MinioEndpoints.Nodes() != s2.MinioEndpoints.Nodes() {
return fmt.Errorf("Expected number of endpoints %d, seen %d", s1.MinioEndpoints.Nodes(),
s2.MinioEndpoints.Nodes())
if s1.MinioEndpoints.NEndpoints() != s2.MinioEndpoints.NEndpoints() {
return fmt.Errorf("Expected number of endpoints %d, seen %d", s1.MinioEndpoints.NEndpoints(),
s2.MinioEndpoints.NEndpoints())
}
for i, ep := range s1.MinioEndpoints {
@@ -110,7 +110,7 @@ func registerBootstrapRESTHandlers(router *mux.Router) {
httpTraceHdrs(server.VerifyHandler))
}
// client to talk to bootstrap Nodes.
// client to talk to bootstrap NEndpoints.
type bootstrapRESTClient struct {
endpoint Endpoint
restClient *rest.Client
@@ -126,7 +126,7 @@ func (client *bootstrapRESTClient) reConnect() {
// permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()
// after verifying format.json
func (client *bootstrapRESTClient) call(method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
return client.callWithContext(context.Background(), method, values, body, length)
return client.callWithContext(GlobalContext, method, values, body, length)
}
// Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected
@@ -247,7 +247,7 @@ func newBootstrapRESTClient(endpoint Endpoint) (*bootstrapRESTClient, error) {
}
}
trFn := newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout, rest.DefaultRESTTimeout)
trFn := newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)
restClient, err := rest.NewClient(serverURL, trFn, newAuthToken)
if err != nil {
return nil, err


@@ -0,0 +1,188 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"encoding/xml"
"fmt"
"io"
"net/http"
"github.com/gorilla/mux"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
"github.com/minio/minio/pkg/bucket/policy"
)
const (
// Bucket Encryption configuration file name.
bucketSSEConfig = "bucket-encryption.xml"
)
// PutBucketEncryptionHandler - Stores given bucket encryption configuration
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html
func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketEncryption")
defer logger.AuditLog(w, r, "PutBucketEncryption", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
// PutBucketEncryption API requires Content-MD5
if _, ok := r.Header[xhttp.ContentMD5]; !ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketEncryptionAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Parse bucket encryption xml
encConfig, err := validateBucketSSEConfig(io.LimitReader(r.Body, maxBucketSSEConfigSize))
if err != nil {
apiErr := APIError{
Code: "MalformedXML",
Description: fmt.Sprintf("%s (%s)", errorCodes[ErrMalformedXML].Description, err),
HTTPStatusCode: errorCodes[ErrMalformedXML].HTTPStatusCode,
}
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
return
}
// Return error if KMS is not initialized
if GlobalKMS == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL, guessIsBrowserReq(r))
return
}
// Store the bucket encryption configuration in the object layer
if err = objAPI.SetBucketSSEConfig(ctx, bucket, encConfig); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Update the in-memory bucket encryption cache
globalBucketSSEConfigSys.Set(bucket, encConfig)
// Update peer MinIO servers of the updated bucket encryption config
globalNotificationSys.SetBucketSSEConfig(ctx, bucket, encConfig)
writeSuccessResponseHeadersOnly(w)
}
// GetBucketEncryptionHandler - Returns bucket policy configuration
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html
func (api objectAPIHandlers) GetBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketEncryption")
defer logger.AuditLog(w, r, "GetBucketEncryption", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketEncryptionAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists
var err error
if _, err = objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Fetch bucket encryption configuration from object layer
var encConfig *bucketsse.BucketSSEConfig
if encConfig, err = objAPI.GetBucketSSEConfig(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
var encConfigData []byte
if encConfigData, err = xml.Marshal(encConfig); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write bucket encryption configuration to client
writeSuccessResponseXML(w, encConfigData)
}
// DeleteBucketEncryptionHandler - Removes bucket encryption configuration
func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketEncryption")
defer logger.AuditLog(w, r, "DeleteBucketEncryption", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketEncryptionAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists
var err error
if _, err = objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Delete bucket encryption config from object layer
if err = objAPI.DeleteBucketSSEConfig(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Remove entry from the in-memory bucket encryption cache
globalBucketSSEConfigSys.Remove(bucket)
// Update peer MinIO servers of the updated bucket encryption config
globalNotificationSys.RemoveBucketSSEConfig(ctx, bucket)
writeSuccessNoContent(w)
}
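PutBucketEncryptionHandler above rejects requests that lack a Content-MD5 header. A hedged sketch of the client-side requirement: compute the MD5 of the XML body and send it base64-encoded. The endpoint is illustrative, and a real request would also carry an AWS Signature V4:

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"net/http"
)

func main() {
	body := []byte(`<ServerSideEncryptionConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>AES256</SSEAlgorithm></ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>`)

	sum := md5.Sum(body)
	req, err := http.NewRequest(http.MethodPut, "http://localhost:9000/mybucket?encryption=", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	// The handler requires this header before it will parse the body.
	req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(sum[:]))
	fmt.Println(req.Method, req.URL, req.Header.Get("Content-MD5"))
}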

cmd/bucket-encryption.go (new file, 170 lines)

@@ -0,0 +1,170 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"encoding/xml"
"errors"
"io"
"path"
"sync"
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
)
// BucketSSEConfigSys - in-memory cache of bucket encryption config
type BucketSSEConfigSys struct {
sync.RWMutex
bucketSSEConfigMap map[string]*bucketsse.BucketSSEConfig
}
// NewBucketSSEConfigSys - Creates an empty in-memory bucket encryption configuration cache
func NewBucketSSEConfigSys() *BucketSSEConfigSys {
return &BucketSSEConfigSys{
bucketSSEConfigMap: make(map[string]*bucketsse.BucketSSEConfig),
}
}
// load - Loads the bucket encryption configuration for the given list of buckets
func (sys *BucketSSEConfigSys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
for _, bucket := range buckets {
config, err := objAPI.GetBucketSSEConfig(GlobalContext, bucket.Name)
if err != nil {
if _, ok := err.(BucketSSEConfigNotFound); ok {
continue
}
// Quorum errors should be returned.
return err
}
sys.Set(bucket.Name, config)
}
return nil
}
// Init - Initializes in-memory bucket encryption config cache for the given list of buckets
func (sys *BucketSSEConfigSys) Init(buckets []BucketInfo, objAPI ObjectLayer) error {
if objAPI == nil {
return errServerNotInitialized
}
// We don't cache bucket encryption config in gateway mode, nothing to do.
if globalIsGateway {
return nil
}
// Load bucket encryption config cache once during boot.
return sys.load(buckets, objAPI)
}
// Get - gets bucket encryption config for the given bucket.
func (sys *BucketSSEConfigSys) Get(bucket string) (config *bucketsse.BucketSSEConfig, ok bool) {
// We don't cache bucket encryption config in gateway mode.
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
return
}
cfg, err := objAPI.GetBucketSSEConfig(GlobalContext, bucket)
if err != nil {
return
}
return cfg, true
}
sys.Lock()
defer sys.Unlock()
config, ok = sys.bucketSSEConfigMap[bucket]
return
}
// Set - sets bucket encryption config to given bucket name.
func (sys *BucketSSEConfigSys) Set(bucket string, config *bucketsse.BucketSSEConfig) {
// We don't cache bucket encryption config in gateway mode.
if globalIsGateway {
return
}
sys.Lock()
defer sys.Unlock()
sys.bucketSSEConfigMap[bucket] = config
}
// Remove - removes bucket encryption config for given bucket.
func (sys *BucketSSEConfigSys) Remove(bucket string) {
sys.Lock()
defer sys.Unlock()
delete(sys.bucketSSEConfigMap, bucket)
}
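BucketSSEConfigSys is a mutex-guarded map keyed by bucket name. A generic sketch of the same cache shape, with string config values standing in for *bucketsse.BucketSSEConfig:

package main

import (
	"fmt"
	"sync"
)

// configCache mirrors the Get/Set/Remove surface of BucketSSEConfigSys.
type configCache struct {
	mu      sync.RWMutex
	configs map[string]string
}

func newConfigCache() *configCache {
	return &configCache{configs: make(map[string]string)}
}

func (c *configCache) Get(bucket string) (string, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	cfg, ok := c.configs[bucket]
	return cfg, ok
}

func (c *configCache) Set(bucket, cfg string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.configs[bucket] = cfg
}

func (c *configCache) Remove(bucket string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.configs, bucket)
}

func main() {
	cache := newConfigCache()
	cache.Set("mybucket", "<ServerSideEncryptionConfiguration/>")
	fmt.Println(cache.Get("mybucket"))
	cache.Remove("mybucket")
	fmt.Println(cache.Get("mybucket"))
}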
// saveBucketSSEConfig - save bucket encryption config for given bucket.
func saveBucketSSEConfig(ctx context.Context, objAPI ObjectLayer, bucket string, config *bucketsse.BucketSSEConfig) error {
data, err := xml.Marshal(config)
if err != nil {
return err
}
// Path to store bucket encryption config for the given bucket.
configFile := path.Join(bucketConfigPrefix, bucket, bucketSSEConfig)
return saveConfig(ctx, objAPI, configFile, data)
}
// getBucketSSEConfig - get bucket encryption config for given bucket.
func getBucketSSEConfig(objAPI ObjectLayer, bucket string) (*bucketsse.BucketSSEConfig, error) {
// Path to bucket-encryption.xml for the given bucket.
configFile := path.Join(bucketConfigPrefix, bucket, bucketSSEConfig)
configData, err := readConfig(GlobalContext, objAPI, configFile)
if err != nil {
if err == errConfigNotFound {
err = BucketSSEConfigNotFound{Bucket: bucket}
}
return nil, err
}
return bucketsse.ParseBucketSSEConfig(bytes.NewReader(configData))
}
// removeBucketSSEConfig - removes bucket encryption config for given bucket.
func removeBucketSSEConfig(ctx context.Context, objAPI ObjectLayer, bucket string) error {
// Path to bucket-encryption.xml for the given bucket.
configFile := path.Join(bucketConfigPrefix, bucket, bucketSSEConfig)
if err := deleteConfig(ctx, objAPI, configFile); err != nil {
if err == errConfigNotFound {
return BucketSSEConfigNotFound{Bucket: bucket}
}
return err
}
return nil
}
// validateBucketSSEConfig parses bucket encryption configuration and validates if it is supported by MinIO.
func validateBucketSSEConfig(r io.Reader) (*bucketsse.BucketSSEConfig, error) {
encConfig, err := bucketsse.ParseBucketSSEConfig(r)
if err != nil {
return nil, err
}
if len(encConfig.Rules) == 1 && encConfig.Rules[0].DefaultEncryptionAction.Algorithm == bucketsse.AES256 {
return encConfig, nil
}
return nil, errors.New("Unsupported bucket encryption configuration")
}


@@ -0,0 +1,70 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"errors"
"testing"
)
func TestValidateBucketSSEConfig(t *testing.T) {
testCases := []struct {
inputXML string
expectedErr error
shouldPass bool
}{
// MinIO supported XML
{
inputXML: `<ServerSideEncryptionConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Rule>
<ApplyServerSideEncryptionByDefault>
<SSEAlgorithm>AES256</SSEAlgorithm>
</ApplyServerSideEncryptionByDefault>
</Rule>
</ServerSideEncryptionConfiguration>`,
expectedErr: nil,
shouldPass: true,
},
// Unsupported XML
{
inputXML: `<ServerSideEncryptionConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Rule>
<ApplyServerSideEncryptionByDefault>
<SSEAlgorithm>aws:kms</SSEAlgorithm>
<KMSMasterKeyID>arn:aws:kms:us-east-1:1234/5678example</KMSMasterKeyID>
</ApplyServerSideEncryptionByDefault>
</Rule>
</ServerSideEncryptionConfiguration>`,
expectedErr: errors.New("Unsupported bucket encryption configuration"),
shouldPass: false,
},
}
for i, tc := range testCases {
_, err := validateBucketSSEConfig(bytes.NewReader([]byte(tc.inputXML)))
if tc.shouldPass && err != nil {
t.Fatalf("Test case %d: Expected to succeed but got %s", i+1, err)
}
if !tc.shouldPass {
if err == nil || err != nil && err.Error() != tc.expectedErr.Error() {
t.Fatalf("Test case %d: Expected %s but got %s", i+1, tc.expectedErr, err)
}
}
}
}


@@ -24,7 +24,7 @@ import (
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/policy"
"github.com/minio/minio/pkg/bucket/policy"
)
// Validate all the ListObjects query arguments, returns an APIErrorCode


@@ -1,5 +1,5 @@
/*
* MinIO Cloud Storage, (C) 2015, 2016, 2017, 2018 MinIO, Inc.
* MinIO Cloud Storage, (C) 2015-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,12 +17,10 @@
package cmd
import (
"context"
"encoding/base64"
"encoding/xml"
"fmt"
"io"
"net"
"net/http"
"net/url"
"path"
@@ -32,23 +30,24 @@ import (
"github.com/gorilla/mux"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio-go/v6/pkg/tags"
"github.com/minio/minio/cmd/config/etcd/dns"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/hash"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/policy"
"github.com/minio/minio/pkg/sync/errgroup"
)
const (
getBucketVersioningResponse = `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>`
objectLockConfig = "object-lock.xml"
bucketObjectLockEnabledConfigFile = "object-lock-enabled.json"
bucketObjectLockEnabledConfig = `{"x-amz-bucket-object-lock-enabled":true}`
getBucketVersioningResponse = `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>`
objectLockConfig = "object-lock.xml"
bucketTaggingConfigFile = "tagging.xml"
)
// Check if there are buckets on server without corresponding entry in etcd backend and
@@ -70,83 +69,78 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// Get buckets in the DNS
dnsBuckets, err := globalDNSConfig.List()
if err != nil && err != dns.ErrNoEntriesFound {
logger.LogIf(context.Background(), err)
logger.LogIf(GlobalContext, err)
return
}
bucketSet := set.NewStringSet()
bucketsSet := set.NewStringSet()
bucketsToBeUpdated := set.NewStringSet()
bucketsInConflict := set.NewStringSet()
for _, bucket := range buckets {
bucketsSet.Add(bucket.Name)
r, ok := dnsBuckets[bucket.Name]
if !ok {
bucketsToBeUpdated.Add(bucket.Name)
continue
}
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
// No difference in terms of domainIPs and nothing
// has changed so we don't change anything on the etcd.
continue
}
// if domain IPs intersect then it won't be an empty set.
// such an intersection means that bucket exists on etcd.
// but if we do see a difference with local domain IPs with
// hostSlice from etcd then we should update with newer
// domainIPs, we proceed to do that here.
bucketsToBeUpdated.Add(bucket.Name)
continue
}
// No IPs seem to intersect, this means that bucket exists but has
// different IP addresses perhaps from a different deployment.
// bucket names are globally unique in federation at a given
// path prefix, name collision is not allowed. We simply log
// an error and continue.
bucketsInConflict.Add(bucket.Name)
}
// Add buckets that are not registered with the DNS
// Add/update buckets that are not registered with the DNS
g := errgroup.WithNErrs(len(buckets))
for index := range buckets {
bucketSet.Add(buckets[index].Name)
bucketsToBeUpdatedSlice := bucketsToBeUpdated.ToSlice()
for index := range bucketsToBeUpdatedSlice {
index := index
g.Go(func() error {
r, gerr := globalDNSConfig.Get(buckets[index].Name)
if gerr != nil {
if gerr == dns.ErrNoEntriesFound {
return globalDNSConfig.Put(buckets[index].Name)
}
return gerr
}
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
// No difference in terms of domainIPs and nothing
// has changed so we don't change anything on the etcd.
return nil
}
// if domain IPs intersect then it won't be an empty set.
// such an intersection means that bucket exists on etcd.
// but if we do see a difference with local domain IPs with
// hostSlice from etcd then we should update with newer
// domainIPs, we proceed to do that here.
return globalDNSConfig.Put(buckets[index].Name)
}
// No IPs seem to intersect, this means that bucket exists but has
// different IP addresses perhaps from a different deployment.
// bucket names are globally unique in federation at a given
// path prefix, name collision is not allowed. We simply log
// an error and continue.
return fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket. Use one of these IP addresses %v to access the bucket", buckets[index].Name, globalDomainIPs.ToSlice())
return globalDNSConfig.Put(bucketsToBeUpdatedSlice[index])
}, index)
}
for _, err := range g.Wait() {
if err != nil {
logger.LogIf(context.Background(), err)
logger.LogIf(GlobalContext, err)
}
}
g = errgroup.WithNErrs(len(dnsBuckets))
// Remove buckets that are in DNS for this server, but aren't local
for index := range dnsBuckets {
index := index
g.Go(func() error {
// This is a local bucket that exists, so we can continue
if bucketSet.Contains(dnsBuckets[index].Key) {
return nil
}
// This is not for our server, so we can continue
hostPort := net.JoinHostPort(dnsBuckets[index].Host, string(dnsBuckets[index].Port))
if globalDomainIPs.Intersection(set.CreateStringSet(hostPort)).IsEmpty() {
return nil
}
// We go to here, so we know the bucket no longer exists,
// but is registered in DNS to this server
if err := globalDNSConfig.DeleteRecord(dnsBuckets[index]); err != nil {
return fmt.Errorf("Failed to remove DNS entry for %s due to %w",
dnsBuckets[index].Key, err)
}
return nil
}, index)
for _, bucket := range bucketsInConflict.ToSlice() {
logger.LogIf(GlobalContext, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket. Use one of these IP addresses %v to access the bucket", bucket, globalDomainIPs.ToSlice()))
}
for _, err := range g.Wait() {
if err != nil {
logger.LogIf(context.Background(), err)
// Remove buckets that are in DNS for this server, but aren't local
for bucket, records := range dnsBuckets {
if bucketsSet.Contains(bucket) {
continue
}
if globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(records)...)).IsEmpty() {
// This is not for our server, so we can continue
continue
}
// If we reach here, the bucket no longer exists locally,
// but is still registered in DNS for this server
if err = globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
bucket, err))
}
}
}
@@ -271,7 +265,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
listBuckets := objectAPI.ListBuckets
accessKey, owner, s3Error := checkRequestAuthTypeToAccessKey(ctx, r, policy.ListAllMyBucketsAction, "", "")
if s3Error != ErrNone {
if s3Error != ErrNone && s3Error != ErrAccessDenied {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
@@ -284,16 +278,11 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
bucketSet := set.NewStringSet()
for _, dnsRecord := range dnsBuckets {
if bucketSet.Contains(dnsRecord.Key) {
continue
}
for _, dnsRecords := range dnsBuckets {
bucketsInfo = append(bucketsInfo, BucketInfo{
Name: dnsRecord.Key,
Created: dnsRecord.CreationDate,
Name: dnsRecords[0].Key,
Created: dnsRecords[0].CreationDate,
})
bucketSet.Add(dnsRecord.Key)
}
} else {
// Invoke the list buckets.
@@ -305,32 +294,43 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
}
}
// Set prefix value for "s3:prefix" policy conditionals.
r.Header.Set("prefix", "")
if s3Error == ErrAccessDenied {
// Set prefix value for "s3:prefix" policy conditionals.
r.Header.Set("prefix", "")
// Set delimiter value for "s3:delimiter" policy conditionals.
r.Header.Set("delimiter", SlashSeparator)
// Set delimiter value for "s3:delimiter" policy conditionals.
r.Header.Set("delimiter", SlashSeparator)
// err will be nil here as we already called this function
// earlier in this request.
claims, _ := getClaimsFromToken(r)
var newBucketsInfo []BucketInfo
for _, bucketInfo := range bucketsInfo {
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Action: iampolicy.ListBucketAction,
BucketName: bucketInfo.Name,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
newBucketsInfo = append(newBucketsInfo, bucketInfo)
// err will be nil here as we already called this function
// earlier in this request.
claims, _ := getClaimsFromToken(r, getSessionToken(r))
n := 0
// Use the following trick to filter in place
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
for _, bucketInfo := range bucketsInfo {
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Action: iampolicy.ListBucketAction,
BucketName: bucketInfo.Name,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
bucketsInfo[n] = bucketInfo
n++
}
}
bucketsInfo = bucketsInfo[:n]
// If no buckets remain after filtering, return an access denied error.
if len(bucketsInfo) == 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
}
// Generate response.
response := generateListBucketsResponse(newBucketsInfo)
response := generateListBucketsResponse(bucketsInfo)
encodedSuccessResponse := encodeResponse(response)
// Write response.
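The in-place filter used above is the standard Go slice trick linked in the comment: keep a write index n, copy each element that passes the predicate, then truncate. A minimal standalone sketch (names here are illustrative):
package main
import "fmt"
// filterInPlace keeps only elements satisfying keep, reusing the
// backing array so no second slice is allocated.
func filterInPlace(s []string, keep func(string) bool) []string {
	n := 0
	for _, v := range s {
		if keep(v) {
			s[n] = v
			n++
		}
	}
	return s[:n]
}
func main() {
	buckets := []string{"public", "private", "public-logs"}
	allowed := filterInPlace(buckets, func(b string) bool {
		return b != "private" // stand-in for the IAM IsAllowed check
	})
	fmt.Println(allowed) // [public public-logs]
}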
@@ -352,6 +352,13 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
return
}
// Content-Md5 is required and should be set
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if _, ok := r.Header[xhttp.ContentMD5]; !ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
return
}
// Content-Length is required and should be non-zero
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if r.ContentLength <= 0 {
@@ -359,34 +366,14 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
return
}
// Content-Md5 is required and should be set
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if _, ok := r.Header["Content-Md5"]; !ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
return
}
// Allocate incoming content length bytes.
var deleteXMLBytes []byte
const maxBodySize = 2 * 1000 * 1024 // The max. XML contains 1000 object names (each at most 1024 bytes long) + XML overhead
if r.ContentLength > maxBodySize { // Only allocated memory for at most 1000 objects
deleteXMLBytes = make([]byte, maxBodySize)
} else {
deleteXMLBytes = make([]byte, r.ContentLength)
}
// Read incoming body XML bytes.
if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
logger.LogIf(ctx, err, logger.Application)
writeErrorResponse(ctx, w, toAdminAPIErr(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// The max. XML contains 100000 object names (each at most 1024 bytes long) + XML overhead
const maxBodySize = 2 * 100000 * 1024
// Unmarshal list of keys to be deleted.
deleteObjects := &DeleteObjectsRequest{}
if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
if err := xmlDecoder(r.Body, deleteObjects, maxBodySize); err != nil {
logger.LogIf(ctx, err, logger.Application)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
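xmlDecoder replaces the manual buffer allocation with a size-capped stream decode. Its exact implementation is not shown in this diff, but the shape is a LimitReader feeding encoding/xml; a sketch under that assumption (decodeXMLBounded is a hypothetical name):
package main
import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)
// decodeXMLBounded decodes at most maxSize bytes of XML from r into v,
// without pre-allocating a Content-Length sized buffer.
func decodeXMLBounded(r io.Reader, v interface{}, maxSize int64) error {
	return xml.NewDecoder(io.LimitReader(r, maxSize)).Decode(v)
}
func main() {
	type object struct {
		Key string `xml:"Key"`
	}
	type deleteReq struct {
		Objects []object `xml:"Object"`
	}
	body := strings.NewReader(`<Delete><Object><Key>a.txt</Key></Object></Delete>`)
	var req deleteReq
	if err := decodeXMLBounded(body, &req, 2*100000*1024); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(req.Objects[0].Key) // a.txt
}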
@@ -411,11 +398,14 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}
continue
}
govBypassPerms := checkRequestAuthType(ctx, r, policy.BypassGovernanceRetentionAction, bucket, object.ObjectName)
if _, err := enforceRetentionBypassForDelete(ctx, r, bucket, object.ObjectName, getObjectInfoFn, govBypassPerms); err != ErrNone {
dErrs[index] = err
continue
if _, ok := globalBucketObjectLockSys.Get(bucket); ok {
if err := enforceRetentionBypassForDelete(ctx, r, bucket, object.ObjectName, getObjectInfoFn); err != ErrNone {
dErrs[index] = err
continue
}
}
// Avoid duplicate objects; we use a map to filter them out.
if _, ok := objectsToDelete[object.ObjectName]; !ok {
objectsToDelete[object.ObjectName] = index
@@ -533,24 +523,17 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
}
if globalDNSConfig != nil {
if _, err := globalDNSConfig.Get(bucket); err != nil {
sr, err := globalDNSConfig.Get(bucket)
if err != nil {
if err == dns.ErrNoEntriesFound {
// Proceed to creating a bucket.
if err = objectAPI.MakeBucketWithLocation(ctx, bucket, location); err != nil {
if err = objectAPI.MakeBucketWithLocation(ctx, bucket, location, objectLockEnabled); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if objectLockEnabled {
configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
if err = saveConfig(ctx, objectAPI, configFile, []byte(bucketObjectLockEnabledConfig)); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
if err = globalDNSConfig.Put(bucket); err != nil {
objectAPI.DeleteBucket(ctx, bucket)
objectAPI.DeleteBucket(ctx, bucket, false)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -566,25 +549,29 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
return
}
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBucketAlreadyOwnedByYou), r.URL, guessIsBrowserReq(r))
apiErr := ErrBucketAlreadyExists
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(sr)...)).IsEmpty() {
apiErr = ErrBucketAlreadyOwnedByYou
}
// No IPs intersect, which means the bucket exists but has
// different IP addresses, perhaps from a different deployment.
// Bucket names are globally unique in a federation at a given
// path prefix; name collisions are not allowed. Return the appropriate error.
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErr), r.URL, guessIsBrowserReq(r))
return
}
// Proceed to creating a bucket.
err := objectAPI.MakeBucketWithLocation(ctx, bucket, location)
err := objectAPI.MakeBucketWithLocation(ctx, bucket, location, objectLockEnabled)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if objectLockEnabled {
configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
if err = saveConfig(ctx, objectAPI, configFile, []byte(bucketObjectLockEnabledConfig)); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
globalBucketObjectLockConfig.Set(bucket, Retention{})
globalNotificationSys.PutBucketObjectLockConfig(ctx, bucket, Retention{})
if objectLockEnabled && !globalIsGateway {
ret := &objectlock.Retention{}
globalBucketObjectLockSys.Set(bucket, ret)
globalNotificationSys.PutBucketObjectLockConfig(ctx, bucket, ret)
}
// Make sure to add Location information here only for bucket
@@ -620,7 +607,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
bucket := mux.Vars(r)["bucket"]
// To detect if the client has disconnected.
r.Body = &detectDisconnect{r.Body, r.Context().Done()}
r.Body = &contextReader{r.Body, r.Context()}
// Require Content-Length to be set in the request
size := r.ContentLength
@@ -756,10 +743,12 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
rawReader := hashReader
pReader := NewPutObjReader(rawReader, nil, nil)
var objectEncryptionKey []byte
var objectEncryptionKey crypto.ObjectKey
// Check if bucket encryption is enabled
_, encEnabled := globalBucketSSEConfigSys.Get(bucket)
// This request header needs to be set prior to setting ObjectOptions
if globalAutoEncryption && !crypto.SSEC.IsRequested(r.Header) {
if (globalAutoEncryption || encEnabled) && !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
}
// get gateway encryption options
@@ -796,7 +785,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
pReader = NewPutObjReader(rawReader, hashReader, objectEncryptionKey)
pReader = NewPutObjReader(rawReader, hashReader, &objectEncryptionKey)
}
}
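The auto-encryption change means a bucket-level SSE configuration now triggers the same request mutation as the global flag: the standard S3 SSE header is injected before ObjectOptions are built. A hedged sketch of that decision (the header name is the standard x-amz-server-side-encryption; the boolean parameter names are illustrative):
package main
import (
	"fmt"
	"net/http"
)
// maybeForceSSE injects the SSE-S3 header when either server-wide
// auto-encryption or the bucket's encryption config asks for it,
// unless the client already requested SSE-C.
func maybeForceSSE(h http.Header, autoEncrypt, bucketEncrypt, ssecRequested bool) {
	if (autoEncrypt || bucketEncrypt) && !ssecRequested {
		h.Add("X-Amz-Server-Side-Encryption", "AES256")
	}
}
func main() {
	h := http.Header{}
	maybeForceSSE(h, false, true, false)
	fmt.Println(h.Get("X-Amz-Server-Side-Encryption")) // AES256
}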
@@ -895,15 +884,39 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
forceDelete := false
if value := r.Header.Get(xhttp.MinIOForceDelete); value != "" {
switch value {
case "true":
forceDelete = true
case "false":
default:
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL, guessIsBrowserReq(r))
return
}
}
if forceDelete {
if s3Error := checkRequestAuthType(ctx, r, policy.ForceDeleteBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
} else {
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
}
if _, ok := globalBucketObjectLockSys.Get(bucket); ok && forceDelete {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
return
}
deleteBucket := objectAPI.DeleteBucket
// Attempt to delete bucket.
if err := deleteBucket(ctx, bucket); err != nil {
if err := deleteBucket(ctx, bucket, forceDelete); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
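On the client side, this new path is driven entirely by one request header (xhttp.MinIOForceDelete, presumably x-minio-force-delete), and the handler above rejects any value other than the exact strings "true" and "false". A hypothetical request sketch:
package main
import (
	"fmt"
	"net/http"
)
func main() {
	req, err := http.NewRequest(http.MethodDelete, "https://minio.example.com/mybucket", nil)
	if err != nil {
		panic(err)
	}
	// Header name assumed from the xhttp.MinIOForceDelete constant; only
	// "true" and "false" are accepted by the handler above.
	req.Header.Set("x-minio-force-delete", "true")
	fmt.Println(req.Header.Get("x-minio-force-delete"))
	// The request would still need to be signed (SigV4) before sending.
}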
@@ -911,7 +924,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
if globalDNSConfig != nil {
if err := globalDNSConfig.Delete(bucket); err != nil {
// Deleting DNS entry failed, attempt to create the bucket again.
objectAPI.MakeBucketWithLocation(ctx, bucket, "")
objectAPI.MakeBucketWithLocation(ctx, bucket, "", false)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -996,54 +1009,31 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
return
}
// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketObjectLockConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
config, err := parseObjectLockConfig(r.Body)
config, err := objectlock.ParseObjectLockConfig(r.Body)
if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
apiErr.Description = err.Error()
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
return
}
configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
configData, err := readConfig(ctx, objectAPI, configFile)
if err != nil {
aerr := toAPIError(ctx, err)
if err == errConfigNotFound {
aerr = errorCodes.ToAPIErr(ErrMethodNotAllowed)
}
writeErrorResponse(ctx, w, aerr, r.URL, guessIsBrowserReq(r))
if err = objectAPI.SetBucketObjectLockConfig(ctx, bucket, config); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if string(configData) != bucketObjectLockEnabledConfig {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL, guessIsBrowserReq(r))
return
}
data, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configFile = path.Join(bucketConfigPrefix, bucket, objectLockConfig)
if err = saveConfig(ctx, objectAPI, configFile, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if config.Rule != nil {
retention := config.ToRetention()
globalBucketObjectLockConfig.Set(bucket, retention)
globalBucketObjectLockSys.Set(bucket, retention)
globalNotificationSys.PutBucketObjectLockConfig(ctx, bucket, retention)
} else {
globalBucketObjectLockConfig.Remove(bucket)
globalNotificationSys.RemoveBucketObjectLockConfig(ctx, bucket)
globalBucketObjectLockSys.Set(bucket, &objectlock.Retention{})
globalNotificationSys.PutBucketObjectLockConfig(ctx, bucket, &objectlock.Retention{})
}
// Write success response.
@@ -1073,38 +1063,125 @@ func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWri
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
configData, err := readConfig(ctx, objectAPI, configFile)
lkCfg, err := objectAPI.GetBucketObjectLockConfig(ctx, bucket)
if err != nil {
var aerr APIError
if err == errConfigNotFound {
aerr = errorCodes.ToAPIErr(ErrMethodNotAllowed)
} else {
aerr = toAPIError(ctx, err)
}
writeErrorResponse(ctx, w, aerr, r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if string(configData) != bucketObjectLockEnabledConfig {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL, guessIsBrowserReq(r))
return
}
configFile = path.Join(bucketConfigPrefix, bucket, objectLockConfig)
configData, err = readConfig(ctx, objectAPI, configFile)
configData, err := xml.Marshal(lkCfg)
if err != nil {
if err != errConfigNotFound {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if configData, err = xml.Marshal(newObjectLockConfig()); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write success response.
writeSuccessResponseXML(w, configData)
}
// PutBucketTaggingHandler - PUT Bucket tagging.
// ----------
func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketTagging")
defer logger.AuditLog(w, r, "PutBucketTagging", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketTaggingAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
tags, err := tags.ParseBucketXML(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
apiErr.Description = err.Error()
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
return
}
if err = objectAPI.SetBucketTagging(ctx, bucket, tags); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write success response.
writeSuccessResponseHeadersOnly(w)
}
// GetBucketTaggingHandler - GET Bucket tagging.
// ----------
func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketTagging")
defer logger.AuditLog(w, r, "GetBucketTagging", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
// check if user has permissions to perform this operation
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketTaggingAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
t, err := objectAPI.GetBucketTagging(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configData, err := xml.Marshal(t)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write success response.
writeSuccessResponseXML(w, configData)
}
// DeleteBucketTaggingHandler - DELETE Bucket tagging.
// ----------
func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketTagging")
defer logger.AuditLog(w, r, "DeleteBucketTagging", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketTaggingAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
if err := objectAPI.DeleteBucketTagging(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write success response.
writeSuccessResponseHeadersOnly(w)
}
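The tagging handlers above round-trip the standard S3 Tagging document. A minimal sketch of that XML shape with encoding/xml (the struct layout here is illustrative, not the tags package's internal representation):
package main
import (
	"encoding/xml"
	"fmt"
)
type tag struct {
	Key   string `xml:"Key"`
	Value string `xml:"Value"`
}
type tagging struct {
	XMLName xml.Name `xml:"Tagging"`
	TagSet  []tag    `xml:"TagSet>Tag"`
}
func main() {
	t := tagging{TagSet: []tag{{Key: "project", Value: "alpha"}}}
	out, err := xml.Marshal(t)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// <Tagging><TagSet><Tag><Key>project</Key><Value>alpha</Value></Tag></TagSet></Tagging>
}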


@@ -18,7 +18,6 @@ package cmd
import (
"bytes"
"context"
"encoding/xml"
"io/ioutil"
"net/http"
@@ -29,6 +28,51 @@ import (
"github.com/minio/minio/pkg/auth"
)
// Wrapper for calling RemoveBucket HTTP handler tests for both XL multiple disks and single node setup.
func TestRemoveBucketHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testRemoveBucketHandler, []string{"RemoveBucket"})
}
func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
_, err := obj.PutObject(GlobalContext, bucketName, "test-object", mustGetPutObjReader(t, bytes.NewBuffer([]byte{}), int64(0), "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), ObjectOptions{})
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Error uploading object: <ERROR> %v", err)
}
// initialize httptest Recorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for DELETE bucket.
req, err := newTestSignedRequestV4("DELETE", getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for RemoveBucketHandler: <ERROR> %v", instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
switch rec.Code {
case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent:
t.Fatalf("Test %v: expected failure, but succeeded with %v", instanceType, rec.Code)
}
// Verify response of the V2 signed HTTP request.
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for DELETE bucket.
reqV2, err := newTestSignedRequestV2("DELETE", getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for RemoveBucketHandler: <ERROR> %v", instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV2, reqV2)
switch recV2.Code {
case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent:
t.Fatalf("Test %v: expected failure, but succeeded with %v", instanceType, recV2.Code)
}
}
// Wrapper for calling GetBucketLocation HTTP handler tests for both XL multiple disks and single node setup.
func TestGetBucketLocationHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testGetBucketLocationHandler, []string{"GetBucketLocation"})
@@ -625,7 +669,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
for i := 0; i < 10; i++ {
objectName := "test-object-" + strconv.Itoa(i)
// uploading the object.
_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
_, err = obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)


@@ -22,9 +22,10 @@ import (
"net/http"
"github.com/gorilla/mux"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/lifecycle"
"github.com/minio/minio/pkg/policy"
"github.com/minio/minio/pkg/bucket/lifecycle"
"github.com/minio/minio/pkg/bucket/policy"
)
const (
@@ -48,6 +49,12 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
vars := mux.Vars(r)
bucket := vars["bucket"]
// PutBucketLifecycle always needs a Content-Md5
if _, ok := r.Header[xhttp.ContentMD5]; !ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketLifecycleAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
@@ -59,15 +66,9 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
return
}
// PutBucketLifecycle always needs a Content-Md5
if _, ok := r.Header["Content-Md5"]; !ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
return
}
bucketLifecycle, err := lifecycle.ParseLifecycleConfig(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -76,7 +77,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
return
}
globalLifecycleSys.Set(bucket, *bucketLifecycle)
globalLifecycleSys.Set(bucket, bucketLifecycle)
globalNotificationSys.SetBucketLifecycle(ctx, bucket, bucketLifecycle)
// Success.


@@ -0,0 +1,303 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"encoding/xml"
"net/http"
"net/http/httptest"
"testing"
"github.com/minio/minio/pkg/auth"
)
// Test S3 Bucket lifecycle APIs with wrong credentials
func TestBucketLifecycleWrongCredentials(t *testing.T) {
ExecObjectLayerAPITest(t, testBucketLifecycleHandlersWrongCredentials, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
}
// Test for authentication
func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
// test cases with sample input and expected output.
testCases := []struct {
method string
bucketName string
accessKey string
secretKey string
// Sent body
body []byte
// Expected response
expectedRespStatus int
lifecycleResponse []byte
errorResponse APIErrorResponse
shouldPass bool
}{
// GET empty credentials
{
method: "GET", bucketName: bucketName,
accessKey: "",
secretKey: "",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "AccessDenied",
Message: "Access Denied.",
},
shouldPass: false,
},
// GET wrong credentials
{
method: "GET", bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidAccessKeyId",
Message: "The access key ID you provided does not exist in our records.",
},
shouldPass: false,
},
// PUT empty credentials
{
method: "PUT",
bucketName: bucketName,
accessKey: "",
secretKey: "",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "AccessDenied",
Message: "Access Denied.",
},
shouldPass: false,
},
// PUT wrong credentials
{
method: "PUT",
bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidAccessKeyId",
Message: "The access key ID you provided does not exist in our records.",
},
shouldPass: false,
},
// DELETE empty credentials
{
method: "DELETE",
bucketName: bucketName,
accessKey: "",
secretKey: "",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "AccessDenied",
Message: "Access Denied.",
},
shouldPass: false,
},
// DELETE wrong credentials
{
method: "DELETE",
bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidAccessKeyId",
Message: "The access key ID you provided does not exist in our records.",
},
shouldPass: false,
},
}
testBucketLifecycle(obj, instanceType, bucketName, apiRouter, t, testCases)
}
// Test S3 Bucket lifecycle APIs
func TestBucketLifecycle(t *testing.T) {
ExecObjectLayerAPITest(t, testBucketLifecycleHandlers, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
}
// Simple tests of bucket lifecycle: PUT, GET, DELETE.
// Tests are related and the order is important.
func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
creds auth.Credentials, t *testing.T) {
// test cases with sample input and expected output.
testCases := []struct {
method string
bucketName string
accessKey string
secretKey string
// Sent body
body []byte
// Expected response
expectedRespStatus int
lifecycleResponse []byte
errorResponse APIErrorResponse
shouldPass bool
}{
// Test case - 1.
// Filter contains more than (Prefix,Tag,And) rule
{
method: "PUT",
bucketName: bucketName,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
body: []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix><Tag><Key>Key1</Key><Value>Value1</Value></Tag></Filter><Status>Enabled</Status><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
expectedRespStatus: http.StatusBadRequest,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidRequest",
Message: "Filter must have exactly one of Prefix, Tag, or And specified",
},
shouldPass: false,
},
// Date contains wrong format
{
method: "PUT",
bucketName: bucketName,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
body: []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix><Tag><Key>Key1</Key><Value>Value1</Value></Tag></Filter><Status>Enabled</Status><Expiration><Date>365</Date></Expiration></Rule></LifecycleConfiguration>`),
expectedRespStatus: http.StatusBadRequest,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidRequest",
Message: "Date must be provided in ISO 8601 format",
},
shouldPass: false,
},
{
method: "PUT",
bucketName: bucketName,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
body: []byte(`<?xml version="1.0" encoding="UTF-8"?><LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix></Filter><Status>Enabled</Status><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
expectedRespStatus: http.StatusOK,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{},
shouldPass: true,
},
{
method: "GET",
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
bucketName: bucketName,
body: []byte(``),
expectedRespStatus: http.StatusOK,
lifecycleResponse: []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Status>Enabled</Status><Filter><Prefix>logs/</Prefix></Filter><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
errorResponse: APIErrorResponse{},
shouldPass: true,
},
{
method: "DELETE",
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
bucketName: bucketName,
body: []byte(``),
expectedRespStatus: http.StatusNoContent,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{},
shouldPass: true,
},
{
method: "GET",
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
bucketName: bucketName,
body: []byte(``),
expectedRespStatus: http.StatusNotFound,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "NoSuchLifecycleConfiguration",
Message: "The lifecycle configuration does not exist",
},
shouldPass: false,
},
}
testBucketLifecycle(obj, instanceType, bucketName, apiRouter, t, testCases)
}
// testBucketLifecycle is a generic test of lifecycle requests
func testBucketLifecycle(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
t *testing.T, testCases []struct {
method string
bucketName string
accessKey string
secretKey string
body []byte
expectedRespStatus int
lifecycleResponse []byte
errorResponse APIErrorResponse
shouldPass bool
}) {
for i, testCase := range testCases {
// initialize httptest Recorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request
req, err := newTestSignedRequestV4(testCase.method, getBucketLifecycleURL("", testCase.bucketName),
int64(len(testCase.body)), bytes.NewReader(testCase.body), testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for GetBucketLocationHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
if testCase.shouldPass && !bytes.Equal(testCase.lifecycleResponse, rec.Body.Bytes()) {
t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.lifecycleResponse), rec.Body.String())
}
errorResponse := APIErrorResponse{}
err = xml.Unmarshal(rec.Body.Bytes(), &errorResponse)
if err != nil && !testCase.shouldPass {
t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, rec.Body.String())
}
if errorResponse.Resource != testCase.errorResponse.Resource {
t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
}
if errorResponse.Message != testCase.errorResponse.Message {
t.Errorf("Test %d: %s: Expected the error message to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Message, errorResponse.Message)
}
if errorResponse.Code != testCase.errorResponse.Code {
t.Errorf("Test %d: %s: Expected the error code to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Code, errorResponse.Code)
}
}
}


@@ -20,13 +20,10 @@ import (
"bytes"
"context"
"encoding/xml"
"fmt"
"path"
"strings"
"sync"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/lifecycle"
"github.com/minio/minio/pkg/bucket/lifecycle"
)
const (
@@ -38,11 +35,11 @@ const (
// LifecycleSys - Bucket lifecycle subsystem.
type LifecycleSys struct {
sync.RWMutex
bucketLifecycleMap map[string]lifecycle.Lifecycle
bucketLifecycleMap map[string]*lifecycle.Lifecycle
}
// Set - sets lifecycle config to given bucket name.
func (sys *LifecycleSys) Set(bucketName string, lifecycle lifecycle.Lifecycle) {
func (sys *LifecycleSys) Set(bucketName string, lifecycle *lifecycle.Lifecycle) {
if globalIsGateway {
// no-op
return
@@ -55,7 +52,7 @@ func (sys *LifecycleSys) Set(bucketName string, lifecycle lifecycle.Lifecycle) {
}
// Get - gets lifecycle config associated to a given bucket name.
func (sys *LifecycleSys) Get(bucketName string) (lifecycle lifecycle.Lifecycle, ok bool) {
func (sys *LifecycleSys) Get(bucketName string) (lc *lifecycle.Lifecycle, ok bool) {
if globalIsGateway {
// When gateway is enabled, no cached value
// is used to validate life cycle policies.
@@ -63,14 +60,18 @@ func (sys *LifecycleSys) Get(bucketName string) (lifecycle lifecycle.Lifecycle,
if objAPI == nil {
return
}
l, err := objAPI.GetBucketLifecycle(context.Background(), bucketName)
return *l, err == nil
l, err := objAPI.GetBucketLifecycle(GlobalContext, bucketName)
if err != nil {
return
}
return l, true
}
sys.Lock()
defer sys.Unlock()
l, ok := sys.bucketLifecycleMap[bucketName]
return l, ok
lc, ok = sys.bucketLifecycleMap[bucketName]
return
}
func saveLifecycleConfig(ctx context.Context, objAPI ObjectLayer, bucketName string, bucketLifecycle *lifecycle.Lifecycle) error {
@@ -88,7 +89,7 @@ func saveLifecycleConfig(ctx context.Context, objAPI ObjectLayer, bucketName str
func getLifecycleConfig(objAPI ObjectLayer, bucketName string) (*lifecycle.Lifecycle, error) {
// Construct path to lifecycle.xml for the given bucket.
configFile := path.Join(bucketConfigPrefix, bucketName, bucketLifecycleConfig)
configData, err := readConfig(context.Background(), objAPI, configFile)
configData, err := readConfig(GlobalContext, objAPI, configFile)
if err != nil {
if err == errConfigNotFound {
err = BucketLifecycleNotFound{Bucket: bucketName}
@@ -103,8 +104,8 @@ func removeLifecycleConfig(ctx context.Context, objAPI ObjectLayer, bucketName s
// Construct path to lifecycle.xml for the given bucket.
configFile := path.Join(bucketConfigPrefix, bucketName, bucketLifecycleConfig)
if err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile); err != nil {
if _, ok := err.(ObjectNotFound); ok {
if err := deleteConfig(ctx, objAPI, configFile); err != nil {
if err == errConfigNotFound {
return BucketLifecycleNotFound{Bucket: bucketName}
}
return err
@@ -115,7 +116,7 @@ func removeLifecycleConfig(ctx context.Context, objAPI ObjectLayer, bucketName s
// NewLifecycleSys - creates new lifecycle system.
func NewLifecycleSys() *LifecycleSys {
return &LifecycleSys{
bucketLifecycleMap: make(map[string]lifecycle.Lifecycle),
bucketLifecycleMap: make(map[string]*lifecycle.Lifecycle),
}
}
@@ -131,52 +132,29 @@ func (sys *LifecycleSys) Init(buckets []BucketInfo, objAPI ObjectLayer) error {
return nil
}
doneCh := make(chan struct{})
defer close(doneCh)
// Initializing lifecycle needs a retry mechanism for
// the following reasons:
// - Read quorum is lost just after the initialization
// of the object layer.
retryTimerCh := newRetryTimerSimple(doneCh)
for {
select {
case <-retryTimerCh:
// Load LifecycleSys once during boot.
if err := sys.load(buckets, objAPI); err != nil {
if err == errDiskNotFound ||
strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
logger.Info("Waiting for lifecycle subsystem to be initialized..")
continue
}
return err
}
return nil
case <-globalOSSignalCh:
return fmt.Errorf("Initializing Lifecycle sub-system gracefully stopped")
}
}
// Load LifecycleSys once during boot.
return sys.load(buckets, objAPI)
}
// Loads lifecycle policies for all buckets into LifecycleSys.
func (sys *LifecycleSys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
for _, bucket := range buckets {
config, err := objAPI.GetBucketLifecycle(context.Background(), bucket.Name)
config, err := getLifecycleConfig(objAPI, bucket.Name)
if err != nil {
if _, ok := err.(BucketLifecycleNotFound); ok {
sys.Remove(bucket.Name)
continue
}
continue
// Quorum errors should be returned.
return err
}
sys.Set(bucket.Name, *config)
sys.Set(bucket.Name, config)
}
return nil
}
// Remove - removes policy for given bucket name.
// Remove - removes lifecycle config for given bucket name.
func (sys *LifecycleSys) Remove(bucketName string) {
sys.Lock()
defer sys.Unlock()

cmd/bucket-meta.go

@@ -0,0 +1,165 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"encoding/binary"
"errors"
"fmt"
"path"
"time"
"github.com/minio/minio/cmd/logger"
)
const (
legacyBucketObjectLockEnabledConfigFile = "object-lock-enabled.json"
legacyBucketObjectLockEnabledConfig = `{"x-amz-bucket-object-lock-enabled":true}`
bucketMetadataFile = ".metadata.bin"
bucketMetadataFormat = 1
bucketMetadataVersion = 1
)
//go:generate msgp -file $GOFILE -unexported
// bucketMetadata contains bucket metadata.
// When adding/removing fields, regenerate the marshal code using the go generate above.
// Only changing meaning of fields requires a version bump.
// bucketMetadataFormat refers to the format.
// bucketMetadataVersion can be used to track a rolling upgrade of a field.
type bucketMetadata struct {
Name string
Created time.Time
LockEnabled bool
}
// newBucketMetadata creates bucketMetadata with the supplied name and Created to Now.
func newBucketMetadata(name string) bucketMetadata {
return bucketMetadata{
Name: name,
Created: UTCNow(),
}
}
// loadBucketMeta loads the metadata of the bucket by name from ObjectLayer o.
// If an error is returned, the returned metadata is default initialized.
func loadBucketMeta(ctx context.Context, o ObjectLayer, name string) (bucketMetadata, error) {
b := newBucketMetadata(name)
configFile := path.Join(bucketConfigPrefix, name, bucketMetadataFile)
data, err := readConfig(ctx, o, configFile)
if err != nil {
return b, err
}
if len(data) <= 4 {
return b, fmt.Errorf("loadBucketMetadata: no data")
}
// Read header
switch binary.LittleEndian.Uint16(data[0:2]) {
case bucketMetadataFormat:
default:
return b, fmt.Errorf("loadBucketMetadata: unknown format: %d", binary.LittleEndian.Uint16(data[0:2]))
}
switch binary.LittleEndian.Uint16(data[2:4]) {
case bucketMetadataVersion:
default:
return b, fmt.Errorf("loadBucketMetadata: unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
}
// OK, parse data.
_, err = b.UnmarshalMsg(data[4:])
return b, err
}
var errMetaDataConverted = errors.New("metadata converted")
// loadBucketMetadata loads and migrates to bucket metadata.
func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket string) (bucketMetadata, error) {
meta, err := loadBucketMeta(ctx, objectAPI, bucket)
if err == nil {
return meta, nil
}
if err != errConfigNotFound {
return meta, err
}
// Reaching here means this is an old bucket without bucket metadata, so we migrate the existing settings.
if err = meta.convertLegacyLockconfig(ctx, objectAPI); err != nil {
return meta, err
}
return meta, errMetaDataConverted
}
func (b *bucketMetadata) convertLegacyLockconfig(ctx context.Context, objectAPI ObjectLayer) error {
configFile := path.Join(bucketConfigPrefix, b.Name, legacyBucketObjectLockEnabledConfigFile)
save := func() error {
if err := b.save(ctx, objectAPI); err != nil {
return err
}
if err := deleteConfig(ctx, objectAPI, configFile); err != nil && !errors.Is(err, errConfigNotFound) {
logger.LogIf(ctx, err)
}
return nil
}
configData, err := readConfig(ctx, objectAPI, configFile)
if err != nil {
if !errors.Is(err, errConfigNotFound) {
return err
}
return save()
}
if string(configData) != legacyBucketObjectLockEnabledConfig {
return fmt.Errorf("content mismatch in config file %v", configFile)
}
b.LockEnabled = true
return save()
}
// save config to supplied ObjectLayer o.
func (b *bucketMetadata) save(ctx context.Context, o ObjectLayer) error {
data := make([]byte, 4, b.Msgsize()+4)
// Put header
binary.LittleEndian.PutUint16(data[0:2], bucketMetadataFormat)
binary.LittleEndian.PutUint16(data[2:4], bucketMetadataVersion)
// Add data
data, err := b.MarshalMsg(data)
if err != nil {
return err
}
configFile := path.Join(bucketConfigPrefix, b.Name, bucketMetadataFile)
return saveConfig(ctx, o, configFile, data)
}
// removeBucketMeta removes bucket metadata.
// If the config does not exist, no error is returned.
func removeBucketMeta(ctx context.Context, obj ObjectLayer, bucket string) error {
configFile := path.Join(bucketConfigPrefix, bucket, bucketMetadataFile)
if err := deleteConfig(ctx, obj, configFile); err != nil && err != errConfigNotFound {
return err
}
return nil
}
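The .metadata.bin layout is therefore two little-endian uint16s (format, version) followed by the msgp payload, which is why loadBucketMeta insists on more than 4 bytes. A standalone sketch of just the framing (no msgp dependency; the payload is treated as opaque bytes here):
package main
import (
	"encoding/binary"
	"errors"
	"fmt"
)
const (
	metaFormat  = 1
	metaVersion = 1
)
// frame prepends the 4-byte header to a serialized payload.
func frame(payload []byte) []byte {
	data := make([]byte, 4, 4+len(payload))
	binary.LittleEndian.PutUint16(data[0:2], metaFormat)
	binary.LittleEndian.PutUint16(data[2:4], metaVersion)
	return append(data, payload...)
}
// unframe validates the header and returns the payload.
func unframe(data []byte) ([]byte, error) {
	if len(data) <= 4 {
		return nil, errors.New("no data")
	}
	if binary.LittleEndian.Uint16(data[0:2]) != metaFormat {
		return nil, fmt.Errorf("unknown format: %d", binary.LittleEndian.Uint16(data[0:2]))
	}
	if binary.LittleEndian.Uint16(data[2:4]) != metaVersion {
		return nil, fmt.Errorf("unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
	}
	return data[4:], nil
}
func main() {
	framed := frame([]byte("msgp-bytes"))
	payload, err := unframe(framed)
	fmt.Println(string(payload), err) // msgp-bytes <nil>
}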

cmd/bucket-meta_gen.go

@@ -0,0 +1,160 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *bucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Name":
z.Name, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "Created":
z.Created, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "Created")
return
}
case "LockEnabled":
z.LockEnabled, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "LockEnabled")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z bucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// write "Name"
err = en.Append(0x83, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Name)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
// write "Created"
err = en.Append(0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.Created)
if err != nil {
err = msgp.WrapError(err, "Created")
return
}
// write "LockEnabled"
err = en.Append(0xab, 0x4c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteBool(z.LockEnabled)
if err != nil {
err = msgp.WrapError(err, "LockEnabled")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z bucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// string "Name"
o = append(o, 0x83, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name)
// string "Created"
o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.Created)
// string "LockEnabled"
o = append(o, 0xab, 0x4c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64)
o = msgp.AppendBool(o, z.LockEnabled)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *bucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Name":
z.Name, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "Created":
z.Created, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Created")
return
}
case "LockEnabled":
z.LockEnabled, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LockEnabled")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z bucketMetadata) Msgsize() (s int) {
s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize
return
}

cmd/bucket-meta_gen_test.go

@@ -0,0 +1,123 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalbucketMetadata(t *testing.T) {
v := bucketMetadata{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgbucketMetadata(b *testing.B) {
v := bucketMetadata{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgbucketMetadata(b *testing.B) {
v := bucketMetadata{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalbucketMetadata(b *testing.B) {
v := bucketMetadata{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodebucketMetadata(t *testing.T) {
v := bucketMetadata{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodebucketMetadata Msgsize() is inaccurate")
}
vn := bucketMetadata{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodebucketMetadata(b *testing.B) {
v := bucketMetadata{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodebucketMetadata(b *testing.B) {
v := bucketMetadata{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}


@@ -22,7 +22,6 @@ import (
"errors"
"io"
"net/http"
"net/url"
"path"
"reflect"
"time"
@@ -30,8 +29,8 @@ import (
"github.com/gorilla/mux"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/policy"
)
const (
@@ -84,8 +83,8 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
config.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
config.SetRegion(globalServerRegion)
config.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
notificationBytes, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
@@ -116,7 +115,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
// With newer config disallowing changing / turning off
// notification targets without removing ARN in notification
// configuration we won't see this problem anymore.
if reflect.DeepEqual(queue.ARN, arnErr.ARN) {
if reflect.DeepEqual(queue.ARN, arnErr.ARN) && i < len(config.QueueList) {
config.QueueList = append(config.QueueList[:i],
config.QueueList[i+1:]...)
}
@@ -282,16 +281,13 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
w.Header().Set(xhttp.ContentType, "text/event-stream")
doneCh := make(chan struct{})
defer close(doneCh)
// Listen Publisher and peer-listen-client uses nonblocking send and hence does not wait for slow receivers.
// Use buffered channel to take care of burst sends or slow w.Write()
listenCh := make(chan interface{}, 4000)
peers := getRestClients(globalEndpoints)
peers := newPeerRestClients(globalEndpoints)
globalHTTPListen.Subscribe(listenCh, doneCh, func(evI interface{}) bool {
globalHTTPListen.Subscribe(listenCh, ctx.Done(), func(evI interface{}) bool {
ev, ok := evI.(event.Event)
if !ok {
return false
@@ -299,18 +295,14 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
if ev.S3.Bucket.Name != values.Get(peerRESTListenBucket) {
return false
}
objectName, uerr := url.QueryUnescape(ev.S3.Object.Key)
if uerr != nil {
objectName = ev.S3.Object.Key
}
return len(rulesMap.Match(ev.EventName, objectName).ToSlice()) != 0
return rulesMap.MatchSimple(ev.EventName, ev.S3.Object.Key)
})
for _, peer := range peers {
if peer == nil {
continue
}
peer.Listen(listenCh, doneCh, values)
peer.Listen(listenCh, ctx.Done(), values)
}
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
@@ -336,7 +328,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
return
}
w.(http.Flusher).Flush()
case <-GlobalServiceDoneCh:
case <-ctx.Done():
return
}
}

cmd/bucket-object-lock.go

@@ -0,0 +1,535 @@
/*
* MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"encoding/xml"
"errors"
"math"
"net/http"
"path"
"sync"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
)
// BucketObjectLockSys - map of bucket and retention configuration.
type BucketObjectLockSys struct {
sync.RWMutex
retentionMap map[string]*objectlock.Retention
}
// Set - set retention configuration.
func (sys *BucketObjectLockSys) Set(bucketName string, retention *objectlock.Retention) {
if globalIsGateway {
// no-op
return
}
sys.Lock()
sys.retentionMap[bucketName] = retention
sys.Unlock()
}
// Get - Get retention configuration.
func (sys *BucketObjectLockSys) Get(bucketName string) (r *objectlock.Retention, ok bool) {
if globalIsGateway {
// When gateway is enabled, no cached value
// is used to validate bucket object lock configuration
objAPI := newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
return
}
lc, err := objAPI.GetBucketObjectLockConfig(GlobalContext, bucketName)
if err != nil {
return
}
return lc.ToRetention(), true
}
sys.RLock()
defer sys.RUnlock()
r, ok = sys.retentionMap[bucketName]
return r, ok
}
// Remove - removes retention configuration.
func (sys *BucketObjectLockSys) Remove(bucketName string) {
sys.Lock()
delete(sys.retentionMap, bucketName)
sys.Unlock()
}
// Similar to enforceRetentionBypassForDelete but for WebUI
func enforceRetentionBypassForDeleteWeb(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, govBypassPerms bool) APIErrorCode {
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
return toAPIErrorCode(ctx, err)
}
oi, err := getObjectInfoFn(ctx, bucket, object, opts)
if err != nil {
return toAPIErrorCode(ctx, err)
}
lhold := objectlock.GetObjectLegalHoldMeta(oi.UserDefined)
if lhold.Status.Valid() && lhold.Status == objectlock.LegalHoldOn {
return ErrObjectLocked
}
ret := objectlock.GetObjectRetentionMeta(oi.UserDefined)
if ret.Mode.Valid() {
switch ret.Mode {
case objectlock.RetCompliance:
// In compliance mode, a protected object version can't be overwritten
// or deleted by any user, including the root user in your AWS account.
// When an object is locked in compliance mode, its retention mode can't
// be changed, and its retention period can't be shortened. Compliance mode
// ensures that an object version can't be overwritten or deleted for the
// duration of the retention period.
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return ErrObjectLocked
}
if !ret.RetainUntilDate.Before(t) {
return ErrObjectLocked
}
return ErrNone
case objectlock.RetGovernance:
// In governance mode, users can't overwrite or delete an object
// version or alter its lock settings unless they have special
// permissions. With governance mode, you protect objects against
// being deleted by most users, but you can still grant some users
// permission to alter the retention settings or delete the object
// if necessary. You can also use governance mode to test retention-period
// settings before creating a compliance-mode retention period.
// To override or remove governance-mode retention settings, a
// user must have the s3:BypassGovernanceRetention permission
// and must explicitly include x-amz-bypass-governance-retention:true
// as a request header with any request that requires overriding
// governance mode.
byPassSet := govBypassPerms && objectlock.IsObjectLockGovernanceBypassSet(r.Header)
if !byPassSet {
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return ErrObjectLocked
}
if !ret.RetainUntilDate.Before(t) {
return ErrObjectLocked
}
if !govBypassPerms {
return ErrObjectLocked
}
return ErrNone
}
}
}
return ErrNone
}
// enforceRetentionForDeletion checks if it is appropriate to remove an
// object according to the locking configuration when the caller is the
// lifecycle or bucket quota subsystem.
func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locked bool) {
lhold := objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
if lhold.Status.Valid() && lhold.Status == objectlock.LegalHoldOn {
return true
}
ret := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
if ret.Mode.Valid() && (ret.Mode == objectlock.RetCompliance || ret.Mode == objectlock.RetGovernance) {
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return true
}
if ret.RetainUntilDate.After(t) {
return true
}
}
return false
}
// enforceRetentionBypassForDelete enforces whether an existing object under governance can be deleted
// with governance bypass headers set in the request.
// Objects under site wide WORM can never be overwritten.
// For objects in "Governance" mode, overwrite is allowed if a) object retention date is past OR
// governance bypass headers are set and user has governance bypass permissions.
// Objects in "Compliance" mode can be overwritten only if retention date is past.
func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn) APIErrorCode {
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
return toAPIErrorCode(ctx, err)
}
oi, err := getObjectInfoFn(ctx, bucket, object, opts)
if err != nil {
return toAPIErrorCode(ctx, err)
}
lhold := objectlock.GetObjectLegalHoldMeta(oi.UserDefined)
if lhold.Status.Valid() && lhold.Status == objectlock.LegalHoldOn {
return ErrObjectLocked
}
ret := objectlock.GetObjectRetentionMeta(oi.UserDefined)
if ret.Mode.Valid() {
switch ret.Mode {
case objectlock.RetCompliance:
// In compliance mode, a protected object version can't be overwritten
// or deleted by any user, including the root user in your AWS account.
// When an object is locked in compliance mode, its retention mode can't
// be changed, and its retention period can't be shortened. Compliance mode
// ensures that an object version can't be overwritten or deleted for the
// duration of the retention period.
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return ErrObjectLocked
}
if !ret.RetainUntilDate.Before(t) {
return ErrObjectLocked
}
return ErrNone
case objectlock.RetGovernance:
// In governance mode, users can't overwrite or delete an object
// version or alter its lock settings unless they have special
// permissions. With governance mode, you protect objects against
// being deleted by most users, but you can still grant some users
// permission to alter the retention settings or delete the object
// if necessary. You can also use governance mode to test retention-period
// settings before creating a compliance-mode retention period.
// To override or remove governance-mode retention settings, a
// user must have the s3:BypassGovernanceRetention permission
// and must explicitly include x-amz-bypass-governance-retention:true
// as a request header with any request that requires overriding
// governance mode.
//
byPassSet := objectlock.IsObjectLockGovernanceBypassSet(r.Header)
if !byPassSet {
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return ErrObjectLocked
}
if !ret.RetainUntilDate.Before(t) {
return ErrObjectLocked
}
return ErrNone
}
// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes
// If you try to delete objects protected by governance mode and have s3:BypassGovernanceRetention
// or s3:GetBucketObjectLockConfiguration permissions, the operation will succeed.
govBypassPerms1 := checkRequestAuthType(ctx, r, policy.BypassGovernanceRetentionAction, bucket, object)
govBypassPerms2 := checkRequestAuthType(ctx, r, policy.GetBucketObjectLockConfigurationAction, bucket, object)
if govBypassPerms1 != ErrNone && govBypassPerms2 != ErrNone {
return ErrAccessDenied
}
}
}
return ErrNone
}
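// Client-side sketch of a governance-bypass delete (illustrative; request
// signing is elided and the endpoint/object names are assumed):
//
//	req, _ := http.NewRequest(http.MethodDelete,
//		"https://minio.example.com/mybucket/myobject", nil)
//	req.Header.Set("x-amz-bypass-governance-retention", "true")
//	// sign the request with credentials holding
//	// s3:BypassGovernanceRetention, then:
//	resp, err := http.DefaultClient.Do(req)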
// enforceRetentionBypassForPut enforces whether an existing object under governance can be overwritten
// with governance bypass headers set in the request.
// Objects under site wide WORM cannot be overwritten.
// For objects in "Governance" mode, overwrite is allowed if a) object retention date is past OR
// governance bypass headers are set and user has governance bypass permissions.
// Objects in compliance mode can be overwritten only if retention date is being extended. No mode change is permitted.
func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, objRetention *objectlock.ObjectRetention, cred auth.Credentials, owner bool, claims map[string]interface{}) (ObjectInfo, APIErrorCode) {
byPassSet := objectlock.IsObjectLockGovernanceBypassSet(r.Header)
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
return ObjectInfo{}, toAPIErrorCode(ctx, err)
}
oi, err := getObjectInfoFn(ctx, bucket, object, opts)
if err != nil {
return oi, toAPIErrorCode(ctx, err)
}
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return oi, ErrObjectLocked
}
// Pass in relative days from current time, to additionally verify the "object-lock-remaining-retention-days" policy condition, if any.
days := int(math.Ceil(math.Abs(objRetention.RetainUntilDate.Sub(t).Hours()) / 24))
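// e.g. a retain-until date 36h in the future yields math.Ceil(36/24) == 2
// days; math.Abs guards against an already-expired retain-until date.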
ret := objectlock.GetObjectRetentionMeta(oi.UserDefined)
if ret.Mode.Valid() {
// Retention has expired; the caller may change whatever they like.
if ret.RetainUntilDate.Before(t) {
perm := isPutRetentionAllowed(bucket, object,
days, objRetention.RetainUntilDate.Time,
objRetention.Mode, byPassSet, r, cred,
owner, claims)
return oi, perm
}
switch ret.Mode {
case objectlock.RetGovernance:
govPerm := isPutRetentionAllowed(bucket, object, days,
objRetention.RetainUntilDate.Time, objRetention.Mode,
byPassSet, r, cred, owner, claims)
// Governance mode retention period cannot be shortened if x-amz-bypass-governance-retention is not set.
if !byPassSet {
if objRetention.Mode != objectlock.RetGovernance || objRetention.RetainUntilDate.Before((ret.RetainUntilDate.Time)) {
return oi, ErrObjectLocked
}
}
return oi, govPerm
case objectlock.RetCompliance:
// Compliance retention mode cannot be changed or shortened.
// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes
if objRetention.Mode != objectlock.RetCompliance || objRetention.RetainUntilDate.Before((ret.RetainUntilDate.Time)) {
return oi, ErrObjectLocked
}
compliancePerm := isPutRetentionAllowed(bucket, object,
days, objRetention.RetainUntilDate.Time, objRetention.Mode,
false, r, cred, owner, claims)
return oi, compliancePerm
}
return oi, ErrNone
}
// No pre-existing retention metadata present.
perm := isPutRetentionAllowed(bucket, object,
days, objRetention.RetainUntilDate.Time,
objRetention.Mode, byPassSet, r, cred, owner, claims)
return oi, perm
}
// checkPutObjectLockAllowed enforces object retention policy and legal hold policy
// for requests with WORM headers
// See https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-managing.html for the spec.
// For non-existing objects with object retention headers set, this method returns ErrNone if bucket has
// locking enabled and user has requisite permissions (s3:PutObjectRetention)
// If the object exists on the object store and site-wide WORM is enabled, this method
// returns an error. For objects in "Governance" mode, overwrite is allowed if the retention date has expired.
// For objects in "Compliance" mode, retention date cannot be shortened, and mode cannot be altered.
// For objects with legal hold header set, the s3:PutObjectLegalHold permission is expected to be set
// Both legal hold and retention can be applied independently on an object
func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, retentionPermErr, legalHoldPermErr APIErrorCode) (objectlock.RetMode, objectlock.RetentionDate, objectlock.ObjectLegalHold, APIErrorCode) {
var mode objectlock.RetMode
var retainDate objectlock.RetentionDate
var legalHold objectlock.ObjectLegalHold
retentionRequested := objectlock.IsObjectLockRetentionRequested(r.Header)
legalHoldRequested := objectlock.IsObjectLockLegalHoldRequested(r.Header)
retentionCfg, isWORMBucket := globalBucketObjectLockSys.Get(bucket)
if !isWORMBucket {
if legalHoldRequested || retentionRequested {
return mode, retainDate, legalHold, ErrInvalidBucketObjectLockConfiguration
}
// If this is not a WORM-enabled bucket, we should return right here.
return mode, retainDate, legalHold, ErrNone
}
var objExists bool
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return mode, retainDate, legalHold, ErrObjectLocked
}
if objInfo, err := getObjectInfoFn(ctx, bucket, object, opts); err == nil {
objExists = true
r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) {
return mode, retainDate, legalHold, ErrObjectLocked
}
mode = r.Mode
retainDate = r.RetainUntilDate
legalHold = objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
// Disallow overwriting an object on legal hold
if legalHold.Status == objectlock.LegalHoldOn {
return mode, retainDate, legalHold, ErrObjectLocked
}
}
if legalHoldRequested {
var lerr error
if legalHold, lerr = objectlock.ParseObjectLockLegalHoldHeaders(r.Header); lerr != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, lerr)
}
}
if retentionRequested {
legalHold, err := objectlock.ParseObjectLockLegalHoldHeaders(r.Header)
if err != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
rMode, rDate, err := objectlock.ParseObjectLockRetentionHeaders(r.Header)
if err != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
if objExists && retainDate.After(t) {
return mode, retainDate, legalHold, ErrObjectLocked
}
if retentionPermErr != ErrNone {
return mode, retainDate, legalHold, retentionPermErr
}
return rMode, rDate, legalHold, ErrNone
}
if !retentionRequested && isWORMBucket {
if retentionPermErr != ErrNone {
return mode, retainDate, legalHold, retentionPermErr
}
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return mode, retainDate, legalHold, ErrObjectLocked
}
// AWS S3 just creates a new version of object when an object is being overwritten.
if objExists && retainDate.After(t) {
return mode, retainDate, legalHold, ErrObjectLocked
}
if !legalHoldRequested && !retentionCfg.IsEmpty() {
// inherit retention from bucket configuration
return retentionCfg.Mode, objectlock.RetentionDate{Time: t.Add(retentionCfg.Validity)}, legalHold, ErrNone
}
return "", objectlock.RetentionDate{}, legalHold, ErrNone
}
return mode, retainDate, legalHold, ErrNone
}
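// Request-header sketch for a WORM PUT handled above (header names per the
// S3 object lock API; values illustrative):
//
//	X-Amz-Object-Lock-Mode: GOVERNANCE
//	X-Amz-Object-Lock-Retain-Until-Date: 2020-12-01T00:00:00Z
//	X-Amz-Object-Lock-Legal-Hold: ON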
func readBucketObjectLockConfig(ctx context.Context, objAPI ObjectLayer, bucket string) (*objectlock.Config, error) {
meta, err := loadBucketMetadata(ctx, objAPI, bucket)
if err != nil && err != errMetaDataConverted {
return nil, toObjectErr(err, bucket)
}
if !meta.LockEnabled {
return nil, BucketObjectLockConfigNotFound{Bucket: bucket}
}
configFile := path.Join(bucketConfigPrefix, bucket, objectLockConfig)
configData, err := readConfig(ctx, objAPI, configFile)
if err != nil {
if err != errConfigNotFound {
return nil, toObjectErr(err, bucket)
}
return objectlock.NewObjectLockConfig(), nil
}
cfg, err := objectlock.ParseObjectLockConfig(bytes.NewReader(configData))
if err != nil {
return nil, toObjectErr(err, bucket)
}
return cfg, nil
}
func saveBucketObjectLockConfig(ctx context.Context, objAPI ObjectLayer, bucket string, config *objectlock.Config) error {
meta, err := loadBucketMetadata(ctx, objAPI, bucket)
if err != nil && err != errMetaDataConverted {
return toObjectErr(err, bucket)
}
if !meta.LockEnabled {
return BucketObjectLockConfigNotFound{Bucket: bucket}
}
data, err := xml.Marshal(config)
if err != nil {
return toObjectErr(err, bucket)
}
configFile := path.Join(bucketConfigPrefix, bucket, objectLockConfig)
if err = saveConfig(ctx, objAPI, configFile, data); err != nil {
return toObjectErr(err, bucket)
}
return nil
}
// NewBucketObjectLockSys returns initialized BucketObjectLockSys
func NewBucketObjectLockSys() *BucketObjectLockSys {
return &BucketObjectLockSys{
retentionMap: make(map[string]*objectlock.Retention),
}
}
// Init - initializes bucket object lock config system for all buckets
func (sys *BucketObjectLockSys) Init(buckets []BucketInfo, objAPI ObjectLayer) error {
if objAPI == nil {
return errServerNotInitialized
}
// In gateway mode, we always fetch the bucket object lock configuration from the gateway backend.
// So, this is a no-op for gateway servers.
if globalIsGateway {
return nil
}
// Load BucketObjectLockSys once during boot.
return sys.load(buckets, objAPI)
}
func (sys *BucketObjectLockSys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
for _, bucket := range buckets {
ctx := logger.SetReqInfo(GlobalContext, &logger.ReqInfo{BucketName: bucket.Name})
meta, err := loadBucketMetadata(ctx, objAPI, bucket.Name)
if err != nil {
if err != errMetaDataConverted {
return err
}
meta.Created = bucket.Created
logger.LogIf(ctx, meta.save(ctx, objAPI))
}
if !meta.LockEnabled {
continue
}
configFile := path.Join(bucketConfigPrefix, bucket.Name, objectLockConfig)
configData, err := readConfig(ctx, objAPI, configFile)
if err != nil {
if errors.Is(err, errConfigNotFound) {
globalBucketObjectLockSys.Set(bucket.Name, &objectlock.Retention{})
continue
}
return err
}
config, err := objectlock.ParseObjectLockConfig(bytes.NewReader(configData))
if err != nil {
return err
}
retention := &objectlock.Retention{}
if config.Rule != nil {
retention = config.ToRetention()
}
globalBucketObjectLockSys.Set(bucket.Name, retention)
}
return nil
}
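// For reference, a minimal object lock configuration document as consumed by
// ParseObjectLockConfig above (shape per the S3 object lock schema; values
// illustrative):
//
//	<ObjectLockConfiguration>
//	  <ObjectLockEnabled>Enabled</ObjectLockEnabled>
//	  <Rule>
//	    <DefaultRetention><Mode>GOVERNANCE</Mode><Days>30</Days></DefaultRetention>
//	  </Rule>
//	</ObjectLockConfiguration>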


@@ -24,7 +24,7 @@ import (
humanize "github.com/dustin/go-humanize"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/policy"
"github.com/minio/minio/pkg/bucket/policy"
)
const (
@@ -77,7 +77,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
bucketPolicy, err := policy.ParseConfig(io.LimitReader(r.Body, r.ContentLength), bucket)
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -92,7 +92,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
return
}
globalPolicySys.Set(bucket, *bucketPolicy)
globalPolicySys.Set(bucket, bucketPolicy)
globalNotificationSys.SetBucketPolicy(ctx, bucket, bucketPolicy)
// Success.


@@ -18,7 +18,6 @@ package cmd
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
@@ -29,8 +28,8 @@ import (
"testing"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/policy"
"github.com/minio/minio/pkg/policy/condition"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/policy/condition"
)
func getAnonReadOnlyBucketPolicy(bucketName string) *policy.Policy {
@@ -103,7 +102,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
credentials auth.Credentials, t *testing.T) {
bucketName1 := fmt.Sprintf("%s-1", bucketName)
if err := obj.MakeBucketWithLocation(context.Background(), bucketName1, ""); err != nil {
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, "", false); err != nil {
t.Fatal(err)
}


@@ -20,27 +20,30 @@ import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"time"
jsoniter "github.com/json-iterator/go"
miniogopolicy "github.com/minio/minio-go/v6/pkg/policy"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/policy"
)
// PolicySys - policy subsystem.
type PolicySys struct {
sync.RWMutex
bucketPolicyMap map[string]policy.Policy
bucketPolicyMap map[string]*policy.Policy
}
// Set - sets policy to given bucket name. If policy is empty, existing policy is removed.
func (sys *PolicySys) Set(bucketName string, policy policy.Policy) {
func (sys *PolicySys) Set(bucketName string, policy *policy.Policy) {
if globalIsGateway {
// Set policy is a non-op under gateway mode.
return
@@ -71,7 +74,7 @@ func (sys *PolicySys) IsAllowed(args policy.Args) bool {
// is used to validate bucket policies.
objAPI := newObjectLayerFn()
if objAPI != nil {
config, err := objAPI.GetBucketPolicy(context.Background(), args.BucketName)
config, err := objAPI.GetBucketPolicy(GlobalContext, args.BucketName)
if err == nil {
return config.IsAllowed(args)
}
@@ -94,12 +97,12 @@ func (sys *PolicySys) IsAllowed(args policy.Args) bool {
// Loads policies for all buckets into PolicySys.
func (sys *PolicySys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
for _, bucket := range buckets {
config, err := objAPI.GetBucketPolicy(context.Background(), bucket.Name)
config, err := getPolicyConfig(objAPI, bucket.Name)
if err != nil {
if _, ok := err.(BucketPolicyNotFound); ok {
sys.Remove(bucket.Name)
continue
}
continue
return err
}
// This part is specifically written to handle migration
// when the Version string is empty, this was allowed
@@ -110,12 +113,12 @@ func (sys *PolicySys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
logger.Info("Found in-consistent bucket policies, Migrating them for Bucket: (%s)", bucket.Name)
config.Version = policy.DefaultVersion
if err = savePolicyConfig(context.Background(), objAPI, bucket.Name, config); err != nil {
logger.LogIf(context.Background(), err)
if err = savePolicyConfig(GlobalContext, objAPI, bucket.Name, config); err != nil {
logger.LogIf(GlobalContext, err)
return err
}
}
sys.Set(bucket.Name, *config)
sys.Set(bucket.Name, config)
}
return nil
}
@@ -123,7 +126,7 @@ func (sys *PolicySys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
// Init - initializes policy system from policy.json of all buckets.
func (sys *PolicySys) Init(buckets []BucketInfo, objAPI ObjectLayer) error {
if objAPI == nil {
return errInvalidArgument
return errServerNotInitialized
}
// In gateway mode, we don't need to load the policies
@@ -132,62 +135,55 @@ func (sys *PolicySys) Init(buckets []BucketInfo, objAPI ObjectLayer) error {
return nil
}
doneCh := make(chan struct{})
defer close(doneCh)
// Initializing policy needs a retry mechanism for
// the following reasons:
// - Read quorum is lost just after the initialization
// of the object layer.
retryTimerCh := newRetryTimerSimple(doneCh)
for {
select {
case <-retryTimerCh:
// Load PolicySys once during boot.
if err := sys.load(buckets, objAPI); err != nil {
if err == errDiskNotFound ||
strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
logger.Info("Waiting for policy subsystem to be initialized..")
continue
}
return err
}
return nil
case <-globalOSSignalCh:
return fmt.Errorf("Initializing Policy sub-system gracefully stopped")
}
}
// Load PolicySys once during boot.
return sys.load(buckets, objAPI)
}
// NewPolicySys - creates new policy system.
func NewPolicySys() *PolicySys {
return &PolicySys{
bucketPolicyMap: make(map[string]policy.Policy),
bucketPolicyMap: make(map[string]*policy.Policy),
}
}
func getConditionValues(request *http.Request, locationConstraint string, username string, claims map[string]interface{}) map[string][]string {
func getConditionValues(r *http.Request, lc string, username string, claims map[string]interface{}) map[string][]string {
currTime := UTCNow()
principalType := func() string {
if username != "" {
return "User"
}
return "Anonymous"
}()
principalType := "Anonymous"
if username != "" {
principalType = "User"
}
args := map[string][]string{
"CurrenTime": {currTime.Format(event.AMZTimeFormat)},
"EpochTime": {fmt.Sprintf("%d", currTime.Unix())},
"CurrentTime": {currTime.Format(time.RFC3339)},
"EpochTime": {strconv.FormatInt(currTime.Unix(), 10)},
"SecureTransport": {strconv.FormatBool(r.TLS != nil)},
"SourceIp": {handlers.GetSourceIP(r)},
"UserAgent": {r.UserAgent()},
"Referer": {r.Referer()},
"principaltype": {principalType},
"SecureTransport": {fmt.Sprintf("%t", request.TLS != nil)},
"SourceIp": {handlers.GetSourceIP(request)},
"UserAgent": {request.UserAgent()},
"Referer": {request.Referer()},
"userid": {username},
"username": {username},
}
for key, values := range request.Header {
if lc != "" {
args["LocationConstraint"] = []string{lc}
}
cloneHeader := r.Header.Clone()
for _, objLock := range []string{
xhttp.AmzObjectLockMode,
xhttp.AmzObjectLockLegalHold,
xhttp.AmzObjectLockRetainUntilDate,
} {
if values, ok := cloneHeader[objLock]; ok {
args[strings.TrimPrefix(objLock, "X-Amz-")] = values
}
cloneHeader.Del(objLock)
}
for key, values := range cloneHeader {
if existingValues, found := args[key]; found {
args[key] = append(existingValues, values...)
} else {
@@ -195,7 +191,23 @@ func getConditionValues(request *http.Request, locationConstraint string, userna
}
}
for key, values := range request.URL.Query() {
var cloneURLValues = url.Values{}
for k, v := range r.URL.Query() {
cloneURLValues[k] = v
}
for _, objLock := range []string{
xhttp.AmzObjectLockMode,
xhttp.AmzObjectLockLegalHold,
xhttp.AmzObjectLockRetainUntilDate,
} {
if values, ok := cloneURLValues[objLock]; ok {
args[strings.TrimPrefix(objLock, "X-Amz-")] = values
}
cloneURLValues.Del(objLock)
}
for key, values := range cloneURLValues {
if existingValues, found := args[key]; found {
args[key] = append(existingValues, values...)
} else {
@@ -203,10 +215,6 @@ func getConditionValues(request *http.Request, locationConstraint string, userna
}
}
if locationConstraint != "" {
args["LocationConstraint"] = []string{locationConstraint}
}
// JWT specific values
for k, v := range claims {
vStr, ok := v.(string)
@@ -214,6 +222,7 @@ func getConditionValues(request *http.Request, locationConstraint string, userna
args[k] = []string{vStr}
}
}
return args
}
@@ -222,7 +231,7 @@ func getPolicyConfig(objAPI ObjectLayer, bucketName string) (*policy.Policy, err
// Construct path to policy.json for the given bucket.
configFile := path.Join(bucketConfigPrefix, bucketName, bucketPolicyConfig)
configData, err := readConfig(context.Background(), objAPI, configFile)
configData, err := readConfig(GlobalContext, objAPI, configFile)
if err != nil {
if err == errConfigNotFound {
err = BucketPolicyNotFound{Bucket: bucketName}
@@ -275,6 +284,7 @@ func PolicyToBucketAccessPolicy(bucketPolicy *policy.Policy) (*miniogopolicy.Buc
}
var policyInfo miniogopolicy.BucketAccessPolicy
var json = jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(data, &policyInfo); err != nil {
// This should not happen because data is valid JSON.
return nil, err
@@ -292,6 +302,7 @@ func BucketAccessPolicyToPolicy(policyInfo *miniogopolicy.BucketAccessPolicy) (*
}
var bucketPolicy policy.Policy
var json = jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(data, &bucketPolicy); err != nil {
// This should not happen because data is valid JSON.
return nil, err

cmd/bucket-quota.go (new file, 277 lines)

@@ -0,0 +1,277 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"encoding/json"
"errors"
"fmt"
"path"
"sync"
"time"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/madmin"
)
// BucketQuotaSys - map of bucket and quota configuration.
type BucketQuotaSys struct {
sync.RWMutex
quotaMap map[string]madmin.BucketQuota
}
// Set - set quota configuration.
func (sys *BucketQuotaSys) Set(bucketName string, q madmin.BucketQuota) {
sys.Lock()
sys.quotaMap[bucketName] = q
sys.Unlock()
}
// Get - Get quota configuration.
func (sys *BucketQuotaSys) Get(bucketName string) (q madmin.BucketQuota, ok bool) {
sys.RLock()
defer sys.RUnlock()
q, ok = sys.quotaMap[bucketName]
return
}
// Remove - removes quota configuration.
func (sys *BucketQuotaSys) Remove(bucketName string) {
sys.Lock()
delete(sys.quotaMap, bucketName)
sys.Unlock()
}
// Buckets - list of buckets with quota configuration
func (sys *BucketQuotaSys) Buckets() []string {
sys.RLock()
defer sys.RUnlock()
var buckets []string
for k := range sys.quotaMap {
buckets = append(buckets, k)
}
return buckets
}
// Init initializes bucket quota sys configuration for all buckets.
func (sys *BucketQuotaSys) Init(buckets []BucketInfo, objAPI ObjectLayer) error {
if objAPI == nil {
return errServerNotInitialized
}
// In gateway mode, we do not support bucket quota.
// So, this is a no-op for gateway servers.
if globalIsGateway {
return nil
}
return sys.load(buckets, objAPI)
}
func (sys *BucketQuotaSys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
for _, bucket := range buckets {
ctx := logger.SetReqInfo(GlobalContext, &logger.ReqInfo{BucketName: bucket.Name})
configFile := path.Join(bucketConfigPrefix, bucket.Name, bucketQuotaConfigFile)
configData, err := readConfig(ctx, objAPI, configFile)
if err != nil {
if errors.Is(err, errConfigNotFound) {
continue
}
return err
}
quotaCfg, err := parseBucketQuota(configData)
if err != nil {
return err
}
globalBucketQuotaSys.Set(bucket.Name, quotaCfg)
}
return nil
}
// NewBucketQuotaSys returns initialized BucketQuotaSys
func NewBucketQuotaSys() *BucketQuotaSys {
return &BucketQuotaSys{quotaMap: map[string]madmin.BucketQuota{}}
}
// parseBucketQuota parses BucketQuota from JSON
func parseBucketQuota(data []byte) (quotaCfg madmin.BucketQuota, err error) {
err = json.Unmarshal(data, &quotaCfg)
if err != nil {
return
}
if !quotaCfg.Type.IsValid() {
return quotaCfg, fmt.Errorf("Invalid quota type %s", quotaCfg.Type)
}
return
}
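// An illustrative quota document as consumed by parseBucketQuota (field
// names assumed from the madmin package of this vintage):
//
//	{"quota": 1073741824, "quotatype": "fifo"}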
type bucketStorageCache struct {
bucketsSizes map[string]uint64
lastUpdate time.Time
mu sync.Mutex
}
func (b *bucketStorageCache) check(ctx context.Context, q madmin.BucketQuota, bucket string, size int64) error {
b.mu.Lock()
defer b.mu.Unlock()
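// Refresh the cached per-bucket sizes at most once every 10 seconds so that
// quota checks do not hit the backend data usage store on every write.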
if time.Since(b.lastUpdate) > 10*time.Second {
dui, err := loadDataUsageFromBackend(ctx, newObjectLayerWithoutSafeModeFn())
if err != nil {
return err
}
b.lastUpdate = time.Now()
b.bucketsSizes = dui.BucketsSizes
}
currUsage := b.bucketsSizes[bucket]
if (currUsage + uint64(size)) > q.Quota {
return BucketQuotaExceeded{Bucket: bucket}
}
return nil
}
func enforceBucketQuota(ctx context.Context, bucket string, size int64) error {
if size < 0 {
return nil
}
q, ok := globalBucketQuotaSys.Get(bucket)
if !ok {
return nil
}
if q.Type == madmin.FIFOQuota {
return nil
}
return globalBucketStorageCache.check(ctx, q, bucket, size)
}
const (
bgQuotaInterval = 1 * time.Hour
)
// initQuotaEnforcement starts the routine that deletes objects in buckets
// that exceed the FIFO quota
func initQuotaEnforcement(ctx context.Context, objAPI ObjectLayer) {
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
go startBucketQuotaEnforcement(ctx, objAPI)
}
}
func startBucketQuotaEnforcement(ctx context.Context, objAPI ObjectLayer) {
for {
select {
case <-ctx.Done():
return
case <-time.NewTimer(bgQuotaInterval).C:
logger.LogIf(ctx, enforceFIFOQuota(ctx, objAPI))
}
}
}
// enforceFIFOQuota deletes objects in FIFO order until sufficient objects
// have been deleted so as to bring bucket usage within quota
func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) error {
// Turn off quota enforcement if data usage info is unavailable.
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
return nil
}
for _, bucket := range globalBucketQuotaSys.Buckets() {
// Check if the current bucket has quota restrictions, if not skip it
cfg, ok := globalBucketQuotaSys.Get(bucket)
if !ok {
continue
}
if cfg.Type != madmin.FIFOQuota {
continue
}
_, bucketHasLockConfig := globalBucketObjectLockSys.Get(bucket)
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil {
return err
}
var toFree uint64
if dataUsageInfo.BucketsSizes[bucket] > cfg.Quota {
toFree = dataUsageInfo.BucketsSizes[bucket] - cfg.Quota
}
if toFree <= 0 {
continue
}
// Allocate new results channel to receive ObjectInfo.
objInfoCh := make(chan ObjectInfo)
// Walk through all objects
if err := objectAPI.Walk(ctx, bucket, "", objInfoCh); err != nil {
return err
}
// reuse the fileScorer used by disk cache to score entries by
// ModTime to find the oldest objects in bucket to delete. In
// the context of bucket quota enforcement - number of hits are
// irrelevant.
scorer, err := newFileScorer(toFree, time.Now().Unix(), 1)
if err != nil {
return err
}
for obj := range objInfoCh {
// skip objects currently under retention
if bucketHasLockConfig && enforceRetentionForDeletion(ctx, obj) {
continue
}
scorer.addFile(obj.Name, obj.ModTime, obj.Size, 1)
}
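// The scorer now holds the oldest-scored entries whose cumulative size is
// meant to cover toFree; fileNames() below returns them for batched deletion.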
var objects []string
numKeys := len(scorer.fileNames())
for i, key := range scorer.fileNames() {
objects = append(objects, key)
if len(objects) < maxObjectList && (i < numKeys-1) {
// skip deletion until maxObjectList or end of slice
continue
}
if len(objects) == 0 {
break
}
// Deletes a list of objects.
deleteErrs, err := objectAPI.DeleteObjects(ctx, bucket, objects)
if err != nil {
logger.LogIf(ctx, err)
} else {
for i := range deleteErrs {
if deleteErrs[i] != nil {
logger.LogIf(ctx, deleteErrs[i])
continue
}
// Notify object deleted event.
sendEvent(eventArgs{
EventName: event.ObjectRemovedDelete,
BucketName: bucket,
Object: ObjectInfo{
Name: objects[i],
},
Host: "Internal: [FIFO-QUOTA-EXPIRY]",
})
}
objects = nil
}
}
}
return nil
}

cmd/bucket-tagging.go (new file, 61 lines)

@@ -0,0 +1,61 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"encoding/xml"
"path"
"github.com/minio/minio-go/v6/pkg/tags"
)
func saveBucketTagging(ctx context.Context, objAPI ObjectLayer, bucket string, t *tags.Tags) error {
data, err := xml.Marshal(t)
if err != nil {
return toObjectErr(err, bucket)
}
configFile := path.Join(bucketConfigPrefix, bucket, bucketTaggingConfigFile)
if err := saveConfig(ctx, objAPI, configFile, data); err != nil {
return toObjectErr(err, bucket)
}
return nil
}
func deleteBucketTagging(ctx context.Context, objAPI ObjectLayer, bucket string) error {
configFile := path.Join(bucketConfigPrefix, bucket, bucketTaggingConfigFile)
if err := deleteConfig(ctx, objAPI, configFile); err != nil && err != errConfigNotFound {
return toObjectErr(err, bucket)
}
return nil
}
func readBucketTagging(ctx context.Context, objAPI ObjectLayer, bucket string) (*tags.Tags, error) {
configFile := path.Join(bucketConfigPrefix, bucket, bucketTaggingConfigFile)
configData, err := readConfig(ctx, objAPI, configFile)
if err != nil {
if err == errConfigNotFound {
return nil, BucketTaggingNotFound{Bucket: bucket}
}
return nil, toObjectErr(err, bucket)
}
t := &tags.Tags{}
if err = xml.Unmarshal(configData, t); err != nil {
return nil, toObjectErr(err, bucket)
}
return t, nil
}
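// An illustrative tagging document as unmarshalled above (shape per the S3
// bucket tagging schema):
//
//	<Tagging>
//	  <TagSet>
//	    <Tag><Key>env</Key><Value>prod</Value></Tag>
//	  </TagSet>
//	</Tagging>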


@@ -18,8 +18,10 @@ package cmd
import (
"crypto/x509"
"encoding/gob"
"errors"
"net"
"os"
"path/filepath"
"strings"
"time"
@@ -34,6 +36,17 @@ import (
"github.com/minio/minio/pkg/env"
)
func init() {
logger.Init(GOPATH, GOROOT)
logger.RegisterError(config.FmtError)
// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(GlobalContext)
logger.AddTarget(globalConsoleSys)
gob.Register(StorageErr(""))
}
func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
if (globalAutoEncryption || GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
logger.Fatal(errInvalidArgument,
@@ -136,6 +149,12 @@ func handleCommonCmdArgs(ctx *cli.Context) {
globalCLIContext.Addr = ctx.String("address")
}
// Check "no-compat" flag from command line argument.
globalCLIContext.StrictS3Compat = true
if ctx.IsSet("no-compat") || ctx.GlobalIsSet("no-compat") {
globalCLIContext.StrictS3Compat = false
}
// Set all config, certs and CAs directories.
var configSet, certsSet bool
globalConfigDir, configSet = newConfigDirFromCtx(ctx, "config-dir", defaultConfigDir.Get)
@@ -151,18 +170,27 @@ func handleCommonCmdArgs(ctx *cli.Context) {
globalCertsCADir = &ConfigDir{path: filepath.Join(globalCertsDir.Get(), certsCADir)}
logger.FatalIf(mkdirAllIgnorePerm(globalCertsCADir.Get()), "Unable to create certs CA directory at %s", globalCertsCADir.Get())
// Check "compat" flag from command line argument.
globalCLIContext.StrictS3Compat = ctx.IsSet("compat") || ctx.GlobalIsSet("compat")
}
func handleCommonEnvVars() {
var err error
wormEnabled, err := config.LookupWorm()
if err != nil {
logger.Fatal(config.ErrInvalidWormValue(err), "Invalid worm configuration")
}
if wormEnabled {
logger.Fatal(errors.New("WORM is deprecated"), "global MINIO_WORM support is removed, please downgrade your server or migrate to https://github.com/minio/minio/tree/master/docs/retention")
}
globalBrowserEnabled, err = config.ParseBool(env.Get(config.EnvBrowser, config.EnableOn))
if err != nil {
logger.Fatal(config.ErrInvalidBrowserValue(err), "Invalid MINIO_BROWSER value in environment variable")
}
globalFSOSync, err = config.ParseBool(env.Get(config.EnvFSOSync, config.EnableOff))
if err != nil {
logger.Fatal(config.ErrInvalidFSOSyncValue(err), "Invalid MINIO_FS_OSYNC value in environment variable")
}
domains := env.Get(config.EnvDomain, "")
if len(domains) != 0 {
for _, domainName := range strings.Split(domains, config.ValueSeparator) {
@@ -213,9 +241,15 @@ func handleCommonEnvVars() {
globalConfigEncrypted = true
}
globalWORMEnabled, err = config.LookupWorm()
if err != nil {
logger.Fatal(config.ErrInvalidWormValue(err), "Invalid worm configuration")
if env.IsSet(config.EnvAccessKeyOld) && env.IsSet(config.EnvSecretKeyOld) {
oldCred, err := auth.CreateCredentials(env.Get(config.EnvAccessKeyOld, ""), env.Get(config.EnvSecretKeyOld, ""))
if err != nil {
logger.Fatal(config.ErrInvalidCredentials(err),
"Unable to validate the old credentials inherited from the shell environment")
}
globalOldCred = oldCred
os.Unsetenv(config.EnvAccessKeyOld)
os.Unsetenv(config.EnvSecretKeyOld)
}
}


@@ -50,7 +50,11 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]b
}
func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
return objAPI.DeleteObject(ctx, minioMetaBucket, configFile)
err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile)
if err != nil && isErrObjectNotFound(err) {
return errConfigNotFound
}
return err
}
func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {


@@ -17,16 +17,15 @@
package cmd
import (
"context"
"fmt"
"strings"
"sync"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/config/api"
"github.com/minio/minio/cmd/config/cache"
"github.com/minio/minio/cmd/config/compress"
"github.com/minio/minio/cmd/config/etcd"
xetcd "github.com/minio/minio/cmd/config/etcd"
"github.com/minio/minio/cmd/config/etcd/dns"
xldap "github.com/minio/minio/cmd/config/identity/ldap"
"github.com/minio/minio/cmd/config/identity/openid"
@@ -38,6 +37,7 @@ import (
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/cmd/logger/target/http"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
)
func initHelp() {
@@ -49,6 +49,7 @@ func initHelp() {
config.IdentityOpenIDSubSys: openid.DefaultKVS,
config.PolicyOPASubSys: opa.DefaultKVS,
config.RegionSubSys: config.DefaultRegionKVS,
config.APISubSys: api.DefaultKVS,
config.CredentialsSubSys: config.DefaultCredentialKVS,
config.KmsVaultSubSys: crypto.DefaultVaultKVS,
config.KmsKesSubSys: crypto.DefaultKesKVS,
@@ -101,6 +102,10 @@ func initHelp() {
Key: config.KmsKesSubSys,
Description: "enable external MinIO key encryption service",
},
config.HelpKV{
Key: config.APISubSys,
Description: "manage global HTTP API call specific features, such as throttling, authentication types, etc.",
},
config.HelpKV{
Key: config.LoggerWebhookSubSys,
Description: "send server logs to webhook endpoints",
@@ -175,6 +180,7 @@ func initHelp() {
var helpMap = map[string]config.HelpKVS{
"": helpSubSys, // Help for all sub-systems.
config.RegionSubSys: config.RegionHelp,
config.APISubSys: api.Help,
config.StorageClassSubSys: storageclass.Help,
config.EtcdSubSys: etcd.Help,
config.CacheSubSys: cache.Help,
@@ -222,6 +228,10 @@ func validateConfig(s config.Config) error {
return err
}
if _, err := api.LookupConfig(s[config.APISubSys][config.Default]); err != nil {
return err
}
if globalIsXL {
if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default],
globalXLSetDriveCount); err != nil {
@@ -251,7 +261,7 @@ func validateConfig(s config.Config) error {
}
}
{
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewCustomHTTPTransport())
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
if err != nil {
return err
}
@@ -269,17 +279,27 @@ func validateConfig(s config.Config) error {
}
if _, err := openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
NewCustomHTTPTransport(), xhttp.DrainBody); err != nil {
NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
return err
}
if _, err := xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default],
globalRootCAs); err != nil {
return err
{
cfg, err := xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default],
globalRootCAs)
if err != nil {
return err
}
if cfg.Enabled {
conn, cerr := cfg.Connect()
if cerr != nil {
return cerr
}
conn.Close()
}
}
if _, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
NewCustomHTTPTransport(), xhttp.DrainBody); err != nil {
NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
return err
}
@@ -287,12 +307,12 @@ func validateConfig(s config.Config) error {
return err
}
return notify.TestNotificationTargets(s, GlobalServiceDoneCh, NewCustomHTTPTransport(),
return notify.TestNotificationTargets(s, GlobalContext.Done(), NewGatewayHTTPTransport(),
globalNotificationSys.ConfiguredTargetIDs())
}
func lookupConfigs(s config.Config) {
ctx := context.Background()
ctx := GlobalContext
var err error
if !globalActiveCred.IsValid() {
@@ -303,13 +323,13 @@ func lookupConfigs(s config.Config) {
}
}
etcdCfg, err := xetcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
}
if etcdCfg.Enabled {
globalEtcdClient, err = xetcd.New(etcdCfg)
globalEtcdClient, err = etcd.New(etcdCfg)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
}
@@ -340,6 +360,18 @@ func lookupConfigs(s config.Config) {
logger.LogIf(ctx, fmt.Errorf("Invalid region configuration: %w", err))
}
apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
}
apiRequestsMax := apiConfig.APIRequestsMax
if len(globalEndpoints.Hosts()) > 0 {
apiRequestsMax /= len(globalEndpoints.Hosts())
}
globalAPIThrottling.init(apiRequestsMax, apiConfig.APIRequestsDeadline)
if globalIsXL {
globalStorageClass, err = storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default],
globalXLSetDriveCount)
@@ -362,7 +394,7 @@ func lookupConfigs(s config.Config) {
}
}
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewCustomHTTPTransport())
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to setup KMS config: %w", err))
}
@@ -381,13 +413,13 @@ func lookupConfigs(s config.Config) {
}
globalOpenIDConfig, err = openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
NewCustomHTTPTransport(), xhttp.DrainBody)
NewGatewayHTTPTransport(), xhttp.DrainBody)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err))
}
opaCfg, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
NewCustomHTTPTransport(), xhttp.DrainBody)
NewGatewayHTTPTransport(), xhttp.DrainBody)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize OPA: %w", err))
}
@@ -412,18 +444,37 @@ func lookupConfigs(s config.Config) {
for _, l := range loggerCfg.HTTP {
if l.Enabled {
// Enable http logging
logger.AddTarget(http.New(l.Endpoint, loggerUserAgent, string(logger.All), NewCustomHTTPTransport()))
logger.AddTarget(
http.New(http.WithEndpoint(l.Endpoint),
http.WithAuthToken(l.AuthToken),
http.WithUserAgent(loggerUserAgent),
http.WithLogKind(string(logger.All)),
http.WithTransport(NewGatewayHTTPTransport()),
),
)
}
}
for _, l := range loggerCfg.Audit {
if l.Enabled {
// Enable http audit logging
logger.AddAuditTarget(http.New(l.Endpoint, loggerUserAgent, string(logger.All), NewCustomHTTPTransport()))
logger.AddAuditTarget(
http.New(http.WithEndpoint(l.Endpoint),
http.WithAuthToken(l.AuthToken),
http.WithUserAgent(loggerUserAgent),
http.WithLogKind(string(logger.All)),
http.WithTransport(NewGatewayHTTPTransport()),
),
)
}
}
globalConfigTargetList, err = notify.GetNotificationTargets(s, GlobalServiceDoneCh, NewCustomHTTPTransport())
globalConfigTargetList, err = notify.GetNotificationTargets(s, GlobalContext.Done(), NewGatewayHTTPTransport())
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
}
globalEnvTargetList, err = notify.GetNotificationTargets(newServerConfig(), GlobalContext.Done(), NewGatewayHTTPTransport())
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
}
@@ -469,6 +520,20 @@ func GetHelp(subSys, key string, envOnly bool) (Help, error) {
envHelp := config.HelpKVS{}
if envOnly {
// Only for multiple targets, make sure
// to list the ENV, for regular k/v EnableKey is
// implicit, for ENVs we cannot make it implicit.
if subSysHelp.MultipleTargets {
envK := config.EnvPrefix + strings.Join([]string{
strings.ToTitle(subSys), strings.ToTitle(madmin.EnableKey),
}, config.EnvWordDelimiter)
envHelp = append(envHelp, config.HelpKV{
Key: envK,
Description: fmt.Sprintf("enable %s target, default is 'off'", subSys),
Optional: false,
Type: "on|off",
})
}
for _, hkv := range h {
envK := config.EnvPrefix + strings.Join([]string{
strings.ToTitle(subSys), strings.ToTitle(hkv.Key),
@@ -510,11 +575,11 @@ func newSrvConfig(objAPI ObjectLayer) error {
globalServerConfigMu.Unlock()
// Save config into file.
return saveServerConfig(context.Background(), objAPI, globalServerConfig)
return saveServerConfig(GlobalContext, objAPI, globalServerConfig)
}
func getValidConfig(objAPI ObjectLayer) (config.Config, error) {
return readServerConfig(context.Background(), objAPI)
return readServerConfig(GlobalContext, objAPI)
}
// loadConfig - loads a new config from disk, overrides params


@@ -21,15 +21,13 @@ import (
"context"
"errors"
"fmt"
"os"
"strings"
"time"
"unicode/utf8"
etcd "github.com/coreos/etcd/clientv3"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
)
@@ -38,37 +36,10 @@ func handleEncryptedConfigBackend(objAPI ObjectLayer, server bool) error {
return nil
}
// If its server mode or nas gateway, migrate the backend.
doneCh := make(chan struct{})
var encrypted bool
var err error
// Migrating Config backend needs a retry mechanism for
// the following reasons:
// - Read quorum is lost just after the initialization
// of the object layer.
retryTimerCh := newRetryTimerSimple(doneCh)
var stop bool
for !stop {
select {
case <-retryTimerCh:
if encrypted, err = checkBackendEncrypted(objAPI); err != nil {
if err == errDiskNotFound ||
strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) {
logger.Info("Waiting for config backend to be encrypted..")
continue
}
close(doneCh)
return err
}
stop = true
case <-globalOSSignalCh:
close(doneCh)
return fmt.Errorf("Config encryption process stopped gracefully")
}
encrypted, err := checkBackendEncrypted(objAPI)
if err != nil {
return fmt.Errorf("Unable to encrypt config %w", err)
}
close(doneCh)
if encrypted {
// backend is encrypted, but credentials are not specified
@@ -84,42 +55,16 @@ func handleEncryptedConfigBackend(objAPI ObjectLayer, server bool) error {
return nil
}
if !globalActiveCred.IsValid() {
return errInvalidArgument
return config.ErrMissingCredentialsBackendEncrypted(nil)
}
}
activeCredOld, err := getOldCreds()
if err != nil {
return err
// Migrate IAM configuration
if err = migrateConfigPrefixToEncrypted(objAPI, globalOldCred, encrypted); err != nil {
return fmt.Errorf("Unable to migrate all config at .minio.sys/config/: %w", err)
}
doneCh = make(chan struct{})
defer close(doneCh)
retryTimerCh = newRetryTimerSimple(doneCh)
// Migrating Config backend needs a retry mechanism for
// the following reasons:
// - Read quorum is lost just after the initialization
// of the object layer.
for {
select {
case <-retryTimerCh:
// Migrate IAM configuration
if err = migrateConfigPrefixToEncrypted(objAPI, activeCredOld, encrypted); err != nil {
if err == errDiskNotFound ||
strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
logger.Info("Waiting for config backend to be encrypted..")
continue
}
return err
}
return nil
case <-globalOSSignalCh:
return fmt.Errorf("Config encryption process stopped gracefully")
}
}
return nil
}
const (
@@ -140,7 +85,7 @@ func checkBackendEtcdEncrypted(ctx context.Context, client *etcd.Client) (bool,
}
func checkBackendEncrypted(objAPI ObjectLayer) (bool, error) {
data, err := readConfig(context.Background(), objAPI, backendEncryptedFile)
data, err := readConfig(GlobalContext, objAPI, backendEncryptedFile)
if err != nil && err != errConfigNotFound {
return false, err
}
@@ -164,25 +109,7 @@ func decryptData(edata []byte, creds ...auth.Credentials) ([]byte, error) {
return data, err
}
func getOldCreds() (activeCredOld auth.Credentials, err error) {
accessKeyOld := env.Get(config.EnvAccessKeyOld, "")
secretKeyOld := env.Get(config.EnvSecretKeyOld, "")
if accessKeyOld != "" && secretKeyOld != "" {
activeCredOld, err = auth.CreateCredentials(accessKeyOld, secretKeyOld)
if err != nil {
return activeCredOld, err
}
// Once we have obtained the rotating creds
os.Unsetenv(config.EnvAccessKeyOld)
os.Unsetenv(config.EnvSecretKeyOld)
}
return activeCredOld, nil
}
func migrateIAMConfigsEtcdToEncrypted(client *etcd.Client) error {
ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
defer cancel()
func migrateIAMConfigsEtcdToEncrypted(ctx context.Context, client *etcd.Client) error {
encrypted, err := checkBackendEtcdEncrypted(ctx, client)
if err != nil {
return err
@@ -206,20 +133,15 @@ func migrateIAMConfigsEtcdToEncrypted(client *etcd.Client) error {
}
}
activeCredOld, err := getOldCreds()
if err != nil {
return err
}
if encrypted {
// No key rotation requested, and backend is
// already encrypted. We proceed without migration.
if !activeCredOld.IsValid() {
if !globalOldCred.IsValid() {
return nil
}
// No real reason to rotate if old and new creds are same.
if activeCredOld.Equal(globalActiveCred) {
if globalOldCred.Equal(globalActiveCred) {
return nil
}
@@ -228,7 +150,10 @@ func migrateIAMConfigsEtcdToEncrypted(client *etcd.Client) error {
logger.Info("Attempting encryption of all IAM users and policies on etcd")
}
r, err := client.Get(ctx, minioConfigPrefix, etcd.WithPrefix(), etcd.WithKeysOnly())
listCtx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
r, err := client.Get(listCtx, minioConfigPrefix, etcd.WithPrefix(), etcd.WithKeysOnly())
if err != nil {
return err
}
@@ -254,8 +179,8 @@ func migrateIAMConfigsEtcdToEncrypted(client *etcd.Client) error {
var data []byte
// Is rotating of creds requested?
if activeCredOld.IsValid() {
data, err = decryptData(cdata, activeCredOld, globalActiveCred)
if globalOldCred.IsValid() {
data, err = decryptData(cdata, globalOldCred, globalActiveCred)
if err != nil {
if err == madmin.ErrMaliciousData {
return config.ErrInvalidRotatingCredentialsBackendEncrypted(nil)
@@ -267,7 +192,12 @@ func migrateIAMConfigsEtcdToEncrypted(client *etcd.Client) error {
}
if !utf8.Valid(data) {
return errors.New("config data not in plain-text form")
_, err = decryptData(data, globalActiveCred)
if err == nil {
// Config is already encrypted with right keys
continue
}
return errors.New("config data not in plain-text form or encrypted")
}
cencdata, err = madmin.EncryptData(globalActiveCred.String(), data)
@@ -279,9 +209,11 @@ func migrateIAMConfigsEtcdToEncrypted(client *etcd.Client) error {
return err
}
}
if encrypted && globalActiveCred.IsValid() {
if encrypted && globalActiveCred.IsValid() && globalOldCred.IsValid() {
logger.Info("Rotation complete, please make sure to unset MINIO_ACCESS_KEY_OLD and MINIO_SECRET_KEY_OLD envs")
}
return saveKeyEtcd(ctx, client, backendEncryptedFile, backendEncryptedMigrationComplete)
}
@@ -302,14 +234,15 @@ func migrateConfigPrefixToEncrypted(objAPI ObjectLayer, activeCredOld auth.Crede
logger.Info("Attempting encryption of all config, IAM users and policies on MinIO backend")
}
err := saveConfig(context.Background(), objAPI, backendEncryptedFile, backendEncryptedMigrationIncomplete)
err := saveConfig(GlobalContext, objAPI, backendEncryptedFile, backendEncryptedMigrationIncomplete)
if err != nil {
return err
}
var marker string
for {
res, err := objAPI.ListObjects(context.Background(), minioMetaBucket, minioConfigPrefix, marker, "", maxObjectList)
res, err := objAPI.ListObjects(GlobalContext, minioMetaBucket,
minioConfigPrefix, marker, "", maxObjectList)
if err != nil {
return err
}
@@ -319,7 +252,7 @@ func migrateConfigPrefixToEncrypted(objAPI ObjectLayer, activeCredOld auth.Crede
cencdata []byte
)
cdata, err = readConfig(context.Background(), objAPI, obj.Name)
cdata, err = readConfig(GlobalContext, objAPI, obj.Name)
if err != nil {
return err
}
@@ -339,7 +272,12 @@ func migrateConfigPrefixToEncrypted(objAPI ObjectLayer, activeCredOld auth.Crede
}
if !utf8.Valid(data) {
return errors.New("config data not in plain-text form")
_, err = decryptData(data, globalActiveCred)
if err == nil {
// Config is already encrypted with right keys
continue
}
return errors.New("config data not in plain-text form or encrypted")
}
cencdata, err = madmin.EncryptData(globalActiveCred.String(), data)
@@ -347,7 +285,7 @@ func migrateConfigPrefixToEncrypted(objAPI ObjectLayer, activeCredOld auth.Crede
return err
}
if err = saveConfig(context.Background(), objAPI, obj.Name, cencdata); err != nil {
if err = saveConfig(GlobalContext, objAPI, obj.Name, cencdata); err != nil {
return err
}
}
@@ -359,9 +297,9 @@ func migrateConfigPrefixToEncrypted(objAPI ObjectLayer, activeCredOld auth.Crede
marker = res.NextMarker
}
if encrypted && globalActiveCred.IsValid() {
if encrypted && globalActiveCred.IsValid() && activeCredOld.IsValid() {
logger.Info("Rotation complete, please make sure to unset MINIO_ACCESS_KEY_OLD and MINIO_SECRET_KEY_OLD envs")
}
return saveConfig(context.Background(), objAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
return saveConfig(GlobalContext, objAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
}


@@ -18,7 +18,6 @@ package cmd
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
@@ -2430,7 +2429,7 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
defer func() {
if err == nil {
if globalEtcdClient != nil {
deleteKeyEtcd(context.Background(), globalEtcdClient, configFile)
deleteKeyEtcd(GlobalContext, globalEtcdClient, configFile)
} else {
// Rename config.json to config.json.deprecated only upon
// success of this function.
@@ -2440,7 +2439,7 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
}()
// Verify if backend already has the file (after holding lock)
if err = checkConfig(context.Background(), objAPI, configFile); err != errConfigNotFound {
if err = checkConfig(GlobalContext, objAPI, configFile); err != errConfigNotFound {
return err
} // if errConfigNotFound proceed to migrate..
@@ -2466,7 +2465,7 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
// Initialize the server config, if no config exists.
return newSrvConfig(objAPI)
}
return saveServerConfig(context.Background(), objAPI, config)
return saveServerConfig(GlobalContext, objAPI, config)
}
// Migrates '.minio.sys/config.json' to v33.
@@ -2502,7 +2501,7 @@ func migrateMinioSysConfig(objAPI ObjectLayer) error {
}
func checkConfigVersion(objAPI ObjectLayer, configFile string, version string) (bool, []byte, error) {
data, err := readConfig(context.Background(), objAPI, configFile)
data, err := readConfig(GlobalContext, objAPI, configFile)
if err != nil {
return false, nil, err
}
@@ -2548,7 +2547,7 @@ func migrateV27ToV28MinioSys(objAPI ObjectLayer) error {
cfg.Version = "28"
cfg.KMS = crypto.KMSConfig{}
if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from 27 to 28. %w", err)
}
@@ -2575,7 +2574,7 @@ func migrateV28ToV29MinioSys(objAPI ObjectLayer) error {
}
cfg.Version = "29"
if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from 28 to 29. %w", err)
}
@@ -2607,7 +2606,7 @@ func migrateV29ToV30MinioSys(objAPI ObjectLayer) error {
cfg.Compression.Extensions = strings.Split(compress.DefaultExtensions, config.ValueSeparator)
cfg.Compression.MimeTypes = strings.Split(compress.DefaultMimeTypes, config.ValueSeparator)
if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from 29 to 30. %w", err)
}
@@ -2642,7 +2641,7 @@ func migrateV30ToV31MinioSys(objAPI ObjectLayer) error {
AuthToken: "",
}
if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from 30 to 31. %w", err)
}
@@ -2672,7 +2671,7 @@ func migrateV31ToV32MinioSys(objAPI ObjectLayer) error {
cfg.Notify.NSQ = make(map[string]target.NSQArgs)
cfg.Notify.NSQ["1"] = target.NSQArgs{}
if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from 31 to 32. %w", err)
}
@@ -2700,7 +2699,7 @@ func migrateV32ToV33MinioSys(objAPI ObjectLayer) error {
cfg.Version = "33"
if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, cfg); err != nil {
return fmt.Errorf("Failed to migrate config from '32' to '33' . %w", err)
}
@@ -2777,7 +2776,7 @@ func migrateMinioSysConfigToKV(objAPI ObjectLayer) error {
notify.SetNotifyWebhook(newCfg, k, args)
}
if err = saveServerConfig(context.Background(), objAPI, newCfg); err != nil {
if err = saveServerConfig(GlobalContext, objAPI, newCfg); err != nil {
return err
}


@@ -20,14 +20,13 @@ import (
"bytes"
"context"
"encoding/json"
"fmt"
"path"
"sort"
"strings"
"time"
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/madmin"
)
@@ -41,9 +40,6 @@ const (
// MinIO configuration file.
minioConfigFile = "config.json"
// MinIO configuration backup file
minioConfigBackupFile = minioConfigFile + ".backup"
)
func listServerConfigHistory(ctx context.Context, objAPI ObjectLayer, withData bool, count int) (
@@ -170,6 +166,7 @@ func readServerConfig(ctx context.Context, objAPI ObjectLayer) (config.Config, e
}
var config = config.New()
var json = jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(configData, &config); err != nil {
return nil, err
}
@@ -186,16 +183,14 @@ func (sys *ConfigSys) Load(objAPI ObjectLayer) error {
}
// WatchConfigNASDisk - watches nas disk on periodic basis.
func (sys *ConfigSys) WatchConfigNASDisk(objAPI ObjectLayer) {
func (sys *ConfigSys) WatchConfigNASDisk(ctx context.Context, objAPI ObjectLayer) {
configInterval := globalRefreshIAMInterval
watchDisk := func() {
ticker := time.NewTicker(configInterval)
defer ticker.Stop()
for {
select {
case <-GlobalServiceDoneCh:
case <-ctx.Done():
return
case <-ticker.C:
case <-time.After(configInterval):
loadConfig(objAPI)
}
}
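WatchConfigNASDisk now takes a context and selects on ctx.Done() plus time.After instead of a ticker and the global done channel. A standalone sketch of the resulting polling loop, using only the standard library (pollConfig stands in for loadConfig):

```
package main

import (
	"context"
	"fmt"
	"time"
)

// watch polls until the caller cancels, the shape WatchConfigNASDisk
// now has.
func watch(ctx context.Context, interval time.Duration, pollConfig func()) {
	for {
		select {
		case <-ctx.Done():
			return // replaces the old GlobalServiceDoneCh exit
		case <-time.After(interval):
			pollConfig()
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go watch(ctx, 100*time.Millisecond, func() { fmt.Println("reload config") })
	time.Sleep(350 * time.Millisecond)
	cancel()
}
```

time.After allocates a fresh timer per iteration but needs no Stop; the ticker variant it replaces had to be stopped via defer.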
@@ -210,33 +205,7 @@ func (sys *ConfigSys) Init(objAPI ObjectLayer) error {
return errInvalidArgument
}
doneCh := make(chan struct{})
defer close(doneCh)
// Initializing configuration needs a retry mechanism for
// the following reasons:
// - Read quorum is lost just after the initialization
// of the object layer.
// - Write quorum not met when upgrading configuration
// version is needed.
retryTimerCh := newRetryTimerSimple(doneCh)
for {
select {
case <-retryTimerCh:
if err := initConfig(objAPI); err != nil {
if err == errDiskNotFound ||
strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
logger.Info("Waiting for configuration to be initialized..")
continue
}
return err
}
return nil
case <-globalOSSignalCh:
return fmt.Errorf("Initializing config sub-system gracefully stopped")
}
}
return initConfig(objAPI)
}
// NewConfigSys - creates new config system object.

cmd/config/api/api.go (new file)

@@ -0,0 +1,91 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package api
import (
"encoding/json"
"errors"
"strconv"
"time"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/pkg/env"
)
const (
apiRequestsMax = "requests_max"
apiRequestsDeadline = "requests_deadline"
)
// DefaultKVS - default api sub-system config
var (
DefaultKVS = config.KVS{
config.KV{
Key: apiRequestsMax,
Value: "0",
},
config.KV{
Key: apiRequestsDeadline,
Value: "10s",
},
}
)
// Config api configuration
type Config struct {
APIRequestsMax int `json:"requests_max"`
APIRequestsDeadline time.Duration `json:"requests_deadline"`
}
// UnmarshalJSON - custom JSON unmarshal for api Config; the Alias type prevents recursing back into this method.
func (sCfg *Config) UnmarshalJSON(data []byte) error {
type Alias Config
aux := &struct {
*Alias
}{
Alias: (*Alias)(sCfg),
}
return json.Unmarshal(data, &aux)
}
// LookupConfig - lookup api config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS) (cfg Config, err error) {
if err = config.CheckValidKeys(config.APISubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}
// Check environment variables parameters
requestsMax, err := strconv.Atoi(env.Get(config.EnvAPIRequestsMax, kvs.Get(apiRequestsMax)))
if err != nil {
return cfg, err
}
if requestsMax < 0 {
return cfg, errors.New("invalid API max requests value")
}
requestsDeadline, err := time.ParseDuration(env.Get(config.EnvAPIRequestsDeadline, kvs.Get(apiRequestsDeadline)))
if err != nil {
return cfg, err
}
cfg = Config{
APIRequestsMax: requestsMax,
APIRequestsDeadline: requestsDeadline,
}
return cfg, nil
}
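LookupConfig resolves each key from the environment first and falls back to the stored KVS. A hedged usage sketch; the env name MINIO_API_REQUESTS_MAX is an assumption following the MINIO_<SUBSYS>_<KEY> naming seen elsewhere in this diff (the constant actually consulted is config.EnvAPIRequestsMax), and running it requires the MinIO source tree:

```
package main

import (
	"fmt"
	"os"

	"github.com/minio/minio/cmd/config/api"
)

func main() {
	// Assumed env name; overrides the "0" default from DefaultKVS.
	os.Setenv("MINIO_API_REQUESTS_MAX", "1600")
	cfg, err := api.LookupConfig(api.DefaultKVS)
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println(cfg.APIRequestsMax, cfg.APIRequestsDeadline) // 1600 10s
}
```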

cmd/config/api/help.go (new file)

@@ -0,0 +1,37 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package api
import "github.com/minio/minio/cmd/config"
// Help template for api feature.
var (
Help = config.HelpKVS{
config.HelpKV{
Key: apiRequestsMax,
Description: `set the maximum number of concurrent requests, e.g. "1600"`,
Optional: true,
Type: "number",
},
config.HelpKV{
Key: apiRequestsDeadline,
Description: `set the deadline for API requests waiting to be processed, e.g. "1m"`,
Optional: true,
Type: "duration",
},
}
)


@@ -28,12 +28,15 @@ import (
// Config represents cache config settings
type Config struct {
Enabled bool `json:"-"`
Drives []string `json:"drives"`
Expiry int `json:"expiry"`
MaxUse int `json:"maxuse"`
Quota int `json:"quota"`
Exclude []string `json:"exclude"`
Enabled bool `json:"-"`
Drives []string `json:"drives"`
Expiry int `json:"expiry"`
MaxUse int `json:"maxuse"`
Quota int `json:"quota"`
Exclude []string `json:"exclude"`
After int `json:"after"`
WatermarkLow int `json:"watermark_low"`
WatermarkHigh int `json:"watermark_high"`
}
// UnmarshalJSON - implements JSON unmarshal interface for unmarshalling
@@ -60,7 +63,18 @@ func (cfg *Config) UnmarshalJSON(data []byte) (err error) {
if _cfg.Quota < 0 {
return errors.New("config quota value should not be null or negative")
}
if _cfg.After < 0 {
return errors.New("cache after value should not be less than 0")
}
if _cfg.WatermarkLow < 0 || _cfg.WatermarkLow > 100 {
return errors.New("config low watermark value should be between 0 and 100")
}
if _cfg.WatermarkHigh < 0 || _cfg.WatermarkHigh > 100 {
return errors.New("config high watermark value should be between 0 and 100")
}
if _cfg.WatermarkLow > 0 && (_cfg.WatermarkLow >= _cfg.WatermarkHigh) {
return errors.New("config low watermark value should be less than high watermark")
}
return nil
}
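The new After and watermark fields are validated during unmarshalling: watermarks must be percentages and, when set, the low mark must stay below the high one. An illustrative blob that passes the checks shown in this hunk (field names match the json tags above):

```
package main

import (
	"encoding/json"
	"fmt"

	"github.com/minio/minio/cmd/config/cache"
)

func main() {
	// Values chosen to satisfy the added validation (low < high, both 0-100).
	blob := []byte(`{
		"drives": ["/mnt/cache1"],
		"expiry": 90,
		"quota": 80,
		"after": 3,
		"watermark_low": 70,
		"watermark_high": 80
	}`)
	var cfg cache.Config
	if err := json.Unmarshal(blob, &cfg); err != nil {
		fmt.Println("rejected:", err)
		return
	}
	fmt.Printf("cache after %d hits; evict from %d%% down to %d%%\n",
		cfg.After, cfg.WatermarkHigh, cfg.WatermarkLow)
}
```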


@@ -50,5 +50,23 @@ var (
Optional: true,
Type: "sentence",
},
config.HelpKV{
Key: After,
Description: `minimum accesses before caching an object`,
Optional: true,
Type: "number",
},
config.HelpKV{
Key: WatermarkLow,
Description: `% of cache use at which to stop cache eviction`,
Optional: true,
Type: "number",
},
config.HelpKV{
Key: WatermarkHigh,
Description: `% of cache use at which to start cache eviction`,
Optional: true,
Type: "number",
},
}
)


@@ -26,21 +26,31 @@ import (
// Cache ENVs
const (
Drives = "drives"
Exclude = "exclude"
Expiry = "expiry"
MaxUse = "maxuse"
Quota = "quota"
Drives = "drives"
Exclude = "exclude"
Expiry = "expiry"
MaxUse = "maxuse"
Quota = "quota"
After = "after"
WatermarkLow = "watermark_low"
WatermarkHigh = "watermark_high"
EnvCacheDrives = "MINIO_CACHE_DRIVES"
EnvCacheExclude = "MINIO_CACHE_EXCLUDE"
EnvCacheExpiry = "MINIO_CACHE_EXPIRY"
EnvCacheMaxUse = "MINIO_CACHE_MAXUSE"
EnvCacheQuota = "MINIO_CACHE_QUOTA"
EnvCacheAfter = "MINIO_CACHE_AFTER"
EnvCacheWatermarkLow = "MINIO_CACHE_WATERMARK_LOW"
EnvCacheWatermarkHigh = "MINIO_CACHE_WATERMARK_HIGH"
EnvCacheDrives = "MINIO_CACHE_DRIVES"
EnvCacheExclude = "MINIO_CACHE_EXCLUDE"
EnvCacheExpiry = "MINIO_CACHE_EXPIRY"
EnvCacheMaxUse = "MINIO_CACHE_MAXUSE"
EnvCacheQuota = "MINIO_CACHE_QUOTA"
EnvCacheEncryptionMasterKey = "MINIO_CACHE_ENCRYPTION_MASTER_KEY"
DefaultExpiry = "90"
DefaultQuota = "80"
DefaultExpiry = "90"
DefaultQuota = "80"
DefaultAfter = "0"
DefaultWaterMarkLow = "70"
DefaultWaterMarkHigh = "80"
)
// DefaultKVS - default KV settings for caching.
@@ -62,6 +72,18 @@ var (
Key: Quota,
Value: DefaultQuota,
},
config.KV{
Key: After,
Value: DefaultAfter,
},
config.KV{
Key: WatermarkLow,
Value: DefaultWaterMarkLow,
},
config.KV{
Key: WatermarkHigh,
Value: DefaultWaterMarkHigh,
},
}
)
@@ -79,7 +101,6 @@ func Enabled(kvs config.KVS) bool {
// variables and merge them with provided CacheConfiguration.
func LookupConfig(kvs config.KVS) (Config, error) {
cfg := Config{}
if err := config.CheckValidKeys(config.CacheSubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}
@@ -121,9 +142,7 @@ func LookupConfig(kvs config.KVS) (Config, error) {
return cfg, config.ErrInvalidCacheQuota(err)
}
cfg.Quota = cfg.MaxUse
}
if quotaStr := env.Get(EnvCacheQuota, kvs.Get(Quota)); quotaStr != "" {
} else if quotaStr := env.Get(EnvCacheQuota, kvs.Get(Quota)); quotaStr != "" {
cfg.Quota, err = strconv.Atoi(quotaStr)
if err != nil {
return cfg, config.ErrInvalidCacheQuota(err)
@@ -136,5 +155,45 @@ func LookupConfig(kvs config.KVS) (Config, error) {
cfg.MaxUse = cfg.Quota
}
if afterStr := env.Get(EnvCacheAfter, kvs.Get(After)); afterStr != "" {
cfg.After, err = strconv.Atoi(afterStr)
if err != nil {
return cfg, config.ErrInvalidCacheAfter(err)
}
// after should be a valid value >= 0.
if cfg.After < 0 {
err := errors.New("cache after value cannot be less than 0")
return cfg, config.ErrInvalidCacheAfter(err)
}
}
if lowWMStr := env.Get(EnvCacheWatermarkLow, kvs.Get(WatermarkLow)); lowWMStr != "" {
cfg.WatermarkLow, err = strconv.Atoi(lowWMStr)
if err != nil {
return cfg, config.ErrInvalidCacheWatermarkLow(err)
}
// WatermarkLow should be a valid percentage.
if cfg.WatermarkLow < 0 || cfg.WatermarkLow > 100 {
err := errors.New("config min watermark value should be between 0 and 100")
return cfg, config.ErrInvalidCacheWatermarkLow(err)
}
}
if highWMStr := env.Get(EnvCacheWatermarkHigh, kvs.Get(WatermarkHigh)); highWMStr != "" {
cfg.WatermarkHigh, err = strconv.Atoi(highWMStr)
if err != nil {
return cfg, config.ErrInvalidCacheWatermarkHigh(err)
}
// WatermarkHigh should be a valid percentage.
if cfg.WatermarkHigh < 0 || cfg.WatermarkHigh > 100 {
err := errors.New("config high watermark value should be between 0 and 100")
return cfg, config.ErrInvalidCacheWatermarkHigh(err)
}
}
if cfg.WatermarkLow > cfg.WatermarkHigh {
err := errors.New("config high watermark value should be greater than low watermark value")
return cfg, config.ErrInvalidCacheWatermarkHigh(err)
}
return cfg, nil
}
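The same values can come from the environment, using the MINIO_CACHE_* names introduced above. A sketch, assuming drive parsing (which happens outside the lines shown) accepts the placeholder absolute path:

```
package main

import (
	"fmt"
	"os"

	"github.com/minio/minio/cmd/config/cache"
)

func main() {
	// Env names as introduced in this hunk; the drive path is a placeholder.
	os.Setenv("MINIO_CACHE_DRIVES", "/mnt/cache1")
	os.Setenv("MINIO_CACHE_AFTER", "3")
	os.Setenv("MINIO_CACHE_WATERMARK_LOW", "60")
	os.Setenv("MINIO_CACHE_WATERMARK_HIGH", "90")
	cfg, err := cache.LookupConfig(cache.DefaultKVS)
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println(cfg.After, cfg.WatermarkLow, cfg.WatermarkHigh) // 3 60 90
}
```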

cmd/config/certsinfo.go (new file)

@@ -0,0 +1,92 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
import (
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"net/http"
"strings"
color "github.com/minio/minio/pkg/color"
)
// Extra ASN.1 OIDs that we may need to handle
var (
oidEmailAddress = []int{1, 2, 840, 113549, 1, 9, 1}
)
// printName prints the fields of a distinguished name, which include such
// things as its common name and locality.
func printName(names []pkix.AttributeTypeAndValue, buf *strings.Builder) []string {
values := []string{}
for _, name := range names {
oid := name.Type
if len(oid) == 4 && oid[0] == 2 && oid[1] == 5 && oid[2] == 4 {
switch oid[3] {
case 3:
values = append(values, fmt.Sprintf("CN=%s", name.Value))
case 6:
values = append(values, fmt.Sprintf("C=%s", name.Value))
case 8:
values = append(values, fmt.Sprintf("ST=%s", name.Value))
case 10:
values = append(values, fmt.Sprintf("O=%s", name.Value))
case 11:
values = append(values, fmt.Sprintf("OU=%s", name.Value))
default:
values = append(values, fmt.Sprintf("UnknownOID=%s", name.Type.String()))
}
} else if oid.Equal(oidEmailAddress) {
values = append(values, fmt.Sprintf("emailAddress=%s", name.Value))
} else {
values = append(values, fmt.Sprintf("UnknownOID=%s", name.Type.String()))
}
}
if len(values) > 0 {
buf.WriteString(values[0])
for i := 1; i < len(values); i++ {
buf.WriteString(", " + values[i])
}
buf.WriteString("\n")
}
return values
}
// CertificateText returns a human-readable string representation
// of the certificate cert. The format is similar to the OpenSSL
// way of printing certificates (not identical).
func CertificateText(cert *x509.Certificate) string {
var buf strings.Builder
buf.WriteString(color.Blue("\nCertificate:\n"))
if cert.SignatureAlgorithm != x509.UnknownSignatureAlgorithm {
buf.WriteString(color.Blue("%4sSignature Algorithm: ", "") + color.Bold(fmt.Sprintf("%s\n", cert.SignatureAlgorithm)))
}
// Issuer information
buf.WriteString(color.Blue("%4sIssuer: ", ""))
printName(cert.Issuer.Names, &buf)
// Validity information
buf.WriteString(color.Blue("%4sValidity\n", ""))
buf.WriteString(color.Bold(fmt.Sprintf("%8sNot Before: %s\n", "", cert.NotBefore.Format(http.TimeFormat))))
buf.WriteString(color.Bold(fmt.Sprintf("%8sNot After : %s\n", "", cert.NotAfter.Format(http.TimeFormat))))
return buf.String()
}
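A usage sketch for CertificateText: decode a PEM certificate from disk and print the OpenSSL-style summary. The public.crt path is a placeholder; any PEM-encoded certificate works:

```
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"

	"github.com/minio/minio/cmd/config"
)

func main() {
	certPEM, err := os.ReadFile("public.crt") // placeholder path
	if err != nil {
		fmt.Println(err)
		return
	}
	block, _ := pem.Decode(certPEM)
	if block == nil || block.Type != "CERTIFICATE" {
		fmt.Println("no certificate PEM block found")
		return
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Print(config.CertificateText(cert))
}
```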


@@ -24,7 +24,7 @@ import (
"regexp"
"strings"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
@@ -70,6 +70,7 @@ const (
RegionSubSys = "region"
EtcdSubSys = "etcd"
StorageClassSubSys = "storage_class"
APISubSys = "api"
CompressionSubSys = "compression"
KmsVaultSubSys = "kms_vault"
KmsKesSubSys = "kms_kes"
@@ -101,6 +102,7 @@ var SubSystems = set.CreateStringSet([]string{
RegionSubSys,
EtcdSubSys,
CacheSubSys,
APISubSys,
StorageClassSubSys,
CompressionSubSys,
KmsVaultSubSys,
@@ -128,6 +130,7 @@ var SubSystemsSingleTargets = set.CreateStringSet([]string{
RegionSubSys,
EtcdSubSys,
CacheSubSys,
APISubSys,
StorageClassSubSys,
CompressionSubSys,
KmsVaultSubSys,
@@ -196,6 +199,23 @@ func (kvs KVS) Empty() bool {
return len(kvs) == 0
}
// Keys returns the list of keys for the current KVS
func (kvs KVS) Keys() []string {
var keys = make([]string, len(kvs))
var foundComment bool
for i := range kvs {
if kvs[i].Key == madmin.CommentKey {
foundComment = true
}
keys[i] = kvs[i].Key
}
// Comment KV not found, add it explicitly.
if !foundComment {
keys = append(keys, madmin.CommentKey)
}
return keys
}
func (kvs KVS) String() string {
var s strings.Builder
for _, kv := range kvs {
@@ -581,9 +601,20 @@ func (c Config) SetKVS(s string, defaultKVS map[string]KVS) error {
return Errorf("sub-system '%s' only supports single target", subSystemValue[0])
}
tgt := Default
subSys := subSystemValue[0]
if len(subSystemValue) == 2 {
tgt = subSystemValue[1]
}
fields := madmin.KvFields(inputs[1], defaultKVS[subSys].Keys())
if len(fields) == 0 {
return Errorf("sub-system '%s' cannot have empty keys", subSys)
}
var kvs = KVS{}
var prevK string
for _, v := range strings.Fields(inputs[1]) {
for _, v := range fields {
kv := strings.SplitN(v, KvSeparator, 2)
if len(kv) == 0 {
continue
@@ -604,12 +635,6 @@ func (c Config) SetKVS(s string, defaultKVS map[string]KVS) error {
return Errorf("key '%s', cannot have empty value", kv[0])
}
tgt := Default
subSys := subSystemValue[0]
if len(subSystemValue) == 2 {
tgt = subSystemValue[1]
}
_, ok := kvs.Lookup(Enable)
// Check if state is required
_, enableRequired := defaultKVS[subSys].Lookup(Enable)
@@ -621,6 +646,12 @@ func (c Config) SetKVS(s string, defaultKVS map[string]KVS) error {
currKVS, ok := c[subSys][tgt]
if !ok {
currKVS = defaultKVS[subSys]
} else {
for _, kv := range defaultKVS[subSys] {
if _, ok = currKVS.Lookup(kv.Key); !ok {
currKVS.Set(kv.Key, kv.Value)
}
}
}
for _, kv := range kvs {
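The diff is truncated mid-loop above, but the else branch it adds back-fills keys that exist in the current defaults yet are missing from a previously stored KVS, so older configs keep working when new keys (such as the api sub-system's) are introduced. A standalone sketch of that merge, with simplified KV/KVS stand-ins for the config package's types:

```
package main

import "fmt"

type KV struct{ Key, Value string }
type KVS []KV

func (kvs KVS) Lookup(key string) (string, bool) {
	for _, kv := range kvs {
		if kv.Key == key {
			return kv.Value, true
		}
	}
	return "", false
}

func (kvs *KVS) Set(key, value string) { *kvs = append(*kvs, KV{key, value}) }

func main() {
	defaults := KVS{{"requests_max", "0"}, {"requests_deadline", "10s"}}
	curr := KVS{{"requests_max", "1600"}} // stored before requests_deadline existed
	for _, kv := range defaults {
		if _, ok := curr.Lookup(kv.Key); !ok {
			curr.Set(kv.Key, kv.Value) // back-fill newly added default keys
		}
	}
	fmt.Println(curr) // [{requests_max 1600} {requests_deadline 10s}]
}
```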

Some files were not shown because too many files have changed in this diff.