Compare commits

400 Commits

Author SHA1 Message Date
Anis Elleuch
b05825ffe8 s3: Fix precondition failed in CopyObjectPart when src is encrypted (#7276)
Move CopyObject precondition checks into GetObjectReader
in order to perform SSE-C pre-condition checks using the
last 32 bytes of the encrypted ETag rather than the decrypted
ETag.

This also necessitates moving precondition checks for
gateways to the gateway layer rather than the object handler.
2019-03-06 12:38:41 -08:00
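A minimal Go sketch of the idea behind this fix; `etagTail` and `preconditionMatch` are hypothetical helper names for illustration, not MinIO's API:

```go
package main

import "fmt"

// etagTail returns the last 32 characters of an encrypted ETag.
// Hypothetical helper for illustration only.
func etagTail(etag string) string {
	if len(etag) > 32 {
		return etag[len(etag)-32:]
	}
	return etag
}

// preconditionMatch sketches an If-Match check against an SSE-C
// encrypted source: compare ETag tails instead of decrypted ETags.
func preconditionMatch(ifMatch, storedETag string) bool {
	return etagTail(ifMatch) == etagTail(storedETag)
}

func main() {
	tail := "aaaaaaaabbbbbbbbccccccccdddddddd" // 32 chars
	fmt.Println(preconditionMatch("XXXX"+tail, "YYYY"+tail)) // true
}
```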
kannappanr
39ddb78c75 CORS: Expose all headers on response (#7331)
Fixes #7289
2019-03-06 11:58:53 -08:00
Harshavardhana
12eb71828b Fix posix tests for SimpleCI (#7328) 2019-03-05 19:53:01 -08:00
Praveen raj Mani
c0a1369b73 Construct dynamic XML error responses for postpolicyform validation (#7321)
Fixes #7314
2019-03-05 12:10:47 -08:00
kannappanr
c57159a0fe fs mode: List already existing buckets with capital letters (#7244)
If a bucket with capitalized letters is created, an `InvalidBucketName` error
will be returned.
Pre-existing buckets with capitalized letters, however, will now be listed.

Fixes #6938
2019-03-05 10:42:32 -08:00
Kale Blankenship
ef132c5714 Replace snappy.Writer/io.Pipe with snappyCompressReader. (#7316)
Prevents deferred close functions from being called while still
attempting to copy the reader to the snappyWriter.
Reduces code duplication when compressing objects.
2019-03-05 08:35:37 -08:00
Aditya Manthramurthy
c54b0c0ca1 Fix a race in tests (#7326) 2019-03-05 21:34:17 +05:30
Aditya Manthramurthy
e8e9cd3e74 Close GlobalServiceDoneCh when quitting (#7322)
This change allows indefinitely running go-routines to clean up
gracefully.

This channel is now closed at the beginning of each test so that
long-running go-routines quit, and a new channel is assigned.
2019-03-04 14:33:14 -08:00
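The mechanism relied on here is the standard Go done-channel pattern: closing a channel unblocks every goroutine selecting on it. A self-contained sketch (names are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// worker runs until the done channel is closed; close() acts as a
// broadcast, so any number of workers quit from a single close call.
func worker(done <-chan struct{}) {
	for {
		select {
		case <-done:
			fmt.Println("worker: quitting gracefully")
			return
		case <-time.After(100 * time.Millisecond):
			// periodic work would happen here
		}
	}
}

func main() {
	done := make(chan struct{})
	go worker(done)
	time.Sleep(300 * time.Millisecond)
	close(done) // signal shutdown to all listeners
	time.Sleep(50 * time.Millisecond)
}
```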
Sidhartha Mani
f7611bcd48 update simple-ci scripts (#7309) 2019-03-03 21:59:14 +05:30
Aditya Manthramurthy
19c10cb4d0 Update vendored sjson (includes a bug fix) (#7317) 2019-03-01 15:04:38 -08:00
poornas
6b4c6f69af Remove minio-go validator from vendor dir (#7312) 2019-02-28 12:16:26 -08:00
poornas
2564147ab4 Filter Expires header from user metadata (#7269)
Instead, save it as a struct field in ObjectInfo, as it is
a standard HTTP header. Fixes minio/mc#2690
2019-02-28 11:01:25 -08:00
Harshavardhana
c3ca954684 Implement AssumeRole API for Minio users (#7267)
For the actual API reference, see:

https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html

Documentation is added and updated as well at docs/sts/assume-role.md

Fixes #6381
2019-02-27 17:46:55 -08:00
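For a sense of the wire format, here is a hedged Go sketch that assembles an AssumeRole request following the AWS STS form encoding linked above; the endpoint is a placeholder, and a real call must additionally be Signature V4 signed with the user's credentials:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	// AssumeRole is a POST with form-encoded Action/Version fields,
	// per the AWS STS API referenced above.
	form := url.Values{}
	form.Set("Action", "AssumeRole")
	form.Set("Version", "2011-06-15")
	form.Set("DurationSeconds", "3600") // requested credential lifetime

	req, err := http.NewRequest(http.MethodPost,
		"http://localhost:9000", strings.NewReader(form.Encode()))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	// ... sign with AWS SigV4 and send via http.DefaultClient.Do(req)
	fmt.Println(req.Method, req.URL, "->", form.Encode())
}
```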
Harshavardhana
ce588d1489 Improve ListObjects performance by listing in parallel (#7270)
The side effect of this change is a memory
increase, but this is a trade-off between
performance and actual memory usage.

For all practical scenarios this should be
an adequate change.
2019-02-27 14:39:22 -08:00
Sidhartha Mani
b983da957d run gateway mint test in full mode (#7296) 2019-02-27 10:03:23 -08:00
Harshavardhana
5e69a107d8 Support detecting mountpoints correctly (#7288)
Currently Windows support relies on Symlink as
a way to detect a drive; this doesn't work on the
latest Windows 2016. Fix this to use a proper
mechanism based on Win32 APIs.

Additionally also add support for detecting bind mounts
on Linux.
2019-02-26 18:04:53 -08:00
Dee Koder
5085bef397 Share button icon is changed (#7300) 2019-02-26 16:34:29 -08:00
poornas
8022a6efd9 Return ETag for 0-byte object prefixes (#7291)
Fixes: #7290
2019-02-26 15:09:14 -08:00
Minio Trusted
d3125857a8 Update yaml files to latest version RELEASE.2019-02-26T19-51-46Z 2019-02-26 19:58:09 +00:00
Praveen raj Mani
78d116c487 Event persistence for MQTT (#7268)
- The events will be persisted in queueStore if `queueDir` is set.
- Otherwise, if queueDir is not set, events persist in memory.

The events are replayed back when the mqtt broker is back online.
2019-02-25 18:01:13 -08:00
dcharbonnier
2fc341394d fix json syntax (#7285) 2019-02-25 13:02:59 +05:30
Anis Elleuch
6584c7ea2b s3: Encode StartAfter when encoding type is passed (#7281)
In ObjectList V2, StartAfter needs to be encoded when encoding-type
is specified.
2019-02-24 18:50:28 -08:00
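As a rough illustration, Go's `url.QueryEscape` shows what URL-encoded response fields look like (an assumption for illustration; S3's object-key encoding is close to, but not byte-for-byte identical with, query escaping):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// With encoding-type=url, fields echoed back in the listing
	// response (StartAfter, Prefix, object keys) are URL-encoded.
	startAfter := "photos/2019 winter/snöboll.jpg"
	fmt.Println(url.QueryEscape(startAfter))
	// prints photos%2F2019+winter%2Fsn%C3%B6boll.jpg
}
```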
Harshavardhana
2520e535a0 Allow lazyQuotes for certain types of CSV (#7278)
Set lazyQuotes to true to allow a quote to appear
in an unquoted field and a non-doubled quote to
appear in a quoted field.
2019-02-24 06:51:02 -08:00
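This maps directly onto the `LazyQuotes` flag of Go's `encoding/csv` reader; a small runnable sketch:

```go
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	// `b"c` contains a bare quote in an unquoted field; the strict
	// default parser rejects it, LazyQuotes accepts it.
	r := csv.NewReader(strings.NewReader(`a,b"c,d`))
	r.LazyQuotes = true
	rec, err := r.Read()
	fmt.Println(rec, err) // [a b"c d] <nil>
}
```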
Anis Elleuch
5efbe8a1b3 s3: Add support of encodingType parameter (#7265)
This commit honors encoding-type parameter in object listing,
parts listing and multipart uploads listing.
2019-02-24 11:44:24 +05:30
Nitish Tiwari
dab314900d Fix healthcheck script to wait for netstat command output (#7275)
Fixes #7272
2019-02-23 09:32:53 -08:00
Harshavardhana
7923b83953 Support multiple-domains in MINIO_DOMAIN (#7274)
Fixes #7173
2019-02-23 08:48:01 +05:30
Aditya Manthramurthy
80a351633f Update vendorized bcicen/jstream (#7257)
- Includes an error handling fix that is waiting to be merged upstream
- Uses order-preserving (un)marshalling for JSON objects.
2019-02-20 23:59:23 -08:00
Harshavardhana
bedcb7442a Write xml.Header first instead of spaces to handle XML parsers (#7253)
XML parsers in clients like the AWS SDK for Java and the
AWS CLI are unable to handle `\r\n` characters. To avoid these
errors, send the XML header first and write whitespace characters
instead.

Also handle cases to avoid double WriteHeader calls
2019-02-21 11:50:15 +05:30
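A sketch of the pattern (handler shape and timings assumed for illustration): write `encoding/xml`'s `Header` constant immediately, then stream whitespace, which is valid between the XML declaration and the root element, until the slow operation completes:

```go
package main

import (
	"encoding/xml"
	"net/http"
	"time"
)

// slowXMLHandler sketches the pattern: write the XML header first,
// then keep the connection alive with whitespace until the long
// operation finishes, so strict parsers see a valid document start.
func slowXMLHandler(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte(xml.Header)) // `<?xml version="1.0" encoding="UTF-8"?>`
	done := make(chan struct{})
	go func() { time.Sleep(3 * time.Second); close(done) }() // the slow op
	for {
		select {
		case <-done:
			w.Write([]byte("<Result/>"))
			return
		case <-time.After(500 * time.Millisecond):
			w.Write([]byte(" ")) // whitespace keep-alive
			if f, ok := w.(http.Flusher); ok {
				f.Flush()
			}
		}
	}
}

func main() {
	http.HandleFunc("/", slowXMLHandler)
	http.ListenAndServe(":8080", nil)
}
```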
Harshavardhana
2232b0b55f Fix mac build failure for healthcheck binary (#7263) 2019-02-21 08:11:16 +05:30
Harshavardhana
91576d416d Fix GetLocalPeer usage in perf handlers (#7249)
GetLocalPeer usage should be fixed so that it is called only
once per request, not once for every local endpoint.
2019-02-20 16:04:55 -08:00
Minio Trusted
3aabe45fd9 Update yaml files to latest version RELEASE.2019-02-20T22-44-29Z 2019-02-20 22:49:40 +00:00
Krishna Srinivas
6dd26b8231 Detect change in underlying mounted disks (#7229) 2019-02-20 13:32:29 -08:00
poornas
e098852a80 Revert PR #7241 to fix vault renewal (#7259)
- The current implementation was spawning renewer goroutines
without waiting for the lease duration to end. Remove the vault renewer,
call vault.RenewToken directly, and manage reauthentication if
the lease has expired.
2019-02-20 12:23:59 -08:00
Nitish Tiwari
1e82c4a7c4 Implement Docker healthcheck script in Go (#7105)
A Go script makes it easy to read/maintain. Also updated the timeout
in Dockerfiles from 5s to the default 30s and the test interval to 1m.

A higher timeout makes sense, as the server may sometimes respond slowly
under high load, as reported in #6974

Fixes #6974
2019-02-20 21:42:03 +05:30
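The gist of such a Go healthcheck binary, sketched against the `/minio/health/live` endpoint that appears elsewhere in this log (port and timeout are illustrative):

```go
package main

import (
	"net/http"
	"os"
	"time"
)

// Exit 0 when the liveness endpoint answers 200 within the timeout;
// any error or other status makes the Docker healthcheck fail.
func main() {
	client := http.Client{Timeout: 30 * time.Second}
	resp, err := client.Get("http://127.0.0.1:9000/minio/health/live")
	if err != nil || resp.StatusCode != http.StatusOK {
		os.Exit(1)
	}
}
```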
Krishna Srinivas
ce960565b1 Validate and reject unusual requests (#7258) 2019-02-19 21:02:41 -08:00
poornas
755e675d5c Fix: send decrypted size to notification event (#7248) 2019-02-19 14:14:26 +05:30
Harshavardhana
b6c00405ec Do not pro-actively return false in isObjectDir() (#7246)
We should change the logic for both isObject()
and isObjectDir() leaf detection to be done
with quorum, due to how our directory navigation
works - this allows for properly deleting all
the dangling directories or objects if any.
2019-02-15 16:21:19 -08:00
Harshavardhana
8f62935448 Add proper requestID for STS errors (#7245) 2019-02-14 17:54:33 -08:00
Harshavardhana
396d78352d Support HTTP/2.0 (#7204)
Fixes #6704
2019-02-14 17:53:46 -08:00
Aditya Manthramurthy
8a405cab2f COUNT() function in select should return an int (#7243) 2019-02-13 16:32:59 -08:00
Minio Trusted
6d778a883f Update yaml files to latest version RELEASE.2019-02-14T00-21-45Z 2019-02-14 00:27:52 +00:00
Harshavardhana
a51781e5cf Use context to fill in more details about error XML (#7232) 2019-02-13 16:07:21 -08:00
Krishna Srinivas
90213ff1b2 Detect peer reboots to invalidate current storage REST clients (#7227) 2019-02-13 15:29:46 -08:00
Andreas Auernhammer
6f764a8efd crypto: fix nil pointer dereference of vault secret (#7241)
This commit fixes a nil pointer dereference issue
that can occur when the Vault KMS returns e.g. a 404
with an empty HTTP response. The Vault client SDK
does not treat that as error and returns nil for
the error and the secret.

Further it simplifies the token renewal and
re-authentication mechanism by using a single
background go-routine.

The control-flow of Vault authentications looks
like this:
1. `authenticate()`: Initial login and start of background job
2. Background job starts a `vault.Renewer` to renew the token
3. a) If this succeeds the token gets updated
   b) If this fails the background job tries to login again
4. If the login in 3b. succeeded goto 2. If it fails
   goto 3b.
2019-02-13 15:25:32 -08:00
Harshavardhana
df35d7db9d Introduce staticcheck for stricter builds (#7035) 2019-02-13 18:29:36 +05:30
Harshavardhana
4ba77a916d Select should return early errors as XML (#7230)
Currently, we were sending errors in Select binary format,
which is incompatible with AWS S3 behavior, errors in binary
are  sent after HTTP status code is already 200 OK - i.e it
happens during the evaluation of the record reader.
2019-02-13 13:18:11 +05:30
Anis Elleuch
f9fecf0e76 storage: Increase the timeout of storage REST requests (#7218)
This commit increases the timeout of storage REST requests to 5
minutes; this includes opening the TCP connection and sending/receiving
data. This will reduce the errors clients receive when the server
is under high load.
2019-02-12 23:27:33 -08:00
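A minimal sketch of such a client configuration using `net/http` (MinIO's storage REST client plumbing differs in detail):

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		// Overall deadline: dialing, sending the request and
		// reading the response all count against these 5 minutes.
		Timeout: 5 * time.Minute,
		Transport: &http.Transport{
			DialContext: (&net.Dialer{
				Timeout: 5 * time.Minute, // TCP connect deadline
			}).DialContext,
		},
	}
	fmt.Println(client.Timeout)
}
```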
Minio Trusted
9f9e0fe085 Update yaml files to latest version RELEASE.2019-02-12T21-58-47Z 2019-02-12 22:07:04 +00:00
Aditya Manthramurthy
ee5b3622a5 Evaluate where clause in aggregation queries (#7235) 2019-02-12 13:54:26 -08:00
Krishna Srinivas
14544d8d84 Validate incoming requests (#7234) 2019-02-12 13:24:14 -08:00
Harshavardhana
118270d76f Vendor the upstream changes with Avx512 (#7225)
Thanks to @fwessels, we have AVX512 support with a 4x improvement
2019-02-12 17:32:27 +05:30
Harshavardhana
fef5416b3c Support unknown gateway errors and convert at handler layer (#7219)
Different gateway implementations, due to different backend
API errors, might return different unsupported errors at
our handler layer. The current code posed a problem because
this information was lost and converted to InternalError;
in this situation all S3 clients end up retrying the request.

To avoid this unexpected situation, implement a way to support
this cleanly such that the underlying information returned
by the gateway is not lost.
2019-02-12 14:55:52 +05:30
Harshavardhana
9f87283cd5 Revert and bring back B2 gateway implementation (#7224)
This PR is simply a revert of 3265112d04
just for B2 gateway.
2019-02-12 12:44:22 +05:30
Harshavardhana
b8955fe577 Fix DummyHandlers to authorize and send/validate correct XMLs (#7223) 2019-02-11 17:58:26 -08:00
Nitish Tiwari
13c3b8afe2 Update Redis client used for bucket notifications (#7213)
Current Redis client used https://github.com/garyburd/redigo/
has been archived and further development is done at
https://github.com/gomodule/redigo

This commit updates the client library accordingly.

Fixes #6392
2019-02-11 19:06:27 +05:30
Harshavardhana
a8cd70f3e5 Remove GPL go-lzo dependency for parquet-go (#7220)
Also remove any other unused dependencies
2019-02-11 14:57:24 +05:30
Harshavardhana
082f777281 Revamp bucket metadata healing (#7208)
Bucket metadata healing in the current code was executed multiple
times, each time for a given set. Bucket metadata, just like
objects, is hashed in accordance with its name on any given set;
to allow hashing to play a role we should let the top-level
code decide where to navigate.

The current code also had 3 bucket metadata files hardcoded, whereas
we should make it generic by listing and navigating .minio.sys
to heal such objects.

We also had another bug where, due to the isObjectDangling changes,
objects without pre-existing bucket metadata files were erroneously
reported as grey/corrupted objects.

This PR fixes all of the above items.
2019-02-11 09:23:13 +05:30
John Liu
9600e2b35e Comment Typo: Changed 'jason' to 'json' (#7216) 2019-02-10 05:49:00 -08:00
Prashant Shahi
f75f707ff4 Added documentation for MINIO_HTTP_TRACE (#7207) 2019-02-10 11:57:43 +05:30
poornas
40b8d11209 Move metadata into ObjectOptions for NewMultipart and PutObject (#7060) 2019-02-09 11:01:06 +05:30
Sidhartha Mani
c1b3f1994b remove unnecessary buffer while discarding stream (#7214) 2019-02-08 19:29:51 -08:00
Adam
18c4ecbbef Fix mc event add event flag (#7210) 2019-02-08 16:18:34 -08:00
ebozduman
dd52e5ebe9 Implements dummy tagging handlers for Terraform (#7040) 2019-02-08 16:18:13 -08:00
Praveen raj Mani
8af1f0cc7b Improved error message for user and access key conflict (#7190) 2019-02-07 17:25:58 -08:00
Harshavardhana
85e939636f Fix JSON parser handling for certain objects (#7162)
This PR also adds some comments and simplifies
the code. The primary change ensures that the
cached buffer is honored.

Added unit tests as well

Fixes #7141
2019-02-07 08:04:42 +05:30
poornas
d203e7e1cc azure gateway: return MD5Sum as ETag for S3 API compatibility (#6884)
Fixes #6872.

This PR refactors the multipart upload implementation to use a
per-part metadata file which is cleaned up at the end of the upload
2019-02-06 16:58:43 -08:00
Aditya Manthramurthy
4aa9ee153b Fix S3 Select request XML parsing (#7202) 2019-02-06 13:25:52 -08:00
Minio Trusted
5fb813a5cc Update yaml files to latest version RELEASE.2019-02-06T21-16-36Z 2019-02-06 21:24:08 +00:00
Harshavardhana
817269475f Make sure to drain body upon an error (#7197)
Also cleanup redundant code and use it at a common place
2019-02-06 12:07:03 -08:00
Krishna Srinivas
2d168b532b Allow format.json healing on dev/test setup (single node XL, all root disks) (#7170) 2019-02-06 11:44:19 -08:00
Aditya Manthramurthy
fd4e15c116 Flush the records staging buffer periodically (#7193)
- Staging buffer is flushed every 500ms. In cases where the result
  records are slowly generated (e.g. when a where condition
  matches very few records), this change causes the server to send
  results even though the staging buffer is not full.

- Refactor messageWriter code to use simpler channel-based
  coordination instead of atomic variables.
2019-02-06 16:03:05 +05:30
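A condensed sketch of the periodic-flush idea described above (buffer size and output are illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"time"
)

// drain flushes the staging buffer when it fills or when the 500ms
// ticker fires, so slowly produced records still reach the client.
func drain(records <-chan []byte) {
	var staging bytes.Buffer
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case rec, ok := <-records:
			if !ok {
				fmt.Print(staging.String()) // final flush
				return
			}
			staging.Write(rec)
			if staging.Len() >= 1<<20 { // flush when full (assumed size)
				fmt.Print(staging.String())
				staging.Reset()
			}
		case <-ticker.C:
			if staging.Len() > 0 { // periodic flush of a partial buffer
				fmt.Print(staging.String())
				staging.Reset()
			}
		}
	}
}

func main() {
	ch := make(chan []byte)
	go drain(ch)
	ch <- []byte("slow record\n")
	time.Sleep(time.Second) // the record is flushed by the ticker
	close(ch)
	time.Sleep(100 * time.Millisecond)
}
```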
Krishna Srinivas
3dfbe0f68c Send white spaces to client till completeMultipart() process completes (#7198) 2019-02-05 20:58:09 -08:00
Harshavardhana
30135eed86 Redo how to handle stale dangling files (#7171)
foo.CORRUPTED should never be created, because when
multiple sets are involved we would hash the file
to a wrong location; this PR removes that code.

But it allows DeleteBucket() to work properly to delete
dangling buckets/objects. It also adds another option
to healing, where a user needs to specify `--remove`
such that all dangling objects will be deleted with
user confirmation.
2019-02-05 17:58:48 -08:00
Harshavardhana
e4081aee62 Added support for reading body in STS API (#7188)
STS API supports both URL query params and reading
from a body.
2019-02-05 15:47:11 -08:00
kannappanr
df418a2783 Create Cors handler with permissive configuration (#7186)
Create a new CORS handler allowing all origins, all standard
methods, any header, and credentials.

Fixes #7181
2019-02-05 14:06:52 -08:00
kannappanr
9a65f6dc97 Remove duplicate code in object-handlers.go (#7176)
Removed duplicate code in CompleteMultipartUploadHandler
and CopyObjectPartHandler.
2019-02-05 13:36:38 -08:00
Aditya Manthramurthy
f04f8bbc78 Add support for Timestamp data type in SQL Select (#7185)
This change adds support for casting strings to Timestamp via CAST:
`CAST('2010T' AS TIMESTAMP)`

It also implements the following date-time functions:
  - UTCNOW()
  - DATE_ADD()
  - DATE_DIFF()
  - EXTRACT()

For values passed to these functions, date-types are automatically
inferred.
2019-02-04 20:54:45 -08:00
Harshavardhana
ea6d61ab1f Use loadCachedConfigs appropriately to load ENVs (#7187) 2019-02-04 10:31:11 +05:30
Krishna Srinivas
6f08edfb36 Use O_EXCL when creating file as we never overwrite an existing file (#7189) 2019-02-01 19:01:06 -08:00
Sidhartha Mani
e9fdea05c6 Enable CI control from repository: Add Dockerfile.simpleci (#7122) 2019-02-01 12:04:28 -08:00
Harshavardhana
e005910051 Add more information in our select docs (#7177) 2019-02-01 11:34:56 -08:00
Anis Elleuch
de2c106386 xl: ListObjectParts uses the latest valid xl meta (#7184)
ListObjectParts is using xl.readXLMetaParts, which picks the first
xl meta found on any disk; this can be inconsistent information.

E.g.: in the middle of a multipart upload, one node can go offline
and come back later with outdated multipart information.
2019-02-01 08:58:41 -08:00
Harshavardhana
32a6dd1dd6 Remove sporadic tests which fail on windows (#7178) 2019-01-31 16:48:47 -08:00
Harshavardhana
432aec73d9 Return proper errors for invalid bodies (#7179) 2019-01-31 07:19:09 -08:00
Anis Elleuch
36dae04671 CopyObjectPart: remove duplicated etag decryption (#7174) 2019-01-30 19:33:31 -08:00
Minio Trusted
9dc9f03c02 Update yaml files to latest version RELEASE.2019-01-31T00-31-19Z 2019-01-31 00:37:43 +00:00
Krishna Srinivas
b18c0478e7 Only heal on disks where we are sure that healing is needed (#7148) 2019-01-30 10:53:57 -08:00
Anis Elleuch
2d9860e875 heal: Fix healing empty directories (#7154)
This commit fixes the computation of Before/After healing state
for empty directories.

Issues before the commit:
- Before state doesn't reflect the real status (no StatVol() called)
- For any MakeVol() error, healObjectDir is exited directly, which is
  wrong.
2019-01-30 10:51:56 -08:00
kannappanr
d3553f8dfc Bucket Heal: Do not add empty endpoint entry (#7172)
Currently, during a heal of a bucket, if one disk is offline an empty
endpoint entry is added.
Then another entry with the missing endpoint is also added.

This results in more entries than disks being added.

The code that adds the empty endpoint has been removed.
2019-01-30 10:40:43 -08:00
Harshavardhana
e1ae90c12b Make sure to pass the right username for correct ConditionValues (#7169)
Without passing the proper username value, AccessDenied
errors would result when policies with `{aws:username}`
substitutions are used.

Fixes #7165
2019-01-30 14:21:09 +05:30
Sidhartha Mani
34e7259f95 Add Historic CPU and memory stats (#7136)
Collect historic CPU and memory stats. Also, use actual values
instead of formatted strings while returning to the client. The string
formatting prevents values from being processed by the server or
by the client without parsing them.

This change will allow the values to be processed (e.g. to
compute a rolling average over the lifetime of the Minio server)
and offloads the formatting to the client.
2019-01-30 12:47:32 +05:30
poornas
d0015b4d66 update kms docs example to set a longer period for token renewal (#7149) 2019-01-29 08:04:07 -08:00
poornas
3467460456 Fix vault client to autorenew or reauthenticate (#7161)
Switch to the Vault API's Renewer for token renewal. If the
token can no longer be renewed, reauthenticate to
get a fresh token.
2019-01-29 16:57:23 +05:30
Harshavardhana
64b5701971 Support AWS envs creds for non-aws endpoints in S3 gateway (#7156)
We made a change previously in #7111 which restricted support
for AWS credential envs to the AWS S3 endpoint. Some users requested
that this be added back for non-AWS endpoints as well, as
they require separate credentials for backend authentication
from a security point of view.
2019-01-29 16:05:20 +05:30
Praveen raj Mani
fad59da29d clientID removed in the MQTT config (#7157)
More than one client can't use the same clientID for an MQTT
connection. This causes problems in distributed deployments, where the
config is shared across nodes, as each Minio instance tries to connect
to MQTT using the same clientID.

This commit removes the clientID field in the config and allows the
MQTT client to create a random clientID for each node.
2019-01-29 15:00:15 +05:30
Aditya Manthramurthy
91c839ad28 Use a buffer to collect SQL Select result rows (#7158)
Batching records into a single SQL Select message in the response
leads to a significant speed-up, as the message header overhead is
made negligible.

This change leads to a speed up of 3-5x for queries that select many
small records.
2019-01-28 20:00:18 -08:00
Aditya Manthramurthy
2786055df4 Add new SQL parser to support S3 Select syntax (#7102)
- New parser written from scratch, allows easier and complete parsing
  of the full S3 Select SQL syntax. Parser definition is directly
  provided by the AST defined for the SQL grammar.

- Bring support to parse and interpret SQL involving JSON path
  expressions; evaluation of JSON path expressions will be
  subsequently added.

- Bring automatic type inference and conversion for untyped
  values (e.g. CSV data).
2019-01-28 17:59:48 -08:00
Harshavardhana
0a28c28a8c Avoid code which looks at local files when etcd is configured (#7144)
This situation happens only in gateway nas, which supports
etcd-based `config.json` to support all FS mode features.

The issue was that we would try to migrate something which doesn't
exist when etcd is configured, which leads to inconsistent
server configs in memory.

This PR fixes this situation by properly loading config after
initialization, performing backend disk config migration
only if etcd is not configured.
2019-01-28 13:31:35 -08:00
Harshavardhana
526546d588 Remove '.minio.sys/tmp' files in background (#7124)
If it does happen that we have a lot of files in '.minio.sys/tmp',
minio startup might block deleting this folder. Rename and
delete in the background instead, to allow Minio to start serving
requests.
2019-01-25 13:33:28 -08:00
Aditya Manthramurthy
2053b3414f Reduce heal parallelism (#7155)
To avoid a large number of concurrent connections between minio
servers and to reduce CPU pressure, it is better to limit the number
of objects healed in parallel to number_of_CPUs.
2019-01-25 13:11:17 -08:00
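The standard Go way to bound such parallelism is a buffered-channel semaphore sized to `runtime.NumCPU()`; a sketch:

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

func main() {
	// A buffered channel as a counting semaphore: at most NumCPU
	// heal workers run at any moment.
	sem := make(chan struct{}, runtime.NumCPU())
	var wg sync.WaitGroup
	for i := 0; i < 16; i++ { // 16 objects to heal (illustrative)
		wg.Add(1)
		sem <- struct{}{} // blocks while NumCPU workers are active
		go func(obj int) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot
			fmt.Println("healing object", obj)
		}(i)
	}
	wg.Wait()
}
```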
kannappanr
ce870466ff Top Locks command implementation (#7052)
API to list locks used in distributed XL mode
2019-01-24 07:22:14 -08:00
Harshavardhana
964e354d06 Fix liveness check for NAS gateway (#7142)
Current master throws '503' unavailable for liveness check
```
~ curl -v http://localhost:9000/minio/health/live
> GET /minio/health/live HTTP/1.1
...
...
< HTTP/1.1 503 Service Unavailable
```

With this fix liveness check returns error appropriately
```
~ curl -v http://localhost:9000/minio/health/live
> GET /minio/health/live HTTP/1.1
...
...
< HTTP/1.1 200 OK
```
2019-01-24 19:14:05 +05:30
kannappanr
8ee8ad777c logger: do not interpret encoded url as format string (#7110)
The error logger currently interprets an encoded URL in the error as a format string.
2019-01-24 00:30:00 -08:00
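The bug class in miniature, as a hedged Go contrast (the path is made up); `go vet` flags the first call precisely because a non-constant format string gets its `%` sequences interpreted:

```go
package main

import "log"

func main() {
	path := "/bucket/obj%3Fname%25x" // attacker-influenced, percent-encoded
	log.Printf(path)       // bad: %3F and %25 are parsed as format verbs
	log.Printf("%s", path) // good: logged verbatim
}
```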
Krishna Srinivas
82af0be1aa Healing process should not heal root disk (#7089) 2019-01-23 15:29:29 -08:00
Minio Trusted
e8c18bc145 Update yaml files to latest version RELEASE.2019-01-23T23-18-58Z 2019-01-23 23:25:02 +00:00
Harshavardhana
bd25f31100 Use IAM creds only if endpoint is S3 (#7111)
Requirements like being able to run minio gateway in EC2
pointing to a Minio deployment wouldn't work properly
because IAM creds take precedence on EC2.

Add checks such that we only enable AWS-specific features
if our backend URL points to actual AWS S3, not S3-compatible
endpoints.
2019-01-23 11:12:33 -08:00
Harshavardhana
ee7dcc2903 Handle errs returned with etcd properly for config init and migration (#7134)
Returning unexpected errors can cause problems for config handling,
which is what led gateway deployments with etcd to misbehave and
stop working properly
2019-01-23 11:10:59 -08:00
Harshavardhana
55ef51a99d Vendorize all recent changes to minio-go (#7135)
- Default support for S3 dualstack endpoints (IPv6 support)
- Support granular policy conditionals in List operations
- Support proxy cookies for stickiness
2019-01-23 19:22:09 +05:30
Anis Elleuch
dc2348daa5 heal: Preserve deployment ID from reference format.json (#7126)
The deployment ID is not copied into new formats after healing the
format. Although this is not critical, since a new deployment ID will be
generated and set on the next cluster restart, it is still much better
if we don't change the deployment ID of a cluster, for better tracking.
2019-01-22 18:32:06 -08:00
Aditya Manthramurthy
042d7f25e4 Fix regexp matcher of browser assets and paths (#7083)
Fix the regexp matcher for special browser assets so that it clashes
with less of the object namespace.

Assets should now be loaded with the /minio/ prefix. Previously,
favicon.ico (and others) could be loaded at any path matching
/minio/*/favicon.ico. This clashes with a large part of the object
namespace. With this change, /minio/favicon.ico will serve the favicon
but not /minio/mybucket/favicon.ico

Fixes #7077
2019-01-22 10:58:28 -08:00
Andreas Auernhammer
8c1b649b2d load system CAs before trying to load custom CAs (#7133)
This change causes `getRootCAs` to always load system-wide CAs.
Any additional custom CAs (at `certs/CA/`) are added to the certificate pool
of system CAs.

The previous behavior was incorrect, since no system-wide CAs were
loaded if either there were CAs under `certs/CA` or the `certs/CA`
directory didn't exist at all.
2019-01-22 09:18:06 -08:00
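A sketch of the corrected behavior using the standard library (path and fallback are illustrative): start from `x509.SystemCertPool()` and append custom CAs on top.

```go
package main

import (
	"crypto/x509"
	"fmt"
	"io/ioutil"
)

// rootCAs always starts from the system pool, then layers any custom
// CA found at the given path on top of it.
func rootCAs(customCA string) *x509.CertPool {
	pool, err := x509.SystemCertPool()
	if err != nil {
		pool = x509.NewCertPool() // fall back to an empty pool
	}
	if pem, err := ioutil.ReadFile(customCA); err == nil {
		pool.AppendCertsFromPEM(pem) // custom CA added, system CAs kept
	}
	return pool
}

func main() {
	pool := rootCAs("certs/CA/myca.crt")
	fmt.Println(pool != nil)
}
```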
Kumar Sukhani
f03ccec912 Fix slack Link (#7131) 2019-01-22 19:53:50 +05:30
Nitish Tiwari
0bb65f84bb Add example for IPv6 for address flag (#7127) 2019-01-22 15:55:27 +05:30
Harshavardhana
8e0910ab3e Fix build issues on BSDs in pkg/cpu (#7116)
Also add a cross-compile script to always test cross
compilation for some well-known platforms and architectures;
we support out-of-box compilation of these platforms even
if we don't make an official release build.

This script is to avoid regressions in this area when we
add platform-dependent code.
2019-01-22 09:27:23 +05:30
Harshavardhana
5353edcc38 Support policy variable replacement (#7085)
This PR allows IAM and bucket policies to have
policy variable replacements in resource and
condition key values.

For example
- ${aws:username}
- ${aws:userid}
2019-01-21 10:27:14 +05:30
Harshavardhana
3265112d04 Remove gateway implementations for manta, sia and b2 (#7115) 2019-01-20 08:10:58 -08:00
Harshavardhana
4fdacb8b14 Add policy conditions support for Listing operations on browser (#7106)
Fixes https://github.com/minio/minio/issues/7095
2019-01-20 12:50:01 +05:30
Krishna Srinivas
267f183fc8 Do not do StorageInfo() and ListBuckets() for FS/Erasure in health check handler (#7090)
Health checking programs very frequently use /minio/health/live 
to check health, hence we can avoid doing StorageInfo() and 
ListBuckets() for FS/Erasure backend.
2019-01-20 10:28:36 +05:30
Harshavardhana
3d22a9d84f Support rootCAs for notification targets (#7108)
Add support for RootCAs for notification targets
mqtt and webhook
2019-01-20 09:57:18 +05:30
Krishna Srinivas
51ec61ee94 Fix healing whole file bitrot (#7123)
* Use 0-byte file for bitrot verification of whole-file-bitrot files

Also pass the right checksum information for bitrot verification

* Copy xlMeta info from latest meta except []checksums and []Parts while healing
2019-01-20 07:58:40 +05:30
Harshavardhana
74c2048ea9 Add proper contexts with timeouts for etcd operations (#7097)
This fixes an issue of perceived hang when incorrect
unreachable URLs are specified in MINIO_ETCD_ENDPOINTS
variable.

Fixes #7096
2019-01-18 09:36:45 -08:00
Krishna Srinivas
730ac5381c Simplify parallelReader.Read() (#7109)
Simplify parallelReader.Read(), which also fixes the previous
implementation, where it returned before all the parallel
reading go-routines had terminated, causing race conditions.
2019-01-18 21:18:24 +05:30
Alex Simenduev
6dd8a83c5a change credential chain order in s3 gateway to mimic official docs (#7091) 2019-01-17 10:31:51 -08:00
Harshavardhana
1a7e6d4768 Handle multiple conditions in policies (#7079)
Fixes #7078
2019-01-17 10:28:24 -08:00
Krishna Srinivas
98c950aacd Streaming bitrot verification support (#7004) 2019-01-17 18:28:18 +05:30
Minio Trusted
94c52e3816 Update yaml files to latest version RELEASE.2019-01-16T21-44-08Z 2019-01-16 21:51:40 +00:00
Harshavardhana
8766c5eb22 Add version as part of Server: header (#7100)
This was agreed after discussing with @abperiasamy, we
borrowed the idea from Apache's own documentation.
2019-01-16 13:38:41 -08:00
kannappanr
e0d22359e7 Fix lint warnings (#7099) 2019-01-16 12:49:20 -08:00
Harshavardhana
6dd13e68c2 Support V2 signatures when autoencryption is enabled (#7084)
When auto-encryption is turned on, we pro-actively add SSEHeader
for all PUT and POST operations. This is unusual for V2 signature
calculation because the V2 signature doesn't have a pre-defined set
of signed headers in the request like the V4 signature does. According
to V2 we should canonicalize all incoming supported HTTP headers.

Make sure to validate signatures before we mutate HTTP headers
2019-01-16 12:12:06 -08:00
Harshavardhana
633001c8ba Inherit certsDir from configDir if latter is set (#7098)
This is to ensure backward compatibility for all existing
deployments which use custom config dir to point to their
certs directory.
2019-01-16 12:04:32 -08:00
Bala FA
e23a42305c Rebase minio/parquet-go and fix null handling. (#7067) 2019-01-16 21:52:04 +05:30
Krishna Srinivas
63d2583e91 Avoid holding write lock on config in situations where it is not needed (#7082)
This is to allow the cluster to come up when N/2 disks are available.
2019-01-16 13:59:21 +05:30
Harshavardhana
a2f66abbe8 Update STS API docs with Version query param (#7071) 2019-01-16 09:38:32 +05:30
Andreas Auernhammer
b28661b673 doc: add security documentation to provide some background info (#7028)
This commit adds some documentation about the design of the
SSE-C and SSE-S3 implementation. It describes how the Minio server
encrypts objects and manages keys.
2019-01-15 14:27:57 -08:00
Harshavardhana
e8791ae274 Remove Minio server arch, version from Server: header (#7074) 2019-01-15 13:16:11 +05:30
Scott Dunlop
309975d477 Add missing time import to counter_darwin.go (#7081) 2019-01-14 17:21:27 -08:00
Praveen raj Mani
6571641735 Persist offline mqtt events in the queueDir and replay (#7037) 2019-01-14 12:39:00 +05:30
Harshavardhana
8757c963ba Migrate all Peer communication to common Notification subsystem (#7031)
Deprecate the use of Admin Peers concept and migrate all peer
communication to Notification subsystem. This finally allows
for a common subsystem for all peer notification in case of
distributed server deployments.
2019-01-14 12:14:20 +05:30
Praveen raj Mani
9a71f2fdfa link to ppc64le binary added (#7065)
Fixes #7063
2019-01-11 20:16:19 +05:30
Nick Craig-Wood
9c26fe47b0 Fix server side copy of files with ? in - fixes #7058 (#7059)
Before this change the CopyObjectHandler and the CopyObjectPartHandler
both looked for a `versionId` parameter on the `X-Amz-Copy-Source` URL,
for the version of the object to be copied, on the URL-unescaped version
of the header. This meant that file names containing question marks were
truncated after the question mark, so files with `?` in their
names could not be server-side copied.

After this change the URL unescaping is done during the parsing of the
`versionId` parameter which fixes the problem.

This change also introduces the same logic for the
`X-Amz-Copy-Source-Version-Id` header field which was previously
ignored, namely returning an error if it is present and not `null`
since minio does not currently support versions.

S3 Docs:
- https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
- https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
2019-01-10 13:10:10 -08:00
Sidhartha Mani
f3f47d8cd3 Add ServerCPULoadInfo() and ServerMemUsageInfo() admin API (#7038) 2019-01-09 19:04:19 -08:00
Minio Trusted
de1d39e436 Update yaml files to latest version RELEASE.2019-01-10T00-21-20Z 2019-01-10 00:28:50 +00:00
poornas
ed1275a063 Fix copy from encrypted multipart to single encrypted part (#7056)
When the source is an encrypted multipart object and the parts are not
evenly divisible by the DARE package block size, the target's encrypted
size will not necessarily be the same as that of the encrypted source object.
2019-01-09 15:17:21 -08:00
kannappanr
a7d407fa42 Display message on failure to get lock on format.json in fs mode on startup (#6538)
Retry to see if the lock is free. Retry time will increase binomially.
2019-01-09 10:13:04 -08:00
Anis Elleuch
4e6e05f8e0 virtual host: Fix making new buckets (#7054)
This commit removes old code preventing PUT requests with '/' as a path,
because this is not needed anymore after the introduction of the virtual
host style in Minio server code.

'PUT /' when global domain is not configured already returns 405 Method
Not Allowed http error.
2019-01-09 11:59:41 +05:30
Bala FA
b0deea27df Refactor s3select to support parquet. (#7023)
Also handle pretty formatted JSON documents.
2019-01-08 16:53:04 -08:00
Kaan Kabalak
e98d89274f Upgrade to Webpack 4 (#7045) 2019-01-08 11:04:59 -08:00
kannappanr
c59206bcd3 GCS ListMultipartUploads: Don't return on first uploadid (#7014)
ListMultipartUploads code returns only the first uploadid.

Fixes #7011
2019-01-08 11:03:28 -08:00
Harshavardhana
7f2d439baa Avoid printing in S3 tests (#7043) 2019-01-07 22:32:30 +05:30
poornas
5a80cbec2a Add double encryption at S3 gateway. (#6423)
This PR adds pass-through, single encryption at gateway and double
encryption support (gateway encryption with pass through of SSE
headers to backend).

If KMS is set up (either with Vault as KMS or using
MINIO_SSE_MASTER_KEY), the gateway will automatically perform
single encryption. If MINIO_GATEWAY_SSE is set up in addition to
Vault KMS, double encryption is performed. When neither KMS nor
MINIO_GATEWAY_SSE is set, do a pass-through to the backend.

When double encryption is specified, MINIO_GATEWAY_SSE can be set to
"C" for SSE-C encryption at gateway and backend, "S3" for SSE-S3
encryption at gateway/backend, or both to support more than one option.

Fixes #6323, #6696
2019-01-05 14:16:42 -08:00
Harshavardhana
2d19011a1d Add support for AssumeRoleWithWebIdentity (#6985) 2019-01-04 13:48:12 -08:00
Harshavardhana
e82dcd195c Deprecate config-dir bring in certs-dir for TLS configuration (#7033)
This PR is to provide an indication that config-dir will be removed
in the future and all users should migrate to the new --certs-dir option

Fixes #7016
Fixes #7032
2019-01-02 10:05:16 -08:00
Nitish Tiwari
fcb56d864c Add ServerDrivesPerfInfo() admin API (#6969)
This is part of implementation for mc admin health command. The
ServerDrivesPerfInfo() admin API returns read and write speed
information for all the drives (local and remote) in a given Minio
server deployment.

Part of minio/mc#2606
2018-12-31 09:46:44 -08:00
Krishnan Parthasarathi
75cd4201b0 Update go-sql-driver/mysql package (#7019) 2018-12-29 21:59:03 +05:30
Harshavardhana
f24c017e9a Move docker edge to latest Go as well (#7030) 2018-12-28 17:24:24 -08:00
Harshavardhana
b5280ba243 Migrate to Go version 1.11.4 (#7026) 2018-12-28 14:04:39 -08:00
Harshavardhana
2a0e4b6f58 Add boolean function condition support (#7027) 2018-12-28 12:18:58 -08:00
Minio Trusted
1898961ce3 Update yaml files to latest version RELEASE.2018-12-27T18-33-08Z 2018-12-27 18:41:05 +00:00
Krishnan Parthasarathi
236796ebd6 Add etcd as prerequisite for multi-user in gateway (#7022) 2018-12-27 07:22:18 +05:30
Harshavardhana
4e4f855b30 Add support for new policy conditions (#7024)
This PR implements the following condition types

- StringEqualsIgnoreCase and StringNotEqualsIgnoreCase
- BinaryEquals
2018-12-26 17:39:30 -08:00
Harshavardhana
2db22deb93 Fix policy bugs Null conditions and canonical names (#7021)
This PR fixes two different issues:

- Null condition implementation
- HTTP canonical request value names

It fixes the handling of null conditions and handles HTTP
canonical names in request values.

This PR was tested with policies mentioned in the following blog
https://aws.amazon.com/blogs/security/how-to-prevent-uploads-of-unencrypted-objects-to-amazon-s3/

Fixes #6955
2018-12-26 02:03:28 -08:00
Harshavardhana
fb8d0d7cf7 Add support for hostname lookups instead of IPs in MINIO_PUBLIC_IPS (#7018)
DNS names will be resolved to their respective IPs if specified
in MINIO_PUBLIC_IPS.

Fixes #6862
2018-12-23 03:08:21 -08:00
Harshavardhana
a536cf5dc0 Buffconn should buffer upto maxHeaderBytes to avoid ErrBufferFull (#7017)
It can happen with erroneous clients that do not send the `Host:`
header until 4k worth of header bytes have been read. This can cause
the Peek() method of bufio to fail with ErrBufferFull.

To avoid this we should make sure that the Peek buffer is as large as
our maxHeaderBytes count.
2018-12-23 12:03:04 +05:30
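A small demonstration of the failure mode with Go's `bufio` (sizes are illustrative): peeking more bytes than the reader's buffer holds yields `ErrBufferFull`, so the buffer must be at least maxHeaderBytes.

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	data := strings.Repeat("h", 8192) // 8k of header bytes, no Host: yet

	// Buffer smaller than the bytes we need to peek: ErrBufferFull.
	small := bufio.NewReaderSize(strings.NewReader(data), 4096)
	_, err := small.Peek(8192)
	fmt.Println(err) // bufio: buffer full

	// Size the buffer to maxHeaderBytes and the same Peek succeeds.
	big := bufio.NewReaderSize(strings.NewReader(data), 1<<20)
	b, err := big.Peek(8192)
	fmt.Println(len(b), err) // 8192 <nil>
}
```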
Harshavardhana
b9b68e9331 Add multi-stage build of docker edge image (#7005)
This is to reduce the overall size of the image;
we only retain the binary that was built in the previous stage.
2018-12-22 06:36:48 +05:30
Anis Elleuch
632022971b s3: Don't set NextMarker when listing is not truncated (#7012)
Setting NextMarker when IsTruncated is not set seems to confuse the
AWS C++ SDK; this commit will avoid setting any string in NextMarker.
2018-12-20 13:30:25 -08:00
Harshavardhana
def04f01cf Update reedsolomon/highwayhash to start using ppc64le support (#7003)
Thanks to @fwessels for the upstream work on reedsolomon and
highwayhash which has resulted in 10x performance improvement
on ppc64 architecture.
2018-12-20 23:17:05 +05:30
Minio Trusted
bc67410548 Update yaml files to latest version RELEASE.2018-12-19T23-46-24Z 2018-12-19 23:54:02 +00:00
kannappanr
7881791a91 CopyObject:Set Content-Type to application/octet-stream if it is not set (#6958) 2018-12-19 14:31:45 -08:00
Harshavardhana
d2f8f8c7ee Fix ETag handling with auto-encryption with CopyObject conditions (#7000)
minio-java tests were failing in multiple places when
auto-encryption was turned on; handle all the cases properly

This PR fixes

 - CopyObject should decrypt ETag before it does if-match
 - CopyObject should not try to preserve metadata of source
   when rotating keys, unless explicitly asked by the user.
 - We should not try to decrypt Compressed object etag, the
   potential case was if user sets encryption headers along
   with compression enabled.
2018-12-19 14:12:53 -08:00
kannappanr
8c32311b80 Change lock name to include names instead of hash. (#6886)
Previously the lock name included the hash of the bucket, object
and uploadid.

This is part of the fix required to list the oldest locks on the server.
2018-12-19 13:57:51 -08:00
Ashish Kumar Sinha
9bb88e610e Deletion of subfolders of multipart (#6961)
Delete subfolders under multipart folder upon completion of CompleteMultipartUpload, AbortMultipartUpload and cleanupStaleMultipartUploads functions
2018-12-19 11:27:10 -08:00
Harshavardhana
d1e41695fe Add support for federation on browser (#6891) 2018-12-19 18:43:47 +05:30
Aditya Manthramurthy
2aeb3fbe86 Fix csv output delimiter bug (#6994) 2018-12-19 11:49:06 +05:30
Anis Elleuch
99b843a64e Add anonymous flag to prevent logging sensitive information (#6899) 2018-12-18 16:08:11 -08:00
Harshavardhana
4f31a9a33b Reload users upon AddUser on peers (#6975)
Also migrate ReloadFormat to the notification subsystem and
remove GetConfig(); we do not use this API anymore
2018-12-18 14:39:21 -08:00
Nitish Tiwari
65ddff8899 Fix NAS Gateway Docker command example (#6967)
Fixes #6965
2018-12-18 14:37:17 -08:00
Harshavardhana
e7c902bbbc Return proper errors when admin API is not initialized (#6988)
Especially in gateway mode, IAM admin APIs are not enabled
if etcd is not enabled. We should enable the admin API regardless,
but only enable the IAM and config APIs with etcd configured.
2018-12-18 13:03:26 -08:00
Andreas Auernhammer
5a5895203b add howto generate a master key and add master key disclaimer (#6992)
This commit adds a section to the master key documentation
describing how to generate a random 256 bit master key.

Further this commit adds a warning that master keys are not
recommended for production systems because it's (currently)
not possible to replace a master key (e.g. in case of compromise).
2018-12-18 13:00:32 -08:00
poornas
7da0336ac8 Make sure env are loaded before gateway layer initialization (#6989) 2018-12-18 10:42:09 -08:00
Harshavardhana
3be616de3f Send deployment ID in notification event response elements (#6991) 2018-12-18 10:05:26 -08:00
Harshavardhana
c5bf22fd90 Turn off printing IPv6 endpoints when listening on all interfaces (#6986)
By default, when we listen on all interfaces, we print all the
endpoints that are local to all interfaces, including IPv6
addresses. Remove IPv6 addresses from the endpoint list to be
printed unless explicitly specified with '--address'
2018-12-18 21:56:30 +05:30
poornas
7c9f934875 Disallow SSE requests when object layer has encryption disabled (#6981) 2018-12-14 21:39:59 -08:00
Eco
b6f9b24b30 Small corrections and example for auto-encryption (#6982) 2018-12-14 16:21:41 -08:00
poornas
13cb814a0e update KMS README.md to set approle env (#6978) 2018-12-14 14:03:16 -08:00
Andreas Auernhammer
d264d2c899 add auto-encryption feature (#6523)
This commit adds an auto-encryption feature which allows
the Minio operator to ensure that uploaded objects are
always encrypted.

This change adds the `autoEncryption` configuration option
as part of the KMS configuration and the ENV. variable
`MINIO_SSE_AUTO_ENCRYPTION:{on,off}`.

It also updates the KMS documentation according to the
changes.

Fixes #6502
2018-12-14 13:35:48 -08:00
Harshavardhana
bebaff269c Support IPv6 in minio command line (#6947)
Fixes #6946
2018-12-14 13:07:46 +05:30
Harshavardhana
52b159b1db Allow versionId to be null for Delete,CopyObjectPart (#6972) 2018-12-14 11:34:37 +05:30
Nitish Tiwari
324834e4da Remove duplicate switch case (#6966)
Fixes #6948
2018-12-13 21:58:48 -08:00
Harshavardhana
c2ed1347d9 Do not list objects unless specified in policy (#6970)
Currently we use GetObject permission to check if we are allowed to
list; this might be a security problem, since there are many users now
who actively disable publicly readable listing, yet anyone who
can guess the browser URL can list the objects.

This PR turns off this behavior and provides a more expected way
based on the policies.

This PR also additionally improves the Download() object
implementation to use a more streamlined code.

These are precursor changes to facilitate federation and web
identity support in browser.
2018-12-14 09:45:09 +05:30
Anis Elleuch
50f6f9fe58 S3 api: Ignore encoding in xml body (#6953)
One user reported having discovered the following error:

API: SYSTEM()
Time: 20:06:17 UTC 12/06/2018
Error: xml: encoding "US-ASCII" declared but Decoder.CharsetReader is nil
1: cmd/handler-utils.go:43:cmd.parseLocationConstraint()
2: cmd/auth-handler.go:250:cmd.checkRequestAuthType()
3: cmd/bucket-handlers.go:411:cmd.objectAPIHandlers.PutBucketHandler()
4: cmd/api-router.go:100:cmd.(objectAPIHandlers).PutBucketHandler-fm()
5: net/http/server.go:1947:http.HandlerFunc.ServeHTTP()

Hence, adding support for different XML encodings. Although there
is no clear specification about it, even setting "GARBAGE" as an XML
encoding won't change the behavior of AWS; hence the encoding seems
to be ignored.

This commit will follow that behavior and will ignore encoding field
and consider all xml as utf8 encoded.
2018-12-13 12:09:50 -08:00
Minio Trusted
48cb0ea34b Update yaml files to latest version RELEASE.2018-12-13T02-04-19Z 2018-12-13 02:09:53 +00:00
Harshavardhana
6f7c99a333 Allow versionId to be null for Copy,Get,Head API calls (#6942)
Fixes #6935
2018-12-12 11:43:44 -08:00
Harshavardhana
3498f5b0ec List exact DNS entries for a requested bucketName (#6936)
Currently we would end up considering common prefix
buckets to be part of the same DNS service record,
which leads to Minio server wrongly forwarding the
records to incorrect IPs.
2018-12-12 10:47:03 -08:00
Andreas Auernhammer
21d8c0fd13 refactor vault configuration and add master-key KMS (#6488)
This refactors the vault configuration by moving the
vault-related environment variables to `environment.go`
(Other ENV should follow in the future to have a central
place for adding / handling ENV instead of magic constants
and handling across different files)

Further this commit adds master-key SSE-S3 support.
The operator can specify a SSE-S3 master key using
`MINIO_SSE_MASTER_KEY` which will be used as master key
to derive and encrypt per-object keys for SSE-S3
requests.

This commit is also a pre-condition for SSE-S3
auto-encryption support.

Fixes #6329
2018-12-12 12:20:29 +05:30
Kale Blankenship
79b9a9ce46 Provide actual size in events instead of compressed size. (#6950)
Previous behavior did not check if the object was compressed and
incorrectly reported the stored size rather than the actual object
size.
2018-12-11 17:30:15 -08:00
Harshavardhana
b9b353db4b Add env to support synchronous ops for all calls (#6877) 2018-12-11 16:22:56 -08:00
poornas
11a9b317a3 Disable ListenBucket notifications for NAS gateway (#6954) 2018-12-11 16:16:09 -08:00
Praveen raj Mani
9af7d627ac Preserve the compression headers while copying (#6952)
Fixes #6951
2018-12-11 12:05:41 -08:00
Harshavardhana
76d9d54603 Filter listing buckets based on user level access (#6940)
Fixes #6701
2018-12-10 22:57:22 +05:30
Harshavardhana
4c7c571875 Support JSON to CSV and CSV to JSON output format conversion (#6910)
This PR implements one of the pending items in issue #6286
in S3 API a user can request CSV output for a JSON document
and a JSON output for a CSV document. This PR refactors
the code a little bit to bring this feature.
2018-12-07 14:55:32 -08:00
James Neiman, President
313ba74b09 Update to Minio GCS Gateway (#6887) 2018-12-06 10:09:37 -08:00
Harshavardhana
3e124315c8 Increase the keep alive timeout to 30 secs (#6924)
Go by default uses 3 * time.Minute; we should
at least use 30 secs, as 10 secs is too aggressive.
2018-12-06 22:56:16 +05:30
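One way to express this in Go: a listener wrapper that sets a 30s TCP keep-alive period on accepted connections, mirroring (as an assumption) the `tcpKeepAliveListener` pattern in `net/http` that defaults to 3 minutes:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

// keepAliveListener accepts TCP connections with an explicit 30s
// keep-alive period instead of the 3-minute default.
type keepAliveListener struct{ *net.TCPListener }

func (l keepAliveListener) Accept() (net.Conn, error) {
	c, err := l.AcceptTCP()
	if err != nil {
		return nil, err
	}
	c.SetKeepAlive(true)
	c.SetKeepAlivePeriod(30 * time.Second)
	return c, nil
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	fmt.Println("listening on", ln.Addr())
	_ = keepAliveListener{ln.(*net.TCPListener)} // pass to http.Server.Serve
}
```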
Minio Trusted
78a0fd951e Update yaml files to latest version RELEASE.2018-12-06T01-27-43Z 2018-12-06 01:35:43 +00:00
Anis Elleuch
40852801ea fix: Better check of RPC type requests (#6927)
guessIsRPCReq() considers all POST requests as RPC but doesn't
check whether this is an object operation API, which actually
confuses the bucket forwarder handler when it receives a new
multipart upload API call, which is a POST HTTP request.

Due to this bug, users having a federated setup are not able to
upload a multipart object using an endpoint which doesn't actually
contain the specified bucket that will store the object.

Hence this commit will fix the described issue.
2018-12-05 14:28:48 -08:00
poornas
f6980c4630 fix ConfigSys and NotificationSys initialization for NAS (#6920) 2018-12-05 14:03:42 -08:00
Harshavardhana
8fcc787cba Register notFound handler only once per root router (#6926)
Registering the notFound handler more than once causes
gorilla/mux to return an error for all registered paths
greater than 8. This might be a bug in gorilla/mux,
but we shouldn't be using it this way; the NotFound handler
should be registered only once per root router.

Fixes #6915
2018-12-05 11:54:12 -08:00
Praveen raj Mani
4e6d3c093f Errors in notification config should not crash the server (#6881)
Fixes #6870
2018-12-04 18:27:12 -08:00
Anis Elleuch
61145361fd Fallback to non-loopback IF addresses for Domain IPs (#6918)
When MINIO_PUBLIC_IPS is not specified and no endpoints are passed
as arguments, fall back to the addresses of non-loopback interfaces.

This is useful so users can avoid setting MINIO_PUBLIC_IPS in docker
or orchestration scripts, since users naturally set up an internal
network that connects all instances.
2018-12-04 17:35:22 -08:00
James Neiman, President
950b4ad9af Update to How to secure access to Minio server with TLS (#6845) 2018-12-04 17:30:39 -08:00
Harshavardhana
6add646130 Fix logging of initialization errors in distributed mode (#6914) 2018-12-04 10:25:56 -08:00
Harshavardhana
20e61fb362 Redirect browser requests only if browser is enabled (#6909)
This PR fixes an issue introduced in PR #6848: when the
browser is disabled we shouldn't redirect the requests,
returning AccessDenied instead.

Fixes #6907
2018-12-04 13:08:24 +05:30
Bala FA
18ced1102c handle post policy only if it is set. (#6852)
Previously the policy in the POST form was assumed to always be set.
This is fixed by performing the check only when the policy is set.
2018-12-03 12:01:28 -08:00
Harshavardhana
d6af3c1237 Add bucket notification support for NAS gateway (#6908)
Fixes #6885
2018-12-03 14:02:14 +05:30
Andreas Auernhammer
5549a44566 rename vault namespace env variable to be more idiomatic (#6905)
This commit renames the env variable for vault namespaces
such that it begins with `MINIO_SSE_`. This is the prefix
for all Minio SSE related env. variables (like KMS).
2018-12-01 05:28:49 -08:00
Praveen raj Mani
e7af31c2ff Removed clientID from NATS-Streaming Config (#6391)
clientID must be a unique `UUID` for each connection. Now the
server generates it rather than taking it from the config.

Removing it, as it is non-beneficial right now.

Fixes #6364
2018-11-30 10:46:17 +05:30
Minio Trusted
e7971b1d55 Update yaml files to latest version RELEASE.2018-11-30T03-56-59Z 2018-11-30 04:02:10 +00:00
Harshavardhana
26120d7838 Ignore permission errors on config-dir (#6894) 2018-11-29 18:14:05 -08:00
Harshavardhana
bef7c01c58 Choose right users in federation mode for CopyObject (#6895) 2018-11-29 17:35:11 -08:00
poornas
6a8ccc5925 update README.md (#6893) 2018-11-29 15:50:57 -08:00
kannappanr
d85199e9de Vendorize minio-go (#6883)
Fixes #6873
2018-11-29 11:13:03 -08:00
Harshavardhana
8608a84c23 Ignore hidden directory .snapshot for NetApp volumes (#6889) 2018-11-29 11:39:21 +05:30
Harshavardhana
b7226f4c82 Add transaction lock when migrating configs (#6878)
When migrating configs it often happens that some
servers fail to start due to version mismatches etc.

Hold a transaction lock such that all servers get
serialized.
2018-11-28 17:38:23 -08:00
Pontus Leitzler
f930ffe9e2 Fix gcs context (#6869) 2018-11-28 13:49:51 +05:30
James Neiman, President
b50a245208 Update to Minio Multi-Tenant Deployment Guide (#6871)
Initial edits.
2018-11-27 18:03:07 -08:00
poornas
45bb11e020 Set namespace on vault client if VAULT_NAMESPACE env is set (#6867) 2018-11-27 14:42:32 -08:00
jingsam
b65cf281fd Update azure.md (#6834) 2018-11-27 14:05:27 -08:00
Xie Yanbo
f781548b0c fix typo (#6812) 2018-11-27 14:04:50 -08:00
jingsam
25ee8e74f7 Update README.md (#6832) 2018-11-27 14:04:11 -08:00
jingsam
c975d2cc7e Update README.md (#6833) 2018-11-27 14:03:08 -08:00
jingsam
ea66528739 Update gcs.md (#6835) 2018-11-27 14:02:23 -08:00
Harshavardhana
e1164103d4 Reject if tokens are missing for temp credentials (#6860) 2018-11-27 13:24:04 -08:00
Harshavardhana
83fe70f710 Fix CopyObject regression calculating md5sum (#6868)
CopyObject() failed to calculate a proper md5sum
when called without encryption headers. This fixes a regression
perhaps introduced in commit 5f6d717b7a

Fixes https://github.com/minio/minio-go/issues/1044
2018-11-27 13:23:32 -08:00
Harshavardhana
12a6523fb2 Do not delete parts in multipart if 0 bytes (#6855)
This can create inconsistencies, i.e. Parts might have
a lesser number of entries than ChecksumInfos. This will
result in the object being unreadable.

This PR also allows deleting previously created
corrupted objects.
2018-11-26 13:20:21 -08:00
Harshavardhana
dba61867e8 Redirect browser requests returning AccessDenied (#6848)
Anonymous requests to S3 resources returning
AccessDenied should be auto-redirected to the browser
for login.
2018-11-26 12:15:12 -08:00
Anis Elleuch
dd092f6c2b gateway: Properly set globalMinioPort (#6859)
globalMinioPort is used in federation, which stores the address
and the port number of the server hosting the specified bucket.
Federation uses globalMinioPort, but it is not set during
startup in gateway mode.

This commit fixes the behavior.
2018-11-26 23:19:38 +05:30
Nitish Tiwari
2a810c7da2 Improve du thread performance (#6849) 2018-11-26 10:35:14 +05:30
Nitish Tiwari
dd8c2aa5c6 Cleanup Kubernetes documentation (#6861)
Also add details on why Readiness checks are not recommended for Minio
StatefulSets.
2018-11-25 13:34:20 -08:00
Harshavardhana
9e3fce441e Audit log claims from token (#6847) 2018-11-22 09:33:24 +05:30
Harshavardhana
d4265f9a13 Simplify OPA to use rootCAs custom transport (#6843)
Also close the connections properly to use the
connection pooling properly for HTTP clients.
2018-11-22 08:31:05 +05:30
Minio Trusted
2fc024e880 Update yaml files to latest version RELEASE.2018-11-22T02-51-56Z 2018-11-22 02:57:28 +00:00
Harshavardhana
eddf468aef Lock the targetList properly in go-routines (#6838)
Fixes #6483
2018-11-21 21:25:54 +05:30
Ashish Kumar Sinha
b0d04b9a81 Retry Connection for RabbitMQ (#6837)
Add retries to connect to RabbitMQ: 5 attempts
with a 2s interval

Fixes #6807
2018-11-21 08:37:29 +05:30
Harshavardhana
a9de303d8b Update command line docs (#6839) 2018-11-20 17:35:33 -08:00
Anis Elleuch
69bd6df464 storage: Implement Close() in REST client (#6826)
Calling /minio/prometheus/metrics calls xlSets.StorageInfo(), which creates a new
storage REST client and closes it. However, currently, closing does nothing
to the underlying opened HTTP client.

This commit introduces closing behavior by calling CloseIdleConnections,
provided by http.Transport, on the transport set up at initialization.
2018-11-20 11:07:19 -08:00
Harshavardhana
bfb505aa8e Refactor logging in more Go idiomatic style (#6816)
This refactor brings a change which allows
targets to be added in a cleaner way, and audit
logging is now moved out.

This PR also simplifies the logger dependency for auditing
2018-11-19 14:47:03 -08:00
poornas
d732b1ff9d Fix to cache objects on downloads (#6828)
fixes #6817
2018-11-19 11:00:46 -08:00
Igor K
ef517bd0da update for build on DilOS (#6770) 2018-11-19 19:40:02 +05:30
Minio Trusted
32d837cf88 Update yaml files to latest version RELEASE.2018-11-17T01-23-48Z 2018-11-17 01:30:29 +00:00
Anis Elleuch
7b579caf68 heal: Fix heal sequences cleanup process (#6780)
The current code triggers a timeout to clean up a heal sequence from
healSeqMap, but we don't know whether or not the user launched a new
healing sequence with the same path.

Add endTime to the healSequence struct and add a periodic heal-sequence
cleaner that removes a heal sequence only if it is older than
10 minutes.
2018-11-16 14:59:51 -08:00
Harshavardhana
272b8003d6 Honor header only when requested for use (#6815) 2018-11-16 10:27:48 -08:00
Anis Elleuch
1c24c93f73 storage: Upgrade REST version after adding WriteAll API (#6819)
Rolling update doesn't work properly because the Storage REST API has
a new API, WriteAll(), without an API version number increase.

Also be sure to return 404 for unknown HTTP paths.
2018-11-15 12:18:58 -08:00
Harshavardhana
712abc7958 Fix anonymous downloads URL generation (#6800)
Fixes #6778
2018-11-14 18:05:10 -08:00
poornas
5f6d717b7a Fix: Preserve MD5Sum for SSE encrypted objects (#6680)
To conform with the AWS S3 spec on ETag for SSE-S3 encrypted objects,
encrypt the client-sent MD5Sum and store it on the backend as the ETag.
Extend this behavior to SSE-C encrypted objects.
2018-11-14 17:36:41 -08:00
Harshavardhana
7e1661f4fa Performance improvements to SELECT API on certain query operations (#6752)
This improves the performance of certain queries dramatically,
such as 'count(*)' etc.

Without this PR
```
~ time mc select --query "select count(*) from S3Object" myminio/sjm-airlines/star2000.csv.gz
2173762

real	0m42.464s
user	0m0.071s
sys	0m0.010s
```

With this PR
```
~ time mc select --query "select count(*) from S3Object" myminio/sjm-airlines/star2000.csv.gz
2173762

real	0m17.603s
user	0m0.093s
sys	0m0.008s
```

Almost a 250% improvement in performance. This PR avoids a lot of type
conversions and instead relies on raw sequences of data and interprets
them lazily.

```
benchcmp old new
benchmark                        old ns/op       new ns/op       delta
BenchmarkSQLAggregate_100K-4     551213          259782          -52.87%
BenchmarkSQLAggregate_1M-4       6981901985      2432413729      -65.16%
BenchmarkSQLAggregate_2M-4       13511978488     4536903552      -66.42%
BenchmarkSQLAggregate_10M-4      68427084908     23266283336     -66.00%

benchmark                        old allocs     new allocs     delta
BenchmarkSQLAggregate_100K-4     2366           485            -79.50%
BenchmarkSQLAggregate_1M-4       47455492       21462860       -54.77%
BenchmarkSQLAggregate_2M-4       95163637       43110771       -54.70%
BenchmarkSQLAggregate_10M-4      476959550      216906510      -54.52%

benchmark                        old bytes       new bytes      delta
BenchmarkSQLAggregate_100K-4     1233079         1086024        -11.93%
BenchmarkSQLAggregate_1M-4       2607984120      557038536      -78.64%
BenchmarkSQLAggregate_2M-4       5254103616      1128149168     -78.53%
BenchmarkSQLAggregate_10M-4      26443524872     5722715992     -78.36%
```
2018-11-14 15:55:10 -08:00
Pontus Leitzler
f9779b24ad Enable default vet flags (#6810)
Enable default vet flags except experimental
2018-11-14 10:23:44 -08:00
Harshavardhana
f1f23f6f11 Add sync mode for 'xl.json' (#6798)
xl.json is the source of truth for all erasure
coded objects, without which we won't be able to
read the objects properly. This PR enables sync
mode for writing `xl.json`, such that all writes
hit the disk and are persistent under situations such
as abrupt power failures on servers running Minio.
2018-11-14 19:48:35 +05:30
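A minimal sketch of what sync-mode writing amounts to, using a hypothetical helper (the actual posix layer differs):

```
package main

import "os"

// writeSync is a sketch of sync-mode writes: O_SYNC makes each Write
// reach stable storage before returning, so the file survives an
// abrupt power failure.
func writeSync(path string, data []byte) error {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_SYNC, 0644)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = f.Write(data)
	return err
}
```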
Harshavardhana
cf26c937e4 Remove UA worm and cache (#6809) 2018-11-14 13:18:55 +05:30
Krishna Srinivas
f19f957668 Remove unused repos from vendor.json (#6808) 2018-11-14 10:00:25 +05:30
Harshavardhana
d6572879a8 Check for STS Action first to allow browser requests (#6796) 2018-11-13 15:53:06 -08:00
Anis Elleuch
b6ab8f50fa azure: Support non standard Azure cloud environments (#6712)
This change will allow users to enter the endpoint of the
storage account if the account belongs to a different Azure
cloud environment, such as the US gov cloud.

e.g:

  `MINIO_ACCESS_KEY=testaccount \
     MINIO_SECRET_KEY=accountsecretkey \
     minio gateway azure https://testaccount.blob.usgovcloudapi.net`
2018-11-13 15:51:49 -08:00
Harshavardhana
c82acc599a Treat empty xl.json as file not found (#6804)
If the buffer is empty we can avoid parsing
it and treat it as if `xl.json`
were effectively missing.
2018-11-13 11:57:03 -08:00
Andreas Auernhammer
2447bb58dd change received system signal output to upper case (#6761)
This commit slightly improves the output of the minio server
in case of a received system signal.
2018-11-12 15:07:16 -08:00
Harshavardhana
a55a298e00 Make sure to log unhandled errors always (#6784)
In many situations while testing we encounter
ErrInternalError. To reduce logging we have
removed logging from quite a few places, which
is acceptable, but when ErrInternalError occurs
we should have a facility to log the corresponding
error; this helps to debug the Minio server.
2018-11-12 11:07:43 -08:00
Harshavardhana
2929c1832d Add sample STS request/response output (#6794) 2018-11-12 07:53:55 -08:00
Chester Li
aa2d8583ad Check key length before adding a new user. (#6790)
A user's keys should satisfy the requirements of `mc config host add`.
Check access key and secret key lengths before adding a new user, to
avoid creating a useless user which cannot be added as a config
host or log into the browser.
2018-11-09 15:48:24 -08:00
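For illustration, a minimal sketch of such a pre-check; the minimum lengths below are assumed values mirroring mc/minio credential requirements:

```
package main

import "fmt"

// validateKeys is a sketch of the length check done before adding a user.
func validateKeys(accessKey, secretKey string) error {
	const accessKeyMinLen, secretKeyMinLen = 3, 8 // assumed limits
	if len(accessKey) < accessKeyMinLen {
		return fmt.Errorf("access key must be at least %d characters long", accessKeyMinLen)
	}
	if len(secretKey) < secretKeyMinLen {
		return fmt.Errorf("secret key must be at least %d characters long", secretKeyMinLen)
	}
	return nil
}
```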
kannappanr
df2d75a2a3 Cleanup unnecessary logs (#6788) 2018-11-09 14:03:37 -08:00
Harshavardhana
b24b320807 Set notification namespace for NotificationConfiguration (#6789) 2018-11-09 10:40:03 -08:00
kannappanr
c872c1f1dc Return default ETag if fs.json is empty (#6787) 2018-11-09 10:34:59 -08:00
Harshavardhana
a40610d331 Re-populate public key if JWT fails to parse (#6786)
This handles the case where WSO2 is re-configured
with new TLS certs and newer tokens are signed
with a newer public key: re-populate the public key,
then parse the JWT again.
2018-11-08 17:01:20 -08:00
Harshavardhana
38978eb2aa Avoid decrypting encrypted multipart final size (#6776)
The final size of a multipart object is not a contiguous
encrypted object representation, so trying to
decrypt this size will lead to an error in some
cases. The multipart object should be detected first
and then decoded with its respective parts instead.

This PR handles this situation properly and adds a
test to detect these cases in the future.
2018-11-08 10:27:21 -08:00
Harshavardhana
ca7c3a3278 Add 'mc config host add' command in multi-user doc (#6777) 2018-11-08 09:42:47 -08:00
Harshavardhana
d58fc68137 Fix shadowing issue in elasticsearch target (#6774) 2018-11-07 12:09:03 -08:00
Matthias Schneider
71c66464c1 feature: added nsq as broker for events (#6740) 2018-11-07 10:23:13 -08:00
Harshavardhana
bf414068a3 Parse and return proper errors with x-amz-security-token (#6766)
This PR also simplifies the token and access key validation
across our signature handling.
2018-11-07 20:10:03 +05:30
Eco
88959ce600 Format correction in server limits doc (#6773) 2018-11-06 14:50:11 -08:00
Ashish Kumar Sinha
572719872d Event Notification for ElasticSearch (#6764)
Use the access format for Event Notification for Elasticsearch.
2018-11-06 11:38:54 -08:00
Minio Trusted
bdea19b583 Update yaml files to latest version RELEASE.2018-11-06T01-01-02Z 2018-11-06 01:05:53 +00:00
poornas
eb1f9c9916 Update KMS readme with vault quick start guide (#6747) 2018-11-05 13:01:18 -08:00
Andreas Auernhammer
d07fb41fe8 add key-rotation for SSE-S3 objects (#6755)
This commit adds key-rotation for SSE-S3 objects.
To execute a key rotation, an SSE-S3 client must
 - specify the `X-Amz-Server-Side-Encryption: AES256` header
   for the destination
 - use source == destination for the COPY operation.

Fixes #6754
2018-11-05 10:26:10 -08:00
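For illustration, the rotating COPY is an S3 CopyObject request whose source and destination match; bucket, key, and host below are hypothetical, and the usual authentication headers are omitted:

```
PUT /mybucket/mykey HTTP/1.1
Host: minio.example.com
X-Amz-Copy-Source: /mybucket/mykey
X-Amz-Server-Side-Encryption: AES256
```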
Harshavardhana
a9cda850ca Add forceStop flag to provide facility to stop healing (#6718)
This PR also makes sure that we handle the HTTP request
count correctly by ignoring the on-going heal operation, i.e.
it does not wait on itself.
2018-11-04 19:24:16 -08:00
Harshavardhana
bef0318c36 Support audit logs with additional fields (#6738)
This PR adds support for additional audit fields:

- Request query params
- Request headers
- Response headers

AuditLogEntry is exported and versioned as well,
starting with this PR.
2018-11-02 18:40:08 -07:00
Harshavardhana
3f19ea98bb Honor certs from config-dir (#6757)
This was a regression introduced in 9fe51e392b
2018-11-02 11:53:45 -07:00
Praveen raj Mani
c96073f985 Test for multiple values for x-amz-meta header added (#6753) 2018-11-02 11:32:18 -07:00
Harshavardhana
d2f240c791 Ignore windows hidden folders (#6735)
On a Windows erasure coding setup,

```
~ minio server V:\ W:\ X:\ Z:\
```

is not possible because NTFS creates a couple of
hidden folders; this PR allows minio to use
the entire drive.
2018-11-02 11:31:55 -07:00
Harshavardhana
6491dfbbd6 Fix etcd TLS handling (#6748)
etcd fails to connect if a TLS config is set unconditionally; make TLS
conditional on the input arguments instead.
2018-11-01 21:41:11 -07:00
kannappanr
d9cfa5fcd3 Shouldn't require space in HTTP host header (#6743)
Fixes #6741
2018-10-31 15:31:42 -07:00
Aarushi Arya
89b14639a9 avoid using URL encoding to generate keys (#6731) 2018-10-31 15:07:20 -07:00
Harshavardhana
3f744c0361 Fix mimedb update files (#6744) 2018-10-31 14:15:27 -07:00
kannappanr
9ed7fb4916 Do not call multiple response.WriteHeader calls (#6733)
The Execute method in the s3Select package makes a response.WriteHeader call.

Don't call it again in the SelectObjectContentHandler function when
the s3Select.Execute call returns an error.
2018-10-31 14:09:26 -07:00
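A sketch of the pattern being fixed, with a hypothetical stand-in for s3Select's Execute: once the status line has been written, the handler must not write it again.

```
package main

import (
	"log"
	"net/http"
)

// s3SelectExecute stands in for s3Select's Execute method, which writes
// the response status itself (hypothetical signature).
func s3SelectExecute(w http.ResponseWriter, r *http.Request) error {
	w.WriteHeader(http.StatusOK)
	// ... stream select results ...
	return nil
}

func selectHandler(w http.ResponseWriter, r *http.Request) {
	if err := s3SelectExecute(w, r); err != nil {
		// The status has already been sent; a second WriteHeader would
		// only log "http: superfluous response.WriteHeader call".
		log.Println("select failed:", err)
	}
}
```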
Eco
4280e68de3 Document not to use autogenerated keys with containers (#6737) 2018-10-31 12:09:16 +05:30
Harshavardhana
f162d7bd97 Performance improvements by re-using record buffer (#6622)
Avoid unnecessary pointer allocations where they are not needed, for example:

- *SelectFuncs{}
- *Row{}
2018-10-31 08:48:01 +05:30
Harshavardhana
36990aeafd Avoid double bucket validation in DeleteObjectHandler (#6720)
On a heavily loaded server, getBucketInfo() becomes slow;
one can easily observe that deleting an object causes many
additional network calls.

This PR lets the underlying call return the actual
error and writes it back to the client.
2018-10-30 16:07:57 -07:00
Harshavardhana
9fe51e392b Support etcd TLS certificates (#6719)
This PR supports two models for etcd certs

- Client-to-server transport security with HTTPS
- Client-to-server authentication with HTTPS client certificates
2018-10-29 11:14:12 -07:00
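For illustration, a sketch of how both models map onto an etcd v3 client config; the import path matches the era's vendoring, and the endpoint and file locations are illustrative:

```
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"

	"github.com/coreos/etcd/clientv3"
)

func newEtcdClient() (*clientv3.Client, error) {
	// Transport security with HTTPS: trust the server's CA.
	caPEM, err := ioutil.ReadFile("/etc/ssl/etcd-ca.pem")
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caPEM)

	// Client authentication with HTTPS: present a client certificate.
	cert, err := tls.LoadX509KeyPair("/etc/ssl/etcd-client.pem", "/etc/ssl/etcd-client-key.pem")
	if err != nil {
		return nil, err
	}

	return clientv3.New(clientv3.Config{
		Endpoints: []string{"https://etcd.example.com:2379"},
		TLS: &tls.Config{
			RootCAs:      pool,
			Certificates: []tls.Certificate{cert},
		},
	})
}
```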
Harshavardhana
7e879a45d5 Add policy claim support for JWT (#6660)
This way temporary credentials can use canned
policies on the server without configuring OPA.
2018-10-29 11:08:59 -07:00
poornas
1c911c5f40 Fix: Validate copy-part encryption header and metadata (#6725)
Otherwise CopyObjectPart would continue to upload the part with an incorrect
encryption option and fail when the upload is finalized.
2018-10-29 06:40:34 -07:00
poornas
bd8dc17b7a gateway s3:Make sure to convert s3 errors to ObjectLayer errors (#6717) 2018-10-28 22:11:20 -07:00
Harshavardhana
8491a29ec3 Fix healing bucket properly (#6716)
A bucket should be healed properly if it partially exists
on only one set, since buckets are common to all sets.

Fixes #6710
2018-10-28 14:13:17 -07:00
Pontus Leitzler
81d21850ec Root CAs can be used for backend without TLS (#6711) 2018-10-28 06:21:00 +05:30
kannappanr
c6ec3fdfba Return error response header only for HEAD method (#6709) 2018-10-26 18:03:17 -07:00
Anis Elleuch
88c3dd49c6 copy: Ensure that the user has GET access to the src object (#6715) 2018-10-26 16:12:44 -07:00
kannappanr
6869f6d9dd Remove unwanted logs (#6708) 2018-10-26 14:41:25 -07:00
Harshavardhana
3f643acb99 HealBucket was double counting endpoints (#6707)
Comparing endpoints blindly, without checking whether
they are local, is wrong because the actual drive
for a local disk is always going to provide just
the path without the HTTP endpoint.

Add code such that this is handled properly in
all situations. Without this PR HealBucket() would
wrongly conclude that the healing doesn't have quorum
when a larger number of local disks are involved.

Fixes #6703
2018-10-26 10:25:52 -07:00
Pontus Leitzler
ea73accefd Remove h2 from NextProtos since it doesn't work (#6705) 2018-10-26 12:48:39 +05:30
Harshavardhana
555d54371c Fix CopyObjectPart broken source encryption support (#6699)
Current master didn't support CopyObjectPart when the source
was encrypted; this PR fixes that by allowing ranged
CopySource decryption at different sequence numbers.

Fixes #6698
2018-10-25 08:50:06 -07:00
Harshavardhana
bab4c90c45 Fix broken links in docs (#6700) 2018-10-25 11:39:31 +05:30
Minio Trusted
a2fc0b14d6 Update yaml files to latest version RELEASE.2018-10-25T01-27-03Z 2018-10-25 01:31:57 +00:00
Harshavardhana
bf66e9a529 Reload etcd users and policies properly (#6694)
There was a bug in how we reload users and policies
that led to users/policies going missing due to incorrect
path construction.

Fixes #6693
2018-10-24 17:40:06 -07:00
Harshavardhana
fde8c38638 Add default canned policies (#6690) 2018-10-24 17:14:27 -07:00
Kaan Kabalak
e6252dee5a Fix links not working on Docs site (#6692)
The relative link paths that weren't working have been changed to
direct links to the corresponding Github pages.
2018-10-24 17:00:26 -07:00
Praveen raj Mani
ecb042aa1c Copy and CopyPart changes for compression (#6669)
This PR fixes

- The target object should be compressed even if the
  source object is not compressed.

- The actual size for an encrypted object should be the
  `decryptedSize`
2018-10-23 11:46:20 -07:00
Anis Elleuch
e29009d347 Register postgre driver in pkg/event/target (#6689)
Commit 5c13765168 removed the postgres driver registration, triggered
by the automatic gofmt command, but that was the only place where pg is registered.
This commit fixes the behavior and adds unit tests to check whether the
postgres & sql drivers are registered.
2018-10-23 11:44:46 -07:00
Pontus Leitzler
9631d65552 Fix goroutine test fatalf (#6682)
Use t.Error/t.Errorf instead of t.Fatal/t.Fatalf

Add returns to achieve the same behaviour as earlier
2018-10-23 09:44:20 -07:00
Nitish Tiwari
7b7be66fa1 Cleanup Kubernetes documentation (#6678) 2018-10-23 18:22:43 +05:30
Harshavardhana
b99aaab42e Sid value can be any unicode character, support it (#6676)
Fixes #6476
2018-10-23 16:11:06 +05:30
Nitish Tiwari
32bd1b31e9 Fix images for 8 node distributed deployment (#6685)
fixes #6633
2018-10-23 10:50:38 +05:30
Eco
f287b15e71 docs/minio-limits.md formatting (#6683)
Formatted docs to show the missing "\" character, added "/" to the list of unsupported chars, and noted that the list is not exhaustive.
2018-10-22 21:00:46 -07:00
Andreas Auernhammer
586466584f fix wrong actual part size assignment in CopyObjectPart (#6652)
This commit fixes a wrong assignment to `actualPartSize`.
The `actualPartSize` for an encrypted src object is not `srcInfo.Size`
because that's the encrypted object size which is larger than the
actual object size. So the actual part size for an encrypted
object is the decrypted size of `srcInfo.Size`.
2018-10-22 14:23:23 -07:00
Ashish Kumar Sinha
c0b4bf0a3e SQL select query for CSV/JSON (#6648)
`select *` and selecting column names have been implemented for CSV.
`select *` is implemented for JSON.
2018-10-22 12:12:22 -07:00
Praveen raj Mani
acf46cc3b5 SetConfigHandler should avoid setting an invalid notification config (#6679)
Fixes #6642 
Fixes #6641
2018-10-22 11:51:26 -07:00
Guido García
06ef8248c3 docs: add link to s3 gateway (#6666)
Minor change: Add a link to S3 gateway to make it easier to find that info.
2018-10-22 11:47:13 -07:00
Aarushi Arya
7c2ae4eaf7 Remove tmp file and multipart folder in FS mode. (#6677)
Fixes #6588
2018-10-22 07:36:30 -07:00
Harshavardhana
989d7af9ac Handle corrupted files correctly by fixing quorum (#6670)
This PR completes the missing functionality from #6592
2018-10-19 11:00:09 -07:00
Andreas Auernhammer
8a6c3aa3cd crypto: add RemoveInternalEntries function (#6616)
This commit adds a function for removing crypto-specific
internal entries from the object metadata.

See #6604
2018-10-19 10:50:52 -07:00
Harshavardhana
62b560510b Fix SSE-C source decryption handling (#6671)
Without this fix we have room for two different types of
errors.

- Source is encrypted and we didn't provide any source encryption keys

This results in an IncompleteBody error being returned to the client,
since the source is encrypted but we handed the reader as-is to the
object layer, so it carried the decrypted value whose size no longer matched.

- Source is not encrypted and we provided source encryption keys.

This results in a corrupted object on the destination, which is
considered encrypted but cannot be read by the server; the server
returns the following error.

```
<Error><Code>XMinioObjectTampered</Code><Message>The requested object
was modified and may be compromised</Message><Resource>/id-platform-gamma/
</Resource><RequestId>155EDC3E86BFD4DA</RequestId><HostId>3L137</HostId>
</Error>
```
2018-10-19 10:41:13 -07:00
Harshavardhana
0edfb32621 Fix multi-user doc (#6662) 2018-10-19 12:35:44 +05:30
poornas
7e0f1eb8b5 Fix: verify client sent md5sum in encrypted PutObjectPart request (#6668)
This PR also removes the check for SSE-S3 headers, as it
is not required by the S3 specification.
2018-10-18 16:05:05 -07:00
Pontus Leitzler
b43e8337b1 Add error handling in api-resource.go (#6651) 2018-10-18 07:31:46 -07:00
Harshavardhana
30040fba45 Add windows CI on travis (#6661) 2018-10-18 08:48:53 +05:30
Minio Trusted
44cf9ac62f Update yaml files to latest version RELEASE.2018-10-18T00-28-58Z 2018-10-18 00:34:26 +00:00
poornas
3b55357045 fix:Init globalIAMSys for ExecObjectLayerAPITest (#6636) 2018-10-17 17:25:50 -07:00
Harshavardhana
18d9a20ff6 Enable admin users API on gateway (#6659)
This is only enabled when etcd is enabled; healing is only
enabled for the erasure-coded backend.
2018-10-17 17:25:16 -07:00
Anis Elleuch
6590aba6d2 xl: PUT an empty dir on an existing prefix succeed (#6658)
This commit fixes a regression introduced in f187a16962.
The regression returned AccessDenied when a client tried to create an empty
directory on an existing prefix, though it should return 200 OK to stay as
close as possible to the S3 specification.
2018-10-17 16:37:02 -07:00
Harshavardhana
2e81f27d27 Allow all browser calls to honor multi-users (#6645)
- GetAuth
- SetAuth
- GenerateAuth

Disallow changing bucket level metadata or creating/deleting buckets.
2018-10-17 16:23:09 -07:00
Anis Elleuch
ae3c05aa37 Avoid printing i/o closed pipe error message (#6654)
Since the refactoring to the GetObjectNInfo style, there are many cases
where an i/o closed pipe error is printed, e.g. when downloading an object
with the wrong encryption key. This PR removes the log.
2018-10-17 15:52:18 -07:00
Praveen raj Mani
cef044178c Treat columns with spaces in between [s3Select] (#6597)
Replace the double/single quotes with backticks so that xwb1989/sqlparser
recognises such queries.

Fixes #6589
2018-10-17 11:01:26 -07:00
Pontus Leitzler
c998d1ac8c Add missing error check (#6632) 2018-10-17 10:57:12 -07:00
Eco
3457e504cf Spelling changes and fixed link (#6596) 2018-10-17 10:55:55 -07:00
Pontus Leitzler
7f0cca075a Update lint installation (#6650) 2018-10-17 21:59:04 +05:30
Wenjie
088c595e01 handle exception InvalidPart (#6649) 2018-10-17 21:50:58 +05:30
Harshavardhana
26b4b466df Fix a typo in multi-user doc (#6643) 2018-10-16 20:39:44 -07:00
Andreas Auernhammer
fdf691fdcc move SSE-C TLS enforcement into generic handler (#6639)
This commit moves the check that SSE-C requests
must be made over TLS into a generic HTTP handler.

Since the HTTP server uses custom TCP connection handling
it is not possible to use `http.Request.TLS` to check
for TLS connections. So using `globalIsSSL` is the only
option to detect whether the request is made over TLS.
By extracting this check into a separate handler it's possible
to refactor other parts of the SSE handling code further.
2018-10-16 19:22:09 -07:00
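A minimal sketch of such a generic handler, assuming the `globalIsSSL` flag above and the standard SSE-C request header:

```
package main

import "net/http"

var globalIsSSL bool

// requireTLSForSSEC rejects SSE-C requests when the server is not
// serving TLS, before they reach the object handlers.
func requireTLSForSSEC(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		hasSSEC := r.Header.Get("X-Amz-Server-Side-Encryption-Customer-Algorithm") != ""
		if hasSSEC && !globalIsSSL {
			// SSE-C is only allowed over TLS connections.
			http.Error(w, "SSE-C requests require TLS connections", http.StatusBadRequest)
			return
		}
		next.ServeHTTP(w, r)
	})
}
```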
Harshavardhana
88c8c2d6cd Fix browser login with multi users (#6644) 2018-10-16 18:44:58 -07:00
Nitish Tiwari
ef585037a0 Update config documentation (#6634) 2018-10-16 16:45:04 -07:00
Anis Elleuch
362ebdcbab xl: Add '.CORRUPTED' prefix to corrupted objects (#6592)
getObjectInfo() checks whether a given object has enough xl.json files on
disks, returns 404 if not, and adds a .CORRUPTED suffix to the object.
2018-10-16 15:49:35 -07:00
Harshavardhana
b251454dd6 Fix toggling users status (#6640) 2018-10-16 14:55:23 -07:00
Harshavardhana
21c8693d9c Disable printing access/secrets in systemd (#6621)
Minio when run as a service in `systemd` should
avoid printing access/secret keys.
2018-10-16 13:19:12 -07:00
Harshavardhana
1e7e5e297c Add canned policy support (#6637)
This PR adds an additional API with which we can create
a new set of canned policies that can be used with one
or many users.
2018-10-16 12:48:19 -07:00
kannappanr
c7f180ffa9 Add code to translate errInvalidEncryptionParameters to APIErrcode (#6625)
Fixes #6623
2018-10-16 12:27:34 -07:00
kannappanr
b8bd8d6a03 Validate user provided SSE-C key on Head Object API (#6600)
Fixes #6598
2018-10-16 12:24:27 -07:00
Andreas Auernhammer
baec331e84 crypto: add functions for sealing/unsealing the etag for SSE (#6618)
This commit adds two functions for sealing/unsealing the
etag (a.k.a. content MD5) in case of SSE single-part upload.

Sealing the ETag is necessary in case of SSE-S3 to preserve
the security guarantees. In case of SSE-S3, AWS returns the
content-MD5 of the plaintext object as the ETag. However, we
must not store the MD5 of the plaintext for encrypted objects;
otherwise it becomes possible for an attacker to detect
equal/non-equal encrypted objects. Therefore we encrypt
the ETag before storing it on the backend. But we only need
to encrypt the ETag (content-MD5) if the client sends it -
otherwise the client cannot verify it anyway.
2018-10-16 10:02:19 -07:00
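An illustrative sketch of the idea, using AES-GCM as a stand-in for the crypto package's actual sealing construction:

```
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"io"
)

// sealETag encrypts the plaintext content-MD5 with the object key
// before it is stored on the backend (illustrative construction only).
func sealETag(objectKey [32]byte, etag []byte) ([]byte, error) {
	block, err := aes.NewCipher(objectKey[:])
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return nil, err
	}
	// Store nonce || ciphertext; unsealing splits and decrypts.
	return gcm.Seal(nonce, nonce, etag, nil), nil
}
```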
poornas
557f382477 cache: remove cache space constraint (#6635)
Relax the cache constraint of requiring 100 times the size of the
object being cached, for better cache utilization.
2018-10-16 11:06:42 +05:30
Harshavardhana
23b166b318 Remove applying custom policies with STS access keys (#6626)
Move away from allowing custom policies; all policies in
STS come from OPA, otherwise they fail.
2018-10-15 12:44:03 -07:00
Anis Elleuch
81a481e098 profiling: Fix downloading tracing profiling data (#6599)
pkg/pprof saves tracing profile data under a different file name
(trace.out), in contrast to the other profiling modes.
2018-10-15 11:13:19 -07:00
Anis Elleuch
5b3090dffc encryption: Fix copy from encrypted multipart to single part (#6604)
The CopyObject handler forgot to remove the multipart encryption flag in
metadata when the source is an encrypted multipart object and the target is
also encrypted but a single-part object.

This PR also simplifies the code to facilitate review.
2018-10-15 11:07:36 -07:00
Harshavardhana
3ef3fefd54 Add ListUsers API to list all configured users in IAM (#6619) 2018-10-13 12:48:43 +05:30
Andreas Auernhammer
28e25eac78 crypto: add helper functions for unsealing object keys (#6609)
This commit adds 3 helper functions for SSE-C and SSE-S3
to simplify object key unsealing in server code.

See #6600
2018-10-12 18:06:38 -07:00
Harshavardhana
b0c9ae7490 Add audit logging for S3 and Web handlers (#6571)
This PR brings an additional logger implementation
called AuditLog which logs to HTTP targets.

The intention is to use AuditLog to log all incoming
requests; this is used as a mechanism by external log
collection entities for processing Minio requests.
2018-10-12 12:25:59 -07:00
Harshavardhana
143e7fe300 Add etcd support to support STS on gateway mode (#6531) 2018-10-12 11:32:18 -07:00
Andreas Auernhammer
f09e7ca764 fix travis CI build (#6620)
This commit fixes the Travis CI build by
correcting the golint import path
2018-10-11 14:58:44 -07:00
Mariska Hoogenboom
fae284d6b9 Docs fix for restart issue with orchestrated minio stack (#6606) (#6613) 2018-10-11 14:41:19 +05:30
poornas
83d8e01c81 fix: Close cacheReader if cache entry has expired (#6610)
prevent locking issues

Fixes #6602
2018-10-10 23:01:24 -07:00
poornas
110458cd10 Fix: Disallow requests with SSE-KMS headers (#6587)
Addresses issue #6582. Minio server currently does not
have SSE-KMS support. Reject requests with SSE-KMS headers
with NotImplementedErr
2018-10-09 15:04:53 -07:00
Aditya Manthramurthy
e3eec89d24 Optimize string processing in select (#6593)
Reduce allocations during string concatenation and simplify some
processing code.
2018-10-09 14:02:19 -07:00
Harshavardhana
54ae364def Introduce STS client grants API and OPA policy integration (#6168)
This PR introduces two new features

- AWS STS compatible STS API named AssumeRoleWithClientGrants

```
POST /?Action=AssumeRoleWithClientGrants&Token=<jwt>
```

This API endpoint returns temporary access credentials. Access
token signature types supported by this API:

  - RSA keys
  - ECDSA keys

The required public keys are fetched from the JWKS endpoints and
provided as RSA or ECDSA public keys.

- External policy engine support, in this case OPA policy engine

- Credentials are stored on disks
2018-10-09 14:00:01 -07:00
Aditya Manthramurthy
16a100b597 Fix out-of-bound array access crash in select processing (#6594)
Fix test case.
2018-10-09 09:45:56 -07:00
Anis Elleuch
cbc5d78a09 Handle read/quorum errors when initializing all subsystems (#6585)
- Only require len(disks)/2 to initialize the cluster
- Fix checking of read/write quorum in subsystems init
- Add a retry mechanism in policy and notification to avoid aborting in case of read/write quorum errors
2018-10-08 15:47:13 -07:00
Minio Trusted
d8a2975a68 Update yaml files to latest version RELEASE.2018-10-06T00-15-16Z 2018-10-06 00:19:47 +00:00
Anis Elleuch
66d911653f xl: Fix typo in PutObjectPart when part size is 10Mb (#6574)
PutObjectPart forgot to allocate buffer memory when the size
of the uploaded part is exactly equal to blockSizeV1 = 10 Mb.
2018-10-05 17:09:50 -07:00
Anis Elleuch
ca6f795504 xl: check for correct err variable when prepareFile fails in PutObjectPart (#6573)
In the xl.PutObjectPart call, prepareFile detected an error when the storage
is exhausted, but we were returning the wrong error.

With this commit, users can see the correct error message when their disks
become full.
2018-10-05 17:09:20 -07:00
Eco
2af0f11731 Update readme.md (#6568) 2018-10-05 16:25:22 -07:00
Harshavardhana
c3408f4f04 Send correct bucket notifications from web handlers (#6572)
Upload, Download, DownloadZip were incomplete
2018-10-05 11:20:00 -07:00
Minio Trusted
b92c324254 Update yaml files to latest version RELEASE.2018-10-05T01-03-03Z 2018-10-05 01:08:39 +00:00
Krishna Srinivas
81bee93b8d Move remote disk StorageAPI abstraction from RPC to REST (#6464) 2018-10-04 17:44:06 -07:00
Ashish Kumar Sinha
670f9788e3 Count(*) to give integer value (#6564)
The Max and Min functions were giving float values even when the inputs were integers.
Resolved Max and Min to return integers in that scenario.

Fixes #6472
2018-10-04 17:33:53 -07:00
Anis Elleuch
f187a16962 xl: Require full write quorum in rename() as general rule (#6535)
Simplify the logic of using rename() in xl. Currently, renaming
doesn't require the source object/dir to exist on at least
read-quorum disks. Since there is no apparent reason for that
to be the general rule, this commit simplifies the
logic to avoid possible inconsistency in the backend in the future.
2018-10-04 17:22:49 -07:00
Anis Elleuch
e031f2b614 xl: Fix typo in PutObject when uploading a 10Mb file (#6567)
PutObject forgot to allocate memory when the size of the uploaded
file is exactly equal to blockSizeV1 = 10 Mb.
2018-10-04 15:37:15 -07:00
Harshavardhana
c2b7b82ef4 Verify bitrot in ReadFile correctly when verifier is set (#6563)
This is a major regression introduced in this commit

ce02ab613d is the first bad commit
commit ce02ab613d
Author: Krishna Srinivas <634494+krishnasrinivas@users.noreply.github.com>
Date:   Mon Aug 6 15:14:08 2018 -0700

    Simplify erasure code by separating bitrot from erasure code (#5959)

:040000 040000 794f58d82a d2201ebfc8 M	cmd

This affects all distributed server deployments since this commit

All the following releases are affected

- RELEASE.2018-09-25T21-34-43Z
- RELEASE.2018-09-12T18-49-56Z
- RELEASE.2018-09-11T01-39-21Z
- RELEASE.2018-09-01T00-38-25Z
- RELEASE.2018-08-25T01-56-38Z
- RELEASE.2018-08-21T00-37-20Z
- RELEASE.2018-08-18T03-49-57Z

Thanks to Anis for reproducing the issue
2018-10-04 10:16:38 -07:00
Harshavardhana
02ad9d6072 Avoid unsolicited response over idle http client (#6562)
Without this PR the minio server writes an erroneous
response to clients on idle connections, which ends
up printing the following message

```
Unsolicited response received on idle HTTP channel
```

This PR avoids sending responses on idle connections,
i.e. routine network errors.
2018-10-04 10:13:12 -07:00
Anis Elleuch
ea9408ccbb worm: when enabled, avoid renaming the existing object in tmp directory (#6560)
In XL PutObject & CompleteMultipartUpload, the existing object is renamed
to the temporary directory before checking whether worm is enabled.

Most of the time this doesn't cause an issue, unless two uploads to the
same location occur at the same time. Since there is no locking in the object
handlers, both uploads will reach the XL layer. The second client, on acquiring
the write lock in put object or complete upload in XL, will rename the object
to the temporary directory before doing the check and returning the error (wrong!).

This commit therefore fixes the behavior: no rename to the temporary directory
if worm is enabled.
2018-10-03 20:39:24 -07:00
Pontus Leitzler
307765591d Use GetObjectInfo instead of GetObjectNInfo before cache decision (#6553) 2018-10-03 11:02:32 -07:00
Harshavardhana
5d859b2178 gateway/azure: allow putObject to support block based upload (#6552)
The current implementation simply uses all the memory locally
and crashes when a large upload is initiated using the Minio
browser UI.

This PR uploads the stream in blocks and finally commits the blocks,
allowing for a low memory footprint while uploading large objects
through the Minio browser UI.

This PR also adds ETag compatibility for single PUT operations.

Fixes #6542
Fixes #6550
2018-10-02 23:08:16 -07:00
Harshavardhana
223967fd32 Return always a default heal item upon unexpected error (#6556)
Never return an empty result item, even upon error;
choose all the default values and, based on the errors,
make sure to send the right result reply.
2018-10-02 17:13:51 -07:00
Pontus Leitzler
274b35154c Fix go1.11 vet shadow errors (#6555) 2018-10-02 15:21:34 -07:00
Harshavardhana
c05ced08bb Handle Range requests on empty objects (#6557)
Return a proper error on empty objects: S3 returns
416 Invalid Range for range requests against objects
which are empty.

We should return the same.
2018-10-02 12:48:51 -07:00
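A sketch of the check, assuming a hypothetical helper invoked before any object bytes are served:

```
package main

import "net/http"

// checkRangeEmpty answers a Range request against a zero-byte object
// with 416, matching S3; it reports whether serving may continue.
func checkRangeEmpty(w http.ResponseWriter, r *http.Request, size int64) bool {
	if size == 0 && r.Header.Get("Range") != "" {
		w.Header().Set("Content-Range", "bytes */0")
		w.WriteHeader(http.StatusRequestedRangeNotSatisfiable) // 416
		return false
	}
	return true
}
```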
Praveen raj Mani
c7722fbb1b Simplify pkg mimedb (#6549)
Content-Type resolution can now use a function `TypeByExtension(extension)` 
to resolve to the respective content-type.
2018-10-02 11:48:17 +05:30
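A usage sketch of the new helper; the import path matches where the package lives in this repo, and whether the leading dot is accepted is an assumption here:

```
package main

import (
	"fmt"
	"path/filepath"

	"github.com/minio/minio/pkg/mimedb"
)

func main() {
	// Resolve a Content-Type from a file extension.
	ext := filepath.Ext("photo.png") // ".png"
	fmt.Println(mimedb.TypeByExtension(ext))
}
```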
Harshavardhana
f163bed40d Add Vault support for custom CAs directory (#6527) 2018-10-01 13:49:10 -07:00
Harshavardhana
b4772849f9 Heal recursively all entries in config/ prefix (#6545)
This is to ensure that we heal all entries under the config/
prefix; we will have IAM- and STS-related files which
are being introduced in PR #6168.

This change ensures that we heal all of them
properly, not just `config.json`.
2018-10-01 22:24:26 +05:30
Harshavardhana
839a758a36 Fix a crash due to race between Abort/CompleteMultipart (#6544)
Fixes #6429
2018-10-01 09:50:09 -07:00
Harshavardhana
aebfceeafb Heal backend configuration file (#6532)
Fixes #6461
2018-09-29 13:47:01 +05:30
Anis Elleuch
83d7ec09c1 Disable restarting server after setting a new config (#6521)
Also disable listening for the service restart event in tests, since
we don't do this anymore.
2018-09-28 12:10:51 -07:00
Harshavardhana
8c29f69b00 Fix racy error communication inside go-routine (#6539)
Use CloseWithError to communicate errors in the pipe;
this PR also fixes potential shadowing of an error variable.
2018-09-28 13:14:59 +05:30
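A minimal sketch of the CloseWithError pattern with the standard library's io.Pipe:

```
package main

import (
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	pr, pw := io.Pipe()
	go func() {
		_, err := pw.Write([]byte("partial data"))
		if err == nil {
			err = fmt.Errorf("upstream failed")
		}
		// The reader's next Read returns this error instead of io.EOF,
		// so the failure is not silently swallowed.
		pw.CloseWithError(err)
	}()
	_, err := ioutil.ReadAll(pr)
	fmt.Println(err) // "upstream failed"
}
```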
Praveen raj Mani
ce9d36d954 Add object compression support (#6292)
Add support for streaming (golang/LZ77/snappy) compression.
2018-09-28 09:06:17 +05:30
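For illustration, a sketch of streaming snappy compression with github.com/golang/snappy; the wiring into the object layer is omitted:

```
package main

import (
	"io"
	"os"

	"github.com/golang/snappy"
)

// compressTo streams src through a snappy framing writer into dst,
// so nothing needs to be buffered in memory as a whole.
func compressTo(dst io.Writer, src io.Reader) error {
	sw := snappy.NewBufferedWriter(dst)
	if _, err := io.Copy(sw, src); err != nil {
		sw.Close()
		return err
	}
	return sw.Close() // flushes the final frame
}

func main() {
	compressTo(os.Stdout, os.Stdin)
}
```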
Anis Elleuch
5c765bc63e Fix one possible data race in admin tests (#6537)
go test shows the following warning:
```
WARNING: DATA RACE
Write at 0x000002909e18 by goroutine 276:
  github.com/minio/minio/cmd.testAdminCmdRunnerSignalService()
      /home/travis/gopath/src/github.com/minio/minio/cmd/admin-rpc_test.go:44 +0x94

 Previous read at 0x000002909e18 by goroutine 194:
  github.com/minio/minio/cmd.testServiceSignalReceiver()
      /home/travis/gopath/src/github.com/minio/minio/cmd/admin-handlers_test.go:467 +0x70
```

The reason for this data race is that some admin tests do not wait for goroutines
they created to exit, which triggers the race detector.
2018-09-27 17:16:30 -07:00
Anis Elleuch
6c7c6bec91 profiler: Download API returns error when all nodes fail (#6525)
When the download-profiling-data API fails to gather profiling data
from all nodes for any reason (including the profiler not being enabled),
return a 400 HTTP code with the appropriate JSON message.
2018-09-27 10:34:37 -07:00
poornas
ed703c065d Add ObjectOptions to GetObjectNInfo (#6533) 2018-09-27 15:36:45 +05:30
Aditya Manthramurthy
387584356f Remove unused range parsing code and update tests (#6530) 2018-09-27 15:24:07 +05:30
Harshavardhana
1111419d4a Add debugging for mutex, tracing (#6522) 2018-09-27 09:32:05 +05:30
Anis Elleuch
20378821cf madmin: close http response when returning an error (#6526)
httpRespToErrorResponse() usually reads the HTTP response body when
the HTTP error code is unexpected, in order to parse the JSON error
response in the body; however, it was never properly closing
the connection. This PR fixes the behavior.
2018-09-26 11:03:35 -07:00
Pontus Leitzler
ce1bfa6de8 Removed unused vendored dependencies (#6520) 2018-09-25 14:51:51 -07:00
Minio Trusted
6c26227081 Update yaml files to latest version RELEASE.2018-09-25T21-34-43Z 2018-09-25 21:39:03 +00:00
938 changed files with 94770 additions and 68906 deletions

.gitignore vendored
View File

@@ -22,4 +22,5 @@ parts/
prime/
stage/
.sia_temp/
config.json
healthcheck

View File

@@ -1,10 +1,4 @@
go_import_path: github.com/minio/minio
sudo: required
services:
- docker
dist: trusty
language: go
@@ -18,20 +12,39 @@ branches:
matrix:
include:
- os: linux
dist: trusty
sudo: required
env:
- ARCH=x86_64
go: 1.10.3
- CGO_ENABLED=0
go: 1.11.4
script:
- make
- diff -au <(gofmt -s -d cmd) <(printf "")
- diff -au <(gofmt -s -d pkg) <(printf "")
- make test GOFLAGS="-timeout 15m -race -v"
- for d in $(go list ./... | grep -v browser); do CGO_ENABLED=1 go test -v -race --timeout 15m "$d"; done
- make verifiers
- make crosscompile
- make verify
- make coverage
- cd browser && yarn && yarn test && cd ..
- os: windows
env:
- ARCH=x86_64
- CGO_ENABLED=0
go: 1.11.4
script:
- go build --ldflags="$(go run buildscripts/gen-ldflags.go)" -o %GOPATH%\bin\minio.exe
- for d in $(go list ./... | grep -v browser); do CGO_ENABLED=1 go test -v -race --timeout 20m "$d"; done
- bash buildscripts/go-coverage.sh
before_script:
# Add an IPv6 config - see the corresponding Travis issue
# https://github.com/travis-ci/travis-ci/issues/8361
- if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6'; fi
before_install:
- nvm install stable
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then nvm install stable ; fi
after_success:
- bash <(curl -s https://codecov.io/bash)

View File

@@ -68,4 +68,4 @@ To remove a dependency
- Run `make pkg-remove PKG=foo/bar` from top-level directory
### What are the coding guidelines for Minio?
``Minio`` is fully conformant with Golang style. Refer: [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project. If you observe offending code, please feel free to send a pull request or ping us on [Slack](slack.minio.io).
``Minio`` is fully conformant with Golang style. Refer: [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project. If you observe offending code, please feel free to send a pull request or ping us on [Slack](https://slack.minio.io).

View File

@@ -1,34 +1,39 @@
FROM golang:1.10.1-alpine3.7
FROM golang:1.11.4-alpine3.7
LABEL maintainer="Minio Inc <dev@minio.io>"
ENV GOPATH /go
ENV PATH $PATH:$GOPATH/bin
ENV CGO_ENABLED 0
WORKDIR /go/src/github.com/minio/
RUN \
apk add --no-cache git && \
go get -v -d github.com/minio/minio && \
cd /go/src/github.com/minio/minio && \
go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)" && \
go build -ldflags "-s -w" -o /usr/bin/healthcheck dockerscripts/healthcheck.go
FROM alpine:3.7
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key
WORKDIR /go/src/github.com/minio/
EXPOSE 9000
COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck.sh /usr/bin/
COPY --from=0 /go/bin/minio /usr/bin/minio
COPY --from=0 /usr/bin/healthcheck /usr/bin/healthcheck
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' && \
apk add --no-cache --virtual .build-deps git && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
go get -v -d github.com/minio/minio && \
cd /go/src/github.com/minio/minio && \
go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)" && \
rm -rf /go/pkg /go/src /usr/local/go && apk del .build-deps
EXPOSE 9000
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
HEALTHCHECK --interval=30s --timeout=5s \
CMD /usr/bin/healthcheck.sh
HEALTHCHECK --interval=1m CMD healthcheck
CMD ["minio"]

View File

@@ -2,7 +2,7 @@ FROM alpine:3.7
LABEL maintainer="Minio Inc <dev@minio.io>"
COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck.sh /usr/bin/
COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck /usr/bin/
COPY minio /usr/bin/
ENV MINIO_UPDATE off
@@ -14,7 +14,7 @@ RUN \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/healthcheck.sh
chmod +x /usr/bin/healthcheck
EXPOSE 9000
@@ -22,7 +22,6 @@ ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
HEALTHCHECK --interval=30s --timeout=5s \
CMD /usr/bin/healthcheck.sh
HEALTHCHECK --interval=1m CMD healthcheck
CMD ["minio"]

View File

@@ -1,8 +1,22 @@
FROM golang:1.11.4-alpine3.7
ENV GOPATH /go
ENV CGO_ENABLED 0
WORKDIR /go/src/github.com/minio/
RUN \
apk add --no-cache git && \
go get -v -d github.com/minio/minio && \
cd /go/src/github.com/minio/minio/dockerscripts && \
go build -ldflags "-s -w" -o /usr/bin/healthcheck healthcheck.go
FROM alpine:3.7
LABEL maintainer="Minio Inc <dev@minio.io>"
COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck.sh /usr/bin/
COPY --from=0 /usr/bin/healthcheck /usr/bin/healthcheck
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
@@ -14,7 +28,7 @@ RUN \
curl https://dl.minio.io/server/minio/release/linux-amd64/minio > /usr/bin/minio && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/healthcheck.sh
chmod +x /usr/bin/healthcheck
EXPOSE 9000
@@ -22,7 +36,6 @@ ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
HEALTHCHECK --interval=30s --timeout=5s \
CMD /usr/bin/healthcheck.sh
HEALTHCHECK --interval=1m CMD healthcheck
CMD ["minio"]

Dockerfile.simpleci Normal file
View File

@@ -0,0 +1,72 @@
#-------------------------------------------------------------
# Stage 1: Build and Unit tests
#-------------------------------------------------------------
FROM golang:1.11.4
COPY . /go/src/github.com/minio/minio
WORKDIR /go/src/github.com/minio/minio
RUN apt-get update && apt-get install -y jq
RUN make
RUN bash -c 'diff -au <(gofmt -s -d cmd) <(printf "")'
RUN bash -c 'diff -au <(gofmt -s -d pkg) <(printf "")'
RUN apt-get update && \
apt-get -y install sudo
RUN touch /etc/sudoers
RUN echo "root ALL=(ALL) ALL" >> /etc/sudoers
RUN echo "ci ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
RUN echo "Defaults env_reset" >> /etc/sudoers
RUN echo 'Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go:/usr/local/go/bin"' >> /etc/sudoers
RUN mkdir -p /home/ci/.cache
RUN groupadd -g 999 ci && \
useradd -r -u 999 -g ci ci
RUN chown -R ci:ci /go
RUN chown -R ci:ci /home/ci
USER ci
RUN for d in $(go list ./... | grep -v browser); do go test -v -race --timeout 15m "$d"; done
RUN make verifiers
RUN make crosscompile
RUN make coverage
RUN make verify
#-------------------------------------------------------------
# Stage 2: Test Frontend
#-------------------------------------------------------------
FROM node:10.15-stretch-slim
COPY browser /minio/browser
WORKDIR /minio/browser
RUN yarn
RUN yarn test
#-------------------------------------------------------------
# Stage 3: Run Gateway Tests
#-------------------------------------------------------------
FROM ubuntu:16.04
COPY --from=0 /go/src/github.com/minio/minio/minio ./minio
COPY buildscripts/gateway-tests.sh ./gateway-tests.sh
RUN apt-get update && apt-get install -y git wget jq curl dnsmasq
RUN wget https://dl.google.com/go/go1.11.4.linux-amd64.tar.gz && \
tar -C /usr/local -xzf go1.11.4.linux-amd64.tar.gz
ENV DEBIAN_FRONTEND noninteractive
ENV LANG C.UTF-8
ENV GOROOT /usr/local/go
RUN mkdir -p /go
ENV GOPATH /go
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH
RUN ./gateway-tests.sh

View File

@@ -13,41 +13,33 @@ checks:
@(env bash $(PWD)/buildscripts/checkgopath.sh)
getdeps:
@echo "Installing golint" && go get -u github.com/golang/lint/golint
@echo "Installing gocyclo" && go get -u github.com/fzipp/gocyclo
@echo "Installing deadcode" && go get -u github.com/remyoudompheng/go-misc/deadcode
@echo "Installing golint" && go get -u golang.org/x/lint/golint
@echo "Installing staticcheck" && go get -u honnef.co/go/tools/...
@echo "Installing misspell" && go get -u github.com/client9/misspell/cmd/misspell
@echo "Installing ineffassign" && go get -u github.com/gordonklaus/ineffassign
verifiers: getdeps vet fmt lint cyclo deadcode spelling
crosscompile:
@(env bash $(PWD)/buildscripts/cross-compile.sh)
verifiers: getdeps vet fmt lint staticcheck spelling
vet:
@echo "Running $@"
@go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult cmd
@go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult pkg
@go vet github.com/minio/minio/...
fmt:
@echo "Running $@"
@gofmt -d cmd
@gofmt -d pkg
@gofmt -d cmd/
@gofmt -d pkg/
lint:
@echo "Running $@"
@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd...
@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg...
@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd/...
@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg/...
ineffassign:
staticcheck:
@echo "Running $@"
@${GOPATH}/bin/ineffassign .
cyclo:
@echo "Running $@"
@${GOPATH}/bin/gocyclo -over 200 cmd
@${GOPATH}/bin/gocyclo -over 200 pkg
deadcode:
@echo "Running $@"
@${GOPATH}/bin/deadcode -test $(shell go list ./...) || true
@${GOPATH}/bin/staticcheck github.com/minio/minio/cmd/...
@${GOPATH}/bin/staticcheck github.com/minio/minio/pkg/...
spelling:
@${GOPATH}/bin/misspell -locale US -error `find cmd/`
@@ -60,7 +52,7 @@ spelling:
check: test
test: verifiers build
@echo "Running unit tests"
@go test $(GOFLAGS) -tags kqueue ./...
@CGO_ENABLED=0 go test -tags kqueue ./...
verify: build
@echo "Verifying build"
@@ -73,7 +65,8 @@ coverage: build
# Builds minio locally.
build: checks
@echo "Building minio binary to './minio'"
@CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio
@GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio
@GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags="-s -w" -o $(PWD)/dockerscripts/healthcheck $(PWD)/dockerscripts/healthcheck.go
docker: build
@docker build -t $(TAG) . -f Dockerfile.dev
@@ -102,6 +95,7 @@ install: build
clean:
@echo "Cleaning up all the generated files"
@find . -name '*.test' | xargs rm -fv
@find . -name '*~' | xargs rm -fv
@rm -rvf minio
@rm -rvf build
@rm -rvf release

View File

@@ -17,7 +17,7 @@ docker run -p 9000:9000 minio/minio server /data
docker pull minio/minio:edge
docker run -p 9000:9000 minio/minio:edge server /data
```
Please visit Minio Docker quickstart guide for more [here](https://docs.minio.io/docs/minio-docker-quickstart-guide)
Note: Docker will not display the autogenerated keys unless you start the container with the `-it`(interactive TTY) argument. Generally, it is not recommended to use autogenerated keys with containers. Please visit Minio Docker quickstart guide for more information [here](https://docs.minio.io/docs/minio-docker-quickstart-guide)
## macOS
### Homebrew
@@ -53,6 +53,15 @@ chmod +x minio
./minio server /data
```
| Platform| Architecture | URL|
| ----------| -------- | ------|
|GNU/Linux|ppc64le|https://dl.minio.io/server/minio/release/linux-ppc64le/minio |
```sh
wget https://dl.minio.io/server/minio/release/linux-ppc64le/minio
chmod +x minio
./minio server /data
```
## Microsoft Windows
### Binary Download
| Platform| Architecture | URL|

View File

@@ -1,55 +0,0 @@
# version format
version: "{build}"
# Operating system (build VM template)
os: Windows Server 2012 R2
# Platform.
platform: x64
clone_folder: c:\gopath\src\github.com\minio\minio
# Environment variables
environment:
GOPATH: c:\gopath
GOROOT: c:\go110
# scripts that run after cloning repository
install:
- set PATH=%GOPATH%\bin;%GOROOT%\bin;%PATH%
- go version
- go env
- python --version
# To run your custom scripts instead of automatic MSBuild
build_script:
# Compile
# We need to disable firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648
- ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)'
- appveyor AddCompilationMessage "Starting Compile"
- cd c:\gopath\src\github.com\minio\minio
- go run buildscripts/gen-ldflags.go > temp.txt
- set /p BUILD_LDFLAGS=<temp.txt
- go build -ldflags="%BUILD_LDFLAGS%" -o %GOPATH%\bin\minio.exe
- appveyor AddCompilationMessage "Compile Success"
# To run your custom scripts instead of automatic tests
test_script:
# Unit tests
- ps: Add-AppveyorTest "Unit Tests" -Outcome Running
- mkdir build\coverage
- for /f "" %%G in ('go list github.com/minio/minio/... ^| find /i /v "browser/"') do ( go test -v -timeout 20m -race %%G )
- go test -v -timeout 20m -coverprofile=build\coverage\coverage.txt -covermode=atomic github.com/minio/minio/cmd
- ps: Update-AppveyorTest "Unit Tests" -Outcome Passed
after_test:
- go tool cover -html=build\coverage\coverage.txt -o build\coverage\coverage.html
- ps: Push-AppveyorArtifact build\coverage\coverage.txt
- ps: Push-AppveyorArtifact build\coverage\coverage.html
# Upload coverage report.
- "SET PATH=C:\\Python34;C:\\Python34\\Scripts;%PATH%"
- pip install codecov
- codecov -X gcov -f "build\coverage\coverage.txt"
# to disable deployment
deploy: off

View File

@@ -5,13 +5,13 @@
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Minio Browser</title>
<link rel="stylesheet" href="loader.css" type="text/css">
<link rel="stylesheet" href="/minio/loader.css" type="text/css">
</head>
<body>
<div class="page-load">
<div class="pl-inner">
<img src="logo.svg" alt="">
<img src="/minio/logo.svg" alt="">
</div>
</div>
<div id="root"></div>
@@ -51,6 +51,6 @@
<![endif]-->
<script>currentUiVersion = 'MINIO_UI_VERSION'</script>
<script src="index_bundle.js"></script>
<script src="/minio/index_bundle.js"></script>
</body>
</html>

View File

@@ -67,7 +67,7 @@ export class ObjectActions extends React.Component {
className="fiad-action"
onClick={this.shareObject.bind(this)}
>
<i className="fa fa-copy" />
<i className="fa fa-share-alt" />
</a>
<a
href=""

View File

@@ -26,6 +26,8 @@ jest.mock("../../web", () => ({
.fn(() => true)
.mockReturnValueOnce(true)
.mockReturnValueOnce(false)
.mockReturnValueOnce(true)
.mockReturnValueOnce(true)
.mockReturnValueOnce(false),
ListObjects: jest.fn(({ bucketName }) => {
if (bucketName === "test-deny") {
@@ -405,7 +407,7 @@ describe("Objects actions", () => {
store.dispatch(actionsObjects.downloadObject("obj1"))
const url = `${
window.location.origin
}${minioBrowserPrefix}/download/bk1/${encodeURI("pre1/obj1")}?token=''`
}${minioBrowserPrefix}/download/bk1/${encodeURI("pre1/obj1")}?token=`
expect(setLocation).toHaveBeenCalledWith(url)
})

View File

@@ -16,11 +16,7 @@
import web from "../web"
import history from "../history"
import {
sortObjectsByName,
sortObjectsBySize,
sortObjectsByDate
} from "../utils"
import { sortObjectsByName, sortObjectsBySize, sortObjectsByDate } from "../utils"
import { getCurrentBucket } from "../buckets/selectors"
import { getCurrentPrefix, getCheckedList } from "./selectors"
import * as alertActions from "../alert/actions"
@@ -60,10 +56,7 @@ export const appendList = (objects, marker, isTruncated) => ({
export const fetchObjects = append => {
return function(dispatch, getState) {
const {
buckets: { currentBucket },
objects: { currentPrefix, marker }
} = getState()
const {buckets: {currentBucket}, objects: {currentPrefix, marker}} = getState()
if (currentBucket) {
return web
.ListObjects({
@@ -110,7 +103,7 @@ export const fetchObjects = append => {
export const sortObjects = sortBy => {
return function(dispatch, getState) {
const { objects } = getState()
const {objects} = getState()
const sortOrder = objects.sortBy == sortBy ? !objects.sortOrder : true
dispatch(setSortBy(sortBy))
dispatch(setSortOrder(sortOrder))
@@ -210,30 +203,39 @@ export const shareObject = (object, days, hours, minutes) => {
const currentPrefix = getCurrentPrefix(getState())
const objectName = `${currentPrefix}${object}`
const expiry = days * 24 * 60 * 60 + hours * 60 * 60 + minutes * 60
return web
.PresignedGet({
host: location.host,
bucket: currentBucket,
object: objectName,
expiry
})
.then(obj => {
dispatch(showShareObject(object, obj.url))
dispatch(
alertActions.set({
type: "success",
message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`
})
)
})
.catch(err => {
dispatch(
alertActions.set({
type: "danger",
message: err.message
})
)
})
if (web.LoggedIn()) {
return web
.PresignedGet({
host: location.host,
bucket: currentBucket,
object: objectName
})
.then(obj => {
dispatch(showShareObject(object, obj.url))
dispatch(
alertActions.set({
type: "success",
message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`
})
)
})
.catch(err => {
dispatch(
alertActions.set({
type: "danger",
message: err.message
})
)
})
} else {
dispatch(showShareObject(object, `${location.host}` + '/' + `${currentBucket}` + '/' + encodeURI(objectName)))
dispatch(
alertActions.set({
type: "success",
message: `Object shared.`
})
)
}
}
}
@@ -279,7 +281,7 @@ export const downloadObject = object => {
} else {
const url = `${
window.location.origin
}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=''`
}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
window.location = url
}
}
@@ -308,7 +310,7 @@ export const downloadCheckedObjects = () => {
objects: getCheckedList(state)
}
if (!web.LoggedIn()) {
const requestUrl = location.origin + "/minio/zip?token=''"
const requestUrl = location.origin + "/minio/zip?token="
downloadZip(requestUrl, req, dispatch)
} else {
return web
@@ -319,14 +321,13 @@ export const downloadCheckedObjects = () => {
}${minioBrowserPrefix}/zip?token=${res.token}`
downloadZip(requestUrl, req, dispatch)
})
.catch(err =>
dispatch(
alertActions.set({
type: "danger",
message: err.message
})
)
.catch(err => dispatch(
alertActions.set({
type: "danger",
message: err.message
})
)
)
}
}
}
@@ -349,8 +350,7 @@ const downloadZip = (url, req, dispatch) => {
var separator = req.prefix.length > 1 ? "-" : ""
anchor.href = blobUrl
anchor.download =
req.bucketName + separator + req.prefix.slice(0, -1) + ".zip"
anchor.download = req.bucketName + separator + req.prefix.slice(0, -1) + ".zip"
anchor.click()
window.URL.revokeObjectURL(blobUrl)

View File

@@ -13,8 +13,7 @@
"setupTestFrameworkScriptFile": "./app/js/jest/setup.js",
"testURL": "https://localhost:8080",
"moduleNameMapper": {
"\\.(jpg|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga)$":
"<rootDir>/app/js/jest/__mocks__/fileMock.js",
"\\.(jpg|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga)$": "<rootDir>/app/js/jest/__mocks__/fileMock.js",
"\\.(css|scss)$": "identity-obj-proxy"
}
},
@@ -40,29 +39,31 @@
"babel-preset-es2015": "^6.14.0",
"babel-preset-react": "^6.11.1",
"babel-register": "^6.14.0",
"copy-webpack-plugin": "^0.3.3",
"copy-webpack-plugin": "^4.6.0",
"css-loader": "^0.23.1",
"enzyme": "^3.3.0",
"enzyme-adapter-react-16": "^1.1.1",
"esformatter": "^0.10.0",
"esformatter-jsx": "^7.4.1",
"esformatter-jsx-ignore": "^1.0.6",
"html-webpack-plugin": "^2.30.1",
"html-webpack-plugin": "^3.2.0",
"jest": "^22.1.4",
"jest-enzyme": "^4.0.2",
"json-loader": "^0.5.4",
"less": "^2.7.1",
"less-loader": "^2.2.3",
"purifycss-webpack-plugin": "^2.0.3",
"less": "^3.9.0",
"less-loader": "^4.1.0",
"purgecss-webpack-plugin": "^1.4.0",
"style-loader": "^0.13.1",
"url-loader": "^0.5.7",
"webpack-dev-server": "^2.11.1"
"webpack-cli": "^3.2.0",
"webpack-dev-server": "^3.1.14"
},
"dependencies": {
"bootstrap": "^3.3.6",
"classnames": "^2.2.3",
"expect": "^1.20.2",
"font-awesome": "^4.7.0",
"glob-all": "^3.1.0",
"history": "^4.7.2",
"humanize": "0.0.9",
"identity-obj-proxy": "^3.0.0",
@@ -89,6 +90,6 @@
"reselect": "^3.0.1",
"superagent": "^3.8.2",
"superagent-es6-promise": "^1.0.0",
"webpack": "^3.10.0"
"webpack": "^4.28.3"
}
}

File diff suppressed because one or more lines are too long

View File

@@ -16,11 +16,13 @@
var webpack = require('webpack')
var path = require('path')
var glob = require('glob-all')
var CopyWebpackPlugin = require('copy-webpack-plugin')
var purify = require("purifycss-webpack-plugin")
var PurgecssPlugin = require('purgecss-webpack-plugin')
var exports = {
context: __dirname,
mode: 'development',
entry: [
path.resolve(__dirname, 'app/index.js')
],
@@ -99,12 +101,11 @@ var exports = {
{from: 'app/index.html'}
]),
new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
new purify({
basePath: __dirname,
paths: [
"app/index.html",
"app/js/*.js"
]
new PurgecssPlugin({
paths: glob.sync([
path.join(__dirname, 'app/index.html'),
path.join(__dirname, 'app/js/*.js')
])
})
]
}

View File

@@ -16,11 +16,13 @@
var webpack = require('webpack')
var path = require('path')
var glob = require('glob-all')
var CopyWebpackPlugin = require('copy-webpack-plugin')
var purify = require("purifycss-webpack-plugin")
var PurgecssPlugin = require('purgecss-webpack-plugin')
var exports = {
context: __dirname,
mode: 'production',
entry: [
path.resolve(__dirname, 'app/index.js')
],
@@ -74,16 +76,12 @@ var exports = {
{from: 'app/img/logo.svg'},
{from: 'app/index.html'}
]),
new webpack.DefinePlugin({
'process.env.NODE_ENV': '"production"'
}),
new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
new purify({
basePath: __dirname,
paths: [
"app/index.html",
"app/js/*.js"
]
new PurgecssPlugin({
paths: glob.sync([
path.join(__dirname, 'app/index.html'),
path.join(__dirname, 'app/js/*.js')
])
})
]
}

File diff suppressed because it is too large

buildscripts/cross-compile.sh Executable file
View File

@@ -0,0 +1,35 @@
#!/bin/bash
# Enable tracing if set.
[ -n "$BASH_XTRACEFD" ] && set -ex
function _init() {
## All binaries are static make sure to disable CGO.
export CGO_ENABLED=0
## List of architectures and OS to test cross compilation.
SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64"
}
function _build_and_sign() {
local osarch=$1
IFS=/ read -r -a arr <<<"$osarch"
os="${arr[0]}"
arch="${arr[1]}"
package=$(go list -f '{{.ImportPath}}')
printf -- "--> %15s:%s\n" "${osarch}" "${package}"
# Go build to build the binary.
export GOOS=$os
export GOARCH=$arch
go build -tags kqueue -o /dev/null
}
function main() {
echo "Testing builds for OS/Arch: ${SUPPORTED_OSARCH}"
for each_osarch in ${SUPPORTED_OSARCH}; do
_build_and_sign "${each_osarch}"
done
}
_init && main "$@"

buildscripts/gateway-tests.sh Executable file
View File

@@ -0,0 +1,61 @@
#!/bin/bash
#
# Minio Cloud Storage, (C) 2019 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e
set -E
set -o pipefail
export SERVER_ENDPOINT=127.0.0.1:24240
export ENABLE_HTTPS=0
export ACCESS_KEY=minio
export SECRET_KEY=minio123
export MINIO_ACCESS_KEY=minio
export MINIO_SECRET_KEY=minio123
export AWS_ACCESS_KEY_ID=minio
export AWS_SECRET_ACCESS_KEY=minio123
trap "cat server.log;cat gateway.log" SIGHUP SIGINT SIGTERM
./minio --quiet --json server data --address 127.0.0.1:24242 > server.log &
sleep 3
./minio --quiet --json gateway s3 http://127.0.0.1:24242 --address 127.0.0.1:24240 > gateway.log &
sleep 3
mkdir -p /mint
git clone https://github.com/minio/mint /mint
cd /mint
export MINT_ROOT_DIR=${MINT_ROOT_DIR:-/mint}
export MINT_RUN_CORE_DIR="$MINT_ROOT_DIR/run/core"
export MINT_RUN_SECURITY_DIR="$MINT_ROOT_DIR/run/security"
export MINT_MODE="full"
export WGET="wget --quiet --no-check-certificate"
go get github.com/go-ini/ini
./create-data-files.sh
./preinstall.sh
# install mint app packages
for pkg in "build"/*/install.sh; do
echo "Running $pkg"
$pkg
done
./postinstall.sh
/mint/entrypoint.sh || cat server.log gateway.log fail

View File

@@ -4,7 +4,7 @@ set -e
echo "" > coverage.txt
for d in $(go list ./... | grep -v browser); do
go test -coverprofile=profile.out -covermode=atomic "$d"
CGO_ENABLED=0 go test -v -coverprofile=profile.out -covermode=atomic "$d"
if [ -f profile.out ]; then
cat profile.out >> coverage.txt
rm profile.out

View File

@@ -68,6 +68,36 @@ function start_minio_erasure_sets()
echo "$minio_pid"
}
function start_minio_dist_erasure_sets_ipv6()
{
declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9000.log" 2>&1 &
minio_pids[0]=$!
"${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9001.log" 2>&1 &
minio_pids[1]=$!
"${MINIO[@]}" server --address="[::1]:9002" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9002.log" 2>&1 &
minio_pids[2]=$!
"${MINIO[@]}" server --address="[::1]:9003" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9003.log" 2>&1 &
minio_pids[3]=$!
"${MINIO[@]}" server --address="[::1]:9004" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9004.log" 2>&1 &
minio_pids[4]=$!
"${MINIO[@]}" server --address="[::1]:9005" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9005.log" 2>&1 &
minio_pids[5]=$!
"${MINIO[@]}" server --address="[::1]:9006" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9006.log" 2>&1 &
minio_pids[6]=$!
"${MINIO[@]}" server --address="[::1]:9007" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9007.log" 2>&1 &
minio_pids[7]=$!
"${MINIO[@]}" server --address="[::1]:9008" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9008.log" 2>&1 &
minio_pids[8]=$!
"${MINIO[@]}" server --address="[::1]:9009" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9009.log" 2>&1 &
minio_pids[9]=$!
sleep 35
echo "${minio_pids[@]}"
}
function start_minio_dist_erasure_sets()
{
declare -a minio_pids
@@ -161,6 +191,34 @@ function run_test_erasure_sets() {
return "$rv"
}
function run_test_dist_erasure_sets_ipv6()
{
minio_pids=( $(start_minio_dist_erasure_sets_ipv6) )
export SERVER_ENDPOINT="[::1]:9000"
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
for pid in "${minio_pids[@]}"; do
kill "$pid"
done
sleep 3
if [ "$rv" -ne 0 ]; then
for i in $(seq 0 9); do
echo "server$i log:"
cat "$WORK_DIR/dist-minio-v6-900$i.log"
done
fi
for i in $(seq 0 9); do
rm -f "$WORK_DIR/dist-minio-v6-900$i.log"
done
return "$rv"
}
function run_test_dist_erasure_sets()
{
minio_pids=( $(start_minio_dist_erasure_sets) )
@@ -237,6 +295,7 @@ function run_test_gateway_s3()
{
minio_pid="$(start_minio_gateway_s3)"
export SERVER_ENDPOINT="127.0.0.1:9000"
export ACCESS_KEY=Q3AM3UQ867SPQQA43P2F
export SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
@@ -279,6 +338,7 @@ function __init__()
exit 1
fi
sed -i 's|-sS|-sSg|g' "$FUNCTIONAL_TESTS"
chmod a+x "$FUNCTIONAL_TESTS"
}
@@ -319,6 +379,13 @@ function main()
exit 1
fi
echo "Testing in Distributed Erasure setup as sets with ipv6"
if ! run_test_dist_erasure_sets_ipv6; then
echo "FAILED"
rm -fr "$WORK_DIR"
exit 1
fi
echo "Testing in Gateway S3 setup"
if ! run_test_gateway_s3; then
echo "FAILED"


@@ -21,6 +21,7 @@ import (
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/policy"
)
@@ -56,26 +57,28 @@ type accessControlPolicy struct {
func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketACL")
defer logger.AuditLog(w, r, "GetBucketACL", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
// Allow getBucketACL if the policy action is set; since this is a dummy call,
// we simply re-purpose the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Before proceeding validate if bucket exists.
_, err := objAPI.GetBucketInfo(ctx, bucket)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -89,7 +92,7 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
Permission: "FULL_CONTROL",
})
if err := xml.NewEncoder(w).Encode(acl); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -103,27 +106,29 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetObjectACL")
defer logger.AuditLog(w, r, "GetObjectACL", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object := vars["object"]
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
// Allow getObjectACL if the policy action is set; since this is a dummy call,
// we simply re-purpose the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Before proceeding validate if object exists.
_, err := objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -137,7 +142,7 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.
Permission: "FULL_CONTROL",
})
if err := xml.NewEncoder(w).Encode(acl); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
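For context, both dummy ACL handlers above always answer with a single FULL_CONTROL grant for the owner, since Minio does not implement per-object ACLs. A minimal sketch of the XML shape they encode, with illustrative struct tags (the real accessControlPolicy type lives in this file and may differ in detail):

package main

import (
    "encoding/xml"
    "os"
)

// Illustrative mirror of the S3 ACL response schema; the field names
// here are assumptions, not copied from cmd/bucket-acl-handlers.go.
type grant struct {
    Permission string `xml:"Permission"`
}

type accessControlPolicy struct {
    XMLName xml.Name `xml:"AccessControlPolicy"`
    Grants  []grant  `xml:"AccessControlList>Grant"`
}

func main() {
    var acl accessControlPolicy
    // The handlers append exactly one FULL_CONTROL grant before encoding.
    acl.Grants = append(acl.Grants, grant{Permission: "FULL_CONTROL"})
    if err := xml.NewEncoder(os.Stdout).Encode(acl); err != nil {
        panic(err)
    }
}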

File diff suppressed because it is too large


@@ -27,8 +27,8 @@ import (
"net/http/httptest"
"net/url"
"strings"
"sync"
"testing"
"time"
"github.com/gorilla/mux"
"github.com/minio/minio/pkg/auth"
@@ -37,177 +37,203 @@ import (
var (
configJSON = []byte(`{
"version": "29",
"credential": {
"accessKey": "minio",
"secretKey": "minio123"
"version": "33",
"credential": {
"accessKey": "minio",
"secretKey": "minio123"
},
"region": "us-east-1",
"worm": "off",
"storageclass": {
"standard": "",
"rrs": ""
},
"cache": {
"drives": [],
"expiry": 90,
"maxuse": 80,
"exclude": []
},
"kms": {
"vault": {
"endpoint": "",
"auth": {
"type": "",
"approle": {
"id": "",
"secret": ""
}
},
"key-id": {
"name": "",
"version": 0
}
}
},
"notify": {
"amqp": {
"1": {
"enable": false,
"url": "",
"exchange": "",
"routingKey": "",
"exchangeType": "",
"deliveryMode": 0,
"mandatory": false,
"immediate": false,
"durable": false,
"internal": false,
"noWait": false,
"autoDeleted": false
}
},
"elasticsearch": {
"1": {
"enable": false,
"format": "namespace",
"url": "",
"index": ""
}
},
"kafka": {
"1": {
"enable": false,
"brokers": null,
"topic": "",
"tls": {
"enable": false,
"skipVerify": false,
"clientAuth": 0
},
"sasl": {
"enable": false,
"username": "",
"password": ""
}
}
},
"mqtt": {
"1": {
"enable": false,
"broker": "",
"topic": "",
"qos": 0,
"username": "",
"password": "",
"reconnectInterval": 0,
"keepAliveInterval": 0,
"queueDir": "",
"queueLimit": 0
}
},
"mysql": {
"1": {
"enable": false,
"format": "namespace",
"dsnString": "",
"table": "",
"host": "",
"port": "",
"user": "",
"password": "",
"database": ""
}
},
"nats": {
"1": {
"enable": false,
"address": "",
"subject": "",
"username": "",
"password": "",
"token": "",
"secure": false,
"pingInterval": 0,
"streaming": {
"enable": false,
"clusterID": "",
"async": false,
"maxPubAcksInflight": 0
}
}
},
"region": "",
"worm": "off",
"storageclass": {
"standard": "",
"rrs": ""
},
"cache": {
"drives": [],
"expiry": 90,
"maxuse": 80,
"exclude": []
},
"kms": {
"vault": {
"endpoint": "",
"auth": {
"type": "",
"approle": {
"id": "",
"secret": ""
}
},
"key-id": {
"name": "",
"version": 0
}
"nsq": {
"1": {
"enable": false,
"nsqdAddress": "",
"topic": "",
"tls": {
"enable": false,
"skipVerify": false
}
},
"notify": {
"amqp": {
"1": {
"enable": false,
"url": "",
"exchange": "",
"routingKey": "",
"exchangeType": "",
"deliveryMode": 0,
"mandatory": false,
"immediate": false,
"durable": false,
"internal": false,
"noWait": false,
"autoDeleted": false
}
},
"elasticsearch": {
"1": {
"enable": false,
"format": "",
"url": "",
"index": ""
}
},
"kafka": {
"1": {
"enable": false,
"brokers": null,
"topic": "",
"tls" : {
"enable" : false,
"skipVerify" : false,
"clientAuth" : 0
},
"sasl" : {
"enable" : false,
"username" : "",
"password" : ""
}
}
},
"mqtt": {
"1": {
"enable": false,
"broker": "",
"topic": "",
"qos": 0,
"clientId": "",
"username": "",
"password": "",
"reconnectInterval": 0,
"keepAliveInterval": 0
}
},
"mysql": {
"1": {
"enable": false,
"format": "",
"dsnString": "",
"table": "",
"host": "",
"port": "",
"user": "",
"password": "",
"database": ""
}
},
"nats": {
"1": {
"enable": false,
"address": "",
"subject": "",
"username": "",
"password": "",
"token": "",
"secure": false,
"pingInterval": 0,
"streaming": {
"enable": false,
"clusterID": "",
"clientID": "",
"async": false,
"maxPubAcksInflight": 0
}
}
},
"postgresql": {
"1": {
"enable": false,
"format": "",
"connectionString": "",
"table": "",
"host": "",
"port": "",
"user": "",
"password": "",
"database": ""
}
},
"redis": {
"1": {
"enable": false,
"format": "",
"address": "",
"password": "",
"key": ""
}
},
"webhook": {
"1": {
"enable": false,
"endpoint": ""
}
}
},
"logger": {
"console": {
"enabled": true
},
"http": {
"1": {
"enabled": false,
"endpoint": "http://user:example@localhost:9001/api/endpoint"
}
}
}
}`)
}
},
"postgresql": {
"1": {
"enable": false,
"format": "namespace",
"connectionString": "",
"table": "",
"host": "",
"port": "",
"user": "",
"password": "",
"database": ""
}
},
"redis": {
"1": {
"enable": false,
"format": "namespace",
"address": "",
"password": "",
"key": ""
}
},
"webhook": {
"1": {
"enable": false,
"endpoint": ""
}
}
},
"logger": {
"console": {
"enabled": true
},
"http": {
"1": {
"enabled": false,
"endpoint": "https://username:password@example.com/api"
}
}
},
"compress": {
"enabled": false,
"extensions":[".txt",".log",".csv",".json"],
"mime-types":["text/csv","text/plain","application/json"]
},
"openid": {
"jwks": {
"url": ""
}
},
"policy": {
"opa": {
"url": "",
"authToken": ""
}
}
}
`)
)
// adminXLTestBed - encapsulates subsystems that need to be set up for
// admin-handler unit tests.
type adminXLTestBed struct {
configPath string
xlDirs []string
objLayer ObjectLayer
router *mux.Router
xlDirs []string
objLayer ObjectLayer
router *mux.Router
}
// prepareAdminXLTestBed - helper function that sets up a single-node
@@ -243,14 +269,20 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
// Init global heal state
initAllHealState(globalIsXL)
globalNotificationSys = NewNotificationSys(globalServerConfig, globalEndpoints)
globalConfigSys = NewConfigSys()
globalIAMSys = NewIAMSys()
globalIAMSys.Init(objLayer)
// Create new policy system.
globalPolicySys = NewPolicySys()
globalPolicySys.Init(objLayer)
globalNotificationSys = NewNotificationSys(globalServerConfig, globalEndpoints)
globalNotificationSys.Init(objLayer)
// Setup admin mgmt REST API handlers.
adminRouter := mux.NewRouter()
registerAdminRouter(adminRouter)
registerAdminRouter(adminRouter, true, true)
return &adminXLTestBed{
xlDirs: xlDirs,
@@ -281,8 +313,8 @@ func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
for i := 0; i < 10; i++ {
objectName := fmt.Sprintf("%s-%d", objName, i)
_, err = atb.objLayer.PutObject(context.Background(), bucketName, objectName,
mustGetHashReader(t, bytes.NewReader([]byte("hello")),
int64(len("hello")), "", ""), nil, ObjectOptions{})
mustGetPutObjReader(t, bytes.NewReader([]byte("hello")),
int64(len("hello")), "", ""), ObjectOptions{})
if err != nil {
t.Fatalf("Failed to create %s - %v", objectName,
err)
@@ -294,13 +326,13 @@ func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
{
objName := "mpObject"
uploadID, err := atb.objLayer.NewMultipartUpload(context.Background(), bucketName,
objName, nil, ObjectOptions{})
objName, ObjectOptions{})
if err != nil {
t.Fatalf("mp new error: %v", err)
}
_, err = atb.objLayer.PutObjectPart(context.Background(), bucketName, objName,
uploadID, 3, mustGetHashReader(t, bytes.NewReader(
uploadID, 3, mustGetPutObjReader(t, bytes.NewReader(
[]byte("hello")), int64(len("hello")), "", ""), ObjectOptions{})
if err != nil {
t.Fatalf("mp put error: %v", err)
@@ -480,6 +512,8 @@ func getServiceCmdRequest(cmd cmdType, cred auth.Credentials, body []byte) (*htt
// Set body
req.Body = ioutil.NopCloser(bytes.NewReader(body))
req.ContentLength = int64(len(body))
// Set sha-sum header
req.Header.Set("X-Amz-Content-Sha256", getSHA256Hash(body))
@@ -504,17 +538,22 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
// single node setup, this degenerates to a simple function
// call under the hood.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
var wg sync.WaitGroup
// Set up a goroutine to simulate ServerRouter's
// handleServiceSignals for stop and restart commands.
if cmd == restartCmd {
go testServiceSignalReceiver(cmd, t)
wg.Add(1)
go func() {
defer wg.Done()
testServiceSignalReceiver(cmd, t)
}()
}
credentials := globalServerConfig.GetCredential()
body, err := json.Marshal(madmin.ServiceAction{
cmd.toServiceActionValue()})
Action: cmd.toServiceActionValue()})
if err != nil {
t.Fatalf("JSONify error: %v", err)
}
@@ -545,6 +584,9 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
t.Errorf("Expected to receive %d status code but received %d. Body (%s)",
http.StatusOK, rec.Code, string(resp))
}
// Wait until the testServiceSignalReceiver() goroutine quits.
wg.Wait()
}
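The sync.WaitGroup added above is the standard way to keep a test from leaking its helper goroutine; reduced to a self-contained sketch (names are generic, not from the codebase):

package main

import (
    "fmt"
    "sync"
)

func main() {
    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        // Stands in for testServiceSignalReceiver doing its work.
        fmt.Println("simulated signal receiver done")
    }()
    // Block until the helper finishes, so the test (here, main)
    // never returns while the goroutine is still running.
    wg.Wait()
}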
// Test for service status management REST API.
@@ -569,7 +611,6 @@ func TestServiceSetCreds(t *testing.T) {
// single node setup, this degenerates to a simple function
// call under the hood.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
credentials := globalServerConfig.GetCredential()
@@ -601,7 +642,7 @@ func TestServiceSetCreds(t *testing.T) {
t.Fatalf("JSONify err: %v", err)
}
ebody, err := madmin.EncryptServerConfigData(credentials.SecretKey, body)
ebody, err := madmin.EncryptData(credentials.SecretKey, body)
if err != nil {
t.Fatal(err)
}
@@ -668,7 +709,6 @@ func TestGetConfigHandler(t *testing.T) {
// Initialize admin peers to make admin RPC calls.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
// Prepare query params for get-config mgmt REST API.
queryVal := url.Values{}
@@ -697,18 +737,13 @@ func TestSetConfigHandler(t *testing.T) {
// Initialize admin peers to make admin RPC calls.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
// SetConfigHandler restarts minio setup - need to start a
// signal receiver to receive on globalServiceSignalCh.
go testServiceSignalReceiver(restartCmd, t)
// Prepare query params for set-config mgmt REST API.
queryVal := url.Values{}
queryVal.Set("config", "")
password := globalServerConfig.GetCredential().SecretKey
econfigJSON, err := madmin.EncryptServerConfigData(password, configJSON)
econfigJSON, err := madmin.EncryptData(password, configJSON)
if err != nil {
t.Fatal(err)
}
@@ -722,13 +757,13 @@ func TestSetConfigHandler(t *testing.T) {
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
t.Errorf("Expected to succeed but failed with %d", rec.Code)
t.Errorf("Expected to succeed but failed with %d, body: %s", rec.Code, rec.Body)
}
// Check that a very large config file returns an error.
{
// Make a large enough config string
invalidCfg := []byte(strings.Repeat("A", maxConfigJSONSize+1))
invalidCfg := []byte(strings.Repeat("A", maxEConfigJSONSize+1))
req, err := buildAdminRequest(queryVal, http.MethodPut, "/config",
int64(len(invalidCfg)), bytes.NewReader(invalidCfg))
if err != nil {
@@ -737,7 +772,7 @@ func TestSetConfigHandler(t *testing.T) {
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
respBody := string(rec.Body.Bytes())
respBody := rec.Body.String()
if rec.Code != http.StatusBadRequest ||
!strings.Contains(respBody, "Configuration data provided exceeds the allowed maximum of") {
t.Errorf("Got unexpected response code or body %d - %s", rec.Code, respBody)
@@ -756,9 +791,9 @@ func TestSetConfigHandler(t *testing.T) {
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
respBody := string(rec.Body.Bytes())
respBody := rec.Body.String()
if rec.Code != http.StatusBadRequest ||
!strings.Contains(respBody, "JSON configuration provided has objects with duplicate keys") {
!strings.Contains(respBody, "JSON configuration provided is of incorrect format") {
t.Errorf("Got unexpected response code or body %d - %s", rec.Code, respBody)
}
}
@@ -773,7 +808,6 @@ func TestAdminServerInfo(t *testing.T) {
// Initialize admin peers to make admin RPC calls.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
// Prepare query params for set-config mgmt REST API.
queryVal := url.Values{}
@@ -813,8 +847,8 @@ func TestAdminServerInfo(t *testing.T) {
}
}
// TestToAdminAPIErr - test for toAdminAPIErr helper function.
func TestToAdminAPIErr(t *testing.T) {
// TestToAdminAPIErrCode - test for toAdminAPIErrCode helper function.
func TestToAdminAPIErrCode(t *testing.T) {
testCases := []struct {
err error
expectedAPIErr APIErrorCode
@@ -832,180 +866,15 @@ func TestToAdminAPIErr(t *testing.T) {
// 3. Non-admin API specific error.
{
err: errDiskNotFound,
expectedAPIErr: toAPIErrorCode(errDiskNotFound),
expectedAPIErr: toAPIErrorCode(context.Background(), errDiskNotFound),
},
}
for i, test := range testCases {
actualErr := toAdminAPIErrCode(test.err)
actualErr := toAdminAPIErrCode(context.Background(), test.err)
if actualErr != test.expectedAPIErr {
t.Errorf("Test %d: Expected %v but received %v",
i+1, test.expectedAPIErr, actualErr)
}
}
}
func mkHealStartReq(t *testing.T, bucket, prefix string,
opts madmin.HealOpts) *http.Request {
body, err := json.Marshal(opts)
if err != nil {
t.Fatalf("Unable marshal heal opts")
}
path := fmt.Sprintf("/minio/admin/v1/heal/%s", bucket)
if bucket != "" && prefix != "" {
path += "/" + prefix
}
req, err := newTestRequest("POST", path,
int64(len(body)), bytes.NewReader(body))
if err != nil {
t.Fatalf("Failed to construct request - %v", err)
}
cred := globalServerConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
t.Fatalf("Failed to sign request - %v", err)
}
return req
}
func mkHealStatusReq(t *testing.T, bucket, prefix,
clientToken string) *http.Request {
path := fmt.Sprintf("/minio/admin/v1/heal/%s", bucket)
if bucket != "" && prefix != "" {
path += "/" + prefix
}
path += fmt.Sprintf("?clientToken=%s", clientToken)
req, err := newTestRequest("POST", path, 0, nil)
if err != nil {
t.Fatalf("Failed to construct request - %v", err)
}
cred := globalServerConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
t.Fatalf("Failed to sign request - %v", err)
}
return req
}
func collectHealResults(t *testing.T, adminTestBed *adminXLTestBed, bucket,
prefix, clientToken string, timeLimitSecs int) madmin.HealTaskStatus {
var res, cur madmin.HealTaskStatus
// loop and fetch heal status. have a time-limit to loop over
// all statuses.
timeLimit := UTCNow().Add(time.Second * time.Duration(timeLimitSecs))
for cur.Summary != healStoppedStatus && cur.Summary != healFinishedStatus {
if UTCNow().After(timeLimit) {
t.Fatalf("heal-status loop took too long - clientToken: %s", clientToken)
}
req := mkHealStatusReq(t, bucket, prefix, clientToken)
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if http.StatusOK != rec.Code {
t.Errorf("Unexpected status code - got %d but expected %d",
rec.Code, http.StatusOK)
break
}
err := json.NewDecoder(rec.Body).Decode(&cur)
if err != nil {
t.Errorf("unable to unmarshal resp: %v", err)
break
}
// all results are accumulated into a slice
// and returned to caller in the end
allItems := append(res.Items, cur.Items...)
res = cur
res.Items = allItems
time.Sleep(time.Millisecond * 200)
}
return res
}
func TestHealStartNStatusHandler(t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
defer adminTestBed.TearDown()
// gen. test data
adminTestBed.GenerateHealTestData(t)
defer adminTestBed.CleanupHealTestData(t)
// Prepare heal-start request to send to the server.
healOpts := madmin.HealOpts{
Recursive: true,
DryRun: false,
}
bucketName, objName := "mybucket", "myobject-0"
var hss madmin.HealStartSuccess
{
req := mkHealStartReq(t, bucketName, objName, healOpts)
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if http.StatusOK != rec.Code {
t.Errorf("Unexpected status code - got %d but expected %d",
rec.Code, http.StatusOK)
}
err = json.Unmarshal(rec.Body.Bytes(), &hss)
if err != nil {
t.Fatal("unable to unmarshal response")
}
if hss.ClientToken == "" {
t.Errorf("unexpected result")
}
}
{
// test with an invalid client token
req := mkHealStatusReq(t, bucketName, objName, hss.ClientToken+hss.ClientToken)
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if rec.Code != http.StatusBadRequest {
t.Errorf("Unexpected status code")
}
}
{
// fetch heal status
results := collectHealResults(t, adminTestBed, bucketName,
objName, hss.ClientToken, 5)
// check if we got back an expected record
foundIt := false
for _, item := range results.Items {
if item.Type == madmin.HealItemObject &&
item.Bucket == bucketName && item.Object == objName {
foundIt = true
}
}
if !foundIt {
t.Error("did not find expected heal record in heal results")
}
// check that the heal settings in the results is the
// same as what we started the heal seq. with.
if results.HealSettings != healOpts {
t.Errorf("unexpected heal settings: %v",
results.HealSettings)
}
if results.Summary == healStoppedStatus {
t.Errorf("heal sequence stopped unexpectedly")
}
}
}


@@ -20,6 +20,7 @@ import (
"context"
"encoding/json"
"fmt"
"net/http"
"runtime"
"strings"
"sync"
@@ -61,9 +62,8 @@ var (
errHealPushStopNDiscard = fmt.Errorf("heal push stopped due to heal stop signal")
errHealStopSignalled = fmt.Errorf("heal stop signaled")
errFnHealFromAPIErr = func(err error) error {
errCode := toAPIErrorCode(err)
apiErr := getAPIError(errCode)
errFnHealFromAPIErr = func(ctx context.Context, err error) error {
apiErr := toAPIError(ctx, err)
return fmt.Errorf("Heal internal error: %s: %s",
apiErr.Code, apiErr.Description)
}
@@ -112,6 +112,32 @@ func initAllHealState(isErasureMode bool) {
globalAllHealState = allHealState{
healSeqMap: make(map[string]*healSequence),
}
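// A single long-lived cleaner goroutine replaces the per-sequence
// cleanup goroutines removed from LaunchNewHealSequence below.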
go globalAllHealState.periodicHealSeqsClean()
}
func (ahs *allHealState) periodicHealSeqsClean() {
// periodicHealSeqsClean removes ended heal sequences from the
// global state once their keep-state timeout has elapsed.
ticker := time.NewTicker(time.Minute * 5)
defer ticker.Stop()
for {
select {
case <-ticker.C:
now := UTCNow()
ahs.Lock()
for path, h := range ahs.healSeqMap {
if h.hasEnded() && h.endTime.Add(keepHealSeqStateDuration).Before(now) {
delete(ahs.healSeqMap, path)
}
}
ahs.Unlock()
case <-GlobalServiceDoneCh:
// server could be restarting - need
// to exit immediately
return
}
}
}
// getHealSequence - Retrieve a heal sequence by path. The second
@@ -123,6 +149,35 @@ func (ahs *allHealState) getHealSequence(path string) (h *healSequence, exists b
return h, exists
}
func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
var hsp madmin.HealStopSuccess
he, exists := ahs.getHealSequence(path)
if !exists {
hsp = madmin.HealStopSuccess{
ClientToken: "invalid",
StartTime: UTCNow(),
}
} else {
hsp = madmin.HealStopSuccess{
ClientToken: he.clientToken,
ClientAddress: he.clientAddress,
StartTime: he.startTime,
}
he.stop()
for !he.hasEnded() {
time.Sleep(1 * time.Second)
}
ahs.Lock()
defer ahs.Unlock()
// Heal sequence explicitly stopped, remove it.
delete(ahs.healSeqMap, path)
}
b, err := json.Marshal(&hsp)
return b, toAdminAPIErr(context.Background(), err)
}
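stopHealSequence signals the sequence and then polls until it acknowledges the stop before deleting it from the map; the same signal-then-poll shape in isolation (healSeq and its channels are hypothetical stand-ins for the healSequence internals):

package main

import (
    "fmt"
    "time"
)

// healSeq is a hypothetical stand-in for healSequence.
type healSeq struct {
    stopCh chan struct{} // closed by stop() to request termination
    ended  chan struct{} // closed by the worker once it has wound down
}

func (h *healSeq) stop() { close(h.stopCh) }

func (h *healSeq) hasEnded() bool {
    select {
    case <-h.ended:
        return true
    default:
        return false
    }
}

func main() {
    h := &healSeq{stopCh: make(chan struct{}), ended: make(chan struct{})}
    go func() { // worker: runs until stop is signalled
        <-h.stopCh
        time.Sleep(20 * time.Millisecond) // simulated wind-down
        close(h.ended)
    }()
    h.stop()
    for !h.hasEnded() { // poll until the sequence acknowledges the stop
        time.Sleep(10 * time.Millisecond)
    }
    fmt.Println("heal sequence ended; safe to delete from healSeqMap")
}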
// LaunchNewHealSequence - launches a background routine that performs
// healing according to the healSequence argument. For each heal
// sequence, state is stored in the `globalAllHealState`, which is a
@@ -134,7 +189,7 @@ func (ahs *allHealState) getHealSequence(path string) (h *healSequence, exists b
// background routine to clean up heal results after the
// aforementioned duration.
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
respBytes []byte, errCode APIErrorCode, errMsg string) {
respBytes []byte, apiErr APIError, errMsg string) {
existsAndLive := false
he, exists := ahs.getHealSequence(h.path)
@@ -143,22 +198,21 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
existsAndLive = true
}
}
if existsAndLive {
// A heal sequence exists on the given path.
if h.forceStarted {
// stop the running heal sequence - wait for
// it to finish.
// stop the running heal sequence - wait for it to finish.
he.stop()
for !he.hasEnded() {
time.Sleep(10 * time.Second)
time.Sleep(1 * time.Second)
}
} else {
errMsg = "Heal is already running on the given path " +
"(use force-start option to stop and start afresh). " +
fmt.Sprintf("The heal was started by IP %s at %s",
h.clientAddress, h.startTime)
return nil, ErrHealAlreadyRunning, errMsg
fmt.Sprintf("The heal was started by IP %s at %s, token is %s",
h.clientAddress, h.startTime.Format(http.TimeFormat), h.clientToken)
return nil, errorCodes.ToAPIErr(ErrHealAlreadyRunning), errMsg
}
}
@@ -173,7 +227,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
errMsg = "The provided heal sequence path overlaps with an existing " +
fmt.Sprintf("heal path: %s", k)
return nil, ErrHealOverlappingPaths, errMsg
return nil, errorCodes.ToAPIErr(ErrHealOverlappingPaths), errMsg
}
}
@@ -183,51 +237,16 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
// Launch top-level background heal go-routine
go h.healSequenceStart()
// Launch clean-up routine to remove this heal sequence (after
// it ends) from the global state after timeout has elapsed.
go func() {
var keepStateTimeout <-chan time.Time
ticker := time.NewTicker(time.Minute)
defer ticker.Stop()
everyMinute := ticker.C
for {
select {
// Check every minute if heal sequence has ended.
case <-everyMinute:
if h.hasEnded() {
keepStateTimeout = time.After(keepHealSeqStateDuration)
everyMinute = nil
}
// This case does not fire until the heal
// sequence completes.
case <-keepStateTimeout:
// Heal sequence has ended, keep
// results state duration has elapsed,
// so purge state.
ahs.Lock()
defer ahs.Unlock()
delete(ahs.healSeqMap, h.path)
return
case <-globalServiceDoneCh:
// server could be restarting - need
// to exit immediately
return
}
}
}()
b, err := json.Marshal(madmin.HealStartSuccess{
ClientToken: h.clientToken,
ClientAddress: h.clientAddress,
StartTime: h.startTime,
})
if err != nil {
logger.LogIf(context.Background(), err)
return nil, ErrInternalError, ""
logger.LogIf(h.ctx, err)
return nil, toAPIError(h.ctx, err), ""
}
return b, ErrNone, ""
return b, noError, ""
}
// PopHealStatusJSON - Called by heal-status API. It fetches the heal
@@ -272,7 +291,7 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
jbytes, err := json.Marshal(h.currentStatus)
if err != nil {
logger.LogIf(context.Background(), err)
logger.LogIf(h.ctx, err)
return nil, ErrInternalError
}
@@ -285,12 +304,15 @@ type healSequence struct {
// bucket, and prefix on which heal seq. was initiated
bucket, objPrefix string
// path is just bucket + "/" + objPrefix
// path is just pathJoin(bucket, objPrefix)
path string
// time at which heal sequence was started
startTime time.Time
// time at which heal sequence has ended
endTime time.Time
// Heal client info
clientToken, clientAddress string
@@ -330,7 +352,7 @@ func newHealSequence(bucket, objPrefix, clientAddr string,
return &healSequence{
bucket: bucket,
objPrefix: objPrefix,
path: bucket + "/" + objPrefix,
path: pathJoin(bucket, objPrefix),
startTime: UTCNow(),
clientToken: mustGetUUID(),
clientAddress: clientAddr,
@@ -385,7 +407,6 @@ func (h *healSequence) stop() {
// sequence automatically resumes. The return value indicates if the
// operation succeeded.
func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
// start a timer to keep an upper time limit to find an empty
// slot to add the given heal result - if no slot is found it
// means that the server is holding the maximum amount of
@@ -469,6 +490,7 @@ func (h *healSequence) healSequenceStart() {
select {
case err, ok := <-h.traverseAndHealDoneCh:
h.endTime = UTCNow()
h.currentStatus.updateLock.Lock()
defer h.currentStatus.updateLock.Unlock()
// Heal traversal is complete.
@@ -482,6 +504,7 @@ func (h *healSequence) healSequenceStart() {
}
case <-h.stopSignalCh:
h.endTime = UTCNow()
h.currentStatus.updateLock.Lock()
h.currentStatus.Summary = healStoppedStatus
h.currentStatus.FailureDetail = errHealStopSignalled.Error()
@@ -521,6 +544,12 @@ func (h *healSequence) traverseAndHeal() {
// Start with format healing
checkErr(h.healDiskFormat)
// Start healing the config prefix.
checkErr(h.healMinioSysMeta(minioConfigPrefix))
// Start healing the bucket config prefix.
checkErr(h.healMinioSysMeta(bucketConfigPrefix))
// Heal buckets and objects
checkErr(h.healBuckets)
@@ -531,9 +560,74 @@ func (h *healSequence) traverseAndHeal() {
close(h.traverseAndHealDoneCh)
}
// healMinioSysMeta - heals all files under a given meta prefix; returns a function
// which in turn heals the respective meta directory path and any files in it.
func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
return func() error {
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return errServerNotInitialized
}
// NOTE: Healing of meta is run regardless of any bucket being
// selected, to ensure that meta files are always up to date and correct.
marker := ""
isTruncated := true
for isTruncated {
if globalHTTPServer != nil {
// Wait at most 1 minute for in-progress requests
// to drain before proceeding to heal
waitCount := 60
// Any requests in progress, delay the heal.
for globalHTTPServer.GetRequestCount() > 2 && waitCount > 0 {
waitCount--
time.Sleep(1 * time.Second)
}
}
// Lists all objects under `config` prefix.
objectInfos, err := objectAPI.ListObjectsHeal(h.ctx, minioMetaBucket, metaPrefix,
marker, "", 1000)
if err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}
for index := range objectInfos.Objects {
if h.isQuitting() {
return errHealStopSignalled
}
o := objectInfos.Objects[index]
res, herr := objectAPI.HealObject(h.ctx, o.Bucket, o.Name, h.settings.DryRun, h.settings.Remove)
// The object might have been deleted by the time heal was
// attempted; ignore this file and move on.
if isErrObjectNotFound(herr) {
continue
}
if herr != nil {
return herr
}
res.Type = madmin.HealItemBucketMetadata
if err = h.pushHealResultItem(res); err != nil {
return err
}
}
isTruncated = objectInfos.IsTruncated
marker = objectInfos.NextMarker
}
return nil
}
}
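The loop inside healMinioSysMeta is the usual marker-based pagination pattern; distilled below, with a stubbed lister standing in for ObjectLayer.ListObjectsHeal:

package main

import "fmt"

// page is a stand-in for the ListObjectsHeal result.
type page struct {
    Objects     []string
    IsTruncated bool
    NextMarker  string
}

// list is a stubbed lister returning two pages of entries.
func list(marker string) page {
    if marker == "" {
        return page{
            Objects:     []string{"config/a", "config/b"},
            IsTruncated: true,
            NextMarker:  "config/b",
        }
    }
    return page{Objects: []string{"config/c"}}
}

func main() {
    marker := ""
    isTruncated := true
    for isTruncated {
        p := list(marker)
        for _, o := range p.Objects {
            fmt.Println("heal", o) // heal each listed entry
        }
        isTruncated = p.IsTruncated
        marker = p.NextMarker
    }
}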
// healDiskFormat - heals format.json, return value indicates if a
// failure error occurred.
func (h *healSequence) healDiskFormat() error {
if h.isQuitting() {
return errHealStopSignalled
}
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil {
@@ -544,13 +638,18 @@ func (h *healSequence) healDiskFormat() error {
// return any error, ignore error returned when disks have
// already healed.
if err != nil && err != errNoHealRequired {
return errFnHealFromAPIErr(err)
return errFnHealFromAPIErr(h.ctx, err)
}
// Healing succeeded: notify the peers to reload format and re-initialize disks.
// Peers are notified only when healing actually succeeded (err == nil).
if err == nil {
peersReInitFormat(globalAdminPeers, h.settings.DryRun)
for _, nerr := range globalNotificationSys.ReloadFormat(h.settings.DryRun) {
if nerr.Err != nil {
logger.GetReqInfo(h.ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(h.ctx, nerr.Err)
}
}
}
// Push format heal result
@@ -559,6 +658,10 @@ func (h *healSequence) healDiskFormat() error {
// healBuckets - check for all buckets heal or just particular bucket.
func (h *healSequence) healBuckets() error {
if h.isQuitting() {
return errHealStopSignalled
}
// 1. If a bucket was specified, heal only the bucket.
if h.bucket != "" {
return h.healBucket(h.bucket)
@@ -572,7 +675,7 @@ func (h *healSequence) healBuckets() error {
buckets, err := objectAPI.ListBucketsHeal(h.ctx)
if err != nil {
return errFnHealFromAPIErr(err)
return errFnHealFromAPIErr(h.ctx, err)
}
for _, bucket := range buckets {
@@ -586,36 +689,29 @@ func (h *healSequence) healBuckets() error {
// healBucket - traverses and heals given bucket
func (h *healSequence) healBucket(bucket string) error {
if h.isQuitting() {
return errHealStopSignalled
}
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return errServerNotInitialized
}
results, err := objectAPI.HealBucket(h.ctx, bucket, h.settings.DryRun)
// push any available results before checking for error
for _, result := range results {
if perr := h.pushHealResultItem(result); perr != nil {
return perr
}
}
result, err := objectAPI.HealBucket(h.ctx, bucket, h.settings.DryRun, h.settings.Remove)
// handle heal-bucket error
if err != nil {
return err
}
if err = h.pushHealResultItem(result); err != nil {
return err
}
if !h.settings.Recursive {
if h.objPrefix != "" {
// Check if an object named as the objPrefix exists,
// and if so heal it.
_, err = objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix, ObjectOptions{})
if err == nil {
err = h.healObject(bucket, h.objPrefix)
if err != nil {
if err = h.healObject(bucket, h.objPrefix); err != nil {
return err
}
}
@@ -624,7 +720,7 @@ func (h *healSequence) healBucket(bucket string) error {
return nil
}
entries := runtime.NumCPU() * globalEndpoints.Nodes()
entries := runtime.NumCPU()
marker := ""
isTruncated := true
@@ -634,7 +730,7 @@ func (h *healSequence) healBucket(bucket string) error {
// before proceeding to heal
waitCount := 60
// Any requests in progress, delay the heal.
for globalHTTPServer.GetRequestCount() > 0 && waitCount > 0 {
for globalHTTPServer.GetRequestCount() > 2 && waitCount > 0 {
waitCount--
time.Sleep(1 * time.Second)
}
@@ -644,7 +740,7 @@ func (h *healSequence) healBucket(bucket string) error {
objectInfos, err := objectAPI.ListObjectsHeal(h.ctx, bucket,
h.objPrefix, marker, "", entries)
if err != nil {
return errFnHealFromAPIErr(err)
return errFnHealFromAPIErr(h.ctx, err)
}
g := errgroup.WithNErrs(len(objectInfos.Objects))
@@ -680,7 +776,10 @@ func (h *healSequence) healObject(bucket, object string) error {
return errServerNotInitialized
}
hri, err := objectAPI.HealObject(h.ctx, bucket, object, h.settings.DryRun)
hri, err := objectAPI.HealObject(h.ctx, bucket, object, h.settings.DryRun, h.settings.Remove)
if isErrObjectNotFound(err) {
return nil
}
if err != nil {
hri.Detail = err.Error()
}
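Both healMinioSysMeta and healBucket throttle themselves while client traffic is in flight; the backoff loop reduced to its essentials (getRequestCount is a decaying stub so this sketch terminates):

package main

import (
    "fmt"
    "time"
)

var inflight = 5

// getRequestCount stands in for globalHTTPServer.GetRequestCount();
// it decays on each call so the example finishes.
func getRequestCount() int {
    inflight--
    return inflight
}

func main() {
    // Wait at most 60 iterations (seconds, in the real code) for
    // in-progress requests to drain before proceeding to heal.
    waitCount := 60
    for getRequestCount() > 2 && waitCount > 0 {
        waitCount--
        time.Sleep(time.Millisecond) // 1 * time.Second in the real code
    }
    fmt.Println("request load low enough; proceeding with heal")
}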


@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2016, 2017, 2018, 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,7 +31,7 @@ type adminAPIHandlers struct {
}
// registerAdminRouter - Add handler functions for each service REST API routes.
func registerAdminRouter(router *mux.Router) {
func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) {
adminAPI := adminAPIHandlers{}
// Admin router
@@ -53,28 +53,71 @@ func registerAdminRouter(router *mux.Router) {
// Info operations
adminV1Router.Methods(http.MethodGet).Path("/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
/// Heal operations
if globalIsDistXL || globalIsXL {
/// Heal operations
// Heal processing endpoint.
adminV1Router.Methods(http.MethodPost).Path("/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
// Heal processing endpoint.
adminV1Router.Methods(http.MethodPost).Path("/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
/// Health operations
}
// Performance command - return performance details based on input type
adminV1Router.Methods(http.MethodGet).Path("/performance").HandlerFunc(httpTraceAll(adminAPI.PerfInfoHandler)).Queries("perfType", "{perfType:.*}")
// Profiling operations
adminV1Router.Methods(http.MethodPost).Path("/profiling/start/{profiler}").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler))
adminV1Router.Methods(http.MethodPost).Path("/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
Queries("profilerType", "{profilerType:.*}")
adminV1Router.Methods(http.MethodGet).Path("/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))
/// Config operations
if enableConfigOps {
// Update credentials
adminV1Router.Methods(http.MethodPut).Path("/config/credential").HandlerFunc(httpTraceHdrs(adminAPI.UpdateAdminCredentialsHandler))
// Get config
adminV1Router.Methods(http.MethodGet).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
// Set config
adminV1Router.Methods(http.MethodPut).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
// Update credentials
adminV1Router.Methods(http.MethodPut).Path("/config/credential").HandlerFunc(httpTraceHdrs(adminAPI.UpdateCredentialsHandler))
// Get config
adminV1Router.Methods(http.MethodGet).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
// Set config
adminV1Router.Methods(http.MethodPut).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
// Get config keys/values
adminV1Router.Methods(http.MethodGet).Path("/config-keys").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKeysHandler))
// Set config keys/values
adminV1Router.Methods(http.MethodPut).Path("/config-keys").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKeysHandler))
}
// Get config keys/values
adminV1Router.Methods(http.MethodGet).Path("/config-keys").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKeysHandler))
// Set config keys/values
adminV1Router.Methods(http.MethodPut).Path("/config-keys").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKeysHandler))
if enableIAMOps {
// -- IAM APIs --
// Add policy IAM
adminV1Router.Methods(http.MethodPut).Path("/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name",
"{name:.*}")
// Add user IAM
adminV1Router.Methods(http.MethodPut).Path("/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
adminV1Router.Methods(http.MethodPut).Path("/set-user-policy").HandlerFunc(httpTraceHdrs(adminAPI.SetUserPolicy)).
Queries("accessKey", "{accessKey:.*}").Queries("name", "{name:.*}")
adminV1Router.Methods(http.MethodPut).Path("/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).
Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
// Remove policy IAM
adminV1Router.Methods(http.MethodDelete).Path("/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
// Remove user IAM
adminV1Router.Methods(http.MethodDelete).Path("/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
// List users
adminV1Router.Methods(http.MethodGet).Path("/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))
// List policies
adminV1Router.Methods(http.MethodGet).Path("/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
}
// -- Top APIs --
// Top locks
adminV1Router.Methods(http.MethodGet).Path("/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
// If none of the routes match, return error.
adminV1Router.NotFoundHandler = http.HandlerFunc(httpTraceHdrs(notFoundHandlerJSON))
}
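The new enableConfigOps/enableIAMOps parameters let callers register a reduced admin API surface (for example, in gateway mode). The same gating pattern with gorilla/mux, in a minimal sketch where the paths and handlers are placeholders rather than the real admin routes:

package main

import (
    "log"
    "net/http"

    "github.com/gorilla/mux"
)

func registerRouter(router *mux.Router, enableConfigOps bool) {
    r := router.PathPrefix("/minio/admin/v1").Subrouter()
    r.Methods(http.MethodGet).Path("/info").HandlerFunc(info)
    if enableConfigOps {
        // Config endpoints are skipped entirely when disabled,
        // so requests to them fall through to the NotFoundHandler.
        r.Methods(http.MethodGet).Path("/config").HandlerFunc(getConfig)
    }
}

func info(w http.ResponseWriter, r *http.Request)      { w.Write([]byte("info")) }
func getConfig(w http.ResponseWriter, r *http.Request) { w.Write([]byte("config")) }

func main() {
    router := mux.NewRouter()
    registerRouter(router, false) // e.g., gateway mode disables config ops
    log.Fatal(http.ListenAndServe(":8080", router))
}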


@@ -1,295 +0,0 @@
/*
* Minio Cloud Storage, (C) 2014, 2015, 2016, 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"crypto/tls"
"fmt"
"net"
"sort"
"strings"
"sync"
"time"
"github.com/minio/minio/cmd/logger"
xnet "github.com/minio/minio/pkg/net"
)
var errUnsupportedSignal = fmt.Errorf("unsupported signal: only restart and stop signals are supported")
// AdminRPCClient - admin RPC client talks to admin RPC server.
type AdminRPCClient struct {
*RPCClient
}
// SignalService - calls SignalService RPC.
func (rpcClient *AdminRPCClient) SignalService(signal serviceSignal) (err error) {
args := SignalServiceArgs{Sig: signal}
reply := VoidReply{}
return rpcClient.Call(adminServiceName+".SignalService", &args, &reply)
}
// ReInitFormat - re-initialize disk format, remotely.
func (rpcClient *AdminRPCClient) ReInitFormat(dryRun bool) error {
args := ReInitFormatArgs{DryRun: dryRun}
reply := VoidReply{}
return rpcClient.Call(adminServiceName+".ReInitFormat", &args, &reply)
}
// ServerInfo - returns the server info of the server to which the RPC call is made.
func (rpcClient *AdminRPCClient) ServerInfo() (sid ServerInfoData, err error) {
err = rpcClient.Call(adminServiceName+".ServerInfo", &AuthArgs{}, &sid)
return sid, err
}
// GetConfig - returns config.json of the remote server.
func (rpcClient *AdminRPCClient) GetConfig() ([]byte, error) {
args := AuthArgs{}
var reply []byte
err := rpcClient.Call(adminServiceName+".GetConfig", &args, &reply)
return reply, err
}
// StartProfiling - starts profiling in the remote server.
func (rpcClient *AdminRPCClient) StartProfiling(profiler string) error {
args := StartProfilingArgs{Profiler: profiler}
reply := VoidReply{}
return rpcClient.Call(adminServiceName+".StartProfiling", &args, &reply)
}
// DownloadProfilingData - returns profiling data of the remote server.
func (rpcClient *AdminRPCClient) DownloadProfilingData() ([]byte, error) {
args := AuthArgs{}
var reply []byte
err := rpcClient.Call(adminServiceName+".DownloadProfilingData", &args, &reply)
return reply, err
}
// NewAdminRPCClient - returns new admin RPC client.
func NewAdminRPCClient(host *xnet.Host) (*AdminRPCClient, error) {
scheme := "http"
if globalIsSSL {
scheme = "https"
}
serviceURL := &xnet.URL{
Scheme: scheme,
Host: host.String(),
Path: adminServicePath,
}
var tlsConfig *tls.Config
if globalIsSSL {
tlsConfig = &tls.Config{
ServerName: host.Name,
RootCAs: globalRootCAs,
}
}
rpcClient, err := NewRPCClient(
RPCClientArgs{
NewAuthTokenFunc: newAuthToken,
RPCVersion: globalRPCAPIVersion,
ServiceName: adminServiceName,
ServiceURL: serviceURL,
TLSConfig: tlsConfig,
},
)
if err != nil {
return nil, err
}
return &AdminRPCClient{rpcClient}, nil
}
// adminCmdRunner - abstracts local and remote execution of admin
// commands like service stop and service restart.
type adminCmdRunner interface {
SignalService(s serviceSignal) error
ReInitFormat(dryRun bool) error
ServerInfo() (ServerInfoData, error)
GetConfig() ([]byte, error)
StartProfiling(string) error
DownloadProfilingData() ([]byte, error)
}
// adminPeer - represents an entity that implements admin API RPCs.
type adminPeer struct {
addr string
cmdRunner adminCmdRunner
isLocal bool
}
// type alias for a collection of adminPeer.
type adminPeers []adminPeer
// makeAdminPeers - helper function to construct a collection of adminPeer.
func makeAdminPeers(endpoints EndpointList) (adminPeerList adminPeers) {
localAddr := GetLocalPeer(endpoints)
if strings.HasPrefix(localAddr, "127.0.0.1:") {
// Use first IPv4 instead of loopback address.
localAddr = net.JoinHostPort(sortIPs(localIP4.ToSlice())[0], globalMinioPort)
}
adminPeerList = append(adminPeerList, adminPeer{
addr: localAddr,
cmdRunner: localAdminClient{},
isLocal: true,
})
for _, hostStr := range GetRemotePeers(endpoints) {
host, err := xnet.ParseHost(hostStr)
logger.FatalIf(err, "Unable to parse Admin RPC Host")
rpcClient, err := NewAdminRPCClient(host)
logger.FatalIf(err, "Unable to initialize Admin RPC Client")
adminPeerList = append(adminPeerList, adminPeer{
addr: hostStr,
cmdRunner: rpcClient,
})
}
return adminPeerList
}
// peersReInitFormat - reinitialize remote object layers to new format.
func peersReInitFormat(peers adminPeers, dryRun bool) error {
errs := make([]error, len(peers))
// Send ReInitFormat RPC call to all nodes.
// for local adminPeer this is a no-op.
wg := sync.WaitGroup{}
for i, peer := range peers {
wg.Add(1)
go func(idx int, peer adminPeer) {
defer wg.Done()
if !peer.isLocal {
errs[idx] = peer.cmdRunner.ReInitFormat(dryRun)
}
}(i, peer)
}
wg.Wait()
return nil
}
// Initialize global adminPeer collection.
func initGlobalAdminPeers(endpoints EndpointList) {
globalAdminPeers = makeAdminPeers(endpoints)
}
// invokeServiceCmd - Invoke Restart/Stop command.
func invokeServiceCmd(cp adminPeer, cmd serviceSignal) (err error) {
switch cmd {
case serviceRestart, serviceStop:
err = cp.cmdRunner.SignalService(cmd)
}
return err
}
// sendServiceCmd - Invoke Restart command on remote peers
// adminPeer followed by on the local peer.
func sendServiceCmd(cps adminPeers, cmd serviceSignal) {
// Send service command like stop or restart to all remote nodes and finally run on local node.
errs := make([]error, len(cps))
var wg sync.WaitGroup
remotePeers := cps[1:]
for i := range remotePeers {
wg.Add(1)
go func(idx int) {
defer wg.Done()
// we use idx+1 because remotePeers slice is 1 position shifted w.r.t cps
errs[idx+1] = invokeServiceCmd(remotePeers[idx], cmd)
}(i)
}
wg.Wait()
errs[0] = invokeServiceCmd(cps[0], cmd)
}
// uptimeSlice - used to sort uptimes in chronological order.
type uptimeSlice []struct {
err error
uptime time.Duration
}
func (ts uptimeSlice) Len() int {
return len(ts)
}
func (ts uptimeSlice) Less(i, j int) bool {
return ts[i].uptime < ts[j].uptime
}
func (ts uptimeSlice) Swap(i, j int) {
ts[i], ts[j] = ts[j], ts[i]
}
// getPeerUptimes - returns the uptime since the last time read quorum
// was established on success. Otherwise returns errXLReadQuorum.
func getPeerUptimes(peers adminPeers) (time.Duration, error) {
// In a single node Erasure or FS backend setup the uptime of
// the setup is the uptime of the single minio server
// instance.
if !globalIsDistXL {
return UTCNow().Sub(globalBootTime), nil
}
uptimes := make(uptimeSlice, len(peers))
// Get up time of all servers.
wg := sync.WaitGroup{}
for i, peer := range peers {
wg.Add(1)
go func(idx int, peer adminPeer) {
defer wg.Done()
serverInfoData, rpcErr := peer.cmdRunner.ServerInfo()
uptimes[idx].uptime, uptimes[idx].err = serverInfoData.Properties.Uptime, rpcErr
}(i, peer)
}
wg.Wait()
// Sort uptimes in chronological order.
sort.Sort(uptimes)
// Pick the readQuorum'th uptime in chronological order. i.e,
// the time at which read quorum was (re-)established.
readQuorum := len(uptimes) / 2
validCount := 0
latestUptime := time.Duration(0)
for _, uptime := range uptimes {
if uptime.err != nil {
logger.LogIf(context.Background(), uptime.err)
continue
}
validCount++
if validCount >= readQuorum {
latestUptime = uptime.uptime
break
}
}
// Less than readQuorum "Admin.Uptime" RPC call returned
// successfully, so read-quorum unavailable.
if validCount < readQuorum {
return time.Duration(0), InsufficientReadQuorum{}
}
return latestUptime, nil
}


@@ -1,103 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"path"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
xrpc "github.com/minio/minio/cmd/rpc"
)
const adminServiceName = "Admin"
const adminServiceSubPath = "/admin"
var adminServicePath = path.Join(minioReservedBucketPath, adminServiceSubPath)
// adminRPCReceiver - Admin RPC receiver for admin RPC server.
type adminRPCReceiver struct {
local *localAdminClient
}
// SignalServiceArgs - provides the signal argument to SignalService RPC
type SignalServiceArgs struct {
AuthArgs
Sig serviceSignal
}
// SignalService - Send a restart or stop signal to the service
func (receiver *adminRPCReceiver) SignalService(args *SignalServiceArgs, reply *VoidReply) error {
return receiver.local.SignalService(args.Sig)
}
// ServerInfo - returns the server info when object layer was initialized on this server.
func (receiver *adminRPCReceiver) ServerInfo(args *AuthArgs, reply *ServerInfoData) (err error) {
*reply, err = receiver.local.ServerInfo()
return err
}
// StartProfilingArgs - holds the RPC argument for the StartProfiling RPC call
type StartProfilingArgs struct {
AuthArgs
Profiler string
}
// StartProfiling - starts profiling of this server
func (receiver *adminRPCReceiver) StartProfiling(args *StartProfilingArgs, reply *VoidReply) error {
return receiver.local.StartProfiling(args.Profiler)
}
// DownloadProfilingData - stops and returns profiling data of this server
func (receiver *adminRPCReceiver) DownloadProfilingData(args *AuthArgs, reply *[]byte) (err error) {
*reply, err = receiver.local.DownloadProfilingData()
return
}
// GetConfig - returns the config.json of this server.
func (receiver *adminRPCReceiver) GetConfig(args *AuthArgs, reply *[]byte) (err error) {
*reply, err = receiver.local.GetConfig()
return err
}
// ReInitFormatArgs - provides dry-run information to re-initialize format.json
type ReInitFormatArgs struct {
AuthArgs
DryRun bool
}
// ReInitFormat - re-init 'format.json'
func (receiver *adminRPCReceiver) ReInitFormat(args *ReInitFormatArgs, reply *VoidReply) error {
return receiver.local.ReInitFormat(args.DryRun)
}
// NewAdminRPCServer - returns new admin RPC server.
func NewAdminRPCServer() (*xrpc.Server, error) {
rpcServer := xrpc.NewServer()
if err := rpcServer.RegisterName(adminServiceName, &adminRPCReceiver{&localAdminClient{}}); err != nil {
return nil, err
}
return rpcServer, nil
}
// registerAdminRPCRouter - creates and registers Admin RPC server and its router.
func registerAdminRPCRouter(router *mux.Router) {
rpcServer, err := NewAdminRPCServer()
logger.FatalIf(err, "Unable to initialize Admin RPC Server")
subrouter := router.PathPrefix(minioReservedBucketPath).Subrouter()
subrouter.Path(adminServiceSubPath).HandlerFunc(httpTraceHdrs(rpcServer.ServeHTTP))
}

View File

@@ -1,239 +0,0 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"net/http"
"net/http/httptest"
"testing"
"time"
xnet "github.com/minio/minio/pkg/net"
)
///////////////////////////////////////////////////////////////////////////////
//
// localAdminClient and AdminRPCClient are adminCmdRunner interface
// compatible, hence the test functions below apply to both clients.
//
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
//
// Admin RPC server, adminRPCReceiver and AdminRPCClient are
// inter-dependent, hence the test functions below are sufficient to test all of them.
//
///////////////////////////////////////////////////////////////////////////////
func testAdminCmdRunnerSignalService(t *testing.T, client adminCmdRunner) {
tmpGlobalServiceSignalCh := globalServiceSignalCh
globalServiceSignalCh = make(chan serviceSignal, 10)
defer func() {
globalServiceSignalCh = tmpGlobalServiceSignalCh
}()
testCases := []struct {
signal serviceSignal
expectErr bool
}{
{serviceRestart, false},
{serviceStop, false},
{serviceStatus, true},
{serviceSignal(100), true},
}
for i, testCase := range testCases {
err := client.SignalService(testCase.signal)
expectErr := (err != nil)
if expectErr != testCase.expectErr {
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
}
}
}
func testAdminCmdRunnerReInitFormat(t *testing.T, client adminCmdRunner) {
tmpGlobalObjectAPI := globalObjectAPI
defer func() {
globalObjectAPI = tmpGlobalObjectAPI
}()
testCases := []struct {
objectAPI ObjectLayer
dryRun bool
expectErr bool
}{
{&DummyObjectLayer{}, true, false},
{&DummyObjectLayer{}, false, false},
{nil, true, true},
{nil, false, true},
}
for i, testCase := range testCases {
globalObjectAPI = testCase.objectAPI
err := client.ReInitFormat(testCase.dryRun)
expectErr := (err != nil)
if expectErr != testCase.expectErr {
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
}
}
}
func testAdminCmdRunnerServerInfo(t *testing.T, client adminCmdRunner) {
tmpGlobalBootTime := globalBootTime
tmpGlobalObjectAPI := globalObjectAPI
tmpGlobalConnStats := globalConnStats
tmpGlobalHTTPStats := globalHTTPStats
tmpGlobalNotificationSys := globalNotificationSys
defer func() {
globalBootTime = tmpGlobalBootTime
globalObjectAPI = tmpGlobalObjectAPI
globalConnStats = tmpGlobalConnStats
globalHTTPStats = tmpGlobalHTTPStats
globalNotificationSys = tmpGlobalNotificationSys
}()
endpoints := new(EndpointList)
notificationSys := NewNotificationSys(globalServerConfig, *endpoints)
testCases := []struct {
bootTime time.Time
objectAPI ObjectLayer
connStats *ConnStats
httpStats *HTTPStats
notificationSys *NotificationSys
expectErr bool
}{
{UTCNow(), &DummyObjectLayer{}, newConnStats(), newHTTPStats(), notificationSys, false},
{time.Time{}, nil, nil, nil, nil, true},
{UTCNow(), nil, nil, nil, nil, true},
}
for i, testCase := range testCases {
globalBootTime = testCase.bootTime
globalObjectAPI = testCase.objectAPI
globalConnStats = testCase.connStats
globalHTTPStats = testCase.httpStats
globalNotificationSys = testCase.notificationSys
_, err := client.ServerInfo()
expectErr := (err != nil)
if expectErr != testCase.expectErr {
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
}
}
}
func testAdminCmdRunnerGetConfig(t *testing.T, client adminCmdRunner) {
tmpGlobalServerConfig := globalServerConfig
defer func() {
globalServerConfig = tmpGlobalServerConfig
}()
config := newServerConfig()
testCases := []struct {
config *serverConfig
expectErr bool
}{
{globalServerConfig, false},
{config, false},
}
for i, testCase := range testCases {
globalServerConfig = testCase.config
_, err := client.GetConfig()
expectErr := (err != nil)
if expectErr != testCase.expectErr {
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
}
}
}
func newAdminRPCHTTPServerClient(t *testing.T) (*httptest.Server, *AdminRPCClient, *serverConfig) {
rpcServer, err := NewAdminRPCServer()
if err != nil {
t.Fatalf("unexpected error %v", err)
}
httpServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
rpcServer.ServeHTTP(w, r)
}))
url, err := xnet.ParseURL(httpServer.URL)
if err != nil {
t.Fatalf("unexpected error %v", err)
}
host, err := xnet.ParseHost(url.Host)
if err != nil {
t.Fatalf("unexpected error %v", err)
}
prevGlobalServerConfig := globalServerConfig
globalServerConfig = newServerConfig()
rpcClient, err := NewAdminRPCClient(host)
if err != nil {
t.Fatalf("unexpected error %v", err)
}
return httpServer, rpcClient, prevGlobalServerConfig
}
func TestAdminRPCClientSignalService(t *testing.T) {
httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
defer httpServer.Close()
defer func() {
globalServerConfig = prevGlobalServerConfig
}()
testAdminCmdRunnerSignalService(t, rpcClient)
}
func TestAdminRPCClientReInitFormat(t *testing.T) {
httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
defer httpServer.Close()
defer func() {
globalServerConfig = prevGlobalServerConfig
}()
testAdminCmdRunnerReInitFormat(t, rpcClient)
}
func TestAdminRPCClientServerInfo(t *testing.T) {
httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
defer httpServer.Close()
defer func() {
globalServerConfig = prevGlobalServerConfig
}()
testAdminCmdRunnerServerInfo(t, rpcClient)
}
func TestAdminRPCClientGetConfig(t *testing.T) {
httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
defer httpServer.Close()
defer func() {
globalServerConfig = prevGlobalServerConfig
}()
testAdminCmdRunnerGetConfig(t, rpcClient)
}

View File

@@ -23,92 +23,10 @@ import (
const (
// Response request id.
responseRequestIDKey = "x-amz-request-id"
// Deployment id.
responseDeploymentIDKey = "x-minio-deployment-id"
)
// CSVFileHeaderInfo - Can be either USE, IGNORE or NONE; defines what to do with
// the first row
type CSVFileHeaderInfo string
// Constants for file header info.
const (
CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE"
CSVFileHeaderInfoIgnore = "IGNORE"
CSVFileHeaderInfoUse = "USE"
)
// SelectCompressionType - ONLY GZIP is supported
type SelectCompressionType string
// Constants for compression types under select API.
const (
SelectCompressionNONE SelectCompressionType = "NONE"
SelectCompressionGZIP = "GZIP"
SelectCompressionBZIP = "BZIP2"
)
// CSVQuoteFields - Can be either Always or AsNeeded
type CSVQuoteFields string
// Constants for csv quote styles.
const (
CSVQuoteFieldsAlways CSVQuoteFields = "Always"
CSVQuoteFieldsAsNeeded = "AsNeeded"
)
// QueryExpressionType - Currently can only be SQL
type QueryExpressionType string
// Constants for expression type.
const (
QueryExpressionTypeSQL QueryExpressionType = "SQL"
)
// JSONType determines json input serialization type.
type JSONType string
// Constants for JSONTypes.
const (
JSONDocumentType JSONType = "Document"
JSONLinesType = "Lines"
)
// ObjectSelectRequest - represents the input select body
type ObjectSelectRequest struct {
XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"`
Expression string
ExpressionType QueryExpressionType
InputSerialization struct {
CompressionType SelectCompressionType
Parquet *struct{}
CSV *struct {
FileHeaderInfo CSVFileHeaderInfo
RecordDelimiter string
FieldDelimiter string
QuoteCharacter string
QuoteEscapeCharacter string
Comments string
}
JSON *struct {
Type JSONType
}
}
OutputSerialization struct {
CSV *struct {
QuoteFields CSVQuoteFields
RecordDelimiter string
FieldDelimiter string
QuoteCharacter string
QuoteEscapeCharacter string
}
JSON *struct {
RecordDelimiter string
}
}
RequestProgress struct {
Enabled bool
}
}
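
For reference, this is the kind of XML body the struct above decodes. The snippet below is a hedged, self-contained sketch using a trimmed copy of the struct (only the fields exercised here), not the server's actual decode path.

package main

import (
	"encoding/xml"
	"fmt"
)

// selectRequest mirrors a subset of ObjectSelectRequest for illustration.
type selectRequest struct {
	XMLName            xml.Name `xml:"SelectObjectContentRequest"`
	Expression         string
	ExpressionType     string
	InputSerialization struct {
		CompressionType string
		CSV             *struct {
			FileHeaderInfo string
		}
	}
}

func main() {
	body := `<SelectObjectContentRequest>
  <Expression>SELECT s.name FROM S3Object s</Expression>
  <ExpressionType>SQL</ExpressionType>
  <InputSerialization>
    <CompressionType>NONE</CompressionType>
    <CSV><FileHeaderInfo>USE</FileHeaderInfo></CSV>
  </InputSerialization>
</SelectObjectContentRequest>`

	var req selectRequest
	if err := xml.Unmarshal([]byte(body), &req); err != nil {
		panic(err)
	}
	// Prints: SELECT s.name FROM S3Object s USE
	fmt.Println(req.Expression, req.InputSerialization.CSV.FileHeaderInfo)
}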
// ObjectIdentifier carries key name for the object to delete.
type ObjectIdentifier struct {
ObjectName string `xml:"Key"`

View File

@@ -22,12 +22,17 @@ import (
"fmt"
"net/http"
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"google.golang.org/api/googleapi"
minio "github.com/minio/minio-go"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/dns"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/s3select"
)
// APIError structure
@@ -68,6 +73,7 @@ const (
ErrInvalidCopyPartRange
ErrInvalidCopyPartRangeSource
ErrInvalidMaxKeys
ErrInvalidEncodingMethod
ErrInvalidMaxUploads
ErrInvalidMaxParts
ErrInvalidPartNumberMarker
@@ -85,6 +91,7 @@ const (
ErrNoSuchBucketPolicy
ErrNoSuchKey
ErrNoSuchUpload
ErrNoSuchVersion
ErrNotImplemented
ErrPreconditionFailed
ErrRequestTimeTooSkewed
@@ -107,7 +114,6 @@ const (
ErrInvalidRequestVersion
ErrMissingSignTag
ErrMissingSignHeadersTag
ErrPolicyAlreadyExpired
ErrMalformedDate
ErrMalformedPresignedDate
ErrMalformedCredentialDate
@@ -150,6 +156,9 @@ const (
ErrKMSNotConfigured
ErrKMSAuthFailure
ErrNoAccessKey
ErrInvalidToken
// Bucket notification related errors.
ErrEventNotification
ErrARNNotification
@@ -187,11 +196,15 @@ const (
// new error codes here.
ErrMalformedJSON
ErrAdminNoSuchUser
ErrAdminNoSuchPolicy
ErrAdminInvalidArgument
ErrAdminInvalidAccessKey
ErrAdminInvalidSecretKey
ErrAdminConfigNoQuorum
ErrAdminConfigTooLarge
ErrAdminConfigBadJSON
ErrAdminConfigDuplicateKeys
ErrAdminCredentialsMismatch
ErrInsecureClientRequest
ErrObjectTampered
@@ -204,7 +217,7 @@ const (
ErrHealOverlappingPaths
ErrIncorrectContinuationToken
//S3 Select Errors
// S3 Select Errors
ErrEmptyRequestBody
ErrUnsupportedFunction
ErrInvalidExpressionType
@@ -249,7 +262,6 @@ const (
ErrParseUnsupportedAlias
ErrParseUnsupportedSyntax
ErrParseUnknownOperator
ErrParseInvalidPathComponent
ErrParseMissingIdentAfterAt
ErrParseUnexpectedOperator
ErrParseUnexpectedTerm
@@ -288,14 +300,28 @@ const (
ErrEvaluatorInvalidTimestampFormatPatternToken
ErrEvaluatorInvalidTimestampFormatPatternSymbol
ErrEvaluatorBindingDoesNotExist
ErrInvalidColumnIndex
ErrMissingHeaders
ErrInvalidColumnIndex
ErrAdminConfigNotificationTargetsFailed
ErrAdminProfilerNotEnabled
ErrInvalidDecompressedSize
ErrAddUserInvalidArgument
)
type errorCodeMap map[APIErrorCode]APIError
func (e errorCodeMap) ToAPIErr(errCode APIErrorCode) APIError {
apiErr, ok := e[errCode]
if !ok {
return e[ErrInternalError]
}
return apiErr
}
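
ToAPIErr is a lookup with a safe fallback: an unmapped code degrades to the InternalError entry rather than a zero-valued APIError. A quick sketch of the intended behaviour, within this package:

// Known code: returns the registered entry (e.g. Code "NoSuchKey", HTTP 404).
apiErr := errorCodes.ToAPIErr(ErrNoSuchKey)
// Unknown code: falls back to the ErrInternalError entry instead of APIError{}.
fallback := errorCodes.ToAPIErr(APIErrorCode(1 << 20))
_, _ = apiErr, fallback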
// error code to APIError structure, these fields carry respective
// descriptions for all the error responses.
var errorCodeResponse = map[APIErrorCode]APIError{
var errorCodes = errorCodeMap{
ErrInvalidCopyDest: {
Code: "InvalidRequest",
Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.",
@@ -331,6 +357,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "Argument maxKeys must be an integer between 0 and 2147483647",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidEncodingMethod: {
Code: "InvalidArgument",
Description: "Invalid Encoding Method specified in Request",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidMaxParts: {
Code: "InvalidArgument",
Description: "Argument max-parts must be an integer between 0 and 2147483647",
@@ -436,6 +467,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
HTTPStatusCode: http.StatusNotFound,
},
ErrNoSuchVersion: {
Code: "NoSuchVersion",
Description: "Indicates that the version ID specified in the request does not match an existing version.",
HTTPStatusCode: http.StatusNotFound,
},
ErrNotImplemented: {
Code: "NotImplemented",
Description: "A header you provided implies functionality that is not implemented",
@@ -588,11 +624,6 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "Signature header missing SignedHeaders field.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrPolicyAlreadyExpired: {
Code: "AccessDenied",
Description: "Invalid according to Policy: Policy expired.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMalformedExpires: {
Code: "AuthorizationQueryParametersError",
Description: "X-Amz-Expires should be a number",
@@ -802,6 +833,16 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "Server side encryption specified but KMS authorization failed",
HTTPStatusCode: http.StatusBadRequest,
},
ErrNoAccessKey: {
Code: "AccessDenied",
Description: "No AWSAccessKey was presented",
HTTPStatusCode: http.StatusForbidden,
},
ErrInvalidToken: {
Code: "InvalidTokenId",
Description: "The security token included in the request is invalid",
HTTPStatusCode: http.StatusForbidden,
},
/// S3 extensions.
ErrContentSHA256Mismatch: {
@@ -861,6 +902,21 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "The JSON you provided was not well-formed or did not validate against our published format.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminNoSuchUser: {
Code: "XMinioAdminNoSuchUser",
Description: "The specified user does not exist.",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminNoSuchPolicy: {
Code: "XMinioAdminNoSuchPolicy",
Description: "The canned policy does not exist.",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminInvalidArgument: {
Code: "XMinioAdminInvalidArgument",
Description: "Invalid arguments specified.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminInvalidAccessKey: {
Code: "XMinioAdminInvalidAccessKey",
Description: "The access key is invalid.",
@@ -879,11 +935,16 @@ var errorCodeResponse = map[APIErrorCode]APIError{
ErrAdminConfigTooLarge: {
Code: "XMinioAdminConfigTooLarge",
Description: fmt.Sprintf("Configuration data provided exceeds the allowed maximum of %d bytes",
maxConfigJSONSize),
maxEConfigJSONSize),
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminConfigBadJSON: {
Code: "XMinioAdminConfigBadJSON",
Description: "JSON configuration provided is of incorrect format",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminConfigDuplicateKeys: {
Code: "XMinioAdminConfigDuplicateKeys",
Description: "JSON configuration provided has objects with duplicate keys",
HTTPStatusCode: http.StatusBadRequest,
},
@@ -892,6 +953,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "Configuration update failed due an unsuccessful attempt to connect to one or more notification servers",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminProfilerNotEnabled: {
Code: "XMinioAdminProfilerNotEnabled",
Description: "Unable to perform the requested operation because profiling is not enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminCredentialsMismatch: {
Code: "XMinioAdminCredentialsMismatch",
Description: "Credentials in config mismatch with server environment variables",
@@ -1192,11 +1258,6 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "The SQL expression contains an invalid operator.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseInvalidPathComponent: {
Code: "ParseInvalidPathComponent",
Description: "The SQL expression contains an invalid path component.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseMissingIdentAfterAt: {
Code: "ParseMissingIdentAfterAt",
Description: "Did not find the expected identifier after the @ symbol in the SQL expression.",
@@ -1382,11 +1443,6 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "Time stamp format pattern contains an invalid symbol in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidColumnIndex: {
Code: "InvalidColumnIndex",
Description: "Column index in the SQL expression is invalid.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrEvaluatorBindingDoesNotExist: {
Code: "ErrEvaluatorBindingDoesNotExist",
Description: "A column name or a path provided does not exist in the SQL expression",
@@ -1397,21 +1453,44 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "Some headers in the query are missing from the file. Check the file and try again.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidColumnIndex: {
Code: "InvalidColumnIndex",
Description: "The column index is invalid. Please check the service documentation and try again.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidDecompressedSize: {
Code: "XMinioInvalidDecompressedSize",
Description: "The data provided is unfit for decompression",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAddUserInvalidArgument: {
Code: "XMinioInvalidIAMCredentials",
Description: "User is not allowed to be same as admin access key",
HTTPStatusCode: http.StatusConflict,
},
// Add your error structure here.
}
// toAPIErrorCode - Converts embedded errors. Convenience
// function written to handle all cases where we have known types of
// errors returned by underlying layers.
func toAPIErrorCode(err error) (apiErr APIErrorCode) {
func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
if err == nil {
return ErrNone
}
// Verify if the underlying error is signature mismatch.
switch err {
case errInvalidArgument:
apiErr = ErrAdminInvalidArgument
case errNoSuchUser:
apiErr = ErrAdminNoSuchUser
case errNoSuchPolicy:
apiErr = ErrAdminNoSuchPolicy
case errSignatureMismatch:
apiErr = ErrSignatureDoesNotMatch
case errInvalidRange:
apiErr = ErrInvalidRange
case errDataTooLarge:
apiErr = ErrEntityTooLarge
case errDataTooSmall:
@@ -1421,10 +1500,10 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
case auth.ErrInvalidSecretKeyLength:
apiErr = ErrAdminInvalidSecretKey
// SSE errors
case errInvalidEncryptionParameters:
apiErr = ErrInvalidEncryptionParameters
case crypto.ErrInvalidEncryptionMethod:
apiErr = ErrInvalidEncryptionMethod
case errInsecureSSERequest:
apiErr = ErrInsecureSSECustomerRequest
case crypto.ErrInvalidCustomerAlgorithm:
apiErr = ErrInvalidSSECustomerAlgorithm
case crypto.ErrInvalidCustomerKey:
@@ -1452,168 +1531,11 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
case errOperationTimedOut, context.Canceled, context.DeadlineExceeded:
apiErr = ErrOperationTimedOut
}
switch err {
case s3select.ErrBusy:
apiErr = ErrBusy
case s3select.ErrUnauthorizedAccess:
apiErr = ErrUnauthorizedAccess
case s3select.ErrExpressionTooLong:
apiErr = ErrExpressionTooLong
case s3select.ErrIllegalSQLFunctionArgument:
apiErr = ErrIllegalSQLFunctionArgument
case s3select.ErrInvalidKeyPath:
apiErr = ErrInvalidKeyPath
case s3select.ErrInvalidCompressionFormat:
apiErr = ErrInvalidCompressionFormat
case s3select.ErrInvalidFileHeaderInfo:
apiErr = ErrInvalidFileHeaderInfo
case s3select.ErrInvalidJSONType:
apiErr = ErrInvalidJSONType
case s3select.ErrInvalidQuoteFields:
apiErr = ErrInvalidQuoteFields
case s3select.ErrInvalidRequestParameter:
apiErr = ErrInvalidRequestParameter
case s3select.ErrInvalidDataType:
apiErr = ErrInvalidDataType
case s3select.ErrInvalidTextEncoding:
apiErr = ErrInvalidTextEncoding
case s3select.ErrInvalidTableAlias:
apiErr = ErrInvalidTableAlias
case s3select.ErrMissingRequiredParameter:
apiErr = ErrMissingRequiredParameter
case s3select.ErrObjectSerializationConflict:
apiErr = ErrObjectSerializationConflict
case s3select.ErrUnsupportedSQLOperation:
apiErr = ErrUnsupportedSQLOperation
case s3select.ErrUnsupportedSQLStructure:
apiErr = ErrUnsupportedSQLStructure
case s3select.ErrUnsupportedSyntax:
apiErr = ErrUnsupportedSyntax
case s3select.ErrUnsupportedRangeHeader:
apiErr = ErrUnsupportedRangeHeader
case s3select.ErrLexerInvalidChar:
apiErr = ErrLexerInvalidChar
case s3select.ErrLexerInvalidOperator:
apiErr = ErrLexerInvalidOperator
case s3select.ErrLexerInvalidLiteral:
apiErr = ErrLexerInvalidLiteral
case s3select.ErrLexerInvalidIONLiteral:
apiErr = ErrLexerInvalidIONLiteral
case s3select.ErrParseExpectedDatePart:
apiErr = ErrParseExpectedDatePart
case s3select.ErrParseExpectedKeyword:
apiErr = ErrParseExpectedKeyword
case s3select.ErrParseExpectedTokenType:
apiErr = ErrParseExpectedTokenType
case s3select.ErrParseExpected2TokenTypes:
apiErr = ErrParseExpected2TokenTypes
case s3select.ErrParseExpectedNumber:
apiErr = ErrParseExpectedNumber
case s3select.ErrParseExpectedRightParenBuiltinFunctionCall:
apiErr = ErrParseExpectedRightParenBuiltinFunctionCall
case s3select.ErrParseExpectedTypeName:
apiErr = ErrParseExpectedTypeName
case s3select.ErrParseExpectedWhenClause:
apiErr = ErrParseExpectedWhenClause
case s3select.ErrParseUnsupportedToken:
apiErr = ErrParseUnsupportedToken
case s3select.ErrParseUnsupportedLiteralsGroupBy:
apiErr = ErrParseUnsupportedLiteralsGroupBy
case s3select.ErrParseExpectedMember:
apiErr = ErrParseExpectedMember
case s3select.ErrParseUnsupportedSelect:
apiErr = ErrParseUnsupportedSelect
case s3select.ErrParseUnsupportedCase:
apiErr = ErrParseUnsupportedCase
case s3select.ErrParseUnsupportedCaseClause:
apiErr = ErrParseUnsupportedCaseClause
case s3select.ErrParseUnsupportedAlias:
apiErr = ErrParseUnsupportedAlias
case s3select.ErrParseUnsupportedSyntax:
apiErr = ErrParseUnsupportedSyntax
case s3select.ErrParseUnknownOperator:
apiErr = ErrParseUnknownOperator
case s3select.ErrParseInvalidPathComponent:
apiErr = ErrParseInvalidPathComponent
case s3select.ErrParseMissingIdentAfterAt:
apiErr = ErrParseMissingIdentAfterAt
case s3select.ErrParseUnexpectedOperator:
apiErr = ErrParseUnexpectedOperator
case s3select.ErrParseUnexpectedTerm:
apiErr = ErrParseUnexpectedTerm
case s3select.ErrParseUnexpectedToken:
apiErr = ErrParseUnexpectedToken
case s3select.ErrParseUnexpectedKeyword:
apiErr = ErrParseUnexpectedKeyword
case s3select.ErrParseExpectedExpression:
apiErr = ErrParseExpectedExpression
case s3select.ErrParseExpectedLeftParenAfterCast:
apiErr = ErrParseExpectedLeftParenAfterCast
case s3select.ErrParseExpectedLeftParenValueConstructor:
apiErr = ErrParseExpectedLeftParenValueConstructor
case s3select.ErrParseExpectedLeftParenBuiltinFunctionCall:
apiErr = ErrParseExpectedLeftParenBuiltinFunctionCall
case s3select.ErrParseExpectedArgumentDelimiter:
apiErr = ErrParseExpectedArgumentDelimiter
case s3select.ErrParseCastArity:
apiErr = ErrParseCastArity
case s3select.ErrParseInvalidTypeParam:
apiErr = ErrParseInvalidTypeParam
case s3select.ErrParseEmptySelect:
apiErr = ErrParseEmptySelect
case s3select.ErrParseSelectMissingFrom:
apiErr = ErrParseSelectMissingFrom
case s3select.ErrParseExpectedIdentForGroupName:
apiErr = ErrParseExpectedIdentForGroupName
case s3select.ErrParseExpectedIdentForAlias:
apiErr = ErrParseExpectedIdentForAlias
case s3select.ErrParseUnsupportedCallWithStar:
apiErr = ErrParseUnsupportedCallWithStar
case s3select.ErrParseNonUnaryAgregateFunctionCall:
apiErr = ErrParseNonUnaryAgregateFunctionCall
case s3select.ErrParseMalformedJoin:
apiErr = ErrParseMalformedJoin
case s3select.ErrParseExpectedIdentForAt:
apiErr = ErrParseExpectedIdentForAt
case s3select.ErrParseAsteriskIsNotAloneInSelectList:
apiErr = ErrParseAsteriskIsNotAloneInSelectList
case s3select.ErrParseCannotMixSqbAndWildcardInSelectList:
apiErr = ErrParseCannotMixSqbAndWildcardInSelectList
case s3select.ErrParseInvalidContextForWildcardInSelectList:
apiErr = ErrParseInvalidContextForWildcardInSelectList
case s3select.ErrIncorrectSQLFunctionArgumentType:
apiErr = ErrIncorrectSQLFunctionArgumentType
case s3select.ErrValueParseFailure:
apiErr = ErrValueParseFailure
case s3select.ErrIntegerOverflow:
apiErr = ErrIntegerOverflow
case s3select.ErrLikeInvalidInputs:
apiErr = ErrLikeInvalidInputs
case s3select.ErrCastFailed:
apiErr = ErrCastFailed
case s3select.ErrInvalidCast:
apiErr = ErrInvalidCast
case s3select.ErrEvaluatorInvalidTimestampFormatPattern:
apiErr = ErrEvaluatorInvalidTimestampFormatPattern
case s3select.ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing:
apiErr = ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing
case s3select.ErrEvaluatorTimestampFormatPatternDuplicateFields:
apiErr = ErrEvaluatorTimestampFormatPatternDuplicateFields
case s3select.ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch:
apiErr = ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch
case s3select.ErrEvaluatorUnterminatedTimestampFormatPatternToken:
apiErr = ErrEvaluatorUnterminatedTimestampFormatPatternToken
case s3select.ErrEvaluatorInvalidTimestampFormatPatternToken:
apiErr = ErrEvaluatorInvalidTimestampFormatPatternToken
case s3select.ErrEvaluatorInvalidTimestampFormatPatternSymbol:
apiErr = ErrEvaluatorInvalidTimestampFormatPatternSymbol
case s3select.ErrInvalidColumnIndex:
apiErr = ErrInvalidColumnIndex
case s3select.ErrEvaluatorBindingDoesNotExist:
apiErr = ErrEvaluatorBindingDoesNotExist
case s3select.ErrMissingHeaders:
apiErr = ErrMissingHeaders
// Compression errors
switch err {
case errInvalidDecompressedSize:
apiErr = ErrInvalidDecompressedSize
}
if apiErr != ErrNone {
@@ -1720,6 +1642,64 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
apiErr = ErrObjectTampered
default:
apiErr = ErrInternalError
// Make sure to log the errors which we cannot translate
// into meaningful S3 API errors. This is added to aid in
// debugging unexpected/unhandled errors.
logger.LogIf(ctx, err)
}
return apiErr
}
var noError = APIError{}
// toAPIError - Converts embedded errors. Convenience
// function written to handle all cases where we have known types of
// errors returned by underlying layers.
func toAPIError(ctx context.Context, err error) APIError {
if err == nil {
return noError
}
var apiErr = errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
if apiErr.Code == "InternalError" {
// If we see an internal error try to interpret
// any underlying errors if possible depending on
// their internal error types. This code is only
// useful with gateway implementations.
switch e := err.(type) {
case minio.ErrorResponse:
apiErr = APIError{
Code: e.Code,
Description: e.Message,
HTTPStatusCode: e.StatusCode,
}
case *googleapi.Error:
apiErr = APIError{
Code: "XGCSInternalError",
Description: e.Message,
HTTPStatusCode: e.Code,
}
// GCS may send multiple errors, just pick the first one
// since S3 only sends one Error XML response.
if len(e.Errors) >= 1 {
apiErr.Code = e.Errors[0].Reason
}
case storage.AzureStorageServiceError:
apiErr = APIError{
Code: e.Code,
Description: e.Message,
HTTPStatusCode: e.StatusCode,
}
case oss.ServiceError:
apiErr = APIError{
Code: e.Code,
Description: e.Message,
HTTPStatusCode: e.StatusCode,
}
// Add more Gateway SDKs here if any in future.
}
}
return apiErr
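
The gateway branch above means SDK errors keep their native S3 identity rather than collapsing to InternalError. A sketch of the expected mapping, within this package and assuming the minio-go error shape shown above:

// A gateway error from the minio-go SDK carries its own S3 fields.
gwErr := minio.ErrorResponse{
	Code:       "NoSuchBucket",
	Message:    "The specified bucket does not exist",
	StatusCode: http.StatusNotFound,
}
apiErr := toAPIError(context.Background(), gwErr)
// apiErr.Code == "NoSuchBucket" and apiErr.HTTPStatusCode == 404,
// instead of a generic InternalError.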
@@ -1727,20 +1707,23 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
// getAPIError provides API Error for input API error code.
func getAPIError(code APIErrorCode) APIError {
if apiErr, ok := errorCodeResponse[code]; ok {
if apiErr, ok := errorCodes[code]; ok {
return apiErr
}
return errorCodeResponse[ErrInternalError]
return errorCodes.ToAPIErr(ErrInternalError)
}
// getAPIErrorResponse gets a standard error and resource value and
// provides an encodable populated error response
func getAPIErrorResponse(err APIError, resource, requestid string) APIErrorResponse {
func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID, hostID string) APIErrorResponse {
reqInfo := logger.GetReqInfo(ctx)
return APIErrorResponse{
Code: err.Code,
Message: err.Description,
Resource: resource,
RequestID: requestid,
HostID: "3L137",
Code: err.Code,
Message: err.Description,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
Resource: resource,
RequestID: requestID,
HostID: hostID,
}
}

View File

@@ -17,6 +17,7 @@
package cmd
import (
"context"
"errors"
"testing"
@@ -24,7 +25,7 @@ import (
"github.com/minio/minio/pkg/hash"
)
var toAPIErrorCodeTests = []struct {
var toAPIErrorTests = []struct {
err error
errCode APIErrorCode
}{
@@ -52,7 +53,6 @@ var toAPIErrorCodeTests = []struct {
{err: errSignatureMismatch, errCode: ErrSignatureDoesNotMatch},
// SSE-C errors
{err: errInsecureSSERequest, errCode: ErrInsecureSSECustomerRequest},
{err: crypto.ErrInvalidCustomerAlgorithm, errCode: ErrInvalidSSECustomerAlgorithm},
{err: crypto.ErrMissingCustomerKey, errCode: ErrMissingSSECustomerKey},
{err: crypto.ErrInvalidCustomerKey, errCode: ErrInvalidSSECustomerKey},
@@ -65,8 +65,9 @@ var toAPIErrorCodeTests = []struct {
}
func TestAPIErrCode(t *testing.T) {
for i, testCase := range toAPIErrorCodeTests {
errCode := toAPIErrorCode(testCase.err)
ctx := context.Background()
for i, testCase := range toAPIErrorTests {
errCode := toAPIErrorCode(ctx, testCase.err)
if errCode != testCase.errCode {
t.Errorf("Test %d: Expected error code %d, got %d", i+1, testCase.errCode, errCode)
}

View File

@@ -36,7 +36,7 @@ func mustGetRequestID(t time.Time) string {
// Write http common headers
func setCommonHeaders(w http.ResponseWriter) {
w.Header().Set("Server", globalServerUserAgent)
w.Header().Set("Server", "Minio/"+ReleaseTag)
// Set `x-amz-bucket-region` only if region is set on the server
// by default minio uses an empty region.
if region := globalServerConfig.GetRegion(); region != "" {
@@ -87,6 +87,9 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
w.Header().Set("Content-Encoding", objInfo.ContentEncoding)
}
if !objInfo.Expires.IsZero() {
w.Header().Set("Expires", objInfo.Expires.UTC().Format(http.TimeFormat))
}
// Set all other user defined metadata.
for k, v := range objInfo.UserDefined {
if hasPrefix(k, ReservedMetadataPrefix) {
@@ -104,7 +107,11 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
if err != nil {
return err
}
case objInfo.IsCompressed():
totalObjectSize = objInfo.GetActualSize()
if totalObjectSize < 0 {
return errInvalidDecompressedSize
}
default:
totalObjectSize = objInfo.Size
}

View File

@@ -22,15 +22,22 @@ import (
)
// Parse bucket url queries
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType string) {
prefix = values.Get("prefix")
marker = values.Get("marker")
delimiter = values.Get("delimiter")
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType string, errCode APIErrorCode) {
errCode = ErrNone
if values.Get("max-keys") != "" {
maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return
}
} else {
maxkeys = maxObjectList
}
prefix = values.Get("prefix")
marker = values.Get("marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
return
}
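
The net effect of the stricter parsing: a malformed max-keys now surfaces as an API error instead of silently becoming zero. A usage sketch within this package (the multipart and part-number parsers below follow the same pattern):

values := url.Values{}
values.Set("max-keys", "twenty") // not an integer
_, _, _, _, _, errCode := getListObjectsV1Args(values)
// errCode == ErrInvalidMaxKeys; previously maxkeys silently became 0.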
@@ -47,44 +54,69 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
}
}
if values.Get("max-keys") != "" {
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return
}
} else {
maxkeys = maxObjectList
}
prefix = values.Get("prefix")
token = values.Get("continuation-token")
startAfter = values.Get("start-after")
delimiter = values.Get("delimiter")
if values.Get("max-keys") != "" {
maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
} else {
maxkeys = maxObjectList
}
fetchOwner = values.Get("fetch-owner") == "true"
encodingType = values.Get("encoding-type")
return
}
// Parse bucket url queries for ?uploads
func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int, encodingType string) {
func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int, encodingType string, errCode APIErrorCode) {
errCode = ErrNone
if values.Get("max-uploads") != "" {
var err error
if maxUploads, err = strconv.Atoi(values.Get("max-uploads")); err != nil {
errCode = ErrInvalidMaxUploads
return
}
} else {
maxUploads = maxUploadsList
}
prefix = values.Get("prefix")
keyMarker = values.Get("key-marker")
uploadIDMarker = values.Get("upload-id-marker")
delimiter = values.Get("delimiter")
if values.Get("max-uploads") != "" {
maxUploads, _ = strconv.Atoi(values.Get("max-uploads"))
} else {
maxUploads = maxUploadsList
}
encodingType = values.Get("encoding-type")
return
}
// Parse object url queries
func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string) {
uploadID = values.Get("uploadId")
partNumberMarker, _ = strconv.Atoi(values.Get("part-number-marker"))
func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string, errCode APIErrorCode) {
var err error
errCode = ErrNone
if values.Get("max-parts") != "" {
maxParts, _ = strconv.Atoi(values.Get("max-parts"))
if maxParts, err = strconv.Atoi(values.Get("max-parts")); err != nil {
errCode = ErrInvalidMaxParts
return
}
} else {
maxParts = maxPartsList
}
if values.Get("part-number-marker") != "" {
if partNumberMarker, err = strconv.Atoi(values.Get("part-number-marker")); err != nil {
errCode = ErrInvalidPartNumberMarker
return
}
}
uploadID = values.Get("uploadId")
encodingType = values.Get("encoding-type")
return
}

View File

@@ -156,7 +156,10 @@ func TestListObjectsV1Resources(t *testing.T) {
}
for i, testCase := range testCases {
prefix, marker, delimiter, maxKeys, encodingType := getListObjectsV1Args(testCase.values)
prefix, marker, delimiter, maxKeys, encodingType, argsErr := getListObjectsV1Args(testCase.values)
if argsErr != ErrNone {
t.Errorf("Test %d: argument parsing failed, got %v", i+1, argsErr)
}
if prefix != testCase.prefix {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.prefix, prefix)
}
@@ -198,7 +201,10 @@ func TestGetObjectsResources(t *testing.T) {
}
for i, testCase := range testCases {
uploadID, partNumberMarker, maxParts, encodingType := getObjectResources(testCase.values)
uploadID, partNumberMarker, maxParts, encodingType, argsErr := getObjectResources(testCase.values)
if argsErr != ErrNone {
t.Errorf("Test %d: argument parsing failed, got %v", i+1, argsErr)
}
if uploadID != testCase.uploadID {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.uploadID, uploadID)
}

View File

@@ -1,56 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import "net/http"
// Represents additional fields necessary for ErrPartTooSmall S3 error.
type completeMultipartAPIError struct {
// Proposed size represents uploaded size of the part.
ProposedSize int64
// Minimum size allowed represents the minimum size allowed per
// part. Defaults to 5MB.
MinSizeAllowed int64
// Part number of the part which is incorrect.
PartNumber int
// ETag of the part which is incorrect.
PartETag string
// Other default XML error responses.
APIErrorResponse
}
// writePartSmallErrorResponse - used specifically to construct a proper
// error response during CompleteMultipartUpload when one of the parts is
// < 5MB. The requirement arises because the generic ErrorResponse XML
// doesn't carry the additional fields required to send this error, so we
// construct a new type scoped to this function.
func writePartSmallErrorResponse(w http.ResponseWriter, r *http.Request, err PartTooSmall) {
apiError := getAPIError(toAPIErrorCode(err))
// Generate complete multipart error response.
errorResponse := getAPIErrorResponse(apiError, r.URL.Path, w.Header().Get(responseRequestIDKey))
cmpErrResp := completeMultipartAPIError{err.PartSize, int64(5242880), err.PartNumber, err.PartETag, errorResponse}
encodedErrorResponse := encodeResponse(cmpErrResp)
// respond with 400 bad request.
w.WriteHeader(apiError.HTTPStatusCode)
// Write error body.
w.Write(encodedErrorResponse)
w.(http.Flusher).Flush()
}

View File

@@ -17,6 +17,7 @@
package cmd
import (
"context"
"encoding/xml"
"net/http"
"net/url"
@@ -24,6 +25,7 @@ import (
"strings"
"time"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/handlers"
)
@@ -278,7 +280,7 @@ func getURLScheme(tls bool) string {
}
// getObjectLocation gets the fully qualified URL of an object.
func getObjectLocation(r *http.Request, domain, bucket, object string) string {
func getObjectLocation(r *http.Request, domains []string, bucket, object string) string {
// unit tests do not have host set.
if r.Host == "" {
return path.Clean(r.URL.Path)
@@ -293,15 +295,31 @@ func getObjectLocation(r *http.Request, domain, bucket, object string) string {
Scheme: proto,
}
// If domain is set then we need to use bucket DNS style.
if domain != "" {
for _, domain := range domains {
if strings.Contains(r.Host, domain) {
u.Host = bucket + "." + r.Host
u.Path = path.Join(slashSeparator, object)
break
}
}
return u.String()
}
// s3EncodeName encodes string in response when encodingType
// is specified in AWS S3 requests.
func s3EncodeName(name string, encodingType string) (result string) {
// Quick path to exit
if encodingType == "" {
return name
}
encodingType = strings.ToLower(encodingType)
switch encodingType {
case "url":
return url.QueryEscape(name)
}
return name
}
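
Illustrative behaviour of s3EncodeName (a sketch, within this package; the encoded results are simply url.QueryEscape applied to the name):

s3EncodeName("photos/2019 summer.jpg", "")    // "photos/2019 summer.jpg" - unchanged
s3EncodeName("photos/2019 summer.jpg", "url") // "photos%2F2019+summer.jpg"
s3EncodeName("a b", "URL")                    // "a+b" - comparison is case-insensitive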
// generates ListBucketsResponse from array of BucketInfo which can be
// serialized to match XML and JSON API spec output.
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
@@ -324,7 +342,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
}
// generates a ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
var contents []Object
var prefixes []CommonPrefix
var owner = Owner{}
@@ -336,7 +354,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter string, max
if object.Name == "" {
continue
}
content.Key = object.Name
content.Key = s3EncodeName(object.Name, encodingType)
content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
if object.ETag != "" {
content.ETag = "\"" + object.ETag + "\""
@@ -346,20 +364,20 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter string, max
content.Owner = owner
contents = append(contents, content)
}
// TODO - support EncodingType in xml decoding
data.Name = bucket
data.Contents = contents
data.Prefix = prefix
data.Marker = marker
data.Delimiter = delimiter
data.EncodingType = encodingType
data.Prefix = s3EncodeName(prefix, encodingType)
data.Marker = s3EncodeName(marker, encodingType)
data.Delimiter = s3EncodeName(delimiter, encodingType)
data.MaxKeys = maxKeys
data.NextMarker = resp.NextMarker
data.NextMarker = s3EncodeName(resp.NextMarker, encodingType)
data.IsTruncated = resp.IsTruncated
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = prefix
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
prefixes = append(prefixes, prefixItem)
}
data.CommonPrefixes = prefixes
@@ -367,7 +385,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter string, max
}
// generates a ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string) ListObjectsV2Response {
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string) ListObjectsV2Response {
var contents []Object
var commonPrefixes []CommonPrefix
var owner = Owner{}
@@ -382,7 +400,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
if object.Name == "" {
continue
}
content.Key = object.Name
content.Key = s3EncodeName(object.Name, encodingType)
content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
if object.ETag != "" {
content.ETag = "\"" + object.ETag + "\""
@@ -392,20 +410,20 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
content.Owner = owner
contents = append(contents, content)
}
// TODO - support EncodingType in xml decoding
data.Name = bucket
data.Contents = contents
data.StartAfter = startAfter
data.Delimiter = delimiter
data.Prefix = prefix
data.EncodingType = encodingType
data.StartAfter = s3EncodeName(startAfter, encodingType)
data.Delimiter = s3EncodeName(delimiter, encodingType)
data.Prefix = s3EncodeName(prefix, encodingType)
data.MaxKeys = maxKeys
data.ContinuationToken = token
data.NextContinuationToken = nextToken
data.IsTruncated = isTruncated
for _, prefix := range prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = prefix
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
commonPrefixes = append(commonPrefixes, prefixItem)
}
data.CommonPrefixes = commonPrefixes
@@ -449,11 +467,10 @@ func generateCompleteMultpartUploadResponse(bucket, key, location, etag string)
}
// generates ListPartsResponse from ListPartsInfo.
func generateListPartsResponse(partsInfo ListPartsInfo) ListPartsResponse {
// TODO - support EncodingType in xml decoding
func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) ListPartsResponse {
listPartsResponse := ListPartsResponse{}
listPartsResponse.Bucket = partsInfo.Bucket
listPartsResponse.Key = partsInfo.Object
listPartsResponse.Key = s3EncodeName(partsInfo.Object, encodingType)
listPartsResponse.UploadID = partsInfo.UploadID
listPartsResponse.StorageClass = globalMinioDefaultStorageClass
listPartsResponse.Initiator.ID = globalMinioDefaultOwnerID
@@ -477,29 +494,29 @@ func generateListPartsResponse(partsInfo ListPartsInfo) ListPartsResponse {
}
// generates ListMultipartUploadsResponse for given bucket and ListMultipartsInfo.
func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMultipartsInfo) ListMultipartUploadsResponse {
func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMultipartsInfo, encodingType string) ListMultipartUploadsResponse {
listMultipartUploadsResponse := ListMultipartUploadsResponse{}
listMultipartUploadsResponse.Bucket = bucket
listMultipartUploadsResponse.Delimiter = multipartsInfo.Delimiter
listMultipartUploadsResponse.Delimiter = s3EncodeName(multipartsInfo.Delimiter, encodingType)
listMultipartUploadsResponse.IsTruncated = multipartsInfo.IsTruncated
listMultipartUploadsResponse.EncodingType = multipartsInfo.EncodingType
listMultipartUploadsResponse.Prefix = multipartsInfo.Prefix
listMultipartUploadsResponse.KeyMarker = multipartsInfo.KeyMarker
listMultipartUploadsResponse.NextKeyMarker = multipartsInfo.NextKeyMarker
listMultipartUploadsResponse.EncodingType = encodingType
listMultipartUploadsResponse.Prefix = s3EncodeName(multipartsInfo.Prefix, encodingType)
listMultipartUploadsResponse.KeyMarker = s3EncodeName(multipartsInfo.KeyMarker, encodingType)
listMultipartUploadsResponse.NextKeyMarker = s3EncodeName(multipartsInfo.NextKeyMarker, encodingType)
listMultipartUploadsResponse.MaxUploads = multipartsInfo.MaxUploads
listMultipartUploadsResponse.NextUploadIDMarker = multipartsInfo.NextUploadIDMarker
listMultipartUploadsResponse.UploadIDMarker = multipartsInfo.UploadIDMarker
listMultipartUploadsResponse.CommonPrefixes = make([]CommonPrefix, len(multipartsInfo.CommonPrefixes))
for index, commonPrefix := range multipartsInfo.CommonPrefixes {
listMultipartUploadsResponse.CommonPrefixes[index] = CommonPrefix{
Prefix: commonPrefix,
Prefix: s3EncodeName(commonPrefix, encodingType),
}
}
listMultipartUploadsResponse.Uploads = make([]Upload, len(multipartsInfo.Uploads))
for index, upload := range multipartsInfo.Uploads {
newUpload := Upload{}
newUpload.UploadID = upload.UploadID
newUpload.Key = upload.Object
newUpload.Key = s3EncodeName(upload.Object, encodingType)
newUpload.Initiated = upload.Initiated.UTC().Format(timeFormatAMZLong)
listMultipartUploadsResponse.Uploads[index] = newUpload
}
@@ -568,49 +585,93 @@ func writeSuccessResponseHeadersOnly(w http.ResponseWriter) {
}
// writeErrorResponse writes the error response headers and XML body
func writeErrorResponse(w http.ResponseWriter, errorCode APIErrorCode, reqURL *url.URL) {
switch errorCode {
case ErrSlowDown, ErrServerNotInitialized, ErrReadQuorum, ErrWriteQuorum:
func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL, browser bool) {
switch err.Code {
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
// Set Retry-After header to ask user-agents to retry the request after 120 seconds.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set("Retry-After", "120")
case "AccessDenied":
// If the request is from a browser and the browser UI
// is enabled, we need to redirect.
if browser && globalIsBrowserEnabled {
w.Header().Set("Location", minioReservedBucketPath+reqURL.Path)
w.WriteHeader(http.StatusTemporaryRedirect)
return
}
}
apiError := getAPIError(errorCode)
// Generate error response.
errorResponse := getAPIErrorResponse(apiError, reqURL.Path, w.Header().Get(responseRequestIDKey))
errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path,
w.Header().Get(responseRequestIDKey), w.Header().Get(responseDeploymentIDKey))
encodedErrorResponse := encodeResponse(errorResponse)
writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
}
func writeErrorResponseHeadersOnly(w http.ResponseWriter, errorCode APIErrorCode) {
apiError := getAPIError(errorCode)
writeResponse(w, apiError.HTTPStatusCode, nil, mimeNone)
func writeErrorResponseHeadersOnly(w http.ResponseWriter, err APIError) {
writeResponse(w, err.HTTPStatusCode, nil, mimeNone)
}
// writeErrorResponseJSON - writes error response in JSON format;
// useful for admin APIs.
func writeErrorResponseJSON(w http.ResponseWriter, errorCode APIErrorCode, reqURL *url.URL) {
apiError := getAPIError(errorCode)
func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
// Generate error response.
errorResponse := getAPIErrorResponse(apiError, reqURL.Path, w.Header().Get(responseRequestIDKey))
errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path, w.Header().Get(responseRequestIDKey), w.Header().Get(responseDeploymentIDKey))
encodedErrorResponse := encodeResponseJSON(errorResponse)
writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeJSON)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}
// writeCustomErrorResponseJSON - similar to writeErrorResponseJSON,
// but accepts the error message directly (this allows messages to be
// dynamically generated.)
func writeCustomErrorResponseJSON(w http.ResponseWriter, errorCode APIErrorCode,
func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError,
errBody string, reqURL *url.URL) {
apiError := getAPIError(errorCode)
reqInfo := logger.GetReqInfo(ctx)
errorResponse := APIErrorResponse{
Code: apiError.Code,
Message: errBody,
Resource: reqURL.Path,
RequestID: "3L137",
HostID: "3L137",
Code: err.Code,
Message: errBody,
Resource: reqURL.Path,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
RequestID: w.Header().Get(responseRequestIDKey),
HostID: w.Header().Get(responseDeploymentIDKey),
}
encodedErrorResponse := encodeResponseJSON(errorResponse)
writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeJSON)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}
// writeCustomErrorResponseXML - similar to writeErrorResponse,
// but accepts the error message directly (this allows messages to be
// dynamically generated.)
func writeCustomErrorResponseXML(ctx context.Context, w http.ResponseWriter, err APIError, errBody string, reqURL *url.URL, browser bool) {
switch err.Code {
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
// Set Retry-After header to ask user-agents to retry the request after 120 seconds.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set("Retry-After", "120")
case "AccessDenied":
// If the request is from a browser and the browser UI
// is enabled, we need to redirect.
if browser && globalIsBrowserEnabled {
w.Header().Set("Location", minioReservedBucketPath+reqURL.Path)
w.WriteHeader(http.StatusTemporaryRedirect)
return
}
}
reqInfo := logger.GetReqInfo(ctx)
errorResponse := APIErrorResponse{
Code: err.Code,
Message: errBody,
Resource: reqURL.Path,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
RequestID: w.Header().Get(responseRequestIDKey),
HostID: w.Header().Get(responseDeploymentIDKey),
}
encodedErrorResponse := encodeResponse(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
}
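
On the client side, the Retry-After header set above for the SlowDown class of errors is meant to be honoured before retrying. A hedged sketch of a client loop (objectURL is a hypothetical placeholder, the 503 status is assumed to match the mapping for these codes, and the usual net/http, strconv and time imports are assumed):

resp, err := http.Get(objectURL) // objectURL: hypothetical, assumed defined
if err == nil && resp.StatusCode == http.StatusServiceUnavailable {
	if secs, perr := strconv.Atoi(resp.Header.Get("Retry-After")); perr == nil {
		time.Sleep(time.Duration(secs) * time.Second) // the server asked for 120s
		// ...retry the request here...
	}
	resp.Body.Close()
}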

View File

@@ -26,7 +26,7 @@ func TestObjectLocation(t *testing.T) {
testCases := []struct {
request *http.Request
bucket, object string
domain string
domains []string
expectedLocation string
}{
// Server binding to localhost IP with https.
@@ -80,7 +80,7 @@ func TestObjectLocation(t *testing.T) {
Host: "mys3.bucket.org",
Header: map[string][]string{},
},
domain: "mys3.bucket.org",
domains: []string{"mys3.bucket.org"},
bucket: "mybucket",
object: "test/1.txt",
expectedLocation: "http://mybucket.mys3.bucket.org/test/1.txt",
@@ -92,14 +92,14 @@ func TestObjectLocation(t *testing.T) {
"X-Forwarded-Scheme": {httpsScheme},
},
},
domain: "mys3.bucket.org",
domains: []string{"mys3.bucket.org"},
bucket: "mybucket",
object: "test/1.txt",
expectedLocation: "https://mybucket.mys3.bucket.org/test/1.txt",
},
}
for i, testCase := range testCases {
gotLocation := getObjectLocation(testCase.request, testCase.domain, testCase.bucket, testCase.object)
gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
if testCase.expectedLocation != gotLocation {
t.Errorf("Test %d: expected %s, got %s", i+1, testCase.expectedLocation, gotLocation)
}

View File

@@ -26,21 +26,26 @@ import (
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
CacheAPI func() CacheObjectLayer
// Returns true if handlers should interpret encryption.
EncryptionEnabled func() bool
}
// registerAPIRouter - registers S3 compatible APIs.
func registerAPIRouter(router *mux.Router) {
func registerAPIRouter(router *mux.Router, encryptionEnabled bool) {
// Initialize API.
api := objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
CacheAPI: newCacheObjectsFn,
EncryptionEnabled: func() bool {
return encryptionEnabled
},
}
// API Router
apiRouter := router.PathPrefix("/").Subrouter()
var routers []*mux.Router
if globalDomainName != "" {
routers = append(routers, apiRouter.Host("{bucket:.+}."+globalDomainName).Subrouter())
for _, domainName := range globalDomainNames {
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
}
routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())
@@ -62,6 +67,8 @@ func registerAPIRouter(router *mux.Router) {
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.AbortMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}")
// GetObjectACL - this is a dummy call.
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectACLHandler)).Queries("acl", "")
// GetObjectTagging - this is a dummy call.
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectTaggingHandler)).Queries("tagging", "")
// SelectObjectContent
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.SelectObjectContentHandler)).Queries("select", "").Queries("select-type", "2")
// GetObject
@@ -79,8 +86,31 @@ func registerAPIRouter(router *mux.Router) {
// GetBucketPolicy
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketPolicyHandler)).Queries("policy", "")
// Dummy Bucket Calls
// GetBucketACL -- this is a dummy call.
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketACLHandler)).Queries("acl", "")
// GetBucketCors - this is a dummy call.
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketCorsHandler)).Queries("cors", "")
// GetBucketWebsiteHandler - this is a dummy call.
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketWebsiteHandler)).Queries("website", "")
// GetBucketVersioningHandler - this is a dummy call.
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketVersioningHandler)).Queries("versioning", "")
// GetBucketAccelerateHandler - this is a dummy call.
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketAccelerateHandler)).Queries("accelerate", "")
// GetBucketRequestPaymentHandler - this is a dummy call.
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketRequestPaymentHandler)).Queries("requestPayment", "")
// GetBucketLoggingHandler - this is a dummy call.
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketLoggingHandler)).Queries("logging", "")
// GetBucketLifecycleHandler - this is a dummy call.
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketLifecycleHandler)).Queries("lifecycle", "")
// GetBucketReplicationHandler - this is a dummy call.
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketReplicationHandler)).Queries("replication", "")
// GetBucketTaggingHandler - this is a dummy call.
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketTaggingHandler)).Queries("tagging", "")
// DeleteBucketWebsiteHandler
bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketWebsiteHandler)).Queries("website", "")
// DeleteBucketTaggingHandler
bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketTaggingHandler)).Queries("tagging", "")
// GetBucketNotification
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketNotificationHandler)).Queries("notification", "")

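For context, a minimal sketch of the multi-domain routing pattern added above: gorilla/mux Host patterns capture the bucket name from the Host header, one subrouter per configured domain, with a path-style subrouter as fallback. The domains and handler below are illustrative, not MinIO's.

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

// echoVars prints the bucket/object extracted by whichever route matched.
func echoVars(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	fmt.Fprintf(w, "bucket=%s object=%s\n", vars["bucket"], vars["object"])
}

func main() {
	router := mux.NewRouter()
	domains := []string{"mys3.bucket.org", "example.net"} // assumed configuration

	// One virtual-host-style subrouter per domain; the bucket name comes
	// from the Host header, exactly as in the loop added above.
	for _, domain := range domains {
		router.Host("{bucket:.+}." + domain).Subrouter().
			Methods("GET").Path("/{object:.+}").HandlerFunc(echoVars)
	}
	// Path-style fallback: /{bucket}/{object}.
	router.PathPrefix("/{bucket}").Subrouter().
		Methods("GET").Path("/{object:.+}").HandlerFunc(echoVars)

	log.Fatal(http.ListenAndServe(":9000", router))
}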

@@ -19,6 +19,7 @@ package cmd
import (
"bytes"
"context"
"crypto/subtle"
"encoding/base64"
"encoding/hex"
"errors"
@@ -27,8 +28,11 @@ import (
"net/http"
"strings"
jwtgo "github.com/dgrijalva/jwt-go"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/hash"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/policy"
)
@@ -86,6 +90,7 @@ const (
authTypeSigned
authTypeSignedV2
authTypeJWT
authTypeSTS
)
// Get request authentication type.
@@ -104,6 +109,8 @@ func getRequestAuthType(r *http.Request) authType {
return authTypeJWT
} else if isRequestPostPolicySignatureV4(r) {
return authTypePostPolicy
} else if _, ok := r.URL.Query()["Action"]; ok {
return authTypeSTS
} else if _, ok := r.Header["Authorization"]; !ok {
return authTypeAnonymous
}
@@ -112,43 +119,140 @@ func getRequestAuthType(r *http.Request) authType {
// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
// It does not accept presigned or JWT or anonymous requests.
func checkAdminRequestAuthType(r *http.Request, region string) APIErrorCode {
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, region string) APIErrorCode {
s3Err := ErrAccessDenied
if _, ok := r.Header["X-Amz-Content-Sha256"]; ok && getRequestAuthType(r) == authTypeSigned && !skipContentSha256Cksum(r) { // we only support V4 (no presign) with auth. body
s3Err = isReqAuthenticated(r, region)
if _, ok := r.Header["X-Amz-Content-Sha256"]; ok &&
getRequestAuthType(r) == authTypeSigned && !skipContentSha256Cksum(r) {
// We only support admin credentials to access admin APIs.
var owner bool
_, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
if s3Err != ErrNone {
return s3Err
}
if !owner {
return ErrAccessDenied
}
// we only support V4 (no presign) with auth body
s3Err = isReqAuthenticated(ctx, r, region, serviceS3)
}
if s3Err != ErrNone {
reqInfo := (&logger.ReqInfo{}).AppendTags("requestHeaders", dumpRequest(r))
ctx := logger.SetReqInfo(context.Background(), reqInfo)
ctx := logger.SetReqInfo(ctx, reqInfo)
logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description))
}
return s3Err
}
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) APIErrorCode {
isOwner := true
accountName := globalServerConfig.GetCredential().AccessKey
// Fetch the security token set by the client.
func getSessionToken(r *http.Request) (token string) {
token = r.Header.Get("X-Amz-Security-Token")
if token != "" {
return token
}
return r.URL.Query().Get("X-Amz-Security-Token")
}
// Fetch claims in the security token returned by the client; this doesn't return
// errors - upon an error the returned claims map will be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
claims, _ := getClaimsFromToken(r)
return claims
}
// Fetch claims in the security token returned by the client.
func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
claims := make(map[string]interface{})
token := getSessionToken(r)
if token == "" {
return claims, nil
}
stsTokenCallback := func(jwtToken *jwtgo.Token) (interface{}, error) {
// JWT token for x-amz-security-token is signed with the admin
// secret key, so temporary credentials become invalid if the
// server admin credentials change. This is done to ensure
// that clients cannot decode the token using the temp
// secret keys and generate an entirely new claim by essentially
// hijacking the policies. We need to make sure that this is
// based on an admin credential so that the token cannot be decoded
// on the client side and is treated as an opaque value.
return []byte(globalServerConfig.GetCredential().SecretKey), nil
}
p := &jwtgo.Parser{
ValidMethods: []string{
jwtgo.SigningMethodHS256.Alg(),
jwtgo.SigningMethodHS512.Alg(),
},
}
jtoken, err := p.ParseWithClaims(token, jwtgo.MapClaims(claims), stsTokenCallback)
if err != nil {
return nil, err
}
if !jtoken.Valid {
return nil, errAuthentication
}
v, ok := claims["accessKey"]
if !ok {
return nil, errInvalidAccessKeyID
}
if _, ok = v.(string); !ok {
return nil, errInvalidAccessKeyID
}
return claims, nil
}
// Fetch claims in the security token returned by the client and validate the token.
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
token := getSessionToken(r)
if token != "" && cred.AccessKey == "" {
return nil, ErrNoAccessKey
}
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
return nil, ErrInvalidToken
}
claims, err := getClaimsFromToken(r)
if err != nil {
return nil, toAPIErrorCode(context.Background(), err)
}
return claims, ErrNone
}
// Check request auth type verifies the incoming http request
// - validates the request signature
// - validates the policy action: for anonymous requests it tests bucket policies, if any;
//   for authenticated requests it validates IAM policies.
// returns APIErrorCode if any to be replied to the client.
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
var cred auth.Credentials
var owner bool
switch getRequestAuthType(r) {
case authTypeUnknown:
case authTypeUnknown, authTypeStreamingSigned:
return ErrAccessDenied
case authTypePresignedV2, authTypeSignedV2:
if errorCode := isReqAuthenticatedV2(r); errorCode != ErrNone {
return errorCode
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return s3Err
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeSigned, authTypePresigned:
region := globalServerConfig.GetRegion()
switch action {
case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
region = ""
}
if errorCode := isReqAuthenticated(r, region); errorCode != ErrNone {
return errorCode
if s3Err = isReqAuthenticated(ctx, r, region, serviceS3); s3Err != ErrNone {
return s3Err
}
default:
isOwner = false
accountName = ""
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return s3Err
}
claims, s3Err := checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return s3Err
}
// LocationConstraint is valid only for CreateBucketAction.
@@ -174,17 +278,31 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
}
if globalPolicySys.IsAllowed(policy.Args{
AccountName: accountName,
Action: action,
if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: action,
BucketName: bucketName,
ConditionValues: getConditionValues(r, locationConstraint, ""),
IsOwner: false,
ObjectName: objectName,
}) {
return ErrNone
}
return ErrAccessDenied
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, locationConstraint),
IsOwner: isOwner,
ConditionValues: getConditionValues(r, "", cred.AccessKey),
ObjectName: objectName,
IsOwner: owner,
Claims: claims,
}) {
return ErrNone
}
return ErrAccessDenied
}
@@ -196,21 +314,21 @@ func isReqAuthenticatedV2(r *http.Request) (s3Error APIErrorCode) {
return doesPresignV2SignatureMatch(r)
}
func reqSignatureV4Verify(r *http.Request, region string) (s3Error APIErrorCode) {
sha256sum := getContentSha256Cksum(r)
func reqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error APIErrorCode) {
sha256sum := getContentSha256Cksum(r, stype)
switch {
case isRequestSignatureV4(r):
return doesSignatureMatch(sha256sum, r, region)
return doesSignatureMatch(sha256sum, r, region, stype)
case isRequestPresignedSignatureV4(r):
return doesPresignedSignatureMatch(sha256sum, r, region)
return doesPresignedSignatureMatch(sha256sum, r, region, stype)
default:
return ErrAccessDenied
}
}
// Verify if request has valid AWS Signature Version '4'.
func isReqAuthenticated(r *http.Request, region string) (s3Error APIErrorCode) {
if errCode := reqSignatureV4Verify(r, region); errCode != ErrNone {
func isReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error APIErrorCode) {
if errCode := reqSignatureV4Verify(r, region, stype); errCode != ErrNone {
return errCode
}
@@ -244,9 +362,9 @@ func isReqAuthenticated(r *http.Request, region string) (s3Error APIErrorCode) {
// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
// The verification happens implicitly during reading.
reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256))
reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256), -1)
if err != nil {
return toAPIErrorCode(err)
return toAPIErrorCode(ctx, err)
}
r.Body = ioutil.NopCloser(reader)
return ErrNone
@@ -288,12 +406,67 @@ func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
} else if aType == authTypeJWT {
// Validate Authorization header if its valid for JWT request.
if !isHTTPRequestValid(r) {
if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
w.WriteHeader(http.StatusUnauthorized)
return
}
a.handler.ServeHTTP(w, r)
return
} else if aType == authTypeSTS {
a.handler.ServeHTTP(w, r)
return
}
writeErrorResponse(w, ErrSignatureVersionNotSupported, r.URL)
writeErrorResponse(context.Background(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
}
// isPutAllowed - checks if the PUT operation is allowed on the resource;
// this call verifies bucket policies and IAM policies, and supports
// multi-user checks, etc.
func isPutAllowed(atype authType, bucketName, objectName string, r *http.Request) (s3Err APIErrorCode) {
var cred auth.Credentials
var owner bool
switch atype {
case authTypeUnknown:
return ErrAccessDenied
case authTypeSignedV2, authTypePresignedV2:
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
region := globalServerConfig.GetRegion()
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return s3Err
}
claims, s3Err := checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return s3Err
}
if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.PutObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", ""),
IsOwner: false,
ObjectName: objectName,
}) {
return ErrNone
}
return ErrAccessDenied
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: policy.PutObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey),
ObjectName: objectName,
IsOwner: owner,
Claims: claims,
}) {
return ErrNone
}
return ErrAccessDenied
}

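A minimal sketch of how a session token like the one parsed by getClaimsFromToken above can be minted and verified with dgrijalva/jwt-go. Assumptions: the secret and access key values are hypothetical stand-ins, and HS512 is chosen arbitrarily from the two methods the patch accepts.

package main

import (
	"fmt"

	jwtgo "github.com/dgrijalva/jwt-go"
)

func main() {
	secret := []byte("admin-secret-key") // stands in for the server admin credential

	// Mint a token carrying the temporary access key as a claim.
	tok := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.MapClaims{
		"accessKey": "TEMPACCESSKEY",
	})
	signed, err := tok.SignedString(secret)
	if err != nil {
		panic(err)
	}

	// Parse it back, restricting the accepted signing methods as the patch does.
	claims := jwtgo.MapClaims{}
	p := &jwtgo.Parser{ValidMethods: []string{
		jwtgo.SigningMethodHS256.Alg(),
		jwtgo.SigningMethodHS512.Alg(),
	}}
	parsed, err := p.ParseWithClaims(signed, claims, func(*jwtgo.Token) (interface{}, error) {
		return secret, nil
	})
	if err != nil || !parsed.Valid {
		panic("invalid token")
	}
	fmt.Println("accessKey claim:", claims["accessKey"])
}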

@@ -18,6 +18,7 @@ package cmd
import (
"bytes"
"context"
"io"
"io/ioutil"
"net/http"
@@ -377,11 +378,13 @@ func TestIsReqAuthenticated(t *testing.T) {
{mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
}
ctx := context.Background()
// Validates all testcases.
for i, testCase := range testCases {
if s3Error := isReqAuthenticated(testCase.req, globalServerConfig.GetRegion()); s3Error != testCase.s3Error {
if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(err) != testCase.s3Error {
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %d)", i, testCase.s3Error, s3Error, toAPIErrorCode(err))
s3Error := isReqAuthenticated(ctx, testCase.req, globalServerConfig.GetRegion(), serviceS3)
if s3Error != testCase.s3Error {
if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
}
}
}
@@ -413,8 +416,9 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
{Request: mustNewPresignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
}
ctx := context.Background()
for i, testCase := range testCases {
if s3Error := checkAdminRequestAuthType(testCase.Request, globalServerConfig.GetRegion()); s3Error != testCase.ErrCode {
if s3Error := checkAdminRequestAuthType(ctx, testCase.Request, globalServerConfig.GetRegion()); s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
}
}


@@ -49,7 +49,6 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for PutObject.
metadata := make(map[string]string)
md5hex := getMD5Hash(textData)
sha256hex := ""
@@ -61,7 +60,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
for i := 0; i < b.N; i++ {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -96,14 +95,11 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for NewMultipartUpload.
metadata := make(map[string]string)
opts := ObjectOptions{}
uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, metadata, opts)
uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, ObjectOptions{})
if err != nil {
b.Fatal(err)
}
md5hex := getMD5Hash(textData)
sha256hex := ""
var textPartData []byte
@@ -120,10 +116,10 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
} else {
textPartData = textData[j*partSize:]
}
md5hex = getMD5Hash([]byte(textPartData))
md5hex := getMD5Hash([]byte(textPartData))
var partInfo PartInfo
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
mustGetHashReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), opts)
mustGetPutObjReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -194,7 +190,6 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// generate etag for the generated data.
// etag of the data to be written is required as input for PutObject.
// PutObject is the function which writes the data onto the FS/XL backend.
metadata := make(map[string]string)
// get text data generated for number of bytes equal to object size.
md5hex := getMD5Hash(textData)
@@ -204,7 +199,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -234,10 +229,8 @@ func getRandomByte() []byte {
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
// seeding the random number generator.
rand.Seed(UTCNow().UnixNano())
var b byte
// pick a character randomly.
b = letterBytes[rand.Intn(len(letterBytes))]
return []byte{b}
return []byte{letterBytes[rand.Intn(len(letterBytes))]}
}
// picks a random byte and repeats it to size bytes.
@@ -289,7 +282,6 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for PutObject.
metadata := make(map[string]string)
md5hex := getMD5Hash([]byte(textData))
sha256hex := ""
@@ -304,7 +296,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
for pb.Next() {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -335,7 +327,6 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for PutObject.
// PutObject is the function which writes the data onto the FS/XL backend.
metadata := make(map[string]string)
md5hex := getMD5Hash([]byte(textData))
sha256hex := ""
@@ -344,7 +335,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}

cmd/bitrot-streaming.go Normal file

@@ -0,0 +1,172 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"encoding/hex"
"hash"
"io"
"github.com/minio/minio/cmd/logger"
)
// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow *io.PipeWriter
h hash.Hash
shardSize int64
canClose chan struct{} // Needed to avoid race explained in Close() call.
// Following two fields are used only to make sure that Write(p) is called such that
// len(p) is always the block size except for the last block, i.e. to prevent programmer errors.
currentBlockIdx int
verifyTillIdx int
}
func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
if b.currentBlockIdx < b.verifyTillIdx && int64(len(p)) != b.shardSize {
// All blocks except the last should be of length b.shardSize
logger.LogIf(context.Background(), errUnexpected)
return 0, errUnexpected
}
if len(p) == 0 {
return 0, nil
}
b.h.Reset()
b.h.Write(p)
hashBytes := b.h.Sum(nil)
n, err := b.iow.Write(hashBytes)
if n != len(hashBytes) {
logger.LogIf(context.Background(), err)
return 0, err
}
n, err = b.iow.Write(p)
b.currentBlockIdx++
return n, err
}
func (b *streamingBitrotWriter) Close() error {
err := b.iow.Close()
// Wait for all data to be written before returning, else it causes race conditions.
// The race condition comes from the io.PipeWriter implementation, i.e. consider the following
// sequence of operations:
// 1) pipe.Write()
// 2) pipe.Close()
// Now pipe.Close() can return before the data is read on the other end of the pipe and written to the disk.
// Hence an immediate Read() on the file can return incorrect data.
<-b.canClose
return err
}
// Returns streaming bitrot writer implementation.
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
r, w := io.Pipe()
h := algo.New()
bw := &streamingBitrotWriter{w, h, shardSize, make(chan struct{}), 0, int(length / shardSize)}
go func() {
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
totalFileSize := bitrotSumsTotalSize + length
err := disk.CreateFile(volume, filePath, totalFileSize, r)
if err != nil {
logger.LogIf(context.Background(), err)
r.CloseWithError(err)
}
close(bw.canClose)
}()
return bw
}
// ReadAt() implementation which verifies the bitrot hash available as part of the stream.
type streamingBitrotReader struct {
disk StorageAPI
rc io.ReadCloser
volume string
filePath string
tillOffset int64
currOffset int64
h hash.Hash
shardSize int64
hashBytes []byte
}
func (b *streamingBitrotReader) Close() error {
if b.rc == nil {
return nil
}
return b.rc.Close()
}
func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
var err error
if offset%b.shardSize != 0 {
// Offset should always be aligned to b.shardSize
logger.LogIf(context.Background(), errUnexpected)
return 0, errUnexpected
}
if b.rc == nil {
// For the first ReadAt() call we need to open the stream for reading.
b.currOffset = offset
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
b.rc, err = b.disk.ReadFileStream(b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
if err != nil {
logger.LogIf(context.Background(), err)
return 0, err
}
}
if offset != b.currOffset {
logger.LogIf(context.Background(), errUnexpected)
return 0, errUnexpected
}
b.h.Reset()
_, err = io.ReadFull(b.rc, b.hashBytes)
if err != nil {
logger.LogIf(context.Background(), err)
return 0, err
}
_, err = io.ReadFull(b.rc, buf)
if err != nil {
logger.LogIf(context.Background(), err)
return 0, err
}
b.h.Write(buf)
if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
err = hashMismatchError{hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))}
logger.LogIf(context.Background(), err)
return 0, err
}
b.currOffset += int64(len(buf))
return len(buf), nil
}
// Returns streaming bitrot reader implementation.
func newStreamingBitrotReader(disk StorageAPI, volume, filePath string, tillOffset int64, algo BitrotAlgorithm, shardSize int64) *streamingBitrotReader {
h := algo.New()
return &streamingBitrotReader{
disk,
nil,
volume,
filePath,
ceilFrac(tillOffset, shardSize)*int64(h.Size()) + tillOffset,
0,
h,
shardSize,
make([]byte, h.Size()),
}
}

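The size bookkeeping above follows directly from the stream layout: each shard-sized block of data is preceded by its hash. A worked sketch of the arithmetic (values chosen to match the 35-byte, 10-byte-shard test later in this changeset; the 32-byte hash size assumes HighwayHash-256, and ceilFrac is simplified here):

package main

import "fmt"

// ceilFrac is a simplified version of the helper used above: ceiling division.
func ceilFrac(numerator, denominator int64) int64 {
	return (numerator + denominator - 1) / denominator
}

func main() {
	const (
		length    int64 = 35 // logical data length
		shardSize int64 = 10 // erasure shard size
		hashSize  int64 = 32 // e.g. a HighwayHash-256 sum
	)

	// Total bytes on disk: one hash per (possibly partial) shard, plus the data.
	bitrotSums := ceilFrac(length, shardSize) * hashSize
	fmt.Println("total file size:", bitrotSums+length) // 4*32 + 35 = 163

	// Stream offset for an aligned data offset, as in streamingBitrotReader.ReadAt.
	offset := int64(20)
	streamOffset := (offset/shardSize)*hashSize + offset
	fmt.Println("stream offset for data offset 20:", streamOffset) // 2*32 + 20 = 84
}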
cmd/bitrot-whole.go Normal file

@@ -0,0 +1,109 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"hash"
"io"
"github.com/minio/minio/cmd/logger"
)
// Implementation to calculate bitrot for the whole file.
type wholeBitrotWriter struct {
disk StorageAPI
volume string
filePath string
shardSize int64 // This is the shard size of the erasure logic
hash.Hash // For bitrot hash
// Following two fields are used only to make sure that Write(p) is called such that
// len(p) is always the block size except for the last block, to prevent programmer errors.
currentBlockIdx int
lastBlockIdx int
}
func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
if b.currentBlockIdx < b.lastBlockIdx && int64(len(p)) != b.shardSize {
// All blocks except the last should be of length b.shardSize
logger.LogIf(context.Background(), errUnexpected)
return 0, errUnexpected
}
err := b.disk.AppendFile(b.volume, b.filePath, p)
if err != nil {
logger.LogIf(context.Background(), err)
return 0, err
}
_, err = b.Hash.Write(p)
if err != nil {
logger.LogIf(context.Background(), err)
return 0, err
}
b.currentBlockIdx++
return len(p), nil
}
func (b *wholeBitrotWriter) Close() error {
return nil
}
// Returns whole-file bitrot writer.
func newWholeBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
return &wholeBitrotWriter{disk, volume, filePath, shardSize, algo.New(), 0, int(length / shardSize)}
}
// Implementation to verify bitrot for the whole file.
type wholeBitrotReader struct {
disk StorageAPI
volume string
filePath string
verifier *BitrotVerifier // Holds the bit-rot info
tillOffset int64 // Affects the length of data requested in disk.ReadFile depending on Read()'s offset
buf []byte // Holds bit-rot verified data
}
func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error) {
if b.buf == nil {
b.buf = make([]byte, b.tillOffset-offset)
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
ctx := context.Background()
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
logger.LogIf(ctx, err)
return 0, err
}
}
if len(b.buf) < len(buf) {
logger.LogIf(context.Background(), errLessData)
return 0, errLessData
}
n = copy(buf, b.buf)
b.buf = b.buf[n:]
return n, nil
}
// Returns whole-file bitrot reader.
func newWholeBitrotReader(disk StorageAPI, volume, filePath string, algo BitrotAlgorithm, tillOffset int64, sum []byte) *wholeBitrotReader {
return &wholeBitrotReader{
disk: disk,
volume: volume,
filePath: filePath,
verifier: &BitrotVerifier{algo, sum},
tillOffset: tillOffset,
buf: nil,
}
}

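For reference, a minimal sketch of the whole-file scheme: one HighwayHash-256 state accumulates every written block, and the single sum is kept as the file's bitrot checksum. The zero-filled key below is a hypothetical stand-in for magicHighwayHash256Key.

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/minio/highwayhash"
)

func main() {
	key := make([]byte, 32) // HighwayHash requires a 256-bit key
	h, err := highwayhash.New(key)
	if err != nil {
		panic(err)
	}
	// Hash the file contents block by block, as wholeBitrotWriter does on Write.
	for _, block := range [][]byte{[]byte("aaaaaaaaaa"), []byte("aaaaa")} {
		h.Write(block)
	}
	fmt.Println("bitrot sum:", hex.EncodeToString(h.Sum(nil)))
}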

@@ -20,6 +20,7 @@ import (
"context"
"errors"
"hash"
"io"
"github.com/minio/highwayhash"
"github.com/minio/minio/cmd/logger"
@@ -38,19 +39,22 @@ const (
SHA256 BitrotAlgorithm = 1 + iota
// HighwayHash256 represents the HighwayHash-256 hash function
HighwayHash256
// HighwayHash256S represents the Streaming HighwayHash-256 hash function
HighwayHash256S
// BLAKE2b512 represents the BLAKE2b-512 hash function
BLAKE2b512
)
// DefaultBitrotAlgorithm is the default algorithm used for bitrot protection.
const (
DefaultBitrotAlgorithm = HighwayHash256
DefaultBitrotAlgorithm = HighwayHash256S
)
var bitrotAlgorithms = map[BitrotAlgorithm]string{
SHA256: "sha256",
BLAKE2b512: "blake2b",
HighwayHash256: "highwayhash256",
SHA256: "sha256",
BLAKE2b512: "blake2b",
HighwayHash256: "highwayhash256",
HighwayHash256S: "highwayhash256S",
}
// New returns a new hash.Hash calculating the given bitrot algorithm.
@@ -64,6 +68,9 @@ func (a BitrotAlgorithm) New() hash.Hash {
case HighwayHash256:
hh, _ := highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
return hh
case HighwayHash256S:
hh, _ := highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
return hh
default:
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
return nil
@@ -109,86 +116,72 @@ func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
return
}
// To read bit-rot verified data.
type bitrotReader struct {
disk StorageAPI
volume string
filePath string
verifier *BitrotVerifier // Holds the bit-rot info
endOffset int64 // Affects the length of data requested in disk.ReadFile depending on Read()'s offset
buf []byte // Holds bit-rot verified data
}
// newBitrotReader returns bitrotReader.
// Note that the buffer is allocated later in Read(). This is because we will know the buffer length only
// during the bitrotReader.Read(). Depending on when parallelReader fails-over, the buffer length can be different.
func newBitrotReader(disk StorageAPI, volume, filePath string, algo BitrotAlgorithm, endOffset int64, sum []byte) *bitrotReader {
return &bitrotReader{
disk: disk,
volume: volume,
filePath: filePath,
verifier: &BitrotVerifier{algo, sum},
endOffset: endOffset,
buf: nil,
func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
if algo == HighwayHash256S {
return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize)
}
return newWholeBitrotWriter(disk, volume, filePath, length, algo, shardSize)
}
// ReadChunk returns requested data.
func (b *bitrotReader) ReadChunk(offset int64, length int64) ([]byte, error) {
if b.buf == nil {
b.buf = make([]byte, b.endOffset-offset)
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
ctx := context.Background()
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
logger.LogIf(ctx, err)
return nil, err
func newBitrotReader(disk StorageAPI, bucket string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) io.ReaderAt {
if algo == HighwayHash256S {
return newStreamingBitrotReader(disk, bucket, filePath, tillOffset, algo, shardSize)
}
return newWholeBitrotReader(disk, bucket, filePath, algo, tillOffset, sum)
}
// Close all the readers.
func closeBitrotReaders(rs []io.ReaderAt) {
for _, r := range rs {
if br, ok := r.(*streamingBitrotReader); ok {
br.Close()
}
}
if int64(len(b.buf)) < length {
logger.LogIf(context.Background(), errLessData)
return nil, errLessData
}
retBuf := b.buf[:length]
b.buf = b.buf[length:]
return retBuf, nil
}
// To calculate the bit-rot of the written data.
type bitrotWriter struct {
disk StorageAPI
volume string
filePath string
h hash.Hash
}
// newBitrotWriter returns bitrotWriter.
func newBitrotWriter(disk StorageAPI, volume, filePath string, algo BitrotAlgorithm) *bitrotWriter {
return &bitrotWriter{
disk: disk,
volume: volume,
filePath: filePath,
h: algo.New(),
// Close all the writers.
func closeBitrotWriters(ws []io.Writer) {
for _, w := range ws {
if bw, ok := w.(*streamingBitrotWriter); ok {
bw.Close()
}
}
}
// Append appends the data while calculating the hash.
func (b *bitrotWriter) Append(buf []byte) error {
n, err := b.h.Write(buf)
if err != nil {
return err
}
if n != len(buf) {
logger.LogIf(context.Background(), errUnexpected)
return errUnexpected
}
if err = b.disk.AppendFile(b.volume, b.filePath, buf); err != nil {
logger.LogIf(context.Background(), err)
return err
// Returns hash sum for whole-bitrot, nil for streaming-bitrot.
func bitrotWriterSum(w io.Writer) []byte {
if bw, ok := w.(*wholeBitrotWriter); ok {
return bw.Sum(nil)
}
return nil
}
// Sum returns bit-rot sum.
func (b *bitrotWriter) Sum() []byte {
return b.h.Sum(nil)
// Verify if a file has a bitrot error.
func bitrotCheckFile(disk StorageAPI, volume string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) (err error) {
if algo != HighwayHash256S {
buf := []byte{}
// For whole-file bitrot we don't need to read the entire file, as the bitrot verification happens on the server side even if we read 0 bytes.
_, err = disk.ReadFile(volume, filePath, 0, buf, NewBitrotVerifier(algo, sum))
return err
}
buf := make([]byte, shardSize)
r := newStreamingBitrotReader(disk, volume, filePath, tillOffset, algo, shardSize)
defer closeBitrotReaders([]io.ReaderAt{r})
var offset int64
for {
if offset == tillOffset {
break
}
var n int
tmpBuf := buf
if int64(len(tmpBuf)) > (tillOffset - offset) {
tmpBuf = tmpBuf[:(tillOffset - offset)]
}
n, err = r.ReadAt(tmpBuf, offset)
if err != nil {
return err
}
offset += int64(n)
}
return nil
}


@@ -17,13 +17,14 @@
package cmd
import (
"io"
"io/ioutil"
"log"
"os"
"testing"
)
func TestBitrotReaderWriter(t *testing.T) {
func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
tmpDir, err := ioutil.TempDir("", "")
if err != nil {
log.Fatal(err)
@@ -40,32 +41,44 @@ func TestBitrotReaderWriter(t *testing.T) {
disk.MakeVol(volume)
writer := newBitrotWriter(disk, volume, filePath, HighwayHash256)
writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
err = writer.Append([]byte("aaaaaaaaa"))
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
}
err = writer.Append([]byte("a"))
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
}
err = writer.Append([]byte("aaaaaaaaaa"))
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
}
err = writer.Append([]byte("aaaaa"))
if err != nil {
log.Fatal(err)
}
err = writer.Append([]byte("aaaaaaaaaa"))
_, err = writer.Write([]byte("aaaaa"))
if err != nil {
log.Fatal(err)
}
writer.(io.Closer).Close()
reader := newBitrotReader(disk, volume, filePath, HighwayHash256, 35, writer.Sum())
if _, err = reader.ReadChunk(0, 35); err != nil {
reader := newBitrotReader(disk, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
b := make([]byte, 10)
if _, err = reader.ReadAt(b, 0); err != nil {
log.Fatal(err)
}
if _, err = reader.ReadAt(b, 10); err != nil {
log.Fatal(err)
}
if _, err = reader.ReadAt(b, 20); err != nil {
log.Fatal(err)
}
if _, err = reader.ReadAt(b[:5], 30); err != nil {
log.Fatal(err)
}
}
func TestAllBitrotAlgorithms(t *testing.T) {
for bitrotAlgo := range bitrotAlgorithms {
testBitrotReaderWriterAlgo(t, bitrotAlgo)
}
}


@@ -18,9 +18,11 @@ package cmd
import (
"net/http"
"strings"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/policy"
)
@@ -31,12 +33,19 @@ import (
// - delimiter if set should be equal to '/', otherwise the request is rejected.
// - marker if set should have a common prefix with 'prefix' param, otherwise
// the request is rejected.
func validateListObjectsArgs(prefix, marker, delimiter string, maxKeys int) APIErrorCode {
func validateListObjectsArgs(prefix, marker, delimiter, encodingType string, maxKeys int) APIErrorCode {
// Max keys cannot be negative.
if maxKeys < 0 {
return ErrInvalidMaxKeys
}
if encodingType != "" {
// Only url encoding type is supported
if strings.ToLower(encodingType) != "url" {
return ErrInvalidEncodingMethod
}
}
/// Minio special conditions for ListObjects.
// Verify if delimiter is anything other than '/', which we do not support.
@@ -58,36 +67,38 @@ func validateListObjectsArgs(prefix, marker, delimiter string, maxKeys int) APIE
func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectsV2")
defer logger.AuditLog(w, r, "ListObjectsV2", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
urlValues := r.URL.Query()
// Extract all the listObjectsV2 query params to their native values.
prefix, token, startAfter, delimiter, fetchOwner, maxKeys, _, errCode := getListObjectsV2Args(urlValues)
prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, errCode := getListObjectsV2Args(urlValues)
if errCode != ErrNone {
writeErrorResponse(w, errCode, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}
// Validate the query params before beginning to serve the request.
// fetch-owner is not validated since it is a boolean
if s3Error := validateListObjectsArgs(prefix, token, delimiter, maxKeys); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
if s3Error := validateListObjectsArgs(prefix, token, delimiter, encodingType, maxKeys); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
listObjectsV2 := objectAPI.ListObjectsV2
if api.CacheAPI() != nil {
listObjectsV2 = api.CacheAPI().ListObjectsV2
@@ -97,22 +108,33 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
// marshaled into S3 compatible XML header.
listObjectsV2Info, err := listObjectsV2(ctx, bucket, prefix, token, delimiter, maxKeys, fetchOwner, startAfter)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
for i := range listObjectsV2Info.Objects {
if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
var actualSize int64
if listObjectsV2Info.Objects[i].IsCompressed() {
// Read the decompressed size from the meta.json.
actualSize = listObjectsV2Info.Objects[i].GetActualSize()
if actualSize < 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDecompressedSize), r.URL, guessIsBrowserReq(r))
return
}
// Set the info.Size to the actualSize.
listObjectsV2Info.Objects[i].Size = actualSize
} else if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].DecryptedSize()
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
}
response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
delimiter, fetchOwner, listObjectsV2Info.IsTruncated, maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes)
delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated, maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes)
// Write success response.
writeSuccessResponseXML(w, encodeResponse(response))
@@ -127,57 +149,70 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectsV1")
defer logger.AuditLog(w, r, "ListObjectsV1", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Extract all the listObjectsV1 query params to their native values.
prefix, marker, delimiter, maxKeys, _ := getListObjectsV1Args(r.URL.Query())
// Validate the maxKeys lower bound. When maxKeys > 1000, S3 returns 1000 but
// does not throw an error.
if maxKeys < 0 {
writeErrorResponse(w, ErrInvalidMaxKeys, r.URL)
return
}
// Validate all the query params before beginning to serve the request.
if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
prefix, marker, delimiter, maxKeys, encodingType, s3Error := getListObjectsV1Args(r.URL.Query())
if s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Validate all the query params before beginning to serve the request.
if s3Error := validateListObjectsArgs(prefix, marker, delimiter, encodingType, maxKeys); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
listObjects := objectAPI.ListObjects
if api.CacheAPI() != nil {
listObjects = api.CacheAPI().ListObjects
}
// Initiate a list objects operation based on the input params.
// On success would return back ListObjectsInfo object to be
// marshaled into S3 compatible XML header.
listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
for i := range listObjectsInfo.Objects {
if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
var actualSize int64
if listObjectsInfo.Objects[i].IsCompressed() {
// Read the decompressed size from the meta.json.
actualSize = listObjectsInfo.Objects[i].GetActualSize()
if actualSize < 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDecompressedSize), r.URL, guessIsBrowserReq(r))
return
}
// Set the info.Size to the actualSize.
listObjectsInfo.Objects[i].Size = actualSize
} else if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false)
listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].DecryptedSize()
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
}
response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, maxKeys, listObjectsInfo)
response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)
// Write success response.
writeSuccessResponseXML(w, encodeResponse(response))

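The encodingType plumbing above accepts only the case-insensitive value "url"; with it set, object keys in the XML listing come back URL-encoded. A small sketch of both halves, with net/url's QueryEscape standing in as an approximation of the real S3 path encoder:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// validateEncodingType mirrors the check added in validateListObjectsArgs:
// only the case-insensitive value "url" is accepted.
func validateEncodingType(encodingType string) error {
	if encodingType != "" && strings.ToLower(encodingType) != "url" {
		return fmt.Errorf("InvalidEncodingMethod: %q", encodingType)
	}
	return nil
}

func main() {
	if err := validateEncodingType("URL"); err != nil {
		panic(err)
	}
	// With encoding-type=url, keys in the XML response are returned encoded.
	fmt.Println(url.QueryEscape("photos/2019 summer/photo.jpg"))
	// Output: photos%2F2019+summer%2Fphoto.jpg
}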

@@ -89,17 +89,19 @@ func initFederatorBackend(objLayer ObjectLayer) {
func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketLocation")
defer logger.AuditLog(w, r, "GetBucketLocation", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketLocationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
@@ -108,7 +110,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
getBucketInfo = api.CacheAPI().GetBucketInfo
}
if _, err := getBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -137,40 +139,48 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListMultipartUploads")
defer logger.AuditLog(w, r, "ListMultipartUploads", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketMultipartUploadsAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, _ := getBucketMultipartResources(r.URL.Query())
if maxUploads < 0 {
writeErrorResponse(w, ErrInvalidMaxUploads, r.URL)
prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode := getBucketMultipartResources(r.URL.Query())
if errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}
if maxUploads < 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxUploads), r.URL, guessIsBrowserReq(r))
return
}
if keyMarker != "" {
// Marker not common with prefix is not implemented.
if !hasPrefix(keyMarker, prefix) {
writeErrorResponse(w, ErrNotImplemented, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
}
listMultipartsInfo, err := objectAPI.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// generate response
response := generateListMultipartUploadsResponse(bucket, listMultipartsInfo)
response := generateListMultipartUploadsResponse(bucket, listMultipartsInfo, encodingType)
encodedSuccessResponse := encodeResponse(response)
// write success response.
@@ -184,27 +194,30 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListBuckets")
defer logger.AuditLog(w, r, "ListBuckets", mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
listBuckets := objectAPI.ListBuckets
listBuckets := objectAPI.ListBuckets
if api.CacheAPI() != nil {
listBuckets = api.CacheAPI().ListBuckets
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListAllMyBucketsAction, "", ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// If etcd-based DNS federation is configured, list buckets from etcd.
var bucketsInfo []BucketInfo
if globalDNSConfig != nil {
dnsBuckets, err := globalDNSConfig.List()
if err != nil && err != dns.ErrNoEntriesFound {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
bucketSet := set.NewStringSet()
@@ -223,7 +236,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
var err error
bucketsInfo, err = listBuckets(ctx)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
@@ -240,12 +253,14 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteMultipleObjects")
defer logger.AuditLog(w, r, "DeleteMultipleObjects", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
@@ -254,7 +269,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// In the event access is denied, a 200 response should still be returned
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if s3Error != ErrAccessDenied {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
}
@@ -262,14 +277,14 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Content-Length is required and should be non-zero
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if r.ContentLength <= 0 {
writeErrorResponse(w, ErrMissingContentLength, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
return
}
// Content-Md5 is required and should be set
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if _, ok := r.Header["Content-Md5"]; !ok {
writeErrorResponse(w, ErrMissingContentMD5, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
return
}
@@ -285,7 +300,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Read incoming body XML bytes.
if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrInternalError, r.URL)
writeErrorResponse(ctx, w, toAdminAPIErr(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -293,7 +308,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
deleteObjects := &DeleteObjectsRequest{}
if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrMalformedXML, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
return
}
@@ -301,7 +316,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
if globalWORMEnabled {
// Not required to check whether given objects exist or not, because
// DeleteMultipleObject is always successful irrespective of object existence.
writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
return
}
@@ -340,10 +355,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
deletedObjects = append(deletedObjects, object)
continue
}
apiErr := toAPIError(ctx, err)
// Error during delete should be collected separately.
deleteErrors = append(deleteErrors, DeleteError{
Code: errorCodeResponse[toAPIErrorCode(err)].Code,
Message: errorCodeResponse[toAPIErrorCode(err)].Description,
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
})
}
@@ -370,10 +386,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
Object: ObjectInfo{
Name: dobj.ObjectName,
},
ReqParams: extractReqParams(r),
UserAgent: r.UserAgent(),
Host: host,
Port: port,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: host,
Port: port,
})
}
}
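For reference, the request body that DeleteMultipleObjectsHandler unmarshals has the standard S3 <Delete> shape. A self-contained sketch with stand-in types (the internal DeleteObjectsRequest differs in detail):

package main

import (
	"encoding/xml"
	"fmt"
)

// ObjectIdentifier and DeleteObjectsRequest are illustrative stand-ins
// matching the wire format of the S3 Delete Multiple Objects API.
type ObjectIdentifier struct {
	ObjectName string `xml:"Key"`
}

type DeleteObjectsRequest struct {
	XMLName xml.Name           `xml:"Delete"`
	Quiet   bool               `xml:"Quiet"`
	Objects []ObjectIdentifier `xml:"Object"`
}

func main() {
	body := []byte(`<Delete><Quiet>true</Quiet><Object><Key>a.txt</Key></Object><Object><Key>b.txt</Key></Object></Delete>`)
	req := &DeleteObjectsRequest{}
	if err := xml.Unmarshal(body, req); err != nil {
		panic(err)
	}
	fmt.Printf("quiet=%v keys=%d first=%s\n", req.Quiet, len(req.Objects), req.Objects[0].ObjectName)
	// Output: quiet=true keys=2 first=a.txt
}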
@@ -384,9 +401,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucket")
defer logger.AuditLog(w, r, "PutBucket", mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
@@ -394,21 +413,21 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.CreateBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Parse incoming location constraint.
location, s3Error := parseLocationConstraint(r)
if s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Validate if location sent by the client is valid, reject
// requests which do not follow valid region requirements.
if !isValidLocation(location) {
writeErrorResponse(w, ErrInvalidRegion, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRegion), r.URL, guessIsBrowserReq(r))
return
}
@@ -417,33 +436,33 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
if err == dns.ErrNoEntriesFound {
// Proceed to creating a bucket.
if err = objectAPI.MakeBucketWithLocation(ctx, bucket, location); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err = globalDNSConfig.Put(bucket); err != nil {
objectAPI.DeleteBucket(ctx, bucket)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Make sure to add Location information here only for bucket
w.Header().Set("Location", getObjectLocation(r, globalDomainName, bucket, ""))
w.Header().Set("Location", getObjectLocation(r, globalDomainNames, bucket, ""))
writeSuccessResponseHeadersOnly(w)
return
}
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
writeErrorResponse(w, ErrBucketAlreadyOwnedByYou, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBucketAlreadyOwnedByYou), r.URL, guessIsBrowserReq(r))
return
}
// Proceed to creating a bucket.
err := objectAPI.MakeBucketWithLocation(ctx, bucket, location)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -460,9 +479,21 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PostPolicyBucket")
defer logger.AuditLog(w, r, "PostPolicyBucket", mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
if !api.EncryptionEnabled() && hasServerSideEncryptionHeader(r.Header) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
@@ -471,17 +502,17 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// Require Content-Length to be set in the request
size := r.ContentLength
if size < 0 {
writeErrorResponse(w, ErrMissingContentLength, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
return
}
resource, err := getResource(r.URL.Path, r.Host, globalDomainName)
resource, err := getResource(r.URL.Path, r.Host, globalDomainNames)
if err != nil {
writeErrorResponse(w, ErrInvalidRequest, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL, guessIsBrowserReq(r))
return
}
// Make sure that the URL does not contain object name.
if bucket != filepath.Clean(resource[1:]) {
writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
return
}
@@ -490,7 +521,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
reader, err := r.MultipartReader()
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
return
}
@@ -498,7 +529,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
form, err := reader.ReadForm(maxFormMemory)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
return
}
@@ -509,13 +540,13 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(ctx, form)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
return
}
// Check if file is provided, error out otherwise.
if fileBody == nil {
writeErrorResponse(w, ErrPOSTFileRequired, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPOSTFileRequired), r.URL, guessIsBrowserReq(r))
return
}
@@ -537,66 +568,83 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
if successRedirect != "" {
redirectURL, err = url.Parse(successRedirect)
if err != nil {
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
return
}
}
// Verify policy signature.
apiErr := doesPolicySignatureMatch(formValues)
if apiErr != ErrNone {
writeErrorResponse(w, apiErr, r.URL)
errCode := doesPolicySignatureMatch(formValues)
if errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}
policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
if err != nil {
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
return
}
postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
if err != nil {
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
return
}
// Make sure formValues adhere to policy restrictions.
if apiErr = checkPostPolicy(formValues, postPolicyForm); apiErr != ErrNone {
writeErrorResponse(w, apiErr, r.URL)
return
}
// Ensure that the object size is within the expected range, and that the
// file size does not exceed the maximum single PUT size (5 GiB).
lengthRange := postPolicyForm.Conditions.ContentLengthRange
if lengthRange.Valid {
if fileSize < lengthRange.Min {
writeErrorResponse(w, toAPIErrorCode(errDataTooSmall), r.URL)
// Handle policy if it is set.
if len(policyBytes) > 0 {
postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
return
}
if fileSize > lengthRange.Max || isMaxObjectSize(fileSize) {
writeErrorResponse(w, toAPIErrorCode(errDataTooLarge), r.URL)
// Make sure formValues adhere to policy restrictions.
if err = checkPostPolicy(formValues, postPolicyForm); err != nil {
writeCustomErrorResponseXML(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), err.Error(), r.URL, guessIsBrowserReq(r))
return
}
// Ensure that the object size is within the expected range, and that the
// file size does not exceed the maximum single PUT size (5 GiB).
lengthRange := postPolicyForm.Conditions.ContentLengthRange
if lengthRange.Valid {
if fileSize < lengthRange.Min {
writeErrorResponse(ctx, w, toAPIError(ctx, errDataTooSmall), r.URL, guessIsBrowserReq(r))
return
}
if fileSize > lengthRange.Max || isMaxObjectSize(fileSize) {
writeErrorResponse(ctx, w, toAPIError(ctx, errDataTooLarge), r.URL, guessIsBrowserReq(r))
return
}
}
}
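
Note: a runnable sketch of the content-length-range check described above; the struct is a simplified stand-in for the post-policy condition type.

package main

import (
	"errors"
	"fmt"
)

// contentLengthRange is a simplified stand-in for the post-policy condition.
type contentLengthRange struct {
	Valid    bool
	Min, Max int64
}

func checkSize(fileSize int64, lr contentLengthRange) error {
	if !lr.Valid {
		return nil // the policy did not constrain the size
	}
	if fileSize < lr.Min {
		return errors.New("data too small")
	}
	if fileSize > lr.Max {
		return errors.New("data too large")
	}
	return nil
}

func main() {
	lr := contentLengthRange{Valid: true, Min: 1, Max: 5 << 30} // 5 GiB cap
	fmt.Println(checkSize(0, lr))    // data too small
	fmt.Println(checkSize(1024, lr)) // <nil>
}
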
// Extract metadata to be saved from received Form.
metadata := make(map[string]string)
err = extractMetadataFromMap(ctx, formValues, metadata)
if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
hashReader, err := hash.NewReader(fileBody, fileSize, "", "")
hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
rawReader := hashReader
pReader := NewPutObjReader(rawReader, nil, nil)
var objectEncryptionKey []byte
// This request header needs to be set prior to setting ObjectOptions
if globalAutoEncryption && !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
}
// get gateway encryption options
var opts ObjectOptions
opts, err = putOpts(ctx, r, bucket, object, metadata)
if err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
if objectAPI.IsEncryptionSupported() {
if hasServerSideEncryptionHeader(formValues) && !hasSuffix(object, slashSeparator) { // handle SSE-C and SSE-S3 requests
var reader io.Reader
@@ -604,31 +652,32 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
if crypto.SSEC.IsRequested(formValues) {
key, err = ParseSSECustomerHeader(formValues)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
reader, err = newEncryptReader(hashReader, key, bucket, object, metadata, crypto.S3.IsRequested(formValues))
reader, objectEncryptionKey, err = newEncryptReader(hashReader, key, bucket, object, metadata, crypto.S3.IsRequested(formValues))
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
info := ObjectInfo{Size: fileSize}
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "") // do not try to verify encrypted content
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize) // do not try to verify encrypted content
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
pReader = NewPutObjReader(rawReader, hashReader, objectEncryptionKey)
}
}
objInfo, err := objectAPI.PutObject(ctx, bucket, object, hashReader, metadata, ObjectOptions{})
objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, opts)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
location := getObjectLocation(r, globalDomainName, bucket, object)
location := getObjectLocation(r, globalDomainNames, bucket, object)
w.Header().Set("ETag", `"`+objInfo.ETag+`"`)
w.Header().Set("Location", location)
@@ -640,13 +689,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// Notify object created event.
defer sendEvent(eventArgs{
EventName: event.ObjectCreatedPost,
BucketName: objInfo.Bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
UserAgent: r.UserAgent(),
Host: host,
Port: port,
EventName: event.ObjectCreatedPost,
BucketName: objInfo.Bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: host,
Port: port,
})
if successRedirect != "" {
@@ -682,17 +732,19 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "HeadBucket")
defer logger.AuditLog(w, r, "HeadBucket", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponseHeadersOnly(w, ErrServerNotInitialized)
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrServerNotInitialized))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponseHeadersOnly(w, s3Error)
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
return
}
@@ -701,7 +753,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
getBucketInfo = api.CacheAPI().GetBucketInfo
}
if _, err := getBucketInfo(ctx, bucket); err != nil {
writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
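
Note: a minimal sketch of why writeErrorResponseHeadersOnly exists: a HEAD response must not carry a body, so errors surface through the status line and headers alone. The handler below is hypothetical.

package sketch

import "net/http"

// writeErrorHeadersOnly sends a status code without a body; HEAD responses
// must not carry one, so no XML error document is written here.
func writeErrorHeadersOnly(w http.ResponseWriter, statusCode int) {
	w.WriteHeader(statusCode)
}

func headBucket(exists func(string) bool) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if !exists(r.URL.Path) {
			writeErrorHeadersOnly(w, http.StatusNotFound)
			return
		}
		w.WriteHeader(http.StatusOK)
	}
}
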
@@ -712,17 +764,19 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucket")
defer logger.AuditLog(w, r, "DeleteBucket", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
@@ -732,7 +786,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
}
// Attempt to delete bucket.
if err := deleteBucket(ctx, bucket); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -744,7 +798,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
if err := globalDNSConfig.Delete(bucket); err != nil {
// Deleting DNS entry failed, attempt to create the bucket again.
objectAPI.MakeBucketWithLocation(ctx, bucket, "")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
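
Note: a sketch of the compensating-action pattern above, under the assumption that re-creating the bucket is an acceptable rollback; the store and dnsConfig interfaces are simplified stand-ins.

package sketch

import (
	"context"
	"fmt"
)

type store interface {
	DeleteBucket(ctx context.Context, bucket string) error
	MakeBucket(ctx context.Context, bucket string) error
}

type dnsConfig interface {
	Delete(bucket string) error
}

// deleteBucketWithDNS mirrors the rollback above: if the DNS record cannot
// be removed after the bucket is gone, the bucket is re-created so the name
// keeps resolving to a live bucket.
func deleteBucketWithDNS(ctx context.Context, s store, d dnsConfig, bucket string) error {
	if err := s.DeleteBucket(ctx, bucket); err != nil {
		return err
	}
	if d == nil {
		return nil
	}
	if err := d.Delete(bucket); err != nil {
		s.MakeBucket(ctx, bucket) // compensating action; best effort
		return fmt.Errorf("dns delete failed, bucket restored: %v", err)
	}
	return nil
}
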

View File

@@ -95,12 +95,12 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
if !bytes.Equal(testCase.locationResponse, rec.Body.Bytes()) && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.locationResponse), string(rec.Body.Bytes()))
t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.locationResponse), rec.Body.String())
}
errorResponse := APIErrorResponse{}
err = xml.Unmarshal(rec.Body.Bytes(), &errorResponse)
if err != nil && !testCase.shouldPass {
t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, string(rec.Body.Bytes()))
t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, rec.Body.String())
}
if errorResponse.Resource != testCase.errorResponse.Resource {
t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
@@ -131,7 +131,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
errorResponse = APIErrorResponse{}
err = xml.Unmarshal(recV2.Body.Bytes(), &errorResponse)
if err != nil && !testCase.shouldPass {
t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, string(recV2.Body.Bytes()))
t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, recV2.Body.String())
}
if errorResponse.Resource != testCase.errorResponse.Resource {
t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
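
Note: a short, self-contained example of the httptest.ResponseRecorder usage these tests rely on; rec.Body.String() reads the captured response directly, which is why the string(rec.Body.Bytes()) calls were simplified.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, `<LocationConstraint>us-east-1</LocationConstraint>`)
	})
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/bucket?location=", nil))
	fmt.Println(rec.Code, rec.Body.String()) // 200 <LocationConstraint>...
}
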
@@ -625,7 +625,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
for i := 0; i < 10; i++ {
objectName := "test-object-" + strconv.Itoa(i)
// uploading the object.
_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil, ObjectOptions{})
_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)
@@ -645,8 +645,8 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
getDeleteErrorList := func(objects []ObjectIdentifier) (deleteErrorList []DeleteError) {
for _, obj := range objects {
deleteErrorList = append(deleteErrorList, DeleteError{
Code: errorCodeResponse[ErrAccessDenied].Code,
Message: errorCodeResponse[ErrAccessDenied].Description,
Code: errorCodes[ErrAccessDenied].Code,
Message: errorCodes[ErrAccessDenied].Description,
Key: obj.ObjectName,
})
}

View File

@@ -44,28 +44,30 @@ var errNoSuchNotifications = errors.New("The specified bucket does not have buck
func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketNotification")
defer logger.AuditLog(w, r, "GetBucketNotification", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucketName := vars["bucket"]
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if !objAPI.IsNotificationSupported() {
writeErrorResponse(w, ErrNotImplemented, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
_, err := objAPI.GetBucketInfo(ctx, bucketName)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -74,17 +76,21 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
if err != nil {
// Ignore errNoSuchNotifications to comply with AWS S3.
if err != errNoSuchNotifications {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
nConfig = &event.Config{}
}
// If xml namespace is empty, set a default value before returning.
if nConfig.XMLNS == "" {
nConfig.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
}
notificationBytes, err := xml.Marshal(nConfig)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -96,14 +102,16 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketNotification")
defer logger.AuditLog(w, r, "PutBucketNotification", mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if !objectAPI.IsNotificationSupported() {
writeErrorResponse(w, ErrNotImplemented, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
@@ -111,36 +119,36 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
bucketName := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
_, err := objectAPI.GetBucketInfo(ctx, bucketName)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// PutBucketNotification always needs a Content-Length.
if r.ContentLength <= 0 {
writeErrorResponse(w, ErrMissingContentLength, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
return
}
var config *event.Config
config, err = event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalServerConfig.GetRegion(), globalNotificationSys.targetList)
if err != nil {
apiErr := ErrMalformedXML
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
if event.IsEventError(err) {
apiErr = toAPIErrorCode(err)
apiErr = toAPIError(ctx, err)
}
writeErrorResponse(w, apiErr, r.URL)
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
return
}
if err = saveNotificationConfig(objectAPI, bucketName, config); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
if err = saveNotificationConfig(ctx, objectAPI, bucketName, config); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
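
Note: a hedged sketch of the bounded-body parsing above: io.LimitReader caps the XML decoder at Content-Length bytes, so a client cannot stream an unbounded body. notificationConfig is a minimal stand-in for event.Config.

package sketch

import (
	"encoding/xml"
	"errors"
	"io"
	"net/http"
)

type notificationConfig struct {
	XMLName xml.Name `xml:"NotificationConfiguration"`
}

func parseBoundedConfig(r *http.Request) (*notificationConfig, error) {
	if r.ContentLength <= 0 {
		return nil, errors.New("missing content length")
	}
	var cfg notificationConfig
	if err := xml.NewDecoder(io.LimitReader(r.Body, r.ContentLength)).Decode(&cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}
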
@@ -156,22 +164,29 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListenBucketNotification")
defer logger.AuditLog(w, r, "ListenBucketNotification", mustGetClaimsFromToken(r))
// Validate if bucket exists.
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
if !objAPI.IsNotificationSupported() {
writeErrorResponse(w, ErrNotImplemented, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if !objAPI.IsNotificationSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
if !objAPI.IsListenBucketSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucketName := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.ListenBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
@@ -179,11 +194,13 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
var prefix string
if len(values["prefix"]) > 1 {
writeErrorResponse(w, ErrFilterNamePrefix, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrFilterNamePrefix), r.URL, guessIsBrowserReq(r))
return
}
if len(values["prefix"]) == 1 {
if err := event.ValidateFilterRuleValue(values["prefix"][0]); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -192,11 +209,13 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
var suffix string
if len(values["suffix"]) > 1 {
writeErrorResponse(w, ErrFilterNameSuffix, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrFilterNameSuffix), r.URL, guessIsBrowserReq(r))
return
}
if len(values["suffix"]) == 1 {
if err := event.ValidateFilterRuleValue(values["suffix"][0]); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -209,7 +228,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
for _, s := range values["events"] {
eventName, err := event.ParseName(s)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -217,19 +236,19 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
}
if _, err := objAPI.GetBucketInfo(ctx, bucketName); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
host, err := xnet.ParseHost(r.RemoteAddr)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
target, err := target.NewHTTPClientTarget(*host, w)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -237,8 +256,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
if err = globalNotificationSys.AddRemoteTarget(bucketName, target, rulesMap); err != nil {
logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
defer globalNotificationSys.RemoveRemoteTarget(bucketName, target.ID())
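
Note: a sketch of the add-then-defer-remove lifecycle above, with a hypothetical registry in place of globalNotificationSys; the point is that cleanup is guaranteed however the handler exits.

package sketch

import (
	"fmt"
	"sync"
)

type registry struct {
	mu      sync.Mutex
	targets map[string]bool
}

func newRegistry() *registry {
	return &registry{targets: make(map[string]bool)}
}

func (r *registry) Add(id string) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.targets[id] {
		return fmt.Errorf("target %s already registered", id)
	}
	r.targets[id] = true
	return nil
}

func (r *registry) Remove(id string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.targets, id)
}

// listen keeps the target registered for exactly the lifetime of serve,
// mirroring AddRemoteTarget + defer RemoveRemoteTarget above.
func listen(r *registry, id string, serve func()) error {
	if err := r.Add(id); err != nil {
		return err
	}
	defer r.Remove(id)
	serve()
	return nil
}
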
@@ -246,14 +264,13 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
thisAddr, err := xnet.ParseHost(GetLocalPeer(globalEndpoints))
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err = SaveListener(objAPI, bucketName, eventNames, pattern, target.ID(), *thisAddr); err != nil {
logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -263,8 +280,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
if err = RemoveListener(objAPI, bucketName, target.ID(), *thisAddr); err != nil {
logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}

View File

@@ -40,9 +40,11 @@ const (
func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketPolicy")
defer logger.AuditLog(w, r, "PutBucketPolicy", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
@@ -50,43 +52,43 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Error out if Content-Length is missing.
// PutBucketPolicy always needs Content-Length.
if r.ContentLength <= 0 {
writeErrorResponse(w, ErrMissingContentLength, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
return
}
// Error out if Content-Length is beyond allowed size.
if r.ContentLength > maxBucketPolicySize {
writeErrorResponse(w, ErrEntityTooLarge, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL, guessIsBrowserReq(r))
return
}
bucketPolicy, err := policy.ParseConfig(io.LimitReader(r.Body, r.ContentLength), bucket)
if err != nil {
writeErrorResponse(w, ErrMalformedPolicy, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL, guessIsBrowserReq(r))
return
}
// Version in policy must not be empty
if bucketPolicy.Version == "" {
writeErrorResponse(w, ErrMalformedPolicy, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL, guessIsBrowserReq(r))
return
}
if err = objAPI.SetBucketPolicy(ctx, bucket, bucketPolicy); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -101,9 +103,11 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketPolicy")
defer logger.AuditLog(w, r, "DeleteBucketPolicy", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
@@ -111,18 +115,18 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err := objAPI.DeleteBucketPolicy(ctx, bucket); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -137,9 +141,11 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketPolicy")
defer logger.AuditLog(w, r, "GetBucketPolicy", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
@@ -147,27 +153,26 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Read bucket access policy.
bucketPolicy, err := objAPI.GetBucketPolicy(ctx, bucket)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
policyData, err := json.Marshal(bucketPolicy)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

View File

@@ -68,24 +68,6 @@ func parsePublicCertFile(certFile string) (x509Certs []*x509.Certificate, err er
}
func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
// Get all CA file names.
var caFiles []string
fis, err := readDir(certsCAsDir)
if err != nil {
return nil, err
}
for _, fi := range fis {
// Skip all directories.
if hasSuffix(fi, slashSeparator) {
continue
}
// We are only interested in regular files here.
caFiles = append(caFiles, pathJoin(certsCAsDir, fi))
}
if len(caFiles) == 0 {
return nil, nil
}
rootCAs, _ := x509.SystemCertPool()
if rootCAs == nil {
// In some systems (like Windows) system cert pool is
@@ -94,16 +76,26 @@ func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
rootCAs = x509.NewCertPool()
}
// Load custom root CAs for client requests
for _, caFile := range caFiles {
caCert, err := ioutil.ReadFile(caFile)
if err != nil {
return nil, err
fis, err := readDir(certsCAsDir)
if err != nil {
if err == errFileNotFound {
err = nil // Return success if the CAs directory is missing.
}
rootCAs.AppendCertsFromPEM(caCert)
return rootCAs, err
}
// Load all custom CA files.
for _, fi := range fis {
// Skip all directories.
if hasSuffix(fi, slashSeparator) {
continue
}
caCert, err := ioutil.ReadFile(pathJoin(certsCAsDir, fi))
if err != nil {
return rootCAs, err
}
rootCAs.AppendCertsFromPEM(caCert)
}
return rootCAs, nil
}
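
Note: a hedged usage sketch for a pool built the way getRootCAs builds one: hand it to a TLS client config so requests trust the custom CA. The caFile path and helper name are illustrative.

package sketch

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"net/http"
)

func newClientWithCA(caFile string) (*http.Client, error) {
	pool, _ := x509.SystemCertPool()
	if pool == nil {
		pool = x509.NewCertPool() // e.g. Windows, where the system pool may be unavailable
	}
	pem, err := ioutil.ReadFile(caFile)
	if err != nil {
		return nil, err
	}
	pool.AppendCertsFromPEM(pem)
	tr := &http.Transport{TLSClientConfig: &tls.Config{RootCAs: pool}}
	return &http.Client{Transport: tr}, nil
}
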
@@ -150,24 +142,20 @@ func loadX509KeyPair(certFile, keyFile string) (tls.Certificate, error) {
return cert, nil
}
func getSSLConfig() (x509Certs []*x509.Certificate, rootCAs *x509.CertPool, c *certs.Certs, secureConn bool, err error) {
func getTLSConfig() (x509Certs []*x509.Certificate, c *certs.Certs, secureConn bool, err error) {
if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
return nil, nil, nil, false, nil
return nil, nil, false, nil
}
if x509Certs, err = parsePublicCertFile(getPublicCertFile()); err != nil {
return nil, nil, nil, false, err
return nil, nil, false, err
}
c, err = certs.New(getPublicCertFile(), getPrivateKeyFile(), loadX509KeyPair)
if err != nil {
return nil, nil, nil, false, err
}
if rootCAs, err = getRootCAs(getCADir()); err != nil {
return nil, nil, nil, false, err
return nil, nil, false, err
}
secureConn = true
return x509Certs, rootCAs, c, secureConn, nil
return x509Certs, c, secureConn, nil
}

View File

@@ -223,7 +223,8 @@ func TestGetRootCAs(t *testing.T) {
certCAsDir string
expectedErr error
}{
{"nonexistent-dir", errFileNotFound},
// ignores non-existent directories.
{"nonexistent-dir", nil},
// Ignores directories.
{dir1, nil},
// Ignore empty directory.

View File

@@ -17,6 +17,7 @@
package cmd
import (
"crypto/tls"
"errors"
"net"
"os"
@@ -28,12 +29,13 @@ import (
etcd "github.com/coreos/etcd/clientv3"
dns2 "github.com/miekg/dns"
"github.com/minio/cli"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/cmd/logger/target/console"
"github.com/minio/minio/cmd/logger/target/http"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/dns"
"github.com/minio/minio-go/pkg/set"
xnet "github.com/minio/minio/pkg/net"
)
// Check for updates and print a notification message
@@ -50,53 +52,129 @@ func checkUpdate(mode string) {
// Load logger targets based on user's configuration
func loadLoggers() {
if globalServerConfig.Logger.Console.Enabled {
// Enable console logging
logger.AddTarget(logger.NewConsole())
auditEndpoint, ok := os.LookupEnv("MINIO_AUDIT_LOGGER_HTTP_ENDPOINT")
if ok {
// Enable audit HTTP logging through ENV.
logger.AddAuditTarget(http.New(auditEndpoint, NewCustomHTTPTransport()))
}
for _, l := range globalServerConfig.Logger.HTTP {
if l.Enabled {
// Enable http logging
logger.AddTarget(logger.NewHTTP(l.Endpoint, NewCustomHTTPTransport()))
loggerEndpoint, ok := os.LookupEnv("MINIO_LOGGER_HTTP_ENDPOINT")
if ok {
// Enable HTTP logging through ENV.
logger.AddTarget(http.New(loggerEndpoint, NewCustomHTTPTransport()))
} else {
for _, l := range globalServerConfig.Logger.HTTP {
if l.Enabled {
// Enable http logging
logger.AddTarget(http.New(l.Endpoint, NewCustomHTTPTransport()))
}
}
}
if globalServerConfig.Logger.Console.Enabled {
// Enable console logging
logger.AddTarget(console.New())
}
}
func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() string) (*ConfigDir, bool) {
var dir string
var dirSet bool
switch {
case ctx.IsSet(option):
dir = ctx.String(option)
dirSet = true
case ctx.GlobalIsSet(option):
dir = ctx.GlobalString(option)
dirSet = true
// The cli package does not expose the parent command's option value. The code below is a workaround.
if dir == "" || dir == getDefaultDir() {
dirSet = false // Unset to false since GlobalIsSet() true is a false positive.
if ctx.Parent().GlobalIsSet(option) {
dir = ctx.Parent().GlobalString(option)
dirSet = true
}
}
default:
// Neither local nor global option is provided. In this case, try to use
// the default directory.
dir = getDefaultDir()
if dir == "" {
logger.FatalIf(errInvalidArgument, "%s option must be provided", option)
}
}
if dir == "" {
logger.FatalIf(errors.New("empty directory"), "%s directory cannot be empty", option)
}
// Disallow relative paths, figure out absolute paths.
dirAbs, err := filepath.Abs(dir)
logger.FatalIf(err, "Unable to fetch absolute path for %s=%s", option, dir)
logger.FatalIf(mkdirAllIgnorePerm(dirAbs), "Unable to create directory specified %s=%s", option, dir)
return &ConfigDir{path: dirAbs}, dirSet
}
func handleCommonCmdArgs(ctx *cli.Context) {
var configDir string
switch {
case ctx.IsSet("config-dir"):
configDir = ctx.String("config-dir")
case ctx.GlobalIsSet("config-dir"):
configDir = ctx.GlobalString("config-dir")
// The cli package does not expose the parent command's "config-dir" option. The code below is a workaround.
if configDir == "" || configDir == getConfigDir() {
if ctx.Parent().GlobalIsSet("config-dir") {
configDir = ctx.Parent().GlobalString("config-dir")
}
}
default:
// Neither local nor global config-dir option is provided. In this case, try to use
// the default config directory.
configDir = getConfigDir()
if configDir == "" {
logger.FatalIf(errors.New("missing option"), "config-dir option must be provided")
}
// Get "json" flag from command line argument and
// enable json and quite modes if json flag is turned on.
globalCLIContext.JSON = ctx.IsSet("json") || ctx.GlobalIsSet("json")
if globalCLIContext.JSON {
logger.EnableJSON()
}
if configDir == "" {
logger.FatalIf(errors.New("empty directory"), "Configuration directory cannot be empty")
// Get quiet flag from command line argument.
globalCLIContext.Quiet = ctx.IsSet("quiet") || ctx.GlobalIsSet("quiet")
if globalCLIContext.Quiet {
logger.EnableQuiet()
}
// Disallow relative paths, figure out absolute paths.
configDirAbs, err := filepath.Abs(configDir)
logger.FatalIf(err, "Unable to fetch absolute path for config directory %s", configDir)
setConfigDir(configDirAbs)
// Get anonymous flag from command line argument.
globalCLIContext.Anonymous = ctx.IsSet("anonymous") || ctx.GlobalIsSet("anonymous")
if globalCLIContext.Anonymous {
logger.EnableAnonymous()
}
// Fetch address option
globalCLIContext.Addr = ctx.GlobalString("address")
if globalCLIContext.Addr == "" || globalCLIContext.Addr == ":"+globalMinioDefaultPort {
globalCLIContext.Addr = ctx.String("address")
}
// Set all config, certs and CAs directories.
var configSet, certsSet bool
globalConfigDir, configSet = newConfigDirFromCtx(ctx, "config-dir", defaultConfigDir.Get)
globalCertsDir, certsSet = newConfigDirFromCtx(ctx, "certs-dir", defaultCertsDir.Get)
// Remove this code when we deprecate and remove config-dir.
// This code is to make sure we inherit from the config-dir
// option if certs-dir is not provided.
if !certsSet && configSet {
globalCertsDir = &ConfigDir{path: filepath.Join(globalConfigDir.Get(), certsDir)}
}
globalCertsCADir = &ConfigDir{path: filepath.Join(globalCertsDir.Get(), certsCADir)}
logger.FatalIf(mkdirAllIgnorePerm(globalCertsCADir.Get()), "Unable to create certs CA directory at %s", globalCertsCADir.Get())
}
// Parses the given compression exclude list `extensions` or `content-types`.
func parseCompressIncludes(includes []string) ([]string, error) {
for _, e := range includes {
if len(e) == 0 {
return nil, uiErrInvalidCompressionIncludesValue(nil).Msg("extension/mime-type (%s) cannot be empty", e)
}
}
return includes, nil
}
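
Note: a runnable sketch of the env-driven parsing above; parseIncludes mirrors parseCompressIncludes' empty-entry check, and the env variable name comes from the diff.

package main

import (
	"fmt"
	"os"
	"strings"
)

func parseIncludes(raw string) ([]string, error) {
	includes := strings.Split(raw, ",")
	for _, e := range includes {
		if len(e) == 0 {
			return nil, fmt.Errorf("extension/mime-type cannot be empty")
		}
	}
	return includes, nil
}

func main() {
	os.Setenv("MINIO_COMPRESS_EXTENSIONS", ".txt,.log,.csv")
	exts, err := parseIncludes(os.Getenv("MINIO_COMPRESS_EXTENSIONS"))
	fmt.Println(exts, err) // [.txt .log .csv] <nil>
}
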
func handleCommonEnvVars() {
compressEnvDelimiter := ","
// Start profiler if env is set.
if profiler := os.Getenv("_MINIO_PROFILER"); profiler != "" {
var err error
@@ -111,6 +189,7 @@ func handleCommonEnvVars() {
if err != nil {
logger.Fatal(uiErrInvalidCredentials(err), "Unable to validate credentials inherited from the shell environment")
}
cred.Expiration = timeSentinel
// credential Envs are set globally.
globalIsEnvCreds = true
@@ -120,7 +199,7 @@ func handleCommonEnvVars() {
if browser := os.Getenv("MINIO_BROWSER"); browser != "" {
browserFlag, err := ParseBoolFlag(browser)
if err != nil {
logger.Fatal(uiErrInvalidBrowserValue(nil).Msg("Unknown value `%s`", browser), "Invalid MINIO_BROWSER environment variable")
logger.Fatal(uiErrInvalidBrowserValue(nil).Msg("Unknown value `%s`", browser), "Invalid MINIO_BROWSER value in environment variable")
}
// browser Envs are set globally, this does not represent
@@ -139,37 +218,90 @@ func handleCommonEnvVars() {
etcdEndpointsEnv, ok := os.LookupEnv("MINIO_ETCD_ENDPOINTS")
if ok {
etcdEndpoints := strings.Split(etcdEndpointsEnv, ",")
var etcdSecure bool
for _, endpoint := range etcdEndpoints {
u, err := xnet.ParseURL(endpoint)
if err != nil {
logger.FatalIf(err, "Unable to initialize etcd with %s", etcdEndpoints)
}
// If one of the endpoints is https, we will use https directly.
etcdSecure = etcdSecure || u.Scheme == "https"
}
var err error
globalEtcdClient, err = etcd.New(etcd.Config{
Endpoints: etcdEndpoints,
DialTimeout: defaultDialTimeout,
DialKeepAliveTime: defaultDialKeepAlive,
})
if etcdSecure {
// This is only to support client side certificate authentication
// https://coreos.com/etcd/docs/latest/op-guide/security.html
etcdClientCertFile, ok1 := os.LookupEnv("MINIO_ETCD_CLIENT_CERT")
etcdClientCertKey, ok2 := os.LookupEnv("MINIO_ETCD_CLIENT_CERT_KEY")
var getClientCertificate func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
if ok1 && ok2 {
getClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) {
cert, terr := tls.LoadX509KeyPair(etcdClientCertFile, etcdClientCertKey)
return &cert, terr
}
}
globalEtcdClient, err = etcd.New(etcd.Config{
Endpoints: etcdEndpoints,
DialTimeout: defaultDialTimeout,
DialKeepAliveTime: defaultDialKeepAlive,
TLS: &tls.Config{
RootCAs: globalRootCAs,
GetClientCertificate: getClientCertificate,
},
})
} else {
globalEtcdClient, err = etcd.New(etcd.Config{
Endpoints: etcdEndpoints,
DialTimeout: defaultDialTimeout,
DialKeepAliveTime: defaultDialKeepAlive,
})
}
logger.FatalIf(err, "Unable to initialize etcd with %s", etcdEndpoints)
}
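
Note: a minimal sketch of the GetClientCertificate hookup above; tls.Config calls the closure at handshake time, so the key pair can be loaded lazily.

package sketch

import "crypto/tls"

// clientCertLoader returns the callback shape tls.Config.GetClientCertificate
// expects; the key pair is read from disk only when a handshake requests it.
func clientCertLoader(certFile, keyFile string) func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
	return func(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) {
		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
		return &cert, err
	}
}
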
globalDomainName, globalIsEnvDomainName = os.LookupEnv("MINIO_DOMAIN")
if globalDomainName != "" {
if _, ok := dns2.IsDomainName(globalDomainName); !ok {
logger.Fatal(uiErrInvalidDomainValue(nil).Msg("Unknown value `%s`", globalDomainName), "Invalid MINIO_DOMAIN environment variable")
v, ok := os.LookupEnv("MINIO_DOMAIN")
if ok {
for _, domainName := range strings.Split(v, ",") {
if _, ok = dns2.IsDomainName(domainName); !ok {
logger.Fatal(uiErrInvalidDomainValue(nil).Msg("Unknown value `%s`", domainName),
"Invalid MINIO_DOMAIN value in environment variable")
}
globalDomainNames = append(globalDomainNames, domainName)
}
}
minioEndpointsEnv, ok := os.LookupEnv("MINIO_PUBLIC_IPS")
if ok {
minioEndpoints := strings.Split(minioEndpointsEnv, ",")
globalDomainIPs = set.NewStringSet()
for i, ip := range minioEndpoints {
if net.ParseIP(ip) == nil {
logger.FatalIf(errInvalidArgument, "Unable to initialize Minio server with invalid MINIO_PUBLIC_IPS[%d]: %s", i, ip)
var domainIPs = set.NewStringSet()
for _, endpoint := range minioEndpoints {
if net.ParseIP(endpoint) == nil {
// Not an IP literal; check whether it resolves as a DNS entry.
addrs, err := net.LookupHost(endpoint)
if err != nil {
logger.FatalIf(err, "Unable to initialize Minio server with [%s] invalid entry found in MINIO_PUBLIC_IPS", endpoint)
}
for _, addr := range addrs {
domainIPs.Add(addr)
}
continue
}
globalDomainIPs.Add(ip)
domainIPs.Add(endpoint)
}
updateDomainIPs(domainIPs)
} else {
// Add found interfaces IP address to global domain IPS,
// loopback addresses will be naturally dropped.
updateDomainIPs(localIP4)
}
if globalDomainName != "" && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil {
if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil {
var err error
globalDNSConfig, err = dns.NewCoreDNS(globalDomainName, globalDomainIPs, globalMinioPort, globalEtcdClient)
logger.FatalIf(err, "Unable to initialize DNS config for %s.", globalDomainName)
globalDNSConfig, err = dns.NewCoreDNS(globalDomainNames, globalDomainIPs, globalMinioPort, globalEtcdClient)
logger.FatalIf(err, "Unable to initialize DNS config for %s.", globalDomainNames)
}
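
Note: a sketch of the MINIO_PUBLIC_IPS resolution above: literal IPs pass through, hostnames are resolved and all returned addresses are kept. resolvePublicIPs is a hypothetical stand-in for the updateDomainIPs path.

package sketch

import "net"

func resolvePublicIPs(entries []string) (map[string]bool, error) {
	ips := make(map[string]bool)
	for _, e := range entries {
		if net.ParseIP(e) != nil {
			ips[e] = true
			continue
		}
		// Not an IP literal: treat it as a hostname and resolve it.
		addrs, err := net.LookupHost(e)
		if err != nil {
			return nil, err
		}
		for _, a := range addrs {
			ips[a] = true
		}
	}
	return ips, nil
}
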
if drives := os.Getenv("MINIO_CACHE_DRIVES"); drives != "" {
@@ -246,7 +378,7 @@ func handleCommonEnvVars() {
if worm := os.Getenv("MINIO_WORM"); worm != "" {
wormFlag, err := ParseBoolFlag(worm)
if err != nil {
logger.Fatal(uiErrInvalidWormValue(nil).Msg("Unknown value `%s`", worm), "Unable to validate MINIO_WORM environment variable")
logger.Fatal(uiErrInvalidWormValue(nil).Msg("Unknown value `%s`", worm), "Invalid MINIO_WORM value in environment variable")
}
// worm Envs are set globally, this does not represent
@@ -255,17 +387,27 @@ func handleCommonEnvVars() {
globalWORMEnabled = bool(wormFlag)
}
kmsConf, err := crypto.NewVaultConfig()
if err != nil {
logger.Fatal(err, "Unable to initialize hashicorp vault")
if compress := os.Getenv("MINIO_COMPRESS"); compress != "" {
globalIsCompressionEnabled = strings.EqualFold(compress, "true")
}
if kmsConf.Vault.Endpoint != "" {
kms, err := crypto.NewVault(kmsConf)
if err != nil {
logger.Fatal(err, "Unable to initialize KMS")
compressExtensions := os.Getenv("MINIO_COMPRESS_EXTENSIONS")
compressMimeTypes := os.Getenv("MINIO_COMPRESS_MIMETYPES")
if compressExtensions != "" || compressMimeTypes != "" {
globalIsEnvCompression = true
if compressExtensions != "" {
extensions, err := parseCompressIncludes(strings.Split(compressExtensions, compressEnvDelimiter))
if err != nil {
logger.Fatal(err, "Invalid MINIO_COMPRESS_EXTENSIONS value (`%s`)", extensions)
}
globalCompressExtensions = extensions
}
if compressMimeTypes != "" {
contenttypes, err := parseCompressIncludes(strings.Split(compressMimeTypes, compressEnvDelimiter))
if err != nil {
logger.Fatal(err, "Invalid MINIO_COMPRESS_MIMETYPES value (`%s`)", contenttypes)
}
globalCompressMimeTypes = contenttypes
}
globalKMS = kms
globalKMSKeyID = kmsConf.Vault.Key.Name
globalKMSConfig = kmsConf
}
}

cmd/config-common.go (new file, 151 lines)
View File

@@ -0,0 +1,151 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"errors"
"fmt"
etcd "github.com/coreos/etcd/clientv3"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/hash"
)
var errConfigNotFound = errors.New("config file not found")
func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]byte, error) {
var buffer bytes.Buffer
// Read entire content by setting size to -1
if err := objAPI.GetObject(ctx, minioMetaBucket, configFile, 0, -1, &buffer, "", ObjectOptions{}); err != nil {
// Treat object not found as config not found.
if isErrObjectNotFound(err) {
return nil, errConfigNotFound
}
logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
logger.LogIf(ctx, err)
return nil, err
}
// Return config not found on empty content.
if buffer.Len() == 0 {
return nil, errConfigNotFound
}
return buffer.Bytes(), nil
}
func deleteConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) error {
_, err := client.Delete(ctx, configFile)
return err
}
func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
return objAPI.DeleteObject(ctx, minioMetaBucket, configFile)
}
func saveConfigEtcd(ctx context.Context, client *etcd.Client, configFile string, data []byte) error {
timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
defer cancel()
_, err := client.Put(timeoutCtx, configFile, string(data))
if err == context.DeadlineExceeded {
return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
} else if err != nil {
return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
}
return nil
}
func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
if err != nil {
return err
}
_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader, nil, nil), ObjectOptions{})
return err
}
func readConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) ([]byte, error) {
timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
defer cancel()
resp, err := client.Get(timeoutCtx, configFile)
if err != nil {
if err == context.DeadlineExceeded {
return nil, fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
}
return nil, fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
}
if resp.Count == 0 {
return nil, errConfigNotFound
}
for _, ev := range resp.Kvs {
if string(ev.Key) == configFile {
return ev.Value, nil
}
}
return nil, errConfigNotFound
}
// watchConfigEtcd - watches for changes on `configFile` on etcd and loads them.
func watchConfigEtcd(objAPI ObjectLayer, configFile string, loadCfgFn func(ObjectLayer) error) {
ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
defer cancel()
for watchResp := range globalEtcdClient.Watch(ctx, configFile) {
for _, event := range watchResp.Events {
if event.IsModify() || event.IsCreate() {
loadCfgFn(objAPI)
}
}
}
}
func checkConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) error {
timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
defer cancel()
resp, err := client.Get(timeoutCtx, configFile)
if err != nil {
if err == context.DeadlineExceeded {
return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
}
return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
}
if resp.Count == 0 {
return errConfigNotFound
}
return nil
}
func checkConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
if globalEtcdClient != nil {
return checkConfigEtcd(ctx, globalEtcdClient, configFile)
}
if _, err := objAPI.GetObjectInfo(ctx, minioMetaBucket, configFile, ObjectOptions{}); err != nil {
// Treat object not found as config not found.
if isErrObjectNotFound(err) {
return errConfigNotFound
}
logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
logger.LogIf(ctx, err)
return err
}
return nil
}
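
Note: a hedged sketch of the timeout-and-translate pattern config-common.go applies to each etcd call; the timeout value here is illustrative, the real constant lives elsewhere in the codebase.

package sketch

import (
	"context"
	"errors"
	"time"
)

const defaultContextTimeout = 5 * time.Second // illustrative value

// withTimeout bounds an etcd call and turns a deadline hit into an
// actionable error, as readConfigEtcd/saveConfigEtcd do above.
func withTimeout(parent context.Context, op func(context.Context) error) error {
	ctx, cancel := context.WithTimeout(parent, defaultContextTimeout)
	defer cancel()
	if err := op(ctx); err != nil {
		if err == context.DeadlineExceeded {
			return errors.New("etcd setup is unreachable, please check your endpoints")
		}
		return err
	}
	return nil
}
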

View File

@@ -20,15 +20,19 @@ import (
"context"
"errors"
"fmt"
"os"
"reflect"
"sync"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/event/target"
"github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/iam/validator"
xnet "github.com/minio/minio/pkg/net"
)
// Steps to move from version N to version N+1
@@ -40,9 +44,9 @@ import (
// 6. Make changes in config-current_test.go for any test change
// Config version
const serverConfigVersion = "29"
const serverConfigVersion = "33"
type serverConfig = serverConfigV29
type serverConfig = serverConfigV33
var (
// globalServerConfig server config.
@@ -173,61 +177,79 @@ func (s *serverConfig) Validate() error {
// Worm, Cache and StorageClass values are already validated during json unmarshal
for _, v := range s.Notify.AMQP {
if err := v.Validate(); err != nil {
return fmt.Errorf("amqp: %s", err.Error())
return fmt.Errorf("amqp: %s", err)
}
}
for _, v := range s.Notify.Elasticsearch {
if err := v.Validate(); err != nil {
return fmt.Errorf("elasticsearch: %s", err.Error())
return fmt.Errorf("elasticsearch: %s", err)
}
}
for _, v := range s.Notify.Kafka {
if err := v.Validate(); err != nil {
return fmt.Errorf("kafka: %s", err.Error())
return fmt.Errorf("kafka: %s", err)
}
}
for _, v := range s.Notify.MQTT {
if err := v.Validate(); err != nil {
return fmt.Errorf("mqtt: %s", err.Error())
return fmt.Errorf("mqtt: %s", err)
}
}
for _, v := range s.Notify.MySQL {
if err := v.Validate(); err != nil {
return fmt.Errorf("mysql: %s", err.Error())
return fmt.Errorf("mysql: %s", err)
}
}
for _, v := range s.Notify.NATS {
if err := v.Validate(); err != nil {
return fmt.Errorf("nats: %s", err.Error())
return fmt.Errorf("nats: %s", err)
}
}
for _, v := range s.Notify.NSQ {
if err := v.Validate(); err != nil {
return fmt.Errorf("nsq: %s", err)
}
}
for _, v := range s.Notify.PostgreSQL {
if err := v.Validate(); err != nil {
return fmt.Errorf("postgreSQL: %s", err.Error())
return fmt.Errorf("postgreSQL: %s", err)
}
}
for _, v := range s.Notify.Redis {
if err := v.Validate(); err != nil {
return fmt.Errorf("redis: %s", err.Error())
return fmt.Errorf("redis: %s", err)
}
}
for _, v := range s.Notify.Webhook {
if err := v.Validate(); err != nil {
return fmt.Errorf("webhook: %s", err.Error())
return fmt.Errorf("webhook: %s", err)
}
}
return nil
}
// SetCompressionConfig sets the current compression config
func (s *serverConfig) SetCompressionConfig(extensions []string, mimeTypes []string) {
s.Compression.Extensions = extensions
s.Compression.MimeTypes = mimeTypes
s.Compression.Enabled = globalIsCompressionEnabled
}
// GetCompressionConfig gets the current compression config
func (s *serverConfig) GetCompressionConfig() compressionConfig {
return s.Compression
}
func (s *serverConfig) loadFromEnvs() {
// If env is set override the credentials from config file.
if globalIsEnvCreds {
@@ -250,8 +272,26 @@ func (s *serverConfig) loadFromEnvs() {
s.SetCacheConfig(globalCacheDrives, globalCacheExcludes, globalCacheExpiry, globalCacheMaxUse)
}
if globalKMS != nil {
s.KMS = globalKMSConfig
if err := Environment.LookupKMSConfig(s.KMS); err != nil {
logger.FatalIf(err, "Unable to setup the KMS")
}
if globalIsEnvCompression {
s.SetCompressionConfig(globalCompressExtensions, globalCompressMimeTypes)
}
if jwksURL, ok := os.LookupEnv("MINIO_IAM_JWKS_URL"); ok {
if u, err := xnet.ParseURL(jwksURL); err == nil {
s.OpenID.JWKS.URL = u
logger.FatalIf(s.OpenID.JWKS.PopulatePublicKey(), "Unable to populate public key from JWKS URL")
}
}
if opaURL, ok := os.LookupEnv("MINIO_IAM_OPA_URL"); ok {
if u, err := xnet.ParseURL(opaURL); err == nil {
s.Policy.OPA.URL = u
s.Policy.OPA.AuthToken = os.Getenv("MINIO_IAM_OPA_AUTHTOKEN")
}
}
}
@@ -325,6 +365,17 @@ func (s *serverConfig) TestNotificationTargets() error {
t.Close()
}
for k, v := range s.Notify.NSQ {
if !v.Enable {
continue
}
t, err := target.NewNSQTarget(k, v)
if err != nil {
return fmt.Errorf("nsq(%s): %s", k, err.Error())
}
t.Close()
}
for k, v := range s.Notify.PostgreSQL {
if !v.Enable {
continue
@@ -366,10 +417,14 @@ func (s *serverConfig) ConfigDiff(t *serverConfig) string {
return "StorageClass configuration differs"
case !reflect.DeepEqual(s.Cache, t.Cache):
return "Cache configuration differs"
case !reflect.DeepEqual(s.Compression, t.Compression):
return "Compression configuration differs"
case !reflect.DeepEqual(s.Notify.AMQP, t.Notify.AMQP):
return "AMQP Notification configuration differs"
case !reflect.DeepEqual(s.Notify.NATS, t.Notify.NATS):
return "NATS Notification configuration differs"
case !reflect.DeepEqual(s.Notify.NSQ, t.Notify.NSQ):
return "NSQ Notification configuration differs"
case !reflect.DeepEqual(s.Notify.Elasticsearch, t.Notify.Elasticsearch):
return "ElasticSearch Notification configuration differs"
case !reflect.DeepEqual(s.Notify.Redis, t.Notify.Redis):
@@ -417,6 +472,11 @@ func newServerConfig() *serverConfig {
},
KMS: crypto.KMSConfig{},
Notify: notifier{},
Compression: compressionConfig{
Enabled: false,
Extensions: globalCompressExtensions,
MimeTypes: globalCompressMimeTypes,
},
}
// Make sure to initialize notification configs.
@@ -430,6 +490,8 @@ func newServerConfig() *serverConfig {
srvCfg.Notify.Redis["1"] = target.RedisArgs{}
srvCfg.Notify.NATS = make(map[string]target.NATSArgs)
srvCfg.Notify.NATS["1"] = target.NATSArgs{}
srvCfg.Notify.NSQ = make(map[string]target.NSQArgs)
srvCfg.Notify.NSQ["1"] = target.NSQArgs{}
srvCfg.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs)
srvCfg.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{}
srvCfg.Notify.MySQL = make(map[string]target.MySQLArgs)
@@ -473,18 +535,32 @@ func (s *serverConfig) loadToCachedConfigs() {
globalCacheExpiry = cacheConf.Expiry
globalCacheMaxUse = cacheConf.MaxUse
}
if globalKMS == nil {
globalKMSConfig = s.KMS
if kms, err := crypto.NewVault(globalKMSConfig); err == nil {
globalKMS = kms
globalKMSKeyID = globalKMSConfig.Vault.Key.Name
}
if err := Environment.LookupKMSConfig(s.KMS); err != nil {
logger.FatalIf(err, "Unable to setup the KMS")
}
if !globalIsCompressionEnabled {
compressionConf := s.GetCompressionConfig()
globalCompressExtensions = compressionConf.Extensions
globalCompressMimeTypes = compressionConf.MimeTypes
globalIsCompressionEnabled = compressionConf.Enabled
}
globalIAMValidators = getAuthValidators(s)
if s.Policy.OPA.URL != nil && s.Policy.OPA.URL.String() != "" {
globalPolicyOPA = iampolicy.NewOpa(iampolicy.OpaArgs{
URL: s.Policy.OPA.URL,
AuthToken: s.Policy.OPA.AuthToken,
Transport: NewCustomHTTPTransport(),
CloseRespFn: xhttp.DrainBody,
})
}
}
// newConfig - initialize a new server config, saves env parameters if
// newSrvConfig - initialize a new server config, saves env parameters if
// found, otherwise use default parameters
func newConfig(objAPI ObjectLayer) error {
func newSrvConfig(objAPI ObjectLayer) error {
// Initialize server config.
srvCfg := newServerConfig()
@@ -535,6 +611,20 @@ func loadConfig(objAPI ObjectLayer) error {
return nil
}
// getAuthValidators - returns ValidatorList which contains
// enabled providers in server config.
// A new authentication provider is added like below
// * Add a new provider in pkg/iam/validator package.
func getAuthValidators(config *serverConfig) *validator.Validators {
validators := validator.NewValidators()
if config.OpenID.JWKS.URL != nil {
validators.Add(validator.NewJWT(config.OpenID.JWKS))
}
return validators
}
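
Note: a simplified sketch of the registry idea behind getAuthValidators: a provider is added only when its configuration is present. All types here are hypothetical stand-ins for pkg/iam/validator.

package sketch

type validator interface{ ID() string }

type validators struct{ list []validator }

func (v *validators) Add(val validator) { v.list = append(v.list, val) }

type jwtValidator struct{ jwksURL string }

func (j jwtValidator) ID() string { return "jwt" }

func buildValidators(jwksURL string) *validators {
	v := &validators{}
	if jwksURL != "" { // only configured providers are registered
		v.Add(jwtValidator{jwksURL: jwksURL})
	}
	return v
}
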
// getNotificationTargets - returns TargetList which contains enabled targets in serverConfig.
// A new notification target is added like below
// * Add a new target in pkg/event/target package.
@@ -591,6 +681,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
for id, args := range config.Notify.MQTT {
if args.Enable {
args.RootCAs = globalRootCAs
newTarget, err := target.NewMQTTTarget(id, args)
if err != nil {
logger.LogIf(context.Background(), err)
@@ -631,6 +722,20 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
}
}
for id, args := range config.Notify.NSQ {
if args.Enable {
newTarget, err := target.NewNSQTarget(id, args)
if err != nil {
logger.LogIf(context.Background(), err)
continue
}
if err = targetList.Add(newTarget); err != nil {
logger.LogIf(context.Background(), err)
continue
}
}
}
for id, args := range config.Notify.PostgreSQL {
if args.Enable {
newTarget, err := target.NewPostgreSQLTarget(id, args)
@@ -661,6 +766,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
for id, args := range config.Notify.Webhook {
if args.Enable {
args.RootCAs = globalRootCAs
newTarget := target.NewWebhookTarget(id, args)
if err := targetList.Add(newTarget); err != nil {
logger.LogIf(context.Background(), err)

View File

@@ -130,8 +130,8 @@ func TestServerConfigWithEnvs(t *testing.T) {
}
// Check if serverConfig has the correct domain
if globalDomainName != "domain.com" {
t.Errorf("Expected Domain to be `domain.com`, found " + globalDomainName)
if globalDomainNames[0] != "domain.com" {
t.Errorf("Expected Domain to be `domain.com`, found " + globalDomainNames[0])
}
}
@@ -188,7 +188,7 @@ func TestValidateConfig(t *testing.T) {
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, false},
// Test 12 - Test NATS
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "clientID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},
// Test 13 - Test ElasticSearch
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, false},
@@ -233,11 +233,14 @@ func TestValidateConfig(t *testing.T) {
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "namespace", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true},
// Test 27 - Test MQTT
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "clientId": "", "username": "", "password": ""}}}}`, false},
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "username": "", "password": "", "queueDir": "", "queueLimit": 0}}}}`, false},
// Test 28 - Test NSQ
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nsq": { "1": { "enable": true, "nsqdAddress": "", "topic": ""} }}}`, false},
}
for i, testCase := range testCases {
if err = saveConfig(objLayer, configPath, []byte(testCase.configData)); err != nil {
if err = saveConfig(context.Background(), objLayer, configPath, []byte(testCase.configData)); err != nil {
t.Fatal(err)
}
_, err = getValidConfig(objLayer)
@@ -260,8 +263,16 @@ func TestConfigDiff(t *testing.T) {
{&serverConfig{}, nil, "Given configuration is empty"},
// 2
{
&serverConfig{Credential: auth.Credentials{"u1", "p1"}},
&serverConfig{Credential: auth.Credentials{"u1", "p2"}},
&serverConfig{Credential: auth.Credentials{
AccessKey: "u1",
SecretKey: "p1",
Expiration: timeSentinel,
}},
&serverConfig{Credential: auth.Credentials{
AccessKey: "u1",
SecretKey: "p2",
Expiration: timeSentinel,
}},
"Credential configuration differs",
},
// 3
@@ -285,48 +296,54 @@ func TestConfigDiff(t *testing.T) {
"NATS Notification configuration differs",
},
// 7
{
&serverConfig{Notify: notifier{NSQ: map[string]target.NSQArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{NSQ: map[string]target.NSQArgs{"1": {Enable: false}}}},
"NSQ Notification configuration differs",
},
// 8
{
&serverConfig{Notify: notifier{Elasticsearch: map[string]target.ElasticsearchArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{Elasticsearch: map[string]target.ElasticsearchArgs{"1": {Enable: false}}}},
"ElasticSearch Notification configuration differs",
},
// 8
// 9
{
&serverConfig{Notify: notifier{Redis: map[string]target.RedisArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{Redis: map[string]target.RedisArgs{"1": {Enable: false}}}},
"Redis Notification configuration differs",
},
// 9
// 10
{
&serverConfig{Notify: notifier{PostgreSQL: map[string]target.PostgreSQLArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{PostgreSQL: map[string]target.PostgreSQLArgs{"1": {Enable: false}}}},
"PostgreSQL Notification configuration differs",
},
// 10
// 11
{
&serverConfig{Notify: notifier{Kafka: map[string]target.KafkaArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{Kafka: map[string]target.KafkaArgs{"1": {Enable: false}}}},
"Kafka Notification configuration differs",
},
// 11
// 12
{
&serverConfig{Notify: notifier{Webhook: map[string]target.WebhookArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{Webhook: map[string]target.WebhookArgs{"1": {Enable: false}}}},
"Webhook Notification configuration differs",
},
// 12
// 13
{
&serverConfig{Notify: notifier{MySQL: map[string]target.MySQLArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{MySQL: map[string]target.MySQLArgs{"1": {Enable: false}}}},
"MySQL Notification configuration differs",
},
// 13
// 14
{
&serverConfig{Notify: notifier{MQTT: map[string]target.MQTTArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{MQTT: map[string]target.MQTTArgs{"1": {Enable: false}}}},
"MQTT Notification configuration differs",
},
// 14
// 15
{
&serverConfig{Logger: loggerConfig{
Console: loggerConsole{Enabled: true},

View File

@@ -19,7 +19,6 @@ package cmd
import (
"os"
"path/filepath"
"sync"
homedir "github.com/mitchellh/go-homedir"
)
@@ -41,55 +40,9 @@ const (
privateKeyFile = "private.key"
)
// ConfigDir - configuration directory with locking.
// ConfigDir - points to a user set directory.
type ConfigDir struct {
sync.Mutex
dir string
}
// Set - saves given directory as configuration directory.
func (config *ConfigDir) Set(dir string) {
config.Lock()
defer config.Unlock()
config.dir = dir
}
// Get - returns current configuration directory.
func (config *ConfigDir) Get() string {
config.Lock()
defer config.Unlock()
return config.dir
}
func (config *ConfigDir) getCertsDir() string {
return filepath.Join(config.Get(), certsDir)
}
// GetCADir - returns certificate CA directory.
func (config *ConfigDir) GetCADir() string {
return filepath.Join(config.getCertsDir(), certsCADir)
}
// Create - creates configuration directory tree.
func (config *ConfigDir) Create() error {
return os.MkdirAll(config.GetCADir(), 0700)
}
// GetMinioConfigFile - returns absolute path of config.json file.
func (config *ConfigDir) GetMinioConfigFile() string {
return filepath.Join(config.Get(), minioConfigFile)
}
// GetPublicCertFile - returns absolute path of public.crt file.
func (config *ConfigDir) GetPublicCertFile() string {
return filepath.Join(config.getCertsDir(), publicCertFile)
}
// GetPrivateKeyFile - returns absolute path of private.key file.
func (config *ConfigDir) GetPrivateKeyFile() string {
return filepath.Join(config.getCertsDir(), privateKeyFile)
path string
}
func getDefaultConfigDir() string {
@@ -101,32 +54,54 @@ func getDefaultConfigDir() string {
return filepath.Join(homeDir, defaultMinioConfigDir)
}
var configDir = &ConfigDir{dir: getDefaultConfigDir()}
func setConfigDir(dir string) {
configDir.Set(dir)
func getDefaultCertsDir() string {
return filepath.Join(getDefaultConfigDir(), certsDir)
}
func getConfigDir() string {
return configDir.Get()
func getDefaultCertsCADir() string {
return filepath.Join(getDefaultCertsDir(), certsCADir)
}
func getCADir() string {
return configDir.GetCADir()
var (
// Default config, certs and CA directories.
defaultConfigDir = &ConfigDir{path: getDefaultConfigDir()}
defaultCertsDir = &ConfigDir{path: getDefaultCertsDir()}
defaultCertsCADir = &ConfigDir{path: getDefaultCertsCADir()}
// Points to the current configuration directory -- deprecated, to be removed in the future.
globalConfigDir = defaultConfigDir
// Points to the current certs directory, set by the user with --certs-dir
globalCertsDir = defaultCertsDir
// Points to the CAs directory relative to the certs directory, i.e. <value-of-certs-dir>/CAs
globalCertsCADir = defaultCertsCADir
)
// Get - returns current directory.
func (dir *ConfigDir) Get() string {
return dir.path
}
func createConfigDir() error {
return configDir.Create()
// Attempts to create all directories, ignores any permission denied errors.
func mkdirAllIgnorePerm(path string) error {
err := os.MkdirAll(path, 0700)
if err != nil {
// In Kubernetes-like deployments this directory may already be
// mounted and not writable; ignore permission errors in that case.
if os.IsPermission(err) {
err = nil
}
}
return err
}
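A plausible call site, sketched for context only since the startup wiring is outside this hunk; note that only permission errors are swallowed, so genuine I/O failures still surface:

	// Create the certs CA tree; tolerate read-only mounts.
	if err := mkdirAllIgnorePerm(globalCertsCADir.Get()); err != nil {
		return err // non-permission failures reach here
	}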
func getConfigFile() string {
return configDir.GetMinioConfigFile()
return filepath.Join(globalConfigDir.Get(), minioConfigFile)
}
func getPublicCertFile() string {
return configDir.GetPublicCertFile()
return filepath.Join(globalCertsDir.Get(), publicCertFile)
}
func getPrivateKeyFile() string {
return configDir.GetPrivateKeyFile()
return filepath.Join(globalCertsDir.Get(), privateKeyFile)
}

View File

@@ -18,6 +18,7 @@ package cmd
import (
"context"
"encoding/json"
"fmt"
"os"
"path"
@@ -26,9 +27,10 @@ import (
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/dns"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/event/target"
"github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/iam/validator"
xnet "github.com/minio/minio/pkg/net"
"github.com/minio/minio/pkg/quick"
)
@@ -228,11 +230,11 @@ func migrateConfig() error {
// Version '1' is not supported anymore and deprecated, safe to delete.
func purgeV1() error {
configFile := filepath.Join(getConfigDir(), "fsUsers.json")
configFile := filepath.Join(globalConfigDir.Get(), "fsUsers.json")
cv1 := &configV1{}
_, err := Load(configFile, cv1)
if os.IsNotExist(err) || err == dns.ErrNoEntriesFound {
if os.IsNotExist(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version 1. %v", err)
@@ -916,7 +918,7 @@ func migrateV12ToV13() error {
// Copy over fields from V12 into V13 config struct
srvConfig := &serverConfigV13{
Logger: &loggerV7{},
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "13"
srvConfig.Credential = cv12.Credential
@@ -996,7 +998,7 @@ func migrateV13ToV14() error {
// Copy over fields from V13 into V14 config struct
srvConfig := &serverConfigV14{
Logger: &loggerV7{},
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "14"
srvConfig.Credential = cv13.Credential
@@ -1081,7 +1083,7 @@ func migrateV14ToV15() error {
// Copy over fields from V14 into V15 config struct
srvConfig := &serverConfigV15{
Logger: &loggerV7{},
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "15"
srvConfig.Credential = cv14.Credential
@@ -1171,7 +1173,7 @@ func migrateV15ToV16() error {
// Copy over fields from V15 into V16 config struct
srvConfig := &serverConfigV16{
Logger: &loggers{},
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "16"
srvConfig.Credential = cv15.Credential
@@ -1261,7 +1263,7 @@ func migrateV16ToV17() error {
// Copy over fields from V16 into V17 config struct
srvConfig := &serverConfigV17{
Logger: &loggers{},
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "17"
srvConfig.Credential = cv16.Credential
@@ -1382,7 +1384,7 @@ func migrateV17ToV18() error {
// Copy over fields from V17 into V18 config struct
srvConfig := &serverConfigV17{
Logger: &loggers{},
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "18"
srvConfig.Credential = cv17.Credential
@@ -1484,7 +1486,7 @@ func migrateV18ToV19() error {
// Copy over fields from V18 into V19 config struct
srvConfig := &serverConfigV18{
Logger: &loggers{},
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "19"
srvConfig.Credential = cv18.Credential
@@ -1590,7 +1592,7 @@ func migrateV19ToV20() error {
// Copy over fields from V19 into V20 config struct
srvConfig := &serverConfigV20{
Logger: &loggers{},
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "20"
srvConfig.Credential = cv19.Credential
@@ -1694,7 +1696,7 @@ func migrateV20ToV21() error {
// Copy over fields from V20 into V21 config struct
srvConfig := &serverConfigV21{
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "21"
srvConfig.Credential = cv20.Credential
@@ -1798,7 +1800,7 @@ func migrateV21ToV22() error {
// Copy over fields from V21 into V22 config struct
srvConfig := &serverConfigV22{
Notify: notifier{},
Notify: notifierV3{},
}
srvConfig.Version = "22"
srvConfig.Credential = cv21.Credential
@@ -1902,7 +1904,7 @@ func migrateV22ToV23() error {
// Copy over fields from V22 into V23 config struct
srvConfig := &serverConfigV23{
Notify: notifier{},
Notify: notifierV3{},
}
srvConfig.Version = "23"
srvConfig.Credential = cv22.Credential
@@ -2015,7 +2017,7 @@ func migrateV23ToV24() error {
// Copy over fields from V23 into V24 config struct
srvConfig := &serverConfigV24{
Notify: notifier{},
Notify: notifierV3{},
}
srvConfig.Version = "24"
srvConfig.Credential = cv23.Credential
@@ -2128,7 +2130,7 @@ func migrateV24ToV25() error {
// Copy over fields from V24 into V25 config struct
srvConfig := &serverConfigV25{
Notify: notifier{},
Notify: notifierV3{},
}
srvConfig.Version = "25"
srvConfig.Credential = cv24.Credential
@@ -2246,7 +2248,7 @@ func migrateV25ToV26() error {
// Copy over fields from V25 into V26 config struct
srvConfig := &serverConfigV26{
Notify: notifier{},
Notify: notifierV3{},
}
srvConfig.Version = "26"
srvConfig.Credential = cv25.Credential
@@ -2387,6 +2389,7 @@ func migrateV27ToV28() error {
// config V28 is backward compatible with V27, load the old
// config file in serverConfigV28 struct and initialize KMSConfig
srvConfig := &serverConfigV28{}
_, err := quick.LoadConfig(configFile, globalEtcdClient, srvConfig)
if os.IsNotExist(err) {
@@ -2409,28 +2412,176 @@ func migrateV27ToV28() error {
return nil
}
// Migrates '.minio.sys/config.json' v27 to v28.
// Migrates ${HOME}/.minio/config.json to '<export_path>/.minio.sys/config/config.json'
func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
// Construct path to config.json for the given bucket.
configFile := path.Join(minioConfigPrefix, minioConfigFile)
// Verify if the config is already available in .minio.sys, in which case nothing more needs to be done.
if err = checkConfig(context.Background(), objAPI, configFile); err != errConfigNotFound {
return err
}
defer func() {
// Rename config.json to config.json.deprecated only upon
// success of this function.
if err == nil {
os.Rename(getConfigFile(), getConfigFile()+".deprecated")
}
}()
transactionConfigFile := configFile + ".transaction"
// Since the object layer's GetObject() and PutObject() take their respective locks on
// minioMetaBucket and configFile, take a transaction lock to avoid a data race between
// readConfig() and saveConfig().
objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile)
if err = objLock.GetLock(globalOperationTimeout); err != nil {
return err
}
defer objLock.Unlock()
// Verify if backend already has the file (after holding lock)
if err = checkConfig(context.Background(), objAPI, configFile); err != errConfigNotFound {
return err
} // if errConfigNotFound, proceed to migrate.
var config = &serverConfig{}
if _, err = Load(getConfigFile(), config); err != nil {
if !os.IsNotExist(err) {
return err
}
// Read from the deprecated file as well if necessary.
if _, err = Load(getConfigFile()+".deprecated", config); err != nil {
if !os.IsNotExist(err) {
return err
}
// If all else fails simply initialize the server config.
return newSrvConfig(objAPI)
}
}
return saveServerConfig(context.Background(), objAPI, config)
}
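The transaction-lock idiom used by both migration entry points, isolated here as a sketch using the same identifiers that appear in the code:

	// Serialize read-modify-write of configFile across concurrent callers.
	objLock := globalNSMutex.NewNSLock(minioMetaBucket, configFile+".transaction")
	if err := objLock.GetLock(globalOperationTimeout); err != nil {
		return err
	}
	defer objLock.Unlock()
	// Everything between GetLock and Unlock is safe against racing
	// readConfig()/saveConfig() calls on the same config file.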
// Migrates '.minio.sys/config.json' to v33.
func migrateMinioSysConfig(objAPI ObjectLayer) error {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
// Check if the config version is latest, if not migrate.
ok, _, err := checkConfigVersion(objAPI, configFile, serverConfigVersion)
if err != nil {
return err
}
if ok {
return nil
}
// Construct path to config.json for the given bucket.
transactionConfigFile := configFile + ".transaction"
// Since the object layer's GetObject() and PutObject() take their respective locks on
// minioMetaBucket and configFile, take a transaction lock to avoid a data race between
// readConfig() and saveConfig().
objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile)
if err := objLock.GetLock(globalOperationTimeout); err != nil {
return err
}
defer objLock.Unlock()
if err := migrateV27ToV28MinioSys(objAPI); err != nil {
return err
}
return migrateV28ToV29MinioSys(objAPI)
if err := migrateV28ToV29MinioSys(objAPI); err != nil {
return err
}
if err := migrateV29ToV30MinioSys(objAPI); err != nil {
return err
}
if err := migrateV30ToV31MinioSys(objAPI); err != nil {
return err
}
if err := migrateV31ToV32MinioSys(objAPI); err != nil {
return err
}
return migrateV32ToV33MinioSys(objAPI)
}
func migrateV28ToV29MinioSys(objAPI ObjectLayer) error {
func checkConfigVersion(objAPI ObjectLayer, configFile string, version string) (bool, []byte, error) {
data, err := readConfig(context.Background(), objAPI, configFile)
if err != nil {
return false, nil, err
}
var versionConfig struct {
Version string `json:"version"`
}
vcfg := &versionConfig
if err = json.Unmarshal(data, vcfg); err != nil {
return false, nil, err
}
return vcfg.Version == version, data, nil
}
func migrateV27ToV28MinioSys(objAPI ObjectLayer) error {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
srvConfig, err := readServerConfig(context.Background(), objAPI)
ok, data, err := checkConfigVersion(objAPI, configFile, "27")
if err == errConfigNotFound {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config file. %v", err)
}
if srvConfig.Version != "28" {
if !ok {
return nil
}
srvConfig.Version = "29"
if err = saveServerConfig(context.Background(), objAPI, srvConfig); err != nil {
cfg := &serverConfigV28{}
if err = json.Unmarshal(data, cfg); err != nil {
return err
}
cfg.Version = "28"
cfg.KMS = crypto.KMSConfig{}
data, err = json.Marshal(cfg)
if err != nil {
return err
}
if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
return fmt.Errorf("Failed to migrate config from 27 to 28. %v", err)
}
logger.Info(configMigrateMSGTemplate, configFile, "27", "28")
return nil
}
func migrateV28ToV29MinioSys(objAPI ObjectLayer) error {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
ok, data, err := checkConfigVersion(objAPI, configFile, "28")
if err == errConfigNotFound {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config file. %v", err)
}
if !ok {
return nil
}
cfg := &serverConfigV29{}
if err = json.Unmarshal(data, cfg); err != nil {
return err
}
cfg.Version = "29"
data, err = json.Marshal(cfg)
if err != nil {
return err
}
if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
return fmt.Errorf("Failed to migrate config from 28 to 29. %v", err)
}
@@ -2438,24 +2589,147 @@ func migrateV28ToV29MinioSys(objAPI ObjectLayer) error {
return nil
}
func migrateV27ToV28MinioSys(objAPI ObjectLayer) error {
func migrateV29ToV30MinioSys(objAPI ObjectLayer) error {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
srvConfig, err := readServerConfig(context.Background(), objAPI)
ok, data, err := checkConfigVersion(objAPI, configFile, "29")
if err == errConfigNotFound {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config file. %v", err)
}
if srvConfig.Version != "27" {
if !ok {
return nil
}
srvConfig.Version = "28"
srvConfig.KMS = crypto.KMSConfig{}
if err = saveServerConfig(context.Background(), objAPI, srvConfig); err != nil {
return fmt.Errorf("Failed to migrate config from 27 to 28. %v", err)
cfg := &serverConfigV30{}
if err = json.Unmarshal(data, cfg); err != nil {
return err
}
logger.Info(configMigrateMSGTemplate, configFile, "27", "28")
cfg.Version = "30"
// Initialize compression config. For future migrations, the compression config needs to be copied over from the previous version.
cfg.Compression.Enabled = false
cfg.Compression.Extensions = globalCompressExtensions
cfg.Compression.MimeTypes = globalCompressMimeTypes
data, err = json.Marshal(cfg)
if err != nil {
return err
}
if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
return fmt.Errorf("Failed to migrate config from 29 to 30. %v", err)
}
logger.Info(configMigrateMSGTemplate, configFile, "29", "30")
return nil
}
func migrateV30ToV31MinioSys(objAPI ObjectLayer) error {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
ok, data, err := checkConfigVersion(objAPI, configFile, "30")
if err == errConfigNotFound {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config file. %v", err)
}
if !ok {
return nil
}
cfg := &serverConfigV31{}
if err = json.Unmarshal(data, cfg); err != nil {
return err
}
cfg.Version = "31"
cfg.OpenID.JWKS = validator.JWKSArgs{
URL: &xnet.URL{},
}
cfg.Policy.OPA = iampolicy.OpaArgs{
URL: &xnet.URL{},
AuthToken: "",
}
data, err = json.Marshal(cfg)
if err != nil {
return err
}
if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
return fmt.Errorf("Failed to migrate config from 30 to 31. %v", err)
}
logger.Info(configMigrateMSGTemplate, configFile, "30", "31")
return nil
}
func migrateV31ToV32MinioSys(objAPI ObjectLayer) error {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
ok, data, err := checkConfigVersion(objAPI, configFile, "31")
if err == errConfigNotFound {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config file. %v", err)
}
if !ok {
return nil
}
cfg := &serverConfigV32{}
if err = json.Unmarshal(data, cfg); err != nil {
return err
}
cfg.Version = "32"
cfg.Notify.NSQ = make(map[string]target.NSQArgs)
cfg.Notify.NSQ["1"] = target.NSQArgs{}
data, err = json.Marshal(cfg)
if err != nil {
return err
}
if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
return fmt.Errorf("Failed to migrate config from 31 to 32. %v", err)
}
logger.Info(configMigrateMSGTemplate, configFile, "31", "32")
return nil
}
func migrateV32ToV33MinioSys(objAPI ObjectLayer) error {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
ok, data, err := checkConfigVersion(objAPI, configFile, "32")
if err == errConfigNotFound {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config file. %v", err)
}
if !ok {
return nil
}
cfg := &serverConfigV33{}
if err = json.Unmarshal(data, cfg); err != nil {
return err
}
cfg.Version = "33"
data, err = json.Marshal(cfg)
if err != nil {
return err
}
if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
return fmt.Errorf("Failed to migrate config from 32 to 33 . %v", err)
}
logger.Info(configMigrateMSGTemplate, configFile, "32", "33")
return nil
}

View File

@@ -39,7 +39,7 @@ func TestServerConfigMigrateV1(t *testing.T) {
t.Fatal(err)
}
defer os.RemoveAll(rootPath)
setConfigDir(rootPath)
globalConfigDir = &ConfigDir{path: rootPath}
globalObjLayerMutex.Lock()
globalObjectAPI = objLayer
@@ -77,7 +77,7 @@ func TestServerConfigMigrateInexistentConfig(t *testing.T) {
}
defer os.RemoveAll(rootPath)
setConfigDir(rootPath)
globalConfigDir = &ConfigDir{path: rootPath}
if err := migrateV2ToV3(); err != nil {
t.Fatal("migrate v2 to v3 should succeed when no config file is found")
@@ -159,14 +159,15 @@ func TestServerConfigMigrateInexistentConfig(t *testing.T) {
}
}
// Test if a config migration from v2 to v29 is successfully done
func TestServerConfigMigrateV2toV29(t *testing.T) {
// Test if a config migration from v2 to v33 is successfully done
func TestServerConfigMigrateV2toV33(t *testing.T) {
rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rootPath)
setConfigDir(rootPath)
globalConfigDir = &ConfigDir{path: rootPath}
objLayer, fsDir, err := prepareFS()
if err != nil {
@@ -222,6 +223,7 @@ func TestServerConfigMigrateV2toV29(t *testing.T) {
if globalServerConfig.Credential.AccessKey != accessKey {
t.Fatalf("Access key lost during migration, expected: %v, found:%v", accessKey, globalServerConfig.Credential.AccessKey)
}
if globalServerConfig.Credential.SecretKey != secretKey {
t.Fatalf("Secret key lost during migration, expected: %v, found: %v", secretKey, globalServerConfig.Credential.SecretKey)
}
@@ -234,7 +236,8 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
t.Fatal(err)
}
defer os.RemoveAll(rootPath)
setConfigDir(rootPath)
globalConfigDir = &ConfigDir{path: rootPath}
configPath := rootPath + "/" + minioConfigFile
// Create a corrupted config file
@@ -318,7 +321,6 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
if err := migrateV26ToV27(); err == nil {
t.Fatal("migrateConfigV26ToV27() should fail with a corrupted json")
}
if err := migrateV27ToV28(); err == nil {
t.Fatal("migrateConfigV27ToV28() should fail with a corrupted json")
}
@@ -331,7 +333,8 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
t.Fatal(err)
}
defer os.RemoveAll(rootPath)
setConfigDir(rootPath)
globalConfigDir = &ConfigDir{path: rootPath}
configPath := rootPath + "/" + minioConfigFile
for i := 3; i <= 17; i++ {

View File

@@ -22,6 +22,8 @@ import (
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/event/target"
"github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/iam/validator"
"github.com/minio/minio/pkg/quick"
)
@@ -261,9 +263,6 @@ type serverConfigV7 struct {
// Notification queue configuration.
Notify notifierV1 `json:"notify"`
// Read Write mutex.
rwMutex *sync.RWMutex
}
// serverConfigV8 server configuration version '8'. Adds NATS notifier
@@ -280,9 +279,6 @@ type serverConfigV8 struct {
// Notification queue configuration.
Notify notifierV1 `json:"notify"`
// Read Write mutex.
rwMutex *sync.RWMutex
}
// serverConfigV9 server configuration version '9'. Adds PostgreSQL
@@ -299,9 +295,6 @@ type serverConfigV9 struct {
// Notification queue configuration.
Notify notifierV1 `json:"notify"`
// Read Write mutex.
rwMutex *sync.RWMutex
}
type loggerV7 struct {
@@ -371,7 +364,7 @@ type serverConfigV12 struct {
Notify notifierV2 `json:"notify"`
}
type notifier struct {
type notifierV3 struct {
AMQP map[string]target.AMQPArgs `json:"amqp"`
Elasticsearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
Kafka map[string]target.KafkaArgs `json:"kafka"`
@@ -396,7 +389,7 @@ type serverConfigV13 struct {
Logger *loggerV7 `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
Notify *notifierV3 `json:"notify"`
}
// serverConfigV14 server configuration version '14' which is like
@@ -413,7 +406,7 @@ type serverConfigV14 struct {
Logger *loggerV7 `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
Notify *notifierV3 `json:"notify"`
}
// serverConfigV15 server configuration version '15' which is like
@@ -430,7 +423,7 @@ type serverConfigV15 struct {
Logger *loggerV7 `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
Notify *notifierV3 `json:"notify"`
}
// FileLogger is introduced to workaround the dependency about logrus
@@ -468,7 +461,7 @@ type serverConfigV16 struct {
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
Notify *notifierV3 `json:"notify"`
}
// serverConfigV17 server configuration version '17' which is like
@@ -487,7 +480,7 @@ type serverConfigV17 struct {
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
Notify *notifierV3 `json:"notify"`
}
// serverConfigV18 server configuration version '18' which is like
@@ -506,7 +499,7 @@ type serverConfigV18 struct {
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
Notify *notifierV3 `json:"notify"`
}
// serverConfigV19 server configuration version '19' which is like
@@ -524,7 +517,7 @@ type serverConfigV19 struct {
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
Notify *notifierV3 `json:"notify"`
}
// serverConfigV20 server configuration version '20' which is like
@@ -543,7 +536,7 @@ type serverConfigV20 struct {
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
Notify *notifierV3 `json:"notify"`
}
// serverConfigV21 is just like version '20' without logger field
@@ -558,7 +551,7 @@ type serverConfigV21 struct {
Domain string `json:"domain"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
Notify *notifierV3 `json:"notify"`
}
// serverConfigV22 is just like version '21' with added support
@@ -579,7 +572,7 @@ type serverConfigV22 struct {
StorageClass storageClassConfig `json:"storageclass"`
// Notification queue configuration.
Notify notifier `json:"notify"`
Notify notifierV3 `json:"notify"`
}
// serverConfigV23 is just like version '22' with addition of cache field.
@@ -602,7 +595,7 @@ type serverConfigV23 struct {
Cache CacheConfig `json:"cache"`
// Notification queue configuration.
Notify notifier `json:"notify"`
Notify notifierV3 `json:"notify"`
}
// serverConfigV24 is just like version '23', we had to revert
@@ -626,7 +619,7 @@ type serverConfigV24 struct {
Cache CacheConfig `json:"cache"`
// Notification queue configuration.
Notify notifier `json:"notify"`
Notify notifierV3 `json:"notify"`
}
// serverConfigV25 is just like version '24', stores additionally
@@ -653,7 +646,7 @@ type serverConfigV25 struct {
Cache CacheConfig `json:"cache"`
// Notification queue configuration.
Notify notifier `json:"notify"`
Notify notifierV3 `json:"notify"`
}
// serverConfigV26 is just like version '25', stores additionally
@@ -677,7 +670,7 @@ type serverConfigV26 struct {
Cache CacheConfig `json:"cache"`
// Notification queue configuration.
Notify notifier `json:"notify"`
Notify notifierV3 `json:"notify"`
}
type loggerConsole struct {
@@ -718,7 +711,7 @@ type serverConfigV27 struct {
Cache CacheConfig `json:"cache"`
// Notification queue configuration.
Notify notifier `json:"notify"`
Notify notifierV3 `json:"notify"`
// Logger configuration
Logger loggerConfig `json:"logger"`
@@ -749,14 +742,150 @@ type serverConfigV28 struct {
KMS crypto.KMSConfig `json:"kms"`
// Notification queue configuration.
Notify notifier `json:"notify"`
Notify notifierV3 `json:"notify"`
// Logger configuration
Logger loggerConfig `json:"logger"`
}
// serverConfigV29 is just like version '28', browser and domain are deprecated.
type serverConfigV29 struct {
// serverConfigV29 is just like version '28'.
type serverConfigV29 serverConfigV28
// compressionConfig represents the compression settings.
type compressionConfig struct {
Enabled bool `json:"enabled"`
Extensions []string `json:"extensions"`
MimeTypes []string `json:"mime-types"`
}
// serverConfigV30 is just like version '29', stores additionally
// extensions and mimetypes fields for compression.
type serverConfigV30 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Worm BoolFlag `json:"worm"`
// Storage class configuration
StorageClass storageClassConfig `json:"storageclass"`
// Cache configuration
Cache CacheConfig `json:"cache"`
// KMS configuration
KMS crypto.KMSConfig `json:"kms"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
// Logger configuration
Logger loggerConfig `json:"logger"`
// Compression configuration
Compression compressionConfig `json:"compress"`
}
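Given the JSON tags above, the v30 migration populates this section roughly as follows; the extension and MIME-type values are illustrative stand-ins for the global defaults, which are not shown in this hunk:

	cfg.Compression = compressionConfig{
		Enabled:    false,
		Extensions: []string{".txt", ".log", ".csv"},  // illustrative defaults
		MimeTypes:  []string{"text/plain", "text/csv"}, // illustrative defaults
	}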
// serverConfigV31 is just like version '30', with OPA and OpenID configuration.
type serverConfigV31 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Worm BoolFlag `json:"worm"`
// Storage class configuration
StorageClass storageClassConfig `json:"storageclass"`
// Cache configuration
Cache CacheConfig `json:"cache"`
// KMS configuration
KMS crypto.KMSConfig `json:"kms"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
// Logger configuration
Logger loggerConfig `json:"logger"`
// Compression configuration
Compression compressionConfig `json:"compress"`
// OpenID configuration
OpenID struct {
// JWKS validator config.
JWKS validator.JWKSArgs `json:"jwks"`
} `json:"openid"`
// External policy enforcements.
Policy struct {
// OPA configuration.
OPA iampolicy.OpaArgs `json:"opa"`
// Add new external policy enforcements here.
} `json:"policy"`
}
type notifier struct {
AMQP map[string]target.AMQPArgs `json:"amqp"`
Elasticsearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
Kafka map[string]target.KafkaArgs `json:"kafka"`
MQTT map[string]target.MQTTArgs `json:"mqtt"`
MySQL map[string]target.MySQLArgs `json:"mysql"`
NATS map[string]target.NATSArgs `json:"nats"`
NSQ map[string]target.NSQArgs `json:"nsq"`
PostgreSQL map[string]target.PostgreSQLArgs `json:"postgresql"`
Redis map[string]target.RedisArgs `json:"redis"`
Webhook map[string]target.WebhookArgs `json:"webhook"`
}
// serverConfigV32 is just like version '31' with an added NSQ notifier.
type serverConfigV32 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Worm BoolFlag `json:"worm"`
// Storage class configuration
StorageClass storageClassConfig `json:"storageclass"`
// Cache configuration
Cache CacheConfig `json:"cache"`
// KMS configuration
KMS crypto.KMSConfig `json:"kms"`
// Notification queue configuration.
Notify notifier `json:"notify"`
// Logger configuration
Logger loggerConfig `json:"logger"`
// Compression configuration
Compression compressionConfig `json:"compress"`
// OpenID configuration
OpenID struct {
// JWKS validator config.
JWKS validator.JWKSArgs `json:"jwks"`
} `json:"openid"`
// External policy enforcements.
Policy struct {
// OPA configuration.
OPA iampolicy.OpaArgs `json:"opa"`
// Add new external policy enforcements here.
} `json:"policy"`
}
// serverConfigV33 is just like version '32'; it removes clientID from NATS and MQTT, and adds queueDir and queueLimit to MQTT.
type serverConfigV33 struct {
quick.Config `json:"-"` // ignore interfaces
Version string `json:"version"`
@@ -780,4 +909,21 @@ type serverConfigV29 struct {
// Logger configuration
Logger loggerConfig `json:"logger"`
// Compression configuration
Compression compressionConfig `json:"compress"`
// OpenID configuration
OpenID struct {
// JWKS validator config.
JWKS validator.JWKSArgs `json:"jwks"`
} `json:"openid"`
// External policy enforcements.
Policy struct {
// OPA configuration.
OPA iampolicy.OpaArgs `json:"opa"`
// Add new external policy enforcements here.
} `json:"policy"`
}

View File

@@ -20,16 +20,11 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"io/ioutil"
"os"
"path"
"runtime"
"time"
"strings"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/quick"
)
@@ -55,23 +50,14 @@ func saveServerConfig(ctx context.Context, objAPI ObjectLayer, config *serverCon
configFile := path.Join(minioConfigPrefix, minioConfigFile)
if globalEtcdClient != nil {
timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Minute)
_, err = globalEtcdClient.Put(timeoutCtx, configFile, string(data))
defer cancel()
return err
return saveConfigEtcd(ctx, globalEtcdClient, configFile, data)
}
// Create a backup of the current config
reader, err := readConfig(ctx, objAPI, configFile)
oldData, err := readConfig(ctx, objAPI, configFile)
if err == nil {
var oldData []byte
oldData, err = ioutil.ReadAll(reader)
if err != nil {
return err
}
backupConfigFile := path.Join(minioConfigPrefix, minioConfigBackupFile)
err = saveConfig(objAPI, backupConfigFile, oldData)
if err != nil {
if err = saveConfig(ctx, objAPI, backupConfigFile, oldData); err != nil {
return err
}
} else {
@@ -81,40 +67,18 @@ func saveServerConfig(ctx context.Context, objAPI ObjectLayer, config *serverCon
}
// Save the new config in the std config path
return saveConfig(objAPI, configFile, data)
}
func readConfigEtcd(configFile string) ([]byte, error) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
resp, err := globalEtcdClient.Get(ctx, configFile)
defer cancel()
if err != nil {
return nil, err
}
if resp.Count == 0 {
return nil, errConfigNotFound
}
for _, ev := range resp.Kvs {
if string(ev.Key) == configFile {
return ev.Value, nil
}
}
return nil, errConfigNotFound
return saveConfig(ctx, objAPI, configFile, data)
}
func readServerConfig(ctx context.Context, objAPI ObjectLayer) (*serverConfig, error) {
var configData []byte
var err error
configFile := path.Join(minioConfigPrefix, minioConfigFile)
if globalEtcdClient != nil {
configData, err = readConfigEtcd(configFile)
configData, err = readConfigEtcd(ctx, globalEtcdClient, configFile)
} else {
var reader io.Reader
reader, err = readConfig(ctx, objAPI, configFile)
if err != nil {
return nil, err
}
configData, err = ioutil.ReadAll(reader)
configData, err = readConfig(ctx, objAPI, configFile)
}
if err != nil {
return nil, err
@@ -129,82 +93,17 @@ func readServerConfig(ctx context.Context, objAPI ObjectLayer) (*serverConfig, e
}
var config = &serverConfig{}
if err := json.Unmarshal(configData, config); err != nil {
if err = json.Unmarshal(configData, config); err != nil {
return nil, err
}
if err := quick.CheckData(config); err != nil {
if err = quick.CheckData(config); err != nil {
return nil, err
}
return config, nil
}
func checkServerConfigEtcd(configFile string) error {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
resp, err := globalEtcdClient.Get(ctx, configFile)
defer cancel()
if err != nil {
return err
}
if resp.Count == 0 {
return errConfigNotFound
}
return nil
}
func checkServerConfig(ctx context.Context, objAPI ObjectLayer) error {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
if globalEtcdClient != nil {
return checkServerConfigEtcd(configFile)
}
if _, err := objAPI.GetObjectInfo(ctx, minioMetaBucket, configFile, ObjectOptions{}); err != nil {
// Convert ObjectNotFound to errConfigNotFound
if isErrObjectNotFound(err) {
return errConfigNotFound
}
logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
logger.LogIf(ctx, err)
return err
}
return nil
}
func saveConfig(objAPI ObjectLayer, configFile string, data []byte) error {
hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data))
if err != nil {
return err
}
_, err = objAPI.PutObject(context.Background(), minioMetaBucket, configFile, hashReader, nil, ObjectOptions{})
return err
}
var errConfigNotFound = errors.New("config file not found")
func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) (*bytes.Buffer, error) {
var buffer bytes.Buffer
// Read entire content by setting size to -1
if err := objAPI.GetObject(ctx, minioMetaBucket, configFile, 0, -1, &buffer, "", ObjectOptions{}); err != nil {
// Convert ObjectNotFound and IncompleteBody errors into errConfigNotFound
if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
return nil, errConfigNotFound
}
logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
logger.LogIf(ctx, err)
return nil, err
}
// Return config not found on empty content.
if buffer.Len() == 0 {
return nil, errConfigNotFound
}
return &buffer, nil
}
// ConfigSys - config system.
type ConfigSys struct{}
@@ -228,22 +127,18 @@ func (sys *ConfigSys) Init(objAPI ObjectLayer) error {
// of the object layer.
// - Write quorum not met when upgrading configuration
// version is needed.
retryTimerCh := newRetryTimerSimple(doneCh)
for {
select {
case _ = <-retryTimerCh:
err := initConfig(objAPI)
if err != nil {
if isInsufficientReadQuorum(err) || isInsufficientWriteQuorum(err) {
logger.Info("Waiting for configuration to be initialized..")
continue
}
return err
for range newRetryTimerSimple(doneCh) {
if err := initConfig(objAPI); err != nil {
if strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
logger.Info("Waiting for configuration to be initialized..")
continue
}
return nil
return err
}
break
}
return nil
}
// NewConfigSys - creates new config system object.
@@ -251,42 +146,33 @@ func NewConfigSys() *ConfigSys {
return &ConfigSys{}
}
// Migrates ${HOME}/.minio/config.json to '<export_path>/.minio.sys/config/config.json'
func migrateConfigToMinioSys(objAPI ObjectLayer) error {
defer os.Rename(getConfigFile(), getConfigFile()+".deprecated")
// Verify if backend already has the file.
if err := checkServerConfig(context.Background(), objAPI); err != errConfigNotFound {
return err
} // if errConfigNotFound proceed to migrate..
var config = &serverConfig{}
if _, err := Load(getConfigFile(), config); err != nil {
if !os.IsNotExist(err) {
return err
}
// Read from the deprecated file as well if necessary.
if _, err = Load(getConfigFile()+".deprecated", config); err != nil {
return err
}
}
return saveServerConfig(context.Background(), objAPI, config)
}
// Initialize and load config from remote etcd or local config directory
func initConfig(objAPI ObjectLayer) error {
if objAPI == nil {
return errServerNotInitialized
}
configFile := path.Join(minioConfigPrefix, minioConfigFile)
if globalEtcdClient != nil {
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
resp, err := globalEtcdClient.Get(ctx, getConfigFile())
cancel()
if err == nil && resp.Count > 0 {
return migrateConfig()
if err := checkConfigEtcd(context.Background(), globalEtcdClient, getConfigFile()); err != nil {
if err == errConfigNotFound {
// Migrates all configs at old location.
if err = migrateConfig(); err != nil {
return err
}
// Migrates etcd ${HOME}/.minio/config.json to '/config/config.json'
if err = migrateConfigToMinioSys(objAPI); err != nil {
return err
}
} else {
return err
}
}
// Watch config for changes and reloads them.
go watchConfigEtcd(objAPI, configFile, loadConfig)
} else {
if isFile(getConfigFile()) {
if err := migrateConfig(); err != nil {
@@ -296,25 +182,15 @@ func initConfig(objAPI ObjectLayer) error {
// Migrates ${HOME}/.minio/config.json or config.json.deprecated
// to '<export_path>/.minio.sys/config/config.json'
// ignore if the file doesn't exist.
if err := migrateConfigToMinioSys(objAPI); err != nil && !os.IsNotExist(err) {
if err := migrateConfigToMinioSys(objAPI); err != nil {
return err
}
}
if err := checkServerConfig(context.Background(), objAPI); err != nil {
if err == errConfigNotFound {
// Config file does not exist, we create it fresh and return upon success.
if err = newConfig(objAPI); err != nil {
return err
}
} else {
// Migrates backend '<export_path>/.minio.sys/config/config.json' to latest version.
if err := migrateMinioSysConfig(objAPI); err != nil {
return err
}
}
if err := migrateMinioSysConfig(objAPI); err != nil {
return err
}
return loadConfig(objAPI)
}

View File

@@ -17,21 +17,22 @@
package cmd
import (
"context"
"net/http"
"net/url"
)
// Writes S3 compatible copy part range error.
func writeCopyPartErr(w http.ResponseWriter, err error, url *url.URL) {
func writeCopyPartErr(ctx context.Context, w http.ResponseWriter, err error, url *url.URL, browser bool) {
switch err {
case errInvalidRange:
writeErrorResponse(w, ErrInvalidCopyPartRange, url)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopyPartRange), url, browser)
return
case errInvalidRangeSource:
writeErrorResponse(w, ErrInvalidCopyPartRangeSource, url)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopyPartRangeSource), url, browser)
return
default:
writeErrorResponse(w, ErrInternalError, url)
writeErrorResponse(ctx, w, toAPIError(ctx, err), url, browser)
return
}
}
@@ -43,34 +44,6 @@ func writeCopyPartErr(w http.ResponseWriter, err error, url *url.URL) {
// http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
// for full details. This function treats an empty rangeString as
// referring to the whole resource.
//
// In addition to parsing the range string, it also validates the
// specified range against the given object size, so that Copy API
// specific error can be returned.
func parseCopyPartRange(rangeString string, resourceSize int64) (offset, length int64, err error) {
var hrange *HTTPRangeSpec
if rangeString != "" {
hrange, err = parseRequestRangeSpec(rangeString)
if err != nil {
return -1, -1, err
}
// Require that both start and end are specified.
if hrange.IsSuffixLength || hrange.Start == -1 || hrange.End == -1 {
return -1, -1, errInvalidRange
}
// Validate specified range against object size.
if hrange.Start >= resourceSize || hrange.End >= resourceSize {
return -1, -1, errInvalidRangeSource
}
}
return hrange.GetOffsetLength(resourceSize)
}
// parseCopyPartRangeSpec transforms a range string (e.g. bytes=3-4) to HTTPRangeSpec
// and returns errors if weird values
func parseCopyPartRangeSpec(rangeString string) (hrange *HTTPRangeSpec, err error) {
hrange, err = parseRequestRangeSpec(rangeString)
if err != nil {

View File

@@ -19,7 +19,7 @@ package cmd
import "testing"
// Test parseCopyPartRange()
func TestParseCopyPartRange(t *testing.T) {
func TestParseCopyPartRangeSpec(t *testing.T) {
// Test success cases.
successCases := []struct {
rangeString string
@@ -29,16 +29,21 @@ func TestParseCopyPartRange(t *testing.T) {
{"bytes=2-5", 2, 5},
{"bytes=2-9", 2, 9},
{"bytes=2-2", 2, 2},
{"", 0, 9},
{"bytes=0000-0006", 0, 6},
}
objectSize := int64(10)
for _, successCase := range successCases {
start, length, err := parseCopyPartRange(successCase.rangeString, 10)
rs, err := parseCopyPartRangeSpec(successCase.rangeString)
if err != nil {
t.Fatalf("expected: <nil>, got: %s", err)
}
start, length, err1 := rs.GetOffsetLength(objectSize)
if err1 != nil {
t.Fatalf("expected: <nil>, got: %s", err1)
}
if start != successCase.offsetBegin {
t.Fatalf("expected: %d, got: %d", successCase.offsetBegin, start)
}
@@ -61,9 +66,11 @@ func TestParseCopyPartRange(t *testing.T) {
"bytes=2 - 5",
"bytes=0-0,-1",
"bytes=2-5 ",
"bytes=-1",
"bytes=1-",
}
for _, rangeString := range invalidRangeStrings {
if _, _, err := parseCopyPartRange(rangeString, 10); err == nil {
if _, err := parseCopyPartRangeSpec(rangeString); err == nil {
t.Fatalf("expected: an error, got: <nil> for range %s", rangeString)
}
}
@@ -74,8 +81,14 @@ func TestParseCopyPartRange(t *testing.T) {
"bytes=20-30",
}
for _, rangeString := range errorRangeString {
if _, _, err := parseCopyPartRange(rangeString, 10); err != errInvalidRangeSource {
t.Fatalf("expected: %s, got: %s", errInvalidRangeSource, err)
rs, err := parseCopyPartRangeSpec(rangeString)
if err == nil {
err1 := checkCopyPartRangeWithSize(rs, objectSize)
if err1 != errInvalidRangeSource {
t.Fatalf("expected: %s, got: %s", errInvalidRangeSource, err)
}
} else {
t.Fatalf("expected: %s, got: <nil>", errInvalidRangeSource)
}
}
}

View File

@@ -16,5 +16,6 @@ package crypto
// KMSConfig has the KMS config for hashicorp vault
type KMSConfig struct {
Vault VaultConfig `json:"vault"`
AutoEncryption bool `json:"-"`
Vault VaultConfig `json:"vault"`
}

View File

@@ -95,7 +95,7 @@ var ssecIsRequestedTests = []struct {
Header http.Header
Expected bool
}{
{Header: http.Header{}, Expected: false}, // 0
{Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"}}, Expected: true}, // 1
{Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}}, Expected: true}, // 2
{Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="}}, Expected: true}, // 3
@@ -137,7 +137,7 @@ var ssecCopyIsRequestedTests = []struct {
Header http.Header
Expected bool
}{
{Header: http.Header{}, Expected: false}, // 0
{Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"}}, Expected: true}, // 1
{Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}}, Expected: true}, // 2
{Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="}}, Expected: true}, // 3

View File

@@ -140,3 +140,37 @@ func (key ObjectKey) DerivePartKey(id uint32) (partKey [32]byte) {
mac.Sum(partKey[:0])
return partKey
}
// SealETag seals the etag using the object key.
// It does not encrypt empty ETags because such ETags indicate
// that the S3 client hasn't sent an ETag = MD5(object) and
// the backend can pick an ETag value.
func (key ObjectKey) SealETag(etag []byte) []byte {
if len(etag) == 0 { // don't encrypt empty ETag - only if client sent ETag = MD5(object)
return etag
}
var buffer bytes.Buffer
mac := hmac.New(sha256.New, key[:])
mac.Write([]byte("SSE-etag"))
if _, err := sio.Encrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil)}); err != nil {
logger.CriticalIf(context.Background(), errors.New("Unable to encrypt ETag using object key"))
}
return buffer.Bytes()
}
// UnsealETag unseals the etag using the provided object key.
// It does not try to decrypt the ETag if len(etag) == 16
// because such ETags indicate that the S3 client hasn't sent
// an ETag = MD5(object) and the backend has picked an ETag value.
func (key ObjectKey) UnsealETag(etag []byte) ([]byte, error) {
if !IsETagSealed(etag) {
return etag, nil
}
var buffer bytes.Buffer
mac := hmac.New(sha256.New, key[:])
mac.Write([]byte("SSE-etag"))
if _, err := sio.Decrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil)}); err != nil {
return nil, err
}
return buffer.Bytes(), nil
}

View File

@@ -166,3 +166,31 @@ func TestDerivePartKey(t *testing.T) {
}
}
}
var sealUnsealETagTests = []string{
"",
"90682b8e8cc7609c",
"90682b8e8cc7609c4671e1d64c73fc30",
"90682b8e8cc7609c4671e1d64c73fc307fb3104f",
}
func TestSealETag(t *testing.T) {
var key ObjectKey
for i := range key {
key[i] = byte(i)
}
for i, etag := range sealUnsealETagTests {
tag, err := hex.DecodeString(etag)
if err != nil {
t.Errorf("Test %d: failed to decode etag: %s", i, err)
}
sealedETag := key.SealETag(tag)
unsealedETag, err := key.UnsealETag(sealedETag)
if err != nil {
t.Errorf("Test %d: failed to decrypt etag: %s", i, err)
}
if !bytes.Equal(unsealedETag, tag) {
t.Errorf("Test %d: unsealed etag does not match: got %s - want %s", i, hex.EncodeToString(unsealedETag), etag)
}
}
}

View File

@@ -40,6 +40,26 @@ func RemoveSensitiveEntries(metadata map[string]string) { // The functions is te
delete(metadata, SSECopyKey)
}
// RemoveSSEHeaders removes all crypto-specific SSE
// header entries from the metadata map.
func RemoveSSEHeaders(metadata map[string]string) {
delete(metadata, SSEHeader)
delete(metadata, SSECKeyMD5)
delete(metadata, SSECAlgorithm)
}
// RemoveInternalEntries removes all crypto-specific internal
// metadata entries from the metadata map.
func RemoveInternalEntries(metadata map[string]string) {
delete(metadata, SSEMultipart)
delete(metadata, SSEIV)
delete(metadata, SSESealAlgorithm)
delete(metadata, SSECSealedKey)
delete(metadata, S3SealedKey)
delete(metadata, S3KMSKeyID)
delete(metadata, S3KMSSealedKey)
}
// IsEncrypted returns true if the object metadata indicates
// that it was uploaded using some form of server-side-encryption.
//
@@ -219,3 +239,6 @@ func (ssec) ParseMetadata(metadata map[string]string) (sealedKey SealedKey, err
copy(sealedKey.Key[:], encryptedKey)
return sealedKey, nil
}
// IsETagSealed returns true if the etag seems to be encrypted.
func IsETagSealed(etag []byte) bool { return len(etag) > 16 }
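A note on the 16-byte threshold, as a small sketch: a plain ETag is the 16-byte MD5 of the object, while a sealed ETag carries the sio/DARE package overhead on top of the plaintext and is therefore strictly longer (the 48-byte sealed ETag used in the tests is consistent with this):

	// A plain MD5 ETag decodes to exactly 16 bytes, hence unsealed.
	plain, _ := hex.DecodeString("90682b8e8cc7609c4671e1d64c73fc30")
	fmt.Println(IsETagSealed(plain)) // false: len(plain) == 16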

View File

@@ -17,6 +17,7 @@ package crypto
import (
"bytes"
"encoding/base64"
"encoding/hex"
"testing"
"github.com/minio/minio/cmd/logger"
@@ -364,3 +365,75 @@ func TestSSECCreateMetadata(t *testing.T) {
}()
_ = SSEC.CreateMetadata(nil, SealedKey{Algorithm: InsecureSealAlgorithm})
}
var isETagSealedTests = []struct {
ETag string
IsSealed bool
}{
{ETag: "", IsSealed: false}, // 0
{ETag: "90682b8e8cc7609c4671e1d64c73fc30", IsSealed: false}, // 1
{ETag: "f201040c9dc593e39ea004dc1323699bcd", IsSealed: true}, // 2 not valid ciphertext but looks like sealed ETag
{ETag: "20000f00fba2ee2ae4845f725964eeb9e092edfabc7ab9f9239e8344341f769a51ce99b4801b0699b92b16a72fa94972", IsSealed: true}, // 3
}
func TestIsETagSealed(t *testing.T) {
for i, test := range isETagSealedTests {
etag, err := hex.DecodeString(test.ETag)
if err != nil {
t.Errorf("Test %d: failed to decode etag: %s", i, err)
}
if sealed := IsETagSealed(etag); sealed != test.IsSealed {
t.Errorf("Test %d: got %v - want %v", i, sealed, test.IsSealed)
}
}
}
var removeInternalEntriesTests = []struct {
Metadata, Expected map[string]string
}{
{ // 0
Metadata: map[string]string{
SSEMultipart: "",
SSEIV: "",
SSESealAlgorithm: "",
SSECSealedKey: "",
S3SealedKey: "",
S3KMSKeyID: "",
S3KMSSealedKey: "",
},
Expected: map[string]string{},
},
{ // 1
Metadata: map[string]string{
SSEMultipart: "",
SSEIV: "",
"X-Amz-Meta-A": "X",
"X-Minio-Internal-B": "Y",
},
Expected: map[string]string{
"X-Amz-Meta-A": "X",
"X-Minio-Internal-B": "Y",
},
},
}
func TestRemoveInternalEntries(t *testing.T) {
isEqual := func(x, y map[string]string) bool {
if len(x) != len(y) {
return false
}
for k, v := range x {
if u, ok := y[k]; !ok || v != u {
return false
}
}
return true
}
for i, test := range removeInternalEntriesTests {
RemoveInternalEntries(test.Metadata)
if !isEqual(test.Metadata, test.Expected) {
t.Errorf("Test %d: got %v - want %v", i, test.Metadata, test.Expected)
}
}
}

View File

@@ -18,6 +18,8 @@ import (
"context"
"errors"
"io"
"net/http"
"path"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/ioutil"
@@ -69,10 +71,59 @@ const (
// domain is "SSE-S3".
func (s3) String() string { return "SSE-S3" }
// UnsealObjectKey extracts and decrypts the sealed object key
// from the metadata using KMS and returns the decrypted object
// key.
func (sse s3) UnsealObjectKey(kms KMS, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
keyID, kmsKey, sealedKey, err := sse.ParseMetadata(metadata)
if err != nil {
return
}
unsealKey, err := kms.UnsealKey(keyID, kmsKey, Context{bucket: path.Join(bucket, object)})
if err != nil {
return
}
err = key.Unseal(unsealKey, sealedKey, sse.String(), bucket, object)
return
}
// String returns the SSE domain as string. For SSE-C the
// domain is "SSE-C".
func (ssec) String() string { return "SSE-C" }
// UnsealObjectKey extracts and decrypts the sealed object key
// from the metadata using the SSE-C client key of the HTTP headers
// and returns the decrypted object key.
func (sse ssec) UnsealObjectKey(h http.Header, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
clientKey, err := sse.ParseHTTP(h)
if err != nil {
return
}
return unsealObjectKey(clientKey, metadata, bucket, object)
}
// UnsealObjectKey extracts and decrypts the sealed object key
// from the metadata using the SSE-Copy client key of the HTTP headers
// and returns the decrypted object key.
func (sse ssecCopy) UnsealObjectKey(h http.Header, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
clientKey, err := sse.ParseHTTP(h)
if err != nil {
return
}
return unsealObjectKey(clientKey, metadata, bucket, object)
}
// unsealObjectKey decrypts and returns the sealed object key
// from the metadata using the SSE-C client key.
func unsealObjectKey(clientKey [32]byte, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
sealedKey, err := SSEC.ParseMetadata(metadata)
if err != nil {
return
}
err = key.Unseal(clientKey, sealedKey, SSEC.String(), bucket, object)
return
}
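How a caller in the cmd package would typically consume these helpers, sketched under the assumption that `r` is the incoming *http.Request and `metadata` is the object's stored user-defined metadata (neither appears in this hunk):

	key, err := crypto.SSEC.UnsealObjectKey(r.Header, metadata, bucket, object)
	if err != nil {
		return err
	}
	// key can now derive per-part keys via DerivePartKey or decrypt the
	// stored ETag via UnsealETag.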
// EncryptSinglePart encrypts an io.Reader which must be
// the body of a single-part PUT request.
func EncryptSinglePart(r io.Reader, key ObjectKey) io.Reader {

View File

@@ -14,7 +14,10 @@
package crypto
import "testing"
import (
"net/http"
"testing"
)
func TestS3String(t *testing.T) {
const Domain = "SSE-S3"
@@ -29,3 +32,195 @@ func TestSSECString(t *testing.T) {
t.Errorf("SSEC's string method returns wrong domain: got '%s' - want '%s'", domain, Domain)
}
}
var ssecUnsealObjectKeyTests = []struct {
Headers http.Header
Bucket, Object string
Metadata map[string]string
ExpectedErr error
}{
{ // 0 - Valid HTTP headers and valid metadata entries for bucket/object
Headers: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Bucket: "bucket",
Object: "object",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
},
ExpectedErr: nil,
},
{ // 1 - Valid HTTP headers but invalid metadata entries for bucket/object2
Headers: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Bucket: "bucket",
Object: "object2",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
},
ExpectedErr: ErrSecretKeyMismatch,
},
{ // 2 - Valid HTTP headers but invalid metadata entries for bucket/object
Headers: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Bucket: "bucket",
Object: "object",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
},
ExpectedErr: errMissingInternalSealAlgorithm,
},
{ // 3 - Invalid HTTP headers for valid metadata entries for bucket/object
Headers: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
},
Bucket: "bucket",
Object: "object",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
},
ExpectedErr: ErrMissingCustomerKeyMD5,
},
}
func TestSSECUnsealObjectKey(t *testing.T) {
for i, test := range ssecUnsealObjectKeyTests {
if _, err := SSEC.UnsealObjectKey(test.Headers, test.Metadata, test.Bucket, test.Object); err != test.ExpectedErr {
t.Errorf("Test %d: got: %v - want: %v", i, err, test.ExpectedErr)
}
}
}
var sseCopyUnsealObjectKeyTests = []struct {
Headers http.Header
Bucket, Object string
Metadata map[string]string
ExpectedErr error
}{
{ // 0 - Valid HTTP headers and valid metadata entries for bucket/object
Headers: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Bucket: "bucket",
Object: "object",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
},
ExpectedErr: nil,
},
{ // 1 - Valid HTTP headers but invalid metadata entries for bucket/object2
Headers: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Bucket: "bucket",
Object: "object2",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
},
ExpectedErr: ErrSecretKeyMismatch,
},
{ // 2 - Valid HTTP headers but invalid metadata entries for bucket/object
Headers: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Bucket: "bucket",
Object: "object",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
},
ExpectedErr: errMissingInternalSealAlgorithm,
},
{ // 3 - Invalid HTTP headers for valid metadata entries for bucket/object
Headers: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
},
Bucket: "bucket",
Object: "object",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
},
ExpectedErr: ErrMissingCustomerKeyMD5,
},
}
func TestSSECopyUnsealObjectKey(t *testing.T) {
for i, test := range sseCopyUnsealObjectKeyTests {
if _, err := SSECopy.UnsealObjectKey(test.Headers, test.Metadata, test.Bucket, test.Object); err != test.ExpectedErr {
t.Errorf("Test %d: got: %v - want: %v", i, err, test.ExpectedErr)
}
}
}
var s3UnsealObjectKeyTests = []struct {
KMS KMS
Bucket, Object string
Metadata map[string]string
ExpectedErr error
}{
{ // 0 - Valid KMS key-ID and valid metadata entries for bucket/object
KMS: NewKMS([32]byte{}),
Bucket: "bucket",
Object: "object",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Iv": "hhVY0LKR1YtZbzAKxTWUfZt5enDfYX6Fxz1ma8Kiudc=",
"X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key": "IAAfALhsOeD5AE3s5Zgq3DZ5VFGsOa3B0ksVC86veDcaj+fXv2U0VadhPaOKYr9Emd5ssOsO0uIhIIrKiOy9rA==",
"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key": "IAAfAMRS2iw45FsfiF3QXajSYVWj1lxMpQm6DxDGPtADCX6fJQQ4atHBtfpgqJFyeQmIHsm0FBI+UlHw1Lv4ug==",
"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id": "test-key-1",
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
},
ExpectedErr: nil,
},
{ // 1 - Valid KMS key-ID for invalid metadata entries for bucket/object
KMS: NewKMS([32]byte{}),
Bucket: "bucket",
Object: "object",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Iv": "hhVY0LKR1YtZbzAKxTWUfZt5enDfYX6Fxz1ma8Kiudc=",
"X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key": "IAAfALhsOeD5AE3s5Zgq3DZ5VFGsOa3B0ksVC86veDcaj+fXv2U0VadhPaOKYr9Emd5ssOsO0uIhIIrKiOy9rA==",
"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key": "IAAfAMRS2iw45FsfiF3QXajSYVWj1lxMpQm6DxDGPtADCX6fJQQ4atHBtfpgqJFyeQmIHsm0FBI+UlHw1Lv4ug==",
"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id": "test-key-1",
},
ExpectedErr: errMissingInternalSealAlgorithm,
},
}
func TestS3UnsealObjectKey(t *testing.T) {
for i, test := range s3UnsealObjectKeyTests {
if _, err := S3.UnsealObjectKey(test.KMS, test.Metadata, test.Bucket, test.Object); err != test.ExpectedErr {
t.Errorf("Test %d: got: %v - want: %v", i, err, test.ExpectedErr)
}
}
}
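The SSE-S3 variant follows the same shape but delegates the unseal of the key-encryption key to the KMS. A hedged in-package sketch mirroring the tests above (the all-zero master key is only suitable for tests):
func objectKeyFromKMS(metadata map[string]string, bucket, object string) (ObjectKey, error) {
	kms := NewKMS([32]byte{}) // test-only master key, as in the table above
	return S3.UnsealObjectKey(kms, metadata, bucket, object)
}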


@@ -19,238 +19,234 @@ import (
"encoding/base64"
"errors"
"fmt"
"os"
"strconv"
"strings"
"time"
vault "github.com/hashicorp/vault/api"
)
const (
// VaultEndpointEnv Vault endpoint environment variable
VaultEndpointEnv = "MINIO_SSE_VAULT_ENDPOINT"
// vaultAuthTypeEnv type of vault auth to be used
vaultAuthTypeEnv = "MINIO_SSE_VAULT_AUTH_TYPE"
// vaultAppRoleIDEnv Vault AppRole ID environment variable
vaultAppRoleIDEnv = "MINIO_SSE_VAULT_APPROLE_ID"
// vaultAppSecretIDEnv Vault AppRole Secret environment variable
vaultAppSecretIDEnv = "MINIO_SSE_VAULT_APPROLE_SECRET"
// vaultKeyVersionEnv Vault Key Version environment variable
vaultKeyVersionEnv = "MINIO_SSE_VAULT_KEY_VERSION"
// vaultKeyNameEnv Vault Encryption Key Name environment variable
vaultKeyNameEnv = "MINIO_SSE_VAULT_KEY_NAME"
)
var (
// ErrKMSAuthLogin is raised when there is a failure authenticating to the KMS
ErrKMSAuthLogin = errors.New("Vault service did not return auth info")
)
// VaultKey represents vault encryption key-ring.
type VaultKey struct {
Name string `json:"name"` // The name of the encryption key-ring
Version int `json:"version"` // The key version
}
// VaultAuth represents vault authentication type.
// Currently the only supported authentication type is AppRole.
type VaultAuth struct {
Type string `json:"type"` // The authentication type
AppRole VaultAppRole `json:"approle"` // The AppRole authentication credentials
}
// VaultAppRole represents vault AppRole authentication credentials
type VaultAppRole struct {
ID string `json:"id"` // The AppRole access ID
Secret string `json:"secret"` // The AppRole secret
}
// VaultConfig represents vault configuration.
type VaultConfig struct {
Endpoint string `json:"endpoint"` // The vault API endpoint as URL
CAPath string `json:"-"` // The path to PEM-encoded certificate files used for mTLS. Currently not used in config file.
Auth VaultAuth `json:"auth"` // The vault authentication configuration
Key VaultKey `json:"key-id"` // The named key used for key-generation / decryption.
Namespace string `json:"-"` // The vault namespace of enterprise vault instances
}
// vaultService represents a connection to a vault KMS.
type vaultService struct {
config *VaultConfig
client *vault.Client
secret *vault.Secret
leaseDuration time.Duration
}
// return transit secret engine's path for generate data key operation
func (v *vaultService) genDataKeyEndpoint(key string) string {
return "/transit/datakey/plaintext/" + key
var _ KMS = (*vaultService)(nil) // compiler check that *vaultService implements KMS
// empty/default vault configuration used to check whether a particular configuration is empty.
var emptyVaultConfig = VaultConfig{}
// IsEmpty returns true if the vault config struct is an
// empty configuration.
func (v *VaultConfig) IsEmpty() bool { return *v == emptyVaultConfig }
// Verify returns a nil error if the vault configuration
// is valid. A valid configuration is either empty or
// contains valid non-default values.
func (v *VaultConfig) Verify() (err error) {
if v.IsEmpty() {
return // an empty configuration is valid
}
switch {
case v.Endpoint == "":
err = errors.New("crypto: missing hashicorp vault endpoint")
case strings.ToLower(v.Auth.Type) != "approle":
err = fmt.Errorf("crypto: invalid hashicorp vault authentication type: %s is not supported", v.Auth.Type)
case v.Auth.AppRole.ID == "":
err = errors.New("crypto: missing hashicorp vault AppRole ID")
case v.Auth.AppRole.Secret == "":
err = errors.New("crypto: missing hashicorp vault AppSecret ID")
case v.Key.Name == "":
err = errors.New("crypto: missing hashicorp vault key name")
case v.Key.Version < 0:
err = errors.New("crypto: invalid hashicorp vault key version: The key version must not be negative")
}
return
}
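Since Verify treats the empty configuration as valid, callers can run it unconditionally on whatever the environment produced. A sketch with placeholder credentials:
cfg := VaultConfig{
	Endpoint: "https://vault.example.net:8200", // placeholder endpoint
	Auth: VaultAuth{
		Type:    "approle",
		AppRole: VaultAppRole{ID: "role-id", Secret: "secret-id"}, // placeholders
	},
	Key: VaultKey{Name: "minio-key", Version: 0},
}
if err := cfg.Verify(); err != nil {
	// reject the configuration before any network call is attempted
}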
// return transit secret engine's path for decrypt operation
func (v *vaultService) decryptEndpoint(key string) string {
return "/transit/decrypt/" + key
}
// VaultKey represents vault encryption key-id name & version
type VaultKey struct {
Name string `json:"name"`
Version int `json:"version"`
}
// VaultAuth represents vault auth type to use. For now, AppRole is the only supported
// auth type.
type VaultAuth struct {
Type string `json:"type"`
AppRole VaultAppRole `json:"approle"`
}
// VaultAppRole represents vault approle credentials
type VaultAppRole struct {
ID string `json:"id"`
Secret string `json:"secret"`
}
// VaultConfig holds config required to start vault service
type VaultConfig struct {
Endpoint string `json:"endpoint"`
Auth VaultAuth `json:"auth"`
Key VaultKey `json:"key-id"`
}
// validate whether all required env variables needed to start vault service have
// been set
func validateVaultConfig(c *VaultConfig) error {
if c.Endpoint == "" {
return fmt.Errorf("Missing hashicorp vault endpoint - %s is empty", VaultEndpointEnv)
// NewVault initializes the Hashicorp Vault KMS by authenticating
// to Vault with the credentials in config and getting a client
// token for future API calls.
func NewVault(config VaultConfig) (KMS, error) {
if config.IsEmpty() {
return nil, errors.New("crypto: the hashicorp vault configuration must not be empty")
}
if strings.ToLower(c.Auth.Type) != "approle" {
return fmt.Errorf("Unsupported hashicorp vault auth type - %s", vaultAuthTypeEnv)
}
if c.Auth.AppRole.ID == "" {
return fmt.Errorf("Missing hashicorp vault AppRole ID - %s is empty", vaultAppRoleIDEnv)
}
if c.Auth.AppRole.Secret == "" {
return fmt.Errorf("Missing hashicorp vault AppSecret ID - %s is empty", vaultAppSecretIDEnv)
}
if c.Key.Name == "" {
return fmt.Errorf("Invalid value set in environment variable %s", vaultKeyNameEnv)
}
if c.Key.Version < 0 {
return fmt.Errorf("Invalid value set in environment variable %s", vaultKeyVersionEnv)
if err := config.Verify(); err != nil {
return nil, err
}
return nil
}
// authenticate to vault with app role id and app role secret, and get a client access token, lease duration
func getVaultAccessToken(client *vault.Client, appRoleID, appSecret string) (token string, duration int, err error) {
data := map[string]interface{}{
"role_id": appRoleID,
"secret_id": appSecret,
vaultCfg := vault.Config{Address: config.Endpoint}
if err := vaultCfg.ConfigureTLS(&vault.TLSConfig{CAPath: config.CAPath}); err != nil {
return nil, err
}
resp, e := client.Logical().Write("auth/approle/login", data)
if e != nil {
return token, duration, e
}
if resp.Auth == nil {
return token, duration, ErrKMSAuthLogin
}
return resp.Auth.ClientToken, resp.Auth.LeaseDuration, nil
}
// NewVaultConfig sets KMSConfig from environment
// variables and performs validations.
func NewVaultConfig() (KMSConfig, error) {
kc := KMSConfig{}
endpoint := os.Getenv(VaultEndpointEnv)
roleID := os.Getenv(vaultAppRoleIDEnv)
roleSecret := os.Getenv(vaultAppSecretIDEnv)
keyName := os.Getenv(vaultKeyNameEnv)
keyVersion := 0
authType := "approle"
if versionStr := os.Getenv(vaultKeyVersionEnv); versionStr != "" {
version, err := strconv.Atoi(versionStr)
if err != nil {
return kc, fmt.Errorf("Unable to parse %s value (`%s`)", vaultKeyVersionEnv, versionStr)
}
keyVersion = version
}
// return if none of the vault env variables are configured
if (endpoint == "") && (roleID == "") && (roleSecret == "") && (keyName == "") && (keyVersion == 0) {
return kc, nil
}
c := VaultConfig{
Endpoint: endpoint,
Auth: VaultAuth{
Type: authType,
AppRole: VaultAppRole{
ID: roleID,
Secret: roleSecret,
},
},
Key: VaultKey{
Version: keyVersion,
Name: keyName,
},
}
if err := validateVaultConfig(&c); err != nil {
return kc, err
}
kc.Vault = c
return kc, nil
}
// NewVault initializes the Hashicorp Vault KMS by
// authenticating to Vault with the credentials in KMSConfig,
// and getting a client token for future API calls.
func NewVault(kmsConf KMSConfig) (KMS, error) {
config := kmsConf.Vault
c, err := vault.NewClient(&vault.Config{
Address: config.Endpoint,
})
client, err := vault.NewClient(&vaultCfg)
if err != nil {
return nil, err
}
accessToken, leaseDuration, err := getVaultAccessToken(c, config.Auth.AppRole.ID, config.Auth.AppRole.Secret)
if err != nil {
if config.Namespace != "" {
client.SetNamespace(config.Namespace)
}
v := &vaultService{client: client, config: &config}
if err := v.authenticate(); err != nil {
return nil, err
}
// authenticate and get the access token
c.SetToken(accessToken)
v := vaultService{client: c, config: &config, leaseDuration: time.Duration(leaseDuration)}
v.renewToken(c)
return &v, nil
v.renewToken()
return v, nil
}
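With a verified config, obtaining a KMS handle is then a single call; a sketch reusing the cfg built above (error handling elided):
kms, err := NewVault(cfg)
if err != nil {
	// authentication against auth/approle/login failed
}
// kms satisfies the KMS interface and renews its token in the background.
_ = kms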
func (v *vaultService) renewToken(c *vault.Client) {
retryDelay := 1 * time.Minute
// renewToken starts a new go-routine which renews
// the vault authentication token periodically and re-authenticates
// if the token renewal fails
func (v *vaultService) renewToken() {
retryDelay := v.leaseDuration / 2
go func() {
for {
s, err := c.Auth().Token().RenewSelf(int(v.leaseDuration))
if err != nil {
if v.secret == nil {
if err := v.authenticate(); err != nil {
time.Sleep(retryDelay)
continue
}
}
s, err := v.client.Auth().Token().RenewSelf(int(v.leaseDuration))
if err != nil || s == nil {
v.secret = nil
time.Sleep(retryDelay)
continue
}
nextRenew := s.Auth.LeaseDuration / 2
time.Sleep(time.Duration(nextRenew) * time.Second)
if ok, err := s.TokenIsRenewable(); !ok || err != nil {
v.secret = nil
continue
}
ttl, err := s.TokenTTL()
if err != nil {
v.secret = nil
continue
}
v.secret = s
retryDelay = ttl / 2
time.Sleep(retryDelay)
}
}()
}
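The loop above encodes a common renew-at-half-TTL pattern: sleep for half the lease, renew, and fall back to re-authentication when renewal fails. A stripped-down, self-contained shape of the same idea, assuming the time package is imported and the renew/authenticate callbacks are hypothetical:
func renewLoop(renew func() (time.Duration, error), authenticate func() error) {
	const retryDelay = time.Minute // hypothetical fallback delay
	for {
		ttl, err := renew()
		if err != nil {
			if authErr := authenticate(); authErr != nil {
				time.Sleep(retryDelay)
			}
			continue
		}
		time.Sleep(ttl / 2) // renew well before the lease expires
	}
}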
// Generates a random plaintext key and a sealed plaintext key from
// Vault. It returns the plaintext key and the sealed key on success.
// authenticate logs the app in to vault and starts the auto renewer
// before the secret expires
func (v *vaultService) authenticate() (err error) {
payload := map[string]interface{}{
"role_id": v.config.Auth.AppRole.ID,
"secret_id": v.config.Auth.AppRole.Secret,
}
var tokenID string
var ttl time.Duration
var secret *vault.Secret
secret, err = v.client.Logical().Write("auth/approle/login", payload)
if err != nil {
return
}
if secret == nil {
err = ErrKMSAuthLogin
return
}
tokenID, err = secret.TokenID()
if err != nil {
err = ErrKMSAuthLogin
return
}
ttl, err = secret.TokenTTL()
if err != nil {
err = ErrKMSAuthLogin
return
}
v.client.SetToken(tokenID)
v.secret = secret
v.leaseDuration = ttl
return
}
// GenerateKey returns a new plaintext key, generated by the KMS,
// and a sealed version of this plaintext key encrypted using the
// named key referenced by keyID. It also binds the generated key
// cryptographically to the provided context.
func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
contextStream := new(bytes.Buffer)
ctx.WriteTo(contextStream)
var contextStream bytes.Buffer
ctx.WriteTo(&contextStream)
payload := map[string]interface{}{
"context": base64.StdEncoding.EncodeToString(contextStream.Bytes()),
}
s, err1 := v.client.Logical().Write(v.genDataKeyEndpoint(keyID), payload)
if err1 != nil {
return key, sealedKey, err1
s, err := v.client.Logical().Write(fmt.Sprintf("/transit/datakey/plaintext/%s", keyID), payload)
if err != nil {
return key, sealedKey, err
}
sealKey := s.Data["ciphertext"].(string)
plainKey, err := base64.StdEncoding.DecodeString(s.Data["plaintext"].(string))
if err != nil {
return key, sealedKey, err1
return key, sealedKey, err
}
copy(key[:], []byte(plainKey))
return key, []byte(sealKey), nil
}
// unsealKMSKey unseals the sealedKey using the Vault master key
// referenced by the keyID. The plain text key is returned on success.
// UnsealKey returns the decrypted sealedKey as plaintext key.
// To do so it sends the sealedKey to the KMS, which decrypts
// it using the named key referenced by keyID and responds with
// the plaintext key.
//
// The context must be the same context as the one provided while
// generating the plaintext key / sealedKey.
func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
contextStream := new(bytes.Buffer)
ctx.WriteTo(contextStream)
var contextStream bytes.Buffer
ctx.WriteTo(&contextStream)
payload := map[string]interface{}{
"ciphertext": string(sealedKey),
"context": base64.StdEncoding.EncodeToString(contextStream.Bytes()),
}
s, err1 := v.client.Logical().Write(v.decryptEndpoint(keyID), payload)
if err1 != nil {
return key, err1
s, err := v.client.Logical().Write(fmt.Sprintf("/transit/decrypt/%s", keyID), payload)
if err != nil {
return key, err
}
base64Key := s.Data["plaintext"].(string)
plainKey, err1 := base64.StdEncoding.DecodeString(base64Key)
if err1 != nil {
return key, err1
plainKey, err := base64.StdEncoding.DecodeString(base64Key)
if err != nil {
return key, err
}
copy(key[:], []byte(plainKey))
return key, nil
}
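Sealing and unsealing are symmetric as long as the same keyID and context are supplied; a round-trip sketch (the Context literal below assumes the package's key/value context type, which is not shown in this diff):
ctx := Context{"bucket": "b", "object": "o"} // assumed construction
plain, sealed, err := kms.GenerateKey("minio-key", ctx)
if err != nil { /* handle */ }
recovered, err := kms.UnsealKey("minio-key", sealed, ctx)
if err != nil { /* handle */ }
// plain == recovered on success; a different context makes UnsealKey fail.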


@@ -30,7 +30,6 @@ import (
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/lock"
)
@@ -92,7 +91,7 @@ func newCacheFSObjects(dir string, expiry int, maxDiskUsagePct int) (*cacheFSObj
appendFileMap: make(map[string]*fsAppendFile),
}
go fsObjects.cleanupStaleMultipartUploads(context.Background(), globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
go fsObjects.cleanupStaleMultipartUploads(context.Background(), GlobalMultipartCleanupInterval, GlobalMultipartExpiry, GlobalServiceDoneCh)
cacheFS := cacheFSObjects{
FSObjects: fsObjects,
@@ -159,7 +158,7 @@ func (cfs *cacheFSObjects) purgeTrash() {
for {
select {
case <-globalServiceDoneCh:
case <-GlobalServiceDoneCh:
return
case <-ticker.C:
trashPath := path.Join(cfs.fsPath, minioMetaBucket, cacheTrashDir)
@@ -258,7 +257,7 @@ func (cfs *cacheFSObjects) IsOnline() bool {
}
// Caches the object to disk
func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) error {
func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) error {
if cfs.diskUsageHigh() {
select {
case cfs.purgeChan <- struct{}{}:
@@ -275,7 +274,7 @@ func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data
return pErr
}
}
_, err := cfs.PutObject(ctx, bucket, object, data, metadata, opts)
_, err := cfs.PutObject(ctx, bucket, object, data, opts)
// if err is due to disk being offline , mark cache drive as offline
if IsErr(err, baseErrs...) {
cfs.setOnline(false)
@@ -301,7 +300,8 @@ func (cfs *cacheFSObjects) Exists(ctx context.Context, bucket, object string) bo
// Identical to fs PutObject operation except that it uses ETag in metadata
// headers.
func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
data := r.Reader
fs := cfs.FSObjects
// Lock the object.
objectLock := fs.nsMutex.NewNSLock(bucket, object)
@@ -312,7 +312,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
// No metadata is set, allocate a new one.
meta := make(map[string]string)
for k, v := range metadata {
for k, v := range opts.UserDefined {
meta[k] = v
}
@@ -354,7 +354,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
}
// Validate input data size; it must never be less than -1 (-1 denotes an unknown size).
if data.Size() < 0 {
if data.Size() < -1 {
logger.LogIf(ctx, errInvalidArgument)
return ObjectInfo{}, errInvalidArgument
}
@@ -438,7 +438,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
// Implements S3 compatible initiate multipart API. Operation here is identical
// to fs backend implementation - with the exception that cache FS uses the uploadID
// generated on the backend
func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string, uploadID string, opts ObjectOptions) (string, error) {
func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, uploadID string, opts ObjectOptions) (string, error) {
if cfs.diskUsageHigh() {
select {
case cfs.purgeChan <- struct{}{}:
@@ -472,7 +472,7 @@ func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, objec
// Initialize fs.json values.
fsMeta := newFSMetaV1()
fsMeta.Meta = meta
fsMeta.Meta = opts.UserDefined
fsMetaBytes, err := json.Marshal(fsMeta)
if err != nil {


@@ -38,8 +38,7 @@ import (
)
const (
// disk cache needs to have cacheSizeMultiplier * object size space free for a cache entry to be created.
cacheSizeMultiplier = 100
// disk cache needs to have object size space free for a cache entry to be created.
cacheTrashDir = "trash"
cacheCleanupInterval = 10 // in minutes
)
@@ -58,19 +57,19 @@ type cacheObjects struct {
// file path patterns to exclude from cache
exclude []string
// Object functions pointing to the corresponding functions of backend implementation.
GetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType) (gr *GetObjectReader, err error)
GetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
GetObjectFn func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
PutObjectFn func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
PutObjectFn func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
DeleteObjectFn func(ctx context.Context, bucket, object string) error
ListObjectsFn func(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
ListObjectsV2Fn func(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error)
ListBucketsFn func(ctx context.Context) (buckets []BucketInfo, err error)
GetBucketInfoFn func(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error)
NewMultipartUploadFn func(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error)
PutObjectPartFn func(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error)
NewMultipartUploadFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error)
PutObjectPartFn func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
AbortMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string) error
CompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)
CompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
DeleteBucketFn func(ctx context.Context, bucket string) error
}
@@ -90,17 +89,17 @@ type CacheObjectLayer interface {
ListBuckets(ctx context.Context) (buckets []BucketInfo, err error)
DeleteBucket(ctx context.Context, bucket string) error
// Object operations.
GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType) (gr *GetObjectReader, err error)
GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
DeleteObject(ctx context.Context, bucket, object string) error
// Multipart operations.
NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error)
PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error)
NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error)
PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error
CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)
CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
// Storage operations.
StorageInfo(ctx context.Context) CacheStorageInfo
@@ -183,44 +182,40 @@ func (c cacheObjects) getMetadata(objInfo ObjectInfo) map[string]string {
return metadata
}
func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType) (gr *GetObjectReader, err error) {
bkReader, bkErr := c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock)
if c.isCacheExclude(bucket, object) || !bkReader.ObjInfo.IsCacheable() {
return bkReader, bkErr
func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
if c.isCacheExclude(bucket, object) {
return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
}
// fetch the cacheFSObjects where the object is currently cached, or the nearest available cache drive
dcache, err := c.cache.getCachedFSLoc(ctx, bucket, object)
if err != nil {
return bkReader, bkErr
return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
}
backendDown := backendDownError(bkErr)
if bkErr != nil && !backendDown {
cacheReader, cacheErr := dcache.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
objInfo, err := c.GetObjectInfoFn(ctx, bucket, object, opts)
if backendDownError(err) && cacheErr == nil {
return cacheReader, nil
} else if err != nil {
if _, ok := err.(ObjectNotFound); ok {
// Delete the cached entry if backend object was deleted.
// Delete cached entry if backend object was deleted.
dcache.Delete(ctx, bucket, object)
}
return nil, bkErr
return nil, err
}
if !backendDown && filterFromCache(bkReader.ObjInfo.UserDefined) {
return bkReader, bkErr
if !objInfo.IsCacheable() || filterFromCache(objInfo.UserDefined) {
return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
}
if cacheReader, cacheErr := dcache.GetObjectNInfo(ctx, bucket, object, rs, h, lockType); cacheErr == nil {
if backendDown {
// If the backend is down, serve the request from cache.
return cacheReader, nil
}
if cacheReader.ObjInfo.ETag == bkReader.ObjInfo.ETag && !isStaleCache(bkReader.ObjInfo) {
if cacheErr == nil {
if cacheReader.ObjInfo.ETag == objInfo.ETag && !isStaleCache(objInfo) {
// Object is not stale, so serve from cache
return cacheReader, nil
}
cacheReader.Close()
// Object is stale, so delete from cache
dcache.Delete(ctx, bucket, object)
}
@@ -230,13 +225,13 @@ func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
if rs != nil {
// We don't cache partial objects.
return bkReader, bkErr
return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
}
if !dcache.diskAvailable(bkReader.ObjInfo.Size * cacheSizeMultiplier) {
// cache only objects < 1/100th of disk capacity
return bkReader, bkErr
if !dcache.diskAvailable(objInfo.Size) {
return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
}
bkReader, bkErr := c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
if bkErr != nil {
return nil, bkErr
}
@@ -244,15 +239,14 @@ func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
// Initialize pipe.
pipeReader, pipeWriter := io.Pipe()
teeReader := io.TeeReader(bkReader, pipeWriter)
hashReader, herr := hash.NewReader(pipeReader, bkReader.ObjInfo.Size, "", "")
hashReader, herr := hash.NewReader(pipeReader, bkReader.ObjInfo.Size, "", "", bkReader.ObjInfo.Size)
if herr != nil {
bkReader.Close()
return nil, herr
}
go func() {
opts := ObjectOptions{}
putErr := dcache.Put(ctx, bucket, object, hashReader, c.getMetadata(bkReader.ObjInfo), opts)
putErr := dcache.Put(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), ObjectOptions{UserDefined: c.getMetadata(bkReader.ObjInfo)})
// close the write end of the pipe, so the error gets
// propagated to getObjReader
pipeWriter.CloseWithError(putErr)
@@ -260,8 +254,7 @@ func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
cleanupBackend := func() { bkReader.Close() }
cleanupPipe := func() { pipeReader.Close() }
gr = NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, cleanupBackend, cleanupPipe)
return gr, nil
return NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, opts.CheckCopyPrecondFn, cleanupBackend, cleanupPipe)
}
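The write-through above hinges on a pipe-and-tee arrangement: the backend reader is teed into a pipe whose read end feeds the cache writer, so one pass over the bytes serves the client and fills the cache, and CloseWithError propagates whichever side failed. A self-contained illustration of the same shape (a sketch of the pattern, not the MinIO code itself):
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	backend := strings.NewReader("object payload")
	pr, pw := io.Pipe()
	tee := io.TeeReader(backend, pw)

	var cache bytes.Buffer
	done := make(chan error, 1)
	go func() {
		_, err := io.Copy(&cache, pr) // the "cache fill" side reads the pipe
		done <- err
	}()

	var client bytes.Buffer
	_, err := io.Copy(&client, tee) // the "client" drains the tee
	pw.CloseWithError(err)          // signal EOF (or the error) to the cache side
	if cerr := <-done; cerr != nil {
		fmt.Println("cache fill failed:", cerr)
	}
	fmt.Println("served:", client.String(), "cached:", cache.String())
}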
// Uses cached-object to serve the request. If object is not cached it serves the request from the backend and also
@@ -289,6 +282,10 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
return err
}
if !backendDown && !objInfo.IsCacheable() {
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
}
if !backendDown && filterFromCache(objInfo.UserDefined) {
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
}
@@ -304,28 +301,26 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
}
dcache.Delete(ctx, bucket, object)
}
if startOffset != 0 || length != objInfo.Size {
if startOffset != 0 || (length > 0 && length != objInfo.Size) {
// We don't cache partial objects.
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
}
if !dcache.diskAvailable(objInfo.Size * cacheSizeMultiplier) {
// cache only objects < 1/100th of disk capacity
if !dcache.diskAvailable(objInfo.Size) {
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
}
// Initialize pipe.
pipeReader, pipeWriter := io.Pipe()
hashReader, err := hash.NewReader(pipeReader, objInfo.Size, "", "")
hashReader, err := hash.NewReader(pipeReader, objInfo.Size, "", "", objInfo.Size)
if err != nil {
return err
}
go func() {
if err = GetObjectFn(ctx, bucket, object, 0, objInfo.Size, io.MultiWriter(writer, pipeWriter), etag, opts); err != nil {
pipeWriter.CloseWithError(err)
return
}
pipeWriter.Close() // Close writer explicitly signaling we wrote all data.
gerr := GetObjectFn(ctx, bucket, object, 0, objInfo.Size, io.MultiWriter(writer, pipeWriter), etag, opts)
pipeWriter.CloseWithError(gerr) // Close writer explicitly signaling we wrote all data.
}()
err = dcache.Put(ctx, bucket, object, hashReader, c.getMetadata(objInfo), opts)
opts.UserDefined = c.getMetadata(objInfo)
err = dcache.Put(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), opts)
if err != nil {
return err
}
@@ -649,42 +644,43 @@ func (c cacheObjects) isCacheExclude(bucket, object string) bool {
}
// PutObject - caches the uploaded object for single Put operations
func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
putObjectFn := c.PutObjectFn
data := r.Reader
dcache, err := c.cache.getCacheFS(ctx, bucket, object)
if err != nil {
// disk cache could not be located, execute backend call.
return putObjectFn(ctx, bucket, object, r, metadata, opts)
return putObjectFn(ctx, bucket, object, r, opts)
}
size := r.Size()
// fetch from backend if there is no space on cache drive
if !dcache.diskAvailable(size * cacheSizeMultiplier) {
return putObjectFn(ctx, bucket, object, r, metadata, opts)
if !dcache.diskAvailable(size) {
return putObjectFn(ctx, bucket, object, r, opts)
}
// fetch from backend if cache exclude pattern or cache-control
// directive set to exclude
if c.isCacheExclude(bucket, object) || filterFromCache(metadata) {
if c.isCacheExclude(bucket, object) || filterFromCache(opts.UserDefined) {
dcache.Delete(ctx, bucket, object)
return putObjectFn(ctx, bucket, object, r, metadata, opts)
return putObjectFn(ctx, bucket, object, r, opts)
}
objInfo = ObjectInfo{}
// Initialize pipe to stream data to backend
pipeReader, pipeWriter := io.Pipe()
hashReader, err := hash.NewReader(pipeReader, size, r.MD5HexString(), r.SHA256HexString())
hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
if err != nil {
return ObjectInfo{}, err
}
// Initialize pipe to stream data to cache
rPipe, wPipe := io.Pipe()
cHashReader, err := hash.NewReader(rPipe, size, r.MD5HexString(), r.SHA256HexString())
cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
if err != nil {
return ObjectInfo{}, err
}
oinfoCh := make(chan ObjectInfo)
errCh := make(chan error)
go func() {
oinfo, perr := putObjectFn(ctx, bucket, object, hashReader, metadata, opts)
oinfo, perr := putObjectFn(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), opts)
if perr != nil {
pipeWriter.CloseWithError(perr)
wPipe.CloseWithError(perr)
@@ -697,14 +693,14 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *h
}()
go func() {
if err = dcache.Put(ctx, bucket, object, cHashReader, metadata, opts); err != nil {
if err = dcache.Put(ctx, bucket, object, NewPutObjReader(cHashReader, nil, nil), opts); err != nil {
wPipe.CloseWithError(err)
return
}
}()
mwriter := io.MultiWriter(pipeWriter, wPipe)
_, err = io.Copy(mwriter, r)
_, err = io.Copy(mwriter, data)
if err != nil {
err = <-errCh
return objInfo, err
@@ -716,68 +712,69 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *h
}
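The single-PUT path fans one upload stream out to the backend and the cache by copying into an io.MultiWriter over two pipes; each consumer runs in its own goroutine and a failure on either side is pushed back through CloseWithError. A minimal standalone version of that fan-out (again a sketch, not the handler code):
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("uploaded object")

	backendR, backendW := io.Pipe()
	cacheR, cacheW := io.Pipe()

	var backend, cache bytes.Buffer
	done := make(chan struct{}, 2)
	go func() { io.Copy(&backend, backendR); done <- struct{}{} }() // backend consumer
	go func() { io.Copy(&cache, cacheR); done <- struct{}{} }()     // cache consumer

	// One read pass over src feeds both consumers in lockstep.
	_, err := io.Copy(io.MultiWriter(backendW, cacheW), src)
	backendW.CloseWithError(err)
	cacheW.CloseWithError(err)
	<-done
	<-done
	fmt.Println("backend:", backend.String(), "cache:", cache.String())
}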
// NewMultipartUpload - Starts a new multipart upload operation to backend and cache.
func (c cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
func (c cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
newMultipartUploadFn := c.NewMultipartUploadFn
if c.isCacheExclude(bucket, object) || filterFromCache(metadata) {
return newMultipartUploadFn(ctx, bucket, object, metadata, opts)
if c.isCacheExclude(bucket, object) || filterFromCache(opts.UserDefined) {
return newMultipartUploadFn(ctx, bucket, object, opts)
}
dcache, err := c.cache.getCacheFS(ctx, bucket, object)
if err != nil {
// disk cache could not be located, execute backend call.
return newMultipartUploadFn(ctx, bucket, object, metadata, opts)
return newMultipartUploadFn(ctx, bucket, object, opts)
}
uploadID, err = newMultipartUploadFn(ctx, bucket, object, metadata, opts)
uploadID, err = newMultipartUploadFn(ctx, bucket, object, opts)
if err != nil {
return
}
// create new multipart upload in cache with same uploadID
dcache.NewMultipartUpload(ctx, bucket, object, metadata, uploadID, opts)
dcache.NewMultipartUpload(ctx, bucket, object, uploadID, opts)
return uploadID, err
}
// PutObjectPart - uploads part to backend and cache simultaneously.
func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error) {
func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
data := r.Reader
putObjectPartFn := c.PutObjectPartFn
dcache, err := c.cache.getCacheFS(ctx, bucket, object)
if err != nil {
// disk cache could not be located, execute backend call.
return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
}
if c.isCacheExclude(bucket, object) {
return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
}
// make sure cache has at least cacheSizeMultiplier * size available
// make sure cache has at least size space available
size := data.Size()
if !dcache.diskAvailable(size * cacheSizeMultiplier) {
if !dcache.diskAvailable(size) {
select {
case dcache.purgeChan <- struct{}{}:
default:
}
return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
}
info = PartInfo{}
// Initialize pipe to stream data to backend
pipeReader, pipeWriter := io.Pipe()
hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString())
hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
if err != nil {
return
}
// Initialize pipe to stream data to cache
rPipe, wPipe := io.Pipe()
cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString())
cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
if err != nil {
return
}
pinfoCh := make(chan PartInfo)
errorCh := make(chan error)
go func() {
info, err = putObjectPartFn(ctx, bucket, object, uploadID, partID, hashReader, opts)
info, err = putObjectPartFn(ctx, bucket, object, uploadID, partID, NewPutObjReader(hashReader, nil, nil), opts)
if err != nil {
close(pinfoCh)
pipeWriter.CloseWithError(err)
@@ -789,7 +786,7 @@ func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadI
pinfoCh <- info
}()
go func() {
if _, perr := dcache.PutObjectPart(ctx, bucket, object, uploadID, partID, cHashReader, opts); perr != nil {
if _, perr := dcache.PutObjectPart(ctx, bucket, object, uploadID, partID, NewPutObjReader(cHashReader, nil, nil), opts); perr != nil {
wPipe.CloseWithError(perr)
return
}
@@ -831,25 +828,25 @@ func (c cacheObjects) AbortMultipartUpload(ctx context.Context, bucket, object,
}
// CompleteMultipartUpload - completes multipart upload operation on backend and cache.
func (c cacheObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
func (c cacheObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
completeMultipartUploadFn := c.CompleteMultipartUploadFn
if c.isCacheExclude(bucket, object) {
return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
}
dcache, err := c.cache.getCacheFS(ctx, bucket, object)
if err != nil {
// disk cache could not be located, execute backend call.
return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
}
// perform backend operation
objInfo, err = completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
objInfo, err = completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
if err != nil {
return
}
// create new multipart upload in cache with same uploadID
dcache.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts)
dcache.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
return
}
@@ -971,8 +968,11 @@ func newServerCacheObjects(config CacheConfig) (CacheObjectLayer, error) {
GetObjectInfoFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
return newObjectLayerFn().GetObjectInfo(ctx, bucket, object, opts)
},
PutObjectFn: func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
return newObjectLayerFn().PutObject(ctx, bucket, object, data, metadata, opts)
GetObjectNInfoFn: func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
return newObjectLayerFn().GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
},
PutObjectFn: func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
return newObjectLayerFn().PutObject(ctx, bucket, object, data, opts)
},
DeleteObjectFn: func(ctx context.Context, bucket, object string) error {
return newObjectLayerFn().DeleteObject(ctx, bucket, object)
@@ -989,17 +989,17 @@ func newServerCacheObjects(config CacheConfig) (CacheObjectLayer, error) {
GetBucketInfoFn: func(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
return newObjectLayerFn().GetBucketInfo(ctx, bucket)
},
NewMultipartUploadFn: func(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, metadata, opts)
NewMultipartUploadFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, opts)
},
PutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error) {
PutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
return newObjectLayerFn().PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
},
AbortMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string) error {
return newObjectLayerFn().AbortMultipartUpload(ctx, bucket, object, uploadID)
},
CompleteMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
return newObjectLayerFn().CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts)
CompleteMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
return newObjectLayerFn().CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
},
DeleteBucketFn: func(ctx context.Context, bucket string) error {
return newObjectLayerFn().DeleteBucket(ctx, bucket)


@@ -134,7 +134,7 @@ func TestCacheExclusion(t *testing.T) {
t.Fatal(err)
}
cobj := cobjects.(*cacheObjects)
globalServiceDoneCh <- struct{}{}
GlobalServiceDoneCh <- struct{}{}
testCases := []struct {
bucketName string
objectName string
@@ -192,14 +192,13 @@ func TestDiskCache(t *testing.T) {
objInfo.ContentType = contentType
objInfo.ETag = etag
objInfo.UserDefined = httpMeta
opts := ObjectOptions{}
var opts ObjectOptions
byteReader := bytes.NewReader([]byte(content))
hashReader, err := hash.NewReader(byteReader, int64(size), "", "")
hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size))
if err != nil {
t.Fatal(err)
}
err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta, opts)
err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), ObjectOptions{UserDefined: httpMeta})
if err != nil {
t.Fatal(err)
}
@@ -270,17 +269,17 @@ func TestDiskCacheMaxUse(t *testing.T) {
opts := ObjectOptions{}
byteReader := bytes.NewReader([]byte(content))
hashReader, err := hash.NewReader(byteReader, int64(size), "", "")
hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size))
if err != nil {
t.Fatal(err)
}
if !cache.diskAvailable(int64(size)) {
err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta, opts)
err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), ObjectOptions{UserDefined: httpMeta})
if err != errDiskFull {
t.Fatal("Cache max-use limit violated.")
}
} else {
err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta, opts)
err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), ObjectOptions{UserDefined: httpMeta})
if err != nil {
t.Fatal(err)
}

cmd/dummy-handlers.go Normal file

@@ -0,0 +1,234 @@
/*
* Minio Cloud Storage, (C) 2018, 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"encoding/xml"
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/pkg/policy"
)
// Data types used for returning dummy tagging XML.
// These types shouldn't be used elsewhere.
// They are only defined for use in this file.
type tagging struct {
XMLName xml.Name `xml:"Tagging"`
TagSet tagSet `xml:"TagSet"`
}
type tagSet struct {
Tag []tagElem `xml:"Tag"`
}
type tagElem struct {
Key string `xml:"Key"`
Value string `xml:"Value"`
}
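These types exist only so the dummy handlers below can return a well-formed, effectively empty body. Encoding a zero value with a single empty Tag, exactly as the tagging handlers do, yields XML of this shape:
tags := &tagging{}
tags.TagSet.Tag = append(tags.TagSet.Tag, tagElem{})
out, _ := xml.Marshal(tags)
// out: <Tagging><TagSet><Tag><Key></Key><Value></Value></Tag></TagSet></Tagging>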
// GetBucketWebsite - GET bucket website, a dummy api
func (api objectAPIHandlers) GetBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}
// GetBucketVersioning - GET bucket versioning, a dummy api
func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}
// GetBucketAccelerate - GET bucket accelerate, a dummy api
func (api objectAPIHandlers) GetBucketAccelerateHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}
// GetBucketRequestPaymentHandler - GET bucket requestPayment, a dummy api
func (api objectAPIHandlers) GetBucketRequestPaymentHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}
// GetBucketLoggingHandler - GET bucket logging, a dummy api
func (api objectAPIHandlers) GetBucketLoggingHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}
// GetBucketLifecycleHandler - GET bucket lifecycle, a dummy api
func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}
// GetBucketReplicationHandler - GET bucket replication, a dummy api
func (api objectAPIHandlers) GetBucketReplicationHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}
// DeleteBucketTaggingHandler - DELETE bucket tagging, a dummy api
func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}
// DeleteBucketWebsiteHandler - DELETE bucket website, a dummy api
func (api objectAPIHandlers) DeleteBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}
type allowedMethod string
// HTTP methods, typed for use in the dummy CORS response
const (
GET allowedMethod = http.MethodGet
PUT allowedMethod = http.MethodPut
HEAD allowedMethod = http.MethodHead
POST allowedMethod = http.MethodPost
DELETE allowedMethod = http.MethodDelete
)
// GetBucketCorsHandler - GET bucket cors, a dummy api
func (api objectAPIHandlers) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketCorsHandler")
type corsRule struct {
AllowedHeaders []string `xml:"AllowedHeaders"`
AllowedMethods []allowedMethod `xml:"AllowedMethod"`
AllowedOrigins []string `xml:"AllowedOrigin"`
ExposeHeaders []string `xml:"ExposeHeader"`
MaxAgeSeconds int64 `xml:"MaxAgeSeconds"`
}
type corsConfiguration struct {
XMLName xml.Name `xml:"CORSConfiguration"`
CorsRule []corsRule `xml:"CORSRule"`
}
vars := mux.Vars(r)
bucket := vars["bucket"]
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
// Allow getBucketCors if policy action is set, since this is a dummy call
// we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Validate if bucket exists, before proceeding further...
_, err := objAPI.GetBucketInfo(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
cors := &corsConfiguration{}
if err := xml.NewEncoder(w).Encode(cors); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
w.(http.Flusher).Flush()
}
// GetBucketTaggingHandler - GET bucket tagging, a dummy api
func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketTagging")
vars := mux.Vars(r)
bucket := vars["bucket"]
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
// Allow getBucketTagging if policy action is set, since this is a dummy call
// we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Validate if bucket exists, before proceeding further...
_, err := objAPI.GetBucketInfo(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
tags := &tagging{}
tags.TagSet.Tag = append(tags.TagSet.Tag, tagElem{})
if err := xml.NewEncoder(w).Encode(tags); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
w.(http.Flusher).Flush()
}
// GetObjectTaggingHandler - GET object tagging, a dummy api
func (api objectAPIHandlers) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetObjectTagging")
vars := mux.Vars(r)
bucket := vars["bucket"]
object := vars["object"]
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
// Allow getObjectTagging if policy action is set, since this is a dummy call
// we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Validate if object exists, before proceeding further...
_, err := objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
tags := &tagging{}
tags.TagSet.Tag = append(tags.TagSet.Tag, tagElem{})
if err := xml.NewEncoder(w).Encode(tags); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
w.(http.Flusher).Flush()
}


@@ -1,161 +0,0 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"io"
"net/http"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/policy"
)
type DummyObjectLayer struct{}
func (api *DummyObjectLayer) Shutdown(context.Context) (err error) {
return
}
func (api *DummyObjectLayer) StorageInfo(context.Context) (si StorageInfo) {
return
}
func (api *DummyObjectLayer) MakeBucketWithLocation(ctx context.Context, bucket string, location string) (err error) {
return
}
func (api *DummyObjectLayer) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
return
}
func (api *DummyObjectLayer) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
return
}
func (api *DummyObjectLayer) DeleteBucket(ctx context.Context, bucket string) (err error) {
return
}
func (api *DummyObjectLayer) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
return
}
func (api *DummyObjectLayer) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
return
}
func (api *DummyObjectLayer) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lock LockType) (gr *GetObjectReader, err error) {
return
}
func (api *DummyObjectLayer) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
return
}
func (api *DummyObjectLayer) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
return
}
func (api *DummyObjectLayer) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
return
}
func (api *DummyObjectLayer) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
return
}
func (api *DummyObjectLayer) DeleteObject(ctx context.Context, bucket, object string) (err error) {
return
}
func (api *DummyObjectLayer) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
return
}
func (api *DummyObjectLayer) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
return
}
func (api *DummyObjectLayer) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (info PartInfo, err error) {
return
}
func (api *DummyObjectLayer) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error) {
return
}
func (api *DummyObjectLayer) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) {
return
}
func (api *DummyObjectLayer) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) (err error) {
return
}
func (api *DummyObjectLayer) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
return
}
func (api *DummyObjectLayer) ReloadFormat(ctx context.Context, dryRun bool) (err error) {
return
}
func (api *DummyObjectLayer) HealFormat(ctx context.Context, dryRun bool) (item madmin.HealResultItem, err error) {
return
}
func (api *DummyObjectLayer) HealBucket(ctx context.Context, bucket string, dryRun bool) (items []madmin.HealResultItem, err error) {
return
}
func (api *DummyObjectLayer) HealObject(ctx context.Context, bucket, object string, dryRun bool) (item madmin.HealResultItem, err error) {
return
}
func (api *DummyObjectLayer) ListBucketsHeal(ctx context.Context) (buckets []BucketInfo, err error) {
return
}
func (api *DummyObjectLayer) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (info ListObjectsInfo, err error) {
return
}
func (api *DummyObjectLayer) SetBucketPolicy(context.Context, string, *policy.Policy) (err error) {
return
}
func (api *DummyObjectLayer) GetBucketPolicy(context.Context, string) (bucketPolicy *policy.Policy, err error) {
return
}
func (api *DummyObjectLayer) RefreshBucketPolicy(context.Context, string) (err error) {
return
}
func (api *DummyObjectLayer) DeleteBucketPolicy(context.Context, string) (err error) {
return
}
func (api *DummyObjectLayer) IsNotificationSupported() (b bool) {
return
}
func (api *DummyObjectLayer) IsEncryptionSupported() (b bool) {
return
}

View File

@@ -22,12 +22,14 @@ import (
"crypto/rand"
"crypto/subtle"
"encoding/binary"
"encoding/hex"
"errors"
"io"
"net/http"
"path"
"strconv"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/ioutil"
@@ -37,7 +39,6 @@ import (
var (
// AWS errors for invalid SSE-C requests.
errInsecureSSERequest = errors.New("SSE-C requests require TLS connections")
errEncryptedObject = errors.New("The object was stored using a form of SSE")
errInvalidSSEParameters = errors.New("The SSE-C key for key-rotation is not correct") // special access denied
errKMSNotConfigured = errors.New("KMS not configured for a server side encrypted object")
@@ -55,11 +56,11 @@ const (
// SSEIVSize is the size of the IV data
SSEIVSize = 32 // 32 bytes
// SSE dare package block size.
sseDAREPackageBlockSize = 64 * 1024 // 64KiB bytes
// SSEDAREPackageBlockSize - SSE dare package block size.
SSEDAREPackageBlockSize = 64 * 1024 // 64KiB bytes
// SSE dare package meta padding bytes.
sseDAREPackageMetaSize = 32 // 32 bytes
// SSEDAREPackageMetaSize - SSE dare package meta padding bytes.
SSEDAREPackageMetaSize = 32 // 32 bytes
)
@@ -80,16 +81,31 @@ func hasServerSideEncryptionHeader(header http.Header) bool {
return crypto.S3.IsRequested(header) || crypto.SSEC.IsRequested(header)
}
// isEncryptedMultipart returns true if the current object was
// uploaded by the user via the multipart mechanism:
// initiate new multipart, upload part, complete upload
func isEncryptedMultipart(objInfo ObjectInfo) bool {
if len(objInfo.Parts) == 0 {
return false
}
if !crypto.IsMultiPart(objInfo.UserDefined) {
return false
}
for _, part := range objInfo.Parts {
_, err := sio.DecryptedSize(uint64(part.Size))
if err != nil {
return false
}
}
// Further check whether this object was uploaded by the user via the
// multipart mechanism, and is not the result of XL internally splitting
// the object into parts in PutObject()
return !(objInfo.backendType == BackendErasure && len(objInfo.ETag) == 32)
}
// ParseSSECopyCustomerRequest parses the SSE-C header fields of the provided request.
// It returns the client provided key on success.
func ParseSSECopyCustomerRequest(h http.Header, metadata map[string]string) (key []byte, err error) {
if !globalIsSSL { // minio only supports HTTP or HTTPS requests, not both at the same time
// we cannot use r.TLS == nil here because Go's http implementation reflects on
// the net.Conn and sets the TLS field of http.Request only if it's a tls.Conn.
// Minio uses a BufConn (wrapping a tls.Conn) so the type check within the http package
// will always fail -> r.TLS is always nil even for TLS requests.
return nil, errInsecureSSERequest
}
if crypto.S3.IsEncrypted(metadata) && crypto.SSECopy.IsRequested(h) {
return nil, crypto.ErrIncompatibleEncryptionMethod
}
@@ -106,13 +122,6 @@ func ParseSSECustomerRequest(r *http.Request) (key []byte, err error) {
// ParseSSECustomerHeader parses the SSE-C header fields and returns
// the client provided key on success.
func ParseSSECustomerHeader(header http.Header) (key []byte, err error) {
if !globalIsSSL { // minio only supports HTTP or HTTPS requests, not both at the same time
// we cannot use r.TLS == nil here because Go's http implementation reflects on
// the net.Conn and sets the TLS field of http.Request only if it's a tls.Conn.
// Minio uses a BufConn (wrapping a tls.Conn) so the type check within the http package
// will always fail -> r.TLS is always nil even for TLS requests.
return nil, errInsecureSSERequest
}
if crypto.S3.IsRequested(header) && crypto.SSEC.IsRequested(header) {
return key, crypto.ErrIncompatibleEncryptionMethod
}
@@ -149,16 +158,40 @@ func rotateKey(oldKey []byte, newKey []byte, bucket, object string, metadata map
sealedKey = objectKey.Seal(extKey, sealedKey.IV, crypto.SSEC.String(), bucket, object)
crypto.SSEC.CreateMetadata(metadata, sealedKey)
return nil
case crypto.S3.IsEncrypted(metadata):
if GlobalKMS == nil {
return errKMSNotConfigured
}
keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(metadata)
if err != nil {
return err
}
oldKey, err := GlobalKMS.UnsealKey(keyID, kmsKey, crypto.Context{bucket: path.Join(bucket, object)})
if err != nil {
return err
}
var objectKey crypto.ObjectKey
if err = objectKey.Unseal(oldKey, sealedKey, crypto.S3.String(), bucket, object); err != nil {
return err
}
newKey, encKey, err := GlobalKMS.GenerateKey(globalKMSKeyID, crypto.Context{bucket: path.Join(bucket, object)})
if err != nil {
return err
}
sealedKey = objectKey.Seal(newKey, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object)
crypto.S3.CreateMetadata(metadata, globalKMSKeyID, encKey, sealedKey)
return nil
}
}
func newEncryptMetadata(key []byte, bucket, object string, metadata map[string]string, sseS3 bool) ([]byte, error) {
var sealedKey crypto.SealedKey
if sseS3 {
if globalKMS == nil {
if GlobalKMS == nil {
return nil, errKMSNotConfigured
}
key, encKey, err := globalKMS.GenerateKey(globalKMSKeyID, crypto.Context{bucket: path.Join(bucket, object)})
key, encKey, err := GlobalKMS.GenerateKey(globalKMSKeyID, crypto.Context{bucket: path.Join(bucket, object)})
if err != nil {
return nil, err
}
@@ -174,21 +207,20 @@ func newEncryptMetadata(key []byte, bucket, object string, metadata map[string]s
sealedKey = objectKey.Seal(extKey, crypto.GenerateIV(rand.Reader), crypto.SSEC.String(), bucket, object)
crypto.SSEC.CreateMetadata(metadata, sealedKey)
return objectKey[:], nil
}
func newEncryptReader(content io.Reader, key []byte, bucket, object string, metadata map[string]string, sseS3 bool) (io.Reader, error) {
func newEncryptReader(content io.Reader, key []byte, bucket, object string, metadata map[string]string, sseS3 bool) (r io.Reader, encKey []byte, err error) {
objectEncryptionKey, err := newEncryptMetadata(key, bucket, object, metadata, sseS3)
if err != nil {
return nil, err
return nil, encKey, err
}
reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20})
if err != nil {
return nil, crypto.ErrInvalidCustomerKey
return nil, encKey, crypto.ErrInvalidCustomerKey
}
return reader, nil
return reader, objectEncryptionKey, nil
}
// set new encryption metadata from http request headers for SSE-C and generated key from KMS in the case of
@@ -210,19 +242,18 @@ func setEncryptionMetadata(r *http.Request, bucket, object string, metadata map[
// EncryptRequest takes the client provided content and encrypts the data
// with the client provided key. It also marks the object as client-side-encrypted
// and sets the correct headers.
func EncryptRequest(content io.Reader, r *http.Request, bucket, object string, metadata map[string]string) (io.Reader, error) {
func EncryptRequest(content io.Reader, r *http.Request, bucket, object string, metadata map[string]string) (reader io.Reader, objEncKey []byte, err error) {
var key []byte
if crypto.S3.IsRequested(r.Header) && crypto.SSEC.IsRequested(r.Header) {
return nil, crypto.ErrIncompatibleEncryptionMethod
return nil, objEncKey, crypto.ErrIncompatibleEncryptionMethod
}
if crypto.SSEC.IsRequested(r.Header) {
key, err = ParseSSECustomerRequest(r)
if err != nil {
return nil, err
return nil, objEncKey, err
}
}
return newEncryptReader(content, key, bucket, object, metadata, crypto.S3.IsRequested(r.Header))
@@ -249,7 +280,7 @@ func decryptObjectInfo(key []byte, bucket, object string, metadata map[string]st
default:
return nil, errObjectTampered
case crypto.S3.IsEncrypted(metadata):
if globalKMS == nil {
if GlobalKMS == nil {
return nil, errKMSNotConfigured
}
keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(metadata)
@@ -257,7 +288,7 @@ func decryptObjectInfo(key []byte, bucket, object string, metadata map[string]st
if err != nil {
return nil, err
}
extKey, err := globalKMS.UnsealKey(keyID, kmsKey, crypto.Context{bucket: path.Join(bucket, object)})
extKey, err := GlobalKMS.UnsealKey(keyID, kmsKey, crypto.Context{bucket: path.Join(bucket, object)})
if err != nil {
return nil, err
}
@@ -325,7 +356,7 @@ func DecryptRequestWithSequenceNumberR(client io.Reader, h http.Header, bucket,
// DecryptCopyRequestR - same as DecryptCopyRequest, but with a
// Reader
func DecryptCopyRequestR(client io.Reader, h http.Header, bucket, object string, metadata map[string]string) (io.Reader, error) {
func DecryptCopyRequestR(client io.Reader, h http.Header, bucket, object string, seqNumber uint32, metadata map[string]string) (io.Reader, error) {
var (
key []byte
err error
@@ -336,7 +367,7 @@ func DecryptCopyRequestR(client io.Reader, h http.Header, bucket, object string,
return nil, err
}
}
return newDecryptReader(client, key, bucket, object, 0, metadata)
return newDecryptReader(client, key, bucket, object, seqNumber, metadata)
}
func newDecryptReader(client io.Reader, key []byte, bucket, object string, seqNumber uint32, metadata map[string]string) (io.Reader, error) {
@@ -358,17 +389,6 @@ func newDecryptReaderWithObjectKey(client io.Reader, objectEncryptionKey []byte,
return reader, nil
}
// GetEncryptedOffsetLength - returns encrypted offset and length
// along with sequence number
func GetEncryptedOffsetLength(startOffset, length int64, objInfo ObjectInfo) (seqNumber uint32, encStartOffset, encLength int64) {
if len(objInfo.Parts) == 0 || !crypto.IsMultiPart(objInfo.UserDefined) {
seqNumber, encStartOffset, encLength = getEncryptedSinglePartOffsetLength(startOffset, length, objInfo)
return
}
seqNumber, encStartOffset, encLength = getEncryptedMultipartsOffsetLength(startOffset, length, objInfo)
return
}
// DecryptBlocksRequestR - same as DecryptBlocksRequest but with a
// reader
func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, offset,
@@ -376,13 +396,12 @@ func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, offset,
io.Reader, error) {
bucket, object := oi.Bucket, oi.Name
// Single part case
if len(oi.Parts) == 0 || !crypto.IsMultiPart(oi.UserDefined) {
if !isEncryptedMultipart(oi) {
var reader io.Reader
var err error
if copySource {
reader, err = DecryptCopyRequestR(inputReader, h, bucket, object, oi.UserDefined)
reader, err = DecryptCopyRequestR(inputReader, h, bucket, object, seqNumber, oi.UserDefined)
} else {
reader, err = DecryptRequestWithSequenceNumberR(inputReader, h, bucket, object, seqNumber, oi.UserDefined)
}
@@ -392,8 +411,8 @@ func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, offset,
return reader, nil
}
partDecRelOffset := int64(seqNumber) * sseDAREPackageBlockSize
partEncRelOffset := int64(seqNumber) * (sseDAREPackageBlockSize + sseDAREPackageMetaSize)
partDecRelOffset := int64(seqNumber) * SSEDAREPackageBlockSize
partEncRelOffset := int64(seqNumber) * (SSEDAREPackageBlockSize + SSEDAREPackageMetaSize)
w := &DecryptBlocksReader{
reader: inputReader,
@@ -458,7 +477,7 @@ type DecryptBlocksReader struct {
// Current part index
partIndex int
// Parts information
parts []objectPartInfo
parts []ObjectPartInfo
header http.Header
bucket, object string
metadata map[string]string
@@ -558,7 +577,6 @@ func (d *DecryptBlocksReader) Read(p []byte) (int, error) {
d.partDecRelOffset += int64(n1)
}
return len(p), nil
}
@@ -574,7 +592,7 @@ type DecryptBlocksWriter struct {
// Current part index
partIndex int
// Parts information
parts []objectPartInfo
parts []ObjectPartInfo
req *http.Request
bucket, object string
metadata map[string]string
@@ -708,7 +726,7 @@ func DecryptBlocksRequest(client io.Writer, r *http.Request, bucket, object stri
var seqNumber uint32
var encStartOffset, encLength int64
if len(objInfo.Parts) == 0 || !crypto.IsMultiPart(objInfo.UserDefined) {
if !isEncryptedMultipart(objInfo) {
seqNumber, encStartOffset, encLength = getEncryptedSinglePartOffsetLength(startOffset, length, objInfo)
var writer io.WriteCloser
@@ -724,7 +742,8 @@ func DecryptBlocksRequest(client io.Writer, r *http.Request, bucket, object stri
return writer, encStartOffset, encLength, nil
}
seqNumber, encStartOffset, encLength = getEncryptedMultipartsOffsetLength(startOffset, length, objInfo)
_, encStartOffset, encLength = getEncryptedMultipartsOffsetLength(startOffset, length, objInfo)
var partStartIndex int
var partStartOffset = startOffset
// Skip parts until final offset maps to a particular part offset.
@@ -747,8 +766,8 @@ func DecryptBlocksRequest(client io.Writer, r *http.Request, bucket, object stri
partStartOffset -= int64(decryptedSize)
}
startSeqNum := partStartOffset / sseDAREPackageBlockSize
partEncRelOffset := int64(startSeqNum) * (sseDAREPackageBlockSize + sseDAREPackageMetaSize)
startSeqNum := partStartOffset / SSEDAREPackageBlockSize
partEncRelOffset := int64(startSeqNum) * (SSEDAREPackageBlockSize + SSEDAREPackageMetaSize)
w := &DecryptBlocksWriter{
writer: client,
@@ -793,7 +812,6 @@ func DecryptBlocksRequest(client io.Writer, r *http.Request, bucket, object stri
// getEncryptedMultipartsOffsetLength - fetch sequence number, encrypted start offset and encrypted length.
func getEncryptedMultipartsOffsetLength(offset, length int64, obj ObjectInfo) (uint32, int64, int64) {
// Calculate encrypted offset of a multipart object
computeEncOffset := func(off int64, obj ObjectInfo) (seqNumber uint32, encryptedOffset int64, err error) {
var curPartEndOffset uint64
@@ -840,9 +858,9 @@ func getEncryptedMultipartsOffsetLength(offset, length int64, obj ObjectInfo) (u
// getEncryptedSinglePartOffsetLength - fetch sequence number, encrypted start offset and encrypted length.
func getEncryptedSinglePartOffsetLength(offset, length int64, objInfo ObjectInfo) (seqNumber uint32, encOffset int64, encLength int64) {
onePkgSize := int64(sseDAREPackageBlockSize + sseDAREPackageMetaSize)
onePkgSize := int64(SSEDAREPackageBlockSize + SSEDAREPackageMetaSize)
seqNumber = uint32(offset / sseDAREPackageBlockSize)
seqNumber = uint32(offset / SSEDAREPackageBlockSize)
encOffset = int64(seqNumber) * onePkgSize
// The math to compute the encrypted length is always
// originalLength i.e (offset+length-1) to be divided under
@@ -850,10 +868,10 @@ func getEncryptedSinglePartOffsetLength(offset, length int64, objInfo ObjectInfo
// block. This is then multiplied by the final package size, which
// is basically 64KiB + 32. Finally, subtract the encrypted offset
// to get the final encrypted length on disk.
encLength = ((offset+length)/sseDAREPackageBlockSize)*onePkgSize - encOffset
encLength = ((offset+length)/SSEDAREPackageBlockSize)*onePkgSize - encOffset
// Check for the remainder, to figure out if we need one extra package to read from.
if (offset+length)%sseDAREPackageBlockSize > 0 {
if (offset+length)%SSEDAREPackageBlockSize > 0 {
encLength += onePkgSize
}
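To make the package arithmetic above concrete, here is a small worked sketch; the offset and length values are illustrative, not taken from the diff, while the constants match the 64KiB block size and 32-byte DARE overhead defined earlier:
const onePkg = int64(64*1024 + 32)          // one encrypted DARE package on disk
offset, length := int64(70000), int64(1000) // illustrative request range
seqNumber := uint32(offset / (64 * 1024))   // 1: the range starts in package 1
encOffset := int64(seqNumber) * onePkg      // 65568
encLength := ((offset+length)/(64*1024))*onePkg - encOffset // 0 so far
if (offset+length)%(64*1024) > 0 {
encLength += onePkg // 65568: the whole range fits inside that single package
}
fmt.Println(seqNumber, encOffset, encLength) // 1 65568 65568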
@@ -870,7 +888,7 @@ func (o *ObjectInfo) DecryptedSize() (int64, error) {
if !crypto.IsEncrypted(o.UserDefined) {
return 0, errors.New("Cannot compute decrypted size of an unencrypted object")
}
if len(o.Parts) == 0 || !crypto.IsMultiPart(o.UserDefined) {
if !isEncryptedMultipart(*o) {
size, err := sio.DecryptedSize(uint64(o.Size))
if err != nil {
err = errObjectTampered // assign correct error type
@@ -889,6 +907,64 @@ func (o *ObjectInfo) DecryptedSize() (int64, error) {
return size, nil
}
// For encrypted objects, the ETag sent by the client, if available,
// is stored in encrypted form in the backend. Decrypt the ETag
// if it was previously encrypted.
func getDecryptedETag(headers http.Header, objInfo ObjectInfo, copySource bool) (decryptedETag string) {
var (
key [32]byte
err error
)
// If the ETag is the content MD5Sum, return it as-is.
if len(objInfo.ETag) == 32 {
return objInfo.ETag
}
if crypto.IsMultiPart(objInfo.UserDefined) {
return objInfo.ETag
}
if crypto.SSECopy.IsRequested(headers) {
key, err = crypto.SSECopy.ParseHTTP(headers)
if err != nil {
return objInfo.ETag
}
}
// As per the AWS S3 spec, the ETag for SSE-C encrypted objects need not be the MD5Sum of the data.
// Since a server-side copy with the same source and destination just replaces the ETag, we save the
// encrypted content MD5Sum as the ETag for both SSE-C and SSE-S3. This standardizes ETag
// encryption across SSE-C and SSE-S3; only the last 32 bytes are returned for SSE-C.
if crypto.SSEC.IsEncrypted(objInfo.UserDefined) && !copySource {
return objInfo.ETag[len(objInfo.ETag)-32:]
}
objectEncryptionKey, err := decryptObjectInfo(key[:], objInfo.Bucket, objInfo.Name, objInfo.UserDefined)
if err != nil {
return objInfo.ETag
}
return tryDecryptETag(objectEncryptionKey, objInfo.ETag, false)
}
// helper to decrypt an ETag given the object encryption key and the encrypted ETag
func tryDecryptETag(key []byte, encryptedETag string, ssec bool) string {
// The ETag for SSE-C encrypted objects need not be the content MD5Sum. While the encrypted
// md5sum is stored internally, return just the last 32 bytes of the hex-encoded and
// encrypted md5sum string for SSE-C
if ssec {
return encryptedETag[len(encryptedETag)-32:]
}
var objectKey crypto.ObjectKey
copy(objectKey[:], key)
encBytes, err := hex.DecodeString(encryptedETag)
if err != nil {
return encryptedETag
}
etagBytes, err := objectKey.UnsealETag(encBytes)
if err != nil {
return encryptedETag
}
return hex.EncodeToString(etagBytes)
}
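A quick illustration of the SSE-C fast path above: a plain hex-encoded MD5 is 32 characters, so only the trailing 32 characters of the longer, encrypted ETag string are returned. The value below is a stand-in, not a real ETag, and the sketch assumes the usual strings/fmt imports:
enc := strings.Repeat("ab", 40)        // an 80-character stand-in for an encrypted ETag
etag := tryDecryptETag(nil, enc, true) // ssec=true, so the key is not used on this path
fmt.Println(etag == enc[len(enc)-32:]) // true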
// GetDecryptedRange - To decrypt the range (off, length) of the
// decrypted object stream, we need to read the range (encOff,
// encLength) of the encrypted object stream to decrypt it, and
@@ -910,26 +986,29 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
}
// Assemble slice of (decrypted) part sizes in `sizes`
var sizes []int64
var decObjSize int64 // decrypted total object size
var partSize uint64
partSize, err = sio.DecryptedSize(uint64(o.Size))
if err != nil {
return
}
sizes := []int64{int64(partSize)}
decObjSize = sizes[0]
if crypto.IsMultiPart(o.UserDefined) {
if isEncryptedMultipart(*o) {
sizes = make([]int64, len(o.Parts))
decObjSize = 0
for i, part := range o.Parts {
var partSize uint64
partSize, err = sio.DecryptedSize(uint64(part.Size))
if err != nil {
err = errObjectTampered
return
}
t := int64(partSize)
sizes[i] = t
decObjSize += t
sizes[i] = int64(partSize)
decObjSize += int64(partSize)
}
} else {
var partSize uint64
partSize, err = sio.DecryptedSize(uint64(o.Size))
if err != nil {
err = errObjectTampered
return
}
sizes = []int64{int64(partSize)}
decObjSize = sizes[0]
}
var off, length int64
@@ -963,11 +1042,11 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
// partStart is always found in the loop above,
// because off is validated.
sseDAREEncPackageBlockSize := int64(sseDAREPackageBlockSize + sseDAREPackageMetaSize)
startPkgNum := (off - cumulativeSum) / sseDAREPackageBlockSize
sseDAREEncPackageBlockSize := int64(SSEDAREPackageBlockSize + SSEDAREPackageMetaSize)
startPkgNum := (off - cumulativeSum) / SSEDAREPackageBlockSize
// Now we can calculate the number of bytes to skip
skipLen = (off - cumulativeSum) % sseDAREPackageBlockSize
skipLen = (off - cumulativeSum) % SSEDAREPackageBlockSize
encOff = encCumulativeSum + startPkgNum*sseDAREEncPackageBlockSize
// Locate the part containing the end of the required range
@@ -984,7 +1063,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
}
// partEnd is always found in the loop above, because off and
// length are validated.
endPkgNum := (endOffset - cumulativeSum) / sseDAREPackageBlockSize
endPkgNum := (endOffset - cumulativeSum) / SSEDAREPackageBlockSize
// Compute endEncOffset with one additional DARE package (so
// we read the package containing the last desired byte).
endEncOffset := encCumulativeSum + (endPkgNum+1)*sseDAREEncPackageBlockSize
@@ -1024,22 +1103,22 @@ func (o *ObjectInfo) EncryptedSize() int64 {
// decryption succeeded.
//
// DecryptCopyObjectInfo also returns whether the object is encrypted or not.
func DecryptCopyObjectInfo(info *ObjectInfo, headers http.Header) (apiErr APIErrorCode, encrypted bool) {
func DecryptCopyObjectInfo(info *ObjectInfo, headers http.Header) (errCode APIErrorCode, encrypted bool) {
// Directories are never encrypted.
if info.IsDir {
return ErrNone, false
}
if apiErr, encrypted = ErrNone, crypto.IsEncrypted(info.UserDefined); !encrypted && crypto.SSECopy.IsRequested(headers) {
apiErr = ErrInvalidEncryptionParameters
if errCode, encrypted = ErrNone, crypto.IsEncrypted(info.UserDefined); !encrypted && crypto.SSECopy.IsRequested(headers) {
errCode = ErrInvalidEncryptionParameters
} else if encrypted {
if (!crypto.SSECopy.IsRequested(headers) && crypto.SSEC.IsEncrypted(info.UserDefined)) ||
(crypto.SSECopy.IsRequested(headers) && crypto.S3.IsEncrypted(info.UserDefined)) {
apiErr = ErrSSEEncryptedObject
errCode = ErrSSEEncryptedObject
return
}
var err error
if info.Size, err = info.DecryptedSize(); err != nil {
apiErr = toAPIErrorCode(err)
errCode = toAPIErrorCode(context.Background(), err)
}
}
return
@@ -1052,7 +1131,7 @@ func DecryptCopyObjectInfo(info *ObjectInfo, headers http.Header) (apiErr APIErr
// decryption succeeded.
//
// DecryptObjectInfo also returns whether the object is encrypted or not.
func DecryptObjectInfo(info ObjectInfo, headers http.Header) (encrypted bool, err error) {
func DecryptObjectInfo(info *ObjectInfo, headers http.Header) (encrypted bool, err error) {
// Directories are never encrypted.
if info.IsDir {
return false, nil
@@ -1071,6 +1150,123 @@ func DecryptObjectInfo(info ObjectInfo, headers http.Header) (encrypted bool, er
return
}
_, err = info.DecryptedSize()
if crypto.IsEncrypted(info.UserDefined) && !crypto.IsMultiPart(info.UserDefined) {
info.ETag = getDecryptedETag(headers, *info, false)
}
}
return
}
// The customer key in the header is used by the gateway for encryption in the case of
// S3 gateway double encryption. A new client key is derived from the customer-provided
// key and sent to the S3 backend for encryption at the backend.
func deriveClientKey(clientKey [32]byte, bucket, object string) [32]byte {
var key [32]byte
mac := hmac.New(sha256.New, clientKey[:])
mac.Write([]byte(crypto.SSEC.String()))
mac.Write([]byte(path.Join(bucket, object)))
mac.Sum(key[:0])
return key
}
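As a small sanity sketch of the derivation above (an all-zero key, purely for illustration), the same customer key yields distinct per-object keys because the HMAC input binds the bucket/object path:
var clientKey [32]byte // all-zero customer key, for illustration only
k1 := deriveClientKey(clientKey, "bucket", "a.txt")
k2 := deriveClientKey(clientKey, "bucket", "b.txt")
fmt.Println(k1 == k2) // false: each object path derives a different key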
// set encryption options for pass-through to the backend in the case of gateway and UserDefined metadata
func getDefaultOpts(header http.Header, copySource bool, metadata map[string]string) (opts ObjectOptions, err error) {
var clientKey [32]byte
var sse encrypt.ServerSide
if copySource {
if crypto.SSECopy.IsRequested(header) {
clientKey, err = crypto.SSECopy.ParseHTTP(header)
if err != nil {
return
}
if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil {
return
}
return ObjectOptions{ServerSideEncryption: encrypt.SSECopy(sse), UserDefined: metadata}, nil
}
return
}
if crypto.SSEC.IsRequested(header) {
clientKey, err = crypto.SSEC.ParseHTTP(header)
if err != nil {
return
}
if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil {
return
}
return ObjectOptions{ServerSideEncryption: sse, UserDefined: metadata}, nil
}
if crypto.S3.IsRequested(header) || (metadata != nil && crypto.S3.IsEncrypted(metadata)) {
return ObjectOptions{ServerSideEncryption: encrypt.NewSSE(), UserDefined: metadata}, nil
}
return ObjectOptions{UserDefined: metadata}, nil
}
// get ObjectOptions for GET calls from encryption headers
func getOpts(ctx context.Context, r *http.Request, bucket, object string) (ObjectOptions, error) {
var (
encryption encrypt.ServerSide
opts ObjectOptions
)
if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) {
key, err := crypto.SSEC.ParseHTTP(r.Header)
if err != nil {
return opts, err
}
derivedKey := deriveClientKey(key, bucket, object)
encryption, err = encrypt.NewSSEC(derivedKey[:])
logger.CriticalIf(ctx, err)
return ObjectOptions{ServerSideEncryption: encryption}, nil
}
// default case of passing encryption headers to backend
return getDefaultOpts(r.Header, false, nil)
}
// get ObjectOptions for PUT calls from encryption headers and metadata
func putOpts(ctx context.Context, r *http.Request, bucket, object string, metadata map[string]string) (opts ObjectOptions, err error) {
// In the case of the multipart custom format, the metadata needs to be checked in addition to the header to see if it
// is SSE-S3 encrypted, primarily because the S3 protocol does not require SSE-S3 headers in PutObjectPart calls
if GlobalGatewaySSE.SSES3() && (crypto.S3.IsRequested(r.Header) || crypto.S3.IsEncrypted(metadata)) {
return ObjectOptions{ServerSideEncryption: encrypt.NewSSE(), UserDefined: metadata}, nil
}
if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) {
opts, err = getOpts(ctx, r, bucket, object)
opts.UserDefined = metadata
return
}
// default case of passing encryption headers and UserDefined metadata to backend
return getDefaultOpts(r.Header, false, metadata)
}
// get ObjectOptions for Copy calls with encryption headers provided on the target side and source side metadata
func copyDstOpts(ctx context.Context, r *http.Request, bucket, object string, metadata map[string]string) (opts ObjectOptions, err error) {
return putOpts(ctx, r, bucket, object, metadata)
}
// get ObjectOptions for Copy calls with encryption headers provided on the source side
func copySrcOpts(ctx context.Context, r *http.Request, bucket, object string) (ObjectOptions, error) {
var (
ssec encrypt.ServerSide
opts ObjectOptions
)
if GlobalGatewaySSE.SSEC() && crypto.SSECopy.IsRequested(r.Header) {
key, err := crypto.SSECopy.ParseHTTP(r.Header)
if err != nil {
return opts, err
}
derivedKey := deriveClientKey(key, bucket, object)
ssec, err = encrypt.NewSSEC(derivedKey[:])
if err != nil {
return opts, err
}
return ObjectOptions{ServerSideEncryption: encrypt.SSECopy(ssec)}, nil
}
// default case of passing encryption headers to backend
return getDefaultOpts(r.Header, true, nil)
}

View File

@@ -23,6 +23,7 @@ import (
"testing"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/sio"
)
@@ -38,7 +39,7 @@ var hasServerSideEncryptionHeaderTests = []struct {
{headers: map[string]string{}, sseRequest: false}, // 4
{headers: map[string]string{crypto.SSECopyAlgorithm + " ": "AES256", " " + crypto.SSECopyKey: "key", crypto.SSECopyKeyMD5 + " ": "md5"}, sseRequest: false}, // 5
{headers: map[string]string{crypto.SSECopyAlgorithm: "", crypto.SSECopyKey: "", crypto.SSECopyKeyMD5: ""}, sseRequest: false}, // 6
{headers: map[string]string{crypto.SSEHeader: ""}, sseRequest: true}, // 6
{headers: map[string]string{crypto.SSEHeader: ""}, sseRequest: true}, // 7
}
func TestHasServerSideEncryptionHeader(t *testing.T) {
@@ -90,12 +91,12 @@ var hasSSECustomerHeaderTests = []struct {
{headers: map[string]string{crypto.SSECKeyMD5: "md5"}, sseRequest: true}, // 3
{headers: map[string]string{}, sseRequest: false}, // 4
{headers: map[string]string{crypto.SSECAlgorithm + " ": "AES256", " " + crypto.SSECKey: "key", crypto.SSECKeyMD5 + " ": "md5"}, sseRequest: false}, // 5
{headers: map[string]string{crypto.SSECAlgorithm: "", crypto.SSECKey: "", crypto.SSECKeyMD5: ""}, sseRequest: false}, // 6
{headers: map[string]string{crypto.SSECAlgorithm: "", crypto.SSECKey: "", crypto.SSECKeyMD5: ""}, sseRequest: true}, // 6
{headers: map[string]string{crypto.SSEHeader: ""}, sseRequest: false}, // 7
}
func TesthasSSECustomerHeader(t *testing.T) {
func TestHasSSECustomerHeader(t *testing.T) {
for i, test := range hasSSECustomerHeaderTests {
headers := http.Header{}
for k, v := range test.headers {
@@ -107,228 +108,6 @@ func TesthasSSECustomerHeader(t *testing.T) {
}
}
var parseSSECustomerRequestTests = []struct {
headers map[string]string
useTLS bool
err error
}{
{
headers: map[string]string{
crypto.SSECAlgorithm: "AES256",
crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 0
crypto.SSECKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
},
useTLS: true, err: nil,
},
{
headers: map[string]string{
crypto.SSECAlgorithm: "AES256",
crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 1
crypto.SSECKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
},
useTLS: false, err: errInsecureSSERequest,
},
{
headers: map[string]string{
crypto.SSECAlgorithm: "AES 256",
crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 2
crypto.SSECKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
},
useTLS: true, err: crypto.ErrInvalidCustomerAlgorithm,
},
{
headers: map[string]string{
crypto.SSECAlgorithm: "AES256",
crypto.SSECKey: "NjE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 3
crypto.SSECKeyMD5: "H+jq/LwEOEO90YtiTuNFVw==",
},
useTLS: true, err: crypto.ErrCustomerKeyMD5Mismatch,
},
{
headers: map[string]string{
crypto.SSECAlgorithm: "AES256",
crypto.SSECKey: " jE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 4
crypto.SSECKeyMD5: "H+jq/LwEOEO90YtiTuNFVw==",
},
useTLS: true, err: crypto.ErrInvalidCustomerKey,
},
{
headers: map[string]string{
crypto.SSECAlgorithm: "AES256",
crypto.SSECKey: "NjE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 5
crypto.SSECKeyMD5: " +jq/LwEOEO90YtiTuNFVw==",
},
useTLS: true, err: crypto.ErrCustomerKeyMD5Mismatch,
},
{
headers: map[string]string{
crypto.SSECAlgorithm: "AES256",
crypto.SSECKey: "vFQ9ScFOF6Tu/BfzMS+rVMvlZGJHi5HmGJenJfrfKI45", // 6
crypto.SSECKeyMD5: "9KPgDdZNTHimuYCwnJTp5g==",
},
useTLS: true, err: crypto.ErrInvalidCustomerKey,
},
{
headers: map[string]string{
crypto.SSECAlgorithm: "AES256",
crypto.SSECKey: "", // 7
crypto.SSECKeyMD5: "9KPgDdZNTHimuYCwnJTp5g==",
},
useTLS: true, err: crypto.ErrMissingCustomerKey,
},
{
headers: map[string]string{
crypto.SSECAlgorithm: "AES256",
crypto.SSECKey: "vFQ9ScFOF6Tu/BfzMS+rVMvlZGJHi5HmGJenJfrfKI45", // 8
crypto.SSECKeyMD5: "",
},
useTLS: true, err: crypto.ErrMissingCustomerKeyMD5,
},
{
headers: map[string]string{
crypto.SSECAlgorithm: "AES256",
crypto.SSECKey: "vFQ9ScFOF6Tu/BfzMS+rVMvlZGJHi5HmGJenJfrfKI45", // 8
crypto.SSECKeyMD5: "",
crypto.SSEHeader: "",
},
useTLS: true, err: crypto.ErrIncompatibleEncryptionMethod,
},
}
func TestParseSSECustomerRequest(t *testing.T) {
defer func(flag bool) { globalIsSSL = flag }(globalIsSSL)
for i, test := range parseSSECustomerRequestTests {
headers := http.Header{}
for k, v := range test.headers {
headers.Set(k, v)
}
request := &http.Request{}
request.Header = headers
globalIsSSL = test.useTLS
_, err := ParseSSECustomerRequest(request)
if err != test.err {
t.Errorf("Test %d: Parse returned: %v want: %v", i, err, test.err)
}
}
}
var parseSSECopyCustomerRequestTests = []struct {
headers map[string]string
metadata map[string]string
useTLS bool
err error
}{
{
headers: map[string]string{
crypto.SSECopyAlgorithm: "AES256",
crypto.SSECopyKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 0
crypto.SSECopyKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
},
metadata: map[string]string{},
useTLS: true, err: nil,
},
{
headers: map[string]string{
crypto.SSECopyAlgorithm: "AES256",
crypto.SSECopyKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 0
crypto.SSECopyKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
},
metadata: map[string]string{"X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key": base64.StdEncoding.EncodeToString(make([]byte, 64))},
useTLS: true, err: crypto.ErrIncompatibleEncryptionMethod,
},
{
headers: map[string]string{
crypto.SSECopyAlgorithm: "AES256",
crypto.SSECopyKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 1
crypto.SSECopyKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
},
metadata: map[string]string{},
useTLS: false, err: errInsecureSSERequest,
},
{
headers: map[string]string{
crypto.SSECopyAlgorithm: "AES 256",
crypto.SSECopyKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 2
crypto.SSECopyKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
},
metadata: map[string]string{},
useTLS: true, err: crypto.ErrInvalidCustomerAlgorithm,
},
{
headers: map[string]string{
crypto.SSECopyAlgorithm: "AES256",
crypto.SSECopyKey: "NjE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 3
crypto.SSECopyKeyMD5: "H+jq/LwEOEO90YtiTuNFVw==",
},
metadata: map[string]string{},
useTLS: true, err: crypto.ErrCustomerKeyMD5Mismatch,
},
{
headers: map[string]string{
crypto.SSECopyAlgorithm: "AES256",
crypto.SSECopyKey: " jE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 4
crypto.SSECopyKeyMD5: "H+jq/LwEOEO90YtiTuNFVw==",
},
metadata: map[string]string{},
useTLS: true, err: crypto.ErrInvalidCustomerKey,
},
{
headers: map[string]string{
crypto.SSECopyAlgorithm: "AES256",
crypto.SSECopyKey: "NjE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 5
crypto.SSECopyKeyMD5: " +jq/LwEOEO90YtiTuNFVw==",
},
metadata: map[string]string{},
useTLS: true, err: crypto.ErrCustomerKeyMD5Mismatch,
},
{
headers: map[string]string{
crypto.SSECopyAlgorithm: "AES256",
crypto.SSECopyKey: "vFQ9ScFOF6Tu/BfzMS+rVMvlZGJHi5HmGJenJfrfKI45", // 6
crypto.SSECopyKeyMD5: "9KPgDdZNTHimuYCwnJTp5g==",
},
metadata: map[string]string{},
useTLS: true, err: crypto.ErrInvalidCustomerKey,
},
{
headers: map[string]string{
crypto.SSECopyAlgorithm: "AES256",
crypto.SSECopyKey: "", // 7
crypto.SSECopyKeyMD5: "9KPgDdZNTHimuYCwnJTp5g==",
},
metadata: map[string]string{},
useTLS: true, err: crypto.ErrMissingCustomerKey,
},
{
headers: map[string]string{
crypto.SSECopyAlgorithm: "AES256",
crypto.SSECopyKey: "vFQ9ScFOF6Tu/BfzMS+rVMvlZGJHi5HmGJenJfrfKI45", // 8
crypto.SSECopyKeyMD5: "",
},
metadata: map[string]string{},
useTLS: true, err: crypto.ErrMissingCustomerKeyMD5,
},
}
func TestParseSSECopyCustomerRequest(t *testing.T) {
defer func(flag bool) { globalIsSSL = flag }(globalIsSSL)
for i, test := range parseSSECopyCustomerRequestTests {
headers := http.Header{}
for k, v := range test.headers {
headers.Set(k, v)
}
request := &http.Request{}
request.Header = headers
globalIsSSL = test.useTLS
_, err := ParseSSECopyCustomerRequest(request.Header, test.metadata)
if err != test.err {
t.Errorf("Test %d: Parse returned: %v want: %v", i, err, test.err)
}
}
}
var encryptRequestTests = []struct {
header map[string]string
metadata map[string]string
@@ -362,7 +141,7 @@ func TestEncryptRequest(t *testing.T) {
for k, v := range test.header {
req.Header.Set(k, v)
}
_, err := EncryptRequest(content, req, "bucket", "object", test.metadata)
_, _, err := EncryptRequest(content, req, "bucket", "object", test.metadata)
if err != nil {
t.Fatalf("Test %d: Failed to encrypt request: %v", i, err)
@@ -547,7 +326,7 @@ var decryptObjectInfoTests = []struct {
func TestDecryptObjectInfo(t *testing.T) {
for i, test := range decryptObjectInfoTests {
if encrypted, err := DecryptObjectInfo(test.info, test.headers); err != test.expErr {
if encrypted, err := DecryptObjectInfo(&test.info, test.headers); err != test.expErr {
t.Errorf("Test %d: Decryption returned wrong error code: got %d , want %d", i, err, test.expErr)
} else if enc := crypto.IsEncrypted(test.info.UserDefined); encrypted && enc != encrypted {
t.Errorf("Test %d: Decryption thinks object is encrypted but it is not", i)
@@ -557,6 +336,66 @@ func TestDecryptObjectInfo(t *testing.T) {
}
}
// Test for an issue reproduced when computing the correct encrypted
// offset of the object.
func TestGetDecryptedRange_Issue50(t *testing.T) {
rs, err := parseRequestRangeSpec("bytes=594870256-594870263")
if err != nil {
t.Fatal(err)
}
objInfo := ObjectInfo{
Bucket: "bucket",
Name: "object",
Size: 595160760,
UserDefined: map[string]string{
crypto.SSEMultipart: "",
crypto.SSEIV: "HTexa=",
crypto.SSESealAlgorithm: "DAREv2-HMAC-SHA256",
crypto.SSECSealedKey: "IAA8PGAA==",
ReservedMetadataPrefix + "actual-size": "594870264",
"content-type": "application/octet-stream",
"etag": "166b1545b4c1535294ee0686678bea8c-2",
},
Parts: []ObjectPartInfo{
{
Number: 1,
Name: "part.1",
ETag: "etag1",
Size: 297580380,
ActualSize: 297435132,
},
{
Number: 2,
Name: "part.2",
ETag: "etag2",
Size: 297580380,
ActualSize: 297435132,
},
},
}
encOff, encLength, skipLen, seqNumber, partStart, err := objInfo.GetDecryptedRange(rs)
if err != nil {
t.Fatalf("Test: failed %s", err)
}
if encOff != 595127964 {
t.Fatalf("Test: expected %d, got %d", 595127964, encOff)
}
if encLength != 32796 {
t.Fatalf("Test: expected %d, got %d", 32796, encLength)
}
if skipLen != 32756 {
t.Fatalf("Test: expected %d, got %d", 32756, skipLen)
}
if seqNumber != 4538 {
t.Fatalf("Test: expected %d, got %d", 4538, seqNumber)
}
if partStart != 1 {
t.Fatalf("Test: expected %d, got %d", 1, partStart)
}
}
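For readers checking the expected values in this test, they follow from the DARE package math, with 65536 the plaintext block size and 65568 the on-disk package size (a sketch, not part of the test itself):
relOff := int64(594870256) - 297435132      // 297435124 bytes into part 2, hence partStart == 1
startPkg := relOff / 65536                  // 4538, the expected seqNumber
skip := relOff % 65536                      // 32756, the expected skipLen
encOff := int64(297580380) + startPkg*65568 // 595127964, the expected encOff
encLen := int64(297580380) - startPkg*65568 // 32796: the range ends in part 2's final, short package
fmt.Println(startPkg, skip, encOff, encLen) // 4538 32756 595127964 32796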
func TestGetDecryptedRange(t *testing.T) {
var (
pkgSz = int64(64) * humanize.KiByte
@@ -666,7 +505,7 @@ func TestGetDecryptedRange(t *testing.T) {
var (
// make a multipart object-info given part sizes
mkMPObj = func(sizes []int64) ObjectInfo {
r := make([]objectPartInfo, len(sizes))
r := make([]ObjectPartInfo, len(sizes))
sum := int64(0)
for i, s := range sizes {
r[i].Number = i
@@ -838,3 +677,84 @@ func TestGetDecryptedRange(t *testing.T) {
}
}
var getDefaultOptsTests = []struct {
headers http.Header
copySource bool
metadata map[string]string
encryptionType encrypt.Type
err error
}{
{headers: http.Header{crypto.SSECAlgorithm: []string{"AES256"},
crypto.SSECKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
crypto.SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
copySource: false,
metadata: nil,
encryptionType: encrypt.SSEC,
err: nil}, // 0
{headers: http.Header{crypto.SSECAlgorithm: []string{"AES256"},
crypto.SSECKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
crypto.SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
copySource: true,
metadata: nil,
encryptionType: "",
err: nil}, // 1
{headers: http.Header{crypto.SSECAlgorithm: []string{"AES256"},
crypto.SSECKey: []string{"Mz"},
crypto.SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
copySource: false,
metadata: nil,
encryptionType: "",
err: crypto.ErrInvalidCustomerKey}, // 2
{headers: http.Header{crypto.SSEHeader: []string{"AES256"}},
copySource: false,
metadata: nil,
encryptionType: encrypt.S3,
err: nil}, // 3
{headers: http.Header{},
copySource: false,
metadata: map[string]string{crypto.S3SealedKey: base64.StdEncoding.EncodeToString(make([]byte, 64)),
crypto.S3KMSKeyID: "kms-key",
crypto.S3KMSSealedKey: "m-key"},
encryptionType: encrypt.S3,
err: nil}, // 4
{headers: http.Header{},
copySource: true,
metadata: map[string]string{crypto.S3SealedKey: base64.StdEncoding.EncodeToString(make([]byte, 64)),
crypto.S3KMSKeyID: "kms-key",
crypto.S3KMSSealedKey: "m-key"},
encryptionType: "",
err: nil}, // 5
{headers: http.Header{crypto.SSECopyAlgorithm: []string{"AES256"},
crypto.SSECopyKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
crypto.SSECopyKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
copySource: true,
metadata: nil,
encryptionType: encrypt.SSEC,
err: nil}, // 6
{headers: http.Header{crypto.SSECopyAlgorithm: []string{"AES256"},
crypto.SSECopyKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
crypto.SSECopyKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
copySource: false,
metadata: nil,
encryptionType: "",
err: nil}, // 7
}
func TestGetDefaultOpts(t *testing.T) {
for i, test := range getDefaultOptsTests {
opts, err := getDefaultOpts(test.headers, test.copySource, test.metadata)
if test.err != err {
t.Errorf("Case %d: expected err: %v , actual err: %v", i, test.err, err)
}
if err == nil {
if opts.ServerSideEncryption == nil && test.encryptionType != "" {
t.Errorf("Case %d: expected opts to be of %v encryption type", i, test.encryptionType)
}
if opts.ServerSideEncryption != nil && test.encryptionType != opts.ServerSideEncryption.Type() {
t.Errorf("Case %d: expected opts to have encryption type %v but was %v ", i, test.encryptionType, opts.ServerSideEncryption.Type())
}
}
}
}

View File

@@ -226,6 +226,17 @@ func TestGetSetIndexes(t *testing.T) {
}
}
func getHexSequences(start int, number int, paddinglen int) (seq []string) {
for i := start; i <= number; i++ {
if paddinglen == 0 {
seq = append(seq, fmt.Sprintf("%x", i))
} else {
seq = append(seq, fmt.Sprintf(fmt.Sprintf("%%0%dx", paddinglen), i))
}
}
return seq
}
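A quick illustration of the helper for the hexadecimal expansion cases further below (output shown in comments):
fmt.Println(getHexSequences(1, 10, 0)) // [1 2 3 4 5 6 7 8 9 a]
fmt.Println(getHexSequences(1, 10, 3)) // [001 002 003 004 005 006 007 008 009 00a]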
func getSequences(start int, number int, paddinglen int) (seq []string) {
for i := start; i <= number; i++ {
if paddinglen == 0 {
@@ -287,9 +298,9 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
"/export/set",
"",
getSequences(1, 64, 0),
Prefix: "/export/set",
Suffix: "",
Seq: getSequences(1, 64, 0),
},
},
},
@@ -305,14 +316,14 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
"",
"",
getSequences(1, 64, 0),
Prefix: "",
Suffix: "",
Seq: getSequences(1, 64, 0),
},
{
"http://minio",
"/export/set",
getSequences(2, 3, 0),
Prefix: "http://minio",
Suffix: "/export/set",
Seq: getSequences(2, 3, 0),
},
},
},
@@ -328,9 +339,9 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
"http://minio",
".mydomain.net/data",
getSequences(1, 64, 0),
Prefix: "http://minio",
Suffix: ".mydomain.net/data",
Seq: getSequences(1, 64, 0),
},
},
},
@@ -345,14 +356,14 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
"",
"/data",
getSequences(1, 16, 0),
Prefix: "",
Suffix: "/data",
Seq: getSequences(1, 16, 0),
},
{
"http://rack",
".mydomain.minio",
getSequences(1, 4, 0),
Prefix: "http://rack",
Suffix: ".mydomain.minio",
Seq: getSequences(1, 4, 0),
},
},
},
@@ -368,14 +379,14 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
"",
"",
getSequences(0, 1, 0),
Prefix: "",
Suffix: "",
Seq: getSequences(0, 1, 0),
},
{
"http://minio",
".mydomain.net/data",
getSequences(0, 15, 0),
Prefix: "http://minio",
Suffix: ".mydomain.net/data",
Seq: getSequences(0, 15, 0),
},
},
},
@@ -391,9 +402,9 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
"http://server1/data",
"",
getSequences(1, 32, 0),
Prefix: "http://server1/data",
Suffix: "",
Seq: getSequences(1, 32, 0),
},
},
},
@@ -409,9 +420,9 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
"http://server1/data",
"",
getSequences(1, 32, 2),
Prefix: "http://server1/data",
Suffix: "",
Seq: getSequences(1, 32, 2),
},
},
},
@@ -427,19 +438,19 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
"",
"",
getSequences(1, 2, 0),
Prefix: "",
Suffix: "",
Seq: getSequences(1, 2, 0),
},
{
"",
"/test",
getSequences(1, 64, 0),
Prefix: "",
Suffix: "/test",
Seq: getSequences(1, 64, 0),
},
{
"http://minio",
"/export/set",
getSequences(2, 3, 0),
Prefix: "http://minio",
Suffix: "/export/set",
Seq: getSequences(2, 3, 0),
},
},
},
@@ -456,14 +467,60 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
"",
"",
getSequences(1, 10, 0),
Prefix: "",
Suffix: "",
Seq: getSequences(1, 10, 0),
},
{
"/export",
"/disk",
getSequences(1, 10, 0),
Prefix: "/export",
Suffix: "/disk",
Seq: getSequences(1, 10, 0),
},
},
},
nil,
[][]uint64{{10, 10, 10, 10, 10, 10, 10, 10, 10, 10}},
},
true,
},
// IPv6 ellipses with hexadecimal expansion
{
"http://[2001:3984:3989::{1...a}]/disk{1...10}",
endpointSet{
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
Prefix: "",
Suffix: "",
Seq: getSequences(1, 10, 0),
},
{
Prefix: "http://[2001:3984:3989::",
Suffix: "]/disk",
Seq: getHexSequences(1, 10, 0),
},
},
},
nil,
[][]uint64{{10, 10, 10, 10, 10, 10, 10, 10, 10, 10}},
},
true,
},
// IPv6 ellipses with hexadecimal expansion with 3 position numerics.
{
"http://[2001:3984:3989::{001...00a}]/disk{1...10}",
endpointSet{
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
Prefix: "",
Suffix: "",
Seq: getSequences(1, 10, 0),
},
{
Prefix: "http://[2001:3984:3989::",
Suffix: "]/disk",
Seq: getHexSequences(1, 10, 3),
},
},
},

View File

@@ -29,6 +29,9 @@ import (
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/cpu"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/mem"
"github.com/minio/minio/pkg/mountinfo"
)
@@ -91,7 +94,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
// - Scheme field must contain "http" or "https"
// - All fields must be empty except Host and Path.
if !((u.Scheme == "http" || u.Scheme == "https") &&
u.User == nil && u.Opaque == "" && u.ForceQuery == false && u.RawQuery == "" && u.Fragment == "") {
u.User == nil && u.Opaque == "" && !u.ForceQuery && u.RawQuery == "" && u.Fragment == "") {
return ep, fmt.Errorf("invalid URL endpoint format")
}
@@ -112,6 +115,9 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
return ep, fmt.Errorf("invalid URL endpoint format: port number must be between 1 to 65535")
}
}
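// Strip any IPv6 zone identifier (everything from '%' onwards) before validating the host.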
if i := strings.Index(host, "%"); i > -1 {
host = host[:i]
}
if host == "" {
return ep, fmt.Errorf("invalid URL endpoint format: empty host name")
@@ -152,7 +158,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
// Only check if the arg is an IP address and ask for a scheme since it's absent.
// localhost, example.com, or any FQDN cannot be disambiguated from a regular file path such as
// /mnt/export1. So we go ahead and start the minio server in FS mode in these cases.
if isHostIPv4(arg) {
if isHostIP(arg) {
return ep, fmt.Errorf("invalid URL endpoint format: missing scheme http or https")
}
u = &url.URL{Path: path.Clean(arg)}
@@ -194,6 +200,78 @@ func (endpoints EndpointList) GetString(i int) string {
return endpoints[i].String()
}
// localEndpointsMemUsage - returns ServerMemUsageInfo for only the
// local endpoints from the given list of endpoints
func localEndpointsMemUsage(endpoints EndpointList) ServerMemUsageInfo {
var memUsages []mem.Usage
var historicUsages []mem.Usage
scratchSpace := map[string]bool{}
for _, endpoint := range endpoints {
// Only proceed for local endpoints
if endpoint.IsLocal {
if _, ok := scratchSpace[endpoint.Host]; ok {
continue
}
memUsages = append(memUsages, mem.GetUsage())
historicUsages = append(historicUsages, mem.GetHistoricUsage())
scratchSpace[endpoint.Host] = true
}
}
return ServerMemUsageInfo{
Addr: GetLocalPeer(endpoints),
Usage: memUsages,
HistoricUsage: historicUsages,
}
}
// localEndpointsCPULoad - returns ServerCPULoadInfo for only the
// local endpoints from the given list of endpoints
func localEndpointsCPULoad(endpoints EndpointList) ServerCPULoadInfo {
var cpuLoads []cpu.Load
var historicLoads []cpu.Load
scratchSpace := map[string]bool{}
for _, endpoint := range endpoints {
// Only proceed for local endpoints
if endpoint.IsLocal {
if _, ok := scratchSpace[endpoint.Host]; ok {
continue
}
cpuLoads = append(cpuLoads, cpu.GetLoad())
historicLoads = append(historicLoads, cpu.GetHistoricLoad())
scratchSpace[endpoint.Host] = true
}
}
return ServerCPULoadInfo{
Addr: GetLocalPeer(endpoints),
Load: cpuLoads,
HistoricLoad: historicLoads,
}
}
// localEndpointsDrivePerf - returns ServerDrivesPerfInfo for only the
// local endpoints from the given list of endpoints
func localEndpointsDrivePerf(endpoints EndpointList) ServerDrivesPerfInfo {
var dps []disk.Performance
for _, endpoint := range endpoints {
// Only proceed for local endpoints
if endpoint.IsLocal {
if _, err := os.Stat(endpoint.Path); err != nil {
// Since this drive is not available, add relevant details and proceed
dps = append(dps, disk.Performance{Path: endpoint.Path, Error: err.Error()})
continue
}
dp := disk.GetPerformance(pathJoin(endpoint.Path, minioMetaTmpBucket, mustGetUUID()))
dp.Path = endpoint.Path
dps = append(dps, dp)
}
}
return ServerDrivesPerfInfo{
Addr: GetLocalPeer(endpoints),
Perf: dps,
}
}
// NewEndpointList - returns new endpoint list based on input args.
func NewEndpointList(args ...string) (endpoints EndpointList, err error) {
var endpointType EndpointType
@@ -224,7 +302,6 @@ func NewEndpointList(args ...string) (endpoints EndpointList, err error) {
uniqueArgs.Add(arg)
endpoints = append(endpoints, endpoint)
}
return endpoints, nil
}
@@ -341,7 +418,7 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
if err != nil {
host = endpoint.Host
}
hostIPSet, _ := getHostIP4(host)
hostIPSet, _ := getHostIP(host)
if IPSet, ok := pathIPMap[endpoint.Path]; ok {
if !IPSet.Intersection(hostIPSet).IsEmpty() {
return serverAddr, endpoints, setupType,
@@ -411,12 +488,12 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
host = localServerAddr
}
ipList, err := getHostIP4(host)
ipList, err := getHostIP(host)
logger.FatalIf(err, "unexpected error when resolving host '%s'", host)
// Filter ipList by IPs that start with '127.'.
// Filter ipList by IPs that start with '127.' or '::1'
loopBackIPs := ipList.FuncMatch(func(ip string, matchString string) bool {
return strings.HasPrefix(ip, "127.")
return strings.HasPrefix(ip, "127.") || strings.HasPrefix(ip, "::1")
}, "")
// If a loopback IP is found and ipList contains only loopback IPs, then error out.
@@ -455,7 +532,12 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
return serverAddr, endpoints, setupType, err
}
updateDomainIPs(uniqueArgs)
_, dok := os.LookupEnv("MINIO_DOMAIN")
_, eok := os.LookupEnv("MINIO_ETCD_ENDPOINTS")
_, iok := os.LookupEnv("MINIO_PUBLIC_IPS")
if dok && eok && !iok {
updateDomainIPs(uniqueArgs)
}
setupType = DistXLSetupType
return serverAddr, endpoints, setupType, nil
@@ -480,10 +562,10 @@ func GetLocalPeer(endpoints EndpointList) (localPeer string) {
// Local peer can be empty in FS or Erasure coded mode.
// If so, return globalMinioHost + globalMinioPort value.
if globalMinioHost != "" {
return globalMinioHost + ":" + globalMinioPort
return net.JoinHostPort(globalMinioHost, globalMinioPort)
}
return "127.0.0.1:" + globalMinioPort
return net.JoinHostPort("127.0.0.1", globalMinioPort)
}
return peerSet.ToSlice()[0]
}
@@ -509,21 +591,21 @@ func GetRemotePeers(endpoints EndpointList) []string {
return peerSet.ToSlice()
}
// In a federated and distributed setup, update the IP addresses of the hosts passed on the command line
// if MINIO_PUBLIC_IPS is not set manually
func updateDomainIPs(endPoints set.StringSet) {
_, dok := os.LookupEnv("MINIO_DOMAIN")
_, eok := os.LookupEnv("MINIO_ETCD_ENDPOINTS")
_, iok := os.LookupEnv("MINIO_PUBLIC_IPS")
if dok && eok && !iok {
globalDomainIPs = set.NewStringSet()
for e := range endPoints {
host, _, _ := net.SplitHostPort(e)
ipList, _ := getHostIP4(host)
remoteIPList := ipList.FuncMatch(func(ip string, matchString string) bool {
return !strings.HasPrefix(ip, "127.")
}, "")
globalDomainIPs.Add(remoteIPList.ToSlice()[0])
ipList := set.NewStringSet()
for e := range endPoints {
host, _, err := net.SplitHostPort(e)
if err != nil {
if strings.Contains(err.Error(), "missing port in address") {
host = e
} else {
continue
}
}
IPs, _ := getHostIP(host)
ipList = ipList.Union(IPs)
}
globalDomainIPs = ipList.FuncMatch(func(ip string, matchString string) bool {
return !strings.HasPrefix(ip, "127.") && !strings.HasPrefix(ip, "::1")
}, "")
}

View File

@@ -304,27 +304,27 @@ func TestCreateEndpoints(t *testing.T) {
}, DistXLSetupType, nil},
}
for _, testCase := range testCases {
for i, testCase := range testCases {
serverAddr, endpoints, setupType, err := CreateEndpoints(testCase.serverAddr, testCase.args...)
if err == nil {
if testCase.expectedErr != nil {
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
t.Fatalf("Test (%d) error: expected = %v, got = <nil>", i+1, testCase.expectedErr)
} else {
if serverAddr != testCase.expectedServerAddr {
t.Fatalf("serverAddr: expected = %v, got = %v", testCase.expectedServerAddr, serverAddr)
t.Fatalf("Test (%d) serverAddr: expected = %v, got = %v", i+1, testCase.expectedServerAddr, serverAddr)
}
if !reflect.DeepEqual(endpoints, testCase.expectedEndpoints) {
t.Fatalf("endpoints: expected = %v, got = %v", testCase.expectedEndpoints, endpoints)
t.Fatalf("Test (%d) endpoints: expected = %v, got = %v", i+1, testCase.expectedEndpoints, endpoints)
}
if setupType != testCase.expectedSetupType {
t.Fatalf("setupType: expected = %v, got = %v", testCase.expectedSetupType, setupType)
t.Fatalf("Test (%d) setupType: expected = %v, got = %v", i+1, testCase.expectedSetupType, setupType)
}
}
} else if testCase.expectedErr == nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
t.Fatalf("Test (%d) error: expected = <nil>, got = %v", i+1, err)
} else if err.Error() != testCase.expectedErr.Error() {
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
t.Fatalf("Test (%d) error: expected = %v, got = %v", i+1, testCase.expectedErr, err)
}
}
}

182
cmd/environment.go Normal file
View File

@@ -0,0 +1,182 @@
// Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"encoding/hex"
"errors"
"fmt"
"os"
"strconv"
"strings"
"github.com/minio/minio/cmd/crypto"
)
const (
// EnvKMSMasterKey is the environment variable used to specify
// a KMS master key used to protect SSE-S3 per-object keys.
// Valid values must be of the form: "KEY_ID:32_BYTE_HEX_VALUE".
EnvKMSMasterKey = "MINIO_SSE_MASTER_KEY"
// EnvAutoEncryption is the environment variable used to enable or disable
// SSE-S3 auto-encryption. SSE-S3 auto-encryption, if enabled,
// requires a valid KMS configuration and turns any non-SSE-C
// request into an SSE-S3 request.
// If present, EnvAutoEncryption must be either "on" or "off".
EnvAutoEncryption = "MINIO_SSE_AUTO_ENCRYPTION"
)
const (
// EnvVaultEndpoint is the environment variable used to specify
// the vault HTTPS endpoint.
EnvVaultEndpoint = "MINIO_SSE_VAULT_ENDPOINT"
// EnvVaultAuthType is the environment variable used to specify
// the authentication type for vault.
EnvVaultAuthType = "MINIO_SSE_VAULT_AUTH_TYPE"
// EnvVaultAppRoleID is the environment variable used to specify
// the vault AppRole ID.
EnvVaultAppRoleID = "MINIO_SSE_VAULT_APPROLE_ID"
// EnvVaultAppSecretID is the environment variable used to specify
// the vault AppRole secret corresponding to the AppRole ID.
EnvVaultAppSecretID = "MINIO_SSE_VAULT_APPROLE_SECRET"
// EnvVaultKeyVersion is the environment variable used to specify
// the vault key version.
EnvVaultKeyVersion = "MINIO_SSE_VAULT_KEY_VERSION"
// EnvVaultKeyName is the environment variable used to specify
// the vault named key-ring. In the S3 context it's referred to as
// customer master key ID (CMK-ID).
EnvVaultKeyName = "MINIO_SSE_VAULT_KEY_NAME"
// EnvVaultCAPath is the environment variable used to specify the
// path to a directory of PEM-encoded CA cert files. These CA cert
// files are used to authenticate Minio to Vault over mTLS.
EnvVaultCAPath = "MINIO_SSE_VAULT_CAPATH"
// EnvVaultNamespace is the environment variable used to specify
// vault namespace. The vault namespace is used if the enterprise
// version of Hashicorp Vault is used.
EnvVaultNamespace = "MINIO_SSE_VAULT_NAMESPACE"
)
// Environment provides functions for accessing environment
// variables.
var Environment = environment{}
type environment struct{}
// Get retrieves the value of the environment variable named
// by the key. If the variable is present in the environment the
// value (which may be empty) is returned. Otherwise it returns
// the specified default value.
func (environment) Get(key, defaultValue string) string {
if v, ok := os.LookupEnv(key); ok {
return v
}
return defaultValue
}
// Lookup retrieves the value of the environment variable named
// by the key. If the variable is present in the environment the
// value (which may be empty) is returned and the boolean is true.
// Otherwise the returned value will be empty and the boolean will
// be false.
func (environment) Lookup(key string) (string, bool) { return os.LookupEnv(key) }
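For context, a minimal standalone sketch of how Get behaves (a hypothetical example, not part of this diff; only os.LookupEnv semantics are assumed):

// Sketch: Get returns the environment value when set, else the default.
package main

import (
	"fmt"
	"os"
)

type environment struct{}

func (environment) Get(key, defaultValue string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v // present, possibly empty
	}
	return defaultValue
}

func main() {
	env := environment{}
	os.Setenv("MINIO_SSE_VAULT_ENDPOINT", "https://vault.example.com:8200")
	// The environment value wins over the config-file default.
	fmt.Println(env.Get("MINIO_SSE_VAULT_ENDPOINT", "https://127.0.0.1:8200"))
	// Output: https://vault.example.com:8200
}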
// LookupKMSConfig extracts the KMS configuration provided by environment
// variables and merges it with the provided KMS configuration. The
// merging follows these rules:
//
// 1. A valid value provided as environment variable is higher prioritized
// than the provided configuration and overwrites the value from the
// configuration file.
//
// 2. A value specified as environment variable never changes the configuration
// file. So it is never made a persistent setting.
//
// It sets the global KMS configuration according to the merged configuration
// on success.
func (env environment) LookupKMSConfig(config crypto.KMSConfig) (err error) {
// Lookup Hashicorp-Vault configuration & overwrite config entry if ENV var is present
config.Vault.Endpoint = env.Get(EnvVaultEndpoint, config.Vault.Endpoint)
config.Vault.CAPath = env.Get(EnvVaultCAPath, config.Vault.CAPath)
config.Vault.Auth.Type = env.Get(EnvVaultAuthType, config.Vault.Auth.Type)
config.Vault.Auth.AppRole.ID = env.Get(EnvVaultAppRoleID, config.Vault.Auth.AppRole.ID)
config.Vault.Auth.AppRole.Secret = env.Get(EnvVaultAppSecretID, config.Vault.Auth.AppRole.Secret)
config.Vault.Key.Name = env.Get(EnvVaultKeyName, config.Vault.Key.Name)
config.Vault.Namespace = env.Get(EnvVaultNamespace, config.Vault.Namespace)
keyVersion := env.Get(EnvVaultKeyVersion, strconv.Itoa(config.Vault.Key.Version))
config.Vault.Key.Version, err = strconv.Atoi(keyVersion)
if err != nil {
return fmt.Errorf("Invalid ENV variable: Unable to parse %s value (`%s`)", EnvVaultKeyVersion, keyVersion)
}
if err = config.Vault.Verify(); err != nil {
return err
}
// Lookup KMS master keys - only available through ENV.
if masterKey, ok := env.Lookup(EnvKMSMasterKey); ok {
if !config.Vault.IsEmpty() { // Vault and KMS master key provided
return errors.New("Ambiguous KMS configuration: vault configuration and a master key are provided at the same time")
}
globalKMSKeyID, GlobalKMS, err = parseKMSMasterKey(masterKey)
if err != nil {
return err
}
}
if !config.Vault.IsEmpty() {
GlobalKMS, err = crypto.NewVault(config.Vault)
if err != nil {
return err
}
globalKMSKeyID = config.Vault.Key.Name
}
autoEncryption, err := ParseBoolFlag(env.Get(EnvAutoEncryption, "off"))
if err != nil {
return err
}
globalAutoEncryption = bool(autoEncryption)
if globalAutoEncryption && GlobalKMS == nil { // auto-encryption enabled but no KMS
return errors.New("Invalid KMS configuration: auto-encryption is enabled but no valid KMS configuration is present")
}
return nil
}
// parseKMSMasterKey parses the value of the environment variable
// `EnvKMSMasterKey` and returns a key-ID and a master-key KMS on success.
func parseKMSMasterKey(envArg string) (string, crypto.KMS, error) {
values := strings.SplitN(envArg, ":", 2)
if len(values) != 2 {
return "", nil, fmt.Errorf("Invalid KMS master key: %s does not contain a ':'", envArg)
}
var (
keyID = values[0]
hexKey = values[1]
)
if len(hexKey) != 64 { // 2 hex chars = 1 byte, so a 32-byte key is 64 chars
return "", nil, fmt.Errorf("Invalid KMS master key: %s not a 32 bytes long HEX value", hexKey)
}
var masterKey [32]byte
if _, err := hex.Decode(masterKey[:], []byte(hexKey)); err != nil {
return "", nil, fmt.Errorf("Invalid KMS master key: %s not a 32 bytes long HEX value", hexKey)
}
return keyID, crypto.NewKMS(masterKey), nil
}
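As a worked example of the KEY_ID:32_BYTE_HEX_VALUE format parsed above (the key ID and the all-zero hex key are made up; a 32-byte key is 64 hex characters):

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	envArg := "my-minio-key:" + strings.Repeat("00", 32) // 64 hex chars
	values := strings.SplitN(envArg, ":", 2)
	keyID, hexKey := values[0], values[1]

	var masterKey [32]byte
	if _, err := hex.Decode(masterKey[:], []byte(hexKey)); err != nil {
		panic(err)
	}
	fmt.Println(keyID, len(hexKey)) // my-minio-key 64
}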


@@ -19,29 +19,30 @@ package cmd
import (
"context"
"io"
"sync"
"github.com/minio/minio/cmd/logger"
)
// Reads in parallel from bitrotReaders.
// Reads in parallel from readers.
type parallelReader struct {
readers []*bitrotReader
readers []io.ReaderAt
dataBlocks int
offset int64
shardSize int64
shardFileSize int64
buf [][]byte
}
// newParallelReader returns parallelReader.
func newParallelReader(readers []*bitrotReader, dataBlocks int, offset int64, fileSize int64, blocksize int64) *parallelReader {
shardSize := ceilFrac(blocksize, int64(dataBlocks))
shardFileSize := getErasureShardFileSize(blocksize, fileSize, dataBlocks)
func newParallelReader(readers []io.ReaderAt, e Erasure, offset, totalLength int64) *parallelReader {
return &parallelReader{
readers,
dataBlocks,
(offset / blocksize) * shardSize,
shardSize,
shardFileSize,
e.dataBlocks,
(offset / e.blockSize) * e.ShardSize(),
e.ShardSize(),
e.ShardFileSize(totalLength),
make([][]byte, len(readers)),
}
}
@@ -56,79 +57,83 @@ func (p *parallelReader) canDecode(buf [][]byte) bool {
return bufCount >= p.dataBlocks
}
// Read reads from bitrotReaders in parallel. Returns p.dataBlocks number of bufs.
// Read reads from readers in parallel. Returns p.dataBlocks number of bufs.
func (p *parallelReader) Read() ([][]byte, error) {
type errIdx struct {
idx int
buf []byte
err error
}
errCh := make(chan errIdx)
currReaderIndex := 0
newBuf := make([][]byte, len(p.readers))
var newBufLK sync.RWMutex
if p.offset+p.shardSize > p.shardFileSize {
p.shardSize = p.shardFileSize - p.offset
}
read := func(currReaderIndex int) {
b, err := p.readers[currReaderIndex].ReadChunk(p.offset, p.shardSize)
errCh <- errIdx{currReaderIndex, b, err}
readTriggerCh := make(chan bool, len(p.readers))
for i := 0; i < p.dataBlocks; i++ {
// Setup read triggers for p.dataBlocks number of reads so that it reads in parallel.
readTriggerCh <- true
}
readerCount := 0
for _, r := range p.readers {
if r != nil {
readerCount++
}
}
if readerCount < p.dataBlocks {
return nil, errXLReadQuorum
}
readerCount = 0
for i, r := range p.readers {
if r == nil {
continue
}
go read(i)
readerCount++
if readerCount == p.dataBlocks {
currReaderIndex = i + 1
readerIndex := 0
var wg sync.WaitGroup
// if readTrigger is true, it implies next disk.ReadAt() should be tried
// if readTrigger is false, it implies previous disk.ReadAt() was successful and there is no need
// to try reading the next disk.
for readTrigger := range readTriggerCh {
newBufLK.RLock()
canDecode := p.canDecode(newBuf)
newBufLK.RUnlock()
if canDecode {
break
}
}
for errVal := range errCh {
if errVal.err == nil {
newBuf[errVal.idx] = errVal.buf
if p.canDecode(newBuf) {
p.offset += int64(p.shardSize)
return newBuf, nil
}
continue
}
p.readers[errVal.idx] = nil
for currReaderIndex < len(p.readers) {
if p.readers[currReaderIndex] != nil {
break
}
currReaderIndex++
}
if currReaderIndex == len(p.readers) {
if readerIndex == len(p.readers) {
break
}
go read(currReaderIndex)
currReaderIndex++
if !readTrigger {
continue
}
wg.Add(1)
go func(i int) {
defer wg.Done()
disk := p.readers[i]
if disk == nil {
// Since disk is nil, trigger another read.
readTriggerCh <- true
return
}
if p.buf[i] == nil {
// Reading first time on this disk, hence the buffer needs to be allocated.
// Subsequent reads will re-use this buffer.
p.buf[i] = make([]byte, p.shardSize)
}
// For the last shard, the shard size might be less than the previous shard sizes.
// Hence the following statement ensures that the buffer is reset to the right size.
p.buf[i] = p.buf[i][:p.shardSize]
_, err := disk.ReadAt(p.buf[i], p.offset)
if err != nil {
p.readers[i] = nil
// Since ReadAt returned error, trigger another read.
readTriggerCh <- true
return
}
newBufLK.Lock()
newBuf[i] = p.buf[i]
newBufLK.Unlock()
// Since ReadAt returned success, there is no need to trigger another read.
readTriggerCh <- false
}(readerIndex)
readerIndex++
}
wg.Wait()
if p.canDecode(newBuf) {
p.offset += p.shardSize
return newBuf, nil
}
return nil, errXLReadQuorum
}
// Decode reads from readers, reconstructs data if needed and writes the data to the writer.
func (e Erasure) Decode(ctx context.Context, writer io.Writer, readers []*bitrotReader, offset, length, totalLength int64) error {
func (e Erasure) Decode(ctx context.Context, writer io.Writer, readers []io.ReaderAt, offset, length, totalLength int64) error {
if offset < 0 || length < 0 {
logger.LogIf(ctx, errInvalidArgument)
return errInvalidArgument
@@ -141,7 +146,7 @@ func (e Erasure) Decode(ctx context.Context, writer io.Writer, readers []*bitrot
return nil
}
reader := newParallelReader(readers, e.dataBlocks, offset, totalLength, e.blockSize)
reader := newParallelReader(readers, e, offset, totalLength)
startBlock := offset / e.blockSize
endBlock := (offset + length) / e.blockSize
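The readTriggerCh pattern in parallelReader.Read above can be sketched in isolation roughly as follows. The channel is seeded with dataBlocks tokens; every read pushes one token back, true on failure (try the next reader) and false on success (no replacement read needed). The reader counts and failure set below are illustrative only:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const readers, dataBlocks = 6, 4
	failed := map[int]bool{1: true} // pretend reader 1 returns an error

	readTriggerCh := make(chan bool, readers)
	for i := 0; i < dataBlocks; i++ {
		readTriggerCh <- true // start dataBlocks reads in parallel
	}

	var (
		wg        sync.WaitGroup
		mu        sync.Mutex
		succeeded int
	)
	idx := 0
	for trigger := range readTriggerCh {
		mu.Lock()
		done := succeeded >= dataBlocks
		mu.Unlock()
		if done || idx == readers {
			break
		}
		if !trigger {
			continue // a previous read succeeded; no replacement read needed
		}
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if failed[i] {
				readTriggerCh <- true // error: trigger a read on the next reader
				return
			}
			mu.Lock()
			succeeded++
			mu.Unlock()
			readTriggerCh <- false // success: no extra read required
		}(idx)
		idx++
	}
	wg.Wait()
	fmt.Println("successful reads:", succeeded) // successful reads: 4
}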


@@ -28,7 +28,7 @@ import (
humanize "github.com/dustin/go-humanize"
)
func (d badDisk) ReadFile(volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) {
func (a badDisk) ReadFile(volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) {
return 0, errFaultyDisk
}
@@ -41,26 +41,28 @@ var erasureDecodeTests = []struct {
algorithm BitrotAlgorithm
shouldFail, shouldFailQuorum bool
}{
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 0
{dataBlocks: 3, onDisks: 6, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 1
{dataBlocks: 4, onDisks: 8, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 2
{dataBlocks: 5, onDisks: 10, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 1, length: oneMiByte - 1, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 3
{dataBlocks: 6, onDisks: 12, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: oneMiByte, length: 0, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 4
{dataBlocks: 7, onDisks: 14, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: 3, length: 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 5
{dataBlocks: 8, onDisks: 16, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: 4, length: 8 * 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 6
{dataBlocks: 7, onDisks: 14, offDisks: 7, blocksize: int64(blockSizeV1), data: oneMiByte, offset: oneMiByte, length: 1, algorithm: DefaultBitrotAlgorithm, shouldFail: true, shouldFailQuorum: false}, // 7
{dataBlocks: 6, onDisks: 12, offDisks: 6, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 8
{dataBlocks: 5, onDisks: 10, offDisks: 5, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 9
{dataBlocks: 4, onDisks: 8, offDisks: 4, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 10
{dataBlocks: 3, onDisks: 6, offDisks: 3, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 11
{dataBlocks: 2, onDisks: 4, offDisks: 2, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 12
{dataBlocks: 2, onDisks: 4, offDisks: 1, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 13
{dataBlocks: 3, onDisks: 6, offDisks: 2, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 14
{dataBlocks: 4, onDisks: 8, offDisks: 3, blocksize: int64(2 * oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 15
{dataBlocks: 5, onDisks: 10, offDisks: 6, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 16
{dataBlocks: 5, onDisks: 10, offDisks: 2, blocksize: int64(blockSizeV1), data: 2 * oneMiByte, offset: oneMiByte, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 17
{dataBlocks: 5, onDisks: 10, offDisks: 1, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 18
{dataBlocks: 6, onDisks: 12, offDisks: 3, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 19
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 0
{dataBlocks: 3, onDisks: 6, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 1
{dataBlocks: 4, onDisks: 8, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 2
{dataBlocks: 5, onDisks: 10, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 1, length: oneMiByte - 1, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 3
{dataBlocks: 6, onDisks: 12, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: oneMiByte, length: 0, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 4
{dataBlocks: 7, onDisks: 14, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: 3, length: 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 5
{dataBlocks: 8, onDisks: 16, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: 4, length: 8 * 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 6
{dataBlocks: 7, onDisks: 14, offDisks: 7, blocksize: int64(blockSizeV1), data: oneMiByte, offset: oneMiByte, length: 1, algorithm: DefaultBitrotAlgorithm, shouldFail: true, shouldFailQuorum: false}, // 7
{dataBlocks: 6, onDisks: 12, offDisks: 6, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 8
{dataBlocks: 5, onDisks: 10, offDisks: 5, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 9
{dataBlocks: 4, onDisks: 8, offDisks: 4, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 10
{dataBlocks: 3, onDisks: 6, offDisks: 3, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 11
{dataBlocks: 2, onDisks: 4, offDisks: 2, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 12
{dataBlocks: 2, onDisks: 4, offDisks: 1, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 13
{dataBlocks: 3, onDisks: 6, offDisks: 2, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 14
{dataBlocks: 4, onDisks: 8, offDisks: 3, blocksize: int64(2 * oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 15
{dataBlocks: 5, onDisks: 10, offDisks: 6, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 16
{dataBlocks: 5, onDisks: 10, offDisks: 2, blocksize: int64(blockSizeV1), data: 2 * oneMiByte, offset: oneMiByte, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 17
{dataBlocks: 5, onDisks: 10, offDisks: 1, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 18
{dataBlocks: 6, onDisks: 12, offDisks: 3, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 19
{dataBlocks: 6, onDisks: 12, offDisks: 7, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 20
{dataBlocks: 8, onDisks: 16, offDisks: 8, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 21
{dataBlocks: 8, onDisks: 16, offDisks: 9, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 22
@@ -104,11 +106,12 @@ func TestErasureDecode(t *testing.T) {
writeAlgorithm = DefaultBitrotAlgorithm
}
buffer := make([]byte, test.blocksize, 2*test.blocksize)
writers := make([]*bitrotWriter, len(disks))
writers := make([]io.Writer, len(disks))
for i, disk := range disks {
writers[i] = newBitrotWriter(disk, "testbucket", "object", writeAlgorithm)
writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(test.data), writeAlgorithm, erasure.ShardSize())
}
n, err := erasure.Encode(context.Background(), bytes.NewReader(data[:]), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
if err != nil {
setup.Remove()
t.Fatalf("Test %d: failed to create erasure test file: %v", i, err)
@@ -124,17 +127,19 @@ func TestErasureDecode(t *testing.T) {
}
// Get the checksums of the current part.
bitrotReaders := make([]*bitrotReader, len(disks))
bitrotReaders := make([]io.ReaderAt, len(disks))
for index, disk := range disks {
if disk == OfflineDisk {
continue
}
endOffset := getErasureShardFileEndOffset(test.offset, test.length, test.data, test.blocksize, erasure.dataBlocks)
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", writeAlgorithm, endOffset, writers[index].Sum())
tillOffset := erasure.ShardFileTillOffset(test.offset, test.length, test.data)
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", tillOffset, writeAlgorithm, bitrotWriterSum(writers[index]), erasure.ShardSize())
}
writer := bytes.NewBuffer(nil)
err = erasure.Decode(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data)
closeBitrotReaders(bitrotReaders)
if err != nil && !test.shouldFail {
t.Errorf("Test %d: should pass but failed with: %v", i, err)
}
@@ -143,31 +148,41 @@ func TestErasureDecode(t *testing.T) {
}
if err == nil {
if content := writer.Bytes(); !bytes.Equal(content, data[test.offset:test.offset+test.length]) {
t.Errorf("Test %d: read retruns wrong file content", i)
t.Errorf("Test %d: read retruns wrong file content.", i)
}
}
for i, r := range bitrotReaders {
if r == nil {
disks[i] = OfflineDisk
}
}
if err == nil && !test.shouldFail {
bitrotReaders = make([]*bitrotReader, len(disks))
bitrotReaders = make([]io.ReaderAt, len(disks))
for index, disk := range disks {
if disk == OfflineDisk {
continue
}
endOffset := getErasureShardFileEndOffset(test.offset, test.length, test.data, test.blocksize, erasure.dataBlocks)
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", writeAlgorithm, endOffset, writers[index].Sum())
tillOffset := erasure.ShardFileTillOffset(test.offset, test.length, test.data)
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", tillOffset, writeAlgorithm, bitrotWriterSum(writers[index]), erasure.ShardSize())
}
for j := range disks[:test.offDisks] {
bitrotReaders[j].disk = badDisk{nil}
if bitrotReaders[j] == nil {
continue
}
switch r := bitrotReaders[j].(type) {
case *wholeBitrotReader:
r.disk = badDisk{nil}
case *streamingBitrotReader:
r.disk = badDisk{nil}
}
}
if test.offDisks > 0 {
bitrotReaders[0] = nil
}
writer.Reset()
err = erasure.Decode(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data)
closeBitrotReaders(bitrotReaders)
if err != nil && !test.shouldFailQuorum {
t.Errorf("Test %d: should pass but failed with: %v", i, err)
}
@@ -213,12 +228,12 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
t.Fatal(err)
}
writers := make([]*bitrotWriter, len(disks))
writers := make([]io.Writer, len(disks))
for i, disk := range disks {
if disk == nil {
continue
}
writers[i] = newBitrotWriter(disk, "testbucket", "object", DefaultBitrotAlgorithm)
writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(length), DefaultBitrotAlgorithm, erasure.ShardSize())
}
// 10000 iterations with random offsets and lengths.
@@ -227,6 +242,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
// Create a test file to read from.
buffer := make([]byte, blockSize, 2*blockSize)
n, err := erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
if err != nil {
t.Fatal(err)
}
@@ -247,15 +263,16 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
expected := data[offset : offset+readLen]
// Get the checksums of the current part.
bitrotReaders := make([]*bitrotReader, len(disks))
bitrotReaders := make([]io.ReaderAt, len(disks))
for index, disk := range disks {
if disk == OfflineDisk {
continue
}
endOffset := getErasureShardFileEndOffset(offset, readLen, length, blockSize, erasure.dataBlocks)
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", DefaultBitrotAlgorithm, endOffset, writers[index].Sum())
tillOffset := erasure.ShardFileTillOffset(offset, readLen, length)
bitrotReaders[index] = newStreamingBitrotReader(disk, "testbucket", "object", tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize())
}
err = erasure.Decode(context.Background(), buf, bitrotReaders, offset, readLen, length)
closeBitrotReaders(bitrotReaders)
if err != nil {
t.Fatal(err, offset, readLen)
}
@@ -281,17 +298,18 @@ func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64,
b.Fatalf("failed to create ErasureStorage: %v", err)
}
writers := make([]*bitrotWriter, len(disks))
writers := make([]io.Writer, len(disks))
for i, disk := range disks {
if disk == nil {
continue
}
writers[i] = newBitrotWriter(disk, "testbucket", "object", DefaultBitrotAlgorithm)
writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize())
}
content := make([]byte, size)
buffer := make([]byte, blockSizeV1, 2*blockSizeV1)
_, err = erasure.Encode(context.Background(), bytes.NewReader(content), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
if err != nil {
b.Fatalf("failed to create erasure test file: %v", err)
}
@@ -307,17 +325,18 @@ func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64,
b.SetBytes(size)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
bitrotReaders := make([]*bitrotReader, len(disks))
bitrotReaders := make([]io.ReaderAt, len(disks))
for index, disk := range disks {
if writers[index] == nil {
continue
}
endOffset := getErasureShardFileEndOffset(0, size, size, erasure.blockSize, erasure.dataBlocks)
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", DefaultBitrotAlgorithm, endOffset, writers[index].Sum())
tillOffset := erasure.ShardFileTillOffset(0, size, size)
bitrotReaders[index] = newStreamingBitrotReader(disk, "testbucket", "object", tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize())
}
if err = erasure.Decode(context.Background(), bytes.NewBuffer(content[:0]), bitrotReaders, 0, size, size); err != nil {
panic(err)
}
closeBitrotReaders(bitrotReaders)
}
}


@@ -25,15 +25,15 @@ import (
"github.com/minio/minio/cmd/logger"
)
// Writes in parallel to bitrotWriters
// Writes in parallel to writers
type parallelWriter struct {
writers []*bitrotWriter
writers []io.Writer
writeQuorum int
errs []error
}
// Append appends data to bitrotWriters in parallel.
func (p *parallelWriter) Append(ctx context.Context, blocks [][]byte) error {
// Write writes data to writers in parallel.
func (p *parallelWriter) Write(ctx context.Context, blocks [][]byte) error {
var wg sync.WaitGroup
for i := range p.writers {
@@ -45,7 +45,7 @@ func (p *parallelWriter) Append(ctx context.Context, blocks [][]byte) error {
wg.Add(1)
go func(i int) {
defer wg.Done()
p.errs[i] = p.writers[i].Append(blocks[i])
_, p.errs[i] = p.writers[i].Write(blocks[i])
if p.errs[i] != nil {
p.writers[i] = nil
}
@@ -70,7 +70,7 @@ func (p *parallelWriter) Append(ctx context.Context, blocks [][]byte) error {
}
// Encode reads from the reader, erasure-encodes the data and writes to the writers.
func (e *Erasure) Encode(ctx context.Context, src io.Reader, writers []*bitrotWriter, buf []byte, quorum int) (total int64, err error) {
func (e *Erasure) Encode(ctx context.Context, src io.Reader, writers []io.Writer, buf []byte, quorum int) (total int64, err error) {
writer := &parallelWriter{
writers: writers,
writeQuorum: quorum,
@@ -96,7 +96,7 @@ func (e *Erasure) Encode(ctx context.Context, src io.Reader, writers []*bitrotWr
return 0, err
}
if err = writer.Append(ctx, blocks); err != nil {
if err = writer.Write(ctx, blocks); err != nil {
logger.LogIf(ctx, err)
return 0, err
}


@@ -36,6 +36,14 @@ func (a badDisk) AppendFile(volume string, path string, buf []byte) error {
return errFaultyDisk
}
func (a badDisk) ReadFileStream(volume, path string, offset, length int64) (io.ReadCloser, error) {
return nil, errFaultyDisk
}
func (a badDisk) CreateFile(volume, path string, size int64, reader io.Reader) error {
return errFaultyDisk
}
const oneMiByte = 1 * humanize.MiByte
var erasureEncodeTests = []struct {
@@ -87,14 +95,15 @@ func TestErasureEncode(t *testing.T) {
setup.Remove()
t.Fatalf("Test %d: failed to generate random test data: %v", i, err)
}
writers := make([]*bitrotWriter, len(disks))
writers := make([]io.Writer, len(disks))
for i, disk := range disks {
if disk == OfflineDisk {
continue
}
writers[i] = newBitrotWriter(disk, "testbucket", "object", test.algorithm)
writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize())
}
n, err := erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
if err != nil && !test.shouldFail {
t.Errorf("Test %d: should pass but failed with: %v", i, err)
}
@@ -110,20 +119,26 @@ func TestErasureEncode(t *testing.T) {
if length := int64(len(data[test.offset:])); n != length {
t.Errorf("Test %d: invalid number of bytes written: got: #%d want #%d", i, n, length)
}
writers := make([]*bitrotWriter, len(disks))
writers := make([]io.Writer, len(disks))
for i, disk := range disks {
if disk == nil {
continue
}
writers[i] = newBitrotWriter(disk, "testbucket", "object2", test.algorithm)
writers[i] = newBitrotWriter(disk, "testbucket", "object2", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize())
}
for j := range disks[:test.offDisks] {
writers[j].disk = badDisk{nil}
switch w := writers[j].(type) {
case *wholeBitrotWriter:
w.disk = badDisk{nil}
case *streamingBitrotWriter:
w.iow.CloseWithError(errFaultyDisk)
}
}
if test.offDisks > 0 {
writers[0] = nil
}
n, err = erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
if err != nil && !test.shouldFailQuorum {
t.Errorf("Test %d: should pass but failed with: %v", i, err)
}
@@ -167,14 +182,16 @@ func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64,
b.SetBytes(size)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
writers := make([]*bitrotWriter, len(disks))
writers := make([]io.Writer, len(disks))
for i, disk := range disks {
if disk == OfflineDisk {
continue
}
writers[i] = newBitrotWriter(disk, "testbucket", "object", DefaultBitrotAlgorithm)
disk.DeleteFile("testbucket", "object")
writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize())
}
_, err := erasure.Encode(context.Background(), bytes.NewReader(content), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
if err != nil {
panic(err)
}


@@ -25,7 +25,7 @@ import (
// Heal heals the shard files on non-nil writers. Note that the quorum passed is 1
// as healing should continue even if it has been successful healing only one shard file.
func (e Erasure) Heal(ctx context.Context, readers []*bitrotReader, writers []*bitrotWriter, size int64) error {
func (e Erasure) Heal(ctx context.Context, readers []io.ReaderAt, writers []io.Writer, size int64) error {
r, w := io.Pipe()
go func() {
if err := e.Decode(ctx, w, readers, 0, size, size); err != nil {


@@ -21,6 +21,7 @@ import (
"context"
"crypto/rand"
"io"
"os"
"testing"
)
@@ -84,20 +85,21 @@ func TestErasureHeal(t *testing.T) {
t.Fatalf("Test %d: failed to create random test data: %v", i, err)
}
buffer := make([]byte, test.blocksize, 2*test.blocksize)
writers := make([]*bitrotWriter, len(disks))
writers := make([]io.Writer, len(disks))
for i, disk := range disks {
writers[i] = newBitrotWriter(disk, "testbucket", "testobject", test.algorithm)
writers[i] = newBitrotWriter(disk, "testbucket", "testobject", erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize())
}
_, err = erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
if err != nil {
setup.Remove()
t.Fatalf("Test %d: failed to create random test data: %v", i, err)
}
readers := make([]*bitrotReader, len(disks))
readers := make([]io.ReaderAt, len(disks))
for i, disk := range disks {
shardFilesize := getErasureShardFileSize(test.blocksize, test.size, erasure.dataBlocks)
readers[i] = newBitrotReader(disk, "testbucket", "testobject", test.algorithm, shardFilesize, writers[i].Sum())
shardFilesize := erasure.ShardFileSize(test.size)
readers[i] = newBitrotReader(disk, "testbucket", "testobject", shardFilesize, test.algorithm, bitrotWriterSum(writers[i]), erasure.ShardSize())
}
// setup stale disks for the test case
@@ -111,22 +113,30 @@ func TestErasureHeal(t *testing.T) {
}
}
for j := 0; j < test.badDisks; j++ {
readers[test.offDisks+j].disk = badDisk{nil}
switch r := readers[test.offDisks+j].(type) {
case *streamingBitrotReader:
r.disk = badDisk{nil}
case *wholeBitrotReader:
r.disk = badDisk{nil}
}
}
for j := 0; j < test.badStaleDisks; j++ {
staleDisks[j] = badDisk{nil}
}
staleWriters := make([]*bitrotWriter, len(staleDisks))
staleWriters := make([]io.Writer, len(staleDisks))
for i, disk := range staleDisks {
if disk == nil {
continue
}
staleWriters[i] = newBitrotWriter(disk, "testbucket", "testobject", test.algorithm)
os.Remove(pathJoin(disk.String(), "testbucket", "testobject"))
staleWriters[i] = newBitrotWriter(disk, "testbucket", "testobject", erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize())
}
// test case setup is complete - now call Healfile()
// test case setup is complete - now call Heal()
err = erasure.Heal(context.Background(), readers, staleWriters, test.size)
closeBitrotReaders(readers)
closeBitrotWriters(staleWriters)
if err != nil && !test.shouldFail {
t.Errorf("Test %d: should pass but it failed with: %v", i, err)
}
@@ -140,7 +150,7 @@ func TestErasureHeal(t *testing.T) {
if staleWriters[i] == nil {
continue
}
if !bytes.Equal(staleWriters[i].Sum(), writers[i].Sum()) {
if !bytes.Equal(bitrotWriterSum(staleWriters[i]), bitrotWriterSum(writers[i])) {
t.Errorf("Test %d: heal returned different bitrot checksums", i)
}
}


@@ -20,7 +20,6 @@ import (
"bytes"
"context"
"io"
"strings"
"github.com/klauspost/reedsolomon"
"github.com/minio/minio/cmd/logger"
@@ -82,7 +81,9 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data
if write < int64(len(block)) {
n, err := io.Copy(dst, bytes.NewReader(block[:write]))
if err != nil {
logger.LogIf(ctx, err)
if err != io.ErrClosedPipe {
logger.LogIf(ctx, err)
}
return 0, err
}
totalWritten += n
@@ -91,7 +92,8 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data
// Copy the block.
n, err := io.Copy(dst, bytes.NewReader(block))
if err != nil {
if !strings.Contains(err.Error(), "read/write on closed pipe") {
// The writer will be closed in case of range queries, which will emit ErrClosedPipe.
if err != io.ErrClosedPipe {
logger.LogIf(ctx, err)
}
return 0, err
@@ -107,25 +109,3 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data
// Success.
return totalWritten, nil
}
// Returns shard-file size.
func getErasureShardFileSize(blockSize int64, totalLength int64, dataBlocks int) int64 {
shardSize := ceilFrac(int64(blockSize), int64(dataBlocks))
numShards := totalLength / int64(blockSize)
lastBlockSize := totalLength % int64(blockSize)
lastShardSize := ceilFrac(lastBlockSize, int64(dataBlocks))
return shardSize*numShards + lastShardSize
}
// Returns the endOffset till which bitrotReader should read data using disk.ReadFile()
// partOffset, partLength and partSize are values of the object's part file.
func getErasureShardFileEndOffset(partOffset int64, partLength int64, partSize int64, erasureBlockSize int64, dataBlocks int) int64 {
shardSize := ceilFrac(erasureBlockSize, int64(dataBlocks))
shardFileSize := getErasureShardFileSize(erasureBlockSize, partSize, dataBlocks)
endShard := (partOffset + int64(partLength)) / erasureBlockSize
endOffset := endShard*shardSize + shardSize
if endOffset > shardFileSize {
endOffset = shardFileSize
}
return endOffset
}
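The err != io.ErrClosedPipe comparison in writeDataBlocks above leans on standard io.Pipe behaviour: once the read side goes away (as with a ranged GET that stops early), further writes fail with that sentinel error. A minimal illustration:

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	r, w := io.Pipe()
	r.Close() // the consumer stops reading, e.g. a range query is satisfied

	_, err := w.Write([]byte("more data"))
	fmt.Println(errors.Is(err, io.ErrClosedPipe)) // true
}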


@@ -32,18 +32,16 @@ type Erasure struct {
// NewErasure creates a new ErasureStorage.
func NewErasure(ctx context.Context, dataBlocks, parityBlocks int, blockSize int64) (e Erasure, err error) {
shardsize := int(ceilFrac(blockSize, int64(dataBlocks)))
erasure, err := reedsolomon.New(dataBlocks, parityBlocks, reedsolomon.WithAutoGoroutines(shardsize))
if err != nil {
logger.LogIf(ctx, err)
return e, err
}
e = Erasure{
encoder: erasure,
dataBlocks: dataBlocks,
parityBlocks: parityBlocks,
blockSize: blockSize,
}
e.encoder, err = reedsolomon.New(dataBlocks, parityBlocks, reedsolomon.WithAutoGoroutines(int(e.ShardSize())))
if err != nil {
logger.LogIf(ctx, err)
return e, err
}
return
}
@@ -94,3 +92,31 @@ func (e *Erasure) DecodeDataAndParityBlocks(ctx context.Context, data [][]byte)
}
return nil
}
// ShardSize - returns the actual shard size derived from the erasure blockSize.
func (e *Erasure) ShardSize() int64 {
return ceilFrac(e.blockSize, int64(e.dataBlocks))
}
// ShardFileSize - returns final erasure size from original size.
func (e *Erasure) ShardFileSize(totalLength int64) int64 {
if totalLength == 0 {
return 0
}
numShards := totalLength / e.blockSize
lastBlockSize := totalLength % int64(e.blockSize)
lastShardSize := ceilFrac(lastBlockSize, int64(e.dataBlocks))
return numShards*e.ShardSize() + lastShardSize
}
// ShardFileTillOffset - returns the effective offset till which erasure reading is done.
func (e *Erasure) ShardFileTillOffset(startOffset, length, totalLength int64) int64 {
shardSize := e.ShardSize()
shardFileSize := e.ShardFileSize(totalLength)
endShard := (startOffset + int64(length)) / e.blockSize
tillOffset := endShard*shardSize + shardSize
if tillOffset > shardFileSize {
tillOffset = shardFileSize
}
return tillOffset
}
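A worked example of the shard arithmetic above, with made-up sizes (blockSize = 1 MiB, dataBlocks = 4, a 2.5 MiB object); ceilFrac below is assumed to match the helper used in the diff:

package main

import "fmt"

// ceilFrac returns ceil(num/den) for positive inputs.
func ceilFrac(num, den int64) int64 { return (num + den - 1) / den }

func main() {
	const (
		blockSize   = int64(1 << 20) // 1 MiB erasure block
		dataBlocks  = int64(4)
		totalLength = int64(5 << 19) // 2.5 MiB object
	)
	shardSize := ceilFrac(blockSize, dataBlocks)                 // 262144
	numShards := totalLength / blockSize                         // 2 full blocks
	lastShardSize := ceilFrac(totalLength%blockSize, dataBlocks) // 131072
	fmt.Println(shardSize, numShards*shardSize+lastShardSize)    // 262144 655360
}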


@@ -273,7 +273,7 @@ func initFormatFS(ctx context.Context, fsPath string) (rlk *lock.RLockedFile, er
rlk.Close()
return nil, err
}
logger.SetDeploymentID(id)
globalDeploymentID = id
return rlk, nil
}
}
@@ -333,29 +333,43 @@ func formatFSFixDeploymentID(fsFormatPath string) error {
return nil
}
for {
wlk, err := lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR, 0)
formatStartTime := time.Now().Round(time.Second)
getElapsedTime := func() string {
return time.Now().Round(time.Second).Sub(formatStartTime).String()
}
doneCh := make(chan struct{})
defer close(doneCh)
var wlk *lock.LockedFile
for range newRetryTimerSimple(doneCh) {
wlk, err = lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR, 0)
if err == lock.ErrAlreadyLocked {
// Lock already present, sleep and attempt again.
time.Sleep(100 * time.Millisecond)
// Lock already present, sleep and attempt again
logger.Info("Another minio process(es) might be holding a lock to the file %s. Please kill that minio process(es) (elapsed %s)\n", fsFormatPath, getElapsedTime())
continue
}
if err != nil {
return err
}
defer wlk.Close()
err = jsonLoad(wlk, format)
if err != nil {
return err
break
}
// Check if it needs to be updated
if err = jsonLoad(wlk, format); err != nil {
break
}
// Check if format needs to be updated
if format.ID != "" {
return nil
err = nil
break
}
format.ID = mustGetUUID()
return jsonSave(wlk, format)
}
format.ID = mustGetUUID()
if err = jsonSave(wlk, format); err != nil {
break
}
}
if wlk != nil {
wlk.Close()
}
return err
}
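newRetryTimerSimple is a minio-internal helper; assuming it yields on a channel until doneCh is closed, the shape of the lock-retry loop above is roughly as follows (retryTimer below is a hypothetical stand-in):

package main

import (
	"fmt"
	"time"
)

// retryTimer is a stand-in for minio's newRetryTimerSimple: it emits
// attempt numbers until doneCh is closed.
func retryTimer(doneCh <-chan struct{}) <-chan int {
	ch := make(chan int)
	go func() {
		defer close(ch)
		for i := 0; ; i++ {
			select {
			case ch <- i:
				time.Sleep(100 * time.Millisecond) // pause between attempts
			case <-doneCh:
				return
			}
		}
	}()
	return ch
}

func main() {
	doneCh := make(chan struct{})
	defer close(doneCh)

	attempts := 0
	for range retryTimer(doneCh) {
		attempts++
		if attempts == 3 { // pretend the lock is acquired on the third try
			break
		}
	}
	fmt.Println("acquired after", attempts, "attempts")
}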


@@ -379,6 +379,23 @@ func saveFormatXL(disk StorageAPI, format interface{}) error {
return disk.RenameFile(minioMetaBucket, formatConfigFileTmp, minioMetaBucket, formatConfigFile)
}
var ignoredHiddenDirectories = []string{
minioMetaBucket,
".snapshot",
"lost+found",
"$RECYCLE.BIN",
"System Volume Information",
}
func isIgnoreHiddenDirectories(dir string) bool {
for _, ignDir := range ignoredHiddenDirectories {
if dir == ignDir {
return true
}
}
return false
}
// loadFormatXL - loads format.json from disk.
func loadFormatXL(disk StorageAPI) (format *formatXLV3, err error) {
buf, err := disk.ReadAll(minioMetaBucket, formatConfigFile)
@@ -391,9 +408,7 @@ func loadFormatXL(disk StorageAPI) (format *formatXLV3, err error) {
if err != nil {
return nil, err
}
if len(vols) > 1 || (len(vols) == 1 &&
vols[0].Name != minioMetaBucket &&
vols[0].Name != "lost+found") {
if len(vols) > 1 || (len(vols) == 1 && !isIgnoreHiddenDirectories(vols[0].Name)) {
// 'format.json' not found, but we
// found user data.
return nil, errCorruptedFormat
@@ -881,6 +896,7 @@ func newHealFormatSets(refFormat *formatXLV3, setCount, disksPerSet int, formats
if errs[i*disksPerSet+j] == errUnformattedDisk || errs[i*disksPerSet+j] == nil {
newFormats[i][j] = &formatXLV3{}
newFormats[i][j].Version = refFormat.Version
newFormats[i][j].ID = refFormat.ID
newFormats[i][j].Format = refFormat.Format
newFormats[i][j].XL.Version = refFormat.XL.Version
newFormats[i][j].XL.DistributionAlgo = refFormat.XL.DistributionAlgo


@@ -522,7 +522,7 @@ func TestGetXLID(t *testing.T) {
}
formats[2].ID = "bad-id"
if id, err = formatXLGetDeploymentID(quorumFormat, formats); err != errCorruptedFormat {
if _, err = formatXLGetDeploymentID(quorumFormat, formats); err != errCorruptedFormat {
t.Fatal("Unexpected Success")
}
}
@@ -556,4 +556,13 @@ func TestNewFormatSets(t *testing.T) {
if newFormats == nil {
t.Fatal("Unexpected failure")
}
// Check if deployment IDs are preserved.
for i := range newFormats {
for j := range newFormats[i] {
if newFormats[i][j].ID != quorumFormat.ID {
t.Fatal("Deployment id in the new format is lost")
}
}
}
}


@@ -294,7 +294,7 @@ func fsOpenFile(ctx context.Context, readPath string, offset int64) (io.ReadClos
// Seek to the requested offset.
if offset > 0 {
_, err = fr.Seek(offset, os.SEEK_SET)
_, err = fr.Seek(offset, io.SeekStart)
if err != nil {
logger.LogIf(ctx, err)
return nil, 0, err
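The change above swaps the deprecated os.SEEK_SET constant for io.SeekStart; both equal 0, so behaviour is unchanged. For example:

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.Open("/etc/hosts") // any readable file will do
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// Seek relative to the start of the file (formerly os.SEEK_SET).
	pos, err := f.Seek(10, io.SeekStart)
	fmt.Println(pos, err) // 10 <nil>
}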


@@ -22,9 +22,10 @@ import (
"encoding/json"
"io"
"io/ioutil"
"net/http"
"os"
pathutil "path"
"strings"
"time"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/lock"
@@ -114,7 +115,7 @@ type fsMetaV1 struct {
// Metadata map for current object.
Meta map[string]string `json:"meta,omitempty"`
// parts info for current object - used in encryption.
Parts []objectPartInfo `json:"parts,omitempty"`
Parts []ObjectPartInfo `json:"parts,omitempty"`
}
// IsValid - tells if the format is sane by validating the version
@@ -137,11 +138,7 @@ func (m fsMetaV1) ToObjectInfo(bucket, object string, fi os.FileInfo) ObjectInfo
// Guess content-type from the extension if possible.
if m.Meta["content-type"] == "" {
if objectExt := pathutil.Ext(object); objectExt != "" {
if content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]; ok {
m.Meta["content-type"] = content.ContentType
}
}
m.Meta["content-type"] = mimedb.TypeByExtension(pathutil.Ext(object))
}
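mimedb.TypeByExtension, called in the new line above, maps a file extension to a content type in one step. The standard library's mime package exposes the same shape of API and serves as a stand-in here (minio's mimedb carries a larger built-in table):

package main

import (
	"fmt"
	"mime"
	pathutil "path"
)

func main() {
	object := "photos/vacation.png"
	// Stand-in for mimedb.TypeByExtension(pathutil.Ext(object)).
	fmt.Println(mime.TypeByExtension(pathutil.Ext(object))) // image/png
}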
if hasSuffix(object, slashSeparator) {
@@ -174,7 +171,15 @@ func (m fsMetaV1) ToObjectInfo(bucket, object string, fi os.FileInfo) ObjectInfo
} else {
objInfo.StorageClass = globalMinioDefaultStorageClass
}
var (
t time.Time
e error
)
if exp, ok := m.Meta["expires"]; ok {
if t, e = time.Parse(http.TimeFormat, exp); e == nil {
objInfo.Expires = t.UTC()
}
}
// etag/md5Sum has already been extracted. We need to
// remove to avoid it from appearing as part of
// response headers. e.g, X-Minio-* or X-Amz-*.
@@ -212,9 +217,9 @@ func parseFSMetaMap(fsMetaBuf []byte) map[string]string {
return metaMap
}
func parseFSPartsArray(fsMetaBuf []byte) []objectPartInfo {
func parseFSPartsArray(fsMetaBuf []byte) []ObjectPartInfo {
// Get xlMetaV1.Parts array
var partsArray []objectPartInfo
var partsArray []ObjectPartInfo
partsArrayResult := gjson.GetBytes(fsMetaBuf, "parts")
partsArrayResult.ForEach(func(key, part gjson.Result) bool {
@@ -223,11 +228,13 @@ func parseFSPartsArray(fsMetaBuf []byte) []objectPartInfo {
name := gjson.Get(partJSON, "name").String()
etag := gjson.Get(partJSON, "etag").String()
size := gjson.Get(partJSON, "size").Int()
partsArray = append(partsArray, objectPartInfo{
Number: int(number),
Name: name,
ETag: etag,
Size: size,
actualSize := gjson.Get(partJSON, "actualSize").Int()
partsArray = append(partsArray, ObjectPartInfo{
Number: int(number),
Name: name,
ETag: etag,
Size: size,
ActualSize: int64(actualSize),
})
return true
})
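The gjson traversal in parseFSPartsArray can be exercised standalone with a hand-written payload (the values are made up; only gjson.GetBytes, ForEach, Get, String, and Int, all used in the diff, are relied on):

package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	fsMetaBuf := []byte(`{"parts":[{"number":1,"name":"part.1","etag":"abc","size":5242880,"actualSize":5242880}]}`)

	gjson.GetBytes(fsMetaBuf, "parts").ForEach(func(_, part gjson.Result) bool {
		fmt.Println(part.Get("number").Int(), part.Get("name").String(), part.Get("actualSize").Int())
		return true // keep iterating over parts
	})
}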


@@ -38,6 +38,9 @@ func TestFSV1MetadataObjInfo(t *testing.T) {
if objInfo.IsDir {
t.Fatal("Unexpected object info value for IsDir", objInfo.IsDir)
}
if !objInfo.Expires.IsZero() {
t.Fatal("Unexpected object info value for Expires ", objInfo.Expires)
}
}
// TestReadFSMetadata - readFSMetadata testing with a healthy and faulty disk
@@ -54,7 +57,7 @@ func TestReadFSMetadata(t *testing.T) {
if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal("Unexpected err: ", err)
}
if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{}); err != nil {
if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil {
t.Fatal("Unexpected err: ", err)
}
@@ -89,7 +92,7 @@ func TestWriteFSMetadata(t *testing.T) {
if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal("Unexpected err: ", err)
}
if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{}); err != nil {
if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil {
t.Fatal("Unexpected err: ", err)
}

Some files were not shown because too many files have changed in this diff.