Compare commits

...

539 Commits

Author SHA1 Message Date
Harshavardhana
817269475f Make sure to drain body upon an error (#7197)
Also clean up redundant code and move it to a common place
2019-02-06 12:07:03 -08:00
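To illustrate the drain pattern this message describes, a minimal Go sketch (the helper name `drainBody` is hypothetical, not MinIO's actual code):

```
package main

import (
	"io"
	"io/ioutil"
	"net/http"
)

// drainBody reads any remaining bytes and closes the body. Skipping this
// on error paths prevents Go's HTTP client from reusing the connection.
func drainBody(resp *http.Response) {
	if resp != nil && resp.Body != nil {
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
	}
}

func main() {
	resp, err := http.Get("http://localhost:9000/minio/health/live")
	if err != nil {
		return
	}
	drainBody(resp)
}
```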
Krishna Srinivas
2d168b532b Allow format.json healing on dev/test setup (single node XL, all root disks) (#7170) 2019-02-06 11:44:19 -08:00
Aditya Manthramurthy
fd4e15c116 Flush the records staging buffer periodically (#7193)
- Staging buffer is flushed every 500ms. In cases where the result
  records are slowly generated (e.g. when a where condition
  matches very few records), this change causes the server to send
  results even though the staging buffer is not full.

- Refactor messageWriter code to use simpler channel based
  co-ordination instead of atomic variables.
2019-02-06 16:03:05 +05:30
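A rough Go sketch of the periodic-flush idea, assuming a channel of records, a 500ms ticker, and an illustrative 64KiB threshold (none of these names are the real messageWriter internals):

```
package main

import (
	"fmt"
	"time"
)

// writeLoop stages records and flushes when the buffer fills or every
// 500ms, whichever comes first, so slow queries still stream results.
func writeLoop(records <-chan []byte, flush func([]byte)) {
	var staging []byte
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case rec, ok := <-records:
			if !ok {
				flush(staging) // final flush on channel close
				return
			}
			staging = append(staging, rec...)
			if len(staging) >= 64*1024 { // assumed size threshold
				flush(staging)
				staging = staging[:0]
			}
		case <-ticker.C:
			if len(staging) > 0 {
				flush(staging)
				staging = staging[:0]
			}
		}
	}
}

func main() {
	ch := make(chan []byte, 1)
	ch <- []byte("record\n")
	close(ch)
	writeLoop(ch, func(b []byte) { fmt.Printf("flushing %d bytes\n", len(b)) })
}
```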
Krishna Srinivas
3dfbe0f68c Send white spaces to client till completeMultipart() process completes (#7198) 2019-02-05 20:58:09 -08:00
Harshavardhana
30135eed86 Redo how to handle stale dangling files (#7171)
foo.CORRUPTED should never be created because when
multiple sets are involved we would hash the file
to a wrong location, this PR removes the code.

But allows DeleteBucket() to work properly to delete
dangling buckets/objects. Also adds another option
to Healing where a user needs to specify `--remove`
such that all dangling objects will be deleted with
user confirmation.
2019-02-05 17:58:48 -08:00
Harshavardhana
e4081aee62 Added support for reading body in STS API (#7188)
STS API supports both URL query params and reading
from a body.
2019-02-05 15:47:11 -08:00
kannappanr
df418a2783 Create Cors handler with permissive configuration (#7186)
Create new Cors handler allowing all origins with all standard
methods with any header and credentials.

Fixes #7181
2019-02-05 14:06:52 -08:00
kannappanr
9a65f6dc97 Remove duplicate code in object-handlers.go (#7176)
removed duplicate code in CompleteMultipartUploadHandler
and CopyObjectPartHandler.
2019-02-05 13:36:38 -08:00
Aditya Manthramurthy
f04f8bbc78 Add support for Timestamp data type in SQL Select (#7185)
This change adds support for casting strings to Timestamp via CAST:
`CAST('2010T' AS TIMESTAMP)`

It also implements the following date-time functions:
  - UTCNOW()
  - DATE_ADD()
  - DATE_DIFF()
  - EXTRACT()

For values passed to these functions, date-types are automatically
inferred.
2019-02-04 20:54:45 -08:00
Harshavardhana
ea6d61ab1f Use loadCachedConfigs appropriately to load ENVs (#7187) 2019-02-04 10:31:11 +05:30
Krishna Srinivas
6f08edfb36 Use O_EXCL when creating file as we never overwrite an existing file (#7189) 2019-02-01 19:01:06 -08:00
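A quick Go demonstration of the O_EXCL behavior this commit relies on (the file name is arbitrary):

```
package main

import (
	"fmt"
	"os"
)

func main() {
	// O_EXCL makes the create fail if the file already exists, so a
	// concurrent writer can never silently overwrite it.
	f, err := os.OpenFile("part.1", os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
	if err != nil {
		fmt.Println("create failed:", err) // e.g. "file exists"
		return
	}
	defer f.Close()
}
```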
Sidhartha Mani
e9fdea05c6 Enable CI control from repository: Add Dockerfile.simpleci (#7122) 2019-02-01 12:04:28 -08:00
Harshavardhana
e005910051 Add more information in our select docs (#7177) 2019-02-01 11:34:56 -08:00
Anis Elleuch
de2c106386 xl: ListObjectParts uses the latest valid xl meta (#7184)
ListObjectParts is using xl.readXLMetaParts which picks the first
xl meta found on any disk, which is inconsistent information.

E.g.: In a middle of a multipart upload, one node can go offline
and get back later with an outdated multipart information.
2019-02-01 08:58:41 -08:00
Harshavardhana
32a6dd1dd6 Remove sporadic tests which fail on windows (#7178) 2019-01-31 16:48:47 -08:00
Harshavardhana
432aec73d9 Return proper errors for invalid bodies (#7179) 2019-01-31 07:19:09 -08:00
Anis Elleuch
36dae04671 CopyObjectPart: remove duplicated etag decryption (#7174) 2019-01-30 19:33:31 -08:00
Minio Trusted
9dc9f03c02 Update yaml files to latest version RELEASE.2019-01-31T00-31-19Z 2019-01-31 00:37:43 +00:00
Krishna Srinivas
b18c0478e7 Only heal on disks where we are sure that healing is needed (#7148) 2019-01-30 10:53:57 -08:00
Anis Elleuch
2d9860e875 heal: Fix healing empty directories (#7154)
This commit fixes the computation of Before/After healing state
for empty directories.

Issues before the commit:
- Before state doesn't reflect the real status (no StatVol() called)
- For any MakeVol() error, healObjectDir is exited directly, which is
  wrong.
2019-01-30 10:51:56 -08:00
kannappanr
d3553f8dfc Bucket Heal: Do not add empty endpoint entry (#7172)
Currently during a heal of a bucket, if one disk is offline an empty endpoint entry is added.
Then another entry with the missing endpoint is also added.

This results in more entries than disks being added.

Code that adds empty endpoint has been removed.
2019-01-30 10:40:43 -08:00
Harshavardhana
e1ae90c12b Make sure to pass the right username for correct ConditionValues (#7169)
Without passing the proper username value, requests would result in
AccessDenied errors when policies with `{aws:username}` substitutions
are used.

Fixes #7165
2019-01-30 14:21:09 +05:30
Sidhartha Mani
34e7259f95 Add Historic CPU and memory stats (#7136)
Collect historic cpu and mem stats. Also, use actual values
instead of formatted strings while returning to the client. The string
formatting prevents values from being processed by the server or
by the client without first parsing them.

This change will allow the values to be processed (eg. 
compute rolling-average over the lifetime of the minio server)
and offloads the formatting to the client.
2019-01-30 12:47:32 +05:30
poornas
d0015b4d66 update kms docs example to set a longer period for token renewal (#7149) 2019-01-29 08:04:07 -08:00
poornas
3467460456 Fix vault client to autorenew or reauthenticate (#7161)
Switch to the Vault API's Renewer for token renewal. If the
token can no longer be renewed, reauthenticate to get a fresh
token.
2019-01-29 16:57:23 +05:30
Harshavardhana
64b5701971 Support AWS envs creds for non-aws endpoints in S3 gateway (#7156)
We made a change previously in #7111 which restricted support
for AWS envs to the AWS S3 endpoint only. Some users requested
that this be added back for non-AWS endpoints as well, since
they require separate credentials for backend authentication
from a security point of view.
2019-01-29 16:05:20 +05:30
Praveen raj Mani
fad59da29d clientID removed in the MQTT config (#7157)
More than one client cannot use the same clientID for an MQTT connection.
This causes problems in distributed deployments where the config is shared
across nodes, as each Minio instance tries to connect to MQTT using the
same clientID.

This commit removes the clientID field in the config, and lets the
MQTT client create a random clientID for each node.
2019-01-29 15:00:15 +05:30
Aditya Manthramurthy
91c839ad28 Use a buffer to collect SQL Select result rows (#7158)
Batching records into a single SQL Select message in the response
leads to significant speed up as the message header overhead is made
negligible.

This change leads to a speed up of 3-5x for queries that select many
small records.
2019-01-28 20:00:18 -08:00
Aditya Manthramurthy
2786055df4 Add new SQL parser to support S3 Select syntax (#7102)
- New parser written from scratch, allows easier and complete parsing
  of the full S3 Select SQL syntax. Parser definition is directly
  provided by the AST defined for the SQL grammar.

- Bring support to parse and interpret SQL involving JSON path
  expressions; evaluation of JSON path expressions will be
  subsequently added.

- Bring automatic type inference and conversion for untyped
  values (e.g. CSV data).
2019-01-28 17:59:48 -08:00
Harshavardhana
0a28c28a8c Avoid code which looks at local files when etcd is configured (#7144)
This situation happens only in gateway nas which supports
etcd based `config.json` to support all FS mode features.

The issue was that we would try to migrate something which doesn't
exist when etcd is configured, which leads to inconsistent
server configs in memory.

This PR fixes this situation by properly loading the config after
initialization, and performing the backend disk config migration
only if etcd is not configured.
2019-01-28 13:31:35 -08:00
Harshavardhana
526546d588 Remove '.minio.sys/tmp' files in background (#7124)
If it does happen that we have a lot of files in '.minio.sys/tmp',
minio startup might block while deleting this folder. Rename and
delete in the background instead to allow Minio to start serving
requests.
2019-01-25 13:33:28 -08:00
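A minimal sketch of the rename-then-delete-in-background approach (helper name and paths are illustrative, not the actual MinIO code):

```
package main

import "os"

// removeTmpAsync renames the folder first (an instant operation on the
// same filesystem), so startup no longer blocks on the recursive delete.
func removeTmpAsync(tmpDir string) error {
	trash := tmpDir + ".trash"
	if err := os.Rename(tmpDir, trash); err != nil {
		return err
	}
	go os.RemoveAll(trash) // slow delete proceeds off the startup path
	return os.MkdirAll(tmpDir, 0700)
}

func main() {
	os.MkdirAll(".minio.sys/tmp", 0700)
	removeTmpAsync(".minio.sys/tmp")
}
```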
Aditya Manthramurthy
2053b3414f Reduce heal parallelism (#7155)
To avoid a large number of concurrent connections between minio
servers and to reduce CPU pressure, it is better to limit the number
of objects healed in parallel to number_of_CPUs.
2019-01-25 13:11:17 -08:00
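The standard Go idiom for this kind of bound is a semaphore channel sized to the CPU count; a sketch under that assumption (healObject is a placeholder, not the real heal call):

```
package main

import (
	"runtime"
	"sync"
)

func healObject(name string) { /* placeholder for the actual heal */ }

func main() {
	objects := []string{"bucket/a", "bucket/b", "bucket/c"}
	sem := make(chan struct{}, runtime.NumCPU()) // at most NumCPU heals in flight
	var wg sync.WaitGroup
	for _, obj := range objects {
		wg.Add(1)
		sem <- struct{}{} // blocks once the limit is reached
		go func(o string) {
			defer wg.Done()
			defer func() { <-sem }()
			healObject(o)
		}(obj)
	}
	wg.Wait()
}
```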
kannappanr
ce870466ff Top Locks command implementation (#7052)
API to list locks used in distributed XL mode
2019-01-24 07:22:14 -08:00
Harshavardhana
964e354d06 Fix liveness check for NAS gateway (#7142)
Current master throws '503' unavailable for liveness check
```
~ curl -v http://localhost:9000/minio/health/live
> GET /minio/health/live HTTP/1.1
...
...
< HTTP/1.1 503 Service Unavailable
```

With this fix liveness check returns error appropriately
```
~ curl -v http://localhost:9000/minio/health/live
> GET /minio/health/live HTTP/1.1
...
...
< HTTP/1.1 200 OK
```
2019-01-24 19:14:05 +05:30
kannappanr
8ee8ad777c logger: do not interpret encoded url as format string (#7110)
Error logger currently interprets encoded url in the error as a format string.
2019-01-24 00:30:00 -08:00
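The bug class here is the classic format-string mistake; a tiny Go illustration (message contents invented):

```
package main

import "log"

func main() {
	msg := "GET /bucket/a%2Fb failed" // URL-encoded object path
	log.Printf(msg)       // wrong: "%2F" is interpreted as a format verb
	log.Printf("%s", msg) // right: the message is logged verbatim
}
```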
Krishna Srinivas
82af0be1aa Healing process should not heal root disk (#7089) 2019-01-23 15:29:29 -08:00
Minio Trusted
e8c18bc145 Update yaml files to latest version RELEASE.2019-01-23T23-18-58Z 2019-01-23 23:25:02 +00:00
Harshavardhana
bd25f31100 Use IAM creds only if endpoint is S3 (#7111)
Requirements like being able to run minio gateway in ec2
pointing to a Minio deployment wouldn't work properly
because IAM creds take precedence on EC2.

Add checks such that we only enable AWS specific features
if our backend URL points to actual AWS S3 not S3 compatible
endpoints.
2019-01-23 11:12:33 -08:00
Harshavardhana
ee7dcc2903 Handle errs returned with etcd properly for config init and migration (#7134)
Returning unexpected errors can cause problems for config handling,
which is what led gateway deployments with etcd to misbehave and
stop working properly.
2019-01-23 11:10:59 -08:00
Harshavardhana
55ef51a99d Vendorize all recent changes to minio-go (#7135)
- Default support for S3 dualstack endpoints (IPv6 support)
- Support granular policy conditionals in List operations
- Support proxy cookies for stickiness
2019-01-23 19:22:09 +05:30
Anis Elleuch
dc2348daa5 heal: Preserve deployment ID from reference format.json (#7126)
Deployment ID is not copied into new formats after healing format. Although
this is not critical, since a new deployment ID will be generated and set in the
next cluster restart, it is still much better if we don't change the deployment
ID of a cluster, for better tracking.
2019-01-22 18:32:06 -08:00
Aditya Manthramurthy
042d7f25e4 Fix regexp matcher of browser assets and paths (#7083)
Fix the regexp matcher for special browser assets so that it clashes
with less of the object namespace.

Assets should now be loaded with the /minio/ prefix. Previously,
favicon.ico (and others) could be loaded at any path matching
/minio/*/favicon.ico. This clashes with a large part of the object
namespace. With this change, /minio/favicon.ico will serve the favicon
but not /minio/mybucket/favicon.ico

Fixes #7077
2019-01-22 10:58:28 -08:00
Andreas Auernhammer
8c1b649b2d load system CAs before trying to load custom CAs (#7133)
This changes causes `getRootCAs` to always load system-wide CAs.
Any additional custom CAs (at `certs/CA/`) are added to the certificate pool
of system CAs.

The previous behavior was incorrect since no system-wide CAs were
loaded if either there were CAs under `certs/CA` or the `certs/CA`
directory didn't exist at all.
2019-01-22 09:18:06 -08:00
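A sketch approximating the described getRootCAs behavior (error handling simplified; not the exact MinIO code):

```
package main

import (
	"crypto/x509"
	"io/ioutil"
	"path/filepath"
)

func getRootCAs(caDir string) *x509.CertPool {
	pool, err := x509.SystemCertPool() // always load system-wide CAs first
	if err != nil {
		pool = x509.NewCertPool() // fall back to an empty pool
	}
	files, err := ioutil.ReadDir(caDir)
	if err != nil {
		return pool // no certs/CA directory: system CAs alone are fine
	}
	for _, fi := range files {
		if pem, err := ioutil.ReadFile(filepath.Join(caDir, fi.Name())); err == nil {
			pool.AppendCertsFromPEM(pem) // layer custom CAs on top
		}
	}
	return pool
}

func main() { getRootCAs("certs/CA") }
```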
Kumar Sukhani
f03ccec912 Fix slack Link (#7131) 2019-01-22 19:53:50 +05:30
Nitish Tiwari
0bb65f84bb Add example for IPv6 for address flag (#7127) 2019-01-22 15:55:27 +05:30
Harshavardhana
8e0910ab3e Fix build issues on BSDs in pkg/cpu (#7116)
Also add a cross compile script to always test cross
compilation for some well-known platforms and architectures;
we support out-of-the-box compilation of these platforms even
if we don't make an official release build.

This script is to avoid regressions in this area when we
add platform dependent code.
2019-01-22 09:27:23 +05:30
Harshavardhana
5353edcc38 Support policy variable replacement (#7085)
This PR supports iam and bucket policies to have
policy variable replacements in resource and
condition key values.

For example
- ${aws:username}
- ${aws:userid}
2019-01-21 10:27:14 +05:30
Harshavardhana
3265112d04 Remove gateway implementations for manta, sia and b2 (#7115) 2019-01-20 08:10:58 -08:00
Harshavardhana
4fdacb8b14 Add policy conditions support for Listing operations on browser (#7106)
Fixes https://github.com/minio/minio/issues/7095
2019-01-20 12:50:01 +05:30
Krishna Srinivas
267f183fc8 Do not do StorageInfo() and ListBuckets() for FS/Erasure in health check handler (#7090)
Health checking programs very frequently use /minio/health/live 
to check health, hence we can avoid doing StorageInfo() and 
ListBuckets() for FS/Erasure backend.
2019-01-20 10:28:36 +05:30
Harshavardhana
3d22a9d84f Support rootCAs for notification targets (#7108)
Add support for RootCAs for notification targets
mqtt and webhook
2019-01-20 09:57:18 +05:30
Krishna Srinivas
51ec61ee94 Fix healing whole file bitrot (#7123)
* Use 0-byte file for bitrot verification of whole-file-bitrot files

Also pass the right checksum information for bitrot verification

* Copy xlMeta info from latest meta except []checksums and []Parts while healing
2019-01-20 07:58:40 +05:30
Harshavardhana
74c2048ea9 Add proper contexts with timeouts for etcd operations (#7097)
This fixes an issue of perceived hang when incorrect
unreachable URLs are specified in MINIO_ETCD_ENDPOINTS
variable.

Fixes #7096
2019-01-18 09:36:45 -08:00
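A sketch of bounding etcd calls with contexts, assuming the etcd clientv3 package and a 3-second timeout (both assumptions, not the PR's exact values):

```
package main

import (
	"context"
	"time"

	"github.com/coreos/etcd/clientv3" // clientv3 import path of that era
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"http://localhost:2379"},
		DialTimeout: 3 * time.Second, // fail fast on unreachable endpoints
	})
	if err != nil {
		return
	}
	defer cli.Close()
	// Bound each operation too, so a hung member cannot stall the server.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	cli.Get(ctx, "config/config.json") // hypothetical key
}
```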
Krishna Srinivas
730ac5381c Simplify parallelReader.Read() (#7109)
Simplify parallelReader.Read(), which also fixes the previous
implementation where it returned before all the parallel
reading go-routines had terminated, causing race conditions.
2019-01-18 21:18:24 +05:30
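The usual fix for such a race is a sync.WaitGroup; a sketch, not the actual parallelReader code:

```
package main

import "sync"

// parallelRead must not return until every reader goroutine is done,
// otherwise they race on the shared buffer slices after Read() exits.
func parallelRead(bufs [][]byte, readAt func(i int) []byte) {
	var wg sync.WaitGroup
	for i := range bufs {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			bufs[i] = readAt(i)
		}(i)
	}
	wg.Wait() // returning before this point caused the race
}

func main() {
	bufs := make([][]byte, 4)
	parallelRead(bufs, func(i int) []byte { return []byte{byte(i)} })
}
```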
Alex Simenduev
6dd8a83c5a change credential chain order in s3 gateway to mimic official docs (#7091) 2019-01-17 10:31:51 -08:00
Harshavardhana
1a7e6d4768 Handle multiple conditions in policies (#7079)
Fixes #7078
2019-01-17 10:28:24 -08:00
Krishna Srinivas
98c950aacd Streaming bitrot verification support (#7004) 2019-01-17 18:28:18 +05:30
Minio Trusted
94c52e3816 Update yaml files to latest version RELEASE.2019-01-16T21-44-08Z 2019-01-16 21:51:40 +00:00
Harshavardhana
8766c5eb22 Add version as part of Server: header (#7100)
This was agreed after discussing with @abperiasamy, we
borrowed the idea from Apache's own documentation.
2019-01-16 13:38:41 -08:00
kannappanr
e0d22359e7 Fix lint warnings (#7099) 2019-01-16 12:49:20 -08:00
Harshavardhana
6dd13e68c2 Support V2 signatures when autoencryption is enabled (#7084)
When auto-encryption is turned on, we pro-actively add SSEHeader
for all PUT, POST operations. This is unusual for V2 signature
calculation because V2 signature doesn't have a pre-defined set
of signed headers in the request like V4 signature. According to
V2 we should canonicalize all incoming supported HTTP headers.

Make sure to validate signatures before we mutate HTTP headers.
2019-01-16 12:12:06 -08:00
Harshavardhana
633001c8ba Inherit certsDir from configDir if latter is set (#7098)
This is to ensure backward compatibility for all existing
deployments which use custom config dir to point to their
certs directory.
2019-01-16 12:04:32 -08:00
Bala FA
e23a42305c Rebase minio/parquet-go and fix null handling. (#7067) 2019-01-16 21:52:04 +05:30
Krishna Srinivas
63d2583e91 Avoid holding write lock on config in situations where it is not needed (#7082)
This is to allow the cluster to come up when N/2 disks are available.
2019-01-16 13:59:21 +05:30
Harshavardhana
a2f66abbe8 Update STS API docs with Version query param (#7071) 2019-01-16 09:38:32 +05:30
Andreas Auernhammer
b28661b673 doc: add security documentation to provide some background info (#7028)
This commit adds some documentation about the design of the
SSE-C and SSE-S3 implementation. It describes how the Minio server
encrypt objects and manages keys.
2019-01-15 14:27:57 -08:00
Harshavardhana
e8791ae274 Remove Minio server arch, version from Server: header (#7074) 2019-01-15 13:16:11 +05:30
Scott Dunlop
309975d477 Add missing time import to counter_darwin.go (#7081) 2019-01-14 17:21:27 -08:00
Praveen raj Mani
6571641735 Persist offline mqtt events in the queueDir and replay (#7037) 2019-01-14 12:39:00 +05:30
Harshavardhana
8757c963ba Migrate all Peer communication to common Notification subsystem (#7031)
Deprecate the use of Admin Peers concept and migrate all peer
communication to Notification subsystem. This finally allows
for a common subsystem for all peer notification in case of
distributed server deployments.
2019-01-14 12:14:20 +05:30
Praveen raj Mani
9a71f2fdfa link to ppc64le binary added (#7065)
Fixes #7063
2019-01-11 20:16:19 +05:30
Nick Craig-Wood
9c26fe47b0 Fix server side copy of files with ? in - fixes #7058 (#7059)
Before this change the CopyObjectHandler and the CopyObjectPartHandler
both looked for a `versionId` parameter on the `X-Amz-Copy-Source` URL
for the version of the object to be copied on the URL unescaped version
of the header.  This meant that files that had question marks in were
truncated after the question mark so that files with `?` in their
names could not be server side copied.

After this change the URL unescaping is done during the parsing of the
`versionId` parameter which fixes the problem.

This change also introduces the same logic for the
`X-Amz-Copy-Source-Version-Id` header field which was previously
ignored, namely returning an error if it is present and not `null`
since minio does not currently support versions.

S3 Docs:
- https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
- https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
2019-01-10 13:10:10 -08:00
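A sketch of the fixed order of operations (function name hypothetical): split the still-encoded header on '?' first, then unescape, since a literal '?' in an object name arrives as "%3F":

```
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func parseCopySource(raw string) (object, versionID string, err error) {
	path, query := raw, ""
	if i := strings.Index(raw, "?"); i >= 0 {
		path, query = raw[:i], raw[i+1:]
	}
	vals, err := url.ParseQuery(query)
	if err != nil {
		return "", "", err
	}
	object, err = url.PathUnescape(path) // unescape only after the split
	return object, vals.Get("versionId"), err
}

func main() {
	fmt.Println(parseCopySource("bucket/what%3Fname.txt?versionId=null"))
	// prints: bucket/what?name.txt null <nil>
}
```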
Sidhartha Mani
f3f47d8cd3 Add ServerCPULoadInfo() and ServerMemUsageInfo() admin API (#7038) 2019-01-09 19:04:19 -08:00
Minio Trusted
de1d39e436 Update yaml files to latest version RELEASE.2019-01-10T00-21-20Z 2019-01-10 00:28:50 +00:00
poornas
ed1275a063 Fix copy from encrypted multipart to single encrypted part (#7056)
When source is encrypted multipart object and the parts are not
evenly divisible by DARE package block size, target encrypted size
will not necessarily be the same as encrypted source object.
2019-01-09 15:17:21 -08:00
kannappanr
a7d407fa42 Display message on failure to get lock on format.json in fs mode on startup (#6538)
Retry to see if the lock is free. Retry time will increase binomially.
2019-01-09 10:13:04 -08:00
Anis Elleuch
4e6e05f8e0 virtual host: Fix making new buckets (#7054)
This commit removes old code preventing PUT requests with '/' as a path,
because this is not needed anymore after the introduction of the virtual
host style in Minio server code.

'PUT /' when a global domain is not configured already returns a
405 Method Not Allowed HTTP error.
2019-01-09 11:59:41 +05:30
Bala FA
b0deea27df Refactor s3select to support parquet. (#7023)
Also handle pretty formatted JSON documents.
2019-01-08 16:53:04 -08:00
Kaan Kabalak
e98d89274f Upgrade to Webpack 4 (#7045) 2019-01-08 11:04:59 -08:00
kannappanr
c59206bcd3 GCS ListMultipartUploads: Don't return on first uploadid (#7014)
ListMultipartUploads code returns only the first uploadid.

Fixes #7011
2019-01-08 11:03:28 -08:00
Harshavardhana
7f2d439baa Avoid printing in S3 tests (#7043) 2019-01-07 22:32:30 +05:30
poornas
5a80cbec2a Add double encryption at S3 gateway. (#6423)
This PR adds pass-through, single encryption at gateway and double
encryption support (gateway encryption with pass through of SSE
headers to backend).

If KMS is set up (either with Vault as KMS or using
MINIO_SSE_MASTER_KEY), the gateway will automatically perform
single encryption. If MINIO_GATEWAY_SSE is set up in addition to
Vault KMS, double encryption is performed. When neither KMS nor
MINIO_GATEWAY_SSE is set, do a pass-through to the backend.

When double encryption is specified, MINIO_GATEWAY_SSE can be set to
"C" for SSE-C encryption at gateway and backend, "S3" for SSE-S3
encryption at gateway/backend or both to support more than one option.

Fixes #6323, #6696
2019-01-05 14:16:42 -08:00
Harshavardhana
2d19011a1d Add support for AssumeRoleWithWebIdentity (#6985) 2019-01-04 13:48:12 -08:00
Harshavardhana
e82dcd195c Deprecate config-dir bring in certs-dir for TLS configuration (#7033)
This PR is to provide indication that config-dir will be removed
in future and all users should migrate to new --certs-dir option

Fixes #7016
Fixes #7032
2019-01-02 10:05:16 -08:00
Nitish Tiwari
fcb56d864c Add ServerDrivesPerfInfo() admin API (#6969)
This is part of implementation for mc admin health command. The
ServerDrivesPerfInfo() admin API returns read and write speed
information for all the drives (local and remote) in a given Minio
server deployment.

Part of minio/mc#2606
2018-12-31 09:46:44 -08:00
Krishnan Parthasarathi
75cd4201b0 Update go-sql-driver/mysql package (#7019) 2018-12-29 21:59:03 +05:30
Harshavardhana
f24c017e9a Move docker edge to latest Go as well (#7030) 2018-12-28 17:24:24 -08:00
Harshavardhana
b5280ba243 Migrate to Go version 1.11.4 (#7026) 2018-12-28 14:04:39 -08:00
Harshavardhana
2a0e4b6f58 Add boolean function condition support (#7027) 2018-12-28 12:18:58 -08:00
Minio Trusted
1898961ce3 Update yaml files to latest version RELEASE.2018-12-27T18-33-08Z 2018-12-27 18:41:05 +00:00
Krishnan Parthasarathi
236796ebd6 Add etcd as prerequisite for multi-user in gateway (#7022) 2018-12-27 07:22:18 +05:30
Harshavardhana
4e4f855b30 Add support for new policy conditions (#7024)
This PR implements following condition types

- StringEqualsIgnoreCase and StringNotEqualsIgnoreCase
- BinaryEquals
2018-12-26 17:39:30 -08:00
Harshavardhana
2db22deb93 Fix policy bugs Null conditions and canonical names (#7021)
This PR fixes two different issues

- Null condition implementation
- HTTP Canonical request value names

This PR fixes the handling of null conditions and
handles HTTP canonical names in request values.

This PR was tested with policies mentioned in the following blog
https://aws.amazon.com/blogs/security/how-to-prevent-uploads-of-unencrypted-objects-to-amazon-s3/

Fixes #6955
2018-12-26 02:03:28 -08:00
Harshavardhana
fb8d0d7cf7 Add support for hostname lookups instead of IPs in MINIO_PUBLIC_IPS (#7018)
DNS names will be resolved to their respective IPs if specified
in MINIO_PUBLIC_IPS.

Fixes #6862
2018-12-23 03:08:21 -08:00
Harshavardhana
a536cf5dc0 Buffconn should buffer upto maxHeaderBytes to avoid ErrBufferFull (#7017)
It can happen with erroneous clients which do not send the `Host:`
header until 4k worth of header bytes have been read. This can lead
to the Peek() method of bufio failing with ErrBufferFull.

To avoid this we should make sure that the Peek buffer is as large as
our maxHeaderBytes count.
2018-12-23 12:03:04 +05:30
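A small demonstration of the underlying bufio behavior (sizes illustrative):

```
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	headers := strings.Repeat("x", 8192) // header bytes before "Host:"
	small := bufio.NewReaderSize(strings.NewReader(headers), 4096)
	_, err := small.Peek(8192)
	fmt.Println(err) // bufio: buffer full

	large := bufio.NewReaderSize(strings.NewReader(headers), 8192)
	_, err = large.Peek(8192)
	fmt.Println(err) // <nil>: buffer sized to the header limit
}
```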
Harshavardhana
b9b68e9331 Add multi-stage build of docker edge image (#7005)
This is to reduce the overall size of the image;
we only retain the binary that was built in the previous stage.
2018-12-22 06:36:48 +05:30
Anis Elleuch
632022971b s3: Don't set NextMarker when listing is not truncated (#7012)
Setting NextMarker when IsTruncated is not set seems to confuse the
AWS C++ SDK; this commit avoids setting any string in NextMarker.
2018-12-20 13:30:25 -08:00
Harshavardhana
def04f01cf Update reedsolomon/highwayhash to start using ppc64le support (#7003)
Thanks to @fwessels for the upstream work on reedsolomon and
highwayhash which has resulted in 10x performance improvement
on ppc64 architecture.
2018-12-20 23:17:05 +05:30
Minio Trusted
bc67410548 Update yaml files to latest version RELEASE.2018-12-19T23-46-24Z 2018-12-19 23:54:02 +00:00
kannappanr
7881791a91 CopyObject:Set Content-Type to application/octet-stream if it is not set (#6958) 2018-12-19 14:31:45 -08:00
Harshavardhana
d2f8f8c7ee Fix ETag handling with auto-encryption with CopyObject conditions (#7000)
minio-java tests were failing under multiple places when
auto encryption was turned on, handle all the cases properly

This PR fixes

 - CopyObject should decrypt ETag before it does if-match
 - CopyObject should not try to preserve metadata of source
   when rotating keys, unless explicitly asked by the user.
 - We should not try to decrypt Compressed object etag, the
   potential case was if user sets encryption headers along
   with compression enabled.
2018-12-19 14:12:53 -08:00
kannappanr
8c32311b80 Change lock name to include names instead of hash. (#6886)
Previously lockname included the hash of the bucket, object
and uploadid.

This is a part of the fix required to list the oldest locks on the server.
2018-12-19 13:57:51 -08:00
Ashish Kumar Sinha
9bb88e610e Deletion of subfolders of multipart (#6961)
Delete subfolders under the multipart folder upon completion of the CompleteMultipartUpload, AbortMultipartUpload and cleanupStaleMultipartUploads functions
2018-12-19 11:27:10 -08:00
Harshavardhana
d1e41695fe Add support for federation on browser (#6891) 2018-12-19 18:43:47 +05:30
Aditya Manthramurthy
2aeb3fbe86 Fix csv output delimiter bug (#6994) 2018-12-19 11:49:06 +05:30
Anis Elleuch
99b843a64e Add anonymous flag to prevent logging sensitive information (#6899) 2018-12-18 16:08:11 -08:00
Harshavardhana
4f31a9a33b Reload users upon AddUser on peers (#6975)
Also migrate ReloadFormat to notification subsystem,
remove GetConfig() we do not use this API anymore
2018-12-18 14:39:21 -08:00
Nitish Tiwari
65ddff8899 Fix NAS Gateway Docker command example (#6967)
Fixes #6965
2018-12-18 14:37:17 -08:00
Harshavardhana
e7c902bbbc Return proper errors when admin API is not initialized (#6988)
Especially in gateway mode, IAM admin APIs are not enabled
if etcd is not enabled. We should still enable the admin API,
but only enable the IAM and Config APIs with etcd configured.
2018-12-18 13:03:26 -08:00
Andreas Auernhammer
5a5895203b add howto generate a master key and add master key disclaimer (#6992)
This commit adds a section to the master key documentation
describing how to generate a random 256 bit master key.

Further this commit adds a warning that master keys are not
recommended for production systems because it's (currently)
not possible to replace a master key (e.g. in case of compromise).
2018-12-18 13:00:32 -08:00
poornas
7da0336ac8 Make sure env are loaded before gateway layer initialization (#6989) 2018-12-18 10:42:09 -08:00
Harshavardhana
3be616de3f Send deployment ID in notification event response elements (#6991) 2018-12-18 10:05:26 -08:00
Harshavardhana
c5bf22fd90 Turn off printing IPv6 endpoints when listening on all interfaces (#6986)
By default when we listen on all interfaces, we print all the
endpoints that are local to all interfaces, including IPv6
addresses. Remove IPv6 addresses from the endpoint list to be
printed unless explicitly specified with '--address'.
2018-12-18 21:56:30 +05:30
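The filtering can be done with net.IP.To4(), which returns nil for addresses not representable as IPv4; a sketch:

```
package main

import (
	"fmt"
	"net"
)

func ipv4Only(ips []net.IP) (out []net.IP) {
	for _, ip := range ips {
		if ip.To4() != nil { // nil for IPv6-only addresses
			out = append(out, ip)
		}
	}
	return out
}

func main() {
	ips := []net.IP{net.ParseIP("192.168.1.10"), net.ParseIP("fe80::1")}
	fmt.Println(ipv4Only(ips)) // [192.168.1.10]
}
```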
poornas
7c9f934875 Disallow SSE requests when object layer has encryption disabled (#6981) 2018-12-14 21:39:59 -08:00
Eco
b6f9b24b30 Small corrections and example for auto-encryption (#6982) 2018-12-14 16:21:41 -08:00
poornas
13cb814a0e update KMS README.md to set approle env (#6978) 2018-12-14 14:03:16 -08:00
Andreas Auernhammer
d264d2c899 add auto-encryption feature (#6523)
This commit adds an auto-encryption feature which allows
the Minio operator to ensure that uploaded objects are
always encrypted.

This change adds the `autoEncryption` configuration option
as part of the KMS configuration and the ENV variable
`MINIO_SSE_AUTO_ENCRYPTION:{on,off}`.

It also updates the KMS documentation according to the
changes.

Fixes #6502
2018-12-14 13:35:48 -08:00
Harshavardhana
bebaff269c Support IPv6 in minio command line (#6947)
Fixes #6946
2018-12-14 13:07:46 +05:30
Harshavardhana
52b159b1db Allow versionId to be null for Delete,CopyObjectPart (#6972) 2018-12-14 11:34:37 +05:30
Nitish Tiwari
324834e4da Remove duplicate switch case (#6966)
Fixes #6948
2018-12-13 21:58:48 -08:00
Harshavardhana
c2ed1347d9 Do not list objects unless specified in policy (#6970)
Currently we use GetObject to check if we are allowed to list;
this might be a security problem, since there are many users now
who actively disable publicly readable listing, yet anyone who
can guess the browser URL can still list the objects.

This PR turns off this behavior and provides a more expected way
based on the policies.

This PR also additionally improves the Download() object
implementation to use a more streamlined code.

These are precursor changes to facilitate federation and web
identity support in browser.
2018-12-14 09:45:09 +05:30
Anis Elleuch
50f6f9fe58 S3 api: Ignore encoding in xml body (#6953)
One user reported having discovered the following error:

API: SYSTEM()
Time: 20:06:17 UTC 12/06/2018
Error: xml: encoding "US-ASCII" declared but Decoder.CharsetReader is nil
1: cmd/handler-utils.go:43:cmd.parseLocationConstraint()
2: cmd/auth-handler.go:250:cmd.checkRequestAuthType()
3: cmd/bucket-handlers.go:411:cmd.objectAPIHandlers.PutBucketHandler()
4: cmd/api-router.go:100:cmd.(objectAPIHandlers).PutBucketHandler-fm()
5: net/http/server.go:1947:http.HandlerFunc.ServeHTTP()

Hence, adding support for different XML encodings. Although there
is no clear specification about it, even setting "GARBAGE" as an XML
encoding won't change the behavior of AWS; the encoding seems
to be ignored.

This commit will follow that behavior: ignore the encoding field
and consider all XML as UTF-8 encoded.
2018-12-13 12:09:50 -08:00
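In Go's encoding/xml this amounts to installing a pass-through CharsetReader; a runnable sketch (element name invented):

```
package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

func main() {
	doc := `<?xml version="1.0" encoding="US-ASCII"?><Loc>us-east-1</Loc>`
	dec := xml.NewDecoder(strings.NewReader(doc))
	// Without a CharsetReader the decoder rejects any non-UTF-8 label.
	// Returning the input untouched ignores the label, treating it as UTF-8.
	dec.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
		return input, nil
	}
	var v struct {
		XMLName xml.Name `xml:"Loc"`
		Value   string   `xml:",chardata"`
	}
	if err := dec.Decode(&v); err != nil {
		fmt.Println("decode:", err)
		return
	}
	fmt.Println(v.Value) // us-east-1
}
```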
Minio Trusted
48cb0ea34b Update yaml files to latest version RELEASE.2018-12-13T02-04-19Z 2018-12-13 02:09:53 +00:00
Harshavardhana
6f7c99a333 Allow versionId to be null for Copy,Get,Head API calls (#6942)
Fixes #6935
2018-12-12 11:43:44 -08:00
Harshavardhana
3498f5b0ec List exact DNS entries for a requested bucketName (#6936)
Currently we would end up considering common-prefix
buckets to be part of the same DNS service record,
which leads to the Minio server wrongly forwarding
requests to incorrect IPs.
2018-12-12 10:47:03 -08:00
Andreas Auernhammer
21d8c0fd13 refactor vault configuration and add master-key KMS (#6488)
This refactors the vault configuration by moving the
vault-related environment variables to `environment.go`
(Other ENV should follow in the future to have a central
place for adding / handling ENV instead of magic constants
and handling across different files)

Further this commit adds master-key SSE-S3 support.
The operator can specify a SSE-S3 master key using
`MINIO_SSE_MASTER_KEY` which will be used as master key
to derive and encrypt per-object keys for SSE-S3
requests.

This commit is also a pre-condition for SSE-S3
auto-encryption support.

Fixes #6329
2018-12-12 12:20:29 +05:30
Kale Blankenship
79b9a9ce46 Provide actual size in events instead of compressed size. (#6950)
Previous behavior did not check if the object was compressed and
incorrectly reported the stored size rather than the actual object
size.
2018-12-11 17:30:15 -08:00
Harshavardhana
b9b353db4b Add env to support synchronous ops for all calls (#6877) 2018-12-11 16:22:56 -08:00
poornas
11a9b317a3 Disable ListenBucket notifications for NAS gateway (#6954) 2018-12-11 16:16:09 -08:00
Praveen raj Mani
9af7d627ac Preserve the compression headers while copying (#6952)
Fixes #6951
2018-12-11 12:05:41 -08:00
Harshavardhana
76d9d54603 Filter listing buckets based on user level access (#6940)
Fixes #6701
2018-12-10 22:57:22 +05:30
Harshavardhana
4c7c571875 Support JSON to CSV and CSV to JSON output format conversion (#6910)
This PR implements one of the pending items in issue #6286
in S3 API a user can request CSV output for a JSON document
and a JSON output for a CSV document. This PR refactors
the code a little bit to bring this feature.
2018-12-07 14:55:32 -08:00
James Neiman, President
313ba74b09 Update to Minio GCS Gateway (#6887) 2018-12-06 10:09:37 -08:00
Harshavardhana
3e124315c8 Increase the keep alive timeout to 30 secs (#6924)
Go by default uses 3 minutes; we should use
at least 30 secs, as 10 secs is too aggressive.
2018-12-06 22:56:16 +05:30
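The 3-minute default comes from Go's TCP keep-alive listener; a sketch of an equivalent listener with a 30s period (it mirrors the stdlib's unexported tcpKeepAliveListener, not MinIO's exact code):

```
package main

import (
	"net"
	"net/http"
	"time"
)

type keepAliveListener struct{ *net.TCPListener }

func (l keepAliveListener) Accept() (net.Conn, error) {
	c, err := l.AcceptTCP()
	if err != nil {
		return nil, err
	}
	c.SetKeepAlive(true)
	c.SetKeepAlivePeriod(30 * time.Second) // instead of the 3-minute default
	return c, nil
}

func main() {
	ln, err := net.Listen("tcp", ":9000")
	if err != nil {
		return
	}
	http.Serve(keepAliveListener{ln.(*net.TCPListener)}, nil)
}
```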
Minio Trusted
78a0fd951e Update yaml files to latest version RELEASE.2018-12-06T01-27-43Z 2018-12-06 01:35:43 +00:00
Anis Elleuch
40852801ea fix: Better check of RPC type requests (#6927)
guessIsRPCReq() considers all POST requests as RPC but doesn't
check if this is an object operation API or not, which actually
confuses the bucket forwarder handler when it receives a new multipart
upload API call, which is a POST HTTP request.

Due to this bug, users having a federated setup are not able to
upload a multipart object using an endpoint which doesn't actually
contain the specified bucket that will store the object.

Hence this commit will fix the described issue.
2018-12-05 14:28:48 -08:00
poornas
f6980c4630 fix ConfigSys and NotificationSys initialization for NAS (#6920) 2018-12-05 14:03:42 -08:00
Harshavardhana
8fcc787cba Register notFound handler only once per root router (#6926)
Registering the notFound handler more than once causes
gorilla mux to return an error for all registered paths
when there are more than 8 of them. This might be a bug in
gorilla/mux, but we shouldn't be using it this way. The NotFound
handler should only be registered once per root router.

Fixes #6915
2018-12-05 11:54:12 -08:00
Praveen raj Mani
4e6d3c093f Errors in notification config should not crash the server (#6881)
Fixes #6870
2018-12-04 18:27:12 -08:00
Anis Elleuch
61145361fd Fallback to non-loopback IF addresses for Domain IPs (#6918)
When MINIO_PUBLIC_IPS is not specified and no endpoints are passed
as arguments, fall back to the addresses of non-loopback interfaces.

This is useful so users can avoid setting MINIO_PUBLIC_IPS in docker
or orchestration scripts, since users naturally set up an internal
network that connects all instances.
2018-12-04 17:35:22 -08:00
James Neiman, President
950b4ad9af Update to How to secure access to Minio server with TLS (#6845) 2018-12-04 17:30:39 -08:00
Harshavardhana
6add646130 Fix logging of initialization errors in distributed mode (#6914) 2018-12-04 10:25:56 -08:00
Harshavardhana
20e61fb362 Redirect browser requests only if browser is enabled (#6909)
This PR fixes an issue introduced in PR #6848, when
browser is disabled we shouldn't re-direct the requests
returning AccessDenied.

Fixes #6907
2018-12-04 13:08:24 +05:30
Bala FA
18ced1102c handle post policy only if it is set. (#6852)
Previously the policy in the POST form was assumed to always be set.
This is fixed by doing the check only when the policy is set.
2018-12-03 12:01:28 -08:00
Harshavardhana
d6af3c1237 Add bucket notification support for NAS gateway (#6908)
Fixes #6885
2018-12-03 14:02:14 +05:30
Andreas Auernhammer
5549a44566 rename vault namespace env variable to be more idiomatic (#6905)
This commit renames the env variable for vault namespaces
such that it begins with `MINIO_SSE_`. This is the prefix
for all Minio SSE related env. variables (like KMS).
2018-12-01 05:28:49 -08:00
Praveen raj Mani
e7af31c2ff Removed clientID from NATS-Streaming Config (#6391)
clientID must be a unique `UUID` for each connection. Now the
server generates it, rather than taking it from the config.

Removing it as it is non-beneficial right now.

Fixes #6364
2018-11-30 10:46:17 +05:30
Minio Trusted
e7971b1d55 Update yaml files to latest version RELEASE.2018-11-30T03-56-59Z 2018-11-30 04:02:10 +00:00
Harshavardhana
26120d7838 Ignore permission errors on config-dir (#6894) 2018-11-29 18:14:05 -08:00
Harshavardhana
bef7c01c58 Choose right users in federation mode for CopyObject (#6895) 2018-11-29 17:35:11 -08:00
poornas
6a8ccc5925 update README.md (#6893) 2018-11-29 15:50:57 -08:00
kannappanr
d85199e9de Vendorize minio-go (#6883)
Fixes #6873
2018-11-29 11:13:03 -08:00
Harshavardhana
8608a84c23 Ignore hidden directory .snapshot for NetApp volumes (#6889) 2018-11-29 11:39:21 +05:30
Harshavardhana
b7226f4c82 Add transaction lock when migrating configs (#6878)
When migrating configs it happens often that some
servers fail to start due to version mismatch etc.

Hold a transaction lock such that all servers get
serialized.
2018-11-28 17:38:23 -08:00
Pontus Leitzler
f930ffe9e2 Fix gcs context (#6869) 2018-11-28 13:49:51 +05:30
James Neiman, President
b50a245208 Update to Minio Multi-Tenant Deployment Guide (#6871)
Initial edits.
2018-11-27 18:03:07 -08:00
poornas
45bb11e020 Set namespace on vault client if VAULT_NAMESPACE env is set (#6867) 2018-11-27 14:42:32 -08:00
jingsam
b65cf281fd Update azure.md (#6834) 2018-11-27 14:05:27 -08:00
Xie Yanbo
f781548b0c fix typo (#6812) 2018-11-27 14:04:50 -08:00
jingsam
25ee8e74f7 Update README.md (#6832) 2018-11-27 14:04:11 -08:00
jingsam
c975d2cc7e Update README.md (#6833) 2018-11-27 14:03:08 -08:00
jingsam
ea66528739 Update gcs.md (#6835) 2018-11-27 14:02:23 -08:00
Harshavardhana
e1164103d4 Reject if tokens are missing for temp credentials (#6860) 2018-11-27 13:24:04 -08:00
Harshavardhana
83fe70f710 Fix CopyObject regression calculating md5sum (#6868)
CopyObject() failed to calculate a proper md5sum
when called without encryption headers. This is a regression
perhaps introduced in commit 5f6d717b7a

Fixes https://github.com/minio/minio-go/issues/1044
2018-11-27 13:23:32 -08:00
Harshavardhana
12a6523fb2 Do not delete parts in multipart if 0 bytes (#6855)
This can create inconsistencies, i.e. Parts might have
fewer entries than ChecksumInfos. This will
result in the object not being readable.

This PR also allows for deleting previously created
corrupted objects.
2018-11-26 13:20:21 -08:00
Harshavardhana
dba61867e8 Redirect browser requests returning AccessDenied (#6848)
Anonymous requests to S3 resources returning
AccessDenied should be auto-redirected to the browser
for login.
2018-11-26 12:15:12 -08:00
Anis Elleuch
dd092f6c2b gateway: Properly set globalMinioPort (#6859)
globalMinioPort is used in federation, which stores the address
and the port number of the server hosting the specified bucket.
Federation relies on globalMinioPort, but it was not being set
during startup in gateway mode.

This commit fixes the behavior.
2018-11-26 23:19:38 +05:30
Nitish Tiwari
2a810c7da2 Improve du thread performance (#6849) 2018-11-26 10:35:14 +05:30
Nitish Tiwari
dd8c2aa5c6 Cleanup Kubernetes documentation (#6861)
Also add details on why Readiness checks are not recommended for Minio
StatefulSets.
2018-11-25 13:34:20 -08:00
Harshavardhana
9e3fce441e Audit log claims from token (#6847) 2018-11-22 09:33:24 +05:30
Harshavardhana
d4265f9a13 Simplify OPA to use rootCAs custom transport (#6843)
Also close the connections properly to use the
connection pooling properly for HTTP clients.
2018-11-22 08:31:05 +05:30
Minio Trusted
2fc024e880 Update yaml files to latest version RELEASE.2018-11-22T02-51-56Z 2018-11-22 02:57:28 +00:00
Harshavardhana
eddf468aef Lock the targetList properly in go-routines (#6838)
Fixes #6483
2018-11-21 21:25:54 +05:30
Ashish Kumar Sinha
b0d04b9a81 Retry Connection for RabbitMQ (#6837)
Add retries to connect to RabbitMQ 5 times 
with 2s interval

Fixes #6807
2018-11-21 08:37:29 +05:30
Harshavardhana
a9de303d8b Update command line docs (#6839) 2018-11-20 17:35:33 -08:00
Anis Elleuch
69bd6df464 storage: Implement Close() in REST client (#6826)
Calling /minio/prometheus/metrics calls xlSets.StorageInfo(), which creates a new
storage REST client and closes it. However, currently, closing does nothing
to the underlying opened HTTP client.

This commit introduces closing behavior by calling CloseIdleConnections,
provided by http.Transport, upon the initialization of the REST client.
2018-11-20 11:07:19 -08:00
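A sketch of the Close() wiring (type names hypothetical):

```
package main

import "net/http"

// restClient keeps a handle on its Transport so Close() can actually
// release pooled connections; previously Close() was a no-op.
type restClient struct {
	httpClient *http.Client
	transport  *http.Transport
}

func newRESTClient() *restClient {
	tr := &http.Transport{}
	return &restClient{httpClient: &http.Client{Transport: tr}, transport: tr}
}

func (c *restClient) Close() {
	c.transport.CloseIdleConnections()
}

func main() {
	c := newRESTClient()
	defer c.Close()
}
```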
Harshavardhana
bfb505aa8e Refactor logging in more Go idiomatic style (#6816)
This refactor brings a change which allows
targets to be added in a cleaner way and also
audit is now moved out.

This PR also simplifies logger dependency for auditing
2018-11-19 14:47:03 -08:00
poornas
d732b1ff9d Fix to cache objects on downloads (#6828)
fixes #6817
2018-11-19 11:00:46 -08:00
Igor K
ef517bd0da update for build on DilOS (#6770) 2018-11-19 19:40:02 +05:30
Minio Trusted
32d837cf88 Update yaml files to latest version RELEASE.2018-11-17T01-23-48Z 2018-11-17 01:30:29 +00:00
Anis Elleuch
7b579caf68 heal: Fix heal sequences cleanup process (#6780)
The current code triggers a timeout to clean up a heal sequence from
healSeqMap, but we don't know whether or not the user launched a new
healing sequence with the same path.

Add endTime to the healSequence struct and add a periodic heal-sequence
cleaner to remove heal sequences only if they are older than
10 minutes.
2018-11-16 14:59:51 -08:00
Harshavardhana
272b8003d6 Honor header only when requested for use (#6815) 2018-11-16 10:27:48 -08:00
Anis Elleuch
1c24c93f73 storage: Upgrade REST version after adding WriteAll API (#6819)
Rolling updates don't work properly because the Storage REST API gained
a new API, WriteAll(), without an API version number increase.

Also be sure to return 404 for unknown http paths.
2018-11-15 12:18:58 -08:00
Harshavardhana
712abc7958 Fix anonymous downloads URL generation (#6800)
Fixes #6778
2018-11-14 18:05:10 -08:00
poornas
5f6d717b7a Fix: Preserve MD5Sum for SSE encrypted objects (#6680)
To conform with the AWS S3 spec on ETag for SSE-S3 encrypted objects,
encrypt the client-sent MD5Sum and store it on the backend as ETag. Extend
this behavior to SSE-C encrypted objects.
2018-11-14 17:36:41 -08:00
Harshavardhana
7e1661f4fa Performance improvements to SELECT API on certain query operations (#6752)
This improves the performance of certain queries dramatically,
such as 'count(*)' etc.

Without this PR
```
~ time mc select --query "select count(*) from S3Object" myminio/sjm-airlines/star2000.csv.gz
2173762

real	0m42.464s
user	0m0.071s
sys	0m0.010s
```

With this PR
```
~ time mc select --query "select count(*) from S3Object" myminio/sjm-airlines/star2000.csv.gz
2173762

real	0m17.603s
user	0m0.093s
sys	0m0.008s
```

Almost a 250% improvement in performance. This PR avoids a lot of type
conversions and instead relies on raw sequences of data and interprets
them lazily.

```
benchcmp old new
benchmark                        old ns/op       new ns/op       delta
BenchmarkSQLAggregate_100K-4     551213          259782          -52.87%
BenchmarkSQLAggregate_1M-4       6981901985      2432413729      -65.16%
BenchmarkSQLAggregate_2M-4       13511978488     4536903552      -66.42%
BenchmarkSQLAggregate_10M-4      68427084908     23266283336     -66.00%

benchmark                        old allocs     new allocs     delta
BenchmarkSQLAggregate_100K-4     2366           485            -79.50%
BenchmarkSQLAggregate_1M-4       47455492       21462860       -54.77%
BenchmarkSQLAggregate_2M-4       95163637       43110771       -54.70%
BenchmarkSQLAggregate_10M-4      476959550      216906510      -54.52%

benchmark                        old bytes       new bytes      delta
BenchmarkSQLAggregate_100K-4     1233079         1086024        -11.93%
BenchmarkSQLAggregate_1M-4       2607984120      557038536      -78.64%
BenchmarkSQLAggregate_2M-4       5254103616      1128149168     -78.53%
BenchmarkSQLAggregate_10M-4      26443524872     5722715992     -78.36%
```
2018-11-14 15:55:10 -08:00
Pontus Leitzler
f9779b24ad Enable default vet flags (#6810)
Enable default vet flags except experimental
2018-11-14 10:23:44 -08:00
Harshavardhana
f1f23f6f11 Add sync mode for 'xl.json' (#6798)
xl.json is the source of truth for all erasure
coded objects, without which we won't be able to
read the objects properly. This PR enables sync
mode for writing `xl.json` such that all writes
hit the disk and are persistent under situations such
as abrupt power failures on servers running Minio.
2018-11-14 19:48:35 +05:30
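On a POSIX system this sync mode boils down to opening with O_SYNC; a sketch (file contents invented):

```
package main

import "os"

func main() {
	// O_SYNC makes each write synchronous: the call returns only after
	// the data reaches stable storage, so xl.json survives power loss.
	f, err := os.OpenFile("xl.json", os.O_CREATE|os.O_WRONLY|os.O_SYNC, 0644)
	if err != nil {
		return
	}
	defer f.Close()
	f.Write([]byte(`{"version":"1"}`))
}
```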
Harshavardhana
cf26c937e4 Remove UA worm and cache (#6809) 2018-11-14 13:18:55 +05:30
Krishna Srinivas
f19f957668 Remove unused repos from vendor.json (#6808) 2018-11-14 10:00:25 +05:30
Harshavardhana
d6572879a8 Check for STS Action first to allow browser requests (#6796) 2018-11-13 15:53:06 -08:00
Anis Elleuch
b6ab8f50fa azure: Support non standard Azure cloud environments (#6712)
This change will allow users to enter the endpoint of the
storage account if the latter belongs to a different Azure
cloud environment, such as the US gov cloud.

e.g:

  `MINIO_ACCESS_KEY=testaccount \
     MINIO_SECRET_KEY=accountsecretkey \
     minio gateway azure https://testaccount.blob.usgovcloudapi.net`
2018-11-13 15:51:49 -08:00
Harshavardhana
c82acc599a Treat empty xl.json as file not found (#6804)
If the buffer is empty we can avoid parsing
it and treat it as if `xl.json`
were effectively missing.
2018-11-13 11:57:03 -08:00
Andreas Auernhammer
2447bb58dd change received system signal output to upper case (#6761)
This commit slightly improves the output of the minio server
in case of a received system signal.
2018-11-12 15:07:16 -08:00
Harshavardhana
a55a298e00 Make sure to log unhandled errors always (#6784)
In many situations while testing we encounter
ErrInternalError. To reduce logging we have
removed logging from quite a few places, which
is acceptable, but when ErrInternalError occurs
we should have a facility to log the corresponding
error; this helps to debug the Minio server.
2018-11-12 11:07:43 -08:00
Harshavardhana
2929c1832d Add sample STS request/response output (#6794) 2018-11-12 07:53:55 -08:00
Chester Li
aa2d8583ad Check key length before adding a new user. (#6790)
User's key should satisfy the requirement of `mc config host add`.
Check access key and secret key lengths before adding a new user,
to avoid creating a useless user which cannot be added into a config
host or log in to the browser.
2018-11-09 15:48:24 -08:00
kannappanr
df2d75a2a3 Cleanup unnecessary logs (#6788) 2018-11-09 14:03:37 -08:00
Harshavardhana
b24b320807 Set notification namespace for NotificationConfiguration (#6789) 2018-11-09 10:40:03 -08:00
kannappanr
c872c1f1dc Return default ETag if fs.json is empty (#6787) 2018-11-09 10:34:59 -08:00
Harshavardhana
a40610d331 Re-populate public key if JWT fails to parse (#6786)
This is done so that if WSO2 was re-configured
with new TLS certs, newer tokens signed
with the newer public key can be verified. Once re-populated,
parse the JWT again.
2018-11-08 17:01:20 -08:00
Harshavardhana
38978eb2aa Avoid decrypting encrypted multipart final size (#6776)
Multipart object final size is not a contiguous
encrypted object representation, so trying to
decrypt this size will lead to an error in some
cases. The multipart object should be detected first
and then decoded with its respective parts instead.

This PR handles this situation properly, added a
test as well to detect these in the future.
2018-11-08 10:27:21 -08:00
Harshavardhana
ca7c3a3278 Add 'mc config host add' command in multi-user doc (#6777) 2018-11-08 09:42:47 -08:00
Harshavardhana
d58fc68137 Fix shadowing issue in elasticsearch target (#6774) 2018-11-07 12:09:03 -08:00
Matthias Schneider
71c66464c1 feature: added nsq as broker for events (#6740) 2018-11-07 10:23:13 -08:00
Harshavardhana
bf414068a3 Parse and return proper errors with x-amz-security-token (#6766)
This PR also simplifies the token and access key validation
across our signature handling.
2018-11-07 20:10:03 +05:30
Eco
88959ce600 Format correction in server limits doc (#6773) 2018-11-06 14:50:11 -08:00
Ashish Kumar Sinha
572719872d Event Notification for ElasticSearch (#6764)
Using access format for Event Notification for Elastic Search
2018-11-06 11:38:54 -08:00
Minio Trusted
bdea19b583 Update yaml files to latest version RELEASE.2018-11-06T01-01-02Z 2018-11-06 01:05:53 +00:00
poornas
eb1f9c9916 Update KMS readme with vault quick start guide (#6747) 2018-11-05 13:01:18 -08:00
Andreas Auernhammer
d07fb41fe8 add key-rotation for SSE-S3 objects (#6755)
This commit adds key-rotation for SSE-S3 objects.
To execute a key-rotation a SSE-S3 client must
 - specify the `X-Amz-Server-Side-Encryption: AES256` header
   for the destination
 - The source == destination for the COPY operation.

Fixes #6754
2018-11-05 10:26:10 -08:00
Harshavardhana
a9cda850ca Add forceStop flag to provide facility to stop healing (#6718)
This PR also makes sure that we deal with the HTTP request
count by ignoring the on-going heal operation, i.e. it does
not wait on itself.
2018-11-04 19:24:16 -08:00
Harshavardhana
bef0318c36 Support audit logs with additional fields (#6738)
This PR adds support

- Request query params
- Request headers
- Response headers

AuditLogEntry is exported and versioned as well
starting with this PR.
2018-11-02 18:40:08 -07:00
Harshavardhana
3f19ea98bb Honor certs from config-dir (#6757)
This was a regression introduced in 9fe51e392b
2018-11-02 11:53:45 -07:00
Praveen raj Mani
c96073f985 Test for multiple values for x-amz-meta header added (#6753) 2018-11-02 11:32:18 -07:00
Harshavardhana
d2f240c791 Ignore windows hidden folders (#6735)
On Windows erasure coding setup if

```
~ minio server V:\ W:\ X:\ Z:\
```

is not possible due to NTFS creating a couple of
hidden folders; this PR allows minio to use
the entire drive.
2018-11-02 11:31:55 -07:00
Harshavardhana
6491dfbbd6 Fix etcd TLS handling (#6748)
etcd fails to connect if a TLS config is always set; make TLS
conditional on the input arguments instead.
2018-11-01 21:41:11 -07:00
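A sketch of the conditional TLS wiring with etcd clientv3 (helper name and import path assumed):

```
package main

import (
	"crypto/tls"

	"github.com/coreos/etcd/clientv3" // clientv3 import path of that era
)

func newEtcdClient(endpoints []string, tlsCfg *tls.Config) (*clientv3.Client, error) {
	cfg := clientv3.Config{Endpoints: endpoints}
	// Attach TLS only when the caller actually supplied certificates;
	// setting it unconditionally breaks plain-HTTP endpoints.
	if tlsCfg != nil {
		cfg.TLS = tlsCfg
	}
	return clientv3.New(cfg)
}

func main() {
	newEtcdClient([]string{"http://localhost:2379"}, nil)
}
```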
kannappanr
d9cfa5fcd3 Shouldn't require space in HTTP host header (#6743)
Fixes #6741
2018-10-31 15:31:42 -07:00
Aarushi Arya
89b14639a9 avoid using URL encoding to generate keys (#6731) 2018-10-31 15:07:20 -07:00
Harshavardhana
3f744c0361 Fix mimedb update files (#6744) 2018-10-31 14:15:27 -07:00
kannappanr
9ed7fb4916 Do not call multiple response.WriteHeader calls (#6733)
Execute method in s3Select package makes a response.WriteHeader call.

Not calling it again in SelectObjectContentHandler function in case of
error in s3Select.Execute call.
2018-10-31 14:09:26 -07:00
Eco
4280e68de3 Document not to use autogenerated keys with containers (#6737) 2018-10-31 12:09:16 +05:30
Harshavardhana
f162d7bd97 Performance improvements by re-using record buffer (#6622)
Avoid unnecessary pointer reference allocations
when not needed, for example

- *SelectFuncs{}
- *Row{}
2018-10-31 08:48:01 +05:30
Harshavardhana
36990aeafd Avoid double bucket validation in DeleteObjectHandler (#6720)
On a heavily loaded server, getBucketInfo() becomes slow;
one can easily observe that deleting an object causes many
additional network calls.

This PR lets the underlying call return the actual
error and write it back to the client.
2018-10-30 16:07:57 -07:00
Harshavardhana
9fe51e392b Support etcd TLS certficates (#6719)
This PR supports two models for etcd certs

- Client-to-server transport security with HTTPS
- Client-to-server authentication with HTTPS client certificates
2018-10-29 11:14:12 -07:00
Harshavardhana
7e879a45d5 Add policy claim support for JWT (#6660)
This way temporary credentials can use canned
policies on the server without configuring OPA.
2018-10-29 11:08:59 -07:00
poornas
1c911c5f40 Fix: Validate copy-part encryption header and metadata (#6725)
Otherwise CopyObjectPart would continue to upload the part with an incorrect
encryption option and fail when the upload is finalized.
2018-10-29 06:40:34 -07:00
poornas
bd8dc17b7a gateway s3:Make sure to convert s3 errors to ObjectLayer errors (#6717) 2018-10-28 22:11:20 -07:00
Harshavardhana
8491a29ec3 Fix healing bucket properly (#6716)
Bucket should be healed properly if it partially exists
on only one set, since bucket is common for all sets.

Fixes #6710
2018-10-28 14:13:17 -07:00
Pontus Leitzler
81d21850ec Root CAs can be used for backend without TLS (#6711) 2018-10-28 06:21:00 +05:30
kannappanr
c6ec3fdfba Return error response header only for HEAD method (#6709) 2018-10-26 18:03:17 -07:00
Anis Elleuch
88c3dd49c6 copy: Ensure that the user has GET access to the src object (#6715) 2018-10-26 16:12:44 -07:00
kannappanr
6869f6d9dd Remove unwanted logs (#6708) 2018-10-26 14:41:25 -07:00
Harshavardhana
3f643acb99 HealBucket was double counting endpoints (#6707)
Comparing endpoints blindly without checking
whether they are local is wrong, because the actual drive
for a local disk always provides just
the path without the HTTP endpoint.

Add code such that this is taken care of properly in
all situations. Without this PR HealBucket() would
wrongly conclude that the healing doesn't have quorum
when a larger number of local disks is involved.

Fixes #6703
2018-10-26 10:25:52 -07:00
Pontus Leitzler
ea73accefd Remove h2 from NextProtos since it doesn't work (#6705) 2018-10-26 12:48:39 +05:30
Harshavardhana
555d54371c Fix CopyObjectPart broken source encryption support (#6699)
Current master didn't support CopyObjectPart when source
was encrypted, this PR fixes this by allowing range
CopySource decryption at different sequence numbers.

Fixes #6698
2018-10-25 08:50:06 -07:00
Harshavardhana
bab4c90c45 Fix broken links in docs (#6700) 2018-10-25 11:39:31 +05:30
Minio Trusted
a2fc0b14d6 Update yaml files to latest version RELEASE.2018-10-25T01-27-03Z 2018-10-25 01:31:57 +00:00
Harshavardhana
bf66e9a529 Reload etcd users and policies properly (#6694)
Currently there was a bug in how we reload users and policies
which leads to users/policies going missing due to wrong path
construction.

Fixes #6693
2018-10-24 17:40:06 -07:00
Harshavardhana
fde8c38638 Add default canned policies (#6690) 2018-10-24 17:14:27 -07:00
Kaan Kabalak
e6252dee5a Fix links not working on Docs site (#6692)
The relative link paths that weren't working have been changed to
direct links to the corresponding Github pages.
2018-10-24 17:00:26 -07:00
Praveen raj Mani
ecb042aa1c Copy and CopyPart changes for compression (#6669)
This PR fixes

- The target object should be compressed even if the
  source object is not compressed.

- The actual size for an encrypted object should be the
  `decryptedSize`
2018-10-23 11:46:20 -07:00
Anis Elleuch
e29009d347 Register postgre driver in pkg/event/target (#6689)
Commit 5c13765168 removed the postgre driver registration, triggered
by the automatic gofmt command, but it was the only place where pg is registered. This commit
fixes the behavior and adds unit tests to check whether postgre & sql are registered or not.
2018-10-23 11:44:46 -07:00
Pontus Leitzler
9631d65552 Fix goroutine test fatalf (#6682)
Use t.Error/t.Errorf instead of t.Fatal/t.Fatalf.

Add returns to achieve the same behaviour as earlier.
2018-10-23 09:44:20 -07:00
Nitish Tiwari
7b7be66fa1 Cleanup Kubernetes documentation (#6678) 2018-10-23 18:22:43 +05:30
Harshavardhana
b99aaab42e Sid value can be any unicode character support it (#6676)
Fixes #6476
2018-10-23 16:11:06 +05:30
Nitish Tiwari
32bd1b31e9 Fix images for 8 node distributed deployment (#6685)
fixes #6633
2018-10-23 10:50:38 +05:30
Eco
f287b15e71 docs/minio-limits.md formatting (#6683)
Formatted docs to show missing "\" character, added "/" to list of unsupported chars and made note of the fact that list is not exhaustive.
2018-10-22 21:00:46 -07:00
Andreas Auernhammer
586466584f fix wrong actual part size assignment in CopyObjectPart (#6652)
This commit fixes a wrong assignment to `actualPartSize`.
The `actualPartSize` for an encrypted src object is not `srcInfo.Size`
because that's the encrypted object size which is larger than the
actual object size. So the actual part size for an encrypted
object is the decrypted size of `srcInfo.Size`.
2018-10-22 14:23:23 -07:00
Ashish Kumar Sinha
c0b4bf0a3e SQL select query for CSV/JSON (#6648)
select * , select column names have been implemented for CSV.
select * is implemented for JSON.
2018-10-22 12:12:22 -07:00
Praveen raj Mani
acf46cc3b5 SetConfigHandler should avoid setting an invalid notification config (#6679)
Fixes #6642 
Fixes #6641
2018-10-22 11:51:26 -07:00
Guido García
06ef8248c3 docs: add link to s3 gateway (#6666)
Minor change: Add a link to S3 gateway to make it easier to find that info.
2018-10-22 11:47:13 -07:00
Aarushi Arya
7c2ae4eaf7 Remove tmp file and multipart folder in FS mode. (#6677)
Fixes #6588
2018-10-22 07:36:30 -07:00
Harshavardhana
989d7af9ac Handle corrupted files correctly by fixing quorum (#6670)
This PR completes the missing functionality from #6592
2018-10-19 11:00:09 -07:00
Andreas Auernhammer
8a6c3aa3cd crypto: add RemoveInternalEntries function (#6616)
This commit adds a function for removing crypto-specific
internal entries from the object metadata.

See #6604
2018-10-19 10:50:52 -07:00
Harshavardhana
62b560510b Fix SSE-C source decryption handling (#6671)
Without this fix we have room for two different types of
errors.
- Source is encrypted and we didn't provide any source encryption keys.

This results in an IncompleteBody error being returned to the client,
since the source is encrypted and we gave the reader as-is to the object
layer, which then read a decrypted value, leading to "IncompleteBody"

- Source is not encrypted and we provided source encryption keys.

This results in a corrupted object on the destination, which is
considered encrypted but cannot be read by the server, returning
the following error:

```
<Error><Code>XMinioObjectTampered</Code><Message>The requested object
was modified and may be compromised</Message><Resource>/id-platform-gamma/
</Resource><RequestId>155EDC3E86BFD4DA</RequestId><HostId>3L137</HostId>
</Error>
```
2018-10-19 10:41:13 -07:00
Harshavardhana
0edfb32621 Fix multi-user doc (#6662) 2018-10-19 12:35:44 +05:30
poornas
7e0f1eb8b5 Fix: verify client sent md5sum in encrypted PutObjectPart request (#6668)
This PR also removes check for SSE-S3 headers as this
is not required by S3 specification.
2018-10-18 16:05:05 -07:00
Pontus Leitzler
b43e8337b1 Add error handling in api-resource.go (#6651) 2018-10-18 07:31:46 -07:00
Harshavardhana
30040fba45 Add windows CI on travis (#6661) 2018-10-18 08:48:53 +05:30
Minio Trusted
44cf9ac62f Update yaml files to latest version RELEASE.2018-10-18T00-28-58Z 2018-10-18 00:34:26 +00:00
poornas
3b55357045 fix:Init globalIAMSys for ExecObjectLayerAPITest (#6636) 2018-10-17 17:25:50 -07:00
Harshavardhana
18d9a20ff6 Enable admin users API on gateway (#6659)
This is only enabled when etcd is enabled; healing is only
enabled for the erasure coded backend.
2018-10-17 17:25:16 -07:00
Anis Elleuch
6590aba6d2 xl: PUT an empty dir on an existing prefix succeed (#6658)
This commit fixes a regression introduced in f187a16962.
The regression returned AccessDenied when a client is trying to create an empty
directory on an existing prefix, though it should return 200 OK to stay as
close as possible to the S3 specification.
2018-10-17 16:37:02 -07:00
Harshavardhana
2e81f27d27 Allow all browser calls to honor multi-users (#6645)
- GetAuth
- SetAuth
- GenerateAuth

Disallow changing bucket level metadata or creating/deleting buckets.
2018-10-17 16:23:09 -07:00
Anis Elleuch
ae3c05aa37 Avoid printing i/o closed pipe error message (#6654)
Since the refactoring to the GetObjectNInfo style, there are many cases
where an i/o closed pipe error is printed, e.g. when downloading an object
with a wrong encryption key. This PR removes the log.
2018-10-17 15:52:18 -07:00
Praveen raj Mani
cef044178c Treat columns with spaces in between [s3Select] (#6597)
Replace the double/single quotes with backticks for the xwb1989/sqlparser
to recognise such queries.

Fixes #6589
2018-10-17 11:01:26 -07:00
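A minimal sketch of the described rewrite, assuming a naive string-level replacement (the `quoteToBacktick` helper is hypothetical; the real change may operate on tokens instead, since a blind replace would also rewrite string literals in WHERE clauses):

```
package main

import (
	"fmt"
	"strings"
)

// quoteToBacktick is a hypothetical helper: it rewrites quoted
// identifiers into backtick-quoted ones that xwb1989/sqlparser accepts.
func quoteToBacktick(query string) string {
	return strings.NewReplacer(`"`, "`", `'`, "`").Replace(query)
}

func main() {
	q := `SELECT "first name" FROM S3Object`
	fmt.Println(quoteToBacktick(q)) // SELECT `first name` FROM S3Object
}
```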
Pontus Leitzler
c998d1ac8c Add missing error check (#6632) 2018-10-17 10:57:12 -07:00
Eco
3457e504cf Spelling changes and fixed link (#6596) 2018-10-17 10:55:55 -07:00
Pontus Leitzler
7f0cca075a Update lint installation (#6650) 2018-10-17 21:59:04 +05:30
Wenjie
088c595e01 handle exception InvalidPart (#6649) 2018-10-17 21:50:58 +05:30
Harshavardhana
26b4b466df Fix a typo in multi-user doc (#6643) 2018-10-16 20:39:44 -07:00
Andreas Auernhammer
fdf691fdcc move SSE-C TLS enforcement into generic handler (#6639)
This commit moves the check that SSE-C requests
must be made over TLS into a generic HTTP handler.

Since the HTTP server uses custom TCP connection handling
it is not possible to use `http.Request.TLS` to check
for TLS connections. So using `globalIsSSL` is the only
option to detect whether the request is made over TLS.
By extracting this check into a separate handler it's possible
to refactor other parts of the SSE handling code further.
2018-10-16 19:22:09 -07:00
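A hedged sketch of what such a generic handler can look like, with `globalIsSSL` standing in for the server-wide TLS flag and the standard SSE-C algorithm header as the trigger (illustrative only, not the actual handler):

```
package example

import "net/http"

// globalIsSSL stands in for the server-wide TLS flag mentioned in the
// commit; http.Request.TLS is unusable with custom connection handling.
var globalIsSSL bool

// requireTLSForSSEC is a sketch of a generic handler that rejects
// SSE-C requests arriving over plain HTTP before they reach API code.
func requireTLSForSSEC(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Header.Get("X-Amz-Server-Side-Encryption-Customer-Algorithm") != "" && !globalIsSSL {
			http.Error(w, "SSE-C requests require TLS", http.StatusBadRequest)
			return
		}
		next.ServeHTTP(w, r)
	})
}
```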
Harshavardhana
88c8c2d6cd Fix browser login with multi users (#6644) 2018-10-16 18:44:58 -07:00
Nitish Tiwari
ef585037a0 Update config documentation (#6634) 2018-10-16 16:45:04 -07:00
Anis Elleuch
362ebdcbab xl: Add '.CORRUPTED' prefix to corrupted objects (#6592)
getObjectInfo() checks whether a given object has enough xl.json files on disk,
returns 404 if not, and adds a .CORRUPTED suffix to the object.
2018-10-16 15:49:35 -07:00
Harshavardhana
b251454dd6 Fix toggling users status (#6640) 2018-10-16 14:55:23 -07:00
Harshavardhana
21c8693d9c Disable printing access/secrets in systemd (#6621)
Minio when run as a service in `systemd` should
avoid printing access/secret keys.
2018-10-16 13:19:12 -07:00
Harshavardhana
1e7e5e297c Add canned policy support (#6637)
This PR adds an additional API where we can create
a new set of canned policies which can be used with one
or many users.
2018-10-16 12:48:19 -07:00
kannappanr
c7f180ffa9 Add code to translate errInvalidEncryptionParameters to APIErrcode (#6625)
Fixes #6623
2018-10-16 12:27:34 -07:00
kannappanr
b8bd8d6a03 Validate user provided SSE-C key on Head Object API (#6600)
Fixes #6598
2018-10-16 12:24:27 -07:00
Andreas Auernhammer
baec331e84 crypto: add functions for sealing/unsealing the etag for SSE (#6618)
This commit adds two functions for sealing/unsealing the
etag (a.k.a. content MD5) in case of SSE single-part upload.

Sealing the ETag is necessary in case of SSE-S3 to preserve
the security guarantees. In case of SSE-S3 AWS returns the
content-MD5 of the plaintext object as ETag. However, we
must not store the MD5 of the plaintext for encrypted objects.
Otherwise it becomes possible for an attacker to detect
equal/non-equal encrypted objects. Therefore we encrypt
the ETag before storing it on the backend. But we only need
to encrypt the ETag (content-MD5) if the client sends it -
otherwise the client cannot verify it anyway.
2018-10-16 10:02:19 -07:00
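To illustrate why the stored ETag differs from the plaintext content-MD5, here is a generic AEAD-based sealing sketch. This is an assumption-laden illustration of the idea (encrypt the MD5 under an object key so equal plaintexts do not yield equal stored ETags), not Minio's actual construction:

```
package example

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"io"
)

// sealETag illustrates the idea only: encrypt the plaintext content-MD5
// with a per-object key before storing it, so an attacker scanning the
// backend cannot detect equal objects. Minio's real scheme differs.
func sealETag(objectKey [32]byte, etag []byte) ([]byte, error) {
	block, err := aes.NewCipher(objectKey[:])
	if err != nil {
		return nil, err
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	nonce := make([]byte, aead.NonceSize())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return nil, err
	}
	// Prepend the nonce so the ETag can be unsealed later.
	return aead.Seal(nonce, nonce, etag, nil), nil
}
```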
poornas
557f382477 cache: remove cache space constraint (#6635)
Relax the cache constraint of requiring 100 times the size of the object
being cached, for better cache utilization.
2018-10-16 11:06:42 +05:30
Harshavardhana
23b166b318 Remove applying custom policies with STS access keys (#6626)
Move away from allowing custom policies; all policies in
STS come from OPA, otherwise they fail.
2018-10-15 12:44:03 -07:00
Anis Elleuch
81a481e098 profiling: Fix downloading tracing profiling data (#6599)
pkg/pprof saves tracing profiling data under a different file name
(trace.out), contrary to the other profiling modes.
2018-10-15 11:13:19 -07:00
Anis Elleuch
5b3090dffc encryption: Fix copy from encrypted multipart to single part (#6604)
The CopyObject handler forgot to remove the multipart encryption flag in metadata
when the source is an encrypted multipart object and the target is also encrypted
but a single-part object.

This PR also simplifies the code to facilitate review.
2018-10-15 11:07:36 -07:00
Harshavardhana
3ef3fefd54 Add ListUsers API to list all configured users in IAM (#6619) 2018-10-13 12:48:43 +05:30
Andreas Auernhammer
28e25eac78 crypto: add helper functions for unsealing object keys (#6609)
This commit adds 3 helper functions for SSE-C and SSE-S3
to simplify object key unsealing in server code.

See #6600
2018-10-12 18:06:38 -07:00
Harshavardhana
b0c9ae7490 Add audit logging for S3 and Web handlers (#6571)
This PR brings an additional logger implementation
called AuditLog which logs to http targets

The intention is to use AuditLog to log all incoming
requests, this is used as a mechanism by external log
collection entities for processing Minio requests.
2018-10-12 12:25:59 -07:00
Harshavardhana
143e7fe300 Add etcd support to support STS on gateway mode (#6531) 2018-10-12 11:32:18 -07:00
Andreas Auernhammer
f09e7ca764 fix travis CI build (#6620)
This commit fixes the Travis CI build by
correcting the golint import path
2018-10-11 14:58:44 -07:00
Mariska Hoogenboom
fae284d6b9 Docs fix for restart issue with orchestrated minio stack (#6606) (#6613) 2018-10-11 14:41:19 +05:30
poornas
83d8e01c81 fix: Close cacheReader if cache entry has expired (#6610)
prevent locking issues

Fixes #6602
2018-10-10 23:01:24 -07:00
poornas
110458cd10 Fix: Disallow requests with SSE-KMS headers (#6587)
Addresses issue #6582. Minio server currently does not
have SSE-KMS support. Reject requests with SSE-KMS headers
with NotImplementedErr
2018-10-09 15:04:53 -07:00
Aditya Manthramurthy
e3eec89d24 Optimize string processing in select (#6593)
Reduce allocations during string concatenation and simplify some
processing code.
2018-10-09 14:02:19 -07:00
Harshavardhana
54ae364def Introduce STS client grants API and OPA policy integration (#6168)
This PR introduces two new features

- AWS STS compatible STS API named AssumeRoleWithClientGrants

```
POST /?Action=AssumeRoleWithClientGrants&Token=<jwt>
```

This API endpoint returns temporary access credentials. Access
token signature types supported by this API:

  - RSA keys
  - ECDSA keys

It fetches the required public keys from the JWKS endpoints and provides
them as rsa or ecdsa public keys.

- External policy engine support, in this case OPA policy engine

- Credentials are stored on disks
2018-10-09 14:00:01 -07:00
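A minimal client-side sketch of calling the endpoint shown above, assuming a local server on port 9000 and a pre-obtained JWT; any additional request parameters and proper error handling are elided:

```
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	// Assumptions: a minio server on localhost:9000 and a JWT obtained
	// from the identity provider; "<jwt>" is a placeholder.
	v := url.Values{}
	v.Set("Action", "AssumeRoleWithClientGrants")
	v.Set("Token", "<jwt>")
	resp, err := http.Post("http://localhost:9000/?"+v.Encode(),
		"application/x-www-form-urlencoded", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	// On success the XML body carries the temporary access credentials.
	fmt.Println(string(body))
}
```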
Aditya Manthramurthy
16a100b597 Fix out-of-bound array access crash in select processing (#6594)
Fix test case.
2018-10-09 09:45:56 -07:00
Anis Elleuch
cbc5d78a09 Handle read/quorum errors when initializing all subsystems (#6585)
- Only require len(disks)/2 to initialize the cluster
- Fix checking of read/write quorum in subsystems init
- Add retry mechanism in policy and notification to avoid aborting in case of read/write quorum errors
2018-10-08 15:47:13 -07:00
Minio Trusted
d8a2975a68 Update yaml files to latest version RELEASE.2018-10-06T00-15-16Z 2018-10-06 00:19:47 +00:00
Anis Elleuch
66d911653f xl: Fix typo in PutObjectPart when part size is 10Mb (#6574)
PutObjectPart forgot to allocate buffer memory when the size
of the uploaded part is exactly equal to blockSizeV1 = 10 Mb.
2018-10-05 17:09:50 -07:00
Anis Elleuch
ca6f795504 xl: check for correct err variable when prepareFile fails in PutObjectPart (#6573)
In the xl.PutObjectPart call, prepareFile detected an error when the storage
is exhausted, but we were returning the wrong error.

With this commit, users can see the correct error message when their disks
become full.
2018-10-05 17:09:20 -07:00
Eco
2af0f11731 Update readme.md (#6568) 2018-10-05 16:25:22 -07:00
Harshavardhana
c3408f4f04 Send correct bucket notifications from web handlers (#6572)
Upload, Download, DownloadZip were incomplete
2018-10-05 11:20:00 -07:00
Minio Trusted
b92c324254 Update yaml files to latest version RELEASE.2018-10-05T01-03-03Z 2018-10-05 01:08:39 +00:00
Krishna Srinivas
81bee93b8d Move remote disk StorageAPI abstraction from RPC to REST (#6464) 2018-10-04 17:44:06 -07:00
Ashish Kumar Sinha
670f9788e3 Count(*) to give integer value (#6564)
The Max, Min functions were returning float values even when the inputs were integers.
Resolved Max and Min to return integers in that scenario.

Fixes #6472
2018-10-04 17:33:53 -07:00
Anis Elleuch
f187a16962 xl: Require full write quorum in rename() as general rule (#6535)
Simplify the logic of using rename() in xl. Currently, renaming
doesn't require the source object/dir to exist on at least
read quorum disks; since there is no apparent reason for that
as a general rule, this commit simplifies the logic to avoid
possible inconsistency in the backend in the future.
2018-10-04 17:22:49 -07:00
Anis Elleuch
e031f2b614 xl: Fix typo in PutObject when uploading a 10Mb file (#6567)
PutObject forgot to allocate memory when the size of the uploaded
file is exactly equal to blockSizeV1 = 10 Mb.
2018-10-04 15:37:15 -07:00
Harshavardhana
c2b7b82ef4 Verify bitrot in ReadFile correctly when verifier is set (#6563)
This is a major regression introduced in this commit

ce02ab613d is the first bad commit
commit ce02ab613d
Author: Krishna Srinivas <634494+krishnasrinivas@users.noreply.github.com>
Date:   Mon Aug 6 15:14:08 2018 -0700

    Simplify erasure code by separating bitrot from erasure code (#5959)

:040000 040000 794f58d82a d2201ebfc8 M	cmd

This affects all distributed server deployments since that commit

All the following releases are affected

- RELEASE.2018-09-25T21-34-43Z
- RELEASE.2018-09-12T18-49-56Z
- RELEASE.2018-09-11T01-39-21Z
- RELEASE.2018-09-01T00-38-25Z
- RELEASE.2018-08-25T01-56-38Z
- RELEASE.2018-08-21T00-37-20Z
- RELEASE.2018-08-18T03-49-57Z

Thanks to Anis for reproducing the issue
2018-10-04 10:16:38 -07:00
Harshavardhana
02ad9d6072 Avoid unsolicited response over idle http client (#6562)
Without this PR the minio server writes an erroneous
response to clients on idle connections, which ends
up printing the following message

```
Unsolicited response received on idle HTTP channel
```

This PR avoids sending responses on idle connections,
i.e. routine network errors.
2018-10-04 10:13:12 -07:00
Anis Elleuch
ea9408ccbb worm: when enabled, avoid renaming the existing object in tmp directory (#6560)
In XL PutObject & CompleteMultipartUpload, the existing object is renamed
to the temporary directory before checking if worm is enabled or not.

Most of the time, this doesn't cause an issue unless two uploads to the
same location occur at the same time. Since there is no locking in object
handlers, both uploads will reach the XL layer. The second client, acquiring
the write lock in put object or complete upload in XL, will rename the object
to the temporary directory before doing the check and returning the error (wrong!).

This commit therefore fixes the behavior: no rename to the temporary directory if
worm is enabled.
2018-10-03 20:39:24 -07:00
Pontus Leitzler
307765591d Use GetObjectInfo instead of GetObjectNInfo before cache decision (#6553) 2018-10-03 11:02:32 -07:00
Harshavardhana
5d859b2178 gateway/azure: allow putObject to support block based upload (#6552)
The current implementation simply buffers all the data in memory
and crashes when a large upload is initiated using the Minio
browser UI.

This PR uploads the stream in blocks and finally commits the blocks,
allowing for a low memory footprint while uploading large objects
through the Minio browser UI.

This PR also adds ETag compatibility for single PUT operations.

Fixes #6542
Fixes #6550
2018-10-02 23:08:16 -07:00
Harshavardhana
223967fd32 Return always a default heal item upon unexpected error (#6556)
Never return an empty result item, even upon error; choose
all the default values and, based on the errors,
make sure to send the right result reply.
2018-10-02 17:13:51 -07:00
Pontus Leitzler
274b35154c Fix go1.11 vet shadow errors (#6555) 2018-10-02 15:21:34 -07:00
Harshavardhana
c05ced08bb Handle Range requests on empty objects (#6557)
Return a proper error on empty objects; S3 returns
416 Invalid Range on objects which are empty.

We should return the same.
2018-10-02 12:48:51 -07:00
Praveen raj Mani
c7722fbb1b Simplify pkg mimedb (#6549)
Content-Type resolution can now use a function `TypeByExtension(extension)` 
to resolve to the respective content-type.
2018-10-02 11:48:17 +05:30
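The lookup pattern is the same shape as the standard library's `mime.TypeByExtension`, which can stand in for `mimedb.TypeByExtension` in a sketch (the exact mimedb signature may differ, e.g. whether the leading dot is expected):

```
package main

import (
	"fmt"
	"mime"
)

func main() {
	// Stand-in for mimedb.TypeByExtension; the stdlib variant expects
	// a leading dot, e.g. ".png".
	fmt.Println(mime.TypeByExtension(".png")) // image/png
}
```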
Harshavardhana
f163bed40d Add Vault support for custom CAs directory (#6527) 2018-10-01 13:49:10 -07:00
Harshavardhana
b4772849f9 Heal recursively all entries in config/ prefix (#6545)
This is to ensure that we heal all entries in the config/
prefix; we will have IAM and STS related files which
are being introduced in PR #6168.

This is a change to ensure that we heal all of them
properly, not just `config.json`
2018-10-01 22:24:26 +05:30
Harshavardhana
839a758a36 Fix a crash due to race between Abort/CompleteMultipart (#6544)
Fixes #6429
2018-10-01 09:50:09 -07:00
Harshavardhana
aebfceeafb Heal backend configuration file (#6532)
Fixes #6461
2018-09-29 13:47:01 +05:30
Anis Elleuch
83d7ec09c1 Disable restarting server after setting a new config (#6521)
Also disable listening to service restart event in tests since
we don't do this anymore.
2018-09-28 12:10:51 -07:00
Harshavardhana
8c29f69b00 Fix racy error communication inside go-routine (#6539)
Use CloseWithError to communicate errors in the pipe;
this PR also fixes potential shadowing of the error
2018-09-28 13:14:59 +05:30
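The `io.Pipe` idiom this fix refers to, sketched minimally: the writer side hands its error to the reader via `CloseWithError` instead of relying on a shared, shadow-prone error variable:

```
package main

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	pr, pw := io.Pipe()
	go func() {
		_, err := pw.Write([]byte("partial data"))
		if err == nil {
			err = errors.New("upstream failed") // simulate a mid-stream error
		}
		// CloseWithError hands the error to the reader; a plain
		// Close() would surface io.EOF and silently drop it.
		pw.CloseWithError(err)
	}()
	_, err := ioutil.ReadAll(pr)
	fmt.Println(err) // upstream failed
}
```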
Praveen raj Mani
ce9d36d954 Add object compression support (#6292)
Add support for streaming (golang/LZ77/snappy) compression.
2018-09-28 09:06:17 +05:30
Anis Elleuch
5c765bc63e Fix one possible data race in admin tests (#6537)
go test shows the following warning:
```
WARNING: DATA RACE
Write at 0x000002909e18 by goroutine 276:
  github.com/minio/minio/cmd.testAdminCmdRunnerSignalService()
      /home/travis/gopath/src/github.com/minio/minio/cmd/admin-rpc_test.go:44 +0x94

 Previous read at 0x000002909e18 by goroutine 194:
  github.com/minio/minio/cmd.testServiceSignalReceiver()
      /home/travis/gopath/src/github.com/minio/minio/cmd/admin-handlers_test.go:467 +0x70
```

The reason for this data race is that some admin tests are not waiting for goroutines
that they created to properly exit, which triggers the race detector.
2018-09-27 17:16:30 -07:00
Anis Elleuch
6c7c6bec91 profiler: Download API returns error when all nodes fail (#6525)
When download profiling data API fails to gather profiling data
from all nodes for any reason (including profiler not enabled),
return 400 http code with the appropriate json message.
2018-09-27 10:34:37 -07:00
poornas
ed703c065d Add ObjectOptions to GetObjectNInfo (#6533) 2018-09-27 15:36:45 +05:30
Aditya Manthramurthy
387584356f Remove unused range parsing code and update tests (#6530) 2018-09-27 15:24:07 +05:30
Harshavardhana
1111419d4a Add debugging for mutex, tracing (#6522) 2018-09-27 09:32:05 +05:30
Anis Elleuch
20378821cf madmin: close http response when returning an error (#6526)
httpRespToErrorResponse() usually reads the http response body, when
the http error code is unexpected, to parse the json error
response; however it was never properly closing
the connection. This PR fixes the behavior.
2018-09-26 11:03:35 -07:00
Pontus Leitzler
ce1bfa6de8 Removed unused vendored dependencies (#6520) 2018-09-25 14:51:51 -07:00
Minio Trusted
6c26227081 Update yaml files to latest version RELEASE.2018-09-25T21-34-43Z 2018-09-25 21:39:03 +00:00
Anis Elleuch
aa4e2b1542 Use GetObjectNInfo in CopyObject and CopyObjectPart (#6489) 2018-09-25 12:39:46 -07:00
Harshavardhana
1e5ac39ff3 Return HTTP response error for malformed requests (#6517) 2018-09-25 11:07:26 -07:00
Harshavardhana
ec2295c3dc Quickly build dev docker images using 'make docker' (#6505)
This PR simplifies the process of developer build of local
docker containers using `make docker`.

You need to provide a TAG i.e

```
TAG=y4m4/minio:exp make docker
```
2018-09-25 10:33:25 -07:00
Andreas Auernhammer
8cf7b88cc5 add functions to remove confidential information (#6516)
This commit adds two functions for removing
confidential information - like SSE-C keys -
from HTTP headers / object metadata.

This creates a central point grouping all
headers/entries which must be filtered / removed.

See also https://github.com/minio/minio/pull/6489#discussion_r219797993
of #6489
2018-09-24 21:02:51 +05:30
Harshavardhana
48bfebe442 HEAD on an object should mimic GET without body (#6508)
Add "Range" header support etc.
2018-09-23 22:54:10 +05:30
Pontus Leitzler
df60b3c733 Remove unnecessary contexts passed as data to FatalIf. No need to log an empty context. (#6487) 2018-09-21 16:04:11 -07:00
Aditya Manthramurthy
584cb61bb8 Switch back to GetObjectInfo for HEAD requests (#6513) 2018-09-21 13:48:58 -07:00
kannappanr
c1798cc89a Update GCS storage library to support GetObject API on gzipped objects (#6506)
Fixes #6280
2018-09-21 11:43:10 -07:00
Aditya Manthramurthy
3c8fabd116 Fix cleanup of pipe in GetObjectNInfo handlers (#6509) 2018-09-21 11:42:06 -07:00
Aditya Manthramurthy
36e51d0cee Add GetObjectNInfo to object layer (#6449)
The new call combines GetObjectInfo and GetObject, and returns an
object with a ReadCloser interface.

Also adds a number of end-to-end encryption tests at the handler
level.
2018-09-20 19:22:09 -07:00
Harshavardhana
7d0645fb3a Deprecate domain, browser as config entries (#6498) 2018-09-20 14:56:32 -07:00
Kanagaraj M
7c339e248a fix success alert not shown after object is uploaded (#6500) 2018-09-20 22:54:09 +05:30
Harshavardhana
b62ed5dc90 select API CSV may not be specified (#6493)
This should be present until we support JSON
2018-09-20 15:04:26 +05:30
Harshavardhana
f0641a0406 Avoid changing creds in streaming signature (#6495)
This PR fixes a potential issue where credentials can be
changed in middle of different chunks during data transfer
which can lead to unexpected bugs.
2018-09-19 16:52:05 -07:00
Harshavardhana
3d060f8b64 Peer/rpc should never honor RPC calls without object-layer (#6486)
Fixes #6484
2018-09-19 21:32:56 +05:30
Jay Mundrawala
052a7b8eec Allow minio s3 gateway to use different AWS auth mechanisms (#6422)
Allow minio s3 gateway to use aws environment credentials,
IAM instance credentials, or AWS file credentials.

If AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY are set,
or minio is running on an ec2 instance with IAM instance credentials,
or there is a file $HOME/.aws/credentials, minio running as an S3
gateway will authenticate with AWS S3 using one of those credentials.

The lookup order (see the sketch below):
1. AWS environment variables
2. IAM instance credentials
3. $HOME/.aws/credentials
4. minio environment variables

To authenticate with the minio gateway, you will always use the
minio environment variables MINIO_ACCESS_KEY MINIO_SECRET_KEY.
2018-09-19 18:05:30 +05:30
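The lookup order above matches what the aws-sdk-go credential chain provides; a hedged sketch of that chain (not necessarily the gateway's exact code):

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())
	// Providers are tried in order, mirroring the lookup order above.
	creds := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.EnvProvider{},
		&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess)},
		&credentials.SharedCredentialsProvider{}, // $HOME/.aws/credentials
	})
	v, err := creds.Get()
	if err != nil {
		panic(err)
	}
	fmt.Println("using access key:", v.AccessKeyID)
}
```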
Anis Elleuch
9531cddb06 Add Profiler Admin API (#6463)
Two handlers are added to admin API to enable profiling and disable
profiling of a server in a standalone mode, or all nodes in the
distributed mode.

/minio/admin/profiling/start/{cpu,block,mem}:
  - Start profiling and return the start status as JSON results,
    e.g. when one node is offline.

/minio/admin/profiling/download:
  - Stop the on-going profiling task
  - Stream a zip file which contains all profiling files that can
    be later inspected by go tool pprof
2018-09-18 16:46:35 -07:00
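A rough usage sketch of the two endpoints above; admin request signing is elided for brevity, so this is illustrative only:

```
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"time"
)

func main() {
	base := "http://localhost:9000/minio/admin/profiling"

	// Start CPU profiling (admin request signing elided for brevity).
	resp, err := http.Post(base+"/start/cpu", "application/json", nil)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()

	time.Sleep(30 * time.Second) // let the profiler collect samples

	// Stop profiling and download the zip of profile files.
	resp, err = http.Get(base + "/download")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, err := os.Create("profiles.zip")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	n, _ := io.Copy(out, resp.Body)
	fmt.Println("wrote", n, "bytes; inspect with go tool pprof")
}
```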
Harshavardhana
6fe9a613c0 Prioritize HTTP requests over Heal (#6468)
Additionally also heal 256 objects at any given
time in parallel.

Fixes #6196
Fixes #6241
2018-09-17 18:28:34 -07:00
Andreas Auernhammer
b729a4e83c Remove brittle TestServerTLSCiphers unit test (#5982)
The test TestServerTLSCiphers seems to fail sometimes for
no obvious reason. Actually the test is not needed
(as unit test) since minio/mint tests the server's TLS ciphers
as part of its security tests.

Fixes #5977
2018-09-17 15:55:09 +05:30
Harshavardhana
a0683d3c1f Send progress only when requested by client in SelectObject (#6467) 2018-09-17 11:52:46 +05:30
Anis Elleuch
66fda7a37f Use retry mechanism when initializing configuration (#6475)
Currently, one node in a cluster can fail to boot with the following error message:

```
ERROR Unable to initialize config system: Storage resources are insufficient for the write operation
```

  This happens when disks are formatted, read quorum is met but write
quorum is not met. In checkServerConfig(), an insufficient read quorum
error is replaced by errConfigNotFound; the code will generate a
new config json and try to save it, but it will fail because write
quorum is not met.

  Replacing a read quorum error with errConfigNotFound is also wrong because it
can lead, in rare cases, to overwriting the config set by the user.

  So, this commit adds a retry mechanism in configuration initialization
to retry only on read or write quorum errors.

  This commit will also fix the following cases:
 - Read quorum is lost just after the initialization of the object layer.
 - Write quorum not met when upgrading the configuration version.
2018-09-15 22:09:51 -07:00
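A minimal sketch of the retry shape described above, with hypothetical sentinel errors standing in for the server's quorum errors:

```
package example

import (
	"errors"
	"time"
)

// Hypothetical sentinel errors standing in for the server's
// read/write quorum errors.
var (
	errReadQuorum  = errors.New("read quorum not met")
	errWriteQuorum = errors.New("write quorum not met")
)

// initConfigWithRetry sketches the described behavior: retry
// initialization only for quorum errors, fail fast on anything else.
func initConfigWithRetry(init func() error) error {
	for {
		err := init()
		switch err {
		case nil:
			return nil
		case errReadQuorum, errWriteQuorum:
			time.Sleep(time.Second) // quorum may be met once more disks come up
		default:
			return err
		}
	}
}
```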
Harshavardhana
a63bc9254d Add 'disk' tag to log output to enhance 'disk not found' errors (#6460) 2018-09-13 21:42:50 -07:00
poornas
14fa0097b0 fix: UploadPart,CopyObjectPart does not need sse-s3 header (#6386)
S3 API spec for UploadPart requires encryption headers to be
specified only for SSE-C
2018-09-13 14:53:03 -07:00
Kanagaraj M
d99c397746 fix public directory listing not working in browser (#6466)
Fixes #6436
2018-09-13 14:43:50 -07:00
Harshavardhana
e3777b1dd9 Combine obtaining resource, host, method into one operation (#6465)
This also adds a reduced timeout for errant connections, to
be quickly closed if they can't even send HTTP headers properly.

Fixes #6459
2018-09-13 18:17:03 +05:30
Minio Trusted
63c03758e6 Update yaml files to latest version RELEASE.2018-09-12T18-49-56Z 2018-09-12 18:55:31 +00:00
Anis Elleuch
ce419c9835 Bump RPC version after ReadFile RPC arguments change (#6457)
The ReadFile RPC input argument was changed in commit a8f5939452959d27674560c6b803daa9;
however, RPC doesn't detect such a change when it calls other nodes running older versions.

Hence, bumping RPC version.

Fixes #6458
2018-09-12 10:45:28 -07:00
Harshavardhana
0c2b708484 for O_RDONLY mode hold shared locks on windows (#6454)
Fixes #6401
2018-09-12 09:29:41 -07:00
Harshavardhana
166e998788 Fix healthcheck for NAS gateway (#6452)
It was expected that in gateway mode we do not know
the backend types, whereas NAS gateway, since it is
an extension of FS mode (standalone), leads to
an issue in LivenessCheckHandler() which would perpetually
return 503. This would affect all kubernetes and openshift
deployments of NAS gateway.
2018-09-11 13:44:10 -07:00
Andreas Auernhammer
267a0a3dfa fix X-Amz-Credential parsing for V4 policy signature (#6451)
This commit fixes an AWS S3 incompatibility issue.
The AccessKeyID may contain one or more `/` which caused
the server to interpret parts of the AccessKeyID as
other `X-Amz-Credential` parameters (like date, region, ...)

This commit fixes this by allowing 5 or more
`X-Amz-Credential` parameter strings and only interpreting
the last 5.

Fixes #6443
2018-09-11 11:17:23 -07:00
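A sketch of the parsing idea: treat only the trailing date/region/service/request elements as credential-scope fields and join everything before them back into the access key (the helper name and exact field handling here are illustrative):

```
package main

import (
	"errors"
	"fmt"
	"strings"
)

// parseCredential sketches the fix: a credential scope is
// accessKey/date/region/service/aws4_request, so with 5 or more
// elements only the trailing fields are scope fields and everything
// before them (slashes included) is the access key.
func parseCredential(cred string) (accessKey, date, region string, err error) {
	parts := strings.Split(cred, "/")
	if len(parts) < 5 || parts[len(parts)-1] != "aws4_request" {
		return "", "", "", errors.New("malformed credential")
	}
	n := len(parts)
	return strings.Join(parts[:n-4], "/"), parts[n-4], parts[n-3], nil
}

func main() {
	ak, date, region, _ := parseCredential("tenant/user/20180911/us-east-1/s3/aws4_request")
	fmt.Println(ak, date, region) // tenant/user 20180911 us-east-1
}
```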
Minio Trusted
985fd7d4e7 Update yaml files to latest version RELEASE.2018-09-11T01-39-21Z 2018-09-11 01:43:45 +00:00
Harshavardhana
5479a6e33e Ignore migration if config files don't exist (#6448) 2018-09-10 18:24:57 -07:00
ebozduman
fb4186f6b9 Adds missing info to docs for credentials and domain env. vars. (#6447)
* Adds missing information to documentation for credentials and domain environment variables for distributed minio server startup.
2018-09-10 17:14:40 -07:00
Anis Elleuch
7571582000 Print storage errors during distributed initialization (#6441)
This commit will print connection failures to disks in other nodes
after 5 retries. It is useful for users to understand why the
distributed cluster fails to boot up.
2018-09-10 16:21:59 -07:00
Harshavardhana
12b4971b70 Rename config.json in config-dir with '.deprecated' extension (#6446)
Fixes #6444
2018-09-10 16:15:47 -07:00
poornas
5c0b98abf0 Add ObjectOptions to ObjectLayer calls (#6382) 2018-09-10 09:42:43 -07:00
Praveen raj Mani
30d4a2cf53 s3select should honour custom record delimiter (#6419)
Allow custom delimiters like `\r\n`, `a`, `\r` etc. in the input csv and
replace them with `\n`.

Fixes #6403
2018-09-10 21:50:28 +05:30
Anis Elleuch
92bc7caf7a Reword missing credentials error msg (#6418)
Slightly enhance the error message that is shown
when access & secret keys are not specified in the
environment when running Minio in gateway or server mode.

This commit also removes a redundant check of access/secret keys.
2018-09-09 22:51:48 +05:30
Harshavardhana
19202bae81 Allow backward compatible way to load creds from config.json (#6435)
Print warning message for users to migrate to newer style of distributed
deployment by always setting credentials as ENVs.

Fixes #6434
2018-09-07 11:18:49 -07:00
Praveen raj Mani
e7a4512a90 Redis documentation Fix (#6378)
Fixed a few typos and a missing `format` field in the example config provided.

Fixes #6356
2018-09-07 07:12:01 -07:00
Kaan Kabalak
ff822e64ca Show pointer when user hovers over folder name (#6412)
To indicate that there is an action tied to clicking on the
folder name in the Minio Browser, the default cursor has been replaced
with a pointer on folder name hover. This is a regression from #6193.

Fixes #6411
2018-09-07 14:15:53 +05:30
Annanay Agarwal
7cb87f863e Kafka (sarama) authentication with user/pass (#6291) 2018-09-07 00:01:58 -07:00
Nitish Tiwari
67d8396af4 Fix Manta gateway client creation flow (#6425)
This commit fixes the Manta gateway client creation flow. We now affix
the endpoint scheme with endpoint URL while creating the Manta client
for gateway.

Also add steps in Manta gateway docs on how to run with custom Manta
endpoint.

Fixes #6408
2018-09-07 08:41:42 +05:30
Janko Marohnić
8b0cc376f4 Remove "List Object Parts" from Azure limitations (#6427)
Since https://github.com/minio/minio/pull/5198 has been implemented,
this is not a limitation anymore.
2018-09-06 17:19:51 -07:00
Krishnan Parthasarathi
9e5c4df106 Gateway should honour --address flag while checking port availability (#6428)
Fixes #6426
2018-09-06 16:42:33 -07:00
Andreas Auernhammer
fd8749f42a return Access Denied for invalid SSE keys (#6432)
This commit fixes a regression in the server regarding
handling SSE requests with wrong SSE-C keys.

The server now returns an AWS S3 compatible API error (access denied)
in case the SSE key does not match the secret key used during upload.

Fixes #6431
2018-09-06 12:31:12 -07:00
Anis Elleuch
5c13765168 postgresql: Disable validation of connectionString field (#6397)
A bug concerning the validation of connectionString was found;
however there is no solution to fix it for now, and the postgresql API
doesn't help to do that, hence disabling validation of that field.
2018-09-06 20:34:52 +05:30
Anis Elleuch
3099af70a3 Add admin get/set config keys API (#6113)
This PR adds two new admin APIs in Minio server and madmin package:
- GetConfigKeys(keys []string) ([]byte, error)
- SetConfigKeys(params map[string]string) (err error)

A key is a path in Minio configuration file, (e.g. notify.webhook.1)

The user will always send a string value when setting it in the config file;
the API will know how to convert the value to the appropriate type. The user
is also able to set raw json.

Before setting a new config, Minio will validate all fields and try to connect
to notification targets if available.
2018-09-06 20:33:18 +05:30
Harshavardhana
fd1b8491db Drain response body properly for http connection pool (#6415)
Currently the Go http connection pool was not being properly
utilized, leading to degraded performance as the number
of concurrent requests increased.

As recommended by Go implementation, we have to drain the
response body and close it.
2018-09-05 16:47:14 -07:00
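The Go idiom being referred to, as a minimal sketch: drain the body fully and close it so the transport can return the connection to the keep-alive pool:

```
package example

import (
	"io"
	"io/ioutil"
	"net/http"
)

// doRequest shows the keep-alive idiom: drain the body fully and close
// it, otherwise the transport cannot reuse the TCP connection.
func doRequest(client *http.Client, req *http.Request) error {
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// ... consume resp.Body as needed, then drain any remainder.
	_, err = io.Copy(ioutil.Discard, resp.Body)
	return err
}
```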
Anis Elleuch
1961f2ef54 xl: Fix removing an empty directory (#6421)
Removing an empty directory was not working because xl.DeleteObject()
was only checking if the passed prefix is an actual object; it
should also check if it is an empty directory.
2018-09-05 16:38:03 -07:00
Harshavardhana
631c78e655 Bump up soMaxConn backlog for listener to 2048 (#6416)
The soMaxConn value is 128 on almost all linux systems;
this value is too low for Minio at times when used
against large concurrent workloads, e.g. spark applications.
This causes a sort of SYN flooding observed by the kernel;
to allow for a larger backlog, increase this value to 2048.

With this value we no longer see SYN flooding
kernel messages.
2018-09-05 13:16:19 -07:00
Harshavardhana
e0f8b767ba Fail for critical errors early on during prepare storage (#6404) 2018-09-05 10:20:54 -07:00
Harshavardhana
d0d015361c Fix config subsystem to wait on quorum number of formatted disks (#6407) 2018-09-05 20:55:55 +05:30
Krishna Srinivas
81b7e5c7a8 Send length instead of empty buffer for ReadFile() (#6414) 2018-09-04 23:22:05 -07:00
Harshavardhana
9e32cc283f Fix distributed docs to mention homogenous envs (#6405)
Also deprecate old syntax use only ellipses
2018-09-05 08:54:04 +05:30
Krishnan Parthasarathi
1126410e62 Implement ListMultipartUploads, ListObjectParts for GCS gateway (#6377)
ListMultipartUploads implementation is meant for docker-registry
use-case only. It lists only the first upload with a prefix matching
the object being uploaded.
2018-09-04 13:11:33 -07:00
Kanagaraj Mayilsamy
2c46214291 fix browser hang when listobjects is denied (#6385)
When a Deny ListObjects policy is set on a bucket, the browser
hangs while trying to list the objects. This is now
fixed by showing an appropriate error message and an
empty object list.

Fixes #6371
2018-09-04 13:02:02 -07:00
Barnaby Keene
d13bd5b9b5 Remove double backtick that was breaking docs (#6410)
On the documentation site, the double backtick with nothing in between was breaking the page render and making the text itself look quite awkward!
2018-09-04 12:06:57 -07:00
Minio Trusted
c8c70a3750 Update yaml files to latest version RELEASE.2018-09-01T00-38-25Z 2018-09-01 00:43:06 +00:00
ebozduman
882a1a1ccc Stops listing objects for write-only access (#6396) 2018-08-31 13:20:27 -07:00
Harshavardhana
8690d62146 Allow fallback listen if first listener fails (#6380)
On linux listen() uses kernel features TCP_FASTOPEN, DEFER_ACCEPT

Fixes #6379
2018-08-31 13:17:05 -07:00
Anis Elleuch
85117d554f xl: Avoid removing a directory if it is not an object dir (#6395)
DeleteObject should not remove any directory unless the latter
is an empty directory.

Fixes #6394
2018-08-31 13:16:35 -07:00
Harshavardhana
4487f70f08 Revert all GetObjectNInfo related PRs (#6398)
* Revert "Encrypted reader wrapped in NewGetObjectReader should be closed (#6383)"

This reverts commit 53a0bbeb5b.

* Revert "Change SelectAPI to use new GetObjectNInfo API (#6373)"

This reverts commit 5b05df215a.

* Revert "Implement GetObjectNInfo object layer call (#6290)"

This reverts commit e6d740ce09.
2018-08-31 13:10:12 -07:00
Harshavardhana
fb27388101 HTTP headers are case insensitive handle them appropriately (#6390)
An issue was reproduced when minio-js client functional
tests set lower case http headers; in our current
master branch we specifically look for the canonical host header,
which may not necessarily be true for all http clients.
This leads to a perpetual hang on the *net.Conn*.

This PR fixes the regression caused by #6206 by handling the
case insensitivity.
2018-08-31 17:00:32 +05:30
Kaan Kabalak
5e7ccc983d UI: Handle policies listed in mc as 'none' by not showing them (#6353) 2018-08-30 15:20:27 -07:00
Bala FA
72fa2b4537 Add RPC counters for HTTP stats. (#6206)
This patch introduces separate counters in the HTTP stats for the minio
reserved bucket.

Fixes #6158
2018-08-30 14:17:58 +05:30
Praveen raj Mani
5399d91965 Add version for appveyor to use go1.10.4 (#6384) 2018-08-30 14:09:31 +05:30
Harshavardhana
53a0bbeb5b Encrypted reader wrapped in NewGetObjectReader should be closed (#6383) 2018-08-29 19:18:00 -07:00
Harshavardhana
384a862940 Return quorum error based on disks in abortMultipartUpload (#6362)
Fixes #4980
2018-08-29 13:36:19 -07:00
Harshavardhana
029f52880b With no read quorum config should be treated as notFound (#6374)
This will allow the config subsystem to initialize properly
in situations where many servers are coming up in a rolling
fashion.
2018-08-28 14:23:22 -07:00
Harshavardhana
5b05df215a Change SelectAPI to use new GetObjectNInfo API (#6373)
This PR also removes some double checks
2018-08-28 13:08:30 -07:00
Harshavardhana
a13cd7b7c4 Separate build functional tests from Makefile (#6351)
Recently travis seems to have issues with builds after
merge, so this PR is an experiment to fix this.

Failed: https://travis-ci.org/minio/minio/builds/419769285#L2310

Succeeded: https://travis-ci.org/minio/minio/builds/419565606#L2322

This PR was tested to be working properly on my fork to build
on travis, would need to do the same in minio/minio and see
how it goes through.
2018-08-28 13:57:01 +05:30
Anis Elleuch
d524924b80 Fix gateway s3 doc to run custom S3 endpoint (#6369)
To pass a custom S3 endpoint to the S3 gateway, the user needs
to specify it as an argument after 'minio gateway s3' and not
as the '--address' option, since the latter specifies the address
on which the gateway should listen.
2018-08-28 10:09:07 +05:30
Aditya Manthramurthy
e6d740ce09 Implement GetObjectNInfo object layer call (#6290)
This combines calling GetObjectInfo and GetObject while returning a
io.ReadCloser for the object's body. This allows the two operations to
be under a single lock, fixing a race between getting object info and
reading the object body.
2018-08-27 15:28:23 +05:30
Harshavardhana
cea4f82586 Revert "Remove curl after the usage (#6347)" (#6366)
This reverts commit dafa5073cb.

This commit revert also fixes #6365
2018-08-26 23:40:21 -07:00
Minio Trusted
1d6ce115da Update yaml files to latest version RELEASE.2018-08-25T01-56-38Z 2018-08-25 02:04:10 +00:00
Anis Elleuch
06d2dfa31c Fix WORM and BROWSER status calculation (#6360)
A typo introduced in a recent commit miscalculated whether worm and browser
are enabled. A simple test is also added to detect this issue
in the future if it ever happens again.
2018-08-24 14:36:14 -07:00
poornas
d547873b17 webhandler - display encryption errors properly (#6339)
For encrypted objects, download errors need to be
displayed in web response format instead of xml format.

Fixes #6327
2018-08-24 07:56:24 -07:00
Praveen raj Mani
01721a840a Build fix for go1.11rc1 (#6354)
Vendorized the following packages

- "github.com/rjeczalik/notify"
- "github.com/minio/highwayhash"

Fixes #6315
2018-08-24 13:29:17 +05:30
Krishna Srinivas
52f6d5aafc Rename of structs and methods (#6230)
Rename of ErasureStorage to Erasure (and rename of related variables and methods)
2018-08-23 23:35:37 -07:00
Harshavardhana
2211a5f1b8 Avoid ListenBucket targets to be listed in ServerInfo (#6340)
In current master when you do `mc watch` you can see a
dynamic ARN being listed which exposes the remote IP as well

```
mc watch play/airlines
```

On another terminal
```
mc admin info play
●  play.minio.io:9000
   Uptime : online since 11 hours ago
  Version : 2018-08-22T07:50:45Z
   Region :
 SQS ARNs : arn:minio:sqs::httpclient+51c39c3f-131d-42d9-b212-c5eb1450b9ee+73.222.245.195:33408
    Stats : Incoming 30GiB, Outgoing 7.6GiB
  Storage : Used 7.7GiB
```

SQS ARNs listed as part of ServerInfo should be only external targets,
since listing an ARN here is not useful and it cannot be re-purposed in
any manner.

This PR fixes this issue by filtering out httpclient from the ARN list.

This is a regression introduced in #5294 0e4431725c
2018-08-23 23:31:14 -07:00
Harshavardhana
1ffa6adcd4 Ignore io.EOF returned by ReadFrom for zero byte fs.json (#6346)
Fixes #6256
2018-08-24 11:34:21 +05:30
kannappanr
add57a6938 Add content-length as part of event notification structure (#6341)
Fixes #6321
2018-08-23 14:40:54 -07:00
Manjunath A Kumatagi
dafa5073cb Remove curl after the usage (#6347) 2018-08-23 10:48:29 -07:00
Praveen raj Mani
65e05a06fb Remove notifications Fix (#6082)
Remove all the notifications for an empty rulesMap

Fixes #6053
2018-08-23 22:53:18 +05:30
Dee Koder
5c9354894b Update Issue & PR Templates to have regression information when available (#6342) 2018-08-23 01:41:22 -07:00
Harshavardhana
b01e69e08f Initialize global object layer after all subsystems have initialized (#6333)
This is to ensure that object API operations are not performed
on a server on which subsystems are yet to be initialized.
2018-08-22 23:11:17 -07:00
Raphael Randschau
8601f29d95 select: fix int overflow of math.MaxInt64 on ARM (#6317) 2018-08-22 16:16:04 +05:30
Nitish Tiwari
0aee722e3f Fix Minio browser screenshots in docs (#6334)
Fixes #6308
2018-08-22 13:15:36 +05:30
Harshavardhana
beb6d40ce6 Avoid crash when policy subsystem is not initialized (#6326)
Fixes #6324
2018-08-21 15:38:51 -07:00
poornas
19db921555 CopyObject: fix regression in key rotation (#6331)
After key rotation, metadata was not being replaced with new sealed key.
Regression introduced in commit e71ef905f9
2018-08-21 15:12:00 -07:00
Minio Trusted
68b9e9e7e7 Update yaml files to latest version RELEASE.2018-08-21T00-37-20Z 2018-08-21 00:44:41 +00:00
kannappanr
2d84b02bc4 Check for absence of checksum field and attributes. (#6298)
Fixes #6295
2018-08-20 16:58:47 -07:00
poornas
8b2801bd46 Update documentation to show how to update minio config fields (#6301)
- With recent commit 1fb2e9ef95, config
can no longer be updated by editing config.json. This is because config
has been migrated inside the minio backend. Update documentation on
how to set/get configuration using the mc admin config command.
2018-08-20 13:37:10 -07:00
Harshavardhana
7d7e21aebb Merge initConfig logic to ConfigSys (#6312) 2018-08-19 13:57:18 -07:00
Nitish Tiwari
bf14e5ce1b Fix distributed doc as total storage capacity is not displayed anymore (#6309) 2018-08-18 10:01:24 -07:00
Andreas Auernhammer
d531080b7e add SSE-KMS not-implemented error handling (#6234)
This commit adds error handling for SSE-KMS requests to
HEAD, GET, PUT and COPY operations. The server responds
with `not implemented` if a client sends a SSE-KMS
request.
2018-08-17 21:07:19 -07:00
Harshavardhana
a6b8a5487a Fail gateway properly with an error on port conflicts (#6303) 2018-08-17 21:06:36 -07:00
Minio Trusted
6c0d53a1c5 Update yaml files to latest version RELEASE.2018-08-18T03-49-57Z 2018-08-18 03:54:06 +00:00
Harshavardhana
9f14433cbd Ensure that setConfig uses latest functionality (#6302) 2018-08-17 18:51:34 -07:00
Harshavardhana
50a817e3d3 Use new listener which implements enhanced tcp features (#6289)
This package provides a customizable TCP net.Listener with various
performance-related options:

 * SO_REUSEPORT. This option allows linear scaling server performance
   on multi-CPU servers.
   See https://www.nginx.com/blog/socket-sharding-nginx-release-1-9-1/ for details.
 * TCP_DEFER_ACCEPT. This option expects the server to read from the accepted
   connection before writing to it.
 * TCP_FASTOPEN. See https://lwn.net/Articles/508865/ for details.
2018-08-17 18:44:02 -07:00
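As an illustration of how such options are typically set in Go 1.11+ (not necessarily how the vendored package does it), SO_REUSEPORT can be applied before bind via `net.ListenConfig` on Linux; TCP_DEFER_ACCEPT and TCP_FASTOPEN follow the same pattern with their own option constants:

```
package example

import (
	"context"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

// listenReusePort sketches setting SO_REUSEPORT (Linux-only) on the
// socket before bind, using net.ListenConfig's Control hook.
func listenReusePort(addr string) (net.Listener, error) {
	lc := net.ListenConfig{
		Control: func(network, address string, c syscall.RawConn) error {
			var serr error
			err := c.Control(func(fd uintptr) {
				serr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
			})
			if err != nil {
				return err
			}
			return serr
		},
	}
	return lc.Listen(context.Background(), "tcp", addr)
}
```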
Harshavardhana
5a4a57700b Add select docs and fix return values for Select API (#6300) 2018-08-17 17:11:39 -07:00
Harshavardhana
3de5a3157f Enhance picking valid xlMeta based on quorum (#6297)
This PR borrows the idea from getFormatXLQuorum()
2018-08-17 14:42:04 -07:00
Kaan Kabalak
50dec08002 Correct link paths in Chinese documentation (#6299) 2018-08-17 13:16:17 -07:00
poornas
e71ef905f9 Add support for SSE-S3 server side encryption with vault (#6192)
Add support for sse-s3 encryption with vault as KMS.

Also refactoring code to make use of headers and functions defined in
crypto package and clean up duplicated code.
2018-08-17 12:52:14 -07:00
junpeng liu
3d197c1449 Modify several translation errors (#6038) 2018-08-17 12:04:09 +05:30
Harshavardhana
65de2d68c0 Allow for proper garbage collection pooling bytes.Buffer (#6266) 2018-08-16 18:37:43 -07:00
Harshavardhana
1103ad2d08 Watch for symlinked certs in container envs (#6282)
Fixes #6278
2018-08-16 18:37:21 -07:00
Harshavardhana
eab947cf42 Make sure to update modTime in erasure metadata (#6296)
This is to ensure that when we update xl.json with new
parts it has the latest modtime, which in turn avoids consistency
issues when the disk is offline.
2018-08-16 17:55:01 -07:00
Harshavardhana
0fe9e95250 Validate prefixes on all sets (#6294)
This PR fixes a regression introduced in 8eb838bf91,
where a hashing technique was used on prefixes to get the right set
to perform the operation. This is not correct, since prefixes and
their corresponding keys might hash to different values depending
on the key length.

For prefixes/directories we should look everywhere to support proper
quorum based listing.

Fixes #6293
2018-08-16 16:49:38 -07:00
kannappanr
c7946ab9ab Remove unnecessary error log messages (#6186) 2018-08-16 12:57:49 -07:00
Harshavardhana
f5df3b4795 Remove select docs (#6287)
The Select API is sufficiently documented; this doc is also incomplete.

- https://aws.amazon.com/blogs/aws/s3-glacier-select/
- https://aws.amazon.com/blogs/developer/introducing-support-for-amazon-s3-select-in-the-aws-sdk-for-ruby/
- https://aws.amazon.com/blogs/developer/introducing-support-for-amazon-s3-select-in-the-aws-sdk-for-javascript/
- https://aws.amazon.com/blogs/developer/category/storage/s3-select/
2018-08-15 19:47:22 -07:00
Harshavardhana
f26325c988 Support supplying custom drives per set count (#6261) 2018-08-15 16:35:21 -07:00
Arjun Mishra
7c14cdb60e S3 Select API Support for CSV (#6127)
Add support for trivial where clause cases
2018-08-15 03:30:19 -07:00
Harshavardhana
0e02328c98 Migrate config.json from config-dir to backend (#6195)
This PR is the first set of changes to move the config
to the backend; the changes use the existing `config.json`
and allow it to be migrated such that we can save it on the
backend disks.

In future releases, we will slowly migrate out of the
current architecture.

Fixes #6182
2018-08-15 10:11:47 +05:30
Harshavardhana
380524ae27 Unlock read lock on uploadID upon errors (#6283) 2018-08-14 18:35:30 -07:00
kannappanr
0286e61aee Log disk not found error just once (#6059)
Modified the LogIf function to log only if the error passed
is not on the ignored errors list.

Currently, only disk not found error is added to the list.
Added a new function in logger package called LogAlwaysIf, 
which will print on any error.

Fixes #5997
2018-08-14 13:58:48 -07:00
wd256
ff29aed05d gcs: Translate S3 user-defined metadata prefix to/from GCS custom metadata prefix (#6270) 2018-08-14 11:53:39 -07:00
Harshavardhana
64f2c61813 Implement memory efficient readdir for windows (#6247)
Fixes #6164
2018-08-09 14:52:29 -07:00
Andreas Auernhammer
525c04fd07 crypto: add SSE-KMS HTTP header detection (#6228)
This commit adds support for detecting SSE-KMS headers.
The server should be able to detect SSE-KMS headers to
at least fail such S3 requests with not implemented.
2018-08-09 13:02:57 -07:00
Sanat Mouli
efac90461a Fix bug in Share Object Modal (#6257)
Fixes #6249
2018-08-09 09:45:43 -07:00
Durgadas Kamath
71979376b5 Add s390x support (#6263) 2018-08-08 15:49:12 -07:00
Anis Elleuch
5a1ae862a7 Avoid sending an error after 206 HTTP code (#6264)
When a S3 client sends a GET Object request with a range header, a 206 http
code is returned indicating success; however the call to the object
layer's GetObject() inside the handler can return an error, which led
to writing an XML error message. This is obviously wrong, since
we already sent the 206 http code. So in that case, we just stop sending
data to the S3 client; the latter can still detect whether there was an
error by comparing the received data with the Content-Length header
in the Get Object response.
2018-08-08 15:39:47 -07:00
Anis Elleuch
6df20734f9 Avoid logging the body of the http 206 response (#6258)
When an S3 client issues a GET request with a range specified, the Minio
server returns partial data with a 206 http code. That partial data
is also sent in the MINIO_HTTP_TRACE output, which is incorrect. This PR
fixes the issue.
2018-08-08 12:34:42 -07:00
Sanat Mouli
98bce72295 Fix output path for go-bindata-assetfs (#6253) 2018-08-08 10:46:02 +05:30
Harshavardhana
2f1756489e Add tracing capabilities for internode rpc Servers (#6254) 2018-08-07 15:21:30 +05:30
Harshavardhana
9719640e34 Use sha256-simd instead of crypto/sha256 (#6252) 2018-08-06 18:00:10 -07:00
Krishna Srinivas
ce02ab613d Simplify erasure code by separating bitrot from erasure code (#5959) 2018-08-06 15:14:08 -07:00
Oleg Kovalov
37de2dbd3b simplifying if-else chains to switches (#6208) 2018-08-06 10:26:40 -07:00
Harshavardhana
a82500f162 Support dumb terminals by turning off color (#6246)
ANSI colors do not work on dumb terminals, e.g. in situations
when minio is running as a service under systemd.

This PR ensures we turn off color in those situations.
2018-08-06 18:16:49 +05:30
Harshavardhana
2dede2fdc2 Add reliable RemoveAll to handle racy situations (#6227) 2018-08-06 09:45:28 +05:30
Harshavardhana
13fbb96736 Hold locks granularly in nslockMap (#6242)
With benchmarks increases the performance for small files
by almost 4x times the previous releases.
2018-08-06 08:55:25 +05:30
Harshavardhana
eabfcea34e Add granular locking in retryTicker (#6236)
This is to avoid serializing RPC contention on ongoing
parallel operations; the blocking profile indicated
all calls were being serialized through setRetryTicker.
2018-08-03 18:57:00 -07:00
Andreas Auernhammer
a078703214 catch crypto.* errors and add SSE-S3 invalid algorithm err (#6229)
This commit adds the crypto.* errors to the
`toAPIErrorCode` switch. Further this commit adds an S3
API error code returned whenever the client specifies an
SSE-S3 request with an invalid algorithm parameter.
2018-08-03 16:55:45 -07:00
Harshavardhana
bd2b22572f Increase max idle connections from 100 to 4096 (#6244)
This is to be inline with our benchmarking results
2018-08-03 16:29:28 -07:00
poornas
5f69f04909 nas gateway: fix regression in global bucket policy initialization (#6243)
Fixes #6238
globalPolicySys used to be initialized in the fs/xl layer. The referenced
commit moved this logic to server/gateway initialization, but a check
to avoid double initialization prevented globalPolicySys from being loaded
from disk for NAS.

fixes regression from commit be1700f595
2018-08-03 15:12:18 -07:00
Minio Trusted
a1a426e523 Update yaml files to latest version RELEASE.2018-08-02T23-11-36Z 2018-08-02 23:17:30 +00:00
Harshavardhana
a091b1a3ee Fix admin API doc formatting (#6235) 2018-08-02 14:21:38 -07:00
Harshavardhana
556a51120c Deprecate ListLocks and ClearLocks (#6233)
No locks are ever left in memory, we also
have a periodic interval of clearing stale locks
anyways. The lock instrumentation was not complete
and was seldom used.

Deprecate this for now and bring it back later if
it is really needed. This also in-turn seems to improve
performance slightly.
2018-08-02 23:09:42 +05:30
poornas
eb391a53c1 check for syscall errors in posix-errors helper functions (#6232) 2018-08-02 10:38:51 +05:30
Harshavardhana
e17e09ea3c Handle POST object upload without filename param (#6221)
The POST mime/multipart upload style allows the filename value to be optional,
which leads to implementation issues in Go releases in their
standard mime/multipart library.

When `filename` doesn't exist, Go doesn't update `form.File`, which
we rely on to extract the incoming file data. Strangely, when `filename`
is not specified this data is buffered in memory and becomes part of
`form.Value` instead of `form.File`, which creates inconsistent
behavior.

This PR tries to fix this in our code for the time being, but the ideal fix
would be in the upstream mime/multipart library, to handle the
above situation consistently.
2018-08-01 14:19:11 -07:00
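A hedged sketch of the workaround shape described above, assuming a part named "file" (the actual handler differs): check `MultipartForm.File` first and fall back to `MultipartForm.Value` when the part lacked a filename.

```
package example

import (
	"bytes"
	"io"
	"net/http"
	"strings"
)

// postFileData sketches the workaround: Go's mime/multipart puts a
// part named "file" into MultipartForm.File only when it carries a
// filename; without one the bytes land in MultipartForm.Value instead.
func postFileData(r *http.Request) (io.Reader, error) {
	if err := r.ParseMultipartForm(32 << 20); err != nil {
		return nil, err
	}
	if fhs := r.MultipartForm.File["file"]; len(fhs) > 0 {
		f, err := fhs[0].Open()
		if err != nil {
			return nil, err
		}
		return f, nil
	}
	// Fallback: filename was omitted, so the data was parsed as a value.
	if vs := r.MultipartForm.Value["file"]; len(vs) > 0 {
		return strings.NewReader(vs[0]), nil
	}
	return bytes.NewReader(nil), nil
}
```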
Andreas Auernhammer
76c423392a crypto: add GenerateIV from random IV generation (#6215)
This commit adds a `GenerateIV` function to simplify
the generation of random IVs.

It adds some unit tests for `GenerateIV` in key_test.go
2018-08-01 01:02:07 -07:00
Aarushi Arya
8e6d756e3a Appropriate error message on unsuccessful update. (#6203) 2018-08-01 01:01:37 -07:00
Andreas Auernhammer
a7c9058375 crypto: implement Stringer for S3 and SSEC (#6216)
This commit adds a `fmt.Stringer` implementation for
SSE-S3 and SSE-C. The string representation is the
domain used for object key sealing.
See: `ObjectKey.Seal(...)` and `ObjectKey.Unseal(...)`
2018-07-31 11:15:12 -07:00
Nitish Tiwari
b16e33bcf5 Fix Kubernetes TLS doc to avoid creating CAs dir on read only mount (#6214) 2018-07-31 10:58:34 -07:00
Nitish Tiwari
197af49c99 Fix healthcheck handler to verify gateway backend liveness (#6218)
Fixes #6217
2018-07-31 10:55:34 -07:00
kannappanr
264cc4020f Return 503 instead of 404 if more than half of disks are not found (#6207)
Fixes #6163
2018-07-31 00:23:29 -07:00
Minio Trusted
df88421087 Update yaml files to latest version RELEASE.2018-07-31T02-11-47Z 2018-07-31 02:17:27 +00:00
Harshavardhana
dbd89bbae3 Remove double RLocks for GetBucketInfo (#6209) 2018-07-30 17:38:52 -07:00
Aditya Manthramurthy
224a272cf2 Fix type of bitrot mismatch error (#6205)
The error type `hashMismatchError` is lost when the error is received
from a remote disk.

Fixes #6201
2018-07-29 15:00:37 +05:30
Harshavardhana
ad86454580 Make sure to handle FaultyDisks in listing ops (#6204)
Continuing from PR 157ed65c35

Our posix.go implementation did not handle I/O errors
properly on the disks; this led to situations where
top-level callers such as ListObjects might return early
without even verifying all the available disks.

This commit tries to address this for Kubernetes, drbd/nbd based
persistent volumes, which can disconnect under load and
result in situations where disks return I/O errors.

This commit also simplifies the listing operation; listing
never returns any error. We can avoid this since we pretty
much ignore most of the errors anyway. When objects are
accessed directly we return proper errors.
2018-07-27 15:32:19 -07:00
Andreas Auernhammer
644c2ce326 crypto: add support for parsing/creating SSE-C/SSE-S3 metadata (#6169)
* crypto: add support for parsing SSE-C/SSE-S3 metadata

This commit adds support for detecting and parsing
SSE-C/SSE-S3 object metadata. With the `IsEncrypted`
functions it is possible to determine whether an object
seems to be encrypted. With the `ParseMetadata` functions
it is possible to validate such metadata and extract the
SSE-C/SSE-S3 related values.

It also fixes some naming issues.

* crypto: add functions for creating SSE object metadata

This commit adds functions for creating SSE-S3 and
SSE-C metadata. It also adds a `CreateMultipartMetadata`
for creating multipart metadata.

For all functions unit tests are included.
2018-07-25 13:35:54 -07:00
Harshavardhana
2debe77586 Remove error returned when part sizes are un-equal (#6183)
Since a `pwrite`-like implementation would
require more complex code than the background append
implementation, it is better to keep the current code
as is and not implement `pwrite`-based functionality.

Closes #4881
2018-07-24 21:31:03 -07:00
Harshavardhana
20480ba3f7 Remove references to MINIO_ENDPOINTS (#6200)
MINIO_ENDPOINTS is a special case scenario
we don't need to document it for now.
2018-07-24 17:17:25 -07:00
kannappanr
4f52d22c36 Fix make test failure (#6185)
Fix shadowing errors.
2018-07-24 14:17:58 -07:00
Kaan Kabalak
cbe8df198e Replace pointer with default cursor on filename hover (#6193) 2018-07-24 13:34:20 +05:30
Harshavardhana
157ed65c35 Fix healthcheck handler to check errors in local disks only (#6184)
The healthcheck handler in the current implementation was
performing ListBuckets() to check for liveness of the Minio
service. The ListBuckets() implementation, on the other hand,
doesn't do quorum based listing, and if one of the disks
returned an error, e.g. an I/O error, it would lead to kubernetes
taking the minio pod down prematurely even if the disk
is not local to that minio server.

The reason is ListBuckets() call cannot be trusted to
provide us the valid information that we need, Minio is a
clustered application which is designed to handle disk
failures. Error on one of the disks doesn't mean the pod
should become fully non-operational.

This PR attempts to fix this by only checking for alive
disks which are local to each setup and also by simply
performing a Stat() operation, if the Stat() returned
error on all disks local to a particular server then
we can let kubernetes safely take it down, until then
we should be operational.
2018-07-23 12:21:25 -07:00
Minio Trusted
869018ad14 Update yaml files to latest version RELEASE.2018-07-23T18-34-49Z 2018-07-23 18:39:08 +00:00
Harshavardhana
5acc2a6db1 S3 gateway signature probe use a unique bucket (#6190)
This fixes an issue because someone is
using `probe-bucket-sign` bucket name in
region 'eu-central-1'
2018-07-23 10:16:56 -07:00
kannappanr
2cd14f567c Do not set Key and BucketName in ErrorResponse, if empty (#6174) 2018-07-23 14:09:09 +05:30
Harshavardhana
f1be356cc6 Do not use parallel deletes to avoid random I/O (#6178)
The current code for deleting 1000 objects simultaneously
causes significant random I/O, which on slower drives
leads to servers disconnecting in a distributed setup.

Simplify this by serially deleting and reducing the
chattiness of this operation.
2018-07-20 21:21:01 -07:00
kannappanr
76ddf4d32f Log x-amz-request-id as log and XML error response (#6173)
Currently, requestid field in logEntry is not populated, as the
requestid field gets set at the very end.
It is now set before regular handler functions. This is also
useful in setting it as part of the XML error response.

Travis build for ppc64le has been quite inconsistent and stays queued
for most of the time. Removing this build as part of Travis.yml for
the time being.
2018-07-20 18:46:32 -07:00
kannappanr
7b91bd71fe Remove ppc64le support from .travis.yml (#6180)
Travis CI for ppc64le stays in queued state.
Removing the support for now.
2018-07-20 18:39:55 -07:00
Harshavardhana
36ab615518 Remove unused functions and constants (#6175) 2018-07-20 23:37:43 +05:30
kannappanr
963a70053b Do not trace HTTP body of PostPolicyBuckethandler (#6177) 2018-07-20 07:37:46 -07:00
Anis Elleuch
9c5e971a58 Add new console/http loggers (#6066)
- Add console target logging, enabled by default.
- Add http target logging, which supports an endpoint
  with basic authentication (username/password are passed
  in the endpoint URL itself)
- HTTP target logging is asynchronous and some logs can be
  dropped if the channel buffer (10000) is full
2018-07-19 15:55:06 -07:00
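A rough Go sketch of such an asynchronous HTTP target; the endpoint is illustrative, and only the non-blocking buffered-channel pattern is the point:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

type httpTarget struct {
	endpoint string      // e.g. http://user:pass@127.0.0.1:8080/logs (hypothetical)
	logCh    chan []byte // buffered; sends are dropped when it is full
}

func newHTTPTarget(endpoint string) *httpTarget {
	t := &httpTarget{endpoint: endpoint, logCh: make(chan []byte, 10000)}
	go func() {
		for entry := range t.logCh {
			// Errors are ignored here; a real target would retry or count drops.
			resp, err := http.Post(t.endpoint, "application/json", bytes.NewReader(entry))
			if err == nil {
				resp.Body.Close()
			}
		}
	}()
	return t
}

// Send never blocks the request path: if the buffer is full, the entry
// is silently dropped.
func (t *httpTarget) Send(entry []byte) {
	select {
	case t.logCh <- entry:
	default: // buffer full, drop the log entry
	}
}

func main() {
	t := newHTTPTarget("http://user:pass@127.0.0.1:8080/logs")
	t.Send([]byte(`{"msg":"hello"}`))
	fmt.Println("queued log entry without blocking")
}
```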
Anis Elleuch
b1c9eb0e01 Disable splitting lines in pretty error messages (#6171)
In a small window, the UI tries to split lines to produce an
eye-candy error message. However, since we show some docs.minio.io
links in error messages, these links end up broken and are not
easily selectable in an X terminal. This PR changes the behavior
and won't split lines anymore.
2018-07-19 15:49:02 -07:00
Nitish Tiwari
b8f4f26cf6 Add S3 gateway documentation (#6165)
Fixes #4830
2018-07-19 11:54:38 -07:00
kannappanr
43cc0096fa Add support for deployment ID (#6144)
The deployment ID helps identify a Minio deployment when using
remote logging targets.
2018-07-18 20:17:35 -07:00
Anis Elleuch
e8a008f5b5 Better validation of all config file fields (#6090)
Add Validate() to serverConfig and call it at server
startup and in the Admin SetConfig handler, to minimize
error scenarios after a server restart.
2018-07-18 11:22:29 -07:00
Andreas Auernhammer
758a80e39b crypto: add basic functionality for parsing SSE-C headers (#6148)
This commit adds basic support for SSE-C / SSE-C copy.
This includes functions for determining whether SSE-C
is requested by the S3 client and functions for parsing
such HTTP headers.

All S3 SSE-C parsing errors are exported such that callers
can pattern-match to forward the correct error to S3
clients.

Further, the SSE-C-related internal metadata entry keys
are added by this commit.
2018-07-18 10:49:26 -07:00
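A rough Go sketch of SSE-C detection and header parsing; the header names are the standard S3 ones, but the error values are illustrative rather than the exported errors this commit adds:

```go
package main

import (
	"encoding/base64"
	"errors"
	"fmt"
	"net/http"
)

var (
	errMissingCustomerKey  = errors.New("SSE-C: missing customer key")
	errInvalidCustomerAlgo = errors.New("SSE-C: algorithm must be AES256")
	errInvalidCustomerKey  = errors.New("SSE-C: key must be 32 bytes")
)

// isRequestedSSEC reports whether the S3 client asked for SSE-C.
func isRequestedSSEC(h http.Header) bool {
	return h.Get("X-Amz-Server-Side-Encryption-Customer-Algorithm") != ""
}

// parseSSECKey validates the SSE-C headers and returns the client key.
func parseSSECKey(h http.Header) ([]byte, error) {
	if h.Get("X-Amz-Server-Side-Encryption-Customer-Algorithm") != "AES256" {
		return nil, errInvalidCustomerAlgo
	}
	b64 := h.Get("X-Amz-Server-Side-Encryption-Customer-Key")
	if b64 == "" {
		return nil, errMissingCustomerKey
	}
	key, err := base64.StdEncoding.DecodeString(b64)
	if err != nil || len(key) != 32 {
		return nil, errInvalidCustomerKey
	}
	return key, nil
}

func main() {
	h := http.Header{}
	h.Set("X-Amz-Server-Side-Encryption-Customer-Algorithm", "AES256")
	h.Set("X-Amz-Server-Side-Encryption-Customer-Key",
		base64.StdEncoding.EncodeToString(make([]byte, 32)))
	fmt.Println(isRequestedSSEC(h))
	key, err := parseSSECKey(h)
	fmt.Println(len(key), err) // 32 <nil>
}
```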
wd256
3ec4738955 gcs: Use Pager to iterate results in ListObjectsV1/V2 (#6162)
Fixes #6052
2018-07-18 21:49:16 +05:30
Andreas Auernhammer
6c93c60424 crypto: add a basic KMS implementation (#6161)
This commit adds a basic KMS implementation for an
operator-specified SSE-S3 master key. The master key
is wrapped as a KMS such that SSE-S3 with a master key
and SSE-S3 with a KMS can share the same code.

Bindings for a remote / true KMS (like Hashicorp Vault)
will be added later on.
2018-07-17 22:40:34 -07:00
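A toy Go sketch of the idea, wrapping an operator master key behind a KMS-style interface; the HMAC-based sealing below is illustrative only, not Minio's actual construction:

```go
package main

import (
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

// KMS generates a per-object key and a sealed blob that can later be
// used to recover that key; a remote KMS would implement the same API.
type KMS interface {
	GenerateKey(keyID string) (plaintext [32]byte, sealed []byte, err error)
	UnsealKey(keyID string, sealed []byte) ([32]byte, error)
}

type masterKeyKMS struct{ masterKey [32]byte }

func (k masterKeyKMS) deriveKey(keyID string, nonce []byte) (key [32]byte) {
	mac := hmac.New(sha256.New, k.masterKey[:])
	mac.Write([]byte(keyID))
	mac.Write(nonce)
	copy(key[:], mac.Sum(nil))
	return key
}

func (k masterKeyKMS) GenerateKey(keyID string) ([32]byte, []byte, error) {
	nonce := make([]byte, 32)
	if _, err := rand.Read(nonce); err != nil {
		return [32]byte{}, nil, err
	}
	// The "sealed" blob is just the nonce: the key is re-derivable from it.
	return k.deriveKey(keyID, nonce), nonce, nil
}

func (k masterKeyKMS) UnsealKey(keyID string, sealed []byte) ([32]byte, error) {
	if len(sealed) != 32 {
		return [32]byte{}, fmt.Errorf("invalid sealed key")
	}
	return k.deriveKey(keyID, sealed), nil
}

func main() {
	var kms KMS = masterKeyKMS{}
	key, sealed, _ := kms.GenerateKey("my-minio-key")
	again, _ := kms.UnsealKey("my-minio-key", sealed)
	fmt.Println(key == again) // true
}
```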
Krishna Srinivas
0c9f4c9092 formatMetaV1 should be "inherited" by disk format structs (#6134) 2018-07-16 20:26:42 -07:00
Andreas Auernhammer
289d6ce1d7 crypto: update SSE-S3 and SSE-C key derivation (#6152)
This commit updates the key derivation to reflect the
latest change of crypto/doc.go. This includes handling
the insecure legacy KDF.

Since #6064 is fixed, the third test case for object key
generation is enabled again.
2018-07-16 07:49:50 -07:00
Rob Girard
2a12e694f3 Changed command line examples (#6149)
The server:disk order originally provided wouldn't stripe
wide and could lead to availability issues.

Also added short-form examples using {1...n} and a
warning about {1..2} vs {1...3}.
2018-07-14 20:48:38 +05:30
Harshavardhana
db26d3c9e2 Fix handling files at leaf attempting disk.ListDir() (#6155)
Return an ignorable error upon readDir() failure on
a file.
2018-07-14 12:11:48 +05:30
Stefan Husch
914c76a801 Implement lock.Open() to fix #5642 (#6150) 2018-07-13 18:37:02 -07:00
Krishnan Parthasarathi
a1ef90be52 gcs: Limit number of objects listed to max-keys (#6133) 2018-07-13 10:27:26 -07:00
Minio Trusted
7c4a41b933 Update yaml files to latest version RELEASE.2018-07-13T00-09-07Z 2018-07-13 00:43:51 +00:00
Nitish Tiwari
2aa18cafc6 Update federation target to etcd/clientv3 (#6119)
With CoreDNS now supporting etcdv3 as the DNS backend, we
can update our federation target to etcdv3. Users will now be
able to use an etcdv3 server as the federation backbone.

Minio will write bucket data to etcdv3, and CoreDNS can pick
that data up and serve it as bucket-style DNS paths.
2018-07-12 14:12:40 -07:00
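A rough Go sketch of the flow, assuming the go.etcd.io/etcd/clientv3 import path and a CoreDNS skydns-style key layout (the exact schema used by Minio/CoreDNS may differ):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Publish a bucket as a DNS record under a CoreDNS etcd path, so
	// "mybucket.domain.com" resolves to the hosting Minio server.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	_, err = cli.Put(ctx, "/skydns/com/domain/mybucket",
		`{"host":"192.168.1.10","port":9000}`)
	if err != nil {
		panic(err)
	}
	fmt.Println("bucket DNS entry published")
}
```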
Andreas Auernhammer
adf7340394 fix size computation for en/decrypted objects (#6147)
This PR fixes the size calculation for encrypted multipart
objects.
2018-07-12 11:23:32 -07:00
Harshavardhana
b11a8eb3f4 Support multiple values for x-amz-meta header (#6145)
Fixes #5595
2018-07-12 09:40:14 -07:00
Andreas Auernhammer
15771ebe8d Fix decrypted object size and key derivation in CopyObjectPart (#6141)
This commit fixes the size calculation for multipart
objects. The decrypted size of an encrypted multipart
object is the sum of the decrypted part sizes.

Also fixes the key derivation in CopyObjectPart.
Instead of using the same object-encryption-key for each
part, a unique per-part key is now derived.

Updates #6139
2018-07-12 21:59:56 +05:30
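An illustrative Go sketch of per-part key derivation, assuming an HMAC construction over the part number; not necessarily Minio's exact scheme:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// partKey derives a unique key per part from the object encryption key
// and the part number, so no two parts share the same encryption key.
func partKey(objectKey [32]byte, partNumber uint32) [32]byte {
	mac := hmac.New(sha256.New, objectKey[:])
	var id [4]byte
	binary.LittleEndian.PutUint32(id[:], partNumber)
	mac.Write(id[:])
	var key [32]byte
	copy(key[:], mac.Sum(nil))
	return key
}

func main() {
	var objectKey [32]byte
	fmt.Println(partKey(objectKey, 1) == partKey(objectKey, 2)) // false
}
```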
Praveen raj Mani
44865596db SignatureV4 validation with Metadata in the presignedUrl (#5894)
The `X-Amz-Meta-`/`X-Minio-Meta-` headers will now be recognized in the query string as well.

Fixes #5857 #5950
2018-07-10 20:27:10 -07:00
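A small Go sketch of the idea: metadata keys are picked up from a presigned URL's query string as well as from headers (the extraction logic here is illustrative):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// metadataFromQuery collects X-Amz-Meta-*/X-Minio-Meta-* keys from the
// query parameters of a presigned request.
func metadataFromQuery(values url.Values) map[string]string {
	meta := make(map[string]string)
	for key, vals := range values {
		k := strings.ToLower(key)
		if strings.HasPrefix(k, "x-amz-meta-") || strings.HasPrefix(k, "x-minio-meta-") {
			if len(vals) > 0 {
				meta[key] = vals[0]
			}
		}
	}
	return meta
}

func main() {
	u, _ := url.Parse("https://play.minio.io/bucket/obj?X-Amz-Meta-Color=blue&X-Amz-Signature=abc")
	fmt.Println(metadataFromQuery(u.Query())) // map[X-Amz-Meta-Color:blue]
}
```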
Nitish Tiwari
c9bc7e47b9 Update distributed docs (#6123)
We need to clarify that distributed Minio doesn't strictly need a
fresh drive; it just needs a fresh directory on the drive.
2018-07-10 07:32:24 +05:30
Anis Elleuch
be1700f595 Avoid startup abort when a notify target is down (#6126)
Minio server was preventing itself from starting when any
notification target was down. This PR changes the behavior by
avoiding the startup abort in that case, so the user will still
be able to access the Minio server using mc admin commands after
a restart or set-config commands.
2018-07-10 07:20:31 +05:30
Minio Trusted
42c5b64e4e Update yaml files to latest version RELEASE.2018-07-10T01-42-11Z 2018-07-10 01:46:03 +00:00
Krishna Srinivas
40ed0d1f5d Support 1GB disk size (#6137)
Pivotal CF by default has a 1GB disk option, which causes Minio to fail to start.
2018-07-09 18:23:49 -07:00
Andreas Auernhammer
b181a693fb fix object rebinding SSE-C security guarantee violation (#6121)
This commit fixes a weakness of the key-encryption-key
derivation for SSE-C encrypted objects. Before this
change the key-encryption-key was not bound to / didn't
depend on the object path. This allowed an attacker to
replace objects - encrypted with the same
client key - with each other.

This change fixes the issue by updating the
key-encryption-key derivation to include:
 - the domain (in this case SSE-C)
 - a canonical object path representation
 - the encryption & key derivation algorithm

Changing the object path now causes the KDF to derive a
different key-encryption-key, such that unsealing the
object key fails.
Including the domain (SSE-C) and the encryption & key
derivation algorithm is not strictly necessary for this
fix. However, both will be included in the SSE-S3 KDF,
so they are included here to avoid updating the KDF
again when we add SSE-S3.

The legacy KDF 'DARE-SHA256' is only used for existing
objects and never for new objects / key rotation.
2018-07-09 17:18:28 -07:00
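A simplified Go sketch of the hardened derivation: the KDF output depends on the domain, the canonical object path, and the algorithm identifiers, so moving a ciphertext to a different path yields a different key-encryption-key. Constants and layout here are illustrative, not the exact Minio wire format:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
)

// deriveKEK binds the key-encryption-key to the SSE-C domain, the
// algorithm identifier, and the canonical object path.
func deriveKEK(clientKey [32]byte, bucket, object string) [32]byte {
	mac := hmac.New(sha256.New, clientKey[:])
	mac.Write([]byte("SSE-C"))               // domain
	mac.Write([]byte("DAREv2-HMAC-SHA256"))  // encryption & KDF algorithm
	mac.Write([]byte(bucket + "/" + object)) // canonical object path
	var kek [32]byte
	copy(kek[:], mac.Sum(nil))
	return kek
}

func main() {
	var clientKey [32]byte // the client-provided SSE-C key
	a := deriveKEK(clientKey, "bucket", "photos/cat.png")
	b := deriveKEK(clientKey, "bucket", "photos/dog.png")
	// Different paths produce different KEKs, so object swapping fails.
	fmt.Println(a == b) // false
}
```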
Anis Elleuch
4ddc222f46 fix: Propagate bucket policy update in a distributed setup (#6135)
Commit 0d52126023 caused a regression in setting
a new bucket policy in a distributed setup. The reason is that gob is not able
to encode fields declared as interfaces unless we provide GobEncode() and GobDecode().
This PR adds them by using the JSON marshaller and unmarshaller that are already
implemented for the Functions interface.
2018-07-09 02:18:48 -07:00
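A minimal Go sketch of the workaround: a struct with interface-typed fields implements GobEncode/GobDecode by delegating to its existing JSON (un)marshallers, so gob can move it between nodes. The Statement/Conditions types here are hypothetical stand-ins for the policy types:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"encoding/json"
	"fmt"
)

type Statement struct {
	Effect     string                 `json:"Effect"`
	Conditions map[string]interface{} `json:"Conditions"` // interface values gob can't encode directly
}

// GobEncode/GobDecode delegate to JSON, sidestepping gob's inability to
// encode interface-typed fields without registration.
func (s Statement) GobEncode() ([]byte, error) { return json.Marshal(s) }

func (s *Statement) GobDecode(data []byte) error { return json.Unmarshal(data, s) }

func main() {
	in := Statement{Effect: "Allow", Conditions: map[string]interface{}{"IpAddress": "10.0.0.0/8"}}
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(in); err != nil {
		panic(err)
	}
	var out Statement
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}
```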
Mike Scarlett
c310cbbe89 Update comments regarding GCS component count (#6131) 2018-07-06 17:07:11 -07:00
Harshavardhana
0ef0d7e685 pkg/certs: On windows watch for directory changes to load certs (#6128)
This PR fixes an issue when configuring Minio TLS on Windows.
2018-07-05 16:33:37 -07:00
wksw
c62813c887 completed sample config and fix wrong json format (#6096)
The sample config was missing the WORM configuration and had an invalid JSON format.
2018-07-05 13:40:07 +05:30
poornas
1da362538b cache: allow ellipse style entries for MINIO_CACHE_DRIVES (#6088)
Fixes #5863
2018-07-03 16:54:10 -07:00
Krishna Srinivas
e40a5e05e1 Do notification in background to not block S3 client REST calls (#6005) 2018-07-03 11:09:36 -07:00
ebozduman
b0b0fb4c8d Validate Minio config.json file on the client side (#6067) 2018-07-03 11:07:46 -07:00
kannappanr
726e75611e Do not log BucketNotFound errors on minio console (#6114) 2018-07-03 11:04:55 -07:00
Rafael Peria de Sene
317e648c0d Add support for ppc64le (#6116)
Signed-off-by: Rafael Peria de Sene <rpsene@br.ibm.com>
2018-07-03 09:42:19 -07:00
Harshavardhana
80b3e9cb03 use appropriate HTTP status for storage is full (#6117) 2018-07-03 09:40:14 -07:00
Harshavardhana
6c85706c24 Use GetSourceIP for source ip as request params (#6109)
Fixes #6108
2018-07-02 14:40:18 -07:00
Harshavardhana
d7ced9a8b5 Upgrade gjson to bring in new performance improvements (#6106) 2018-07-02 13:31:11 -07:00
Praveen raj Mani
360f3f9335 Checking the existence of the bucket in DeleteObjectHandler (#6085)
Fixes #6077
2018-06-30 22:35:43 -07:00
wd256
25f9b0bc3b Handle ListObjectsV2 start-after parameter in ObjectLayer (#6078) 2018-07-01 09:52:45 +05:30
Harshavardhana
a5453c307f Fix kernel tuning script to ignore write failures (#6107)
Certain SCSI drivers do not allow tuning parameters
like nr_requests and max_sectors_kb to be changed; ignore these
errors silently, as this script is simply a best effort.

Fixes #6103
2018-06-30 14:55:21 -07:00
Harshavardhana
92a6676a2f Avoid unnecessary logging for policy not found errors (#6104) 2018-06-29 06:30:10 -07:00
Minio Trusted
f53d511798 Update yaml files to latest version RELEASE.2018-06-29T02-11-29Z 2018-06-29 02:14:58 +00:00
1561 changed files with 265519 additions and 123835 deletions


@@ -24,6 +24,10 @@
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->
## Regression
<!-- Is this issue a regression? (Yes / No) -->
<!-- If Yes, optionally please include minio version or commit id or PR# that caused this regression, if you have these details. -->
## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version used (`minio version`):


@@ -7,6 +7,10 @@
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. -->
## Regression
<!-- Is this PR fixing a regression? (Yes / No) -->
<!-- If Yes, optionally please include minio version or commit id or PR# that caused this regression, if you have these details. -->
## How Has This Been Tested?
<!--- Please describe in detail how you tested your changes. -->
<!--- Include details of your testing environment, and the tests you ran to -->

1
.gitignore vendored

@@ -22,3 +22,4 @@ parts/
prime/
stage/
.sia_temp/
config.json


@@ -1,34 +1,50 @@
go_import_path: github.com/minio/minio
sudo: required
services:
- docker
dist: trusty
language: go
os:
- linux
# this ensures PRs based on a local branch are not built twice
# the downside is that a PR targeting a different branch is not built
# but as a workaround you can add the branch to this list
branches:
only:
- master
env:
- ARCH=x86_64
matrix:
include:
- os: linux
dist: trusty
sudo: required
env:
- ARCH=x86_64
- CGO_ENABLED=0
go: 1.11.4
script:
- make
- diff -au <(gofmt -s -d cmd) <(printf "")
- diff -au <(gofmt -s -d pkg) <(printf "")
- for d in $(go list ./... | grep -v browser); do CGO_ENABLED=1 go test -v -race --timeout 15m "$d"; done
- make verifiers
- make crosscompile
- make verify
- make coverage
- cd browser && yarn && yarn test && cd ..
- os: windows
env:
- ARCH=x86_64
- CGO_ENABLED=0
go: 1.11.4
script:
- go build --ldflags="$(go run buildscripts/gen-ldflags.go)" -o %GOPATH%\bin\minio.exe
- for d in $(go list ./... | grep -v browser); do CGO_ENABLED=1 go test -v -race --timeout 20m "$d"; done
- bash buildscripts/go-coverage.sh
before_script:
# Add an IPv6 config - see the corresponding Travis issue
# https://github.com/travis-ci/travis-ci/issues/8361
- if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6'; fi
before_install:
- nvm install stable
script:
## Run all the tests
- make
- diff -au <(gofmt -s -d cmd) <(printf "")
- diff -au <(gofmt -s -d pkg) <(printf "")
- make test GOFLAGS="-timeout 15m -race -v"
- make coverage
- node --version
- cd browser && yarn && yarn test && cd ..
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then nvm install stable ; fi
after_success:
- bash <(curl -s https://codecov.io/bash)
go:
- '1.10.1'


@@ -68,4 +68,4 @@ To remove a dependency
- Run `make pkg-remove PKG=foo/bar` from top-level directory
### What are the coding guidelines for Minio?
``Minio`` is fully conformant with Golang style. Refer: [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project. If you observe offending code, please feel free to send a pull request or ping us on [Slack](slack.minio.io).
``Minio`` is fully conformant with Golang style. Refer: [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project. If you observe offending code, please feel free to send a pull request or ping us on [Slack](https://slack.minio.io).


@@ -1,28 +1,32 @@
FROM golang:1.10.1-alpine3.7
FROM golang:1.11.4-alpine3.7
LABEL maintainer="Minio Inc <dev@minio.io>"
ENV GOPATH /go
ENV PATH $PATH:$GOPATH/bin
ENV CGO_ENABLED 0
WORKDIR /go/src/github.com/minio/
RUN \
apk add --no-cache git && \
go get -v -d github.com/minio/minio && \
cd /go/src/github.com/minio/minio && \
go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"
FROM alpine:3.7
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key
WORKDIR /go/src/github.com/minio/
EXPOSE 9000
COPY --from=0 /go/bin/minio /usr/bin/minio
COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck.sh /usr/bin/
RUN \
apk add --no-cache ca-certificates curl && \
apk add --no-cache --virtual .build-deps git && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
go get -v -d github.com/minio/minio && \
cd /go/src/github.com/minio/minio && \
go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)" && \
rm -rf /go/pkg /go/src /usr/local/go && apk del .build-deps
EXPOSE 9000
apk add --no-cache ca-certificates 'curl>7.61.0' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]


@@ -1,27 +1,20 @@
FROM golang:1.10.1-alpine3.7
FROM alpine:3.7
LABEL maintainer="Minio Inc <dev@minio.io>"
ENV GOPATH /go
ENV PATH $PATH:$GOPATH/bin
ENV CGO_ENABLED 0
COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck.sh /usr/bin/
COPY minio /usr/bin/
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key
WORKDIR /go/src/github.com/minio/
COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck.sh /usr/bin/
COPY . /go/src/github.com/minio/minio
RUN \
apk add --no-cache ca-certificates curl && \
apk add --no-cache --virtual .build-deps git && \
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
cd /go/src/github.com/minio/minio && \
go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)" && \
rm -rf /go/pkg /go/src /usr/local/go && apk del .build-deps
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/healthcheck.sh
EXPOSE 9000


@@ -9,8 +9,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key
RUN \
apk add --no-cache ca-certificates && \
apk add --no-cache --virtual .build-deps curl && \
apk add --no-cache ca-certificates 'curl>7.61.0' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl https://dl.minio.io/server/minio/release/linux-amd64/minio > /usr/bin/minio && \
chmod +x /usr/bin/minio && \

49
Dockerfile.simpleci Normal file

@@ -0,0 +1,49 @@
#-------------------------------------------------------------
# Stage 1: Build and Unit tests
#-------------------------------------------------------------
FROM golang:1.11.4
COPY . /go/src/github.com/minio/minio
WORKDIR /go/src/github.com/minio/minio
RUN apt-get update && apt-get install -y jq
RUN make
RUN bash -c 'diff -au <(gofmt -s -d cmd) <(printf "")'
RUN bash -c 'diff -au <(gofmt -s -d pkg) <(printf "")'
RUN for d in $(go list ./... | grep -v browser); do go test -v -race --timeout 15m "$d"; done
RUN make verify
RUN make coverage || true
#-------------------------------------------------------------
# Stage 2: Test Frontend
#-------------------------------------------------------------
FROM node:10.15-stretch-slim
COPY browser /minio/browser
WORKDIR /minio/browser
RUN yarn
RUN yarn test
#-------------------------------------------------------------
# Stage 3: Run Gateway Tests
#-------------------------------------------------------------
FROM ubuntu:16.04
COPY --from=0 /go/src/github.com/minio/minio/minio ./minio
COPY buildscripts/gateway-tests.sh ./gateway-tests.sh
RUN apt-get update && apt-get install -y git wget jq curl dnsmasq
RUN wget https://dl.google.com/go/go1.11.4.linux-amd64.tar.gz && \
tar -C /usr/local -xzf go1.11.4.linux-amd64.tar.gz
ENV DEBIAN_FRONTEND noninteractive
ENV LANG C.UTF-8
ENV GOROOT /usr/local/go
RUN mkdir -p /go
ENV GOPATH /go
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH
RUN ./gateway-tests.sh


@@ -13,18 +13,21 @@ checks:
@(env bash $(PWD)/buildscripts/checkgopath.sh)
getdeps:
@echo "Installing golint" && go get -u github.com/golang/lint/golint
@echo "Installing golint" && go get -u golang.org/x/lint/golint
@echo "Installing gocyclo" && go get -u github.com/fzipp/gocyclo
@echo "Installing deadcode" && go get -u github.com/remyoudompheng/go-misc/deadcode
@echo "Installing misspell" && go get -u github.com/client9/misspell/cmd/misspell
@echo "Installing ineffassign" && go get -u github.com/gordonklaus/ineffassign
crosscompile:
@(env bash $(PWD)/buildscripts/cross-compile.sh)
verifiers: getdeps vet fmt lint cyclo deadcode spelling
vet:
@echo "Running $@"
@go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult cmd
@go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult pkg
@go tool vet cmd
@go tool vet pkg
fmt:
@echo "Running $@"
@@ -42,8 +45,8 @@ ineffassign:
cyclo:
@echo "Running $@"
@${GOPATH}/bin/gocyclo -over 100 cmd
@${GOPATH}/bin/gocyclo -over 100 pkg
@${GOPATH}/bin/gocyclo -over 200 cmd
@${GOPATH}/bin/gocyclo -over 200 pkg
deadcode:
@echo "Running $@"
@@ -60,7 +63,9 @@ spelling:
check: test
test: verifiers build
@echo "Running unit tests"
@go test $(GOFLAGS) -tags kqueue ./...
@CGO_ENABLED=0 go test -tags kqueue ./...
verify: build
@echo "Verifying build"
@(env bash $(PWD)/buildscripts/verify-build.sh)
@@ -71,7 +76,10 @@ coverage: build
# Builds minio locally.
build: checks
@echo "Building minio binary to './minio'"
@CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio
@GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio
docker: build
@docker build -t $(TAG) . -f Dockerfile.dev
pkg-add:
@echo "Adding new package $(PKG)"


@@ -17,7 +17,7 @@ docker run -p 9000:9000 minio/minio server /data
docker pull minio/minio:edge
docker run -p 9000:9000 minio/minio:edge server /data
```
Please visit Minio Docker quickstart guide for more [here](https://docs.minio.io/docs/minio-docker-quickstart-guide)
Note: Docker will not display the autogenerated keys unless you start the container with the `-it`(interactive TTY) argument. Generally, it is not recommended to use autogenerated keys with containers. Please visit Minio Docker quickstart guide for more information [here](https://docs.minio.io/docs/minio-docker-quickstart-guide)
## macOS
### Homebrew
@@ -53,6 +53,15 @@ chmod +x minio
./minio server /data
```
| Platform| Architecture | URL|
| ----------| -------- | ------|
|GNU/Linux|ppc64le|https://dl.minio.io/server/minio/release/linux-ppc64le/minio |
```sh
wget https://dl.minio.io/server/minio/release/linux-ppc64le/minio
chmod +x minio
./minio server /data
```
## Microsoft Windows
### Binary Download
| Platform| Architecture | URL|


@@ -1,55 +0,0 @@
# version format
version: "{build}"
# Operating system (build VM template)
os: Windows Server 2012 R2
# Platform.
platform: x64
clone_folder: c:\gopath\src\github.com\minio\minio
# Environment variables
environment:
GOPATH: c:\gopath
GOROOT: c:\go
# scripts that run after cloning repository
install:
- set PATH=%GOPATH%\bin;%GOROOT%\bin;%PATH%
- go version
- go env
- python --version
# To run your custom scripts instead of automatic MSBuild
build_script:
# Compile
# We need to disable firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648
- ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)'
- appveyor AddCompilationMessage "Starting Compile"
- cd c:\gopath\src\github.com\minio\minio
- go run buildscripts/gen-ldflags.go > temp.txt
- set /p BUILD_LDFLAGS=<temp.txt
- go build -ldflags="%BUILD_LDFLAGS%" -o %GOPATH%\bin\minio.exe
- appveyor AddCompilationMessage "Compile Success"
# To run your custom scripts instead of automatic tests
test_script:
# Unit tests
- ps: Add-AppveyorTest "Unit Tests" -Outcome Running
- mkdir build\coverage
- for /f "" %%G in ('go list github.com/minio/minio/... ^| find /i /v "browser/"') do ( go test -v -timeout 20m -race %%G )
- go test -v -timeout 20m -coverprofile=build\coverage\coverage.txt -covermode=atomic github.com/minio/minio/cmd
- ps: Update-AppveyorTest "Unit Tests" -Outcome Passed
after_test:
- go tool cover -html=build\coverage\coverage.txt -o build\coverage\coverage.html
- ps: Push-AppveyorArtifact build\coverage\coverage.txt
- ps: Push-AppveyorArtifact build\coverage\coverage.html
# Upload coverage report.
- "SET PATH=C:\\Python34;C:\\Python34\\Scripts;%PATH%"
- pip install codecov
- codecov -X gcov -f "build\coverage\coverage.txt"
# to disable deployment
deploy: off


@@ -5,13 +5,13 @@
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Minio Browser</title>
<link rel="stylesheet" href="loader.css" type="text/css">
<link rel="stylesheet" href="/minio/loader.css" type="text/css">
</head>
<body>
<div class="page-load">
<div class="pl-inner">
<img src="logo.svg" alt="">
<img src="/minio/logo.svg" alt="">
</div>
</div>
<div id="root"></div>
@@ -51,6 +51,6 @@
<![endif]-->
<script>currentUiVersion = 'MINIO_UI_VERSION'</script>
<script src="index_bundle.js"></script>
<script src="/minio/index_bundle.js"></script>
</body>
</html>


@@ -22,7 +22,7 @@ export let alertId = 0
export const set = alert => {
const id = alertId++
return (dispatch, getState) => {
if (alert.type !== "danger") {
if (alert.type !== "danger" || alert.autoClear) {
setTimeout(() => {
dispatch({
type: CLEAR,


@@ -67,8 +67,12 @@ const mapDispatchToProps = dispatch => {
return {
fetchBuckets: () => dispatch(actionsBuckets.fetchBuckets()),
setBucketList: buckets => dispatch(actionsBuckets.setList(buckets)),
selectBucket: bucket => dispatch(actionsBuckets.selectBucket(bucket))
selectBucket: (bucket, prefix) =>
dispatch(actionsBuckets.selectBucket(bucket, prefix))
}
}
export default connect(mapStateToProps, mapDispatchToProps)(BucketList)
export default connect(
mapStateToProps,
mapDispatchToProps
)(BucketList)


@@ -14,7 +14,7 @@
* limitations under the License.
*/
import { READ_ONLY, WRITE_ONLY, READ_WRITE } from '../constants'
import { READ_ONLY, WRITE_ONLY, READ_WRITE, NONE } from '../constants'
import React from "react"
import { connect } from "react-redux"
@@ -42,37 +42,40 @@ export class Policy extends React.Component {
render() {
const {policy, prefix} = this.props
let newPrefix = prefix
if (newPrefix === '')
newPrefix = '*'
return (
<div className="pmb-list">
<div className="pmbl-item">
{ newPrefix }
if (policy === NONE) {
return <noscript />
} else {
return (
<div className="pmb-list">
<div className="pmbl-item">
{ newPrefix }
</div>
<div className="pmbl-item">
<select className="form-control"
disabled
value={ policy }>
<option value={ READ_ONLY }>
Read Only
</option>
<option value={ WRITE_ONLY }>
Write Only
</option>
<option value={ READ_WRITE }>
Read and Write
</option>
</select>
</div>
<div className="pmbl-item">
<button className="btn btn-block btn-danger" onClick={ this.removePolicy.bind(this) }>
Remove
</button>
</div>
</div>
<div className="pmbl-item">
<select className="form-control"
disabled
value={ policy }>
<option value={ READ_ONLY }>
Read Only
</option>
<option value={ WRITE_ONLY }>
Write Only
</option>
<option value={ READ_WRITE }>
Read and Write
</option>
</select>
</div>
<div className="pmbl-item">
<button className="btn btn-block btn-danger" onClick={ this.removePolicy.bind(this) }>
Remove
</button>
</div>
</div>
)
)
}
}
}


@@ -17,7 +17,7 @@
import React from "react"
import { shallow, mount } from "enzyme"
import { Policy } from "../Policy"
import { READ_ONLY, WRITE_ONLY, READ_WRITE } from "../../constants"
import { READ_ONLY, WRITE_ONLY, READ_WRITE, NONE } from "../../constants"
import web from "../../web"
jest.mock("../../web", () => ({
@@ -31,6 +31,11 @@ describe("Policy", () => {
shallow(<Policy currentBucket={"bucket"} prefix={"foo"} policy={READ_ONLY} />)
})
it("should not render when policy is listed as 'none'", () => {
const wrapper = shallow(<Policy currentBucket={"bucket"} prefix={"foo"} policy={NONE} />)
expect(wrapper.find(".pmb-list").length).toBe(0)
})
it("should call web.setBucketPolicy and fetchPolicies on submit", () => {
const fetchPolicies = jest.fn()
const wrapper = shallow(


@@ -23,6 +23,7 @@ export const minioBrowserPrefix = p.slice(0, p.indexOf("/", 1))
export const READ_ONLY = "readonly"
export const WRITE_ONLY = "writeonly"
export const READ_WRITE = "readwrite"
export const NONE = "none"
export const SHARE_OBJECT_EXPIRY_DAYS = 5
export const SHARE_OBJECT_EXPIRY_HOURS = 0


@@ -57,7 +57,7 @@ export class ObjectActions extends React.Component {
})
}
render() {
const { object, showShareObjectModal } = this.props
const { object, showShareObjectModal, shareObjectName } = this.props
return (
<Dropdown id={`obj-actions-${object.name}`}>
<Dropdown.Toggle noCaret className="fia-toggle" />
@@ -77,7 +77,8 @@ export class ObjectActions extends React.Component {
<i className="fa fa-trash" />
</a>
</Dropdown.Menu>
{showShareObjectModal && <ShareObjectModal object={object} />}
{(showShareObjectModal && shareObjectName === object.name) &&
<ShareObjectModal object={object} />}
{this.state.showDeleteConfirmation && (
<DeleteObjectConfirmModal
deleteObject={this.deleteObject.bind(this)}
@@ -92,7 +93,8 @@ export class ObjectActions extends React.Component {
const mapStateToProps = (state, ownProps) => {
return {
object: ownProps.object,
showShareObjectModal: state.objects.shareObject.show
showShareObjectModal: state.objects.shareObject.show,
shareObjectName: state.objects.shareObject.object
}
}


@@ -88,8 +88,21 @@ describe("ObjectActions", () => {
object={{ name: "obj1" }}
currentPrefix={"pre1/"}
showShareObjectModal={true}
shareObjectName={"obj1"}
/>
)
expect(wrapper.find("Connect(ShareObjectModal)").length).toBe(1)
})
it("shouldn't render ShareObjectModal when the names of the objects don't match", () => {
const wrapper = shallow(
<ObjectActions
object={{ name: "obj1" }}
currentPrefix={"pre1/"}
showShareObjectModal={true}
shareObjectName={"obj2"}
/>
)
expect(wrapper.find("Connect(ShareObjectModal)").length).toBe(0)
})
})


@@ -19,16 +19,29 @@ import thunk from "redux-thunk"
import * as actionsObjects from "../actions"
import * as alertActions from "../../alert/actions"
import { minioBrowserPrefix } from "../../constants"
import history from "../../history"
jest.mock("../../web", () => ({
LoggedIn: jest.fn(() => true).mockReturnValueOnce(false),
ListObjects: jest.fn(() => {
return Promise.resolve({
objects: [{ name: "test1" }, { name: "test2" }],
istruncated: false,
nextmarker: "test2",
writable: false
})
LoggedIn: jest
.fn(() => true)
.mockReturnValueOnce(true)
.mockReturnValueOnce(false)
.mockReturnValueOnce(true)
.mockReturnValueOnce(true)
.mockReturnValueOnce(false),
ListObjects: jest.fn(({ bucketName }) => {
if (bucketName === "test-deny") {
return Promise.reject({
message: "listobjects is denied"
})
} else {
return Promise.resolve({
objects: [{ name: "test1" }, { name: "test2" }],
istruncated: false,
nextmarker: "test2",
writable: false
})
}
}),
RemoveObject: jest.fn(({ bucketName, objects }) => {
if (!bucketName) {
@@ -160,6 +173,41 @@ describe("Objects actions", () => {
})
})
it("creates objects/RESET_LIST after failing to fetch the objects from bucket with ListObjects denied for LoggedIn users", () => {
const store = mockStore({
buckets: { currentBucket: "test-deny" },
objects: { currentPrefix: "" }
})
const expectedActions = [
{
type: "alert/SET",
alert: {
type: "danger",
message: "listobjects is denied",
id: alertActions.alertId,
autoClear: true
}
},
{
type: "object/RESET_LIST"
}
]
return store.dispatch(actionsObjects.fetchObjects()).then(() => {
const actions = store.getActions()
expect(actions).toEqual(expectedActions)
})
})
it("redirect to login after failing to fetch the objects from bucket for non-LoggedIn users", () => {
const store = mockStore({
buckets: { currentBucket: "test-deny" },
objects: { currentPrefix: "" }
})
return store.dispatch(actionsObjects.fetchObjects()).then(() => {
expect(history.location.pathname.endsWith("/login")).toBeTruthy()
})
})
it("creates objects/SET_SORT_BY and objects/SET_SORT_ORDER when sortObjects is called", () => {
const store = mockStore({
objects: {
@@ -244,7 +292,11 @@ describe("Objects actions", () => {
const expectedActions = [
{
type: "alert/SET",
alert: { type: "danger", message: "Invalid bucket", id: 0 }
alert: {
type: "danger",
message: "Invalid bucket",
id: alertActions.alertId
}
}
]
return store.dispatch(actionsObjects.deleteObject("obj1")).then(() => {
@@ -355,7 +407,7 @@ describe("Objects actions", () => {
store.dispatch(actionsObjects.downloadObject("obj1"))
const url = `${
window.location.origin
}${minioBrowserPrefix}/download/bk1/${encodeURI("pre1/obj1")}?token=''`
}${minioBrowserPrefix}/download/bk1/${encodeURI("pre1/obj1")}?token=`
expect(setLocation).toHaveBeenCalledWith(url)
})


@@ -16,17 +16,15 @@
import web from "../web"
import history from "../history"
import {
sortObjectsByName,
sortObjectsBySize,
sortObjectsByDate
} from "../utils"
import { sortObjectsByName, sortObjectsBySize, sortObjectsByDate } from "../utils"
import { getCurrentBucket } from "../buckets/selectors"
import { getCurrentPrefix, getCheckedList } from "./selectors"
import * as alertActions from "../alert/actions"
import * as bucketActions from "../buckets/actions"
import { minioBrowserPrefix } from "../constants"
export const SET_LIST = "objects/SET_LIST"
export const RESET_LIST = "object/RESET_LIST"
export const APPEND_LIST = "objects/APPEND_LIST"
export const REMOVE = "objects/REMOVE"
export const SET_SORT_BY = "objects/SET_SORT_BY"
@@ -45,6 +43,10 @@ export const setList = (objects, marker, isTruncated) => ({
isTruncated
})
export const resetList = () => ({
type: RESET_LIST
})
export const appendList = (objects, marker, isTruncated) => ({
type: APPEND_LIST,
objects,
@@ -54,47 +56,54 @@ export const appendList = (objects, marker, isTruncated) => ({
export const fetchObjects = append => {
return function(dispatch, getState) {
const {
buckets: { currentBucket },
objects: { currentPrefix, marker }
} = getState()
const {buckets: {currentBucket}, objects: {currentPrefix, marker}} = getState()
if (currentBucket) {
return web
.ListObjects({
bucketName: currentBucket,
prefix: currentPrefix,
marker: append ? marker : ""
})
.then(res => {
let objects = []
if (res.objects) {
objects = res.objects.map(object => {
return {
...object,
name: object.name.replace(currentPrefix, "")
}
})
}
if (append) {
dispatch(appendList(objects, res.nextmarker, res.istruncated))
} else {
dispatch(setList(objects, res.nextmarker, res.istruncated))
dispatch(setSortBy(""))
dispatch(setSortOrder(false))
}
dispatch(setPrefixWritable(res.writable))
})
.catch(err => {
dispatch(alertActions.set({ type: "danger", message: err.message }))
history.push("/login")
})
}
.ListObjects({
bucketName: currentBucket,
prefix: currentPrefix,
marker: append ? marker : ""
})
.then(res => {
let objects = []
if (res.objects) {
objects = res.objects.map(object => {
return {
...object,
name: object.name.replace(currentPrefix, "")
}
})
}
if (append) {
dispatch(appendList(objects, res.nextmarker, res.istruncated))
} else {
dispatch(setList(objects, res.nextmarker, res.istruncated))
dispatch(setSortBy(""))
dispatch(setSortOrder(false))
}
dispatch(setPrefixWritable(res.writable))
})
.catch(err => {
if (web.LoggedIn()) {
dispatch(
alertActions.set({
type: "danger",
message: err.message,
autoClear: true
})
)
dispatch(resetList())
} else {
history.push("/login")
}
})
}
}
}
export const sortObjects = sortBy => {
return function(dispatch, getState) {
const { objects } = getState()
const {objects} = getState()
const sortOrder = objects.sortBy == sortBy ? !objects.sortOrder : true
dispatch(setSortBy(sortBy))
dispatch(setSortOrder(sortOrder))
@@ -194,30 +203,39 @@ export const shareObject = (object, days, hours, minutes) => {
const currentPrefix = getCurrentPrefix(getState())
const objectName = `${currentPrefix}${object}`
const expiry = days * 24 * 60 * 60 + hours * 60 * 60 + minutes * 60
return web
.PresignedGet({
host: location.host,
bucket: currentBucket,
object: objectName,
expiry
})
.then(obj => {
dispatch(showShareObject(object, obj.url))
dispatch(
alertActions.set({
type: "success",
message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`
})
)
})
.catch(err => {
dispatch(
alertActions.set({
type: "danger",
message: err.message
})
)
})
if (web.LoggedIn()) {
return web
.PresignedGet({
host: location.host,
bucket: currentBucket,
object: objectName
})
.then(obj => {
dispatch(showShareObject(object, obj.url))
dispatch(
alertActions.set({
type: "success",
message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`
})
)
})
.catch(err => {
dispatch(
alertActions.set({
type: "danger",
message: err.message
})
)
})
} else {
dispatch(showShareObject(object, `${location.host}` + '/' + `${currentBucket}` + '/' + encodeURI(objectName)))
dispatch(
alertActions.set({
type: "success",
message: `Object shared.`
})
)
}
}
}
@@ -263,7 +281,7 @@ export const downloadObject = object => {
} else {
const url = `${
window.location.origin
}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=''`
}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
window.location = url
}
}
@@ -292,7 +310,7 @@ export const downloadCheckedObjects = () => {
objects: getCheckedList(state)
}
if (!web.LoggedIn()) {
const requestUrl = location.origin + "/minio/zip?token=''"
const requestUrl = location.origin + "/minio/zip?token="
downloadZip(requestUrl, req, dispatch)
} else {
return web
@@ -303,14 +321,13 @@ export const downloadCheckedObjects = () => {
}${minioBrowserPrefix}/zip?token=${res.token}`
downloadZip(requestUrl, req, dispatch)
})
.catch(err =>
dispatch(
alertActions.set({
type: "danger",
message: err.message
})
)
.catch(err => dispatch(
alertActions.set({
type: "danger",
message: err.message
})
)
)
}
}
}
@@ -333,8 +350,7 @@ const downloadZip = (url, req, dispatch) => {
var separator = req.prefix.length > 1 ? "-" : ""
anchor.href = blobUrl
anchor.download =
req.bucketName + separator + req.prefix.slice(0, -1) + ".zip"
anchor.download = req.bucketName + separator + req.prefix.slice(0, -1) + ".zip"
anchor.click()
window.URL.revokeObjectURL(blobUrl)


@@ -50,6 +50,13 @@ export default (
marker: action.marker,
isTruncated: action.isTruncated
}
case actionsObjects.RESET_LIST:
return {
...state,
list: [],
marker: "",
isTruncated: false
}
case actionsObjects.APPEND_LIST:
return {
...state,


@@ -100,10 +100,24 @@ div.fesl-row {
}
}
.fesl-item-name {
a {
cursor: default;
}
}
/*--------------------------
Icons
----------------------------*/
&[data-type=folder] { .list-type(#a1d6dd, '\f114'); }
&[data-type=folder] {
.list-type(#a1d6dd, '\f114');
.fesl-item-name {
a {
cursor: pointer;
}
}
}
&[data-type=pdf] {.list-type(#fa7775, '\f1c1'); }
&[data-type=zip] { .list-type(#427089, '\f1c6'); }
&[data-type=audio] { .list-type(#009688, '\f1c7'); }


@@ -70,9 +70,9 @@ async.waterfall([
commitId = stdout.replace('\n', '')
if (commitId.length !== 40) throw new Error('commitId invalid : ' + commitId)
assetsFileName = 'ui-assets.go';
var cmd = 'go-bindata-assetfs -pkg browser -nocompress=true production/...'
var cmd = 'go-bindata-assetfs -o bindata_assetfs.go -pkg browser -nocompress=true production/...'
if (!isProduction) {
cmd = 'go-bindata-assetfs -pkg browser -nocompress=true dev/...'
cmd = 'go-bindata-assetfs -o bindata_assetfs.go -pkg browser -nocompress=true dev/...'
}
console.log('Running', cmd)
exec(cmd, cb)


@@ -13,8 +13,7 @@
"setupTestFrameworkScriptFile": "./app/js/jest/setup.js",
"testURL": "https://localhost:8080",
"moduleNameMapper": {
"\\.(jpg|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga)$":
"<rootDir>/app/js/jest/__mocks__/fileMock.js",
"\\.(jpg|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga)$": "<rootDir>/app/js/jest/__mocks__/fileMock.js",
"\\.(css|scss)$": "identity-obj-proxy"
}
},
@@ -40,29 +39,31 @@
"babel-preset-es2015": "^6.14.0",
"babel-preset-react": "^6.11.1",
"babel-register": "^6.14.0",
"copy-webpack-plugin": "^0.3.3",
"copy-webpack-plugin": "^4.6.0",
"css-loader": "^0.23.1",
"enzyme": "^3.3.0",
"enzyme-adapter-react-16": "^1.1.1",
"esformatter": "^0.10.0",
"esformatter-jsx": "^7.4.1",
"esformatter-jsx-ignore": "^1.0.6",
"html-webpack-plugin": "^2.30.1",
"html-webpack-plugin": "^3.2.0",
"jest": "^22.1.4",
"jest-enzyme": "^4.0.2",
"json-loader": "^0.5.4",
"less": "^2.7.1",
"less-loader": "^2.2.3",
"purifycss-webpack-plugin": "^2.0.3",
"less": "^3.9.0",
"less-loader": "^4.1.0",
"purgecss-webpack-plugin": "^1.4.0",
"style-loader": "^0.13.1",
"url-loader": "^0.5.7",
"webpack-dev-server": "^2.11.1"
"webpack-cli": "^3.2.0",
"webpack-dev-server": "^3.1.14"
},
"dependencies": {
"bootstrap": "^3.3.6",
"classnames": "^2.2.3",
"expect": "^1.20.2",
"font-awesome": "^4.7.0",
"glob-all": "^3.1.0",
"history": "^4.7.2",
"humanize": "0.0.9",
"identity-obj-proxy": "^3.0.0",
@@ -89,6 +90,6 @@
"reselect": "^3.0.1",
"superagent": "^3.8.2",
"superagent-es6-promise": "^1.0.0",
"webpack": "^3.10.0"
"webpack": "^4.28.3"
}
}

File diff suppressed because one or more lines are too long


@@ -16,11 +16,13 @@
var webpack = require('webpack')
var path = require('path')
var glob = require('glob-all')
var CopyWebpackPlugin = require('copy-webpack-plugin')
var purify = require("purifycss-webpack-plugin")
var PurgecssPlugin = require('purgecss-webpack-plugin')
var exports = {
context: __dirname,
mode: 'development',
entry: [
path.resolve(__dirname, 'app/index.js')
],
@@ -99,12 +101,11 @@ var exports = {
{from: 'app/index.html'}
]),
new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
new purify({
basePath: __dirname,
paths: [
"app/index.html",
"app/js/*.js"
]
new PurgecssPlugin({
paths: glob.sync([
path.join(__dirname, 'app/index.html'),
path.join(__dirname, 'app/js/*.js')
])
})
]
}


@@ -16,11 +16,13 @@
var webpack = require('webpack')
var path = require('path')
var glob = require('glob-all')
var CopyWebpackPlugin = require('copy-webpack-plugin')
var purify = require("purifycss-webpack-plugin")
var PurgecssPlugin = require('purgecss-webpack-plugin')
var exports = {
context: __dirname,
mode: 'production',
entry: [
path.resolve(__dirname, 'app/index.js')
],
@@ -74,16 +76,12 @@ var exports = {
{from: 'app/img/logo.svg'},
{from: 'app/index.html'}
]),
new webpack.DefinePlugin({
'process.env.NODE_ENV': '"production"'
}),
new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
new purify({
basePath: __dirname,
paths: [
"app/index.html",
"app/js/*.js"
]
new PurgecssPlugin({
paths: glob.sync([
path.join(__dirname, 'app/index.html'),
path.join(__dirname, 'app/js/*.js')
])
})
]
}

File diff suppressed because it is too large


@@ -89,11 +89,11 @@ check_minimum_version() {
assert_is_supported_arch() {
case "${ARCH}" in
x86_64 | amd64 | aarch64 | arm* )
x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x )
return
;;
*)
echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, arm*]"
echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x]"
exit 1
esac
}

35
buildscripts/cross-compile.sh Executable file

@@ -0,0 +1,35 @@
#!/bin/bash
# Enable tracing if set.
[ -n "$BASH_XTRACEFD" ] && set -ex
function _init() {
## All binaries are static make sure to disable CGO.
export CGO_ENABLED=0
## List of architectures and OS to test cross compilation.
SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64"
}
function _build_and_sign() {
local osarch=$1
IFS=/ read -r -a arr <<<"$osarch"
os="${arr[0]}"
arch="${arr[1]}"
package=$(go list -f '{{.ImportPath}}')
printf -- "--> %15s:%s\n" "${osarch}" "${package}"
# Go build to build the binary.
export GOOS=$os
export GOARCH=$arch
go build -tags kqueue -o /dev/null
}
function main() {
echo "Testing builds for OS/Arch: ${SUPPORTED_OSARCH}"
for each_osarch in ${SUPPORTED_OSARCH}; do
_build_and_sign "${each_osarch}"
done
}
_init && main "$@"

60
buildscripts/gateway-tests.sh Executable file

@@ -0,0 +1,60 @@
#!/bin/bash
#
# Minio Cloud Storage, (C) 2019 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e
set -E
set -o pipefail
export SERVER_ENDPOINT=127.0.0.1:24240
export ENABLE_HTTPS=0
export ACCESS_KEY=minio
export SECRET_KEY=minio123
export MINIO_ACCESS_KEY=minio
export MINIO_SECRET_KEY=minio123
export AWS_ACCESS_KEY_ID=minio
export AWS_SECRET_ACCESS_KEY=minio123
trap "cat server.log;cat gateway.log" SIGHUP SIGINT SIGTERM
./minio --quiet --json server data --address 127.0.0.1:24242 > server.log &
sleep 3
./minio --quiet --json gateway s3 http://127.0.0.1:24242 --address 127.0.0.1:24240 > gateway.log &
sleep 3
mkdir -p /mint
git clone https://github.com/minio/mint /mint
cd /mint
export MINT_ROOT_DIR=${MINT_ROOT_DIR:-/mint}
export MINT_RUN_CORE_DIR="$MINT_ROOT_DIR/run/core"
export MINT_RUN_SECURITY_DIR="$MINT_ROOT_DIR/run/security"
export WGET="wget --quiet --no-check-certificate"
go get github.com/go-ini/ini
./create-data-files.sh
./preinstall.sh
# install mint app packages
for pkg in "build"/*/install.sh; do
echo "Running $pkg"
$pkg
done
./postinstall.sh
/mint/entrypoint.sh || cat server.log gateway.log fail


@@ -4,7 +4,7 @@ set -e
echo "" > coverage.txt
for d in $(go list ./... | grep -v browser); do
go test -coverprofile=profile.out -covermode=atomic "$d"
CGO_ENABLED=0 go test -v -coverprofile=profile.out -covermode=atomic "$d"
if [ -f profile.out ]; then
cat profile.out >> coverage.txt
rm profile.out

View File

@@ -68,9 +68,41 @@ function start_minio_erasure_sets()
echo "$minio_pid"
}
function start_minio_dist_erasure_sets_ipv6()
{
declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9000.log" 2>&1 &
minio_pids[0]=$!
"${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9001.log" 2>&1 &
minio_pids[1]=$!
"${MINIO[@]}" server --address="[::1]:9002" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9002.log" 2>&1 &
minio_pids[2]=$!
"${MINIO[@]}" server --address="[::1]:9003" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9003.log" 2>&1 &
minio_pids[3]=$!
"${MINIO[@]}" server --address="[::1]:9004" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9004.log" 2>&1 &
minio_pids[4]=$!
"${MINIO[@]}" server --address="[::1]:9005" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9005.log" 2>&1 &
minio_pids[5]=$!
"${MINIO[@]}" server --address="[::1]:9006" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9006.log" 2>&1 &
minio_pids[6]=$!
"${MINIO[@]}" server --address="[::1]:9007" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9007.log" 2>&1 &
minio_pids[7]=$!
"${MINIO[@]}" server --address="[::1]:9008" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9008.log" 2>&1 &
minio_pids[8]=$!
"${MINIO[@]}" server --address="[::1]:9009" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9009.log" 2>&1 &
minio_pids[9]=$!
sleep 35
echo "${minio_pids[@]}"
}
function start_minio_dist_erasure_sets()
{
declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
minio_pids[0]=$!
"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
@@ -99,6 +131,8 @@ function start_minio_dist_erasure_sets()
function start_minio_dist_erasure()
{
declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
minio_pids[0]=$!
"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
@@ -157,6 +191,34 @@ function run_test_erasure_sets() {
return "$rv"
}
function run_test_dist_erasure_sets_ipv6()
{
minio_pids=( $(start_minio_dist_erasure_sets_ipv6) )
export SERVER_ENDPOINT="[::1]:9000"
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
for pid in "${minio_pids[@]}"; do
kill "$pid"
done
sleep 3
if [ "$rv" -ne 0 ]; then
for i in $(seq 0 9); do
echo "server$i log:"
cat "$WORK_DIR/dist-minio-v6-900$i.log"
done
fi
for i in $(seq 0 9); do
rm -f "$WORK_DIR/dist-minio-v6-900$i.log"
done
return "$rv"
}
function run_test_dist_erasure_sets()
{
minio_pids=( $(start_minio_dist_erasure_sets) )
@@ -233,6 +295,7 @@ function run_test_gateway_s3()
{
minio_pid="$(start_minio_gateway_s3)"
export SERVER_ENDPOINT="127.0.0.1:9000"
export ACCESS_KEY=Q3AM3UQ867SPQQA43P2F
export SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
@@ -275,6 +338,7 @@ function __init__()
exit 1
fi
sed -i 's|-sS|-sSg|g' "$FUNCTIONAL_TESTS"
chmod a+x "$FUNCTIONAL_TESTS"
}
@@ -315,6 +379,13 @@ function main()
exit 1
fi
echo "Testing in Distributed Erasure setup as sets with ipv6"
if ! run_test_dist_erasure_sets_ipv6; then
echo "FAILED"
rm -fr "$WORK_DIR"
exit 1
fi
echo "Testing in Gateway S3 setup"
if ! run_test_gateway_s3; then
echo "FAILED"

View File

@@ -21,6 +21,7 @@ import (
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/policy"
)
@@ -54,28 +55,30 @@ type accessControlPolicy struct {
// This operation uses the ACL
// subresource to return the ACL of a specified bucket.
func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "GetBucketACL")
ctx := newContext(r, w, "GetBucketACL")
defer logger.AuditLog(w, r, "GetBucketACL", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}
// Allow getBucketACL if the policy action is set; since this is a dummy
// call, we simply re-purpose the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
// Before proceeding validate if bucket exists.
_, err := objAPI.GetBucketInfo(ctx, bucket)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -89,7 +92,7 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
Permission: "FULL_CONTROL",
})
if err := xml.NewEncoder(w).Encode(acl); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
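// A minimal, self-contained sketch (not part of this diff) of the dummy
// ACL shape both handlers encode above. The struct and field names below
// are illustrative, assumed from the S3 ACL XML schema rather than copied
// from this file.
package main

import (
	"encoding/xml"
	"os"
)

type grant struct {
	Permission string `xml:"Permission"`
}

type accessControlPolicySketch struct {
	XMLName xml.Name `xml:"AccessControlPolicy"`
	Grants  []grant  `xml:"AccessControlList>Grant"`
}

func main() {
	// Prints the owner-only, full-control policy the handlers respond with.
	acl := accessControlPolicySketch{Grants: []grant{{Permission: "FULL_CONTROL"}}}
	xml.NewEncoder(os.Stdout).Encode(acl)
}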
@@ -101,7 +104,9 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
// This operation uses the ACL
// subresource to return the ACL of a specified object.
func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "GetObjectACL")
ctx := newContext(r, w, "GetObjectACL")
defer logger.AuditLog(w, r, "GetObjectACL", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -109,21 +114,21 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}
// Allow getObjectACL if the policy action is set; since this is a dummy
// call, we simply re-purpose the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
// Before proceeding validate if object exists.
_, err := objAPI.GetObjectInfo(ctx, bucket, object)
_, err := objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -137,7 +142,7 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.
Permission: "FULL_CONTROL",
})
if err := xml.NewEncoder(w).Encode(acl); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

File diff suppressed because it is too large

View File

@@ -26,8 +26,8 @@ import (
"net/http"
"net/http/httptest"
"net/url"
"os"
"strings"
"sync"
"testing"
"time"
@@ -38,100 +38,194 @@ import (
var (
configJSON = []byte(`{
"version": "13",
"credential": {
"accessKey": "minio",
"secretKey": "minio123"
"version": "33",
"credential": {
"accessKey": "minio",
"secretKey": "minio123"
},
"region": "us-east-1",
"worm": "off",
"storageclass": {
"standard": "",
"rrs": ""
},
"cache": {
"drives": [],
"expiry": 90,
"maxuse": 80,
"exclude": []
},
"kms": {
"vault": {
"endpoint": "",
"auth": {
"type": "",
"approle": {
"id": "",
"secret": ""
}
},
"key-id": {
"name": "",
"version": 0
}
}
},
"notify": {
"amqp": {
"1": {
"enable": false,
"url": "",
"exchange": "",
"routingKey": "",
"exchangeType": "",
"deliveryMode": 0,
"mandatory": false,
"immediate": false,
"durable": false,
"internal": false,
"noWait": false,
"autoDeleted": false
}
},
"elasticsearch": {
"1": {
"enable": false,
"format": "namespace",
"url": "",
"index": ""
}
},
"kafka": {
"1": {
"enable": false,
"brokers": null,
"topic": "",
"tls": {
"enable": false,
"skipVerify": false,
"clientAuth": 0
},
"sasl": {
"enable": false,
"username": "",
"password": ""
}
}
},
"mqtt": {
"1": {
"enable": false,
"broker": "",
"topic": "",
"qos": 0,
"username": "",
"password": "",
"reconnectInterval": 0,
"keepAliveInterval": 0,
"queueDir": ""
}
},
"mysql": {
"1": {
"enable": false,
"format": "namespace",
"dsnString": "",
"table": "",
"host": "",
"port": "",
"user": "",
"password": "",
"database": ""
}
},
"nats": {
"1": {
"enable": false,
"address": "",
"subject": "",
"username": "",
"password": "",
"token": "",
"secure": false,
"pingInterval": 0,
"streaming": {
"enable": false,
"clusterID": "",
"async": false,
"maxPubAcksInflight": 0
}
}
},
"region": "us-west-1",
"logger": {
"console": {
"enable": true,
"level": "fatal"
},
"file": {
"nsq": {
"1": {
"enable": false,
"nsqdAddress": "",
"topic": "",
"tls": {
"enable": false,
"fileName": "",
"level": ""
"skipVerify": false
}
},
"notify": {
"amqp": {
"1": {
"enable": false,
"url": "",
"exchange": "",
"routingKey": "",
"exchangeType": "",
"mandatory": false,
"immediate": false,
"durable": false,
"internal": false,
"noWait": false,
"autoDeleted": false
}
},
"nats": {
"1": {
"enable": false,
"address": "",
"subject": "",
"username": "",
"password": "",
"token": "",
"secure": false,
"pingInterval": 0,
"streaming": {
"enable": false,
"clusterID": "",
"clientID": "",
"async": false,
"maxPubAcksInflight": 0
}
}
},
"elasticsearch": {
"1": {
"enable": false,
"url": "",
"index": ""
}
},
"redis": {
"1": {
"enable": false,
"address": "",
"password": "",
"key": ""
}
},
"postgresql": {
"1": {
"enable": false,
"connectionString": "",
"table": "",
"host": "",
"port": "",
"user": "",
"password": "",
"database": ""
}
},
"kafka": {
"1": {
"enable": false,
"brokers": null,
"topic": ""
}
},
"webhook": {
"1": {
"enable": false,
"endpoint": ""
}
}
}
}`)
}
},
"postgresql": {
"1": {
"enable": false,
"format": "namespace",
"connectionString": "",
"table": "",
"host": "",
"port": "",
"user": "",
"password": "",
"database": ""
}
},
"redis": {
"1": {
"enable": false,
"format": "namespace",
"address": "",
"password": "",
"key": ""
}
},
"webhook": {
"1": {
"enable": false,
"endpoint": ""
}
}
},
"logger": {
"console": {
"enabled": true
},
"http": {
"1": {
"enabled": false,
"endpoint": "https://username:password@example.com/api"
}
}
},
"compress": {
"enabled": false,
"extensions":[".txt",".log",".csv",".json"],
"mime-types":["text/csv","text/plain","application/json"]
},
"openid": {
"jwks": {
"url": ""
}
},
"policy": {
"opa": {
"url": "",
"authToken": ""
}
}
}
`)
)
// adminXLTestBed - encapsulates subsystems that need to be setup for
@@ -149,17 +243,17 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
// reset global variables to start afresh.
resetTestGlobals()
// Initialize minio server config.
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
return nil, err
}
// Initializing objectLayer for HealFormatHandler.
objLayer, xlDirs, xlErr := initTestXLObjLayer()
if xlErr != nil {
return nil, xlErr
}
// Initialize minio server config.
if err := newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
return nil, err
}
// Initialize boot time
globalBootTime = UTCNow()
@@ -176,30 +270,25 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
// Init global heal state
initAllHealState(globalIsXL)
globalNotificationSys, err = NewNotificationSys(globalServerConfig, globalEndpoints)
if err != nil {
return nil, err
}
globalNotificationSys = NewNotificationSys(globalServerConfig, globalEndpoints)
// Create new policy system.
globalPolicySys = NewPolicySys()
// Setup admin mgmt REST API handlers.
adminRouter := mux.NewRouter()
registerAdminRouter(adminRouter)
registerAdminRouter(adminRouter, true, true)
return &adminXLTestBed{
configPath: rootPath,
xlDirs: xlDirs,
objLayer: objLayer,
router: adminRouter,
xlDirs: xlDirs,
objLayer: objLayer,
router: adminRouter,
}, nil
}
// TearDown - method that resets the test bed for subsequent unit
// tests to start afresh.
func (atb *adminXLTestBed) TearDown() {
os.RemoveAll(atb.configPath)
removeRoots(atb.xlDirs)
resetTestGlobals()
}
@@ -219,8 +308,8 @@ func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
for i := 0; i < 10; i++ {
objectName := fmt.Sprintf("%s-%d", objName, i)
_, err = atb.objLayer.PutObject(context.Background(), bucketName, objectName,
mustGetHashReader(t, bytes.NewReader([]byte("hello")),
int64(len("hello")), "", ""), nil)
mustGetPutObjReader(t, bytes.NewReader([]byte("hello")),
int64(len("hello")), "", ""), nil, ObjectOptions{})
if err != nil {
t.Fatalf("Failed to create %s - %v", objectName,
err)
@@ -232,14 +321,14 @@ func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
{
objName := "mpObject"
uploadID, err := atb.objLayer.NewMultipartUpload(context.Background(), bucketName,
objName, nil)
objName, nil, ObjectOptions{})
if err != nil {
t.Fatalf("mp new error: %v", err)
}
_, err = atb.objLayer.PutObjectPart(context.Background(), bucketName, objName,
uploadID, 3, mustGetHashReader(t, bytes.NewReader(
[]byte("hello")), int64(len("hello")), "", ""))
uploadID, 3, mustGetPutObjReader(t, bytes.NewReader(
[]byte("hello")), int64(len("hello")), "", ""), ObjectOptions{})
if err != nil {
t.Fatalf("mp put error: %v", err)
}
@@ -418,6 +507,8 @@ func getServiceCmdRequest(cmd cmdType, cred auth.Credentials, body []byte) (*htt
// Set body
req.Body = ioutil.NopCloser(bytes.NewReader(body))
req.ContentLength = int64(len(body))
// Set sha-sum header
req.Header.Set("X-Amz-Content-Sha256", getSHA256Hash(body))
@@ -442,17 +533,22 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
// single node setup, this degenerates to a simple function
// call under the hood.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
var wg sync.WaitGroup
// Setting up a go routine to simulate ServerRouter's
// handleServiceSignals for stop and restart commands.
if cmd == restartCmd {
go testServiceSignalReceiver(cmd, t)
wg.Add(1)
go func() {
defer wg.Done()
testServiceSignalReceiver(cmd, t)
}()
}
credentials := globalServerConfig.GetCredential()
body, err := json.Marshal(madmin.ServiceAction{
cmd.toServiceActionValue()})
Action: cmd.toServiceActionValue()})
if err != nil {
t.Fatalf("JSONify error: %v", err)
}
@@ -483,6 +579,9 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
t.Errorf("Expected to receive %d status code but received %d. Body (%s)",
http.StatusOK, rec.Code, string(resp))
}
// Wait until testServiceSignalReceiver() called in a goroutine quits.
wg.Wait()
}
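// A minimal sketch (not part of this diff) of the pattern the WaitGroup
// above introduces: keeping a helper goroutine from outliving its test.
// Uses only the standard library; doWork is a hypothetical stand-in for
// testServiceSignalReceiver.
package main

import (
	"fmt"
	"sync"
)

func doWork() { fmt.Println("signal receiver ran") }

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		doWork()
	}()
	// ... the test body would exercise the handler here ...
	wg.Wait() // do not return until the goroutine has exited
}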
// Test for service status management REST API.
@@ -507,7 +606,6 @@ func TestServiceSetCreds(t *testing.T) {
// single node setup, this degenerates to a simple function
// call under the hood.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
credentials := globalServerConfig.GetCredential()
@@ -538,8 +636,14 @@ func TestServiceSetCreds(t *testing.T) {
if err != nil {
t.Fatalf("JSONify err: %v", err)
}
ebody, err := madmin.EncryptData(credentials.SecretKey, body)
if err != nil {
t.Fatal(err)
}
// Construct setCreds request
req, err := getServiceCmdRequest(setCreds, credentials, body)
req, err := getServiceCmdRequest(setCreds, credentials, ebody)
if err != nil {
t.Fatalf("Failed to build service status request %v", err)
}
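// A minimal round-trip sketch (not part of this diff) of the encryption
// step added above: admin request bodies are now encrypted with the
// caller's secret key before being sent. Assumes the matching
// madmin.DecryptData helper, which is not shown in this diff.
package main

import (
	"bytes"
	"fmt"

	"github.com/minio/minio/pkg/madmin"
)

func main() {
	secretKey := "minio123"
	body := []byte(`{"accessKey":"newkey","secretKey":"newsecret"}`)

	ebody, err := madmin.EncryptData(secretKey, body)
	if err != nil {
		panic(err)
	}
	// The server reverses the encryption with the same secret key.
	plain, err := madmin.DecryptData(secretKey, bytes.NewReader(ebody))
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(plain, body)) // true
}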
@@ -570,193 +674,6 @@ func TestServiceSetCreds(t *testing.T) {
}
}
// mkLockQueryVal - helper function to build lock query param.
func mkLockQueryVal(bucket, prefix, durationStr string) url.Values {
qVal := url.Values{}
qVal.Set(string(mgmtBucket), bucket)
qVal.Set(string(mgmtPrefix), prefix)
qVal.Set(string(mgmtLockOlderThan), durationStr)
return qVal
}
// Test for locks list management REST API.
func TestListLocksHandler(t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
defer adminTestBed.TearDown()
// Initialize admin peers to make admin RPC calls.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
testCases := []struct {
bucket string
prefix string
duration string
expectedStatus int
}{
// Test 1 - valid testcase
{
bucket: "mybucket",
prefix: "myobject",
duration: "1s",
expectedStatus: http.StatusOK,
},
// Test 2 - invalid duration
{
bucket: "mybucket",
prefix: "myprefix",
duration: "invalidDuration",
expectedStatus: http.StatusBadRequest,
},
// Test 3 - invalid bucket name
{
bucket: `invalid\\Bucket`,
prefix: "myprefix",
duration: "1h",
expectedStatus: http.StatusBadRequest,
},
// Test 4 - invalid prefix
{
bucket: "mybucket",
prefix: `invalid\\Prefix`,
duration: "1h",
expectedStatus: http.StatusBadRequest,
},
}
for i, test := range testCases {
queryVal := mkLockQueryVal(test.bucket, test.prefix, test.duration)
req, err := newTestRequest("GET", "/minio/admin/v1/locks?"+queryVal.Encode(), 0, nil)
if err != nil {
t.Fatalf("Test %d - Failed to construct list locks request - %v", i+1, err)
}
cred := globalServerConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
t.Fatalf("Test %d - Failed to sign list locks request - %v", i+1, err)
}
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if test.expectedStatus != rec.Code {
t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.expectedStatus, rec.Code)
}
}
}
// Test for locks clear management REST API.
func TestClearLocksHandler(t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
defer adminTestBed.TearDown()
// Initialize admin peers to make admin RPC calls.
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
testCases := []struct {
bucket string
prefix string
duration string
expectedStatus int
}{
// Test 1 - valid testcase
{
bucket: "mybucket",
prefix: "myobject",
duration: "1s",
expectedStatus: http.StatusOK,
},
// Test 2 - invalid duration
{
bucket: "mybucket",
prefix: "myprefix",
duration: "invalidDuration",
expectedStatus: http.StatusBadRequest,
},
// Test 3 - invalid bucket name
{
bucket: `invalid\\Bucket`,
prefix: "myprefix",
duration: "1h",
expectedStatus: http.StatusBadRequest,
},
// Test 4 - invalid prefix
{
bucket: "mybucket",
prefix: `invalid\\Prefix`,
duration: "1h",
expectedStatus: http.StatusBadRequest,
},
}
for i, test := range testCases {
queryVal := mkLockQueryVal(test.bucket, test.prefix, test.duration)
req, err := newTestRequest("DELETE", "/minio/admin/v1/locks?"+queryVal.Encode(), 0, nil)
if err != nil {
t.Fatalf("Test %d - Failed to construct clear locks request - %v", i+1, err)
}
cred := globalServerConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
t.Fatalf("Test %d - Failed to sign clear locks request - %v", i+1, err)
}
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if test.expectedStatus != rec.Code {
t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.expectedStatus, rec.Code)
}
}
}
// Test for lock query param validation helper function.
func TestValidateLockQueryParams(t *testing.T) {
// reset globals.
// this is to make sure that the tests are not affected by modified globals.
resetTestGlobals()
// initialize NSLock.
initNSLock(false)
// Sample query values for test cases.
allValidVal := mkLockQueryVal("bucket", "prefix", "1s")
invalidBucketVal := mkLockQueryVal(`invalid\\Bucket`, "prefix", "1s")
invalidPrefixVal := mkLockQueryVal("bucket", `invalid\\Prefix`, "1s")
invalidOlderThanVal := mkLockQueryVal("bucket", "prefix", "invalidDuration")
testCases := []struct {
qVals url.Values
apiErr APIErrorCode
}{
{
qVals: invalidBucketVal,
apiErr: ErrInvalidBucketName,
},
{
qVals: invalidPrefixVal,
apiErr: ErrInvalidObjectName,
},
{
qVals: invalidOlderThanVal,
apiErr: ErrInvalidDuration,
},
{
qVals: allValidVal,
apiErr: ErrNone,
},
}
for i, test := range testCases {
_, _, _, apiErr := validateLockQueryParams(test.qVals)
if apiErr != test.apiErr {
t.Errorf("Test %d - Expected error %v but received %v", i+1, test.apiErr, apiErr)
}
}
}
// buildAdminRequest - helper function to build an admin API request.
func buildAdminRequest(queryVal url.Values, method, path string,
contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error) {
@@ -787,7 +704,6 @@ func TestGetConfigHandler(t *testing.T) {
// Initialize admin peers to make admin RPC calls.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
// Prepare query params for get-config mgmt REST API.
queryVal := url.Values{}
@@ -816,18 +732,19 @@ func TestSetConfigHandler(t *testing.T) {
// Initialize admin peers to make admin RPC calls.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
// SetConfigHandler restarts minio setup - need to start a
// signal receiver to receive on globalServiceSignalCh.
go testServiceSignalReceiver(restartCmd, t)
// Prepare query params for set-config mgmt REST API.
queryVal := url.Values{}
queryVal.Set("config", "")
password := globalServerConfig.GetCredential().SecretKey
econfigJSON, err := madmin.EncryptData(password, configJSON)
if err != nil {
t.Fatal(err)
}
req, err := buildAdminRequest(queryVal, http.MethodPut, "/config",
int64(len(configJSON)), bytes.NewReader(configJSON))
int64(len(econfigJSON)), bytes.NewReader(econfigJSON))
if err != nil {
t.Fatalf("Failed to construct set-config object request - %v", err)
}
@@ -835,23 +752,13 @@ func TestSetConfigHandler(t *testing.T) {
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
t.Errorf("Expected to succeed but failed with %d", rec.Code)
}
result := setConfigResult{}
err = json.NewDecoder(rec.Body).Decode(&result)
if err != nil {
t.Fatalf("Failed to decode set config result json %v", err)
}
if !result.Status {
t.Error("Expected set-config to succeed, but failed")
t.Errorf("Expected to succeed but failed with %d, body: %s", rec.Code, rec.Body)
}
// Check that a very large config file returns an error.
{
// Make a large enough config string
invalidCfg := []byte(strings.Repeat("A", maxConfigJSONSize+1))
invalidCfg := []byte(strings.Repeat("A", maxEConfigJSONSize+1))
req, err := buildAdminRequest(queryVal, http.MethodPut, "/config",
int64(len(invalidCfg)), bytes.NewReader(invalidCfg))
if err != nil {
@@ -870,7 +777,7 @@ func TestSetConfigHandler(t *testing.T) {
// Check that a config with duplicate keys in an object returns an
// error.
{
invalidCfg := append(configJSON[:len(configJSON)-1], []byte(`, "version": "15"}`)...)
invalidCfg := append(econfigJSON[:len(econfigJSON)-1], []byte(`, "version": "15"}`)...)
req, err := buildAdminRequest(queryVal, http.MethodPut, "/config",
int64(len(invalidCfg)), bytes.NewReader(invalidCfg))
if err != nil {
@@ -881,7 +788,7 @@ func TestSetConfigHandler(t *testing.T) {
adminTestBed.router.ServeHTTP(rec, req)
respBody := string(rec.Body.Bytes())
if rec.Code != http.StatusBadRequest ||
!strings.Contains(respBody, "JSON configuration provided has objects with duplicate keys") {
!strings.Contains(respBody, "JSON configuration provided is of incorrect format") {
t.Errorf("Got unexpected response code or body %d - %s", rec.Code, respBody)
}
}
@@ -896,7 +803,6 @@ func TestAdminServerInfo(t *testing.T) {
// Initialize admin peers to make admin RPC calls.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))
// Prepare query params for set-config mgmt REST API.
queryVal := url.Values{}
@@ -955,12 +861,12 @@ func TestToAdminAPIErr(t *testing.T) {
// 3. Non-admin API specific error.
{
err: errDiskNotFound,
expectedAPIErr: toAPIErrorCode(errDiskNotFound),
expectedAPIErr: toAPIErrorCode(context.Background(), errDiskNotFound),
},
}
for i, test := range testCases {
actualErr := toAdminAPIErrCode(test.err)
actualErr := toAdminAPIErrCode(context.Background(), test.err)
if actualErr != test.expectedAPIErr {
t.Errorf("Test %d: Expected %v but received %v",
i+1, test.expectedAPIErr, actualErr)
@@ -968,81 +874,6 @@ func TestToAdminAPIErr(t *testing.T) {
}
}
func TestWriteSetConfigResponse(t *testing.T) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rootPath)
testCases := []struct {
status bool
errs []error
}{
// 1. all nodes returned success.
{
status: true,
errs: []error{nil, nil, nil, nil},
},
// 2. some nodes returned errors.
{
status: false,
errs: []error{errDiskNotFound, nil, errDiskAccessDenied, errFaultyDisk},
},
}
testPeers := []adminPeer{
{
addr: "localhost:9001",
},
{
addr: "localhost:9002",
},
{
addr: "localhost:9003",
},
{
addr: "localhost:9004",
},
}
testURL, err := url.Parse("http://dummy.com")
if err != nil {
t.Fatalf("Failed to parse a place-holder url")
}
var actualResult setConfigResult
for i, test := range testCases {
rec := httptest.NewRecorder()
writeSetConfigResponse(rec, testPeers, test.errs, test.status, testURL)
resp := rec.Result()
jsonBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatalf("Test %d: Failed to read response %v", i+1, err)
}
err = json.Unmarshal(jsonBytes, &actualResult)
if err != nil {
t.Fatalf("Test %d: Failed to unmarshal json %v", i+1, err)
}
if actualResult.Status != test.status {
t.Errorf("Test %d: Expected status %v but received %v", i+1, test.status, actualResult.Status)
}
for p, res := range actualResult.NodeResults {
if res.Name != testPeers[p].addr {
t.Errorf("Test %d: Expected node name %s but received %s", i+1, testPeers[p].addr, res.Name)
}
expectedErrMsg := fmt.Sprintf("%v", test.errs[p])
if res.ErrMsg != expectedErrMsg {
t.Errorf("Test %d: Expected error %s but received %s", i+1, expectedErrMsg, res.ErrMsg)
}
expectedErrSet := test.errs[p] != nil
if res.ErrSet != expectedErrSet {
t.Errorf("Test %d: Expected ErrSet %v but received %v", i+1, expectedErrSet, res.ErrSet)
}
}
}
}
func mkHealStartReq(t *testing.T, bucket, prefix string,
opts madmin.HealOpts) *http.Request {
@@ -1129,81 +960,3 @@ func collectHealResults(t *testing.T, adminTestBed *adminXLTestBed, bucket,
return res
}
func TestHealStartNStatusHandler(t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
defer adminTestBed.TearDown()
// gen. test data
adminTestBed.GenerateHealTestData(t)
defer adminTestBed.CleanupHealTestData(t)
// Prepare heal-start request to send to the server.
healOpts := madmin.HealOpts{
Recursive: true,
DryRun: false,
}
bucketName, objName := "mybucket", "myobject-0"
var hss madmin.HealStartSuccess
{
req := mkHealStartReq(t, bucketName, objName, healOpts)
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if http.StatusOK != rec.Code {
t.Errorf("Unexpected status code - got %d but expected %d",
rec.Code, http.StatusOK)
}
err = json.Unmarshal(rec.Body.Bytes(), &hss)
if err != nil {
t.Fatal("unable to unmarshal response")
}
if hss.ClientToken == "" {
t.Errorf("unexpected result")
}
}
{
// test with an invalid client token
req := mkHealStatusReq(t, bucketName, objName, hss.ClientToken+hss.ClientToken)
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if rec.Code != http.StatusBadRequest {
t.Errorf("Unexpected status code")
}
}
{
// fetch heal status
results := collectHealResults(t, adminTestBed, bucketName,
objName, hss.ClientToken, 5)
// check if we got back an expected record
foundIt := false
for _, item := range results.Items {
if item.Type == madmin.HealItemObject &&
item.Bucket == bucketName && item.Object == objName {
foundIt = true
}
}
if !foundIt {
t.Error("did not find expected heal record in heal results")
}
// check that the heal settings in the results are the
// same as what we started the heal seq. with.
if results.HealSettings != healOpts {
t.Errorf("unexpected heal settings: %v",
results.HealSettings)
}
if results.Summary == healStoppedStatus {
t.Errorf("heal sequence stopped unexpectedly")
}
}
}

View File

@@ -20,12 +20,15 @@ import (
"context"
"encoding/json"
"fmt"
"net/http"
"runtime"
"strings"
"sync"
"time"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/sync/errgroup"
)
// healStatusSummary - overall short summary of a healing sequence
@@ -59,8 +62,8 @@ var (
errHealPushStopNDiscard = fmt.Errorf("heal push stopped due to heal stop signal")
errHealStopSignalled = fmt.Errorf("heal stop signaled")
errFnHealFromAPIErr = func(err error) error {
errCode := toAPIErrorCode(err)
errFnHealFromAPIErr = func(ctx context.Context, err error) error {
errCode := toAPIErrorCode(ctx, err)
apiErr := getAPIError(errCode)
return fmt.Errorf("Heal internal error: %s: %s",
apiErr.Code, apiErr.Description)
@@ -110,6 +113,32 @@ func initAllHealState(isErasureMode bool) {
globalAllHealState = allHealState{
healSeqMap: make(map[string]*healSequence),
}
go globalAllHealState.periodicHealSeqsClean()
}
func (ahs *allHealState) periodicHealSeqsClean() {
// Launch clean-up routine to remove this heal sequence (after
// it ends) from the global state after timeout has elapsed.
ticker := time.NewTicker(time.Minute * 5)
defer ticker.Stop()
for {
select {
case <-ticker.C:
now := UTCNow()
ahs.Lock()
for path, h := range ahs.healSeqMap {
if h.hasEnded() && h.endTime.Add(keepHealSeqStateDuration).Before(now) {
delete(ahs.healSeqMap, path)
}
}
ahs.Unlock()
case <-GlobalServiceDoneCh:
// server could be restarting - need
// to exit immediately
return
}
}
}
// getHealSequence - Retrieve a heal sequence by path. The second
@@ -121,6 +150,35 @@ func (ahs *allHealState) getHealSequence(path string) (h *healSequence, exists b
return h, exists
}
func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIErrorCode) {
var hsp madmin.HealStopSuccess
he, exists := ahs.getHealSequence(path)
if !exists {
hsp = madmin.HealStopSuccess{
ClientToken: "invalid",
StartTime: UTCNow(),
}
} else {
hsp = madmin.HealStopSuccess{
ClientToken: he.clientToken,
ClientAddress: he.clientAddress,
StartTime: he.startTime,
}
he.stop()
for !he.hasEnded() {
time.Sleep(1 * time.Second)
}
ahs.Lock()
defer ahs.Unlock()
// Heal sequence explicitly stopped, remove it.
delete(ahs.healSeqMap, path)
}
b, err := json.Marshal(&hsp)
return b, toAdminAPIErrCode(context.Background(), err)
}
// LaunchNewHealSequence - launches a background routine that performs
// healing according to the healSequence argument. For each heal
// sequence, state is stored in the `globalAllHealState`, which is a
@@ -141,20 +199,20 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
existsAndLive = true
}
}
if existsAndLive {
// A heal sequence exists on the given path.
if h.forceStarted {
// stop the running heal sequence - wait for
// it to finish.
// stop the running heal sequence - wait for it to finish.
he.stop()
for !he.hasEnded() {
time.Sleep(10 * time.Second)
time.Sleep(1 * time.Second)
}
} else {
errMsg = "Heal is already running on the given path " +
"(use force-start option to stop and start afresh). " +
fmt.Sprintf("The heal was started by IP %s at %s",
h.clientAddress, h.startTime)
fmt.Sprintf("The heal was started by IP %s at %s, token is %s",
h.clientAddress, h.startTime.Format(http.TimeFormat), h.clientToken)
return nil, ErrHealAlreadyRunning, errMsg
}
@@ -181,48 +239,13 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
// Launch top-level background heal go-routine
go h.healSequenceStart()
// Launch clean-up routine to remove this heal sequence (after
// it ends) from the global state after timeout has elapsed.
go func() {
var keepStateTimeout <-chan time.Time
ticker := time.NewTicker(time.Minute)
defer ticker.Stop()
everyMinute := ticker.C
for {
select {
// Check every minute if heal sequence has ended.
case <-everyMinute:
if h.hasEnded() {
keepStateTimeout = time.After(keepHealSeqStateDuration)
everyMinute = nil
}
// This case does not fire until the heal
// sequence completes.
case <-keepStateTimeout:
// Heal sequence has ended, keep
// results state duration has elapsed,
// so purge state.
ahs.Lock()
defer ahs.Unlock()
delete(ahs.healSeqMap, h.path)
return
case <-globalServiceDoneCh:
// server could be restarting - need
// to exit immediately
return
}
}
}()
b, err := json.Marshal(madmin.HealStartSuccess{
ClientToken: h.clientToken,
ClientAddress: h.clientAddress,
StartTime: h.startTime,
})
if err != nil {
logger.LogIf(context.Background(), err)
logger.LogIf(h.ctx, err)
return nil, ErrInternalError, ""
}
return b, ErrNone, ""
@@ -270,7 +293,7 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
jbytes, err := json.Marshal(h.currentStatus)
if err != nil {
logger.LogIf(context.Background(), err)
logger.LogIf(h.ctx, err)
return nil, ErrInternalError
}
@@ -283,12 +306,15 @@ type healSequence struct {
// bucket, and prefix on which heal seq. was initiated
bucket, objPrefix string
// path is just bucket + "/" + objPrefix
// path is just pathJoin(bucket, objPrefix)
path string
// time at which heal sequence was started
startTime time.Time
// time at which heal sequence has ended
endTime time.Time
// Heal client info
clientToken, clientAddress string
@@ -328,7 +354,7 @@ func newHealSequence(bucket, objPrefix, clientAddr string,
return &healSequence{
bucket: bucket,
objPrefix: objPrefix,
path: bucket + "/" + objPrefix,
path: pathJoin(bucket, objPrefix),
startTime: UTCNow(),
clientToken: mustGetUUID(),
clientAddress: clientAddr,
@@ -383,7 +409,6 @@ func (h *healSequence) stop() {
// sequence automatically resumes. The return value indicates if the
// operation succeeded.
func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
// start a timer to keep an upper time limit to find an empty
// slot to add the given heal result - if no slot is found it
// means that the server is holding the maximum amount of
@@ -467,6 +492,7 @@ func (h *healSequence) healSequenceStart() {
select {
case err, ok := <-h.traverseAndHealDoneCh:
h.endTime = UTCNow()
h.currentStatus.updateLock.Lock()
defer h.currentStatus.updateLock.Unlock()
// Heal traversal is complete.
@@ -480,6 +506,7 @@ func (h *healSequence) healSequenceStart() {
}
case <-h.stopSignalCh:
h.endTime = UTCNow()
h.currentStatus.updateLock.Lock()
h.currentStatus.Summary = healStoppedStatus
h.currentStatus.FailureDetail = errHealStopSignalled.Error()
@@ -519,6 +546,9 @@ func (h *healSequence) traverseAndHeal() {
// Start with format healing
checkErr(h.healDiskFormat)
// Start healing the config.
checkErr(h.healConfig)
// Heal buckets and objects
checkErr(h.healBuckets)
@@ -529,9 +559,72 @@ func (h *healSequence) traverseAndHeal() {
close(h.traverseAndHealDoneCh)
}
// healConfig - heals config.json, return value indicates if a failure occurred.
func (h *healSequence) healConfig() error {
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return errServerNotInitialized
}
// NOTE: Healing on configs is run regardless
// of any bucket being selected; this is to ensure that
// configs are always up to date and correct.
marker := ""
isTruncated := true
for isTruncated {
if globalHTTPServer != nil {
// Wait at most 1 minute for an in-progress request
// before proceeding to heal
waitCount := 60
// Any requests in progress, delay the heal.
for globalHTTPServer.GetRequestCount() > 2 && waitCount > 0 {
waitCount--
time.Sleep(1 * time.Second)
}
}
// Lists all objects under `config` prefix.
objectInfos, err := objectAPI.ListObjectsHeal(h.ctx, minioMetaBucket, minioConfigPrefix,
marker, "", 1000)
if err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}
for index := range objectInfos.Objects {
if h.isQuitting() {
return errHealStopSignalled
}
o := objectInfos.Objects[index]
res, herr := objectAPI.HealObject(h.ctx, o.Bucket, o.Name, h.settings.DryRun, h.settings.Remove)
// Object might have been deleted by the time heal
// was attempted; we ignore this file and move on.
if isErrObjectNotFound(herr) {
continue
}
if herr != nil {
return herr
}
res.Type = madmin.HealItemBucketMetadata
if err = h.pushHealResultItem(res); err != nil {
return err
}
}
isTruncated = objectInfos.IsTruncated
marker = objectInfos.NextMarker
}
return nil
}
// healDiskFormat - heals format.json, return value indicates if a
// failure occurred.
func (h *healSequence) healDiskFormat() error {
if h.isQuitting() {
return errHealStopSignalled
}
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil {
@@ -542,13 +635,18 @@ func (h *healSequence) healDiskFormat() error {
// return any error, ignore error returned when disks have
// already healed.
if err != nil && err != errNoHealRequired {
return errFnHealFromAPIErr(err)
return errFnHealFromAPIErr(h.ctx, err)
}
// Healing succeeded; notify the peers to reload format and re-initialize disks.
// We notify peers only if healing succeeded.
if err == nil {
peersReInitFormat(globalAdminPeers, h.settings.DryRun)
for _, nerr := range globalNotificationSys.ReloadFormat(h.settings.DryRun) {
if nerr.Err != nil {
logger.GetReqInfo(h.ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(h.ctx, nerr.Err)
}
}
}
// Push format heal result
@@ -557,6 +655,10 @@ func (h *healSequence) healDiskFormat() error {
// healBuckets - check for all buckets heal or just particular bucket.
func (h *healSequence) healBuckets() error {
if h.isQuitting() {
return errHealStopSignalled
}
// 1. If a bucket was specified, heal only the bucket.
if h.bucket != "" {
return h.healBucket(h.bucket)
@@ -570,7 +672,7 @@ func (h *healSequence) healBuckets() error {
buckets, err := objectAPI.ListBucketsHeal(h.ctx)
if err != nil {
return errFnHealFromAPIErr(err)
return errFnHealFromAPIErr(h.ctx, err)
}
for _, bucket := range buckets {
@@ -584,17 +686,13 @@ func (h *healSequence) healBuckets() error {
// healBucket - traverses and heals given bucket
func (h *healSequence) healBucket(bucket string) error {
if h.isQuitting() {
return errHealStopSignalled
}
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return errServerNotInitialized
}
results, err := objectAPI.HealBucket(h.ctx, bucket, h.settings.DryRun)
results, err := objectAPI.HealBucket(h.ctx, bucket, h.settings.DryRun, h.settings.Remove)
// push any available results before checking for error
for _, result := range results {
if perr := h.pushHealResultItem(result); perr != nil {
@@ -610,10 +708,9 @@ func (h *healSequence) healBucket(bucket string) error {
if h.objPrefix != "" {
// Check if an object named as the objPrefix exists,
// and if so heal it.
_, err = objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix)
_, err = objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix, ObjectOptions{})
if err == nil {
err = h.healObject(bucket, h.objPrefix)
if err != nil {
if err = h.healObject(bucket, h.objPrefix); err != nil {
return err
}
}
@@ -622,17 +719,40 @@ func (h *healSequence) healBucket(bucket string) error {
return nil
}
entries := runtime.NumCPU()
marker := ""
isTruncated := true
for isTruncated {
objectInfos, err := objectAPI.ListObjectsHeal(h.ctx, bucket,
h.objPrefix, marker, "", 1000)
if err != nil {
return errFnHealFromAPIErr(err)
if globalHTTPServer != nil {
// Wait at most 1 minute for an in-progress request
// before proceeding to heal
waitCount := 60
// Any requests in progress, delay the heal.
for globalHTTPServer.GetRequestCount() > 2 && waitCount > 0 {
waitCount--
time.Sleep(1 * time.Second)
}
}
for _, o := range objectInfos.Objects {
if err := h.healObject(o.Bucket, o.Name); err != nil {
// Heal numCPU * nodes objects at a time.
objectInfos, err := objectAPI.ListObjectsHeal(h.ctx, bucket,
h.objPrefix, marker, "", entries)
if err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}
g := errgroup.WithNErrs(len(objectInfos.Objects))
for index := range objectInfos.Objects {
index := index
g.Go(func() error {
o := objectInfos.Objects[index]
return h.healObject(o.Bucket, o.Name)
}, index)
}
for _, err := range g.Wait() {
if err != nil {
return err
}
}
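// A sketch (not part of this diff) of the pattern the loop above adopts:
// one goroutine per listed object, with errors collected by index via the
// repo's errgroup package, whose WithNErrs/Go/Wait calls are used exactly
// as shown above. healOne is a hypothetical stand-in for h.healObject.
package main

import (
	"fmt"

	"github.com/minio/minio/pkg/sync/errgroup"
)

func healOne(name string) error { return nil } // hypothetical worker

func main() {
	objects := []string{"a", "b", "c"}
	g := errgroup.WithNErrs(len(objects))
	for index := range objects {
		index := index // capture the loop variable for the closure
		g.Go(func() error { return healOne(objects[index]) }, index)
	}
	// Wait returns one error slot per object, in submission order.
	for i, err := range g.Wait() {
		if err != nil {
			fmt.Printf("heal %s failed: %v\n", objects[i], err)
		}
	}
}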
@@ -655,7 +775,10 @@ func (h *healSequence) healObject(bucket, object string) error {
return errServerNotInitialized
}
hri, err := objectAPI.HealObject(h.ctx, bucket, object, h.settings.DryRun)
hri, err := objectAPI.HealObject(h.ctx, bucket, object, h.settings.DryRun, h.settings.Remove)
if isErrObjectNotFound(err) {
return nil
}
if err != nil {
hri.Detail = err.Error()
}

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
* Minio Cloud Storage, (C) 2016, 2017, 2018, 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,7 +31,7 @@ type adminAPIHandlers struct {
}
// registerAdminRouter - Add handler functions for each service REST API routes.
func registerAdminRouter(router *mux.Router) {
func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) {
adminAPI := adminAPIHandlers{}
// Admin router
@@ -53,26 +53,71 @@ func registerAdminRouter(router *mux.Router) {
// Info operations
adminV1Router.Methods(http.MethodGet).Path("/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
/// Lock operations
if globalIsDistXL || globalIsXL {
/// Heal operations
// List Locks
adminV1Router.Methods(http.MethodGet).Path("/locks").HandlerFunc(httpTraceAll(adminAPI.ListLocksHandler))
// Clear locks
adminV1Router.Methods(http.MethodDelete).Path("/locks").HandlerFunc(httpTraceAll(adminAPI.ClearLocksHandler))
// Heal processing endpoint.
adminV1Router.Methods(http.MethodPost).Path("/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
/// Heal operations
/// Health operations
// Heal processing endpoint.
adminV1Router.Methods(http.MethodPost).Path("/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
}
// Performance command - return performance details based on input type
adminV1Router.Methods(http.MethodGet).Path("/performance").HandlerFunc(httpTraceAll(adminAPI.PerfInfoHandler)).Queries("perfType", "{perfType:.*}")
// Profiling operations
adminV1Router.Methods(http.MethodPost).Path("/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
Queries("profilerType", "{profilerType:.*}")
adminV1Router.Methods(http.MethodGet).Path("/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))
/// Config operations
if enableConfigOps {
// Update credentials
adminV1Router.Methods(http.MethodPut).Path("/config/credential").HandlerFunc(httpTraceHdrs(adminAPI.UpdateAdminCredentialsHandler))
// Get config
adminV1Router.Methods(http.MethodGet).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
// Set config
adminV1Router.Methods(http.MethodPut).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
// Update credentials
adminV1Router.Methods(http.MethodPut).Path("/config/credential").HandlerFunc(httpTraceAll(adminAPI.UpdateCredentialsHandler))
// Get config
adminV1Router.Methods(http.MethodGet).Path("/config").HandlerFunc(httpTraceAll(adminAPI.GetConfigHandler))
// Set config
adminV1Router.Methods(http.MethodPut).Path("/config").HandlerFunc(httpTraceAll(adminAPI.SetConfigHandler))
// Get config keys/values
adminV1Router.Methods(http.MethodGet).Path("/config-keys").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKeysHandler))
// Set config keys/values
adminV1Router.Methods(http.MethodPut).Path("/config-keys").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKeysHandler))
}
if enableIAMOps {
// -- IAM APIs --
// Add policy IAM
adminV1Router.Methods(http.MethodPut).Path("/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name",
"{name:.*}")
// Add user IAM
adminV1Router.Methods(http.MethodPut).Path("/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
adminV1Router.Methods(http.MethodPut).Path("/set-user-policy").HandlerFunc(httpTraceHdrs(adminAPI.SetUserPolicy)).
Queries("accessKey", "{accessKey:.*}").Queries("name", "{name:.*}")
adminV1Router.Methods(http.MethodPut).Path("/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).
Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
// Remove policy IAM
adminV1Router.Methods(http.MethodDelete).Path("/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
// Remove user IAM
adminV1Router.Methods(http.MethodDelete).Path("/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
// List users
adminV1Router.Methods(http.MethodGet).Path("/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))
// List policies
adminV1Router.Methods(http.MethodGet).Path("/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
}
// -- Top APIs --
// Top locks
adminV1Router.Methods(http.MethodGet).Path("/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
// If none of the routes match, return error.
adminV1Router.NotFoundHandler = http.HandlerFunc(httpTraceHdrs(notFoundHandlerJSON))
}

View File

@@ -1,551 +0,0 @@
/*
* Minio Cloud Storage, (C) 2014, 2015, 2016, 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"net"
"sort"
"strings"
"sync"
"time"
"github.com/minio/minio/cmd/logger"
xnet "github.com/minio/minio/pkg/net"
)
var errUnsupportedSignal = fmt.Errorf("unsupported signal: only restart and stop signals are supported")
// AdminRPCClient - admin RPC client talks to admin RPC server.
type AdminRPCClient struct {
*RPCClient
}
// SignalService - calls SignalService RPC.
func (rpcClient *AdminRPCClient) SignalService(signal serviceSignal) (err error) {
args := SignalServiceArgs{Sig: signal}
reply := VoidReply{}
return rpcClient.Call(adminServiceName+".SignalService", &args, &reply)
}
// ReInitFormat - re-initialize disk format, remotely.
func (rpcClient *AdminRPCClient) ReInitFormat(dryRun bool) error {
args := ReInitFormatArgs{DryRun: dryRun}
reply := VoidReply{}
return rpcClient.Call(adminServiceName+".ReInitFormat", &args, &reply)
}
// ListLocks - Sends list locks command to remote server via RPC.
func (rpcClient *AdminRPCClient) ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
args := ListLocksQuery{
Bucket: bucket,
Prefix: prefix,
Duration: duration,
}
var reply []VolumeLockInfo
err := rpcClient.Call(adminServiceName+".ListLocks", &args, &reply)
return reply, err
}
// ServerInfo - returns the server info of the server to which the RPC call is made.
func (rpcClient *AdminRPCClient) ServerInfo() (sid ServerInfoData, err error) {
err = rpcClient.Call(adminServiceName+".ServerInfo", &AuthArgs{}, &sid)
return sid, err
}
// GetConfig - returns config.json of the remote server.
func (rpcClient *AdminRPCClient) GetConfig() ([]byte, error) {
args := AuthArgs{}
var reply []byte
err := rpcClient.Call(adminServiceName+".GetConfig", &args, &reply)
return reply, err
}
// WriteTmpConfig - writes config file content to a temporary file on a remote node.
func (rpcClient *AdminRPCClient) WriteTmpConfig(tmpFileName string, configBytes []byte) error {
args := WriteConfigArgs{
TmpFileName: tmpFileName,
Buf: configBytes,
}
reply := VoidReply{}
err := rpcClient.Call(adminServiceName+".WriteTmpConfig", &args, &reply)
logger.LogIf(context.Background(), err)
return err
}
// CommitConfig - Move the new config in tmpFileName onto config.json on a remote node.
func (rpcClient *AdminRPCClient) CommitConfig(tmpFileName string) error {
args := CommitConfigArgs{FileName: tmpFileName}
reply := VoidReply{}
err := rpcClient.Call(adminServiceName+".CommitConfig", &args, &reply)
logger.LogIf(context.Background(), err)
return err
}
// NewAdminRPCClient - returns new admin RPC client.
func NewAdminRPCClient(host *xnet.Host) (*AdminRPCClient, error) {
scheme := "http"
if globalIsSSL {
scheme = "https"
}
serviceURL := &xnet.URL{
Scheme: scheme,
Host: host.String(),
Path: adminServicePath,
}
var tlsConfig *tls.Config
if globalIsSSL {
tlsConfig = &tls.Config{
ServerName: host.Name,
RootCAs: globalRootCAs,
}
}
rpcClient, err := NewRPCClient(
RPCClientArgs{
NewAuthTokenFunc: newAuthToken,
RPCVersion: globalRPCAPIVersion,
ServiceName: adminServiceName,
ServiceURL: serviceURL,
TLSConfig: tlsConfig,
},
)
if err != nil {
return nil, err
}
return &AdminRPCClient{rpcClient}, nil
}
// adminCmdRunner - abstracts local and remote execution of admin
// commands like service stop and service restart.
type adminCmdRunner interface {
SignalService(s serviceSignal) error
ReInitFormat(dryRun bool) error
ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error)
ServerInfo() (ServerInfoData, error)
GetConfig() ([]byte, error)
WriteTmpConfig(tmpFileName string, configBytes []byte) error
CommitConfig(tmpFileName string) error
}
// adminPeer - represents an entity that implements admin API RPCs.
type adminPeer struct {
addr string
cmdRunner adminCmdRunner
isLocal bool
}
// type alias for a collection of adminPeer.
type adminPeers []adminPeer
// makeAdminPeers - helper function to construct a collection of adminPeer.
func makeAdminPeers(endpoints EndpointList) (adminPeerList adminPeers) {
localAddr := GetLocalPeer(endpoints)
if strings.HasPrefix(localAddr, "127.0.0.1:") {
// Use first IPv4 instead of loopback address.
localAddr = net.JoinHostPort(sortIPs(localIP4.ToSlice())[0], globalMinioPort)
}
adminPeerList = append(adminPeerList, adminPeer{
addr: localAddr,
cmdRunner: localAdminClient{},
isLocal: true,
})
for _, hostStr := range GetRemotePeers(endpoints) {
host, err := xnet.ParseHost(hostStr)
logger.FatalIf(err, "Unable to parse Admin RPC Host", context.Background())
rpcClient, err := NewAdminRPCClient(host)
logger.FatalIf(err, "Unable to initialize Admin RPC Client", context.Background())
adminPeerList = append(adminPeerList, adminPeer{
addr: hostStr,
cmdRunner: rpcClient,
})
}
return adminPeerList
}
// peersReInitFormat - reinitialize remote object layers to new format.
func peersReInitFormat(peers adminPeers, dryRun bool) error {
errs := make([]error, len(peers))
// Send ReInitFormat RPC call to all nodes.
// for local adminPeer this is a no-op.
wg := sync.WaitGroup{}
for i, peer := range peers {
wg.Add(1)
go func(idx int, peer adminPeer) {
defer wg.Done()
if !peer.isLocal {
errs[idx] = peer.cmdRunner.ReInitFormat(dryRun)
}
}(i, peer)
}
wg.Wait()
return nil
}
// Initialize global adminPeer collection.
func initGlobalAdminPeers(endpoints EndpointList) {
globalAdminPeers = makeAdminPeers(endpoints)
}
// invokeServiceCmd - Invoke Restart/Stop command.
func invokeServiceCmd(cp adminPeer, cmd serviceSignal) (err error) {
switch cmd {
case serviceRestart, serviceStop:
err = cp.cmdRunner.SignalService(cmd)
}
return err
}
// sendServiceCmd - Invoke Restart command on remote peers
// adminPeer followed by on the local peer.
func sendServiceCmd(cps adminPeers, cmd serviceSignal) {
// Send service command like stop or restart to all remote nodes and finally run on local node.
errs := make([]error, len(cps))
var wg sync.WaitGroup
remotePeers := cps[1:]
for i := range remotePeers {
wg.Add(1)
go func(idx int) {
defer wg.Done()
// we use idx+1 because remotePeers slice is 1 position shifted w.r.t cps
errs[idx+1] = invokeServiceCmd(remotePeers[idx], cmd)
}(i)
}
wg.Wait()
errs[0] = invokeServiceCmd(cps[0], cmd)
}
// listPeerLocksInfo - fetch list of locks held on the given bucket,
// matching prefix held longer than duration from all peer servers.
func listPeerLocksInfo(peers adminPeers, bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
// Used to aggregate volume lock information from all nodes.
allLocks := make([][]VolumeLockInfo, len(peers))
errs := make([]error, len(peers))
var wg sync.WaitGroup
localPeer := peers[0]
remotePeers := peers[1:]
for i, remotePeer := range remotePeers {
wg.Add(1)
go func(idx int, remotePeer adminPeer) {
defer wg.Done()
// `remotePeers` is right-shifted by one position relative to `peers`
allLocks[idx], errs[idx] = remotePeer.cmdRunner.ListLocks(bucket, prefix, duration)
}(i+1, remotePeer)
}
wg.Wait()
allLocks[0], errs[0] = localPeer.cmdRunner.ListLocks(bucket, prefix, duration)
// Summarize errors received for ListLocks RPC across all
// nodes. N.B. the possible unavailability of quorum in errors
// applies only to a distributed setup.
errCount, err := reduceErrs(errs, []error{})
if err != nil {
if errCount >= (len(peers)/2 + 1) {
return nil, err
}
return nil, InsufficientReadQuorum{}
}
// Group lock information across nodes by (bucket, object)
// pair. For readability only.
paramLockMap := make(map[nsParam][]VolumeLockInfo)
for _, nodeLocks := range allLocks {
for _, lockInfo := range nodeLocks {
param := nsParam{
volume: lockInfo.Bucket,
path: lockInfo.Object,
}
paramLockMap[param] = append(paramLockMap[param], lockInfo)
}
}
groupedLockInfos := []VolumeLockInfo{}
for _, volLocks := range paramLockMap {
groupedLockInfos = append(groupedLockInfos, volLocks...)
}
return groupedLockInfos, nil
}
// uptimeSlice - used to sort uptimes in chronological order.
type uptimeSlice []struct {
err error
uptime time.Duration
}
func (ts uptimeSlice) Len() int {
return len(ts)
}
func (ts uptimeSlice) Less(i, j int) bool {
return ts[i].uptime < ts[j].uptime
}
func (ts uptimeSlice) Swap(i, j int) {
ts[i], ts[j] = ts[j], ts[i]
}
// getPeerUptimes - returns the uptime since the last time read quorum
// was established on success. Otherwise returns InsufficientReadQuorum.
func getPeerUptimes(peers adminPeers) (time.Duration, error) {
// In a single node Erasure or FS backend setup the uptime of
// the setup is the uptime of the single minio server
// instance.
if !globalIsDistXL {
return UTCNow().Sub(globalBootTime), nil
}
uptimes := make(uptimeSlice, len(peers))
// Get up time of all servers.
wg := sync.WaitGroup{}
for i, peer := range peers {
wg.Add(1)
go func(idx int, peer adminPeer) {
defer wg.Done()
serverInfoData, rpcErr := peer.cmdRunner.ServerInfo()
uptimes[idx].uptime, uptimes[idx].err = serverInfoData.Properties.Uptime, rpcErr
}(i, peer)
}
wg.Wait()
// Sort uptimes in chronological order.
sort.Sort(uptimes)
// Pick the readQuorum'th uptime in chronological order, i.e.,
// the time at which read quorum was (re-)established.
readQuorum := len(uptimes) / 2
validCount := 0
latestUptime := time.Duration(0)
for _, uptime := range uptimes {
if uptime.err != nil {
logger.LogIf(context.Background(), uptime.err)
continue
}
validCount++
if validCount >= readQuorum {
latestUptime = uptime.uptime
break
}
}
// Fewer than readQuorum "Admin.Uptime" RPC calls returned
// successfully, so read quorum is unavailable.
if validCount < readQuorum {
return time.Duration(0), InsufficientReadQuorum{}
}
return latestUptime, nil
}
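// A self-contained sketch (not part of this diff): with errors filtered
// out, the selection above reduces to "sort peer uptimes ascending and
// take the readQuorum'th smallest". Assumes at least readQuorum
// successful responses; quorumUptime is a hypothetical helper.
package main

import (
	"fmt"
	"sort"
	"time"
)

func quorumUptime(uptimes []time.Duration) time.Duration {
	sort.Slice(uptimes, func(i, j int) bool { return uptimes[i] < uptimes[j] })
	readQuorum := len(uptimes) / 2
	return uptimes[readQuorum-1] // the readQuorum'th smallest (1-indexed)
}

func main() {
	ups := []time.Duration{5 * time.Minute, 90 * time.Minute, 10 * time.Minute, 60 * time.Minute}
	// Sorted: 5m, 10m, 60m, 90m; readQuorum = 2, so the answer is 10m —
	// at least two servers (a read quorum) have been up that long.
	fmt.Println(quorumUptime(ups))
}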
// getPeerConfig - Fetches config.json from all nodes in the setup and
// returns the one that occurs in a majority of them.
func getPeerConfig(peers adminPeers) ([]byte, error) {
if !globalIsDistXL {
return peers[0].cmdRunner.GetConfig()
}
errs := make([]error, len(peers))
configs := make([][]byte, len(peers))
// Get config from all servers.
wg := sync.WaitGroup{}
for i, peer := range peers {
wg.Add(1)
go func(idx int, peer adminPeer) {
defer wg.Done()
configs[idx], errs[idx] = peer.cmdRunner.GetConfig()
}(i, peer)
}
wg.Wait()
// Find the maximally occurring config among peers in a
// distributed setup.
serverConfigs := make([]serverConfig, len(peers))
for i, configBytes := range configs {
if errs[i] != nil {
continue
}
// Unmarshal the received config files.
err := json.Unmarshal(configBytes, &serverConfigs[i])
if err != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", peers[i].addr)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return nil, err
}
}
configJSON, err := getValidServerConfig(serverConfigs, errs)
if err != nil {
logger.LogIf(context.Background(), err)
return nil, err
}
// Return the config.json that was present on a quorum or more
// of the servers.
return json.Marshal(configJSON)
}
// getValidServerConfig - finds the server config that is present in
// quorum or more number of servers.
func getValidServerConfig(serverConfigs []serverConfig, errs []error) (scv serverConfig, e error) {
// majority-based quorum
quorum := len(serverConfigs)/2 + 1
// Count the number of disks a config.json was found in.
configCounter := make([]int, len(serverConfigs))
// We group equal serverConfigs by the lowest index of the
// same value; e.g., take the following serverConfigs
// in a 4-node setup,
// serverConfigs == [c1, c2, c1, c1]
// configCounter == [3, 1, 0, 0]
// c1, c2 are the only distinct values that appear. c1 is
// identified by 0, the lowest index it appears in, and c2 is
// identified by 1. So, we need to find the number of times
// each of these distinct values occurs.
// Invariants:
// 1. At the beginning of the i-th iteration, the number of
// unique configurations seen so far is equal to the number of
// non-zero counter values in config[:i].
// 2. At the beginning of the i-th iteration, the sum of
// elements of configCounter[:i] is equal to the number of
// non-error configurations seen so far.
// For each of the serverConfigs ...
for i := range serverConfigs {
// Skip nodes where getConfig failed.
if errs[i] != nil {
continue
}
// Check if it is equal to any of the configurations
// seen so far. If j == i is reached then we have an
// unseen configuration.
for j := 0; j <= i; j++ {
if j < i && configCounter[j] == 0 {
// serverConfigs[j] is known to be
// equal to a value that was already
// seen. See example above for
// clarity.
continue
} else if j < i && serverConfigs[i].ConfigDiff(&serverConfigs[j]) == "" {
// serverConfigs[i] is equal to
// serverConfigs[j], update
// serverConfigs[j]'s counter since it
// is the lower index.
configCounter[j]++
break
} else if j == i {
// serverConfigs[i] is equal to no
// other value seen before. It is
// unique so far.
configCounter[i] = 1
break
} // else invariants specified above are violated.
}
}
// We find the maximally occurring server config and check if
// there is quorum.
var configJSON serverConfig
maxOccurrence := 0
for i, count := range configCounter {
if maxOccurrence < count {
maxOccurrence = count
configJSON = serverConfigs[i]
}
}
// If quorum nodes don't agree.
if maxOccurrence < quorum {
return scv, errXLWriteQuorum
}
return configJSON, nil
}
// Write config contents into a temporary file on all nodes.
func writeTmpConfigPeers(peers adminPeers, tmpFileName string, configBytes []byte) []error {
// For a single-node minio server setup.
if !globalIsDistXL {
err := peers[0].cmdRunner.WriteTmpConfig(tmpFileName, configBytes)
return []error{err}
}
errs := make([]error, len(peers))
// Write config into temporary file on all nodes.
wg := sync.WaitGroup{}
for i, peer := range peers {
wg.Add(1)
go func(idx int, peer adminPeer) {
defer wg.Done()
errs[idx] = peer.cmdRunner.WriteTmpConfig(tmpFileName, configBytes)
}(i, peer)
}
wg.Wait()
// Return errors (if any) encountered while writing the
// temporary config file.
return errs
}
// Move config contents from the given temporary file onto config.json
// on all nodes.
func commitConfigPeers(peers adminPeers, tmpFileName string) []error {
// For a single-node minio server setup.
if !globalIsDistXL {
return []error{peers[0].cmdRunner.CommitConfig(tmpFileName)}
}
errs := make([]error, len(peers))
// Rename temporary config file into configDir/config.json on
// all nodes.
wg := sync.WaitGroup{}
for i, peer := range peers {
wg.Add(1)
go func(idx int, peer adminPeer) {
defer wg.Done()
errs[idx] = peer.cmdRunner.CommitConfig(tmpFileName)
}(i, peer)
}
wg.Wait()
// Return errors (if any) received during rename.
return errs
}
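
The uptime-selection logic above sorts the collected uptimes ascending and picks the readQuorum'th valid entry, i.e., the time since a read quorum of servers has been continuously up. A minimal standalone sketch of just that selection, with illustrative names and none of the RPC plumbing:

package main

import (
	"fmt"
	"sort"
	"time"
)

// quorumUptime picks the q-th smallest uptime among the successful
// samples; with q = n/2 peers, that is the moment at which at least
// a read quorum of servers has been continuously up.
func quorumUptime(uptimes []time.Duration, errs []error, q int) (time.Duration, error) {
	if q < 1 {
		q = 1 // degenerate single-node case
	}
	valid := make([]time.Duration, 0, len(uptimes))
	for i, u := range uptimes {
		if errs[i] == nil {
			valid = append(valid, u)
		}
	}
	if len(valid) < q {
		return 0, fmt.Errorf("read quorum unavailable: %d/%d responses", len(valid), q)
	}
	sort.Slice(valid, func(i, j int) bool { return valid[i] < valid[j] })
	return valid[q-1], nil // q-th smallest uptime
}

func main() {
	uptimes := []time.Duration{90 * time.Minute, 5 * time.Minute, 60 * time.Minute, 45 * time.Minute}
	errs := []error{nil, nil, nil, nil}
	d, err := quorumUptime(uptimes, errs, len(uptimes)/2)
	fmt.Println(d, err) // 45m0s <nil>: quorum was re-established 45 minutes ago
}
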


@@ -1,650 +0,0 @@
/*
* Minio Cloud Storage, (C) 2014, 2015, 2016, 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"encoding/json"
"fmt"
"net"
"os"
"path"
"path/filepath"
"sort"
"sync"
"time"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/cmd/logger"
)
const (
// Admin service names
signalServiceRPC = "Admin.SignalService"
reInitFormatRPC = "Admin.ReInitFormat"
listLocksRPC = "Admin.ListLocks"
serverInfoDataRPC = "Admin.ServerInfoData"
getConfigRPC = "Admin.GetConfig"
writeTmpConfigRPC = "Admin.WriteTmpConfig"
commitConfigRPC = "Admin.CommitConfig"
)
// localAdminClient - represents admin operation to be executed locally.
type localAdminClient struct {
}
// remoteAdminClient - represents admin operation to be executed
// remotely, via RPC.
type remoteAdminClient struct {
*AuthRPCClient
}
// adminCmdRunner - abstracts local and remote execution of admin
// commands like service stop and service restart.
type adminCmdRunner interface {
SignalService(s serviceSignal) error
ReInitFormat(dryRun bool) error
ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error)
ServerInfoData() (ServerInfoData, error)
GetConfig() ([]byte, error)
WriteTmpConfig(tmpFileName string, configBytes []byte) error
CommitConfig(tmpFileName string) error
}
var errUnsupportedSignal = fmt.Errorf("unsupported signal: only restart and stop signals are supported")
// SignalService - sends a restart or stop signal to the local server
func (lc localAdminClient) SignalService(s serviceSignal) error {
switch s {
case serviceRestart, serviceStop:
globalServiceSignalCh <- s
default:
return errUnsupportedSignal
}
return nil
}
// ReInitFormat - re-initialize disk format.
func (lc localAdminClient) ReInitFormat(dryRun bool) error {
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return errServerNotInitialized
}
return objectAPI.ReloadFormat(context.Background(), dryRun)
}
// ListLocks - Fetches lock information from local lock instrumentation.
func (lc localAdminClient) ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
// check if objectLayer is initialized, if not return.
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return nil, errServerNotInitialized
}
return objectAPI.ListLocks(context.Background(), bucket, prefix, duration)
}
func (rc remoteAdminClient) SignalService(s serviceSignal) (err error) {
switch s {
case serviceRestart, serviceStop:
reply := AuthRPCReply{}
err = rc.Call(signalServiceRPC, &SignalServiceArgs{Sig: s},
&reply)
default:
err = errUnsupportedSignal
}
return err
}
// ReInitFormat - re-initialize disk format, remotely.
func (rc remoteAdminClient) ReInitFormat(dryRun bool) error {
reply := AuthRPCReply{}
return rc.Call(reInitFormatRPC, &ReInitFormatArgs{
DryRun: dryRun,
}, &reply)
}
// ListLocks - Sends list locks command to remote server via RPC.
func (rc remoteAdminClient) ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
listArgs := ListLocksQuery{
Bucket: bucket,
Prefix: prefix,
Duration: duration,
}
var reply ListLocksReply
if err := rc.Call(listLocksRPC, &listArgs, &reply); err != nil {
return nil, err
}
return reply.VolLocks, nil
}
// ServerInfoData - Returns the server info of this server.
func (lc localAdminClient) ServerInfoData() (sid ServerInfoData, e error) {
if globalBootTime.IsZero() {
return sid, errServerNotInitialized
}
// Build storage info
objLayer := newObjectLayerFn()
if objLayer == nil {
return sid, errServerNotInitialized
}
storage := objLayer.StorageInfo(context.Background())
return ServerInfoData{
StorageInfo: storage,
ConnStats: globalConnStats.toServerConnStats(),
HTTPStats: globalHTTPStats.toServerHTTPStats(),
Properties: ServerProperties{
Uptime: UTCNow().Sub(globalBootTime),
Version: Version,
CommitID: CommitID,
SQSARN: globalNotificationSys.GetARNList(),
Region: globalServerConfig.GetRegion(),
},
}, nil
}
// ServerInfo - returns the server info of the server to which the RPC call is made.
func (rc remoteAdminClient) ServerInfoData() (sid ServerInfoData, e error) {
args := AuthRPCArgs{}
reply := ServerInfoDataReply{}
err := rc.Call(serverInfoDataRPC, &args, &reply)
if err != nil {
return sid, err
}
return reply.ServerInfoData, nil
}
// GetConfig - returns config.json of the local server.
func (lc localAdminClient) GetConfig() ([]byte, error) {
if globalServerConfig == nil {
return nil, fmt.Errorf("config not present")
}
return json.Marshal(globalServerConfig)
}
// GetConfig - returns config.json of the remote server.
func (rc remoteAdminClient) GetConfig() ([]byte, error) {
args := AuthRPCArgs{}
reply := ConfigReply{}
if err := rc.Call(getConfigRPC, &args, &reply); err != nil {
return nil, err
}
return reply.Config, nil
}
// WriteTmpConfig - writes config file content to a temporary file on
// the local server.
func (lc localAdminClient) WriteTmpConfig(tmpFileName string, configBytes []byte) error {
return writeTmpConfigCommon(tmpFileName, configBytes)
}
// WriteTmpConfig - writes config file content to a temporary file on
// a remote node.
func (rc remoteAdminClient) WriteTmpConfig(tmpFileName string, configBytes []byte) error {
wArgs := WriteConfigArgs{
TmpFileName: tmpFileName,
Buf: configBytes,
}
err := rc.Call(writeTmpConfigRPC, &wArgs, &WriteConfigReply{})
if err != nil {
logger.LogIf(context.Background(), err)
return err
}
return nil
}
// CommitConfig - Move the new config in tmpFileName onto config.json
// on a local node.
func (lc localAdminClient) CommitConfig(tmpFileName string) error {
configFile := getConfigFile()
tmpConfigFile := filepath.Join(getConfigDir(), tmpFileName)
err := os.Rename(tmpConfigFile, configFile)
reqInfo := (&logger.ReqInfo{}).AppendTags("tmpConfigFile", tmpConfigFile)
reqInfo.AppendTags("configFile", configFile)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return err
}
// CommitConfig - Move the new config in tmpFileName onto config.json
// on a remote node.
func (rc remoteAdminClient) CommitConfig(tmpFileName string) error {
cArgs := CommitConfigArgs{
FileName: tmpFileName,
}
cReply := CommitConfigReply{}
err := rc.Call(commitConfigRPC, &cArgs, &cReply)
if err != nil {
logger.LogIf(context.Background(), err)
return err
}
return nil
}
// adminPeer - represents an entity that implements admin API RPCs.
type adminPeer struct {
addr string
cmdRunner adminCmdRunner
isLocal bool
}
// type alias for a collection of adminPeer.
type adminPeers []adminPeer
// makeAdminPeers - helper function to construct a collection of adminPeer.
func makeAdminPeers(endpoints EndpointList) (adminPeerList adminPeers) {
thisPeer := globalMinioAddr
if globalMinioHost == "" {
// When host is not explicitly provided, simply
// use the first IPv4.
thisPeer = net.JoinHostPort(sortIPs(localIP4.ToSlice())[0], globalMinioPort)
}
adminPeerList = append(adminPeerList, adminPeer{
thisPeer,
localAdminClient{},
true,
})
hostSet := set.CreateStringSet(globalMinioAddr)
cred := globalServerConfig.GetCredential()
serviceEndpoint := path.Join(minioReservedBucketPath, adminPath)
for _, host := range GetRemotePeers(endpoints) {
if hostSet.Contains(host) {
continue
}
hostSet.Add(host)
adminPeerList = append(adminPeerList, adminPeer{
addr: host,
cmdRunner: &remoteAdminClient{newAuthRPCClient(authConfig{
accessKey: cred.AccessKey,
secretKey: cred.SecretKey,
serverAddr: host,
serviceEndpoint: serviceEndpoint,
secureConn: globalIsSSL,
serviceName: "Admin",
})},
})
}
return adminPeerList
}
// peersReInitFormat - reinitialize remote object layers to new format.
func peersReInitFormat(peers adminPeers, dryRun bool) error {
errs := make([]error, len(peers))
// Send ReInitFormat RPC call to all nodes.
// for local adminPeer this is a no-op.
wg := sync.WaitGroup{}
for i, peer := range peers {
wg.Add(1)
go func(idx int, peer adminPeer) {
defer wg.Done()
if !peer.isLocal {
errs[idx] = peer.cmdRunner.ReInitFormat(dryRun)
}
}(i, peer)
}
wg.Wait()
return nil
}
// Initialize global adminPeer collection.
func initGlobalAdminPeers(endpoints EndpointList) {
globalAdminPeers = makeAdminPeers(endpoints)
}
// invokeServiceCmd - Invoke Restart/Stop command.
func invokeServiceCmd(cp adminPeer, cmd serviceSignal) (err error) {
switch cmd {
case serviceRestart, serviceStop:
err = cp.cmdRunner.SignalService(cmd)
}
return err
}
// sendServiceCmd - Invoke the restart/stop command on all remote
// peers first, followed by the local peer.
func sendServiceCmd(cps adminPeers, cmd serviceSignal) {
// Send service command like stop or restart to all remote nodes and finally run on local node.
errs := make([]error, len(cps))
var wg sync.WaitGroup
remotePeers := cps[1:]
for i := range remotePeers {
wg.Add(1)
go func(idx int) {
defer wg.Done()
// we use idx+1 because remotePeers slice is 1 position shifted w.r.t cps
errs[idx+1] = invokeServiceCmd(remotePeers[idx], cmd)
}(i)
}
wg.Wait()
errs[0] = invokeServiceCmd(cps[0], cmd)
}
// listPeerLocksInfo - fetch the list of locks held on the given
// bucket and matching prefix, held longer than duration, from all
// peer servers.
func listPeerLocksInfo(peers adminPeers, bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
// Used to aggregate volume lock information from all nodes.
allLocks := make([][]VolumeLockInfo, len(peers))
errs := make([]error, len(peers))
var wg sync.WaitGroup
localPeer := peers[0]
remotePeers := peers[1:]
for i, remotePeer := range remotePeers {
wg.Add(1)
go func(idx int, remotePeer adminPeer) {
defer wg.Done()
// `remotePeers` is right-shifted by one position relative to `peers`
allLocks[idx], errs[idx] = remotePeer.cmdRunner.ListLocks(bucket, prefix, duration)
}(i+1, remotePeer)
}
wg.Wait()
allLocks[0], errs[0] = localPeer.cmdRunner.ListLocks(bucket, prefix, duration)
// Summarizing errors received for ListLocks RPC across all
// nodes. N.B. the possible unavailability of quorum in errors
// applies only to a distributed setup.
errCount, err := reduceErrs(errs, []error{})
if err != nil {
if errCount >= (len(peers)/2 + 1) {
return nil, err
}
return nil, InsufficientReadQuorum{}
}
// Group lock information across nodes by (bucket, object)
// pair. For readability only.
paramLockMap := make(map[nsParam][]VolumeLockInfo)
for _, nodeLocks := range allLocks {
for _, lockInfo := range nodeLocks {
param := nsParam{
volume: lockInfo.Bucket,
path: lockInfo.Object,
}
paramLockMap[param] = append(paramLockMap[param], lockInfo)
}
}
groupedLockInfos := []VolumeLockInfo{}
for _, volLocks := range paramLockMap {
groupedLockInfos = append(groupedLockInfos, volLocks...)
}
return groupedLockInfos, nil
}
// uptimeSlice - used to sort uptimes in chronological order.
type uptimeSlice []struct {
err error
uptime time.Duration
}
func (ts uptimeSlice) Len() int {
return len(ts)
}
func (ts uptimeSlice) Less(i, j int) bool {
return ts[i].uptime < ts[j].uptime
}
func (ts uptimeSlice) Swap(i, j int) {
ts[i], ts[j] = ts[j], ts[i]
}
// getPeerUptimes - returns the uptime since the last time read quorum
// was established on success. Otherwise returns errXLReadQuorum.
func getPeerUptimes(peers adminPeers) (time.Duration, error) {
// In a single node Erasure or FS backend setup the uptime of
// the setup is the uptime of the single minio server
// instance.
if !globalIsDistXL {
return UTCNow().Sub(globalBootTime), nil
}
uptimes := make(uptimeSlice, len(peers))
// Get up time of all servers.
wg := sync.WaitGroup{}
for i, peer := range peers {
wg.Add(1)
go func(idx int, peer adminPeer) {
defer wg.Done()
serverInfoData, rpcErr := peer.cmdRunner.ServerInfoData()
uptimes[idx].uptime, uptimes[idx].err = serverInfoData.Properties.Uptime, rpcErr
}(i, peer)
}
wg.Wait()
// Sort uptimes in chronological order.
sort.Sort(uptimes)
// Pick the readQuorum'th uptime in chronological order, i.e.,
// the time at which read quorum was (re-)established.
readQuorum := len(uptimes) / 2
validCount := 0
latestUptime := time.Duration(0)
for _, uptime := range uptimes {
if uptime.err != nil {
logger.LogIf(context.Background(), uptime.err)
continue
}
validCount++
if validCount >= readQuorum {
latestUptime = uptime.uptime
break
}
}
// Fewer than readQuorum "Admin.ServerInfoData" RPC calls returned
// successfully, so read quorum is unavailable.
if validCount < readQuorum {
return time.Duration(0), InsufficientReadQuorum{}
}
return latestUptime, nil
}
// getPeerConfig - Fetches config.json from all nodes in the setup and
// returns the one that occurs in a majority of them.
func getPeerConfig(peers adminPeers) ([]byte, error) {
if !globalIsDistXL {
return peers[0].cmdRunner.GetConfig()
}
errs := make([]error, len(peers))
configs := make([][]byte, len(peers))
// Get config from all servers.
wg := sync.WaitGroup{}
for i, peer := range peers {
wg.Add(1)
go func(idx int, peer adminPeer) {
defer wg.Done()
configs[idx], errs[idx] = peer.cmdRunner.GetConfig()
}(i, peer)
}
wg.Wait()
// Find the maximally occurring config among peers in a
// distributed setup.
serverConfigs := make([]serverConfig, len(peers))
for i, configBytes := range configs {
if errs[i] != nil {
continue
}
// Unmarshal the received config files.
err := json.Unmarshal(configBytes, &serverConfigs[i])
if err != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", peers[i].addr)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return nil, err
}
}
configJSON, err := getValidServerConfig(serverConfigs, errs)
if err != nil {
logger.LogIf(context.Background(), err)
return nil, err
}
// Return the config.json that was present on quorum or more
// of the nodes.
return json.Marshal(configJSON)
}
// getValidServerConfig - finds the server config that is present in
// quorum or more number of servers.
func getValidServerConfig(serverConfigs []serverConfig, errs []error) (scv serverConfig, e error) {
// majority-based quorum
quorum := len(serverConfigs)/2 + 1
// Count the number of servers a config.json was found on.
configCounter := make([]int, len(serverConfigs))
// We group equal serverConfigs by the lowest index of the
// same value; e.g., let us take the following serverConfigs
// in a 4-node setup,
// serverConfigs == [c1, c2, c1, c1]
// configCounter == [3, 1, 0, 0]
// c1, c2 are the only distinct values that appear. c1 is
// identified by 0, the lowest index it appears at, and c2 is
// identified by 1. So, we need to find the number of times
// each of these distinct values occurs.
// Invariants:
// 1. At the beginning of the i-th iteration, the number of
// unique configurations seen so far is equal to the number of
// non-zero counter values in configCounter[:i].
// 2. At the beginning of the i-th iteration, the sum of
// elements of configCounter[:i] is equal to the number of
// non-error configurations seen so far.
// For each of the serverConfig ...
for i := range serverConfigs {
// Skip nodes where getConfig failed.
if errs[i] != nil {
continue
}
// Check if it is equal to any of the configurations
// seen so far. If j == i is reached then we have an
// unseen configuration.
for j := 0; j <= i; j++ {
if j < i && configCounter[j] == 0 {
// serverConfigs[j] is known to be
// equal to a value that was already
// seen. See example above for
// clarity.
continue
} else if j < i && serverConfigs[i].ConfigDiff(&serverConfigs[j]) == "" {
// serverConfigs[i] is equal to
// serverConfigs[j], update
// serverConfigs[j]'s counter since it
// is the lower index.
configCounter[j]++
break
} else if j == i {
// serverConfigs[i] is equal to no
// other value seen before. It is
// unique so far.
configCounter[i] = 1
break
} // else invariants specified above are violated.
}
}
// We find the maximally occurring server config and check if
// there is quorum.
var configJSON serverConfig
maxOccurrence := 0
for i, count := range configCounter {
if maxOccurrence < count {
maxOccurrence = count
configJSON = serverConfigs[i]
}
}
// If quorum nodes don't agree.
if maxOccurrence < quorum {
return scv, errXLWriteQuorum
}
return configJSON, nil
}
// Write config contents into a temporary file on all nodes.
func writeTmpConfigPeers(peers adminPeers, tmpFileName string, configBytes []byte) []error {
// For a single-node minio server setup.
if !globalIsDistXL {
err := peers[0].cmdRunner.WriteTmpConfig(tmpFileName, configBytes)
return []error{err}
}
errs := make([]error, len(peers))
// Write config into temporary file on all nodes.
wg := sync.WaitGroup{}
for i, peer := range peers {
wg.Add(1)
go func(idx int, peer adminPeer) {
defer wg.Done()
errs[idx] = peer.cmdRunner.WriteTmpConfig(tmpFileName, configBytes)
}(i, peer)
}
wg.Wait()
// Return errors (if any) encountered while writing the
// temporary config file.
return errs
}
// Move config contents from the given temporary file onto config.json
// on all nodes.
func commitConfigPeers(peers adminPeers, tmpFileName string) []error {
// For a single-node minio server setup.
if !globalIsDistXL {
return []error{peers[0].cmdRunner.CommitConfig(tmpFileName)}
}
errs := make([]error, len(peers))
// Rename temporary config file into configDir/config.json on
// all nodes.
wg := sync.WaitGroup{}
for i, peer := range peers {
wg.Add(1)
go func(idx int, peer adminPeer) {
defer wg.Done()
errs[idx] = peer.cmdRunner.CommitConfig(tmpFileName)
}(i, peer)
}
wg.Wait()
// Return errors (if any) received during rename.
return errs
}
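
The lowest-index grouping used by getValidServerConfig can be exercised in isolation. The sketch below is a simplification that substitutes plain string equality for ConfigDiff but keeps the same counting and quorum rule; all names here are illustrative:

package main

import "fmt"

// majorityConfig groups equal values by their lowest index and returns
// the value seen on at least quorum = n/2+1 inputs, mirroring the
// configCounter scheme above.
func majorityConfig(configs []string, errs []error) (string, error) {
	quorum := len(configs)/2 + 1
	counter := make([]int, len(configs))
	for i := range configs {
		if errs[i] != nil {
			continue
		}
		for j := 0; j <= i; j++ {
			if j < i && counter[j] == 0 {
				continue // configs[j] already counted under a lower index
			} else if j < i && configs[i] == configs[j] {
				counter[j]++ // same value: bump the lowest-index counter
				break
			} else if j == i {
				counter[i] = 1 // first occurrence of a new value
				break
			}
		}
	}
	best, bestCount := "", 0
	for i, c := range counter {
		if c > bestCount {
			best, bestCount = configs[i], c
		}
	}
	if bestCount < quorum {
		return "", fmt.Errorf("no config present on %d or more nodes", quorum)
	}
	return best, nil
}

func main() {
	configs := []string{"c1", "c2", "c1", "c1"} // counter becomes [3, 1, 0, 0]
	errs := make([]error, 4)
	fmt.Println(majorityConfig(configs, errs)) // c1 <nil>
}
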


@@ -1,127 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"path"
"time"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
xrpc "github.com/minio/minio/cmd/rpc"
)
const adminServiceName = "Admin"
const adminServiceSubPath = "/admin"
var adminServicePath = path.Join(minioReservedBucketPath, adminServiceSubPath)
// adminRPCReceiver - Admin RPC receiver for admin RPC server.
type adminRPCReceiver struct {
local *localAdminClient
}
// SignalServiceArgs - provides the signal argument to SignalService RPC
type SignalServiceArgs struct {
AuthArgs
Sig serviceSignal
}
// SignalService - Send a restart or stop signal to the service
func (receiver *adminRPCReceiver) SignalService(args *SignalServiceArgs, reply *VoidReply) error {
return receiver.local.SignalService(args.Sig)
}
// ListLocksQuery - wraps ListLocks API's query values to send over RPC.
type ListLocksQuery struct {
AuthArgs
Bucket string
Prefix string
Duration time.Duration
}
// ListLocks - lists locks held by requests handled by this server instance.
func (receiver *adminRPCReceiver) ListLocks(args *ListLocksQuery, reply *[]VolumeLockInfo) (err error) {
*reply, err = receiver.local.ListLocks(args.Bucket, args.Prefix, args.Duration)
return err
}
// ServerInfo - returns the server info when object layer was initialized on this server.
func (receiver *adminRPCReceiver) ServerInfo(args *AuthArgs, reply *ServerInfoData) (err error) {
*reply, err = receiver.local.ServerInfo()
return err
}
// GetConfig - returns the config.json of this server.
func (receiver *adminRPCReceiver) GetConfig(args *AuthArgs, reply *[]byte) (err error) {
*reply, err = receiver.local.GetConfig()
return err
}
// ReInitFormatArgs - provides dry-run information to re-initialize format.json
type ReInitFormatArgs struct {
AuthArgs
DryRun bool
}
// ReInitFormat - re-init 'format.json'
func (receiver *adminRPCReceiver) ReInitFormat(args *ReInitFormatArgs, reply *VoidReply) error {
return receiver.local.ReInitFormat(args.DryRun)
}
// WriteConfigArgs - wraps the bytes to be written and temporary file name.
type WriteConfigArgs struct {
AuthArgs
TmpFileName string
Buf []byte
}
// WriteTmpConfig - writes the supplied config contents onto the
// supplied temporary file.
func (receiver *adminRPCReceiver) WriteTmpConfig(args *WriteConfigArgs, reply *VoidReply) error {
return receiver.local.WriteTmpConfig(args.TmpFileName, args.Buf)
}
// CommitConfigArgs - wraps the config file name that needs to be
// committed into config.json on this node.
type CommitConfigArgs struct {
AuthArgs
FileName string
}
// CommitConfig - Renames the temporary file into config.json on this node.
func (receiver *adminRPCReceiver) CommitConfig(args *CommitConfigArgs, reply *VoidReply) error {
return receiver.local.CommitConfig(args.FileName)
}
// NewAdminRPCServer - returns new admin RPC server.
func NewAdminRPCServer() (*xrpc.Server, error) {
rpcServer := xrpc.NewServer()
if err := rpcServer.RegisterName(adminServiceName, &adminRPCReceiver{&localAdminClient{}}); err != nil {
return nil, err
}
return rpcServer, nil
}
// registerAdminRPCRouter - creates and registers Admin RPC server and its router.
func registerAdminRPCRouter(router *mux.Router) {
rpcServer, err := NewAdminRPCServer()
logger.FatalIf(err, "Unable to initialize Admin RPC Server", context.Background())
subrouter := router.PathPrefix(minioReservedBucketPath).Subrouter()
subrouter.Path(adminServiceSubPath).Handler(rpcServer)
}
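
The receiver above follows the conventional Go RPC shape: exported methods taking (args, reply) pointers and returning an error, registered under a service name ("Admin"). A toy round trip over the standard library's net/rpc, assuming nothing from this codebase, shows the same pattern:

package main

import (
	"fmt"
	"log"
	"net"
	"net/rpc"
)

// SignalArgs plays the role of SignalServiceArgs: an exported args
// struct carried to a named receiver method. (Toy types, not MinIO's.)
type SignalArgs struct{ Sig int }

// AdminReceiver plays the role of adminRPCReceiver.
type AdminReceiver struct{}

// SignalService has the canonical net/rpc method signature:
// exported method, (args, *reply), single error return.
func (r *AdminReceiver) SignalService(args *SignalArgs, ok *bool) error {
	if args.Sig != 1 && args.Sig != 2 { // pretend 1=restart, 2=stop
		return fmt.Errorf("unsupported signal: %d", args.Sig)
	}
	*ok = true
	return nil
}

func main() {
	srv := rpc.NewServer()
	if err := srv.RegisterName("Admin", &AdminReceiver{}); err != nil {
		log.Fatal(err)
	}
	// Client and server wired over an in-memory pipe.
	cliConn, srvConn := net.Pipe()
	go srv.ServeConn(srvConn)
	client := rpc.NewClient(cliConn)
	defer client.Close()

	var ok bool
	fmt.Println(client.Call("Admin.SignalService", &SignalArgs{Sig: 1}, &ok), ok) // <nil> true
	fmt.Println(client.Call("Admin.SignalService", &SignalArgs{Sig: 9}, &ok))     // unsupported signal: 9
}
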


@@ -1,613 +0,0 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"encoding/json"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"reflect"
"testing"
"time"
xnet "github.com/minio/minio/pkg/net"
)
///////////////////////////////////////////////////////////////////////////////
//
// localAdminClient and AdminRPCClient are adminCmdRunner interface compatible,
// hence below test functions are available for both clients.
//
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
//
// Admin RPC server, adminRPCReceiver and AdminRPCClient are
// inter-dependent, below test functions are sufficient to test all of them.
//
///////////////////////////////////////////////////////////////////////////////
func testAdminCmdRunnerSignalService(t *testing.T, client adminCmdRunner) {
tmpGlobalServiceSignalCh := globalServiceSignalCh
globalServiceSignalCh = make(chan serviceSignal, 10)
defer func() {
globalServiceSignalCh = tmpGlobalServiceSignalCh
}()
testCases := []struct {
signal serviceSignal
expectErr bool
}{
{serviceRestart, false},
{serviceStop, false},
{serviceStatus, true},
{serviceSignal(100), true},
}
for i, testCase := range testCases {
err := client.SignalService(testCase.signal)
expectErr := (err != nil)
if expectErr != testCase.expectErr {
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
}
}
}
func testAdminCmdRunnerReInitFormat(t *testing.T, client adminCmdRunner) {
tmpGlobalObjectAPI := globalObjectAPI
defer func() {
globalObjectAPI = tmpGlobalObjectAPI
}()
testCases := []struct {
objectAPI ObjectLayer
dryRun bool
expectErr bool
}{
{&DummyObjectLayer{}, true, false},
{&DummyObjectLayer{}, false, false},
{nil, true, true},
{nil, false, true},
}
for i, testCase := range testCases {
globalObjectAPI = testCase.objectAPI
err := client.ReInitFormat(testCase.dryRun)
expectErr := (err != nil)
if expectErr != testCase.expectErr {
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
}
}
}
func testAdminCmdRunnerListLocks(t *testing.T, client adminCmdRunner) {
tmpGlobalObjectAPI := globalObjectAPI
defer func() {
globalObjectAPI = tmpGlobalObjectAPI
}()
testCases := []struct {
objectAPI ObjectLayer
expectErr bool
}{
{&DummyObjectLayer{}, false},
{nil, true},
}
for i, testCase := range testCases {
globalObjectAPI = testCase.objectAPI
_, err := client.ListLocks("", "", time.Duration(0))
expectErr := (err != nil)
if expectErr != testCase.expectErr {
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
}
}
}
func testAdminCmdRunnerServerInfo(t *testing.T, client adminCmdRunner) {
tmpGlobalBootTime := globalBootTime
tmpGlobalObjectAPI := globalObjectAPI
tmpGlobalConnStats := globalConnStats
tmpGlobalHTTPStats := globalHTTPStats
tmpGlobalNotificationSys := globalNotificationSys
defer func() {
globalBootTime = tmpGlobalBootTime
globalObjectAPI = tmpGlobalObjectAPI
globalConnStats = tmpGlobalConnStats
globalHTTPStats = tmpGlobalHTTPStats
globalNotificationSys = tmpGlobalNotificationSys
}()
endpoints := new(EndpointList)
notificationSys, err := NewNotificationSys(globalServerConfig, *endpoints)
if err != nil {
t.Fatalf("unexpected error %v", err)
}
testCases := []struct {
bootTime time.Time
objectAPI ObjectLayer
connStats *ConnStats
httpStats *HTTPStats
notificationSys *NotificationSys
expectErr bool
}{
{UTCNow(), &DummyObjectLayer{}, newConnStats(), newHTTPStats(), notificationSys, false},
{time.Time{}, nil, nil, nil, nil, true},
{UTCNow(), nil, nil, nil, nil, true},
}
for i, testCase := range testCases {
globalBootTime = testCase.bootTime
globalObjectAPI = testCase.objectAPI
globalConnStats = testCase.connStats
globalHTTPStats = testCase.httpStats
globalNotificationSys = testCase.notificationSys
_, err := client.ServerInfo()
expectErr := (err != nil)
if expectErr != testCase.expectErr {
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
}
}
}
func testAdminCmdRunnerGetConfig(t *testing.T, client adminCmdRunner) {
tmpGlobalServerConfig := globalServerConfig
defer func() {
globalServerConfig = tmpGlobalServerConfig
}()
config := newServerConfig()
testCases := []struct {
config *serverConfig
expectErr bool
}{
{globalServerConfig, false},
{config, false},
}
for i, testCase := range testCases {
globalServerConfig = testCase.config
_, err := client.GetConfig()
expectErr := (err != nil)
if expectErr != testCase.expectErr {
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
}
}
}
func testAdminCmdRunnerWriteTmpConfig(t *testing.T, client adminCmdRunner) {
tmpConfigDir := configDir
defer func() {
configDir = tmpConfigDir
}()
tempDir, err := ioutil.TempDir("", ".AdminCmdRunnerWriteTmpConfig.")
if err != nil {
t.Fatalf("unexpected error %v", err)
}
defer os.RemoveAll(tempDir)
configDir = &ConfigDir{dir: tempDir}
testCases := []struct {
tmpFilename string
configBytes []byte
expectErr bool
}{
{"config1.json", []byte(`{"version":"23","region":"us-west-1a"}`), false},
// Overwrite test.
{"config1.json", []byte(`{"version":"23","region":"us-west-1a","browser":"on"}`), false},
{"config2.json", []byte{}, false},
{"config3.json", nil, false},
}
for i, testCase := range testCases {
err := client.WriteTmpConfig(testCase.tmpFilename, testCase.configBytes)
expectErr := (err != nil)
if expectErr != testCase.expectErr {
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
}
}
}
func testAdminCmdRunnerCommitConfig(t *testing.T, client adminCmdRunner) {
tmpConfigDir := configDir
defer func() {
configDir = tmpConfigDir
}()
tempDir, err := ioutil.TempDir("", ".AdminCmdRunnerCommitConfig.")
if err != nil {
t.Fatalf("unexpected error %v", err)
}
defer os.RemoveAll(tempDir)
configDir = &ConfigDir{dir: tempDir}
err = ioutil.WriteFile(filepath.Join(tempDir, "config.json"), []byte{}, os.ModePerm)
if err != nil {
t.Fatalf("unexpected error %v", err)
}
err = client.WriteTmpConfig("config1.json", []byte(`{"version":"23","region":"us-west-1a"}`))
if err != nil {
t.Fatalf("unexpected error %v", err)
}
testCases := []struct {
tmpFilename string
expectErr bool
}{
{"config1.json", false},
{"config2.json", true},
}
for i, testCase := range testCases {
err := client.CommitConfig(testCase.tmpFilename)
expectErr := (err != nil)
if expectErr != testCase.expectErr {
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
}
}
}
func newAdminRPCHTTPServerClient(t *testing.T) (*httptest.Server, *AdminRPCClient, *serverConfig) {
rpcServer, err := NewAdminRPCServer()
if err != nil {
t.Fatalf("unexpected error %v", err)
}
httpServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
rpcServer.ServeHTTP(w, r)
}))
url, err := xnet.ParseURL(httpServer.URL)
if err != nil {
t.Fatalf("unexpected error %v", err)
}
host, err := xnet.ParseHost(url.Host)
if err != nil {
t.Fatalf("unexpected error %v", err)
}
prevGlobalServerConfig := globalServerConfig
globalServerConfig = newServerConfig()
rpcClient, err := NewAdminRPCClient(host)
if err != nil {
t.Fatalf("unexpected error %v", err)
}
return httpServer, rpcClient, prevGlobalServerConfig
}
func TestAdminRPCClientSignalService(t *testing.T) {
httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
defer httpServer.Close()
defer func() {
globalServerConfig = prevGlobalServerConfig
}()
testAdminCmdRunnerSignalService(t, rpcClient)
}
func TestAdminRPCClientReInitFormat(t *testing.T) {
httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
defer httpServer.Close()
defer func() {
globalServerConfig = prevGlobalServerConfig
}()
testAdminCmdRunnerReInitFormat(t, rpcClient)
}
func TestAdminRPCClientListLocks(t *testing.T) {
httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
defer httpServer.Close()
defer func() {
globalServerConfig = prevGlobalServerConfig
}()
testAdminCmdRunnerListLocks(t, rpcClient)
}
func TestAdminRPCClientServerInfo(t *testing.T) {
httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
defer httpServer.Close()
defer func() {
globalServerConfig = prevGlobalServerConfig
}()
testAdminCmdRunnerServerInfo(t, rpcClient)
}
func TestAdminRPCClientGetConfig(t *testing.T) {
httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
defer httpServer.Close()
defer func() {
globalServerConfig = prevGlobalServerConfig
}()
testAdminCmdRunnerGetConfig(t, rpcClient)
}
func TestAdminRPCClientWriteTmpConfig(t *testing.T) {
httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
defer httpServer.Close()
defer func() {
globalServerConfig = prevGlobalServerConfig
}()
testAdminCmdRunnerWriteTmpConfig(t, rpcClient)
}
func TestAdminRPCClientCommitConfig(t *testing.T) {
httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
defer httpServer.Close()
defer func() {
globalServerConfig = prevGlobalServerConfig
}()
testAdminCmdRunnerCommitConfig(t, rpcClient)
}
var (
config1 = []byte(`{
"version": "13",
"credential": {
"accessKey": "minio",
"secretKey": "minio123"
},
"region": "us-east-1",
"logger": {
"console": {
"enable": true,
"level": "debug"
},
"file": {
"enable": false,
"fileName": "",
"level": ""
}
},
"notify": {
"amqp": {
"1": {
"enable": false,
"url": "",
"exchange": "",
"routingKey": "",
"exchangeType": "",
"mandatory": false,
"immediate": false,
"durable": false,
"internal": false,
"noWait": false,
"autoDeleted": false
}
},
"nats": {
"1": {
"enable": false,
"address": "",
"subject": "",
"username": "",
"password": "",
"token": "",
"secure": false,
"pingInterval": 0,
"streaming": {
"enable": false,
"clusterID": "",
"clientID": "",
"async": false,
"maxPubAcksInflight": 0
}
}
},
"elasticsearch": {
"1": {
"enable": false,
"url": "",
"index": ""
}
},
"redis": {
"1": {
"enable": false,
"address": "",
"password": "",
"key": ""
}
},
"postgresql": {
"1": {
"enable": false,
"connectionString": "",
"table": "",
"host": "",
"port": "",
"user": "",
"password": "",
"database": ""
}
},
"kafka": {
"1": {
"enable": false,
"brokers": null,
"topic": ""
}
},
"webhook": {
"1": {
"enable": false,
"endpoint": ""
}
}
}
}
`)
// diff from config1 - amqp.Enable is True
config2 = []byte(`{
"version": "13",
"credential": {
"accessKey": "minio",
"secretKey": "minio123"
},
"region": "us-east-1",
"logger": {
"console": {
"enable": true,
"level": "debug"
},
"file": {
"enable": false,
"fileName": "",
"level": ""
}
},
"notify": {
"amqp": {
"1": {
"enable": true,
"url": "",
"exchange": "",
"routingKey": "",
"exchangeType": "",
"mandatory": false,
"immediate": false,
"durable": false,
"internal": false,
"noWait": false,
"autoDeleted": false
}
},
"nats": {
"1": {
"enable": false,
"address": "",
"subject": "",
"username": "",
"password": "",
"token": "",
"secure": false,
"pingInterval": 0,
"streaming": {
"enable": false,
"clusterID": "",
"clientID": "",
"async": false,
"maxPubAcksInflight": 0
}
}
},
"elasticsearch": {
"1": {
"enable": false,
"url": "",
"index": ""
}
},
"redis": {
"1": {
"enable": false,
"address": "",
"password": "",
"key": ""
}
},
"postgresql": {
"1": {
"enable": false,
"connectionString": "",
"table": "",
"host": "",
"port": "",
"user": "",
"password": "",
"database": ""
}
},
"kafka": {
"1": {
"enable": false,
"brokers": null,
"topic": ""
}
},
"webhook": {
"1": {
"enable": false,
"endpoint": ""
}
}
}
}
`)
)
// TestGetValidServerConfig - test for getValidServerConfig.
func TestGetValidServerConfig(t *testing.T) {
var c1, c2 serverConfig
err := json.Unmarshal(config1, &c1)
if err != nil {
t.Fatalf("json unmarshal of %s failed: %v", string(config1), err)
}
err = json.Unmarshal(config2, &c2)
if err != nil {
t.Fatalf("json unmarshal of %s failed: %v", string(config2), err)
}
// Valid config.
noErrs := []error{nil, nil, nil, nil}
serverConfigs := []serverConfig{c1, c2, c1, c1}
validConfig, err := getValidServerConfig(serverConfigs, noErrs)
if err != nil {
t.Errorf("Expected a valid config but received %v instead", err)
}
if !reflect.DeepEqual(validConfig, c1) {
t.Errorf("Expected valid config to be %v but received %v", config1, validConfig)
}
// Invalid config - no quorum.
serverConfigs = []serverConfig{c1, c2, c2, c1}
_, err = getValidServerConfig(serverConfigs, noErrs)
if err != errXLWriteQuorum {
t.Errorf("Expected to fail due to lack of quorum but received %v", err)
}
// All errors
allErrs := []error{errDiskNotFound, errDiskNotFound, errDiskNotFound, errDiskNotFound}
serverConfigs = []serverConfig{{}, {}, {}, {}}
_, err = getValidServerConfig(serverConfigs, allErrs)
if err != errXLWriteQuorum {
t.Errorf("Expected to fail due to lack of quorum but received %v", err)
}
}


@@ -23,6 +23,8 @@ import (
const (
// Response request id.
responseRequestIDKey = "x-amz-request-id"
// Deployment id.
responseDeploymentIDKey = "x-minio-deployment-id"
)
// ObjectIdentifier carries key name for the object to delete.
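
The new responseDeploymentIDKey suggests responses are stamped with a per-deployment ID header. A hypothetical middleware sketch of how such a header could be attached (the helper name and wiring are assumptions, not this repository's actual code path):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// withDeploymentID stamps every response with the deployment's ID,
// the way a handler could use responseDeploymentIDKey above.
// (Illustrative middleware; not MinIO's actual wiring.)
func withDeploymentID(h http.Handler, id string) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("x-minio-deployment-id", id)
		h.ServeHTTP(w, r)
	})
}

func main() {
	h := withDeploymentID(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}), "6faeded5-5cf3-4133-8a37-07c5d500207c") // hypothetical deployment ID
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, httptest.NewRequest("GET", "/", nil))
	fmt.Println(rec.Header().Get("x-minio-deployment-id"))
}
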


@@ -22,8 +22,10 @@ import (
"fmt"
"net/http"
"github.com/coreos/etcd/client"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/dns"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/hash"
)
@@ -40,8 +42,8 @@ type APIErrorResponse struct {
XMLName xml.Name `xml:"Error" json:"-"`
Code string
Message string
Key string
BucketName string
Key string `xml:"Key,omitempty" json:"Key,omitempty"`
BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"`
Resource string
RequestID string `xml:"RequestId" json:"RequestId"`
HostID string `xml:"HostId" json:"HostId"`
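
With omitempty added, an empty Key or BucketName no longer renders as an empty XML element; it disappears from the error body. A quick encoding/xml demonstration of the changed struct tags:

package main

import (
	"encoding/xml"
	"fmt"
)

type APIErrorResponse struct {
	XMLName    xml.Name `xml:"Error"`
	Code       string
	Message    string
	Key        string `xml:"Key,omitempty"`
	BucketName string `xml:"BucketName,omitempty"`
}

func main() {
	// Key and BucketName are empty, so they are omitted entirely.
	out, _ := xml.MarshalIndent(APIErrorResponse{
		Code:    "NoSuchVersion",
		Message: "version ID does not match an existing version",
	}, "", "  ")
	fmt.Println(string(out))
	// <Error>
	//   <Code>NoSuchVersion</Code>
	//   <Message>version ID does not match an existing version</Message>
	// </Error>
}
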
@@ -83,6 +85,7 @@ const (
ErrNoSuchBucketPolicy
ErrNoSuchKey
ErrNoSuchUpload
ErrNoSuchVersion
ErrNotImplemented
ErrPreconditionFailed
ErrRequestTimeTooSkewed
@@ -127,8 +130,12 @@ const (
ErrMaximumExpires
ErrSlowDown
ErrInvalidPrefixMarker
ErrBadRequest
// Add new error codes here.
// SSE-S3 related API errors
ErrInvalidEncryptionMethod
// Server-Side-Encryption (with Customer provided key) related API errors.
ErrInsecureSSECustomerRequest
ErrSSEMultipartEncrypted
@@ -140,6 +147,12 @@ const (
ErrMissingSSECustomerKeyMD5
ErrSSECustomerKeyMD5Mismatch
ErrInvalidSSECustomerParameters
ErrIncompatibleEncryptionMethod
ErrKMSNotConfigured
ErrKMSAuthFailure
ErrNoAccessKey
ErrInvalidToken
// Bucket notification related errors.
ErrEventNotification
@@ -169,7 +182,6 @@ const (
ErrInvalidResourceName
ErrServerNotInitialized
ErrOperationTimedOut
ErrPartsSizeUnequal
ErrInvalidRequest
// Minio storage class error codes
ErrInvalidStorageClass
@@ -179,14 +191,19 @@ const (
// new error codes here.
ErrMalformedJSON
ErrAdminNoSuchUser
ErrAdminNoSuchPolicy
ErrAdminInvalidArgument
ErrAdminInvalidAccessKey
ErrAdminInvalidSecretKey
ErrAdminConfigNoQuorum
ErrAdminConfigTooLarge
ErrAdminConfigBadJSON
ErrAdminConfigDuplicateKeys
ErrAdminCredentialsMismatch
ErrInsecureClientRequest
ErrObjectTampered
ErrHealNotImplemented
ErrHealNoSuchProcess
ErrHealInvalidClientToken
@@ -194,6 +211,96 @@ const (
ErrHealAlreadyRunning
ErrHealOverlappingPaths
ErrIncorrectContinuationToken
//S3 Select Errors
ErrEmptyRequestBody
ErrUnsupportedFunction
ErrInvalidExpressionType
ErrBusy
ErrUnauthorizedAccess
ErrExpressionTooLong
ErrIllegalSQLFunctionArgument
ErrInvalidKeyPath
ErrInvalidCompressionFormat
ErrInvalidFileHeaderInfo
ErrInvalidJSONType
ErrInvalidQuoteFields
ErrInvalidRequestParameter
ErrInvalidDataType
ErrInvalidTextEncoding
ErrInvalidDataSource
ErrInvalidTableAlias
ErrMissingRequiredParameter
ErrObjectSerializationConflict
ErrUnsupportedSQLOperation
ErrUnsupportedSQLStructure
ErrUnsupportedSyntax
ErrUnsupportedRangeHeader
ErrLexerInvalidChar
ErrLexerInvalidOperator
ErrLexerInvalidLiteral
ErrLexerInvalidIONLiteral
ErrParseExpectedDatePart
ErrParseExpectedKeyword
ErrParseExpectedTokenType
ErrParseExpected2TokenTypes
ErrParseExpectedNumber
ErrParseExpectedRightParenBuiltinFunctionCall
ErrParseExpectedTypeName
ErrParseExpectedWhenClause
ErrParseUnsupportedToken
ErrParseUnsupportedLiteralsGroupBy
ErrParseExpectedMember
ErrParseUnsupportedSelect
ErrParseUnsupportedCase
ErrParseUnsupportedCaseClause
ErrParseUnsupportedAlias
ErrParseUnsupportedSyntax
ErrParseUnknownOperator
ErrParseMissingIdentAfterAt
ErrParseUnexpectedOperator
ErrParseUnexpectedTerm
ErrParseUnexpectedToken
ErrParseUnexpectedKeyword
ErrParseExpectedExpression
ErrParseExpectedLeftParenAfterCast
ErrParseExpectedLeftParenValueConstructor
ErrParseExpectedLeftParenBuiltinFunctionCall
ErrParseExpectedArgumentDelimiter
ErrParseCastArity
ErrParseInvalidTypeParam
ErrParseEmptySelect
ErrParseSelectMissingFrom
ErrParseExpectedIdentForGroupName
ErrParseExpectedIdentForAlias
ErrParseUnsupportedCallWithStar
ErrParseNonUnaryAgregateFunctionCall
ErrParseMalformedJoin
ErrParseExpectedIdentForAt
ErrParseAsteriskIsNotAloneInSelectList
ErrParseCannotMixSqbAndWildcardInSelectList
ErrParseInvalidContextForWildcardInSelectList
ErrIncorrectSQLFunctionArgumentType
ErrValueParseFailure
ErrEvaluatorInvalidArguments
ErrIntegerOverflow
ErrLikeInvalidInputs
ErrCastFailed
ErrInvalidCast
ErrEvaluatorInvalidTimestampFormatPattern
ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing
ErrEvaluatorTimestampFormatPatternDuplicateFields
ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch
ErrEvaluatorUnterminatedTimestampFormatPatternToken
ErrEvaluatorInvalidTimestampFormatPatternToken
ErrEvaluatorInvalidTimestampFormatPatternSymbol
ErrEvaluatorBindingDoesNotExist
ErrMissingHeaders
ErrInvalidColumnIndex
ErrAdminConfigNotificationTargetsFailed
ErrAdminProfilerNotEnabled
ErrInvalidDecompressedSize
)
// error code to APIError structure, these fields carry respective
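
Every code in the list above resolves through the errorCodeResponse map to a Code string, a Description, and an HTTP status. A stripped-down sketch of that lookup-and-write path (two-entry map and an illustrative writeErrorResponse; the real handler also carries request IDs, resource, and host details):

package main

import (
	"encoding/xml"
	"fmt"
	"net/http"
	"net/http/httptest"
)

type APIErrorCode int

const (
	ErrNone APIErrorCode = iota
	ErrBusy
)

type APIError struct {
	Code           string
	Description    string
	HTTPStatusCode int
}

// A stand-in for the full errorCodeResponse map.
var errorCodeResponse = map[APIErrorCode]APIError{
	ErrBusy: {
		Code:           "Busy",
		Description:    "The service is unavailable. Please retry.",
		HTTPStatusCode: http.StatusServiceUnavailable,
	},
}

// writeErrorResponse resolves the code and writes the XML error body.
func writeErrorResponse(w http.ResponseWriter, code APIErrorCode) {
	apiErr := errorCodeResponse[code]
	w.WriteHeader(apiErr.HTTPStatusCode)
	type errBody struct {
		XMLName xml.Name `xml:"Error"`
		Code    string
		Message string
	}
	_ = xml.NewEncoder(w).Encode(errBody{Code: apiErr.Code, Message: apiErr.Description})
}

func main() {
	rec := httptest.NewRecorder()
	writeErrorResponse(rec, ErrBusy)
	fmt.Println(rec.Code)          // 503
	fmt.Println(rec.Body.String()) // <Error><Code>Busy</Code><Message>...</Message></Error>
}
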
@@ -339,6 +446,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
HTTPStatusCode: http.StatusNotFound,
},
ErrNoSuchVersion: {
Code: "NoSuchVersion",
Description: "Indicates that the version ID specified in the request does not match an existing version.",
HTTPStatusCode: http.StatusNotFound,
},
ErrNotImplemented: {
Code: "NotImplemented",
Description: "A header you provided implies functionality that is not implemented",
@@ -541,6 +653,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "Invalid marker prefix combination",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBadRequest: {
Code: "BadRequest",
Description: "400 BadRequest",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Actual XML error response also contains the header which missed in list of signed header parameters.
ErrUnsignedHeaders: {
@@ -630,6 +747,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "Your metadata headers exceed the maximum allowed metadata size.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidEncryptionMethod: {
Code: "InvalidRequest",
Description: "The encryption method specified is not supported",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInsecureSSECustomerRequest: {
Code: "InvalidRequest",
Description: "Requests specifying Server Side Encryption with Customer provided keys must be made over a secure connection.",
@@ -680,6 +802,31 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "The provided encryption parameters did not match the ones used originally.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrIncompatibleEncryptionMethod: {
Code: "InvalidArgument",
Description: "Server side encryption specified with both SSE-C and SSE-S3 headers",
HTTPStatusCode: http.StatusBadRequest,
},
ErrKMSNotConfigured: {
Code: "InvalidArgument",
Description: "Server side encryption specified but KMS is not configured",
HTTPStatusCode: http.StatusBadRequest,
},
ErrKMSAuthFailure: {
Code: "InvalidArgument",
Description: "Server side encryption specified but KMS authorization failed",
HTTPStatusCode: http.StatusBadRequest,
},
ErrNoAccessKey: {
Code: "AccessDenied",
Description: "No AWSAccessKey was presented",
HTTPStatusCode: http.StatusForbidden,
},
ErrInvalidToken: {
Code: "InvalidTokenId",
Description: "The security token included in the request is invalid",
HTTPStatusCode: http.StatusForbidden,
},
/// S3 extensions.
ErrContentSHA256Mismatch: {
@@ -692,7 +839,7 @@ var errorCodeResponse = map[APIErrorCode]APIError{
ErrStorageFull: {
Code: "XMinioStorageFull",
Description: "Storage backend has reached its minimum free disk threshold. Please delete a few objects to proceed.",
HTTPStatusCode: http.StatusInternalServerError,
HTTPStatusCode: http.StatusInsufficientStorage,
},
ErrRequestBodyParse: {
Code: "XMinioRequestBodyParse",
@@ -739,6 +886,21 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "The JSON you provided was not well-formed or did not validate against our published format.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminNoSuchUser: {
Code: "XMinioAdminNoSuchUser",
Description: "The specified user does not exist.",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminNoSuchPolicy: {
Code: "XMinioAdminNoSuchPolicy",
Description: "The canned policy does not exist.",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminInvalidArgument: {
Code: "XMinioAdminInvalidArgument",
Description: "Invalid arguments specified.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminInvalidAccessKey: {
Code: "XMinioAdminInvalidAccessKey",
Description: "The access key is invalid.",
@@ -757,14 +919,29 @@ var errorCodeResponse = map[APIErrorCode]APIError{
ErrAdminConfigTooLarge: {
Code: "XMinioAdminConfigTooLarge",
Description: fmt.Sprintf("Configuration data provided exceeds the allowed maximum of %d bytes",
maxConfigJSONSize),
maxEConfigJSONSize),
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminConfigBadJSON: {
Code: "XMinioAdminConfigBadJSON",
Description: "JSON configuration provided is of incorrect format",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminConfigDuplicateKeys: {
Code: "XMinioAdminConfigDuplicateKeys",
Description: "JSON configuration provided has objects with duplicate keys",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminConfigNotificationTargetsFailed: {
Code: "XMinioAdminNotificationTargetsTestFailed",
Description: "Configuration update failed due an unsuccessful attempt to connect to one or more notification servers",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminProfilerNotEnabled: {
Code: "XMinioAdminProfilerNotEnabled",
Description: "Unable to perform the requested operation because profiling is not enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminCredentialsMismatch: {
Code: "XMinioAdminCredentialsMismatch",
Description: "Credentials in config mismatch with server environment variables",
@@ -785,11 +962,6 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "Your metadata headers are not supported.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrPartsSizeUnequal: {
Code: "XMinioPartsSizeUnequal",
Description: "All parts except the last part should be of the same size.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrObjectTampered: {
Code: "XMinioObjectTampered",
Description: errObjectTampered.Error(),
@@ -800,6 +972,7 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds",
HTTPStatusCode: http.StatusBadRequest,
},
// Generic Invalid-Request error. Should be used for response errors only for unlikely
// corner case errors for which introducing new APIErrorCode is not worth it. LogIf()
// should be used to log the error at the source of the error for debugging purposes.
@@ -848,21 +1021,455 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "The continuation token provided is incorrect",
HTTPStatusCode: http.StatusBadRequest,
},
//S3 Select API Errors
ErrEmptyRequestBody: {
Code: "EmptyRequestBody",
Description: "Request body cannot be empty.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrUnsupportedFunction: {
Code: "UnsupportedFunction",
Description: "Encountered an unsupported SQL function.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidDataSource: {
Code: "InvalidDataSource",
Description: "Invalid data source type. Only CSV and JSON are supported at this time.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidExpressionType: {
Code: "InvalidExpressionType",
Description: "The ExpressionType is invalid. Only SQL expressions are supported at this time.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBusy: {
Code: "Busy",
Description: "The service is unavailable. Please retry.",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrUnauthorizedAccess: {
Code: "UnauthorizedAccess",
Description: "You are not authorized to perform this operation",
HTTPStatusCode: http.StatusUnauthorized,
},
ErrExpressionTooLong: {
Code: "ExpressionTooLong",
Description: "The SQL expression is too long: The maximum byte-length for the SQL expression is 256 KB.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrIllegalSQLFunctionArgument: {
Code: "IllegalSqlFunctionArgument",
Description: "Illegal argument was used in the SQL function.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidKeyPath: {
Code: "InvalidKeyPath",
Description: "Key path in the SQL expression is invalid.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidCompressionFormat: {
Code: "InvalidCompressionFormat",
Description: "The file is not in a supported compression format. Only GZIP is supported at this time.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidFileHeaderInfo: {
Code: "InvalidFileHeaderInfo",
Description: "The FileHeaderInfo is invalid. Only NONE, USE, and IGNORE are supported.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidJSONType: {
Code: "InvalidJsonType",
Description: "The JsonType is invalid. Only DOCUMENT and LINES are supported at this time.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidQuoteFields: {
Code: "InvalidQuoteFields",
Description: "The QuoteFields is invalid. Only ALWAYS and ASNEEDED are supported.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidRequestParameter: {
Code: "InvalidRequestParameter",
Description: "The value of a parameter in SelectRequest element is invalid. Check the service API documentation and try again.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidDataType: {
Code: "InvalidDataType",
Description: "The SQL expression contains an invalid data type.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidTextEncoding: {
Code: "InvalidTextEncoding",
Description: "Invalid encoding type. Only UTF-8 encoding is supported at this time.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidTableAlias: {
Code: "InvalidTableAlias",
Description: "The SQL expression contains an invalid table alias.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingRequiredParameter: {
Code: "MissingRequiredParameter",
Description: "The SelectRequest entity is missing a required parameter. Check the service documentation and try again.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrObjectSerializationConflict: {
Code: "ObjectSerializationConflict",
Description: "The SelectRequest entity can only contain one of CSV or JSON. Check the service documentation and try again.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrUnsupportedSQLOperation: {
Code: "UnsupportedSqlOperation",
Description: "Encountered an unsupported SQL operation.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrUnsupportedSQLStructure: {
Code: "UnsupportedSqlStructure",
Description: "Encountered an unsupported SQL structure. Check the SQL Reference.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrUnsupportedSyntax: {
Code: "UnsupportedSyntax",
Description: "Encountered invalid syntax.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrUnsupportedRangeHeader: {
Code: "UnsupportedRangeHeader",
Description: "Range header is not supported for this operation.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrLexerInvalidChar: {
Code: "LexerInvalidChar",
Description: "The SQL expression contains an invalid character.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrLexerInvalidOperator: {
Code: "LexerInvalidOperator",
Description: "The SQL expression contains an invalid literal.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrLexerInvalidLiteral: {
Code: "LexerInvalidLiteral",
Description: "The SQL expression contains an invalid operator.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrLexerInvalidIONLiteral: {
Code: "LexerInvalidIONLiteral",
Description: "The SQL expression contains an invalid operator.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseExpectedDatePart: {
Code: "ParseExpectedDatePart",
Description: "Did not find the expected date part in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseExpectedKeyword: {
Code: "ParseExpectedKeyword",
Description: "Did not find the expected keyword in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseExpectedTokenType: {
Code: "ParseExpectedTokenType",
Description: "Did not find the expected token in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseExpected2TokenTypes: {
Code: "ParseExpected2TokenTypes",
Description: "Did not find the expected token in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseExpectedNumber: {
Code: "ParseExpectedNumber",
Description: "Did not find the expected number in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseExpectedRightParenBuiltinFunctionCall: {
Code: "ParseExpectedRightParenBuiltinFunctionCall",
Description: "Did not find the expected right parenthesis character in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseExpectedTypeName: {
Code: "ParseExpectedTypeName",
Description: "Did not find the expected type name in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseExpectedWhenClause: {
Code: "ParseExpectedWhenClause",
Description: "Did not find the expected WHEN clause in the SQL expression. CASE is not supported.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseUnsupportedToken: {
Code: "ParseUnsupportedToken",
Description: "The SQL expression contains an unsupported token.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseUnsupportedLiteralsGroupBy: {
Code: "ParseUnsupportedLiteralsGroupBy",
Description: "The SQL expression contains an unsupported use of GROUP BY.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseExpectedMember: {
Code: "ParseExpectedMember",
Description: "The SQL expression contains an unsupported use of MEMBER.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseUnsupportedSelect: {
Code: "ParseUnsupportedSelect",
Description: "The SQL expression contains an unsupported use of SELECT.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseUnsupportedCase: {
Code: "ParseUnsupportedCase",
Description: "The SQL expression contains an unsupported use of CASE.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseUnsupportedCaseClause: {
Code: "ParseUnsupportedCaseClause",
Description: "The SQL expression contains an unsupported use of CASE.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseUnsupportedAlias: {
Code: "ParseUnsupportedAlias",
Description: "The SQL expression contains an unsupported use of ALIAS.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseUnsupportedSyntax: {
Code: "ParseUnsupportedSyntax",
Description: "The SQL expression contains unsupported syntax.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseUnknownOperator: {
Code: "ParseUnknownOperator",
Description: "The SQL expression contains an invalid operator.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseMissingIdentAfterAt: {
Code: "ParseMissingIdentAfterAt",
Description: "Did not find the expected identifier after the @ symbol in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseUnexpectedOperator: {
Code: "ParseUnexpectedOperator",
Description: "The SQL expression contains an unexpected operator.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseUnexpectedTerm: {
Code: "ParseUnexpectedTerm",
Description: "The SQL expression contains an unexpected term.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseUnexpectedToken: {
Code: "ParseUnexpectedToken",
Description: "The SQL expression contains an unexpected token.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseUnexpectedKeyword: {
Code: "ParseUnexpectedKeyword",
Description: "The SQL expression contains an unexpected keyword.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseExpectedExpression: {
Code: "ParseExpectedExpression",
Description: "Did not find the expected SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseExpectedLeftParenAfterCast: {
Code: "ParseExpectedLeftParenAfterCast",
Description: "Did not find expected the left parenthesis in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseExpectedLeftParenValueConstructor: {
Code: "ParseExpectedLeftParenValueConstructor",
Description: "Did not find expected the left parenthesis in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseExpectedLeftParenBuiltinFunctionCall: {
Code: "ParseExpectedLeftParenBuiltinFunctionCall",
Description: "Did not find the expected left parenthesis in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseExpectedArgumentDelimiter: {
Code: "ParseExpectedArgumentDelimiter",
Description: "Did not find the expected argument delimiter in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseCastArity: {
Code: "ParseCastArity",
Description: "The SQL expression CAST has incorrect arity.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseInvalidTypeParam: {
Code: "ParseInvalidTypeParam",
Description: "The SQL expression contains an invalid parameter value.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseEmptySelect: {
Code: "ParseEmptySelect",
Description: "The SQL expression contains an empty SELECT.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseSelectMissingFrom: {
Code: "ParseSelectMissingFrom",
Description: "GROUP is not supported in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseExpectedIdentForGroupName: {
Code: "ParseExpectedIdentForGroupName",
Description: "GROUP is not supported in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseExpectedIdentForAlias: {
Code: "ParseExpectedIdentForAlias",
Description: "Did not find the expected identifier for the alias in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseUnsupportedCallWithStar: {
Code: "ParseUnsupportedCallWithStar",
Description: "Only COUNT with (*) as a parameter is supported in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseNonUnaryAgregateFunctionCall: {
Code: "ParseNonUnaryAgregateFunctionCall",
Description: "Only one argument is supported for aggregate functions in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseMalformedJoin: {
Code: "ParseMalformedJoin",
Description: "JOIN is not supported in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseExpectedIdentForAt: {
Code: "ParseExpectedIdentForAt",
Description: "Did not find the expected identifier for AT name in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseAsteriskIsNotAloneInSelectList: {
Code: "ParseAsteriskIsNotAloneInSelectList",
Description: "Other expressions are not allowed in the SELECT list when '*' is used without dot notation in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseCannotMixSqbAndWildcardInSelectList: {
Code: "ParseCannotMixSqbAndWildcardInSelectList",
Description: "Cannot mix [] and * in the same expression in a SELECT list in SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseInvalidContextForWildcardInSelectList: {
Code: "ParseInvalidContextForWildcardInSelectList",
Description: "Invalid use of * in SELECT list in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrIncorrectSQLFunctionArgumentType: {
Code: "IncorrectSqlFunctionArgumentType",
Description: "Incorrect type of arguments in function call in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrValueParseFailure: {
Code: "ValueParseFailure",
Description: "Time stamp parse failure in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrEvaluatorInvalidArguments: {
Code: "EvaluatorInvalidArguments",
Description: "Incorrect number of arguments in the function call in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrIntegerOverflow: {
Code: "IntegerOverflow",
Description: "Int overflow or underflow in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrLikeInvalidInputs: {
Code: "LikeInvalidInputs",
Description: "Invalid argument given to the LIKE clause in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrCastFailed: {
Code: "CastFailed",
Description: "Attempt to convert from one data type to another using CAST failed in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidCast: {
Code: "InvalidCast",
Description: "Attempt to convert from one data type to another using CAST failed in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrEvaluatorInvalidTimestampFormatPattern: {
Code: "EvaluatorInvalidTimestampFormatPattern",
Description: "Time stamp format pattern requires additional fields in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing: {
Code: "EvaluatorInvalidTimestampFormatPatternSymbolForParsing",
Description: "Time stamp format pattern contains a valid format symbol that cannot be applied to time stamp parsing in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrEvaluatorTimestampFormatPatternDuplicateFields: {
Code: "EvaluatorTimestampFormatPatternDuplicateFields",
Description: "Time stamp format pattern contains multiple format specifiers representing the time stamp field in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch: {
Code: "EvaluatorUnterminatedTimestampFormatPatternToken",
Description: "Time stamp format pattern contains unterminated token in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrEvaluatorUnterminatedTimestampFormatPatternToken: {
Code: "EvaluatorInvalidTimestampFormatPatternToken",
Description: "Time stamp format pattern contains an invalid token in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrEvaluatorInvalidTimestampFormatPatternToken: {
Code: "EvaluatorInvalidTimestampFormatPatternToken",
Description: "Time stamp format pattern contains an invalid token in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrEvaluatorInvalidTimestampFormatPatternSymbol: {
Code: "EvaluatorInvalidTimestampFormatPatternSymbol",
Description: "Time stamp format pattern contains an invalid symbol in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrEvaluatorBindingDoesNotExist: {
Code: "ErrEvaluatorBindingDoesNotExist",
Description: "A column name or a path provided does not exist in the SQL expression",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingHeaders: {
Code: "MissingHeaders",
Description: "Some headers in the query are missing from the file. Check the file and try again.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidColumnIndex: {
Code: "InvalidColumnIndex",
Description: "The column index is invalid. Please check the service documentation and try again.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidDecompressedSize: {
Code: "XMinioInvalidDecompressedSize",
Description: "The data provided is unfit for decompression",
HTTPStatusCode: http.StatusBadRequest,
},
// Add your error structure here.
}
// toAPIErrorCode - Converts embedded errors. Convenience
// function written to handle all cases where we have known types of
// errors returned by underlying layers.
func toAPIErrorCode(err error) (apiErr APIErrorCode) {
func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
if err == nil {
return ErrNone
}
// Verify if the underlying error is signature mismatch.
switch err {
case errInvalidArgument:
apiErr = ErrAdminInvalidArgument
case errNoSuchUser:
apiErr = ErrAdminNoSuchUser
case errNoSuchPolicy:
apiErr = ErrAdminNoSuchPolicy
case errSignatureMismatch:
apiErr = ErrSignatureDoesNotMatch
case errInvalidRange:
apiErr = ErrInvalidRange
case errDataTooLarge:
apiErr = ErrEntityTooLarge
case errDataTooSmall:
@@ -872,17 +1479,19 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
case auth.ErrInvalidSecretKeyLength:
apiErr = ErrAdminInvalidSecretKey
// SSE errors
case errInsecureSSERequest:
apiErr = ErrInsecureSSECustomerRequest
case errInvalidSSEAlgorithm:
case errInvalidEncryptionParameters:
apiErr = ErrInvalidEncryptionParameters
case crypto.ErrInvalidEncryptionMethod:
apiErr = ErrInvalidEncryptionMethod
case crypto.ErrInvalidCustomerAlgorithm:
apiErr = ErrInvalidSSECustomerAlgorithm
case errInvalidSSEKey:
case crypto.ErrInvalidCustomerKey:
apiErr = ErrInvalidSSECustomerKey
case errMissingSSEKey:
case crypto.ErrMissingCustomerKey:
apiErr = ErrMissingSSECustomerKey
case errMissingSSEKeyMD5:
case crypto.ErrMissingCustomerKeyMD5:
apiErr = ErrMissingSSECustomerKeyMD5
case errSSEKeyMD5Mismatch:
case crypto.ErrCustomerKeyMD5Mismatch:
apiErr = ErrSSECustomerKeyMD5Mismatch
case errObjectTampered:
apiErr = ErrObjectTampered
@@ -890,12 +1499,24 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
apiErr = ErrSSEEncryptedObject
case errInvalidSSEParameters:
apiErr = ErrInvalidSSECustomerParameters
case errSSEKeyMismatch:
case crypto.ErrInvalidCustomerKey, crypto.ErrSecretKeyMismatch:
apiErr = ErrAccessDenied // no access without correct key
case context.Canceled, context.DeadlineExceeded:
case crypto.ErrIncompatibleEncryptionMethod:
apiErr = ErrIncompatibleEncryptionMethod
case errKMSNotConfigured:
apiErr = ErrKMSNotConfigured
case crypto.ErrKMSAuthLogin:
apiErr = ErrKMSAuthFailure
case errOperationTimedOut, context.Canceled, context.DeadlineExceeded:
apiErr = ErrOperationTimedOut
}
// Compression errors
switch err {
case errInvalidDecompressedSize:
apiErr = ErrInvalidDecompressedSize
}
if apiErr != ErrNone {
// If there was a match in the above switch case.
return apiErr
@@ -903,10 +1524,8 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
// etcd specific errors, a key is always a bucket for us return
// ErrNoSuchBucket in such a case.
if e, ok := err.(*client.Error); ok {
if e.Code == client.ErrorCodeKeyNotFound {
return ErrNoSuchBucket
}
if err == dns.ErrNoEntriesFound {
return ErrNoSuchBucket
}
switch err.(type) {
@@ -972,8 +1591,6 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
apiErr = ErrEntityTooLarge
case UnsupportedMetadata:
apiErr = ErrUnsupportedMetadata
case PartsSizeUnequal:
apiErr = ErrPartsSizeUnequal
case BucketPolicyNotFound:
apiErr = ErrNoSuchBucketPolicy
case *event.ErrInvalidEventName:
@@ -1000,8 +1617,14 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
apiErr = ErrUnsupportedNotification
case BackendDown:
apiErr = ErrBackendDown
case crypto.Error:
apiErr = ErrObjectTampered
default:
apiErr = ErrInternalError
// Make sure to log the errors which we cannot translate
// to a meaningful S3 API errors. This is added to aid in
// debugging unexpected/unhandled errors.
logger.LogIf(ctx, err)
}
return apiErr
@@ -1009,17 +1632,20 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
// getAPIError provides API Error for input API error code.
func getAPIError(code APIErrorCode) APIError {
return errorCodeResponse[code]
if apiErr, ok := errorCodeResponse[code]; ok {
return apiErr
}
return errorCodeResponse[ErrInternalError]
}
// getAPIErrorResponse takes a standard error and resource value and
// provides an encodable populated response value
func getAPIErrorResponse(err APIError, resource string) APIErrorResponse {
func getAPIErrorResponse(err APIError, resource, requestid string) APIErrorResponse {
return APIErrorResponse{
Code: err.Code,
Message: err.Description,
Resource: resource,
RequestID: "3L137",
RequestID: requestid,
HostID: "3L137",
}
}
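
The comma-ok lookup in getAPIError above is what keeps unknown codes from producing a zero-value response. A standalone sketch of the same fallback pattern; the codes and messages below are illustrative, not taken from this changeset:

package main

import "fmt"

// Illustrative only: a trimmed-down errorCodeResponse lookup with an
// internal-error fallback.
type apiError struct {
	Code        string
	Description string
	HTTPStatus  int
}

var errorCodeResponse = map[int]apiError{
	0: {"InternalError", "We encountered an internal error.", 500},
	1: {"NoSuchBucket", "The specified bucket does not exist.", 404},
}

func getAPIError(code int) apiError {
	// Comma-ok guards against codes with no registered response.
	if apiErr, ok := errorCodeResponse[code]; ok {
		return apiErr
	}
	return errorCodeResponse[0] // unknown codes degrade to InternalError
}

func main() {
	fmt.Println(getAPIError(42).Code) // InternalError
}
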

View File

@@ -17,9 +17,11 @@
package cmd
import (
"context"
"errors"
"testing"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/pkg/hash"
)
@@ -51,12 +53,11 @@ var toAPIErrorCodeTests = []struct {
{err: errSignatureMismatch, errCode: ErrSignatureDoesNotMatch},
// SSE-C errors
{err: errInsecureSSERequest, errCode: ErrInsecureSSECustomerRequest},
{err: errInvalidSSEAlgorithm, errCode: ErrInvalidSSECustomerAlgorithm},
{err: errMissingSSEKey, errCode: ErrMissingSSECustomerKey},
{err: errInvalidSSEKey, errCode: ErrInvalidSSECustomerKey},
{err: errMissingSSEKeyMD5, errCode: ErrMissingSSECustomerKeyMD5},
{err: errSSEKeyMD5Mismatch, errCode: ErrSSECustomerKeyMD5Mismatch},
{err: crypto.ErrInvalidCustomerAlgorithm, errCode: ErrInvalidSSECustomerAlgorithm},
{err: crypto.ErrMissingCustomerKey, errCode: ErrMissingSSECustomerKey},
{err: crypto.ErrInvalidCustomerKey, errCode: ErrInvalidSSECustomerKey},
{err: crypto.ErrMissingCustomerKeyMD5, errCode: ErrMissingSSECustomerKeyMD5},
{err: crypto.ErrCustomerKeyMD5Mismatch, errCode: ErrSSECustomerKeyMD5Mismatch},
{err: errObjectTampered, errCode: ErrObjectTampered},
{err: nil, errCode: ErrNone},
@@ -64,8 +65,9 @@ var toAPIErrorCodeTests = []struct {
}
func TestAPIErrCode(t *testing.T) {
ctx := context.Background()
for i, testCase := range toAPIErrorCodeTests {
errCode := toAPIErrorCode(testCase.err)
errCode := toAPIErrorCode(ctx, testCase.err)
if errCode != testCase.errCode {
t.Errorf("Test %d: Expected error code %d, got %d", i+1, testCase.errCode, errCode)
}

View File

@@ -24,6 +24,8 @@ import (
"net/http"
"strconv"
"time"
"github.com/minio/minio/cmd/crypto"
)
// Returns a hexadecimal representation of time at the
@@ -34,15 +36,16 @@ func mustGetRequestID(t time.Time) string {
// Write http common headers
func setCommonHeaders(w http.ResponseWriter) {
// Set unique request ID for each reply.
w.Header().Set(responseRequestIDKey, mustGetRequestID(UTCNow()))
w.Header().Set("Server", globalServerUserAgent)
w.Header().Set("Server", "Minio/"+ReleaseTag)
// Set `x-amz-bucket-region` only if region is set on the server
// by default minio uses an empty region.
if region := globalServerConfig.GetRegion(); region != "" {
w.Header().Set("X-Amz-Bucket-Region", region)
}
w.Header().Set("Accept-Ranges", "bytes")
// Remove sensitive information
crypto.RemoveSensitiveHeaders(w.Header())
}
// Encodes the response headers into XML format.
@@ -63,13 +66,10 @@ func encodeResponseJSON(response interface{}) []byte {
}
// Write object header
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, contentRange *httpRange) {
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec) (err error) {
// set common headers
setCommonHeaders(w)
// Set content length.
w.Header().Set("Content-Length", strconv.FormatInt(objInfo.Size, 10))
// Set last modified time.
lastModified := objInfo.ModTime.UTC().Format(http.TimeFormat)
w.Header().Set("Last-Modified", lastModified)
@@ -97,11 +97,34 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, contentRange *h
w.Header().Set(k, v)
}
// for providing ranged content
if contentRange != nil && contentRange.offsetBegin > -1 {
// Override content-length
w.Header().Set("Content-Length", strconv.FormatInt(contentRange.getLength(), 10))
w.Header().Set("Content-Range", contentRange.String())
w.WriteHeader(http.StatusPartialContent)
var totalObjectSize int64
switch {
case crypto.IsEncrypted(objInfo.UserDefined):
totalObjectSize, err = objInfo.DecryptedSize()
if err != nil {
return err
}
case objInfo.IsCompressed():
totalObjectSize = objInfo.GetActualSize()
if totalObjectSize < 0 {
return errInvalidDecompressedSize
}
default:
totalObjectSize = objInfo.Size
}
// for providing ranged content
start, rangeLen, err := rs.GetOffsetLength(totalObjectSize)
if err != nil {
return err
}
// Set content length.
w.Header().Set("Content-Length", strconv.FormatInt(rangeLen, 10))
if rs != nil {
contentRange := fmt.Sprintf("bytes %d-%d/%d", start, start+rangeLen-1, totalObjectSize)
w.Header().Set("Content-Range", contentRange)
}
return nil
}
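
The Content-Range arithmetic above follows RFC 7233: the header carries an inclusive byte range plus the total (decrypted or decompressed) size. A minimal sketch of the same computation, assuming a hypothetical range [start, start+length) over an object of totalSize bytes:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strconv"
)

func setRangeHeaders(w http.ResponseWriter, start, length, totalSize int64) {
	w.Header().Set("Content-Length", strconv.FormatInt(length, 10))
	// RFC 7233: the byte range is inclusive, hence the -1 on the last byte.
	w.Header().Set("Content-Range",
		fmt.Sprintf("bytes %d-%d/%d", start, start+length-1, totalSize))
	w.WriteHeader(http.StatusPartialContent)
}

func main() {
	rec := httptest.NewRecorder()
	setRangeHeaders(rec, 100, 50, 1000)
	fmt.Println(rec.Header().Get("Content-Range"), rec.Code) // bytes 100-149/1000 206
}
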

View File

@@ -22,15 +22,22 @@ import (
)
// Parse bucket url queries
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType string) {
prefix = values.Get("prefix")
marker = values.Get("marker")
delimiter = values.Get("delimiter")
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType string, errCode APIErrorCode) {
errCode = ErrNone
if values.Get("max-keys") != "" {
maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return
}
} else {
maxkeys = maxObjectList
}
prefix = values.Get("prefix")
marker = values.Get("marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
return
}
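
The point of the rewrite above is that a non-numeric max-keys now yields ErrInvalidMaxKeys instead of the Atoi error being silently discarded. A minimal sketch of that strict-parsing pattern, with maxObjectList standing in for the server default:

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

const maxObjectList = 1000 // stands in for the server default

// parseMaxKeys rejects non-numeric max-keys instead of treating it as zero.
func parseMaxKeys(values url.Values) (int, error) {
	if v := values.Get("max-keys"); v != "" {
		n, err := strconv.Atoi(v)
		if err != nil {
			return 0, fmt.Errorf("invalid max-keys %q: %w", v, err)
		}
		return n, nil
	}
	return maxObjectList, nil
}

func main() {
	q, _ := url.ParseQuery("max-keys=abc")
	if _, err := parseMaxKeys(q); err != nil {
		fmt.Println(err) // invalid max-keys "abc": ...
	}
}
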
@@ -47,44 +54,69 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
}
}
if values.Get("max-keys") != "" {
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return
}
} else {
maxkeys = maxObjectList
}
prefix = values.Get("prefix")
token = values.Get("continuation-token")
startAfter = values.Get("start-after")
delimiter = values.Get("delimiter")
if values.Get("max-keys") != "" {
maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
} else {
maxkeys = maxObjectList
}
fetchOwner = values.Get("fetch-owner") == "true"
encodingType = values.Get("encoding-type")
return
}
// Parse bucket url queries for ?uploads
func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int, encodingType string) {
func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int, encodingType string, errCode APIErrorCode) {
errCode = ErrNone
if values.Get("max-uploads") != "" {
var err error
if maxUploads, err = strconv.Atoi(values.Get("max-uploads")); err != nil {
errCode = ErrInvalidMaxUploads
return
}
} else {
maxUploads = maxUploadsList
}
prefix = values.Get("prefix")
keyMarker = values.Get("key-marker")
uploadIDMarker = values.Get("upload-id-marker")
delimiter = values.Get("delimiter")
if values.Get("max-uploads") != "" {
maxUploads, _ = strconv.Atoi(values.Get("max-uploads"))
} else {
maxUploads = maxUploadsList
}
encodingType = values.Get("encoding-type")
return
}
// Parse object url queries
func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string) {
uploadID = values.Get("uploadId")
partNumberMarker, _ = strconv.Atoi(values.Get("part-number-marker"))
func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string, errCode APIErrorCode) {
var err error
errCode = ErrNone
if values.Get("max-parts") != "" {
maxParts, _ = strconv.Atoi(values.Get("max-parts"))
if maxParts, err = strconv.Atoi(values.Get("max-parts")); err != nil {
errCode = ErrInvalidMaxParts
return
}
} else {
maxParts = maxPartsList
}
if values.Get("part-number-marker") != "" {
if partNumberMarker, err = strconv.Atoi(values.Get("part-number-marker")); err != nil {
errCode = ErrInvalidPartNumberMarker
return
}
}
uploadID = values.Get("uploadId")
encodingType = values.Get("encoding-type")
return
}

View File

@@ -156,7 +156,10 @@ func TestListObjectsV1Resources(t *testing.T) {
}
for i, testCase := range testCases {
prefix, marker, delimiter, maxKeys, encodingType := getListObjectsV1Args(testCase.values)
prefix, marker, delimiter, maxKeys, encodingType, argsErr := getListObjectsV1Args(testCase.values)
if argsErr != ErrNone {
t.Errorf("Test %d: argument parsing failed, got %v", i+1, argsErr)
}
if prefix != testCase.prefix {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.prefix, prefix)
}
@@ -198,7 +201,10 @@ func TestGetObjectsResources(t *testing.T) {
}
for i, testCase := range testCases {
uploadID, partNumberMarker, maxParts, encodingType := getObjectResources(testCase.values)
uploadID, partNumberMarker, maxParts, encodingType, argsErr := getObjectResources(testCase.values)
if argsErr != ErrNone {
t.Errorf("Test %d: argument parsing failed, got %v", i+1, argsErr)
}
if uploadID != testCase.uploadID {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.uploadID, uploadID)
}

View File

@@ -16,7 +16,10 @@
package cmd
import "net/http"
import (
"context"
"net/http"
)
// Represents additional fields necessary for ErrPartTooSmall S3 error.
type completeMultipartAPIError struct {
@@ -42,9 +45,9 @@ type completeMultipartAPIError struct {
// of this function.
func writePartSmallErrorResponse(w http.ResponseWriter, r *http.Request, err PartTooSmall) {
apiError := getAPIError(toAPIErrorCode(err))
apiError := getAPIError(toAPIErrorCode(context.Background(), err))
// Generate complete multipart error response.
errorResponse := getAPIErrorResponse(apiError, r.URL.Path)
errorResponse := getAPIErrorResponse(apiError, r.URL.Path, w.Header().Get(responseRequestIDKey))
cmpErrResp := completeMultipartAPIError{err.PartSize, int64(5242880), err.PartNumber, err.PartETag, errorResponse}
encodedErrorResponse := encodeResponse(cmpErrResp)

View File

@@ -568,16 +568,25 @@ func writeSuccessResponseHeadersOnly(w http.ResponseWriter) {
}
// writeErrorResponse writes error headers
func writeErrorResponse(w http.ResponseWriter, errorCode APIErrorCode, reqURL *url.URL) {
func writeErrorResponse(w http.ResponseWriter, errorCode APIErrorCode, reqURL *url.URL, browser bool) {
switch errorCode {
case ErrSlowDown, ErrServerNotInitialized, ErrReadQuorum, ErrWriteQuorum:
// Set retry-after header to indicate user-agents to retry request after 120secs.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set("Retry-After", "120")
case ErrAccessDenied:
// If the request is from a browser and the browser is
// enabled, we need to redirect.
if browser && globalIsBrowserEnabled {
w.Header().Set("Location", minioReservedBucketPath+reqURL.Path)
w.WriteHeader(http.StatusTemporaryRedirect)
return
}
}
apiError := getAPIError(errorCode)
// Generate error response.
errorResponse := getAPIErrorResponse(apiError, reqURL.Path)
errorResponse := getAPIErrorResponse(apiError, reqURL.Path, w.Header().Get(responseRequestIDKey))
encodedErrorResponse := encodeResponse(errorResponse)
writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML)
}
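
For retry-class errors, writeErrorResponse advertises a Retry-After window before the error body. A minimal sketch of that behavior using only the standard library; the status code and message here are illustrative:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// writeThrottled mirrors the throttling branch above: advertise a retry
// window (seconds, per RFC 7231) before writing the error body.
func writeThrottled(w http.ResponseWriter) {
	w.Header().Set("Retry-After", "120")
	http.Error(w, "SlowDown: please reduce your request rate", http.StatusServiceUnavailable)
}

func main() {
	rec := httptest.NewRecorder()
	writeThrottled(rec)
	fmt.Println(rec.Header().Get("Retry-After"), rec.Code) // 120 503
}
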
@@ -592,7 +601,7 @@ func writeErrorResponseHeadersOnly(w http.ResponseWriter, errorCode APIErrorCode
func writeErrorResponseJSON(w http.ResponseWriter, errorCode APIErrorCode, reqURL *url.URL) {
apiError := getAPIError(errorCode)
// Generate error response.
errorResponse := getAPIErrorResponse(apiError, reqURL.Path)
errorResponse := getAPIErrorResponse(apiError, reqURL.Path, w.Header().Get(responseRequestIDKey))
encodedErrorResponse := encodeResponseJSON(errorResponse)
writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}
@@ -608,7 +617,7 @@ func writeCustomErrorResponseJSON(w http.ResponseWriter, errorCode APIErrorCode,
Code: apiError.Code,
Message: errBody,
Resource: reqURL.Path,
RequestID: "3L137",
RequestID: w.Header().Get(responseRequestIDKey),
HostID: "3L137",
}
encodedErrorResponse := encodeResponseJSON(errorResponse)

View File

@@ -20,29 +20,25 @@ import (
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
)
// objectAPIHandler implements and provides http handlers for S3 API.
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
CacheAPI func() CacheObjectLayer
// Returns true if handlers should interpret encryption.
EncryptionEnabled func() bool
}
// registerAPIRouter - registers S3 compatible APIs.
func registerAPIRouter(router *mux.Router) {
var err error
var cacheConfig = globalServerConfig.GetCacheConfig()
if len(cacheConfig.Drives) > 0 {
// initialize the new disk cache objects.
globalCacheObjectAPI, err = newServerCacheObjects(cacheConfig)
logger.FatalIf(err, "Unable to initialize disk caching")
}
func registerAPIRouter(router *mux.Router, encryptionEnabled bool) {
// Initialize API.
api := objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
CacheAPI: newCacheObjectsFn,
EncryptionEnabled: func() bool {
return encryptionEnabled
},
}
// API Router
@@ -71,6 +67,8 @@ func registerAPIRouter(router *mux.Router) {
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.AbortMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}")
// GetObjectACL - this is a dummy call.
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectACLHandler)).Queries("acl", "")
// SelectObjectContent
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.SelectObjectContentHandler)).Queries("select", "").Queries("select-type", "2")
// GetObject
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectHandler))
// CopyObject
@@ -108,7 +106,7 @@ func registerAPIRouter(router *mux.Router) {
// HeadBucket
bucket.Methods("HEAD").HandlerFunc(httpTraceAll(api.HeadBucketHandler))
// PostPolicy
bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(httpTraceAll(api.PostPolicyBucketHandler))
bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(httpTraceHdrs(api.PostPolicyBucketHandler))
// DeleteMultipleObjects
bucket.Methods("POST").HandlerFunc(httpTraceAll(api.DeleteMultipleObjectsHandler)).Queries("delete", "")
// DeleteBucketPolicy

View File

@@ -19,6 +19,7 @@ package cmd
import (
"bytes"
"context"
"crypto/subtle"
"encoding/base64"
"encoding/hex"
"errors"
@@ -27,8 +28,11 @@ import (
"net/http"
"strings"
jwtgo "github.com/dgrijalva/jwt-go"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/policy"
)
@@ -86,6 +90,7 @@ const (
authTypeSigned
authTypeSignedV2
authTypeJWT
authTypeSTS
)
// Get request authentication type.
@@ -104,6 +109,8 @@ func getRequestAuthType(r *http.Request) authType {
return authTypeJWT
} else if isRequestPostPolicySignatureV4(r) {
return authTypePostPolicy
} else if _, ok := r.URL.Query()["Action"]; ok {
return authTypeSTS
} else if _, ok := r.Header["Authorization"]; !ok {
return authTypeAnonymous
}
@@ -112,43 +119,140 @@ func getRequestAuthType(r *http.Request) authType {
// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
// It does not accept presigned or JWT or anonymous requests.
func checkAdminRequestAuthType(r *http.Request, region string) APIErrorCode {
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, region string) APIErrorCode {
s3Err := ErrAccessDenied
if _, ok := r.Header["X-Amz-Content-Sha256"]; ok && getRequestAuthType(r) == authTypeSigned && !skipContentSha256Cksum(r) { // we only support V4 (no presign) with auth. body
s3Err = isReqAuthenticated(r, region)
if _, ok := r.Header["X-Amz-Content-Sha256"]; ok &&
getRequestAuthType(r) == authTypeSigned && !skipContentSha256Cksum(r) {
// We only support admin credentials to access admin APIs.
var owner bool
_, owner, s3Err = getReqAccessKeyV4(r, region)
if s3Err != ErrNone {
return s3Err
}
if !owner {
return ErrAccessDenied
}
// we only support V4 (no presign) with auth body
s3Err = isReqAuthenticated(ctx, r, region)
}
if s3Err != ErrNone {
reqInfo := (&logger.ReqInfo{}).AppendTags("requestHeaders", dumpRequest(r))
ctx := logger.SetReqInfo(context.Background(), reqInfo)
ctx := logger.SetReqInfo(ctx, reqInfo)
logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description))
}
return s3Err
}
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) APIErrorCode {
isOwner := true
accountName := globalServerConfig.GetCredential().AccessKey
// Fetch the security token set by the client.
func getSessionToken(r *http.Request) (token string) {
token = r.Header.Get("X-Amz-Security-Token")
if token != "" {
return token
}
return r.URL.Query().Get("X-Amz-Security-Token")
}
// Fetch claims in the security token returned by the client; doesn't return
// errors - upon an error the returned claims map will be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
claims, _ := getClaimsFromToken(r)
return claims
}
// Fetch claims in the security token returned by the client.
func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
claims := make(map[string]interface{})
token := getSessionToken(r)
if token == "" {
return claims, nil
}
stsTokenCallback := func(jwtToken *jwtgo.Token) (interface{}, error) {
// The JWT token for x-amz-security-token is signed with the admin
// secret key, so temporary credentials become invalid if the
// server admin credentials change. This is done to ensure
// that clients cannot decode the token using the temp
// secret keys and generate an entirely new claim by essentially
// hijacking the policies. We need to make sure that this is
// based on an admin credential so that the token cannot be decoded
// on the client side and is treated like an opaque value.
return []byte(globalServerConfig.GetCredential().SecretKey), nil
}
p := &jwtgo.Parser{
ValidMethods: []string{
jwtgo.SigningMethodHS256.Alg(),
jwtgo.SigningMethodHS512.Alg(),
},
}
jtoken, err := p.ParseWithClaims(token, jwtgo.MapClaims(claims), stsTokenCallback)
if err != nil {
return nil, err
}
if !jtoken.Valid {
return nil, errAuthentication
}
v, ok := claims["accessKey"]
if !ok {
return nil, errInvalidAccessKeyID
}
if _, ok = v.(string); !ok {
return nil, errInvalidAccessKeyID
}
return claims, nil
}
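
getClaimsFromToken restricts the accepted signing methods and verifies the token against a server-side secret. A round-trip sketch with github.com/dgrijalva/jwt-go, assuming a hypothetical secret in place of the admin credential:

package main

import (
	"fmt"

	jwtgo "github.com/dgrijalva/jwt-go"
)

func main() {
	secret := []byte("server-secret") // stands in for the admin secret key

	// Sign a token carrying an accessKey claim.
	token := jwtgo.NewWithClaims(jwtgo.SigningMethodHS256,
		jwtgo.MapClaims{"accessKey": "tempuser"})
	signed, err := token.SignedString(secret)
	if err != nil {
		panic(err)
	}

	// Parse it back, allowing only HS256, as in the allow-list above.
	claims := jwtgo.MapClaims{}
	p := &jwtgo.Parser{ValidMethods: []string{jwtgo.SigningMethodHS256.Alg()}}
	parsed, err := p.ParseWithClaims(signed, claims, func(*jwtgo.Token) (interface{}, error) {
		return secret, nil
	})
	if err != nil || !parsed.Valid {
		panic("token rejected")
	}
	fmt.Println(claims["accessKey"]) // tempuser
}
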
// Fetch claims in the security token returned by the client and validate the token.
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
token := getSessionToken(r)
if token != "" && cred.AccessKey == "" {
return nil, ErrNoAccessKey
}
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
return nil, ErrInvalidToken
}
claims, err := getClaimsFromToken(r)
if err != nil {
return nil, toAPIErrorCode(context.Background(), err)
}
return claims, ErrNone
}
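
subtle.ConstantTimeCompare is used above so that comparing the presented token against the stored session token does not leak, via timing, how many leading bytes match. A minimal sketch:

package main

import (
	"crypto/subtle"
	"fmt"
)

// tokensMatch returns true only on exact equality; for equal-length
// inputs the comparison takes time independent of where they differ,
// unlike a plain == on strings.
func tokensMatch(got, want string) bool {
	return subtle.ConstantTimeCompare([]byte(got), []byte(want)) == 1
}

func main() {
	fmt.Println(tokensMatch("session-abc", "session-abc")) // true
	fmt.Println(tokensMatch("session-abc", "session-xyz")) // false
}
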
// Check request auth type verifies the incoming http request
// - validates the request signature
// - validates the policy action: for anonymous requests it tests bucket policies, if any;
// for authenticated requests it validates IAM policies.
// returns APIErrorCode if any to be replied to the client.
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
var cred auth.Credentials
var owner bool
switch getRequestAuthType(r) {
case authTypeUnknown:
case authTypeUnknown, authTypeStreamingSigned:
return ErrAccessDenied
case authTypePresignedV2, authTypeSignedV2:
if errorCode := isReqAuthenticatedV2(r); errorCode != ErrNone {
return errorCode
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return s3Err
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeSigned, authTypePresigned:
region := globalServerConfig.GetRegion()
switch action {
case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
region = ""
}
if errorCode := isReqAuthenticated(r, region); errorCode != ErrNone {
return errorCode
if s3Err = isReqAuthenticated(ctx, r, region); s3Err != ErrNone {
return s3Err
}
default:
isOwner = false
accountName = ""
cred, owner, s3Err = getReqAccessKeyV4(r, region)
}
if s3Err != ErrNone {
return s3Err
}
claims, s3Err := checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return s3Err
}
// LocationConstraint is valid only for CreateBucketAction.
@@ -174,17 +278,31 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
}
if globalPolicySys.IsAllowed(policy.Args{
AccountName: accountName,
Action: action,
if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: action,
BucketName: bucketName,
ConditionValues: getConditionValues(r, locationConstraint, ""),
IsOwner: false,
ObjectName: objectName,
}) {
return ErrNone
}
return ErrAccessDenied
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, locationConstraint),
IsOwner: isOwner,
ConditionValues: getConditionValues(r, "", cred.AccessKey),
ObjectName: objectName,
IsOwner: owner,
Claims: claims,
}) {
return ErrNone
}
return ErrAccessDenied
}
@@ -209,7 +327,7 @@ func reqSignatureV4Verify(r *http.Request, region string) (s3Error APIErrorCode)
}
// Verify if request has valid AWS Signature Version '4'.
func isReqAuthenticated(r *http.Request, region string) (s3Error APIErrorCode) {
func isReqAuthenticated(ctx context.Context, r *http.Request, region string) (s3Error APIErrorCode) {
if errCode := reqSignatureV4Verify(r, region); errCode != ErrNone {
return errCode
}
@@ -244,9 +362,9 @@ func isReqAuthenticated(r *http.Request, region string) (s3Error APIErrorCode) {
// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
// The verification happens implicit during reading.
reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256))
reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256), -1)
if err != nil {
return toAPIErrorCode(err)
return toAPIErrorCode(ctx, err)
}
r.Body = ioutil.NopCloser(reader)
return ErrNone
@@ -288,12 +406,67 @@ func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
} else if aType == authTypeJWT {
// Validate Authorization header if its valid for JWT request.
if !isHTTPRequestValid(r) {
if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
w.WriteHeader(http.StatusUnauthorized)
return
}
a.handler.ServeHTTP(w, r)
return
} else if aType == authTypeSTS {
a.handler.ServeHTTP(w, r)
return
}
writeErrorResponse(w, ErrSignatureVersionNotSupported, r.URL)
writeErrorResponse(w, ErrSignatureVersionNotSupported, r.URL, guessIsBrowserReq(r))
}
// isPutAllowed - checks if a PUT operation is allowed on the resource; this
// call verifies bucket policies and IAM policies, and supports multi-user
// checks etc.
func isPutAllowed(atype authType, bucketName, objectName string, r *http.Request) (s3Err APIErrorCode) {
var cred auth.Credentials
var owner bool
switch atype {
case authTypeUnknown:
return ErrAccessDenied
case authTypeSignedV2, authTypePresignedV2:
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
region := globalServerConfig.GetRegion()
cred, owner, s3Err = getReqAccessKeyV4(r, region)
}
if s3Err != ErrNone {
return s3Err
}
claims, s3Err := checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return s3Err
}
if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.PutObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", ""),
IsOwner: false,
ObjectName: objectName,
}) {
return ErrNone
}
return ErrAccessDenied
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: policy.PutObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey),
ObjectName: objectName,
IsOwner: owner,
Claims: claims,
}) {
return ErrNone
}
return ErrAccessDenied
}

View File

@@ -18,6 +18,7 @@ package cmd
import (
"bytes"
"context"
"io"
"io/ioutil"
"net/http"
@@ -344,11 +345,14 @@ func mustNewSignedBadMD5Request(method string, urlStr string, contentLength int6
// Tests is requested authenticated function, tests replies for s3 errors.
func TestIsReqAuthenticated(t *testing.T) {
path, err := newTestConfig(globalMinioDefaultRegion)
objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
t.Fatalf("unable initialize config file, %s", err)
}
defer os.RemoveAll(path)
creds, err := auth.CreateCredentials("myuser", "mypassword")
if err != nil {
@@ -374,21 +378,26 @@ func TestIsReqAuthenticated(t *testing.T) {
{mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
}
ctx := context.Background()
// Validates all testcases.
for i, testCase := range testCases {
if s3Error := isReqAuthenticated(testCase.req, globalServerConfig.GetRegion()); s3Error != testCase.s3Error {
if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(err) != testCase.s3Error {
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %d)", i, testCase.s3Error, s3Error, toAPIErrorCode(err))
if s3Error := isReqAuthenticated(ctx, testCase.req, globalServerConfig.GetRegion()); s3Error != testCase.s3Error {
if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %d)", i, testCase.s3Error, s3Error, toAPIErrorCode(ctx, err))
}
}
}
}
func TestCheckAdminRequestAuthType(t *testing.T) {
path, err := newTestConfig(globalMinioDefaultRegion)
objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
t.Fatalf("unable initialize config file, %s", err)
}
defer os.RemoveAll(path)
creds, err := auth.CreateCredentials("myuser", "mypassword")
if err != nil {
@@ -406,8 +415,9 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
{Request: mustNewPresignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
}
ctx := context.Background()
for i, testCase := range testCases {
if s3Error := checkAdminRequestAuthType(testCase.Request, globalServerConfig.GetRegion()); s3Error != testCase.ErrCode {
if s3Error := checkAdminRequestAuthType(ctx, testCase.Request, globalServerConfig.GetRegion()); s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
}
}

View File

@@ -22,7 +22,6 @@ import (
"io/ioutil"
"math"
"math/rand"
"os"
"strconv"
"testing"
@@ -62,7 +61,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
for i := 0; i < b.N; i++ {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -98,7 +97,8 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
// generate md5sum for the generated data.
// md5sum of the data to written is required as input for NewMultipartUpload.
metadata := make(map[string]string)
uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, metadata)
opts := ObjectOptions{}
uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, metadata, opts)
if err != nil {
b.Fatal(err)
}
@@ -123,7 +123,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
md5hex = getMD5Hash([]byte(textPartData))
var partInfo PartInfo
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
mustGetHashReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex))
mustGetPutObjReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), opts)
if err != nil {
b.Fatal(err)
}
@@ -138,12 +138,6 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
b.Fatalf("Unable to initialize config. %s", err)
}
defer os.RemoveAll(rootPath)
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
if err != nil {
@@ -151,18 +145,13 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
}
// cleaning up the backend by removing all the directories and files created on function return.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runPutObjectPartBenchmark(b, objLayer, objSize)
}
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
b.Fatalf("Unable to initialize config. %s", err)
}
defer os.RemoveAll(rootPath)
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
if err != nil {
@@ -170,18 +159,13 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
}
// cleaning up the backend by removing all the directories and files created on function return.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runPutObjectBenchmark(b, objLayer, objSize)
}
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
b.Fatalf("Unable to initialize config. %s", err)
}
defer os.RemoveAll(rootPath)
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
if err != nil {
@@ -189,6 +173,7 @@ func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int)
}
// cleaning up the backend by removing all the directories and files created on function return.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runPutObjectBenchmarkParallel(b, objLayer, objSize)
}
@@ -196,16 +181,10 @@ func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int)
// Benchmark utility functions for ObjectLayer.GetObject().
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
b.Fatalf("Unable to initialize config. %s", err)
}
defer os.RemoveAll(rootPath)
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil {
b.Fatal(err)
}
@@ -225,7 +204,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -240,7 +219,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
var buffer = new(bytes.Buffer)
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "")
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "", ObjectOptions{})
if err != nil {
b.Error(err)
}
@@ -269,12 +248,6 @@ func generateBytesData(size int) []byte {
// creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
b.Fatalf("Unable to initialize config. %s", err)
}
defer os.RemoveAll(rootPath)
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
if err != nil {
@@ -282,18 +255,13 @@ func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
}
// cleaning up the backend by removing all the directories and files created.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runGetObjectBenchmark(b, objLayer, objSize)
}
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
b.Fatalf("Unable to initialize config. %s", err)
}
defer os.RemoveAll(rootPath)
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
if err != nil {
@@ -301,6 +269,7 @@ func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int)
}
// cleaning up the backend by removing all the directories and files created.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runGetObjectBenchmarkParallel(b, objLayer, objSize)
}
@@ -308,16 +277,10 @@ func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int)
// Parallel benchmark utility functions for ObjectLayer.PutObject().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
b.Fatalf("Unable to initialize config. %s", err)
}
defer os.RemoveAll(rootPath)
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil {
b.Fatal(err)
}
@@ -341,7 +304,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
for pb.Next() {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -359,16 +322,10 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// Parallel benchmark utility functions for ObjectLayer.GetObject().
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
b.Fatalf("Unable to initialize config. %s", err)
}
defer os.RemoveAll(rootPath)
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil {
b.Fatal(err)
}
@@ -387,7 +344,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -403,7 +360,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
b.RunParallel(func(pb *testing.PB) {
i := 0
for pb.Next() {
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "")
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "", ObjectOptions{})
if err != nil {
b.Error(err)
}

172
cmd/bitrot-streaming.go Normal file
View File

@@ -0,0 +1,172 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"encoding/hex"
"hash"
"io"
"github.com/minio/minio/cmd/logger"
)
// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow *io.PipeWriter
h hash.Hash
shardSize int64
canClose chan struct{} // Needed to avoid race explained in Close() call.
// Following two fields are used only to make sure that Write(p) is called such that
// len(p) is always the block size except the last block, i.e. to prevent programmer errors.
currentBlockIdx int
verifyTillIdx int
}
func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
if b.currentBlockIdx < b.verifyTillIdx && int64(len(p)) != b.shardSize {
// All blocks except last should be of the length b.shardSize
logger.LogIf(context.Background(), errUnexpected)
return 0, errUnexpected
}
if len(p) == 0 {
return 0, nil
}
b.h.Reset()
b.h.Write(p)
hashBytes := b.h.Sum(nil)
n, err := b.iow.Write(hashBytes)
if n != len(hashBytes) {
logger.LogIf(context.Background(), err)
return 0, err
}
n, err = b.iow.Write(p)
b.currentBlockIdx++
return n, err
}
func (b *streamingBitrotWriter) Close() error {
err := b.iow.Close()
// Wait for all data to be written before returning else it causes race conditions.
// The race condition comes from the io.PipeWriter implementation, i.e. consider the following
// sequence of operations:
// 1) pipe.Write()
// 2) pipe.Close()
// Now pipe.Close() can return before the data is read on the other end of the pipe and written to the disk
// Hence an immediate Read() on the file can return incorrect data.
<-b.canClose
return err
}
// Returns streaming bitrot writer implementation.
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
r, w := io.Pipe()
h := algo.New()
bw := &streamingBitrotWriter{w, h, shardSize, make(chan struct{}), 0, int(length / shardSize)}
go func() {
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
totalFileSize := bitrotSumsTotalSize + length
err := disk.CreateFile(volume, filePath, totalFileSize, r)
if err != nil {
logger.LogIf(context.Background(), err)
r.CloseWithError(err)
}
close(bw.canClose)
}()
return bw
}
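
The canClose channel exists because io.PipeWriter.Close can return before the reader side has persisted the data. A standalone reproduction of the wait-for-drain pattern, with io.Copy to ioutil.Discard standing in for disk.CreateFile:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	r, w := io.Pipe()
	done := make(chan struct{})
	go func() {
		// Consumer drains the pipe until EOF, then signals completion.
		n, _ := io.Copy(ioutil.Discard, r)
		fmt.Println("consumer drained", n, "bytes")
		close(done)
	}()
	w.Write([]byte("payload"))
	err := w.Close()
	<-done // mirror streamingBitrotWriter.Close: wait for the drain
	fmt.Println("close err:", err)
}
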
// ReadAt() implementation which verifies the bitrot hash available as part of the stream.
type streamingBitrotReader struct {
disk StorageAPI
rc io.ReadCloser
volume string
filePath string
tillOffset int64
currOffset int64
h hash.Hash
shardSize int64
hashBytes []byte
}
func (b *streamingBitrotReader) Close() error {
if b.rc == nil {
return nil
}
return b.rc.Close()
}
func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
var err error
if offset%b.shardSize != 0 {
// Offset should always be aligned to b.shardSize
logger.LogIf(context.Background(), errUnexpected)
return 0, errUnexpected
}
if b.rc == nil {
// For the first ReadAt() call we need to open the stream for reading.
b.currOffset = offset
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
b.rc, err = b.disk.ReadFileStream(b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
if err != nil {
logger.LogIf(context.Background(), err)
return 0, err
}
}
if offset != b.currOffset {
logger.LogIf(context.Background(), errUnexpected)
return 0, errUnexpected
}
b.h.Reset()
_, err = io.ReadFull(b.rc, b.hashBytes)
if err != nil {
logger.LogIf(context.Background(), err)
return 0, err
}
_, err = io.ReadFull(b.rc, buf)
if err != nil {
logger.LogIf(context.Background(), err)
return 0, err
}
b.h.Write(buf)
if bytes.Compare(b.h.Sum(nil), b.hashBytes) != 0 {
err = hashMismatchError{hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))}
logger.LogIf(context.Background(), err)
return 0, err
}
b.currOffset += int64(len(buf))
return len(buf), nil
}
// Returns streaming bitrot reader implementation.
func newStreamingBitrotReader(disk StorageAPI, volume, filePath string, tillOffset int64, algo BitrotAlgorithm, shardSize int64) *streamingBitrotReader {
h := algo.New()
return &streamingBitrotReader{
disk,
nil,
volume,
filePath,
ceilFrac(tillOffset, shardSize)*int64(h.Size()) + tillOffset,
0,
h,
shardSize,
make([]byte, h.Size()),
}
}
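
Worked numbers for the streaming layout above: each shard of data is preceded by its checksum, so sizes and offsets follow directly from shardSize and the hash size. The values below are illustrative:

package main

import "fmt"

// ceilFrac rounds the quotient up; positive inputs assumed in this sketch.
func ceilFrac(num, den int64) int64 { return (num + den - 1) / den }

func main() {
	const (
		length    = int64(10 << 20) // 10 MiB of shard data
		shardSize = int64(1 << 20)  // 1 MiB erasure shard
		hashSize  = int64(32)       // HighwayHash-256 sum
	)
	// On-disk size: one checksum per shard, interleaved with the data.
	total := ceilFrac(length, shardSize)*hashSize + length
	fmt.Println("bytes on disk:", total) // 10486080 = 10485760 data + 320 checksums

	// Shard-aligned data offset -> stream offset, as in ReadAt above.
	off := int64(3 << 20) // read the fourth shard
	fmt.Println("stream offset:", (off/shardSize)*hashSize+off) // 3145824
}
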

109
cmd/bitrot-whole.go Normal file
View File

@@ -0,0 +1,109 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"hash"
"io"
"github.com/minio/minio/cmd/logger"
)
// Implementation to calculate bitrot for the whole file.
type wholeBitrotWriter struct {
disk StorageAPI
volume string
filePath string
shardSize int64 // This is the shard size of the erasure logic
hash.Hash // For bitrot hash
// Following two fields are used only to make sure that Write(p) is called such that
// len(p) is always the block size except the last block, to prevent programmer errors.
currentBlockIdx int
lastBlockIdx int
}
func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
if b.currentBlockIdx < b.lastBlockIdx && int64(len(p)) != b.shardSize {
// All blocks except last should be of the length b.shardSize
logger.LogIf(context.Background(), errUnexpected)
return 0, errUnexpected
}
err := b.disk.AppendFile(b.volume, b.filePath, p)
if err != nil {
logger.LogIf(context.Background(), err)
return 0, err
}
_, err = b.Hash.Write(p)
if err != nil {
logger.LogIf(context.Background(), err)
return 0, err
}
b.currentBlockIdx++
return len(p), nil
}
func (b *wholeBitrotWriter) Close() error {
return nil
}
// Returns whole-file bitrot writer.
func newWholeBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
return &wholeBitrotWriter{disk, volume, filePath, shardSize, algo.New(), 0, int(length / shardSize)}
}
// Implementation to verify bitrot for the whole file.
type wholeBitrotReader struct {
disk StorageAPI
volume string
filePath string
verifier *BitrotVerifier // Holds the bit-rot info
tillOffset int64 // Affects the length of data requested in disk.ReadFile depending on Read()'s offset
buf []byte // Holds bit-rot verified data
}
func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error) {
if b.buf == nil {
b.buf = make([]byte, b.tillOffset-offset)
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
ctx := context.Background()
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
logger.LogIf(ctx, err)
return 0, err
}
}
if len(b.buf) < len(buf) {
logger.LogIf(context.Background(), errLessData)
return 0, errLessData
}
n = copy(buf, b.buf)
b.buf = b.buf[n:]
return n, nil
}
// Returns whole-file bitrot reader.
func newWholeBitrotReader(disk StorageAPI, volume, filePath string, algo BitrotAlgorithm, tillOffset int64, sum []byte) *wholeBitrotReader {
return &wholeBitrotReader{
disk: disk,
volume: volume,
filePath: filePath,
verifier: &BitrotVerifier{algo, sum},
tillOffset: tillOffset,
buf: nil,
}
}
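
wholeBitrotReader fetches and verifies the entire file on the first ReadAt and then serves forward reads by slicing the buffer. A sketch of that read-once pattern, with a stub in place of disk.ReadFile and the bitrot verifier:

package main

import "fmt"

// bufReader assumes callers read strictly forward, as the erasure
// decoder does; the whole verified payload is fetched exactly once.
type bufReader struct {
	fetch func() []byte // stands in for disk.ReadFile + bitrot check
	buf   []byte
	ready bool
}

func (b *bufReader) Read(p []byte) (int, error) {
	if !b.ready {
		b.buf, b.ready = b.fetch(), true
	}
	n := copy(p, b.buf)
	b.buf = b.buf[n:] // advance past the bytes already served
	return n, nil
}

func main() {
	r := &bufReader{fetch: func() []byte { return []byte("verified-bytes") }}
	p := make([]byte, 8)
	n, _ := r.Read(p)
	fmt.Printf("%s\n", p[:n]) // verified
}
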

187
cmd/bitrot.go Normal file
View File

@@ -0,0 +1,187 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"errors"
"hash"
"io"
"github.com/minio/highwayhash"
"github.com/minio/minio/cmd/logger"
sha256 "github.com/minio/sha256-simd"
"golang.org/x/crypto/blake2b"
)
// magic HH-256 key: the HH-256 hash of the first 100 decimal digits of π, as a UTF-8 string, computed with a zero key.
var magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0")
// BitrotAlgorithm specifies an algorithm used for bitrot protection.
type BitrotAlgorithm uint
const (
// SHA256 represents the SHA-256 hash function
SHA256 BitrotAlgorithm = 1 + iota
// HighwayHash256 represents the HighwayHash-256 hash function
HighwayHash256
// HighwayHash256S represents the Streaming HighwayHash-256 hash function
HighwayHash256S
// BLAKE2b512 represents the BLAKE2b-512 hash function
BLAKE2b512
)
// DefaultBitrotAlgorithm is the default algorithm used for bitrot protection.
const (
DefaultBitrotAlgorithm = HighwayHash256S
)
var bitrotAlgorithms = map[BitrotAlgorithm]string{
SHA256: "sha256",
BLAKE2b512: "blake2b",
HighwayHash256: "highwayhash256",
HighwayHash256S: "highwayhash256S",
}
// New returns a new hash.Hash calculating the given bitrot algorithm.
func (a BitrotAlgorithm) New() hash.Hash {
switch a {
case SHA256:
return sha256.New()
case BLAKE2b512:
b2, _ := blake2b.New512(nil) // New512 never returns an error if the key is nil
return b2
case HighwayHash256:
hh, _ := highwayhash.New(magicHighwayHash256Key) // New never returns an error since the key is 256-bit
return hh
case HighwayHash256S:
hh, _ := highwayhash.New(magicHighwayHash256Key) // New never returns an error since the key is 256-bit
return hh
default:
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
return nil
}
}
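One note on the HighwayHash cases above: the key passed to highwayhash.New must be exactly 32 bytes, which is why the fixed magic key lets the constructor's error be safely ignored. A small sketch of computing a HighwayHash-256 digest directly, assuming the github.com/minio/highwayhash package as imported above (the zeroed key here is only for illustration):

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/minio/highwayhash"
)

func main() {
	key := make([]byte, 32) // any 32-byte key; the server uses a fixed magic key
	h, err := highwayhash.New(key)
	if err != nil {
		// Only possible if the key is not 32 bytes long.
		panic(err)
	}
	h.Write([]byte("hello, bitrot"))
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}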
// Available reports whether the given algorithm is available.
func (a BitrotAlgorithm) Available() bool {
_, ok := bitrotAlgorithms[a]
return ok
}
// String returns the string identifier for a given bitrot algorithm.
// If the algorithm is not supported String panics.
func (a BitrotAlgorithm) String() string {
name, ok := bitrotAlgorithms[a]
if !ok {
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
}
return name
}
// NewBitrotVerifier returns a new BitrotVerifier implementing the given algorithm.
func NewBitrotVerifier(algorithm BitrotAlgorithm, checksum []byte) *BitrotVerifier {
return &BitrotVerifier{algorithm, checksum}
}
// BitrotVerifier can be used to verify protected data.
type BitrotVerifier struct {
algorithm BitrotAlgorithm
sum []byte
}
// BitrotAlgorithmFromString returns a bitrot algorithm from the given string representation.
// It returns 0 if the string representation does not match any supported algorithm.
// The zero value of a bitrot algorithm is never supported.
func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
for alg, name := range bitrotAlgorithms {
if name == s {
return alg
}
}
return
}
func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
if algo == HighwayHash256S {
return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize)
}
return newWholeBitrotWriter(disk, volume, filePath, length, algo, shardSize)
}
func newBitrotReader(disk StorageAPI, bucket string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) io.ReaderAt {
if algo == HighwayHash256S {
return newStreamingBitrotReader(disk, bucket, filePath, tillOffset, algo, shardSize)
}
return newWholeBitrotReader(disk, bucket, filePath, algo, tillOffset, sum)
}
// Close all the readers.
func closeBitrotReaders(rs []io.ReaderAt) {
for _, r := range rs {
if br, ok := r.(*streamingBitrotReader); ok {
br.Close()
}
}
}
// Close all the writers.
func closeBitrotWriters(ws []io.Writer) {
for _, w := range ws {
if bw, ok := w.(*streamingBitrotWriter); ok {
bw.Close()
}
}
}
// Returns hash sum for whole-bitrot, nil for streaming-bitrot.
func bitrotWriterSum(w io.Writer) []byte {
if bw, ok := w.(*wholeBitrotWriter); ok {
return bw.Sum(nil)
}
return nil
}
// Verify whether a file has a bitrot error.
func bitrotCheckFile(disk StorageAPI, volume string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) (err error) {
if algo != HighwayHash256S {
buf := []byte{}
// For whole-file bitrot we don't need to read the entire file, as bitrot verification happens on the storage side even if we read 0 bytes.
_, err = disk.ReadFile(volume, filePath, 0, buf, NewBitrotVerifier(algo, sum))
return err
}
buf := make([]byte, shardSize)
r := newStreamingBitrotReader(disk, volume, filePath, tillOffset, algo, shardSize)
defer closeBitrotReaders([]io.ReaderAt{r})
var offset int64
for {
if offset == tillOffset {
break
}
var n int
tmpBuf := buf
if int64(len(tmpBuf)) > (tillOffset - offset) {
tmpBuf = tmpBuf[:(tillOffset - offset)]
}
n, err = r.ReadAt(tmpBuf, offset)
if err != nil {
return err
}
offset += int64(n)
}
return nil
}
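The streaming branch of bitrotCheckFile walks the file in shard-sized reads through the verifying reader until tillOffset is reached, trimming the final chunk. A self-contained sketch of the same offset bookkeeping over a plain io.ReaderAt, with bytes.Reader standing in for the bitrot reader (names are illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"
)

// walkChunks reads r in chunks of at most shardSize until tillOffset,
// mimicking the loop in bitrotCheckFile.
func walkChunks(r io.ReaderAt, tillOffset, shardSize int64) error {
	buf := make([]byte, shardSize)
	var offset int64
	for offset < tillOffset {
		tmp := buf
		if int64(len(tmp)) > tillOffset-offset {
			tmp = tmp[:tillOffset-offset] // trim the last chunk
		}
		n, err := r.ReadAt(tmp, offset)
		offset += int64(n)
		if err != nil {
			if err == io.EOF {
				break // source ended early
			}
			return err
		}
	}
	return nil
}

func main() {
	data := bytes.Repeat([]byte("a"), 35)
	fmt.Println(walkChunks(bytes.NewReader(data), 35, 10)) // <nil>
}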

84
cmd/bitrot_test.go Normal file
View File

@@ -0,0 +1,84 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"io"
"io/ioutil"
"log"
"os"
"testing"
)
func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
tmpDir, err := ioutil.TempDir("", "")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(tmpDir)
volume := "testvol"
filePath := "testfile"
disk, err := newPosix(tmpDir)
if err != nil {
t.Fatal(err)
}
disk.MakeVol(volume)
writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
}
_, err = writer.Write([]byte("aaaaa"))
if err != nil {
log.Fatal(err)
}
writer.(io.Closer).Close()
reader := newBitrotReader(disk, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
b := make([]byte, 10)
if _, err = reader.ReadAt(b, 0); err != nil {
log.Fatal(err)
}
if _, err = reader.ReadAt(b, 10); err != nil {
log.Fatal(err)
}
if _, err = reader.ReadAt(b, 20); err != nil {
log.Fatal(err)
}
if _, err = reader.ReadAt(b[:5], 30); err != nil {
log.Fatal(err)
}
}
func TestAllBitrotAlgorithms(t *testing.T) {
for bitrotAlgo := range bitrotAlgorithms {
testBitrotReaderWriterAlgo(t, bitrotAlgo)
}
}

View File

@@ -56,11 +56,12 @@ func (bf *BoolFlag) UnmarshalJSON(data []byte) (err error) {
// ParseBoolFlag - parses string into BoolFlag.
func ParseBoolFlag(s string) (bf BoolFlag, err error) {
if s == "on" {
switch s {
case "on":
bf = true
} else if s == "off" {
case "off":
bf = false
} else {
default:
err = fmt.Errorf("invalid value %s for BoolFlag", s)
}
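The diff above swaps the if/else chain for a switch, which reads more cleanly as the set of accepted literals grows. A minimal standalone version of that pattern (a simplified BoolFlag, for illustration only):

package main

import "fmt"

// BoolFlag is a simplified stand-in for the config type in the diff.
type BoolFlag bool

// ParseBoolFlag accepts exactly "on" or "off", as in the handler above.
func ParseBoolFlag(s string) (BoolFlag, error) {
	switch s {
	case "on":
		return true, nil
	case "off":
		return false, nil
	default:
		return false, fmt.Errorf("invalid value %s for BoolFlag", s)
	}
}

func main() {
	for _, s := range []string{"on", "off", "maybe"} {
		bf, err := ParseBoolFlag(s)
		fmt.Println(s, bf, err)
	}
}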

View File

@@ -20,6 +20,9 @@ import (
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/policy"
)
@@ -54,19 +57,21 @@ func validateListObjectsArgs(prefix, marker, delimiter string, maxKeys int) APIE
// NOTE: It is recommended that this API be used for application development.
// Minio continues to support ListObjectsV1 for supporting legacy tools.
func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "ListObjectsV2")
ctx := newContext(r, w, "ListObjectsV2")
defer logger.AuditLog(w, r, "ListObjectsV2", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
@@ -76,22 +81,14 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
prefix, token, startAfter, delimiter, fetchOwner, maxKeys, _, errCode := getListObjectsV2Args(urlValues)
if errCode != ErrNone {
writeErrorResponse(w, errCode, r.URL)
writeErrorResponse(w, errCode, r.URL, guessIsBrowserReq(r))
return
}
// In ListObjectsV2 'continuation-token' is the marker.
marker := token
// Check if 'continuation-token' is empty.
if token == "" {
// Then we need to use 'start-after' as marker instead.
marker = startAfter
}
// Validate the query params before beginning to serve the request.
// fetch-owner is not validated since it is a boolean
if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
if s3Error := validateListObjectsArgs(prefix, token, delimiter, maxKeys); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
listObjectsV2 := objectAPI.ListObjectsV2
@@ -101,17 +98,28 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
// Initiate a list objects operation based on the input params.
// On success it would return back a ListObjectsInfo object to be
// marshaled into an S3-compatible XML response.
listObjectsV2Info, err := listObjectsV2(ctx, bucket, prefix, marker, delimiter, maxKeys, fetchOwner, startAfter)
listObjectsV2Info, err := listObjectsV2(ctx, bucket, prefix, token, delimiter, maxKeys, fetchOwner, startAfter)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
for i := range listObjectsV2Info.Objects {
if listObjectsV2Info.Objects[i].IsEncrypted() {
var actualSize int64
if listObjectsV2Info.Objects[i].IsCompressed() {
// Read the decompressed size from the meta.json.
actualSize = listObjectsV2Info.Objects[i].GetActualSize()
if actualSize < 0 {
writeErrorResponse(w, ErrInvalidDecompressedSize, r.URL, guessIsBrowserReq(r))
return
}
// Set the info.Size to the actualSize.
listObjectsV2Info.Objects[i].Size = actualSize
} else if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].DecryptedSize()
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
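For context on the marker change above: in ListObjectsV2, a non-empty continuation-token takes precedence and start-after only seeds the very first page; the diff now passes both values through and leaves that fallback to the object layer instead of resolving it in the handler. A sketch of the S3-defined precedence (the helper name is illustrative, not a server API):

package main

import "fmt"

// effectiveMarker resolves where a V2 listing should resume: a non-empty
// continuation-token wins; otherwise start-after seeds the listing.
func effectiveMarker(continuationToken, startAfter string) string {
	if continuationToken != "" {
		return continuationToken
	}
	return startAfter
}

func main() {
	fmt.Println(effectiveMarker("", "photos/2019/"))      // first page: photos/2019/
	fmt.Println(effectiveMarker("opaque-token", "x/y/z")) // later pages: opaque-token
}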
@@ -131,33 +139,39 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
// criteria to return a subset of the objects in a bucket.
//
func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "ListObjectsV1")
ctx := newContext(r, w, "ListObjectsV1")
defer logger.AuditLog(w, r, "ListObjectsV1", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
// Extract all the listObjectsV1 query params to their native values.
prefix, marker, delimiter, maxKeys, _ := getListObjectsV1Args(r.URL.Query())
prefix, marker, delimiter, maxKeys, _, s3Error := getListObjectsV1Args(r.URL.Query())
if s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
// Validate the maxKeys lower bound. When maxKeys > 1000, S3 returns 1000 but
// does not throw an error.
if maxKeys < 0 {
writeErrorResponse(w, ErrInvalidMaxKeys, r.URL)
writeErrorResponse(w, ErrInvalidMaxKeys, r.URL, guessIsBrowserReq(r))
return
}
// Validate all the query params before beginning to serve the request.
if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
listObjects := objectAPI.ListObjects
@@ -169,20 +183,30 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
// marshaled into an S3-compatible XML response.
listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
for i := range listObjectsInfo.Objects {
if listObjectsInfo.Objects[i].IsEncrypted() {
var actualSize int64
if listObjectsInfo.Objects[i].IsCompressed() {
// Read the decompressed size from the meta.json.
actualSize = listObjectsInfo.Objects[i].GetActualSize()
if actualSize < 0 {
writeErrorResponse(w, ErrInvalidDecompressedSize, r.URL, guessIsBrowserReq(r))
return
}
// Set the info.Size to the actualSize.
listObjectsInfo.Objects[i].Size = actualSize
} else if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false)
listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].DecryptedSize()
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
}
response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, maxKeys, listObjectsInfo)
// Write success response.

View File

@@ -28,20 +28,18 @@ import (
"path"
"path/filepath"
"strings"
"sync"
etcd "github.com/coreos/etcd/client"
"github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/dns"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/policy"
"github.com/minio/minio/pkg/sync/errgroup"
"github.com/minio/minio-go/pkg/set"
)
// Check if there are buckets on server without corresponding entry in etcd backend and
@@ -64,7 +62,7 @@ func initFederatorBackend(objLayer ObjectLayer) {
g.Go(func() error {
r, gerr := globalDNSConfig.Get(b[index].Name)
if gerr != nil {
if etcd.IsKeyNotFound(gerr) || gerr == dns.ErrNoEntriesFound {
if gerr == dns.ErrNoEntriesFound {
return globalDNSConfig.Put(b[index].Name)
}
return gerr
@@ -89,34 +87,30 @@ func initFederatorBackend(objLayer ObjectLayer) {
// -------------------------
// This operation returns bucket location.
func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "GetBucketLocation")
ctx := newContext(r, w, "GetBucketLocation")
defer logger.AuditLog(w, r, "GetBucketLocation", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketLocationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
bucketLock := globalNSMutex.NewNSLock(bucket, "")
if err := bucketLock.GetRLock(globalObjectTimeout); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
defer bucketLock.RUnlock()
getBucketInfo := objectAPI.GetBucketInfo
if api.CacheAPI() != nil {
getBucketInfo = api.CacheAPI().GetBucketInfo
}
if _, err := getBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -143,38 +137,44 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
// uploads in the response.
//
func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "ListMultipartUploads")
ctx := newContext(r, w, "ListMultipartUploads")
defer logger.AuditLog(w, r, "ListMultipartUploads", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketMultipartUploadsAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, _ := getBucketMultipartResources(r.URL.Query())
prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, _, s3Error := getBucketMultipartResources(r.URL.Query())
if s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
if maxUploads < 0 {
writeErrorResponse(w, ErrInvalidMaxUploads, r.URL)
writeErrorResponse(w, ErrInvalidMaxUploads, r.URL, guessIsBrowserReq(r))
return
}
if keyMarker != "" {
// A marker that does not share the prefix is not implemented.
if !hasPrefix(keyMarker, prefix) {
writeErrorResponse(w, ErrNotImplemented, r.URL)
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
return
}
}
listMultipartsInfo, err := objectAPI.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// generate response
@@ -190,11 +190,13 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
// This implementation of the GET operation returns a list of all buckets
// owned by the authenticated sender of the request.
func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "ListBuckets")
ctx := newContext(r, w, "ListBuckets")
defer logger.AuditLog(w, r, "ListBuckets", mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}
listBuckets := objectAPI.ListBuckets
@@ -204,15 +206,15 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListAllMyBucketsAction, "", ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
// If etcd and DNS federation are configured, list buckets from etcd.
var bucketsInfo []BucketInfo
if globalDNSConfig != nil {
dnsBuckets, err := globalDNSConfig.List()
if err != nil && !etcd.IsKeyNotFound(err) && err != dns.ErrNoEntriesFound {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
if err != nil && err != dns.ErrNoEntriesFound {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
bucketSet := set.NewStringSet()
@@ -231,7 +233,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
var err error
bucketsInfo, err = listBuckets(ctx)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
@@ -246,14 +248,16 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
// DeleteMultipleObjectsHandler - deletes multiple objects.
func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "DeleteMultipleObjects")
ctx := newContext(r, w, "DeleteMultipleObjects")
defer logger.AuditLog(w, r, "DeleteMultipleObjects", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}
@@ -262,7 +266,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// In the event access is denied, a 200 response should still be returned
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if s3Error != ErrAccessDenied {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
}
@@ -270,14 +274,14 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Content-Length is required and should be non-zero
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if r.ContentLength <= 0 {
writeErrorResponse(w, ErrMissingContentLength, r.URL)
writeErrorResponse(w, ErrMissingContentLength, r.URL, guessIsBrowserReq(r))
return
}
// Content-Md5 is required and should be set
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if _, ok := r.Header["Content-Md5"]; !ok {
writeErrorResponse(w, ErrMissingContentMD5, r.URL)
writeErrorResponse(w, ErrMissingContentMD5, r.URL, guessIsBrowserReq(r))
return
}
@@ -293,7 +297,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Read incoming body XML bytes.
if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrInternalError, r.URL)
writeErrorResponse(w, ErrInternalError, r.URL, guessIsBrowserReq(r))
return
}
@@ -301,7 +305,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
deleteObjects := &DeleteObjectsRequest{}
if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrMalformedXML, r.URL)
writeErrorResponse(w, ErrMalformedXML, r.URL, guessIsBrowserReq(r))
return
}
@@ -309,38 +313,28 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
if globalWORMEnabled {
// Not required to check whether given objects exist or not, because
// DeleteMultipleObject is always successful irrespective of object existence.
writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
writeErrorResponse(w, ErrMethodNotAllowed, r.URL, guessIsBrowserReq(r))
return
}
var wg = &sync.WaitGroup{} // Allocate a new wait group.
var dErrs = make([]error, len(deleteObjects.Objects))
// Delete all requested objects in parallel.
for index, object := range deleteObjects.Objects {
wg.Add(1)
go func(i int, obj ObjectIdentifier) {
defer wg.Done()
// If the request is denied access, each item
// should be marked as 'AccessDenied'
if s3Error == ErrAccessDenied {
dErrs[i] = PrefixAccessDenied{
Bucket: bucket,
Object: obj.ObjectName,
}
return
}
deleteObject := objectAPI.DeleteObject
if api.CacheAPI() != nil {
deleteObject = api.CacheAPI().DeleteObject
}
dErr := deleteObject(ctx, bucket, obj.ObjectName)
if dErr != nil {
dErrs[i] = dErr
}
}(index, object)
deleteObject := objectAPI.DeleteObject
if api.CacheAPI() != nil {
deleteObject = api.CacheAPI().DeleteObject
}
var dErrs = make([]error, len(deleteObjects.Objects))
for index, object := range deleteObjects.Objects {
// If the request is denied access, each item
// should be marked as 'AccessDenied'
if s3Error == ErrAccessDenied {
dErrs[index] = PrefixAccessDenied{
Bucket: bucket,
Object: object.ObjectName,
}
continue
}
dErrs[index] = deleteObject(ctx, bucket, object.ObjectName)
}
wg.Wait()
// Collect deleted objects and errors if any.
var deletedObjects []ObjectIdentifier
@@ -360,8 +354,8 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}
// Error during delete should be collected separately.
deleteErrors = append(deleteErrors, DeleteError{
Code: errorCodeResponse[toAPIErrorCode(err)].Code,
Message: errorCodeResponse[toAPIErrorCode(err)].Description,
Code: errorCodeResponse[toAPIErrorCode(ctx, err)].Code,
Message: errorCodeResponse[toAPIErrorCode(ctx, err)].Description,
Key: object.ObjectName,
})
}
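The rewrite above drops the per-object goroutine fan-out (the WaitGroup and closure) in favor of a plain sequential loop that records one error slot per requested key. A compact sketch of that collect-errors pattern; deleteFn and the sentinel error are stand-ins, not the server's API:

package main

import (
	"errors"
	"fmt"
)

var errAccessDenied = errors.New("access denied") // stand-in sentinel

// deleteAll deletes keys one by one, keeping a per-key error slot so the
// caller can split results into deleted objects and DeleteError entries.
func deleteAll(keys []string, denied bool, deleteFn func(string) error) []error {
	dErrs := make([]error, len(keys))
	for i, key := range keys {
		if denied {
			dErrs[i] = errAccessDenied
			continue
		}
		dErrs[i] = deleteFn(key)
	}
	return dErrs
}

func main() {
	store := map[string]bool{"a": true, "b": true}
	del := func(k string) error {
		if !store[k] {
			return fmt.Errorf("no such key %q", k)
		}
		delete(store, k)
		return nil
	}
	for i, err := range deleteAll([]string{"a", "missing", "b"}, false, del) {
		fmt.Println(i, err)
	}
}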
@@ -375,7 +369,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Get host and port from Request.RemoteAddr; failing that,
// fill them with empty strings.
host, port, err := net.SplitHostPort(r.RemoteAddr)
host, port, err := net.SplitHostPort(handlers.GetSourceIP(r))
if err != nil {
host, port = "", ""
}
@@ -388,10 +382,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
Object: ObjectInfo{
Name: dobj.ObjectName,
},
ReqParams: extractReqParams(r),
UserAgent: r.UserAgent(),
Host: host,
Port: port,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: host,
Port: port,
})
}
}
@@ -400,11 +395,13 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// ----------
// This implementation of the PUT operation creates a new bucket for authenticated request
func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "PutBucket")
ctx := newContext(r, w, "PutBucket")
defer logger.AuditLog(w, r, "PutBucket", mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}
@@ -412,35 +409,35 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.CreateBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
// Parse incoming location constraint.
location, s3Error := parseLocationConstraint(r)
if s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
// Validate if location sent by the client is valid, reject
// requests which do not follow valid region requirements.
if !isValidLocation(location) {
writeErrorResponse(w, ErrInvalidRegion, r.URL)
writeErrorResponse(w, ErrInvalidRegion, r.URL, guessIsBrowserReq(r))
return
}
if globalDNSConfig != nil {
if _, err := globalDNSConfig.Get(bucket); err != nil {
if etcd.IsKeyNotFound(err) || err == dns.ErrNoEntriesFound {
if err == dns.ErrNoEntriesFound {
// Proceed to creating a bucket.
if err = objectAPI.MakeBucketWithLocation(ctx, bucket, location); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err = globalDNSConfig.Put(bucket); err != nil {
objectAPI.DeleteBucket(ctx, bucket)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -450,18 +447,18 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
writeSuccessResponseHeadersOnly(w)
return
}
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
writeErrorResponse(w, ErrBucketAlreadyOwnedByYou, r.URL)
writeErrorResponse(w, ErrBucketAlreadyOwnedByYou, r.URL, guessIsBrowserReq(r))
return
}
// Proceed to creating a bucket.
err := objectAPI.MakeBucketWithLocation(ctx, bucket, location)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -476,30 +473,39 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
// This implementation of the POST operation handles object creation with a specified
// signature policy in multipart/form-data
func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "PostPolicyBucket")
ctx := newContext(r, w, "PostPolicyBucket")
defer logger.AuditLog(w, r, "PostPolicyBucket", mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}
if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
return
}
if !api.EncryptionEnabled() && hasServerSideEncryptionHeader(r.Header) {
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
return
}
bucket := mux.Vars(r)["bucket"]
// Require Content-Length to be set in the request
size := r.ContentLength
if size < 0 {
writeErrorResponse(w, ErrMissingContentLength, r.URL)
writeErrorResponse(w, ErrMissingContentLength, r.URL, guessIsBrowserReq(r))
return
}
resource, err := getResource(r.URL.Path, r.Host, globalDomainName)
if err != nil {
writeErrorResponse(w, ErrInvalidRequest, r.URL)
writeErrorResponse(w, ErrInvalidRequest, r.URL, guessIsBrowserReq(r))
return
}
// Make sure that the URL does not contain an object name.
if bucket != filepath.Clean(resource[1:]) {
writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
writeErrorResponse(w, ErrMethodNotAllowed, r.URL, guessIsBrowserReq(r))
return
}
@@ -508,7 +514,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
reader, err := r.MultipartReader()
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL, guessIsBrowserReq(r))
return
}
@@ -516,7 +522,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
form, err := reader.ReadForm(maxFormMemory)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL, guessIsBrowserReq(r))
return
}
@@ -527,13 +533,13 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(ctx, form)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL, guessIsBrowserReq(r))
return
}
// Check if file is provided, error out otherwise.
if fileBody == nil {
writeErrorResponse(w, ErrPOSTFileRequired, r.URL)
writeErrorResponse(w, ErrPOSTFileRequired, r.URL, guessIsBrowserReq(r))
return
}
@@ -555,7 +561,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
if successRedirect != "" {
redirectURL, err = url.Parse(successRedirect)
if err != nil {
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL, guessIsBrowserReq(r))
return
}
}
@@ -563,83 +569,104 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// Verify policy signature.
apiErr := doesPolicySignatureMatch(formValues)
if apiErr != ErrNone {
writeErrorResponse(w, apiErr, r.URL)
writeErrorResponse(w, apiErr, r.URL, guessIsBrowserReq(r))
return
}
policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
if err != nil {
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL, guessIsBrowserReq(r))
return
}
postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
if err != nil {
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
return
}
// Make sure formValues adhere to policy restrictions.
if apiErr = checkPostPolicy(formValues, postPolicyForm); apiErr != ErrNone {
writeErrorResponse(w, apiErr, r.URL)
return
}
// Ensure that the object size is within the expected range; the file size
// should not exceed the maximum single PUT size (5 GiB).
lengthRange := postPolicyForm.Conditions.ContentLengthRange
if lengthRange.Valid {
if fileSize < lengthRange.Min {
writeErrorResponse(w, toAPIErrorCode(errDataTooSmall), r.URL)
// Handle policy if it is set.
if len(policyBytes) > 0 {
postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
if err != nil {
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL, guessIsBrowserReq(r))
return
}
if fileSize > lengthRange.Max || isMaxObjectSize(fileSize) {
writeErrorResponse(w, toAPIErrorCode(errDataTooLarge), r.URL)
// Make sure formValues adhere to policy restrictions.
if apiErr = checkPostPolicy(formValues, postPolicyForm); apiErr != ErrNone {
writeErrorResponse(w, apiErr, r.URL, guessIsBrowserReq(r))
return
}
// Ensure that the object size is within the expected range; the file size
// should not exceed the maximum single PUT size (5 GiB).
lengthRange := postPolicyForm.Conditions.ContentLengthRange
if lengthRange.Valid {
if fileSize < lengthRange.Min {
writeErrorResponse(w, toAPIErrorCode(ctx, errDataTooSmall), r.URL, guessIsBrowserReq(r))
return
}
if fileSize > lengthRange.Max || isMaxObjectSize(fileSize) {
writeErrorResponse(w, toAPIErrorCode(ctx, errDataTooLarge), r.URL, guessIsBrowserReq(r))
return
}
}
}
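The nested checks above enforce the POST policy's content-length-range on the uploaded file before anything is stored. A standalone sketch of that bound check, with a fixed 5 GiB cap standing in for isMaxObjectSize (names and the cap are assumptions for illustration):

package main

import (
	"errors"
	"fmt"
)

const maxSinglePutSize = 5 << 30 // assumed 5 GiB single-PUT ceiling

var (
	errDataTooSmall = errors.New("object is smaller than policy allows")
	errDataTooLarge = errors.New("object is larger than policy allows")
)

// checkLengthRange mirrors the content-length-range validation above.
func checkLengthRange(fileSize, min, max int64, valid bool) error {
	if !valid {
		return nil // policy did not constrain the size
	}
	if fileSize < min {
		return errDataTooSmall
	}
	if fileSize > max || fileSize > maxSinglePutSize {
		return errDataTooLarge
	}
	return nil
}

func main() {
	fmt.Println(checkLengthRange(1024, 1, 4096, true)) // <nil>
	fmt.Println(checkLengthRange(8192, 1, 4096, true)) // too large
}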
// Extract metadata to be saved from the received form.
metadata, err := extractMetadataFromHeader(ctx, formValues)
metadata := make(map[string]string)
err = extractMetadataFromMap(ctx, formValues, metadata)
if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
writeErrorResponse(w, ErrInternalError, r.URL, guessIsBrowserReq(r))
return
}
hashReader, err := hash.NewReader(fileBody, fileSize, "", "")
hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
rawReader := hashReader
pReader := NewPutObjReader(rawReader, nil, nil)
var objectEncryptionKey []byte
// This request header needs to be set prior to setting ObjectOptions
if globalAutoEncryption && !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
}
// get gateway encryption options
var opts ObjectOptions
opts, err = putEncryptionOpts(ctx, r, bucket, object, nil)
if err != nil {
writeErrorResponseHeadersOnly(w, toAPIErrorCode(ctx, err))
return
}
if objectAPI.IsEncryptionSupported() {
if hasSSECustomerHeader(formValues) && !hasSuffix(object, slashSeparator) { // handle SSE-C requests
if hasServerSideEncryptionHeader(formValues) && !hasSuffix(object, slashSeparator) { // handle SSE-C and SSE-S3 requests
var reader io.Reader
var key []byte
key, err = ParseSSECustomerHeader(formValues)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
if crypto.SSEC.IsRequested(formValues) {
key, err = ParseSSECustomerHeader(formValues)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
reader, err = newEncryptReader(hashReader, key, metadata)
reader, objectEncryptionKey, err = newEncryptReader(hashReader, key, bucket, object, metadata, crypto.S3.IsRequested(formValues))
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
info := ObjectInfo{Size: fileSize}
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "") // do not try to verify encrypted content
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize) // do not try to verify encrypted content
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
pReader = NewPutObjReader(rawReader, hashReader, objectEncryptionKey)
}
}
objInfo, err := objectAPI.PutObject(ctx, bucket, object, hashReader, metadata)
objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, metadata, opts)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -648,20 +675,21 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
w.Header().Set("Location", location)
// Get host and port from Request.RemoteAddr.
host, port, err := net.SplitHostPort(r.RemoteAddr)
host, port, err := net.SplitHostPort(handlers.GetSourceIP(r))
if err != nil {
host, port = "", ""
}
// Notify object created event.
defer sendEvent(eventArgs{
EventName: event.ObjectCreatedPost,
BucketName: objInfo.Bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
UserAgent: r.UserAgent(),
Host: host,
Port: port,
EventName: event.ObjectCreatedPost,
BucketName: objInfo.Bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: host,
Port: port,
})
if successRedirect != "" {
@@ -695,7 +723,9 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// have permission to access it. Otherwise, the operation might
// return responses such as 404 Not Found and 403 Forbidden.
func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "HeadBucket")
ctx := newContext(r, w, "HeadBucket")
defer logger.AuditLog(w, r, "HeadBucket", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -716,7 +746,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
getBucketInfo = api.CacheAPI().GetBucketInfo
}
if _, err := getBucketInfo(ctx, bucket); err != nil {
writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
writeErrorResponseHeadersOnly(w, toAPIErrorCode(ctx, err))
return
}
@@ -725,19 +755,21 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
// DeleteBucketHandler - Delete bucket
func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "DeleteBucket")
ctx := newContext(r, w, "DeleteBucket")
defer logger.AuditLog(w, r, "DeleteBucket", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
@@ -747,22 +779,19 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
}
// Attempt to delete bucket.
if err := deleteBucket(ctx, bucket); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
globalNotificationSys.RemoveNotification(bucket)
globalPolicySys.Remove(bucket)
for nerr := range globalNotificationSys.DeleteBucket(bucket) {
logger.GetReqInfo(ctx).AppendTags("remotePeer", nerr.Host.Name)
logger.LogIf(ctx, nerr.Err)
}
globalNotificationSys.DeleteBucket(ctx, bucket)
if globalDNSConfig != nil {
if err := globalDNSConfig.Delete(bucket); err != nil {
// Deleting DNS entry failed, attempt to create the bucket again.
objectAPI.MakeBucketWithLocation(ctx, bucket, "")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}

View File

@@ -84,7 +84,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
// initialize httptest Recorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for Get bucket location.
req, err := newTestSignedRequestV4("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey)
req, err := newTestSignedRequestV4("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for GetBucketLocationHandler: <ERROR> %v", i+1, instanceType, err)
}
@@ -116,7 +116,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey)
reqV2, err := newTestSignedRequestV2("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@@ -220,7 +220,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for HEAD bucket.
req, err := newTestSignedRequestV4("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey)
req, err := newTestSignedRequestV4("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for HeadBucketHandler: <ERROR> %v", i+1, instanceType, err)
}
@@ -235,7 +235,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey)
reqV2, err := newTestSignedRequestV2("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@@ -437,7 +437,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
// construct HTTP request for List multipart uploads endpoint.
u := getListMultipartUploadsURLWithParams("", testCase.bucket, testCase.prefix, testCase.keyMarker, testCase.uploadIDMarker, testCase.delimiter, testCase.maxUploads)
req, gerr := newTestSignedRequestV4("GET", u, 0, nil, testCase.accessKey, testCase.secretKey)
req, gerr := newTestSignedRequestV4("GET", u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
if gerr != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", i+1, instanceType, gerr)
}
@@ -454,7 +454,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
// construct HTTP request for PUT bucket policy endpoint.
// verify response for V2 signed HTTP request.
reqV2, err := newTestSignedRequestV2("GET", u, 0, nil, testCase.accessKey, testCase.secretKey)
reqV2, err := newTestSignedRequestV2("GET", u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
}
@@ -471,7 +471,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
// construct HTTP request for List multipart uploads endpoint.
u := getListMultipartUploadsURLWithParams("", bucketName, "", "", "", "", "")
req, err := newTestSignedRequestV4("GET", u, 0, nil, "", "") // Generate an anonymous request.
req, err := newTestSignedRequestV4("GET", u, 0, nil, "", "", nil) // Generate an anonymous request.
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", instanceType, err)
}
@@ -551,7 +551,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
req, lerr := newTestSignedRequestV4("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey)
req, lerr := newTestSignedRequestV4("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if lerr != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for ListBucketsHandler: <ERROR> %v", i+1, instanceType, lerr)
}
@@ -568,7 +568,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
// construct HTTP request for PUT bucket policy endpoint.
// verify response for V2 signed HTTP request.
reqV2, err := newTestSignedRequestV2("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey)
reqV2, err := newTestSignedRequestV2("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@@ -625,7 +625,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
for i := 0; i < 10; i++ {
objectName := "test-object-" + strconv.Itoa(i)
// uploading the object.
_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil)
_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil, ObjectOptions{})
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)
@@ -745,7 +745,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
// Generate a signed or anonymous request based on the testCase
if testCase.accessKey != "" {
req, err = newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", bucketName),
int64(len(testCase.objects)), bytes.NewReader(testCase.objects), testCase.accessKey, testCase.secretKey)
int64(len(testCase.objects)), bytes.NewReader(testCase.objects), testCase.accessKey, testCase.secretKey, nil)
} else {
req, err = newTestRequest("POST", getDeleteMultipleObjectsURL("", bucketName),
int64(len(testCase.objects)), bytes.NewReader(testCase.objects))
@@ -785,7 +785,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
nilBucket := "dummy-bucket"
nilObject := ""
nilReq, err := newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", nilBucket), 0, nil, "", "")
nilReq, err := newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", nilBucket), 0, nil, "", "", nil)
if err != nil {
t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}

View File

@@ -42,30 +42,32 @@ var errNoSuchNotifications = errors.New("The specified bucket does not have buck
// as per http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html.
// It returns an empty configuration if it is not set.
func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "GetBucketNotification")
ctx := newContext(r, w, "GetBucketNotification")
defer logger.AuditLog(w, r, "GetBucketNotification", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucketName := vars["bucket"]
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}
if !objAPI.IsNotificationSupported() {
writeErrorResponse(w, ErrNotImplemented, r.URL)
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
_, err := objAPI.GetBucketInfo(ctx, bucketName)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -74,17 +76,21 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
if err != nil {
// Ignore errNoSuchNotifications to comply with AWS S3.
if err != errNoSuchNotifications {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
nConfig = &event.Config{}
}
// If xml namespace is empty, set a default value before returning.
if nConfig.XMLNS == "" {
nConfig.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
}
notificationBytes, err := xml.Marshal(nConfig)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
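The small block added above backfills the S3 document namespace before marshaling, so clients always see an xmlns attribute even when the stored configuration predates it. A sketch of the same defaulting with a toy config type (the struct shape is illustrative, not the server's event.Config):

package main

import (
	"encoding/xml"
	"fmt"
)

// Config is a toy notification configuration with an xmlns attribute.
type Config struct {
	XMLName xml.Name `xml:"NotificationConfiguration"`
	XMLNS   string   `xml:"xmlns,attr,omitempty"`
}

func main() {
	nConfig := &Config{}
	if nConfig.XMLNS == "" {
		nConfig.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
	}
	out, err := xml.Marshal(nConfig)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}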
@@ -94,16 +100,18 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
// PutBucketNotificationHandler - This HTTP handler stores given notification configuration as per
// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html.
func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "PutBucketNotification")
ctx := newContext(r, w, "PutBucketNotification")
defer logger.AuditLog(w, r, "PutBucketNotification", mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}
if !objectAPI.IsNotificationSupported() {
writeErrorResponse(w, ErrNotImplemented, r.URL)
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
return
}
@@ -111,19 +119,19 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
bucketName := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
_, err := objectAPI.GetBucketInfo(ctx, bucketName)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// PutBucketNotification always needs a Content-Length.
if r.ContentLength <= 0 {
writeErrorResponse(w, ErrMissingContentLength, r.URL)
writeErrorResponse(w, ErrMissingContentLength, r.URL, guessIsBrowserReq(r))
return
}
@@ -132,24 +140,21 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
if err != nil {
apiErr := ErrMalformedXML
if event.IsEventError(err) {
apiErr = toAPIErrorCode(err)
apiErr = toAPIErrorCode(ctx, err)
}
writeErrorResponse(w, apiErr, r.URL)
writeErrorResponse(w, apiErr, r.URL, guessIsBrowserReq(r))
return
}
if err = saveNotificationConfig(objectAPI, bucketName, config); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
if err = saveNotificationConfig(ctx, objectAPI, bucketName, config); err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
rulesMap := config.ToRulesMap()
globalNotificationSys.AddRulesMap(bucketName, rulesMap)
for nerr := range globalNotificationSys.PutBucketNotification(bucketName, rulesMap) {
logger.GetReqInfo(ctx).AppendTags("remotePeer", nerr.Host.Name)
logger.LogIf(ctx, nerr.Err)
}
globalNotificationSys.PutBucketNotification(ctx, bucketName, rulesMap)
writeSuccessResponseHeadersOnly(w)
}
@@ -157,24 +162,30 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
// ListenBucketNotificationHandler - This HTTP handler sends events to the connected HTTP client.
// Clients should send the object name prefix/suffix to match and the events to watch as query parameters.
func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "ListenBucketNotification")
ctx := newContext(r, w, "ListenBucketNotification")
defer logger.AuditLog(w, r, "ListenBucketNotification", mustGetClaimsFromToken(r))
// Validate if bucket exists.
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}
if !objAPI.IsNotificationSupported() {
writeErrorResponse(w, ErrNotImplemented, r.URL)
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
return
}
if !objAPI.IsListenBucketSupported() {
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucketName := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.ListenBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
@@ -182,11 +193,11 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
var prefix string
if len(values["prefix"]) > 1 {
writeErrorResponse(w, ErrFilterNamePrefix, r.URL)
writeErrorResponse(w, ErrFilterNamePrefix, r.URL, guessIsBrowserReq(r))
}
if len(values["prefix"]) == 1 {
if err := event.ValidateFilterRuleValue(values["prefix"][0]); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -195,11 +206,11 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
var suffix string
if len(values["suffix"]) > 1 {
writeErrorResponse(w, ErrFilterNameSuffix, r.URL)
writeErrorResponse(w, ErrFilterNameSuffix, r.URL, guessIsBrowserReq(r))
}
if len(values["suffix"]) == 1 {
if err := event.ValidateFilterRuleValue(values["suffix"][0]); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -212,7 +223,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
for _, s := range values["events"] {
eventName, err := event.ParseName(s)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -220,28 +231,27 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
}
if _, err := objAPI.GetBucketInfo(ctx, bucketName); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
host, err := xnet.ParseHost(r.RemoteAddr)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
target, err := target.NewHTTPClientTarget(*host, w)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
rulesMap := event.NewRulesMap(eventNames, pattern, target.ID())
if err := globalNotificationSys.AddRemoteTarget(bucketName, target, rulesMap); err != nil {
if err = globalNotificationSys.AddRemoteTarget(bucketName, target, rulesMap); err != nil {
logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
defer globalNotificationSys.RemoveRemoteTarget(bucketName, target.ID())
@@ -249,29 +259,23 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
thisAddr, err := xnet.ParseHost(GetLocalPeer(globalEndpoints))
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err = SaveListener(objAPI, bucketName, eventNames, pattern, target.ID(), *thisAddr); err != nil {
logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
errCh := globalNotificationSys.ListenBucketNotification(bucketName, eventNames, pattern, target.ID(), *thisAddr)
for nerr := range errCh {
logger.GetReqInfo(ctx).AppendTags("remotePeer", nerr.Host.Name)
logger.LogIf(ctx, nerr.Err)
}
globalNotificationSys.ListenBucketNotification(ctx, bucketName, eventNames, pattern, target.ID(), *thisAddr)
<-target.DoneCh
if err = RemoveListener(objAPI, bucketName, target.ID(), *thisAddr); err != nil {
logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
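
The recurring change across these handlers is the extra guessIsBrowserReq(r) argument to writeErrorResponse. The function itself is not part of this compare view; a minimal sketch, assuming it is a User-Agent heuristic:

package cmd

import (
	"net/http"
	"strings"
)

// guessIsBrowserReq reports whether the request likely came from a
// browser. Assumption: the heuristic is a "Mozilla" User-Agent check;
// the real implementation lives elsewhere in the repository.
func guessIsBrowserReq(req *http.Request) bool {
	if req == nil {
		return false
	}
	return strings.Contains(req.Header.Get("User-Agent"), "Mozilla")
}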

View File

@@ -38,11 +38,13 @@ const (
// PutBucketPolicyHandler - This HTTP handler stores given bucket policy configuration as per
// https://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "PutBucketPolicy")
ctx := newContext(r, w, "PutBucketPolicy")
defer logger.AuditLog(w, r, "PutBucketPolicy", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}
@@ -50,51 +52,48 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Error out if Content-Length is missing.
// PutBucketPolicy always needs Content-Length.
if r.ContentLength <= 0 {
writeErrorResponse(w, ErrMissingContentLength, r.URL)
writeErrorResponse(w, ErrMissingContentLength, r.URL, guessIsBrowserReq(r))
return
}
// Error out if Content-Length is beyond allowed size.
if r.ContentLength > maxBucketPolicySize {
writeErrorResponse(w, ErrEntityTooLarge, r.URL)
writeErrorResponse(w, ErrEntityTooLarge, r.URL, guessIsBrowserReq(r))
return
}
bucketPolicy, err := policy.ParseConfig(io.LimitReader(r.Body, r.ContentLength), bucket)
if err != nil {
writeErrorResponse(w, ErrMalformedPolicy, r.URL)
writeErrorResponse(w, ErrMalformedPolicy, r.URL, guessIsBrowserReq(r))
return
}
// Version in policy must not be empty
if bucketPolicy.Version == "" {
writeErrorResponse(w, ErrMalformedPolicy, r.URL)
writeErrorResponse(w, ErrMalformedPolicy, r.URL, guessIsBrowserReq(r))
return
}
if err = objAPI.SetBucketPolicy(ctx, bucket, bucketPolicy); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
globalPolicySys.Set(bucket, *bucketPolicy)
for nerr := range globalNotificationSys.SetBucketPolicy(bucket, bucketPolicy) {
logger.GetReqInfo(ctx).AppendTags("remotePeer", nerr.Host.Name)
logger.LogIf(ctx, nerr.Err)
}
globalNotificationSys.SetBucketPolicy(ctx, bucket, bucketPolicy)
// Success.
writeSuccessNoContent(w)
@@ -102,11 +101,13 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
// DeleteBucketPolicyHandler - This HTTP handler removes bucket policy configuration.
func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "DeleteBucketPolicy")
ctx := newContext(r, w, "DeleteBucketPolicy")
defer logger.AuditLog(w, r, "DeleteBucketPolicy", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}
@@ -114,26 +115,23 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err := objAPI.DeleteBucketPolicy(ctx, bucket); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
globalPolicySys.Remove(bucket)
for nerr := range globalNotificationSys.RemoveBucketPolicy(bucket) {
logger.GetReqInfo(ctx).AppendTags("remotePeer", nerr.Host.Name)
logger.LogIf(ctx, nerr.Err)
}
globalNotificationSys.RemoveBucketPolicy(ctx, bucket)
// Success.
writeSuccessNoContent(w)
@@ -141,11 +139,13 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
// GetBucketPolicyHandler - This HTTP handler returns bucket policy configuration.
func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "GetBucketPolicy")
ctx := newContext(r, w, "GetBucketPolicy")
defer logger.AuditLog(w, r, "GetBucketPolicy", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}
@@ -153,27 +153,26 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Read bucket access policy.
bucketPolicy, err := objAPI.GetBucketPolicy(ctx, bucket)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
policyData, err := json.Marshal(bucketPolicy)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

View File

@@ -254,7 +254,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
recV4 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testCase.bucketName),
int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey)
int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
}
@@ -268,7 +268,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testCase.bucketName),
int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey)
int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
}
@@ -304,7 +304,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
nilBucket := "dummy-bucket"
nilReq, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", nilBucket),
0, nil, "", "")
0, nil, "", "", nil)
if err != nil {
t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@@ -346,7 +346,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
recV4 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testPolicy.bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey)
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
}
@@ -360,7 +360,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testPolicy.bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey)
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
}
@@ -417,7 +417,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
recV4 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("GET", getGetPolicyURL("", testCase.bucketName),
0, nil, testCase.accessKey, testCase.secretKey)
0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
@@ -437,11 +437,13 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
if recV4.Code != testCase.expectedRespStatus {
// Verify whether the bucket policy fetched is same as the one inserted.
expectedPolicy, err := policy.ParseConfig(strings.NewReader(expectedBucketPolicyStr), testCase.bucketName)
var expectedPolicy *policy.Policy
expectedPolicy, err = policy.ParseConfig(strings.NewReader(expectedBucketPolicyStr), testCase.bucketName)
if err != nil {
t.Fatalf("unexpected error. %v", err)
}
gotPolicy, err := policy.ParseConfig(bytes.NewReader(bucketPolicyReadBuf), testCase.bucketName)
var gotPolicy *policy.Policy
gotPolicy, err = policy.ParseConfig(bytes.NewReader(bucketPolicyReadBuf), testCase.bucketName)
if err != nil {
t.Fatalf("unexpected error. %v", err)
}
@@ -454,7 +456,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("GET", getGetPolicyURL("", testCase.bucketName),
0, nil, testCase.accessKey, testCase.secretKey)
0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
}
@@ -509,7 +511,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
nilBucket := "dummy-bucket"
nilReq, err := newTestSignedRequestV4("GET", getGetPolicyURL("", nilBucket),
0, nil, "", "")
0, nil, "", "", nil)
if err != nil {
t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@@ -589,7 +591,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
recV4 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testPolicy.bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey)
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
}
@@ -639,7 +641,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
recV4 := httptest.NewRecorder()
// construct HTTP request for Delete bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("DELETE", getDeletePolicyURL("", testCase.bucketName),
0, nil, testCase.accessKey, testCase.secretKey)
0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
}
@@ -661,7 +663,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testPolicy.bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey)
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
}
@@ -678,7 +680,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
recV2 := httptest.NewRecorder()
// construct HTTP request for Delete bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("DELETE", getDeletePolicyURL("", testCase.bucketName),
0, nil, testCase.accessKey, testCase.secretKey)
0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
}
@@ -712,7 +714,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
nilBucket := "dummy-bucket"
nilReq, err := newTestSignedRequestV4("DELETE", getDeletePolicyURL("", nilBucket),
0, nil, "", "")
0, nil, "", "", nil)
if err != nil {
t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
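
These tests follow the standard recorder pattern: build a signed request, serve it into an httptest.ResponseRecorder, and assert on the captured status. The MinIO helpers now take an extra trailing argument (passed as nil above); a stdlib-only sketch of the same pattern, without the signing helpers:

package main

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
)

func TestPutPolicyPattern(t *testing.T) {
	rec := httptest.NewRecorder()
	body := strings.NewReader(`{"Version":"2012-10-17"}`)
	req := httptest.NewRequest(http.MethodPut, "/bucket?policy=", body)

	// In the real tests the handler is the mux-registered API handler
	// and the request is V2- or V4-signed; a stub keeps this runnable.
	handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	})
	handler.ServeHTTP(rec, req)

	if rec.Code != http.StatusNoContent {
		t.Fatalf("expected 204, got %d", rec.Code)
	}
}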

View File

@@ -68,24 +68,6 @@ func parsePublicCertFile(certFile string) (x509Certs []*x509.Certificate, err er
}
func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
// Get all CA file names.
var caFiles []string
fis, err := readDir(certsCAsDir)
if err != nil {
return nil, err
}
for _, fi := range fis {
// Skip all directories.
if hasSuffix(fi, slashSeparator) {
continue
}
// We are only interested in regular files here.
caFiles = append(caFiles, pathJoin(certsCAsDir, fi))
}
if len(caFiles) == 0 {
return nil, nil
}
rootCAs, _ := x509.SystemCertPool()
if rootCAs == nil {
// In some systems (like Windows) system cert pool is
@@ -94,16 +76,26 @@ func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
rootCAs = x509.NewCertPool()
}
// Load custom root CAs for client requests
for _, caFile := range caFiles {
caCert, err := ioutil.ReadFile(caFile)
if err != nil {
return nil, err
fis, err := readDir(certsCAsDir)
if err != nil {
if err == errFileNotFound {
err = nil // Return success if the CAs directory is missing.
}
rootCAs.AppendCertsFromPEM(caCert)
return rootCAs, err
}
// Load all custom CA files.
for _, fi := range fis {
// Skip all directories.
if hasSuffix(fi, slashSeparator) {
continue
}
caCert, err := ioutil.ReadFile(pathJoin(certsCAsDir, fi))
if err != nil {
return rootCAs, err
}
rootCAs.AppendCertsFromPEM(caCert)
}
return rootCAs, nil
}
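
The rewritten getRootCAs inverts the old flow: it builds the pool first (falling back to a fresh one where the system pool is unavailable, e.g. Windows) and treats a missing CAs directory as success. A condensed stdlib sketch, with os.IsNotExist standing in for the errFileNotFound sentinel:

package main

import (
	"crypto/x509"
	"io/ioutil"
	"os"
	"path/filepath"
)

func loadRootCAs(dir string) (*x509.CertPool, error) {
	pool, _ := x509.SystemCertPool()
	if pool == nil {
		pool = x509.NewCertPool() // e.g. Windows has no system pool
	}
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		if os.IsNotExist(err) {
			return pool, nil // missing CAs directory is not an error
		}
		return pool, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue // skip directories, load only regular files
		}
		pem, err := ioutil.ReadFile(filepath.Join(dir, e.Name()))
		if err != nil {
			return pool, err
		}
		pool.AppendCertsFromPEM(pem)
	}
	return pool, nil
}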
@@ -150,24 +142,20 @@ func loadX509KeyPair(certFile, keyFile string) (tls.Certificate, error) {
return cert, nil
}
func getSSLConfig() (x509Certs []*x509.Certificate, rootCAs *x509.CertPool, c *certs.Certs, secureConn bool, err error) {
func getTLSConfig() (x509Certs []*x509.Certificate, c *certs.Certs, secureConn bool, err error) {
if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
return nil, nil, nil, false, nil
return nil, nil, false, nil
}
if x509Certs, err = parsePublicCertFile(getPublicCertFile()); err != nil {
return nil, nil, nil, false, err
return nil, nil, false, err
}
c, err = certs.New(getPublicCertFile(), getPrivateKeyFile(), loadX509KeyPair)
if err != nil {
return nil, nil, nil, false, err
}
if rootCAs, err = getRootCAs(getCADir()); err != nil {
return nil, nil, nil, false, err
return nil, nil, false, err
}
secureConn = true
return x509Certs, rootCAs, c, secureConn, nil
return x509Certs, c, secureConn, nil
}
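
getTLSConfig no longer returns the root CA pool; callers now obtain it separately via getRootCAs. The reloadable certs.Certs value it returns is typically wired into a tls.Config through GetCertificate; a minimal sketch, assuming the manager exposes that method:

package main

import "crypto/tls"

// certSource is a hypothetical reduction of the certs.Certs manager:
// the only capability the server needs is a certificate lookup that
// reflects on-disk rotation.
type certSource interface {
	GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error)
}

func newTLSServerConfig(c certSource) *tls.Config {
	return &tls.Config{
		MinVersion:     tls.VersionTLS12,
		GetCertificate: c.GetCertificate, // consulted per handshake
	}
}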

View File

@@ -223,7 +223,8 @@ func TestGetRootCAs(t *testing.T) {
certCAsDir string
expectedErr error
}{
{"nonexistent-dir", errFileNotFound},
// ignores non-existent directories.
{"nonexistent-dir", nil},
// Ignores directories.
{dir1, nil},
// Ignore empty directory.

View File

@@ -17,7 +17,7 @@
package cmd
import (
"context"
"crypto/tls"
"errors"
"net"
"os"
@@ -26,14 +26,16 @@ import (
"strings"
"time"
etcd "github.com/coreos/etcd/client"
etcd "github.com/coreos/etcd/clientv3"
dns2 "github.com/miekg/dns"
"github.com/minio/cli"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/cmd/logger/target/console"
"github.com/minio/minio/cmd/logger/target/http"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/dns"
"github.com/minio/minio-go/pkg/set"
xnet "github.com/minio/minio/pkg/net"
)
// Check for updates and print a notification message
@@ -48,74 +50,136 @@ func checkUpdate(mode string) {
}
}
// Initialize and load config from remote etcd or local config directory
func initConfig() {
if globalEtcdClient != nil {
kapi := etcd.NewKeysAPI(globalEtcdClient)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
_, err := kapi.Get(ctx, getConfigFile(), nil)
cancel()
if err == nil {
logger.FatalIf(migrateConfig(), "Config migration failed.")
logger.FatalIf(loadConfig(), "Unable to load config version: '%s'.", serverConfigVersion)
} else {
if etcd.IsKeyNotFound(err) {
logger.FatalIf(newConfig(), "Unable to initialize minio config for the first time.")
logger.Info("Created minio configuration file successfully at %v", globalEtcdClient.Endpoints())
} else {
logger.FatalIf(err, "Unable to load config version: '%s'.", serverConfigVersion)
}
}
return
// Load logger targets based on user's configuration
func loadLoggers() {
auditEndpoint, ok := os.LookupEnv("MINIO_AUDIT_LOGGER_HTTP_ENDPOINT")
if ok {
// Enable audit HTTP logging through ENV.
logger.AddAuditTarget(http.New(auditEndpoint, NewCustomHTTPTransport()))
}
if isFile(getConfigFile()) {
logger.FatalIf(migrateConfig(), "Config migration failed")
logger.FatalIf(loadConfig(), "Unable to load the configuration file")
loggerEndpoint, ok := os.LookupEnv("MINIO_LOGGER_HTTP_ENDPOINT")
if ok {
// Enable HTTP logging through ENV.
logger.AddTarget(http.New(loggerEndpoint, NewCustomHTTPTransport()))
} else {
// Config file does not exist, we create it fresh and return upon success.
logger.FatalIf(newConfig(), "Unable to initialize minio config for the first time")
logger.Info("Created minio configuration file successfully at " + getConfigDir())
for _, l := range globalServerConfig.Logger.HTTP {
if l.Enabled {
// Enable http logging
logger.AddTarget(http.New(l.Endpoint, NewCustomHTTPTransport()))
}
}
}
if globalServerConfig.Logger.Console.Enabled {
// Enable console logging
logger.AddTarget(console.New())
}
}
func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() string) (*ConfigDir, bool) {
var dir string
var dirSet bool
switch {
case ctx.IsSet(option):
dir = ctx.String(option)
dirSet = true
case ctx.GlobalIsSet(option):
dir = ctx.GlobalString(option)
dirSet = true
// The cli package does not expose the parent command's option. The code below is a workaround.
if dir == "" || dir == getDefaultDir() {
dirSet = false // Unset to false since GlobalIsSet() true is a false positive.
if ctx.Parent().GlobalIsSet(option) {
dir = ctx.Parent().GlobalString(option)
dirSet = true
}
}
default:
// Neither local nor global option is provided. In this case, try to use
// default directory.
dir = getDefaultDir()
if dir == "" {
logger.FatalIf(errInvalidArgument, "%s option must be provided", option)
}
}
if dir == "" {
logger.FatalIf(errors.New("empty directory"), "%s directory cannot be empty", option)
}
// Disallow relative paths, figure out absolute paths.
dirAbs, err := filepath.Abs(dir)
logger.FatalIf(err, "Unable to fetch absolute path for %s=%s", option, dir)
logger.FatalIf(mkdirAllIgnorePerm(dirAbs), "Unable to create directory specified %s=%s", option, dir)
return &ConfigDir{path: dirAbs}, dirSet
}
func handleCommonCmdArgs(ctx *cli.Context) {
var configDir string
if ctx.IsSet("config-dir") {
configDir = ctx.String("config-dir")
} else if ctx.GlobalIsSet("config-dir") {
configDir = ctx.GlobalString("config-dir")
// The cli package does not expose the parent command's "config-dir" option. The code below is a workaround.
if configDir == "" || configDir == getConfigDir() {
if ctx.Parent().GlobalIsSet("config-dir") {
configDir = ctx.Parent().GlobalString("config-dir")
}
}
} else {
// Neither local nor global config-dir option is provided. In this case, try to use
// default config directory.
configDir = getConfigDir()
if configDir == "" {
logger.FatalIf(errors.New("missing option"), "config-dir option must be provided")
}
// Get "json" flag from command line argument and
// enable json and quiet modes if the json flag is turned on.
globalCLIContext.JSON = ctx.IsSet("json") || ctx.GlobalIsSet("json")
if globalCLIContext.JSON {
logger.EnableJSON()
}
if configDir == "" {
logger.FatalIf(errors.New("empty directory"), "Configuration directory cannot be empty")
// Get quiet flag from command line argument.
globalCLIContext.Quiet = ctx.IsSet("quiet") || ctx.GlobalIsSet("quiet")
if globalCLIContext.Quiet {
logger.EnableQuiet()
}
// Disallow relative paths, figure out absolute paths.
configDirAbs, err := filepath.Abs(configDir)
logger.FatalIf(err, "Unable to fetch absolute path for config directory %s", configDir)
setConfigDir(configDirAbs)
// Get anonymous flag from command line argument.
globalCLIContext.Anonymous = ctx.IsSet("anonymous") || ctx.GlobalIsSet("anonymous")
if globalCLIContext.Anonymous {
logger.EnableAnonymous()
}
// Fetch address option
globalCLIContext.Addr = ctx.GlobalString("address")
if globalCLIContext.Addr == "" || globalCLIContext.Addr == ":"+globalMinioDefaultPort {
globalCLIContext.Addr = ctx.String("address")
}
// Set all config, certs and CAs directories.
var configSet, certsSet bool
globalConfigDir, configSet = newConfigDirFromCtx(ctx, "config-dir", defaultConfigDir.Get)
globalCertsDir, certsSet = newConfigDirFromCtx(ctx, "certs-dir", defaultCertsDir.Get)
// Remove this code when we deprecate and remove config-dir.
// This code is to make sure we inherit from the config-dir
// option if certs-dir is not provided.
if !certsSet && configSet {
globalCertsDir = &ConfigDir{path: filepath.Join(globalConfigDir.Get(), certsDir)}
}
globalCertsCADir = &ConfigDir{path: filepath.Join(globalCertsDir.Get(), certsCADir)}
logger.FatalIf(mkdirAllIgnorePerm(globalCertsCADir.Get()), "Unable to create certs CA directory at %s", globalCertsCADir.Get())
}
// Parses the given compression include list `extensions` or `content-types`.
func parseCompressIncludes(includes []string) ([]string, error) {
for _, e := range includes {
if len(e) == 0 {
return nil, uiErrInvalidCompressionIncludesValue(nil).Msg("extension/mime-type (%s) cannot be empty", e)
}
}
return includes, nil
}
func handleCommonEnvVars() {
compressEnvDelimiter := ","
// Start profiler if env is set.
if profiler := os.Getenv("_MINIO_PROFILER"); profiler != "" {
globalProfiler = startProfiler(profiler)
var err error
globalProfiler, err = startProfiler(profiler, "")
logger.FatalIf(err, "Unable to setup a profiler")
}
accessKey := os.Getenv("MINIO_ACCESS_KEY")
@@ -125,6 +189,7 @@ func handleCommonEnvVars() {
if err != nil {
logger.Fatal(uiErrInvalidCredentials(err), "Unable to validate credentials inherited from the shell environment")
}
cred.Expiration = timeSentinel
// credential Envs are set globally.
globalIsEnvCreds = true
@@ -134,7 +199,7 @@ func handleCommonEnvVars() {
if browser := os.Getenv("MINIO_BROWSER"); browser != "" {
browserFlag, err := ParseBoolFlag(browser)
if err != nil {
logger.Fatal(uiErrInvalidBrowserValue(nil).Msg("Unknown value `%s`", browser), "Unable to validate MINIO_BROWSER environment variable")
logger.Fatal(uiErrInvalidBrowserValue(nil).Msg("Unknown value `%s`", browser), "Invalid MINIO_BROWSER value in environment variable")
}
// browser Envs are set globally, this does not represent
@@ -153,27 +218,82 @@ func handleCommonEnvVars() {
etcdEndpointsEnv, ok := os.LookupEnv("MINIO_ETCD_ENDPOINTS")
if ok {
etcdEndpoints := strings.Split(etcdEndpointsEnv, ",")
var etcdSecure bool
for _, endpoint := range etcdEndpoints {
u, err := xnet.ParseURL(endpoint)
if err != nil {
logger.FatalIf(err, "Unable to initialize etcd with %s", etcdEndpoints)
}
// If one of the endpoint is https, we will use https directly.
etcdSecure = etcdSecure || u.Scheme == "https"
}
var err error
globalEtcdClient, err = etcd.New(etcd.Config{
Endpoints: etcdEndpoints,
Transport: NewCustomHTTPTransport(),
})
if etcdSecure {
// This is only to support client side certificate authentication
// https://coreos.com/etcd/docs/latest/op-guide/security.html
etcdClientCertFile, ok1 := os.LookupEnv("MINIO_ETCD_CLIENT_CERT")
etcdClientCertKey, ok2 := os.LookupEnv("MINIO_ETCD_CLIENT_CERT_KEY")
var getClientCertificate func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
if ok1 && ok2 {
getClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) {
cert, terr := tls.LoadX509KeyPair(etcdClientCertFile, etcdClientCertKey)
return &cert, terr
}
}
globalEtcdClient, err = etcd.New(etcd.Config{
Endpoints: etcdEndpoints,
DialTimeout: defaultDialTimeout,
DialKeepAliveTime: defaultDialKeepAlive,
TLS: &tls.Config{
RootCAs: globalRootCAs,
GetClientCertificate: getClientCertificate,
},
})
} else {
globalEtcdClient, err = etcd.New(etcd.Config{
Endpoints: etcdEndpoints,
DialTimeout: defaultDialTimeout,
DialKeepAliveTime: defaultDialKeepAlive,
})
}
logger.FatalIf(err, "Unable to initialize etcd with %s", etcdEndpoints)
}
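
The etcd client moves from the v2 keys API to clientv3, gaining dial timeouts and optional mutual TLS. The construction above, reduced to a standalone sketch (the timeout values are assumptions for defaultDialTimeout and defaultDialKeepAlive):

package main

import (
	"crypto/tls"
	"time"

	etcd "github.com/coreos/etcd/clientv3"
)

func newEtcdClient(endpoints []string, secure bool, certFile, keyFile string) (*etcd.Client, error) {
	cfg := etcd.Config{
		Endpoints:         endpoints,
		DialTimeout:       5 * time.Second,  // assumed defaultDialTimeout
		DialKeepAliveTime: 10 * time.Second, // assumed defaultDialKeepAlive
	}
	if secure {
		cfg.TLS = &tls.Config{
			// Client certificates are loaded lazily, per handshake,
			// mirroring the MINIO_ETCD_CLIENT_CERT* handling above.
			GetClientCertificate: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
				cert, err := tls.LoadX509KeyPair(certFile, keyFile)
				return &cert, err
			},
		}
	}
	return etcd.New(cfg)
}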
globalDomainName, globalIsEnvDomainName = os.LookupEnv("MINIO_DOMAIN")
if globalDomainName != "" {
if _, ok = dns2.IsDomainName(globalDomainName); !ok {
logger.Fatal(uiErrInvalidDomainValue(nil).Msg("Unknown value `%s`", globalDomainName), "Invalid MINIO_DOMAIN value in environment variable")
}
}
minioEndpointsEnv, ok := os.LookupEnv("MINIO_PUBLIC_IPS")
if ok {
minioEndpoints := strings.Split(minioEndpointsEnv, ",")
globalDomainIPs = set.NewStringSet()
for i, ip := range minioEndpoints {
if net.ParseIP(ip) == nil {
logger.FatalIf(errInvalidArgument, "Unable to initialize Minio server with invalid MINIO_PUBLIC_IPS[%d]: %s", i, ip)
var domainIPs = set.NewStringSet()
for _, endpoint := range minioEndpoints {
if net.ParseIP(endpoint) == nil {
// Not a literal IP; check whether the entry is a resolvable DNS name.
addrs, err := net.LookupHost(endpoint)
if err != nil {
logger.FatalIf(err, "Unable to initialize Minio server with [%s] invalid entry found in MINIO_PUBLIC_IPS", endpoint)
}
for _, addr := range addrs {
domainIPs.Add(addr)
}
continue
}
globalDomainIPs.Add(ip)
domainIPs.Add(endpoint)
}
updateDomainIPs(domainIPs)
} else {
// Add the IP addresses of discovered interfaces to the global domain IPs;
// loopback addresses are naturally dropped.
updateDomainIPs(localIP4)
}
if globalDomainName != "" && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil {
var err error
globalDNSConfig, err = dns.NewCoreDNS(globalDomainName, globalDomainIPs, globalMinioPort, globalEtcdClient)
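
MINIO_PUBLIC_IPS now accepts hostnames as well as literal IPs: anything that fails net.ParseIP is resolved via DNS before being added to the domain IP set. A stdlib-only sketch of that resolution step:

package main

import (
	"fmt"
	"net"
	"strings"
)

func resolvePublicIPs(env string) ([]string, error) {
	var ips []string
	for _, entry := range strings.Split(env, ",") {
		if net.ParseIP(entry) != nil {
			ips = append(ips, entry) // already a literal IP
			continue
		}
		addrs, err := net.LookupHost(entry) // entry is a DNS name
		if err != nil {
			return nil, fmt.Errorf("invalid MINIO_PUBLIC_IPS entry %q: %v", entry, err)
		}
		ips = append(ips, addrs...)
	}
	return ips, nil
}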
@@ -254,7 +374,7 @@ func handleCommonEnvVars() {
if worm := os.Getenv("MINIO_WORM"); worm != "" {
wormFlag, err := ParseBoolFlag(worm)
if err != nil {
logger.Fatal(uiErrInvalidWormValue(nil).Msg("Unknown value `%s`", worm), "Unable to validate MINIO_WORM environment variable")
logger.Fatal(uiErrInvalidWormValue(nil).Msg("Unknown value `%s`", worm), "Invalid MINIO_WORM value in environment variable")
}
// worm Envs are set globally, this does not represent
@@ -262,4 +382,28 @@ func handleCommonEnvVars() {
globalIsEnvWORM = true
globalWORMEnabled = bool(wormFlag)
}
if compress := os.Getenv("MINIO_COMPRESS"); compress != "" {
globalIsCompressionEnabled = strings.EqualFold(compress, "true")
}
compressExtensions := os.Getenv("MINIO_COMPRESS_EXTENSIONS")
compressMimeTypes := os.Getenv("MINIO_COMPRESS_MIMETYPES")
if compressExtensions != "" || compressMimeTypes != "" {
globalIsEnvCompression = true
if compressExtensions != "" {
extensions, err := parseCompressIncludes(strings.Split(compressExtensions, compressEnvDelimiter))
if err != nil {
logger.Fatal(err, "Invalid MINIO_COMPRESS_EXTENSIONS value (`%s`)", extensions)
}
globalCompressExtensions = extensions
}
if compressMimeTypes != "" {
contenttypes, err := parseCompressIncludes(strings.Split(compressMimeTypes, compressEnvDelimiter))
if err != nil {
logger.Fatal(err, "Invalid MINIO_COMPRESS_MIMETYPES value (`%s`)", contenttypes)
}
globalCompressMimeTypes = contenttypes
}
}
}
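
The MINIO_COMPRESS_* variables are comma-separated lists validated by parseCompressIncludes, which rejects empty entries. A stripped-down sketch of that parsing:

package main

import (
	"errors"
	"strings"
)

// parseIncludes mirrors parseCompressIncludes: split on commas and
// reject empty entries.
func parseIncludes(env string) ([]string, error) {
	includes := strings.Split(env, ",")
	for _, e := range includes {
		if e == "" {
			return nil, errors.New("extension/mime-type cannot be empty")
		}
	}
	return includes, nil
}

For example, MINIO_COMPRESS_EXTENSIONS=".txt,.log,.csv" yields three entries, while ".txt,," is rejected for its empty entries.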

cmd/config-common.go (new file, 151 lines)
View File

@@ -0,0 +1,151 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"errors"
"fmt"
etcd "github.com/coreos/etcd/clientv3"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/hash"
)
var errConfigNotFound = errors.New("config file not found")
func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]byte, error) {
var buffer bytes.Buffer
// Read entire content by setting size to -1
if err := objAPI.GetObject(ctx, minioMetaBucket, configFile, 0, -1, &buffer, "", ObjectOptions{}); err != nil {
// Treat object not found as config not found.
if isErrObjectNotFound(err) {
return nil, errConfigNotFound
}
logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
logger.LogIf(ctx, err)
return nil, err
}
// Return config not found on empty content.
if buffer.Len() == 0 {
return nil, errConfigNotFound
}
return buffer.Bytes(), nil
}
func deleteConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) error {
_, err := client.Delete(ctx, configFile)
return err
}
func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
return objAPI.DeleteObject(ctx, minioMetaBucket, configFile)
}
func saveConfigEtcd(ctx context.Context, client *etcd.Client, configFile string, data []byte) error {
timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
defer cancel()
_, err := client.Put(timeoutCtx, configFile, string(data))
if err == context.DeadlineExceeded {
return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
} else if err != nil {
return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
}
return nil
}
func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
if err != nil {
return err
}
_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader, nil, nil), nil, ObjectOptions{})
return err
}
func readConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) ([]byte, error) {
timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
defer cancel()
resp, err := client.Get(timeoutCtx, configFile)
if err != nil {
if err == context.DeadlineExceeded {
return nil, fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
}
return nil, fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
}
if resp.Count == 0 {
return nil, errConfigNotFound
}
for _, ev := range resp.Kvs {
if string(ev.Key) == configFile {
return ev.Value, nil
}
}
return nil, errConfigNotFound
}
// watchConfigEtcd - watches for changes on `configFile` on etcd and loads them.
func watchConfigEtcd(objAPI ObjectLayer, configFile string, loadCfgFn func(ObjectLayer) error) {
ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
defer cancel()
for watchResp := range globalEtcdClient.Watch(ctx, configFile) {
for _, event := range watchResp.Events {
if event.IsModify() || event.IsCreate() {
loadCfgFn(objAPI)
}
}
}
}
func checkConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) error {
timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
defer cancel()
resp, err := client.Get(timeoutCtx, configFile)
if err != nil {
if err == context.DeadlineExceeded {
return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
}
return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
}
if resp.Count == 0 {
return errConfigNotFound
}
return nil
}
func checkConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
if globalEtcdClient != nil {
return checkConfigEtcd(ctx, globalEtcdClient, configFile)
}
if _, err := objAPI.GetObjectInfo(ctx, minioMetaBucket, configFile, ObjectOptions{}); err != nil {
// Treat object not found as config not found.
if isErrObjectNotFound(err) {
return errConfigNotFound
}
logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
logger.LogIf(ctx, err)
return err
}
return nil
}
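
Every etcd operation in this new file is bounded by defaultContextTimeout so an unreachable etcd fails fast instead of hanging startup. The read path, reduced to that timeout pattern (the 5s value is an assumption for defaultContextTimeout):

package main

import (
	"context"
	"errors"
	"time"

	etcd "github.com/coreos/etcd/clientv3"
)

var errNotFound = errors.New("config file not found")

func getWithTimeout(client *etcd.Client, key string) ([]byte, error) {
	// Bound the Get so a dead etcd returns DeadlineExceeded quickly.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	resp, err := client.Get(ctx, key)
	if err != nil {
		return nil, err
	}
	for _, kv := range resp.Kvs {
		if string(kv.Key) == key {
			return kv.Value, nil
		}
	}
	return nil, errNotFound
}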

View File

@@ -17,17 +17,22 @@
package cmd
import (
"context"
"errors"
"fmt"
"os"
"reflect"
"sync"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/event/target"
"github.com/minio/minio/pkg/quick"
"github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/iam/validator"
xnet "github.com/minio/minio/pkg/net"
)
// Steps to move from version N to version N+1
@@ -39,9 +44,9 @@ import (
// 6. Make changes in config-current_test.go for any test change
// Config version
const serverConfigVersion = "26"
const serverConfigVersion = "33"
type serverConfig = serverConfigV26
type serverConfig = serverConfigV33
var (
// globalServerConfig server config.
@@ -62,11 +67,21 @@ func (s *serverConfig) SetRegion(region string) {
// GetRegion get current region.
func (s *serverConfig) GetRegion() string {
if globalIsEnvRegion {
return globalServerRegion
}
if s == nil {
return ""
}
return s.Region
}
// SetCredential sets new credential and returns the previous credential.
func (s *serverConfig) SetCredential(creds auth.Credentials) (prevCred auth.Credentials) {
if creds.IsValid() && globalActiveCred.IsValid() {
globalActiveCred = creds
}
// Save previous credential.
prevCred = s.Credential
@@ -79,15 +94,12 @@ func (s *serverConfig) SetCredential(creds auth.Credentials) (prevCred auth.Cred
// GetCredential gets the current credentials.
func (s *serverConfig) GetCredential() auth.Credentials {
if globalActiveCred.IsValid() {
return globalActiveCred
}
return s.Credential
}
// SetBrowser set if browser is enabled.
func (s *serverConfig) SetBrowser(b bool) {
// Set the new value.
s.Browser = BoolFlag(b)
}
// SetWorm set if worm is enabled.
func (s *serverConfig) SetWorm(b bool) {
// Set the new value.
@@ -102,16 +114,23 @@ func (s *serverConfig) SetStorageClass(standardClass, rrsClass storageClass) {
// GetStorageClass reads storage class fields from current config.
// It returns the standard and reduced redundancy storage class struct
func (s *serverConfig) GetStorageClass() (storageClass, storageClass) {
if globalIsStorageClass {
return globalStandardStorageClass, globalRRStorageClass
}
if s == nil {
return storageClass{}, storageClass{}
}
return s.StorageClass.Standard, s.StorageClass.RRS
}
// GetBrowser gets the current browser setting.
func (s *serverConfig) GetBrowser() bool {
return bool(s.Browser)
}
// GetWorm gets the current WORM setting.
func (s *serverConfig) GetWorm() bool {
if globalIsEnvWORM {
return globalWORMEnabled
}
if s == nil {
return false
}
return bool(s.Worm)
}
@@ -125,22 +144,262 @@ func (s *serverConfig) SetCacheConfig(drives, exclude []string, expiry int, maxu
// GetCacheConfig gets the current cache config
func (s *serverConfig) GetCacheConfig() CacheConfig {
if globalIsDiskCacheEnabled {
return CacheConfig{
Drives: globalCacheDrives,
Exclude: globalCacheExcludes,
Expiry: globalCacheExpiry,
MaxUse: globalCacheMaxUse,
}
}
if s == nil {
return CacheConfig{}
}
return s.Cache
}
// Save config file to corresponding backend
func Save(configFile string, data interface{}) error {
return quick.SaveConfig(data, configFile, globalEtcdClient)
func (s *serverConfig) Validate() error {
if s == nil {
return nil
}
if s.Version != serverConfigVersion {
return fmt.Errorf("configuration version mismatch. Expected: %s, Got: %s", serverConfigVersion, s.Version)
}
// Validate credential fields only when
// they are not set via the environment
// Error out if the global env credential is not set and the config has an invalid credential
if !globalIsEnvCreds && !s.Credential.IsValid() {
return errors.New("invalid credential in config file")
}
// Region: nothing to validate
// Worm, Cache and StorageClass values are already validated during json unmarshal
for _, v := range s.Notify.AMQP {
if err := v.Validate(); err != nil {
return fmt.Errorf("amqp: %s", err)
}
}
for _, v := range s.Notify.Elasticsearch {
if err := v.Validate(); err != nil {
return fmt.Errorf("elasticsearch: %s", err)
}
}
for _, v := range s.Notify.Kafka {
if err := v.Validate(); err != nil {
return fmt.Errorf("kafka: %s", err)
}
}
for _, v := range s.Notify.MQTT {
if err := v.Validate(); err != nil {
return fmt.Errorf("mqtt: %s", err)
}
}
for _, v := range s.Notify.MySQL {
if err := v.Validate(); err != nil {
return fmt.Errorf("mysql: %s", err)
}
}
for _, v := range s.Notify.NATS {
if err := v.Validate(); err != nil {
return fmt.Errorf("nats: %s", err)
}
}
for _, v := range s.Notify.NSQ {
if err := v.Validate(); err != nil {
return fmt.Errorf("nsq: %s", err)
}
}
for _, v := range s.Notify.PostgreSQL {
if err := v.Validate(); err != nil {
return fmt.Errorf("postgreSQL: %s", err)
}
}
for _, v := range s.Notify.Redis {
if err := v.Validate(); err != nil {
return fmt.Errorf("redis: %s", err)
}
}
for _, v := range s.Notify.Webhook {
if err := v.Validate(); err != nil {
return fmt.Errorf("webhook: %s", err)
}
}
return nil
}
// Load config from backend
func Load(configFile string, data interface{}) (quick.Config, error) {
return quick.LoadConfig(configFile, globalEtcdClient, data)
// SetCompressionConfig sets the current compression config
func (s *serverConfig) SetCompressionConfig(extensions []string, mimeTypes []string) {
s.Compression.Extensions = extensions
s.Compression.MimeTypes = mimeTypes
s.Compression.Enabled = globalIsCompressionEnabled
}
// GetVersion gets config version from backend
func GetVersion(configFile string) (string, error) {
return quick.GetVersion(configFile, globalEtcdClient)
// GetCompressionConfig gets the current compression config
func (s *serverConfig) GetCompressionConfig() compressionConfig {
return s.Compression
}
func (s *serverConfig) loadFromEnvs() {
// If env is set override the credentials from config file.
if globalIsEnvCreds {
s.SetCredential(globalActiveCred)
}
if globalIsEnvWORM {
s.SetWorm(globalWORMEnabled)
}
if globalIsEnvRegion {
s.SetRegion(globalServerRegion)
}
if globalIsStorageClass {
s.SetStorageClass(globalStandardStorageClass, globalRRStorageClass)
}
if globalIsDiskCacheEnabled {
s.SetCacheConfig(globalCacheDrives, globalCacheExcludes, globalCacheExpiry, globalCacheMaxUse)
}
if err := Environment.LookupKMSConfig(s.KMS); err != nil {
logger.FatalIf(err, "Unable to setup the KMS")
}
if globalIsEnvCompression {
s.SetCompressionConfig(globalCompressExtensions, globalCompressMimeTypes)
}
if jwksURL, ok := os.LookupEnv("MINIO_IAM_JWKS_URL"); ok {
if u, err := xnet.ParseURL(jwksURL); err == nil {
s.OpenID.JWKS.URL = u
logger.FatalIf(s.OpenID.JWKS.PopulatePublicKey(), "Unable to populate public key from JWKS URL")
}
}
if opaURL, ok := os.LookupEnv("MINIO_IAM_OPA_URL"); ok {
if u, err := xnet.ParseURL(opaURL); err == nil {
s.Policy.OPA.URL = u
s.Policy.OPA.AuthToken = os.Getenv("MINIO_IAM_OPA_AUTHTOKEN")
}
}
}
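
loadFromEnvs applies a single rule: when a value was provided through the environment, it overrides whatever the persisted config holds. The pattern in isolation:

package main

import "os"

type serverCfg struct{ Region string }

// applyEnv overrides stored values with environment values when set;
// MINIO_REGION here illustrates the rule for one field.
func (c *serverCfg) applyEnv() {
	if region, ok := os.LookupEnv("MINIO_REGION"); ok && region != "" {
		c.Region = region // ENV wins over the stored value
	}
}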
// TestNotificationTargets tries to establish connections to all notification
// targets when enabled. This is a good way to make sure all configurations
// set by the user can work.
func (s *serverConfig) TestNotificationTargets() error {
for k, v := range s.Notify.AMQP {
if !v.Enable {
continue
}
t, err := target.NewAMQPTarget(k, v)
if err != nil {
return fmt.Errorf("amqp(%s): %s", k, err.Error())
}
t.Close()
}
for k, v := range s.Notify.Elasticsearch {
if !v.Enable {
continue
}
t, err := target.NewElasticsearchTarget(k, v)
if err != nil {
return fmt.Errorf("elasticsearch(%s): %s", k, err.Error())
}
t.Close()
}
for k, v := range s.Notify.Kafka {
if !v.Enable {
continue
}
t, err := target.NewKafkaTarget(k, v)
if err != nil {
return fmt.Errorf("kafka(%s): %s", k, err.Error())
}
t.Close()
}
for k, v := range s.Notify.MQTT {
if !v.Enable {
continue
}
t, err := target.NewMQTTTarget(k, v)
if err != nil {
return fmt.Errorf("mqtt(%s): %s", k, err.Error())
}
t.Close()
}
for k, v := range s.Notify.MySQL {
if !v.Enable {
continue
}
t, err := target.NewMySQLTarget(k, v)
if err != nil {
return fmt.Errorf("mysql(%s): %s", k, err.Error())
}
t.Close()
}
for k, v := range s.Notify.NATS {
if !v.Enable {
continue
}
t, err := target.NewNATSTarget(k, v)
if err != nil {
return fmt.Errorf("nats(%s): %s", k, err.Error())
}
t.Close()
}
for k, v := range s.Notify.NSQ {
if !v.Enable {
continue
}
t, err := target.NewNSQTarget(k, v)
if err != nil {
return fmt.Errorf("nsq(%s): %s", k, err.Error())
}
t.Close()
}
for k, v := range s.Notify.PostgreSQL {
if !v.Enable {
continue
}
t, err := target.NewPostgreSQLTarget(k, v)
if err != nil {
return fmt.Errorf("postgreSQL(%s): %s", k, err.Error())
}
t.Close()
}
for k, v := range s.Notify.Redis {
if !v.Enable {
continue
}
t, err := target.NewRedisTarget(k, v)
if err != nil {
return fmt.Errorf("redis(%s): %s", k, err.Error())
}
t.Close()
}
return nil
}
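
TestNotificationTargets validates configuration by constructing each enabled target once and closing it immediately; no events are sent. The same probe idea, generalized over a hypothetical reduced target interface:

package main

import "fmt"

// probeTarget is a hypothetical reduction of a notification target:
// constructing it dials the endpoint, Close releases the connection.
type probeTarget interface {
	Close() error
}

// probe validates a configuration by dialing once and closing.
func probe(name string, dial func() (probeTarget, error)) error {
	t, err := dial()
	if err != nil {
		return fmt.Errorf("%s: %v", name, err)
	}
	return t.Close()
}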
// Returns the string describing a difference with the given
@@ -154,18 +413,18 @@ func (s *serverConfig) ConfigDiff(t *serverConfig) string {
return "Credential configuration differs"
case s.Region != t.Region:
return "Region configuration differs"
case s.Browser != t.Browser:
return "Browser configuration differs"
case s.Domain != t.Domain:
return "Domain configuration differs"
case s.StorageClass != t.StorageClass:
return "StorageClass configuration differs"
case !reflect.DeepEqual(s.Cache, t.Cache):
return "Cache configuration differs"
case !reflect.DeepEqual(s.Compression, t.Compression):
return "Compression configuration differs"
case !reflect.DeepEqual(s.Notify.AMQP, t.Notify.AMQP):
return "AMQP Notification configuration differs"
case !reflect.DeepEqual(s.Notify.NATS, t.Notify.NATS):
return "NATS Notification configuration differs"
case !reflect.DeepEqual(s.Notify.NSQ, t.Notify.NSQ):
return "NSQ Notification configuration differs"
case !reflect.DeepEqual(s.Notify.Elasticsearch, t.Notify.Elasticsearch):
return "ElasticSearch Notification configuration differs"
case !reflect.DeepEqual(s.Notify.Redis, t.Notify.Redis):
@@ -180,6 +439,10 @@ func (s *serverConfig) ConfigDiff(t *serverConfig) string {
return "MySQL Notification configuration differs"
case !reflect.DeepEqual(s.Notify.MQTT, t.Notify.MQTT):
return "MQTT Notification configuration differs"
case !reflect.DeepEqual(s.Logger, t.Logger):
return "Logger configuration differs"
case !reflect.DeepEqual(s.KMS, t.KMS):
return "KMS configuration differs"
case reflect.DeepEqual(s, t):
return ""
default:
@@ -197,7 +460,6 @@ func newServerConfig() *serverConfig {
Version: serverConfigVersion,
Credential: cred,
Region: globalMinioDefaultRegion,
Browser: true,
StorageClass: storageClassConfig{
Standard: storageClass{},
RRS: storageClass{},
@@ -208,7 +470,13 @@ func newServerConfig() *serverConfig {
Expiry: globalCacheExpiry,
MaxUse: globalCacheMaxUse,
},
KMS: crypto.KMSConfig{},
Notify: notifier{},
Compression: compressionConfig{
Enabled: false,
Extensions: globalCompressExtensions,
MimeTypes: globalCompressMimeTypes,
},
}
// Make sure to initialize notification configs.
@@ -222,6 +490,8 @@ func newServerConfig() *serverConfig {
srvCfg.Notify.Redis["1"] = target.RedisArgs{}
srvCfg.Notify.NATS = make(map[string]target.NATSArgs)
srvCfg.Notify.NATS["1"] = target.NATSArgs{}
srvCfg.Notify.NSQ = make(map[string]target.NSQArgs)
srvCfg.Notify.NSQ["1"] = target.NSQArgs{}
srvCfg.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs)
srvCfg.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{}
srvCfg.Notify.MySQL = make(map[string]target.MySQLArgs)
@@ -235,184 +505,146 @@ func newServerConfig() *serverConfig {
srvCfg.Cache.Exclude = make([]string, 0)
srvCfg.Cache.Expiry = globalCacheExpiry
srvCfg.Cache.MaxUse = globalCacheMaxUse
// Console logging is on by default
srvCfg.Logger.Console.Enabled = true
// Create an example of HTTP logger
srvCfg.Logger.HTTP = make(map[string]loggerHTTP)
srvCfg.Logger.HTTP["target1"] = loggerHTTP{Endpoint: "https://username:password@example.com/api"}
return srvCfg
}
// newConfig - initialize a new server config, saves env parameters if
// found, otherwise use default parameters
func newConfig() error {
// Initialize server config.
srvCfg, err := newQuickConfig(newServerConfig())
if err != nil {
return err
}
// If env is set override the credentials from config file.
if globalIsEnvCreds {
srvCfg.SetCredential(globalActiveCred)
}
if globalIsEnvBrowser {
srvCfg.SetBrowser(globalIsBrowserEnabled)
}
if globalIsEnvWORM {
srvCfg.SetWorm(globalWORMEnabled)
}
if globalIsEnvRegion {
srvCfg.SetRegion(globalServerRegion)
}
if globalIsEnvDomainName {
srvCfg.Domain = globalDomainName
}
if globalIsStorageClass {
srvCfg.SetStorageClass(globalStandardStorageClass, globalRRStorageClass)
}
if globalIsDiskCacheEnabled {
srvCfg.SetCacheConfig(globalCacheDrives, globalCacheExcludes, globalCacheExpiry, globalCacheMaxUse)
}
// hold the mutex lock before a new config is assigned.
// Save the new config globally.
// unlock the mutex.
globalServerConfigMu.Lock()
globalServerConfig = srvCfg
globalServerConfigMu.Unlock()
// Save config into file.
return Save(getConfigFile(), globalServerConfig)
}
// newQuickConfig - initialize a new server config, with an allocated
// quick.Config interface.
func newQuickConfig(srvCfg *serverConfig) (*serverConfig, error) {
qcfg, err := quick.NewConfig(srvCfg, globalEtcdClient)
if err != nil {
return nil, err
}
srvCfg.Config = qcfg
return srvCfg, nil
}
// getValidConfig - returns valid server configuration
func getValidConfig() (*serverConfig, error) {
srvCfg := &serverConfig{
Region: globalMinioDefaultRegion,
Browser: true,
}
var err error
srvCfg, err = newQuickConfig(srvCfg)
if err != nil {
return nil, err
}
configFile := getConfigFile()
if err = srvCfg.Load(configFile); err != nil {
return nil, err
}
if srvCfg.Version != serverConfigVersion {
return nil, fmt.Errorf("configuration version mismatch. Expected: %s, Got: %s", serverConfigVersion, srvCfg.Version)
}
// Validate credential fields only when
// they are not set via the environment
// Error out if the global env credential is not set and the config has an invalid credential
if !globalIsEnvCreds && !srvCfg.Credential.IsValid() {
return nil, errors.New("invalid credential in config file " + getConfigFile())
}
return srvCfg, nil
}
// loadConfig - loads a new config from disk, overrides params from env
// if found and valid
func loadConfig() error {
srvCfg, err := getValidConfig()
if err != nil {
return uiErrInvalidConfig(nil).Msg(err.Error())
}
// If env is set override the credentials from config file.
if globalIsEnvCreds {
srvCfg.SetCredential(globalActiveCred)
}
if globalIsEnvBrowser {
srvCfg.SetBrowser(globalIsBrowserEnabled)
}
if globalIsEnvRegion {
srvCfg.SetRegion(globalServerRegion)
}
if globalIsEnvDomainName {
srvCfg.Domain = globalDomainName
}
if globalIsStorageClass {
srvCfg.SetStorageClass(globalStandardStorageClass, globalRRStorageClass)
}
if globalIsDiskCacheEnabled {
srvCfg.SetCacheConfig(globalCacheDrives, globalCacheExcludes, globalCacheExpiry, globalCacheMaxUse)
}
// hold the mutex lock before a new config is assigned.
globalServerConfigMu.Lock()
globalServerConfig = srvCfg
func (s *serverConfig) loadToCachedConfigs() {
if !globalIsEnvCreds {
globalActiveCred = globalServerConfig.GetCredential()
}
if !globalIsEnvBrowser {
globalIsBrowserEnabled = globalServerConfig.GetBrowser()
globalActiveCred = s.GetCredential()
}
if !globalIsEnvWORM {
globalWORMEnabled = globalServerConfig.GetWorm()
globalWORMEnabled = s.GetWorm()
}
if !globalIsEnvRegion {
globalServerRegion = globalServerConfig.GetRegion()
}
if !globalIsEnvDomainName {
globalDomainName = globalServerConfig.Domain
globalServerRegion = s.GetRegion()
}
if !globalIsStorageClass {
globalStandardStorageClass, globalRRStorageClass = globalServerConfig.GetStorageClass()
globalStandardStorageClass, globalRRStorageClass = s.GetStorageClass()
}
if !globalIsDiskCacheEnabled {
cacheConf := globalServerConfig.GetCacheConfig()
cacheConf := s.GetCacheConfig()
globalCacheDrives = cacheConf.Drives
globalCacheExcludes = cacheConf.Exclude
globalCacheExpiry = cacheConf.Expiry
globalCacheMaxUse = cacheConf.MaxUse
}
if err := Environment.LookupKMSConfig(s.KMS); err != nil {
logger.FatalIf(err, "Unable to setup the KMS")
}
if !globalIsCompressionEnabled {
compressionConf := s.GetCompressionConfig()
globalCompressExtensions = compressionConf.Extensions
globalCompressMimeTypes = compressionConf.MimeTypes
globalIsCompressionEnabled = compressionConf.Enabled
}
globalIAMValidators = getAuthValidators(s)
if s.Policy.OPA.URL != nil && s.Policy.OPA.URL.String() != "" {
globalPolicyOPA = iampolicy.NewOpa(iampolicy.OpaArgs{
URL: s.Policy.OPA.URL,
AuthToken: s.Policy.OPA.AuthToken,
Transport: NewCustomHTTPTransport(),
CloseRespFn: xhttp.DrainBody,
})
}
}
// newSrvConfig - initialize a new server config, saves env parameters if
// found, otherwise use default parameters
func newSrvConfig(objAPI ObjectLayer) error {
// Initialize server config.
srvCfg := newServerConfig()
// Override any values from ENVs.
srvCfg.loadFromEnvs()
// Load values to cached global values.
srvCfg.loadToCachedConfigs()
// hold the mutex lock before a new config is assigned.
globalServerConfigMu.Lock()
globalServerConfig = srvCfg
globalServerConfigMu.Unlock()
// Save config into file.
return saveServerConfig(context.Background(), objAPI, globalServerConfig)
}
// getValidConfig - returns valid server configuration
func getValidConfig(objAPI ObjectLayer) (*serverConfig, error) {
srvCfg, err := readServerConfig(context.Background(), objAPI)
if err != nil {
return nil, err
}
return srvCfg, srvCfg.Validate()
}
// loadConfig - loads a new config from disk, overrides params from env
// if found and valid
func loadConfig(objAPI ObjectLayer) error {
srvCfg, err := getValidConfig(objAPI)
if err != nil {
return uiErrInvalidConfig(nil).Msg(err.Error())
}
// Override any values from ENVs.
srvCfg.loadFromEnvs()
// Load values to cached global values.
srvCfg.loadToCachedConfigs()
// hold the mutex lock before a new config is assigned.
globalServerConfigMu.Lock()
globalServerConfig = srvCfg
globalServerConfigMu.Unlock()
return nil
}
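
Both newSrvConfig and loadConfig end the same way: the config is fully built and cached before being published under globalServerConfigMu, so readers never observe a half-initialized value. That final step, standalone:

package main

import "sync"

type serverConfig struct{ Version string }

var (
	configMu sync.Mutex
	current  *serverConfig
)

// publish swaps in a fully-built config under the lock, so concurrent
// readers never see a partially initialized value.
func publish(c *serverConfig) {
	configMu.Lock()
	current = c
	configMu.Unlock()
}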
// getAuthValidators - returns ValidatorList which contains
// enabled providers in server config.
// A new authentication provider is added as follows:
// * Add a new provider in pkg/iam/validator package.
func getAuthValidators(config *serverConfig) *validator.Validators {
validators := validator.NewValidators()
if config.OpenID.JWKS.URL != nil {
validators.Add(validator.NewJWT(config.OpenID.JWKS))
}
return validators
}
// getNotificationTargets - returns TargetList which contains enabled targets in serverConfig.
// A new notification target is added like below
// * Add a new target in pkg/event/target package.
// * Add newly added target configuration to serverConfig.Notify.<TARGET_NAME>.
// * Handle the configuration in this function to create/add into TargetList.
func getNotificationTargets(config *serverConfig) (*event.TargetList, error) {
func getNotificationTargets(config *serverConfig) *event.TargetList {
targetList := event.NewTargetList()
if config == nil {
return targetList
}
for id, args := range config.Notify.AMQP {
if args.Enable {
newTarget, err := target.NewAMQPTarget(id, args)
if err != nil {
return nil, err
logger.LogIf(context.Background(), err)
continue
}
if err = targetList.Add(newTarget); err != nil {
return nil, err
logger.LogIf(context.Background(), err)
continue
}
}
}
@@ -421,10 +653,14 @@ func getNotificationTargets(config *serverConfig) (*event.TargetList, error) {
if args.Enable {
newTarget, err := target.NewElasticsearchTarget(id, args)
if err != nil {
return nil, err
logger.LogIf(context.Background(), err)
continue
}
if err = targetList.Add(newTarget); err != nil {
return nil, err
logger.LogIf(context.Background(), err)
continue
}
}
}
@@ -433,22 +669,27 @@ func getNotificationTargets(config *serverConfig) (*event.TargetList, error) {
if args.Enable {
newTarget, err := target.NewKafkaTarget(id, args)
if err != nil {
return nil, err
logger.LogIf(context.Background(), err)
continue
}
if err = targetList.Add(newTarget); err != nil {
return nil, err
logger.LogIf(context.Background(), err)
continue
}
}
}
for id, args := range config.Notify.MQTT {
if args.Enable {
args.RootCAs = globalRootCAs
newTarget, err := target.NewMQTTTarget(id, args)
if err != nil {
return nil, err
logger.LogIf(context.Background(), err)
continue
}
if err = targetList.Add(newTarget); err != nil {
return nil, err
logger.LogIf(context.Background(), err)
continue
}
}
}
@@ -457,10 +698,12 @@ func getNotificationTargets(config *serverConfig) (*event.TargetList, error) {
if args.Enable {
newTarget, err := target.NewMySQLTarget(id, args)
if err != nil {
return nil, err
logger.LogIf(context.Background(), err)
continue
}
if err = targetList.Add(newTarget); err != nil {
return nil, err
logger.LogIf(context.Background(), err)
continue
}
}
}
@@ -469,10 +712,26 @@ func getNotificationTargets(config *serverConfig) (*event.TargetList, error) {
if args.Enable {
newTarget, err := target.NewNATSTarget(id, args)
if err != nil {
return nil, err
logger.LogIf(context.Background(), err)
continue
}
if err = targetList.Add(newTarget); err != nil {
return nil, err
logger.LogIf(context.Background(), err)
continue
}
}
}
for id, args := range config.Notify.NSQ {
if args.Enable {
newTarget, err := target.NewNSQTarget(id, args)
if err != nil {
logger.LogIf(context.Background(), err)
continue
}
if err = targetList.Add(newTarget); err != nil {
logger.LogIf(context.Background(), err)
continue
}
}
}
@@ -481,10 +740,12 @@ func getNotificationTargets(config *serverConfig) (*event.TargetList, error) {
if args.Enable {
newTarget, err := target.NewPostgreSQLTarget(id, args)
if err != nil {
return nil, err
logger.LogIf(context.Background(), err)
continue
}
if err = targetList.Add(newTarget); err != nil {
return nil, err
logger.LogIf(context.Background(), err)
continue
}
}
}
@@ -493,22 +754,26 @@ func getNotificationTargets(config *serverConfig) (*event.TargetList, error) {
if args.Enable {
newTarget, err := target.NewRedisTarget(id, args)
if err != nil {
return nil, err
logger.LogIf(context.Background(), err)
continue
}
if err = targetList.Add(newTarget); err != nil {
return nil, err
logger.LogIf(context.Background(), err)
continue
}
}
}
for id, args := range config.Notify.Webhook {
if args.Enable {
args.RootCAs = globalRootCAs
newTarget := target.NewWebhookTarget(id, args)
if err := targetList.Add(newTarget); err != nil {
return nil, err
logger.LogIf(context.Background(), err)
continue
}
}
}
return targetList, nil
return targetList
}
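The rewritten getNotificationTargets logs and skips any target that fails to initialize instead of aborting the whole list. A minimal standalone sketch of that log-and-continue shape, using stand-in types rather than the real event.TargetList and target.New*Target constructors:
package main
import (
	"errors"
	"log"
)
// Stand-ins for event.TargetList and the target.New*Target constructors,
// here only to show the log-and-continue policy adopted above.
type targetList struct{ ids []string }
func (l *targetList) add(id string) error {
	l.ids = append(l.ids, id)
	return nil
}
func newTarget(id string, broken bool) (string, error) {
	if broken {
		return "", errors.New("dial failed")
	}
	return id, nil
}
func main() {
	list := &targetList{}
	for id, broken := range map[string]bool{"amqp-1": false, "redis-1": true} {
		t, err := newTarget(id, broken)
		if err != nil {
			log.Printf("skipping notification target %s: %v", id, err) // was: return nil, err
			continue
		}
		if err := list.add(t); err != nil {
			log.Printf("skipping notification target %s: %v", id, err)
			continue
		}
	}
	log.Printf("registered: %v", list.ids)
}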

View File

@@ -17,9 +17,9 @@
package cmd
import (
"io/ioutil"
"context"
"os"
"path/filepath"
"path"
"testing"
"github.com/minio/minio/pkg/auth"
@@ -27,12 +27,15 @@ import (
)
func TestServerConfig(t *testing.T) {
objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
t.Fatalf("Init Test config failed")
}
if globalServerConfig.GetRegion() != globalMinioDefaultRegion {
t.Errorf("Expecting region `us-east-1` found %s", globalServerConfig.GetRegion())
@@ -49,16 +52,12 @@ func TestServerConfig(t *testing.T) {
t.Errorf("Expecting version %s found %s", globalServerConfig.GetVersion(), serverConfigVersion)
}
// Attempt to save.
if err := saveServerConfig(context.Background(), objLayer, globalServerConfig); err != nil {
t.Fatalf("Unable to save updated config file %s", err)
}
// Initialize server config.
if err := loadConfig(objLayer); err != nil {
t.Fatalf("Unable to initialize from updated config file %s", err)
}
}
@@ -68,6 +67,9 @@ func TestServerConfigWithEnvs(t *testing.T) {
os.Setenv("MINIO_BROWSER", "off")
defer os.Unsetenv("MINIO_BROWSER")
os.Setenv("MINIO_WORM", "on")
defer os.Unsetenv("MINIO_WORM")
os.Setenv("MINIO_ACCESS_KEY", "minio")
defer os.Unsetenv("MINIO_ACCESS_KEY")
@@ -82,61 +84,70 @@ func TestServerConfigWithEnvs(t *testing.T) {
defer resetGlobalIsEnvs()
objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
t.Fatalf("Init Test config failed")
}
globalObjLayerMutex.Lock()
globalObjectAPI = objLayer
globalObjLayerMutex.Unlock()
serverHandleEnvVars()
// Init config
initConfig(objLayer)
// Check if serverConfig has browser disabled
if globalIsBrowserEnabled {
t.Error("Expected browser to be disabled but it is not")
}
// Check if serverConfig returns WORM config from the env
if !globalServerConfig.GetWorm() {
t.Error("Expected WORM to be enabled but it is not")
}
// Check if serverConfig has region from the environment
if globalServerConfig.GetRegion() != "us-west-1" {
t.Errorf("Expecting region to be \"us-west-1\" found %v", globalServerConfig.GetRegion())
t.Errorf("Expected region to be \"us-west-1\", found %v", globalServerConfig.GetRegion())
}
// Check if serverConfig has credentials from the environment
cred := globalServerConfig.GetCredential()
if cred.AccessKey != "minio" {
t.Errorf("Expecting access key to be `minio` found %s", cred.AccessKey)
t.Errorf("Expected access key to be `minio`, found %s", cred.AccessKey)
}
if cred.SecretKey != "minio123" {
t.Errorf("Expecting access key to be `minio123` found %s", cred.SecretKey)
t.Errorf("Expected access key to be `minio123`, found %s", cred.SecretKey)
}
// Check if serverConfig has the correct domain
if globalDomainName != "domain.com" {
t.Errorf("Expected Domain to be `domain.com`, found " + globalDomainName)
}
}
// Tests config validator..
func TestValidateConfig(t *testing.T) {
objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
t.Fatalf("Init Test config failed")
}
configPath := path.Join(minioConfigPrefix, minioConfigFile)
v := serverConfigVersion
testCases := []struct {
@@ -174,66 +185,69 @@ func TestValidateConfig(t *testing.T) {
{`{"version": "` + v + `", "browser": "on", "browser": "on", "region":"us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, false},
// Test 11 - Test AMQP
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, true},
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, false},
// Test 12 - Test NATS
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "clientID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, true},
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},
// Test 13 - Test ElasticSearch
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, true},
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, false},
// Test 14 - Test Redis
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "address": "", "password": "", "key": "" } }}}`, true},
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "address": "", "password": "", "key": "" } }}}`, false},
// Test 15 - Test PostgreSQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, true},
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},
// Test 16 - Test Kafka
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "" } }}}`, true},
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "" } }}}`, false},
// Test 17 - Test Webhook
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "" } }}}`, true},
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "" } }}}`, false},
// Test 18 - Test MySQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, true},
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},
// Test 19 - Test Format for MySQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "invalid", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "invalid", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false},
// Test 20 - Test valid Format for MySQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "namespace", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},
// Test 21 - Test Format for PostgreSQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "invalid", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "invalid", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false},
// Test 22 - Test valid Format for PostgreSQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "namespace", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},
// Test 23 - Test Format for ElasticSearch
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "invalid", "url": "example.com", "index": "myindex" } }}}`, true},
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "invalid", "url": "example.com", "index": "myindex" } }}}`, false},
// Test 24 - Test valid Format for ElasticSearch
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "namespace", "url": "example.com", "index": "myindex" } }}}`, true},
// Test 25 - Test Format for Redis
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true},
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, false},
// Test 26 - Test valid Format for Redis
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "namespace", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true},
// Test 27 - Test MQTT
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "clientId": "", "username": "", "password": ""}}}}`, true},
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "username": "", "password": "", "queueDir": ""}}}}`, false},
// Test 28 - Test NSQ
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nsq": { "1": { "enable": true, "nsqdAddress": "", "topic": ""} }}}`, false},
}
for i, testCase := range testCases {
if err = saveConfig(context.Background(), objLayer, configPath, []byte(testCase.configData)); err != nil {
t.Fatal(err)
}
_, err = getValidConfig(objLayer)
if testCase.shouldPass && err != nil {
t.Errorf("Test %d, should pass but it failed with err = %v", i+1, err)
}
if !testCase.shouldPass && err == nil {
t.Errorf("Test %d, should fail but it succeeded.", i+1)
}
}
@@ -249,76 +263,98 @@ func TestConfigDiff(t *testing.T) {
{&serverConfig{}, nil, "Given configuration is empty"},
// 2
{
&serverConfig{Credential: auth.Credentials{"u1", "p1"}},
&serverConfig{Credential: auth.Credentials{"u1", "p2"}},
&serverConfig{Credential: auth.Credentials{
AccessKey: "u1",
SecretKey: "p1",
Expiration: timeSentinel,
}},
&serverConfig{Credential: auth.Credentials{
AccessKey: "u1",
SecretKey: "p2",
Expiration: timeSentinel,
}},
"Credential configuration differs",
},
// 3
{&serverConfig{Region: "us-east-1"}, &serverConfig{Region: "us-west-1"}, "Region configuration differs"},
// 4
{&serverConfig{Browser: false}, &serverConfig{Browser: true}, "Browser configuration differs"},
// 5
{
&serverConfig{Notify: notifier{AMQP: map[string]target.AMQPArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{AMQP: map[string]target.AMQPArgs{"1": {Enable: false}}}},
"AMQP Notification configuration differs",
},
// 6
{
&serverConfig{Notify: notifier{NATS: map[string]target.NATSArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{NATS: map[string]target.NATSArgs{"1": {Enable: false}}}},
"NATS Notification configuration differs",
},
// 7
{
&serverConfig{Notify: notifier{NSQ: map[string]target.NSQArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{NSQ: map[string]target.NSQArgs{"1": {Enable: false}}}},
"NSQ Notification configuration differs",
},
// 8
{
&serverConfig{Notify: notifier{Elasticsearch: map[string]target.ElasticsearchArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{Elasticsearch: map[string]target.ElasticsearchArgs{"1": {Enable: false}}}},
"ElasticSearch Notification configuration differs",
},
// 9
{
&serverConfig{Notify: notifier{Redis: map[string]target.RedisArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{Redis: map[string]target.RedisArgs{"1": {Enable: false}}}},
"Redis Notification configuration differs",
},
// 10
{
&serverConfig{Notify: notifier{PostgreSQL: map[string]target.PostgreSQLArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{PostgreSQL: map[string]target.PostgreSQLArgs{"1": {Enable: false}}}},
"PostgreSQL Notification configuration differs",
},
// 11
{
&serverConfig{Notify: notifier{Kafka: map[string]target.KafkaArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{Kafka: map[string]target.KafkaArgs{"1": {Enable: false}}}},
"Kafka Notification configuration differs",
},
// 12
{
&serverConfig{Notify: notifier{Webhook: map[string]target.WebhookArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{Webhook: map[string]target.WebhookArgs{"1": {Enable: false}}}},
"Webhook Notification configuration differs",
},
// 13
{
&serverConfig{Notify: notifier{MySQL: map[string]target.MySQLArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{MySQL: map[string]target.MySQLArgs{"1": {Enable: false}}}},
"MySQL Notification configuration differs",
},
// 14
{
&serverConfig{Notify: notifier{MQTT: map[string]target.MQTTArgs{"1": {Enable: true}}}},
&serverConfig{Notify: notifier{MQTT: map[string]target.MQTTArgs{"1": {Enable: false}}}},
"MQTT Notification configuration differs",
},
// 15
{
&serverConfig{Logger: loggerConfig{
Console: loggerConsole{Enabled: true},
HTTP: map[string]loggerHTTP{"1": {Endpoint: "http://address1"}},
}},
&serverConfig{Logger: loggerConfig{
Console: loggerConsole{Enabled: true},
HTTP: map[string]loggerHTTP{"1": {Endpoint: "http://address2"}},
}},
"Logger configuration differs",
},
}
for i, testCase := range testCases {

View File

@@ -19,7 +19,6 @@ package cmd
import (
"os"
"path/filepath"
"sync"
homedir "github.com/mitchellh/go-homedir"
)
@@ -28,9 +27,6 @@ const (
// Default minio configuration directory where below configuration files/directories are stored.
defaultMinioConfigDir = ".minio"
// Directory contains below files/directories for HTTPS configuration.
certsDir = "certs"
@@ -44,55 +40,9 @@ const (
privateKeyFile = "private.key"
)
// ConfigDir - points to a user set directory.
type ConfigDir struct {
path string
}
func getDefaultConfigDir() string {
@@ -104,32 +54,54 @@ func getDefaultConfigDir() string {
return filepath.Join(homeDir, defaultMinioConfigDir)
}
var configDir = &ConfigDir{dir: getDefaultConfigDir()}
func setConfigDir(dir string) {
configDir.Set(dir)
func getDefaultCertsDir() string {
return filepath.Join(getDefaultConfigDir(), certsDir)
}
func getDefaultCertsCADir() string {
return filepath.Join(getDefaultCertsDir(), certsCADir)
}
var (
// Default config, certs and CA directories.
defaultConfigDir = &ConfigDir{path: getDefaultConfigDir()}
defaultCertsDir = &ConfigDir{path: getDefaultCertsDir()}
defaultCertsCADir = &ConfigDir{path: getDefaultCertsCADir()}
// Points to current configuration directory -- deprecated, to be removed in future.
globalConfigDir = defaultConfigDir
// Points to current certs directory set by user with --certs-dir
globalCertsDir = defaultCertsDir
// Points to relative path to certs directory and is <value-of-certs-dir>/CAs
globalCertsCADir = defaultCertsCADir
)
// Get - returns current directory.
func (dir *ConfigDir) Get() string {
return dir.path
}
// Attempts to create all directories, ignores any permission denied errors.
func mkdirAllIgnorePerm(path string) error {
err := os.MkdirAll(path, 0700)
if err != nil {
// It is possible in Kubernetes-like deployments that this directory
// is already mounted and not writable; ignore any permission errors.
if os.IsPermission(err) {
err = nil
}
}
return err
}
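A runnable sketch of how mkdirAllIgnorePerm behaves on an unwritable directory; the 0500 parent and temp paths are illustrative, and only permission errors are swallowed, as above:
package main
import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)
// Same wrapper as above: MkdirAll, but permission errors are swallowed so a
// read-only mount does not abort startup.
func mkdirAllIgnorePerm(path string) error {
	err := os.MkdirAll(path, 0700)
	if os.IsPermission(err) {
		err = nil
	}
	return err
}
func main() {
	parent, err := ioutil.TempDir("", "ro-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(parent)
	os.Chmod(parent, 0500) // drop write permission on the parent
	// For non-root users MkdirAll now fails with a permission error,
	// which mkdirAllIgnorePerm deliberately ignores.
	fmt.Println(mkdirAllIgnorePerm(filepath.Join(parent, "sub"))) // <nil>
}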
func getConfigFile() string {
return filepath.Join(globalConfigDir.Get(), minioConfigFile)
}
func getPublicCertFile() string {
return filepath.Join(globalCertsDir.Get(), publicCertFile)
}
func getPrivateKeyFile() string {
return filepath.Join(globalCertsDir.Get(), privateKeyFile)
}

View File

@@ -17,15 +17,20 @@
package cmd
import (
"context"
"encoding/json"
"fmt"
"os"
"path"
"path/filepath"
etcd "github.com/coreos/etcd/client"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/event/target"
"github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/iam/validator"
xnet "github.com/minio/minio/pkg/net"
"github.com/minio/minio/pkg/quick"
)
@@ -33,7 +38,22 @@ import (
// DO NOT EDIT following message template, please open a github issue to discuss instead.
var configMigrateMSGTemplate = "Configuration file %s migrated from version '%s' to '%s' successfully."
// Migrates all config versions from "1" to "18".
// Save config file to corresponding backend
func Save(configFile string, data interface{}) error {
return quick.SaveConfig(data, configFile, globalEtcdClient)
}
// Load config from backend
func Load(configFile string, data interface{}) (quick.Config, error) {
return quick.LoadConfig(configFile, globalEtcdClient, data)
}
// GetVersion gets config version from backend
func GetVersion(configFile string) (string, error) {
return quick.GetVersion(configFile, globalEtcdClient)
}
// Migrates all config versions from "1" to "28".
func migrateConfig() error {
// Purge all configs with version '1',
// this is a special case since version '1' used
@@ -45,6 +65,9 @@ func migrateConfig() error {
// Load only config version information.
version, err := GetVersion(getConfigFile())
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
@@ -188,6 +211,16 @@ func migrateConfig() error {
return err
}
fallthrough
case "26":
if err = migrateV26ToV27(); err != nil {
return err
}
fallthrough
case "27":
if err = migrateV27ToV28(); err != nil {
return err
}
fallthrough
case serverConfigVersion:
// No migration needed. this always points to current version.
err = nil
@@ -197,11 +230,11 @@ func migrateConfig() error {
// Version '1' is not supported anymore and deprecated, safe to delete.
func purgeV1() error {
configFile := filepath.Join(globalConfigDir.Get(), "fsUsers.json")
cv1 := &configV1{}
_, err := Load(configFile, cv1)
if os.IsNotExist(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version 1. %v", err)
@@ -885,7 +918,7 @@ func migrateV12ToV13() error {
// Copy over fields from V12 into V13 config struct
srvConfig := &serverConfigV13{
Logger: &loggerV7{},
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "13"
srvConfig.Credential = cv12.Credential
@@ -965,7 +998,7 @@ func migrateV13ToV14() error {
// Copy over fields from V13 into V14 config struct
srvConfig := &serverConfigV14{
Logger: &loggerV7{},
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "14"
srvConfig.Credential = cv13.Credential
@@ -1050,7 +1083,7 @@ func migrateV14ToV15() error {
// Copy over fields from V14 into V15 config struct
srvConfig := &serverConfigV15{
Logger: &loggerV7{},
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "15"
srvConfig.Credential = cv14.Credential
@@ -1140,7 +1173,7 @@ func migrateV15ToV16() error {
// Copy over fields from V15 into V16 config struct
srvConfig := &serverConfigV16{
Logger: &loggers{},
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "16"
srvConfig.Credential = cv15.Credential
@@ -1230,7 +1263,7 @@ func migrateV16ToV17() error {
// Copy over fields from V16 into V17 config struct
srvConfig := &serverConfigV17{
Logger: &loggers{},
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "17"
srvConfig.Credential = cv16.Credential
@@ -1351,7 +1384,7 @@ func migrateV17ToV18() error {
// Copy over fields from V17 into V18 config struct
srvConfig := &serverConfigV17{
Logger: &loggers{},
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "18"
srvConfig.Credential = cv17.Credential
@@ -1453,7 +1486,7 @@ func migrateV18ToV19() error {
// Copy over fields from V18 into V19 config struct
srvConfig := &serverConfigV18{
Logger: &loggers{},
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "19"
srvConfig.Credential = cv18.Credential
@@ -1559,7 +1592,7 @@ func migrateV19ToV20() error {
// Copy over fields from V19 into V20 config struct
srvConfig := &serverConfigV20{
Logger: &loggers{},
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "20"
srvConfig.Credential = cv19.Credential
@@ -1663,7 +1696,7 @@ func migrateV20ToV21() error {
// Copy over fields from V20 into V21 config struct
srvConfig := &serverConfigV21{
Notify: &notifier{},
Notify: &notifierV3{},
}
srvConfig.Version = "21"
srvConfig.Credential = cv20.Credential
@@ -1767,7 +1800,7 @@ func migrateV21ToV22() error {
// Copy over fields from V21 into V22 config struct
srvConfig := &serverConfigV22{
Notify: notifierV3{},
}
srvConfig.Version = "22"
srvConfig.Credential = cv21.Credential
@@ -1871,7 +1904,7 @@ func migrateV22ToV23() error {
// Copy over fields from V22 into V23 config struct
srvConfig := &serverConfigV23{
Notify: notifierV3{},
}
srvConfig.Version = "23"
srvConfig.Credential = cv22.Credential
@@ -1984,7 +2017,7 @@ func migrateV23ToV24() error {
// Copy over fields from V23 into V24 config struct
srvConfig := &serverConfigV24{
Notify: notifierV3{},
}
srvConfig.Version = "24"
srvConfig.Credential = cv23.Credential
@@ -2097,7 +2130,7 @@ func migrateV24ToV25() error {
// Copy over fields from V24 into V25 config struct
srvConfig := &serverConfigV25{
Notify: notifierV3{},
}
srvConfig.Version = "25"
srvConfig.Credential = cv24.Credential
@@ -2215,7 +2248,7 @@ func migrateV25ToV26() error {
// Copy over fields from V25 into V26 config struct
srvConfig := &serverConfigV26{
Notify: notifierV3{},
}
srvConfig.Version = "26"
srvConfig.Credential = cv25.Credential
@@ -2317,3 +2350,386 @@ func migrateV25ToV26() error {
logger.Info(configMigrateMSGTemplate, configFile, cv25.Version, srvConfig.Version)
return nil
}
func migrateV26ToV27() error {
configFile := getConfigFile()
// config V27 is backward compatible with V26, load the old
// config file in serverConfigV27 struct and put some examples
// in the new `logger` field
srvConfig := &serverConfigV27{}
_, err := quick.LoadConfig(configFile, globalEtcdClient, srvConfig)
if os.IsNotExist(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config file. %v", err)
}
if srvConfig.Version != "26" {
return nil
}
srvConfig.Version = "27"
// Enable console logging by default to avoid breaking users
// current deployments
srvConfig.Logger.Console.Enabled = true
srvConfig.Logger.HTTP = make(map[string]loggerHTTP)
srvConfig.Logger.HTTP["1"] = loggerHTTP{}
if err = quick.SaveConfig(srvConfig, configFile, globalEtcdClient); err != nil {
return fmt.Errorf("Failed to migrate config from 26 to 27. %v", err)
}
logger.Info(configMigrateMSGTemplate, configFile, "26", "27")
return nil
}
func migrateV27ToV28() error {
configFile := getConfigFile()
// config V28 is backward compatible with V27, load the old
// config file in serverConfigV28 struct and initialize KMSConfig
srvConfig := &serverConfigV28{}
_, err := quick.LoadConfig(configFile, globalEtcdClient, srvConfig)
if os.IsNotExist(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config file. %v", err)
}
if srvConfig.Version != "27" {
return nil
}
srvConfig.Version = "28"
srvConfig.KMS = crypto.KMSConfig{}
if err = quick.SaveConfig(srvConfig, configFile, globalEtcdClient); err != nil {
return fmt.Errorf("Failed to migrate config from 27 to 28. %v", err)
}
logger.Info(configMigrateMSGTemplate, configFile, "27", "28")
return nil
}
// Migrates ${HOME}/.minio/config.json to '<export_path>/.minio.sys/config/config.json'
func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
// Construct path to config.json for the given bucket.
configFile := path.Join(minioConfigPrefix, minioConfigFile)
// Verify if config was already available in .minio.sys in which case, nothing more to be done.
if err = checkConfig(context.Background(), objAPI, configFile); err != errConfigNotFound {
return err
}
defer func() {
// Rename config.json to config.json.deprecated only upon
// success of this function.
if err == nil {
os.Rename(getConfigFile(), getConfigFile()+".deprecated")
}
}()
transactionConfigFile := configFile + ".transaction"
// As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
// and configFile, take a transaction lock to avoid data race between readConfig()
// and saveConfig().
objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile)
if err = objLock.GetLock(globalOperationTimeout); err != nil {
return err
}
defer objLock.Unlock()
// Verify if backend already has the file (after holding lock)
if err = checkConfig(context.Background(), objAPI, configFile); err != errConfigNotFound {
return err
} // if errConfigNotFound proceed to migrate..
var config = &serverConfig{}
if _, err = Load(getConfigFile(), config); err != nil {
if !os.IsNotExist(err) {
return err
}
// Read from deprecate file as well if necessary.
if _, err = Load(getConfigFile()+".deprecated", config); err != nil {
if !os.IsNotExist(err) {
return err
}
// If all else fails simply initialize the server config.
return newSrvConfig(objAPI)
}
}
return saveServerConfig(context.Background(), objAPI, config)
}
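migrateConfigToMinioSys checks for the config, takes the transaction lock, then checks again, so concurrent servers cannot both migrate. A generic sketch of that check-lock-recheck pattern, with sync.Mutex and an atomic flag standing in for the namespace lock and checkConfig:
package main
import (
	"fmt"
	"sync"
	"sync/atomic"
)
// Check-lock-recheck guard, as in migrateConfigToMinioSys: a cheap existence
// check first, then the lock, then the same check again under the lock.
type migrator struct {
	mu   sync.Mutex // stand-in for the ".transaction" namespace lock
	done int32      // stand-in for checkConfig on the backend
}
func (m *migrator) exists() bool { return atomic.LoadInt32(&m.done) == 1 }
func (m *migrator) migrateOnce(do func()) {
	if m.exists() { // fast path, no lock taken
		return
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.exists() { // re-check: another server may have migrated meanwhile
		return
	}
	do()
	atomic.StoreInt32(&m.done, 1)
}
func main() {
	m := &migrator{}
	ran := 0
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			m.migrateOnce(func() { ran++ }) // ran is mutated under m.mu only
		}()
	}
	wg.Wait()
	fmt.Println("migrations executed:", ran) // 1
}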
// Migrates '.minio.sys/config.json' to v33.
func migrateMinioSysConfig(objAPI ObjectLayer) error {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
// Check if the config version is latest, if not migrate.
ok, _, err := checkConfigVersion(objAPI, configFile, serverConfigVersion)
if err != nil {
return err
}
if ok {
return nil
}
// Construct path to config.json for the given bucket.
transactionConfigFile := configFile + ".transaction"
// As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
// and configFile, take a transaction lock to avoid data race between readConfig()
// and saveConfig().
objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile)
if err := objLock.GetLock(globalOperationTimeout); err != nil {
return err
}
defer objLock.Unlock()
if err := migrateV27ToV28MinioSys(objAPI); err != nil {
return err
}
if err := migrateV28ToV29MinioSys(objAPI); err != nil {
return err
}
if err := migrateV29ToV30MinioSys(objAPI); err != nil {
return err
}
if err := migrateV30ToV31MinioSys(objAPI); err != nil {
return err
}
if err := migrateV31ToV32MinioSys(objAPI); err != nil {
return err
}
return migrateV32ToV33MinioSys(objAPI)
}
func checkConfigVersion(objAPI ObjectLayer, configFile string, version string) (bool, []byte, error) {
data, err := readConfig(context.Background(), objAPI, configFile)
if err != nil {
return false, nil, err
}
var versionConfig struct {
Version string `json:"version"`
}
vcfg := &versionConfig
if err = json.Unmarshal(data, vcfg); err != nil {
return false, nil, err
}
return vcfg.Version == version, data, nil
}
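Each migrateVxToVyMinioSys step below repeats the same version-gated unmarshal, mutate, re-marshal sequence around checkConfigVersion. A self-contained sketch of just that pattern; bumpVersion is a hypothetical helper, not part of this change:
package main
import (
	"encoding/json"
	"fmt"
)
// bumpVersion is a hypothetical, self-contained rendering of the shape every
// migrateVxToVyMinioSys step shares: gate on the stored "version", mutate the
// decoded config, re-marshal, and leave other versions to their own step.
func bumpVersion(data []byte, from, to string) ([]byte, bool, error) {
	var v struct {
		Version string `json:"version"`
	}
	if err := json.Unmarshal(data, &v); err != nil {
		return nil, false, err
	}
	if v.Version != from {
		return data, false, nil // another migration step owns this version
	}
	var cfg map[string]interface{}
	if err := json.Unmarshal(data, &cfg); err != nil {
		return nil, false, err
	}
	cfg["version"] = to // plus whatever fields the target version introduces
	out, err := json.Marshal(cfg)
	if err != nil {
		return nil, false, err
	}
	return out, true, nil
}
func main() {
	out, migrated, err := bumpVersion([]byte(`{"version":"27","region":"us-east-1"}`), "27", "28")
	fmt.Println(string(out), migrated, err)
}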
func migrateV27ToV28MinioSys(objAPI ObjectLayer) error {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
ok, data, err := checkConfigVersion(objAPI, configFile, "27")
if err == errConfigNotFound {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config file. %v", err)
}
if !ok {
return nil
}
cfg := &serverConfigV28{}
if err = json.Unmarshal(data, cfg); err != nil {
return err
}
cfg.Version = "28"
cfg.KMS = crypto.KMSConfig{}
data, err = json.Marshal(cfg)
if err != nil {
return err
}
if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
return fmt.Errorf("Failed to migrate config from 27 to 28. %v", err)
}
logger.Info(configMigrateMSGTemplate, configFile, "27", "28")
return nil
}
func migrateV28ToV29MinioSys(objAPI ObjectLayer) error {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
ok, data, err := checkConfigVersion(objAPI, configFile, "28")
if err == errConfigNotFound {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config file. %v", err)
}
if !ok {
return nil
}
cfg := &serverConfigV29{}
if err = json.Unmarshal(data, cfg); err != nil {
return err
}
cfg.Version = "29"
data, err = json.Marshal(cfg)
if err != nil {
return err
}
if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
return fmt.Errorf("Failed to migrate config from 28 to 29. %v", err)
}
logger.Info(configMigrateMSGTemplate, configFile, "28", "29")
return nil
}
func migrateV29ToV30MinioSys(objAPI ObjectLayer) error {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
ok, data, err := checkConfigVersion(objAPI, configFile, "29")
if err == errConfigNotFound {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config file. %v", err)
}
if !ok {
return nil
}
cfg := &serverConfigV30{}
if err = json.Unmarshal(data, cfg); err != nil {
return err
}
cfg.Version = "30"
// Init compression config. For future migrations, the compression config needs to be copied over from the previous version.
cfg.Compression.Enabled = false
cfg.Compression.Extensions = globalCompressExtensions
cfg.Compression.MimeTypes = globalCompressMimeTypes
data, err = json.Marshal(cfg)
if err != nil {
return err
}
if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
return fmt.Errorf("Failed to migrate config from 29 to 30. %v", err)
}
logger.Info(configMigrateMSGTemplate, configFile, "29", "30")
return nil
}
func migrateV30ToV31MinioSys(objAPI ObjectLayer) error {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
ok, data, err := checkConfigVersion(objAPI, configFile, "30")
if err == errConfigNotFound {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config file. %v", err)
}
if !ok {
return nil
}
cfg := &serverConfigV31{}
if err = json.Unmarshal(data, cfg); err != nil {
return err
}
cfg.Version = "31"
cfg.OpenID.JWKS = validator.JWKSArgs{
URL: &xnet.URL{},
}
cfg.Policy.OPA = iampolicy.OpaArgs{
URL: &xnet.URL{},
AuthToken: "",
}
data, err = json.Marshal(cfg)
if err != nil {
return err
}
if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
return fmt.Errorf("Failed to migrate config from 30 to 31. %v", err)
}
logger.Info(configMigrateMSGTemplate, configFile, "30", "31")
return nil
}
func migrateV31ToV32MinioSys(objAPI ObjectLayer) error {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
ok, data, err := checkConfigVersion(objAPI, configFile, "31")
if err == errConfigNotFound {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config file. %v", err)
}
if !ok {
return nil
}
cfg := &serverConfigV32{}
if err = json.Unmarshal(data, cfg); err != nil {
return err
}
cfg.Version = "32"
cfg.Notify.NSQ = make(map[string]target.NSQArgs)
cfg.Notify.NSQ["1"] = target.NSQArgs{}
data, err = json.Marshal(cfg)
if err != nil {
return err
}
if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
return fmt.Errorf("Failed to migrate config from 31 to 32. %v", err)
}
logger.Info(configMigrateMSGTemplate, configFile, "31", "32")
return nil
}
func migrateV32ToV33MinioSys(objAPI ObjectLayer) error {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
ok, data, err := checkConfigVersion(objAPI, configFile, "32")
if err == errConfigNotFound {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config file. %v", err)
}
if !ok {
return nil
}
cfg := &serverConfigV33{}
if err = json.Unmarshal(data, cfg); err != nil {
return err
}
cfg.Version = "33"
data, err = json.Marshal(cfg)
if err != nil {
return err
}
if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
return fmt.Errorf("Failed to migrate config from 32 to 33 . %v", err)
}
logger.Info(configMigrateMSGTemplate, configFile, "32", "33")
return nil
}

View File

@@ -25,14 +25,25 @@ import (
// Test if config v1 is purged
func TestServerConfigMigrateV1(t *testing.T) {
objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
err = newTestConfig(globalMinioDefaultRegion, objLayer)
if err != nil {
t.Fatalf("Init Test config failed")
}
// remove the root directory after the test ends.
rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rootPath)
globalConfigDir = &ConfigDir{path: rootPath}
globalObjLayerMutex.Lock()
globalObjectAPI = objLayer
globalObjLayerMutex.Unlock()
// Create a V1 config json file and store it
configJSON := "{ \"version\":\"1\", \"accessKeyId\":\"abcde\", \"secretAccessKey\":\"abcdefgh\"}"
@@ -45,13 +56,14 @@ func TestServerConfigMigrateV1(t *testing.T) {
if err := migrateConfig(); err != nil {
t.Fatal("Unexpected error: ", err)
}
// Check if config v1 is removed from filesystem
if _, err := os.Stat(configPath); err == nil || !os.IsNotExist(err) {
t.Fatal("Config V1 file is not purged")
}
// Initialize server config and check again if everything is fine
if err := loadConfig(objLayer); err != nil {
t.Fatalf("Unable to initialize from updated config file %s", err)
}
}
@@ -59,20 +71,13 @@ func TestServerConfigMigrateV1(t *testing.T) {
// Test if all migrate code returns nil when config file does not
// exist
func TestServerConfigMigrateInexistentConfig(t *testing.T) {
rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
if err != nil {
t.Fatalf("Init Test config failed")
t.Fatal(err)
}
// remove the root directory after the test ends.
defer os.RemoveAll(rootPath)
globalConfigDir = &ConfigDir{path: rootPath}
if err := migrateV2ToV3(); err != nil {
t.Fatal("migrate v2 to v3 should succeed when no config file is found")
@@ -134,18 +139,42 @@ func TestServerConfigMigrateInexistentConfig(t *testing.T) {
if err := migrateV21ToV22(); err != nil {
t.Fatal("migrate v21 to v22 should succeed when no config file is found")
}
if err := migrateV22ToV23(); err != nil {
t.Fatal("migrate v22 to v23 should succeed when no config file is found")
}
if err := migrateV23ToV24(); err != nil {
t.Fatal("migrate v23 to v24 should succeed when no config file is found")
}
if err := migrateV24ToV25(); err != nil {
t.Fatal("migrate v24 to v25 should succeed when no config file is found")
}
if err := migrateV25ToV26(); err != nil {
t.Fatal("migrate v25 to v26 should succeed when no config file is found")
}
if err := migrateV26ToV27(); err != nil {
t.Fatal("migrate v26 to v27 should succeed when no config file is found")
}
if err := migrateV27ToV28(); err != nil {
t.Fatal("migrate v27 to v28 should succeed when no config file is found")
}
}
// Test if a config migration from v2 to v33 is successfully done
func TestServerConfigMigrateV2toV33(t *testing.T) {
rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
if err != nil {
t.Fatalf("Init Test config failed")
t.Fatal(err)
}
// remove the root directory after the test ends.
defer os.RemoveAll(rootPath)
globalConfigDir = &ConfigDir{path: rootPath}
objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
configPath := rootPath + "/" + minioConfigFile
// Create a corrupted config file
@@ -171,8 +200,16 @@ func TestServerConfigMigrateV2toV23(t *testing.T) {
t.Fatal("Unexpected error: ", err)
}
if err := migrateConfigToMinioSys(objLayer); err != nil {
t.Fatal("Unexpected error: ", err)
}
if err := migrateMinioSysConfig(objLayer); err != nil {
t.Fatal("Unexpected error: ", err)
}
// Initialize server config and check again if everything is fine
if err := loadConfig(objLayer); err != nil {
t.Fatalf("Unable to initialize from updated config file %s", err)
}
@@ -186,6 +223,7 @@ func TestServerConfigMigrateV2toV23(t *testing.T) {
if globalServerConfig.Credential.AccessKey != accessKey {
t.Fatalf("Access key lost during migration, expected: %v, found:%v", accessKey, globalServerConfig.Credential.AccessKey)
}
if globalServerConfig.Credential.SecretKey != secretKey {
t.Fatalf("Secret key lost during migration, expected: %v, found: %v", secretKey, globalServerConfig.Credential.SecretKey)
}
@@ -193,14 +231,13 @@ func TestServerConfigMigrateV2toV23(t *testing.T) {
// Test if all migrate code returns error with corrupted config files
func TestServerConfigMigrateFaultyConfig(t *testing.T) {
rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
if err != nil {
t.Fatalf("Init Test config failed")
t.Fatal(err)
}
// remove the root directory after the test ends.
defer os.RemoveAll(rootPath)
globalConfigDir = &ConfigDir{path: rootPath}
configPath := rootPath + "/" + minioConfigFile
// Create a corrupted config file
@@ -272,18 +309,32 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
if err := migrateV22ToV23(); err == nil {
t.Fatal("migrateConfigV22ToV23() should fail with a corrupted json")
}
if err := migrateV23ToV24(); err == nil {
t.Fatal("migrateConfigV23ToV24() should fail with a corrupted json")
}
if err := migrateV24ToV25(); err == nil {
t.Fatal("migrateConfigV24ToV25() should fail with a corrupted json")
}
if err := migrateV25ToV26(); err == nil {
t.Fatal("migrateConfigV25ToV26() should fail with a corrupted json")
}
if err := migrateV26ToV27(); err == nil {
t.Fatal("migrateConfigV26ToV27() should fail with a corrupted json")
}
if err := migrateV27ToV28(); err == nil {
t.Fatal("migrateConfigV27ToV28() should fail with a corrupted json")
}
}
// Test if all migrate code returns error with corrupted config files
func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
if err != nil {
t.Fatalf("Init Test config failed")
t.Fatal(err)
}
// remove the root directory after the test ends.
defer os.RemoveAll(rootPath)
globalConfigDir = &ConfigDir{path: rootPath}
configPath := rootPath + "/" + minioConfigFile
for i := 3; i <= 17; i++ {

View File

@@ -19,8 +19,11 @@ package cmd
import (
"sync"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/event/target"
"github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/iam/validator"
"github.com/minio/minio/pkg/quick"
)
@@ -370,7 +373,7 @@ type serverConfigV12 struct {
Notify notifierV2 `json:"notify"`
}
type notifierV3 struct {
AMQP map[string]target.AMQPArgs `json:"amqp"`
Elasticsearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
Kafka map[string]target.KafkaArgs `json:"kafka"`
@@ -395,7 +398,7 @@ type serverConfigV13 struct {
Logger *loggerV7 `json:"logger"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// serverConfigV14 server configuration version '14' which is like
@@ -412,7 +415,7 @@ type serverConfigV14 struct {
Logger *loggerV7 `json:"logger"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// serverConfigV15 server configuration version '15' which is like
@@ -429,7 +432,7 @@ type serverConfigV15 struct {
Logger *loggerV7 `json:"logger"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// FileLogger is introduced to workaround the dependency about logrus
@@ -467,7 +470,7 @@ type serverConfigV16 struct {
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// serverConfigV17 server configuration version '17' which is like
@@ -486,7 +489,7 @@ type serverConfigV17 struct {
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// serverConfigV18 server configuration version '18' which is like
@@ -505,7 +508,7 @@ type serverConfigV18 struct {
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// serverConfigV19 server configuration version '19' which is like
@@ -523,7 +526,7 @@ type serverConfigV19 struct {
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// serverConfigV20 server configuration version '20' which is like
@@ -542,7 +545,7 @@ type serverConfigV20 struct {
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// serverConfigV21 is just like version '20' without logger field
@@ -557,7 +560,7 @@ type serverConfigV21 struct {
Domain string `json:"domain"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// serverConfigV22 is just like version '21' with added support
@@ -578,7 +581,7 @@ type serverConfigV22 struct {
StorageClass storageClassConfig `json:"storageclass"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
}
// serverConfigV23 is just like version '22' with addition of cache field.
@@ -601,7 +604,7 @@ type serverConfigV23 struct {
Cache CacheConfig `json:"cache"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
}
// serverConfigV24 is just like version '23', we had to revert
@@ -625,7 +628,7 @@ type serverConfigV24 struct {
Cache CacheConfig `json:"cache"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
}
// serverConfigV25 is just like version '24', stores additionally
@@ -652,14 +655,11 @@ type serverConfigV25 struct {
Cache CacheConfig `json:"cache"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
}
// serverConfigV26 is just like version '25', stores additionally
// cache max use value in 'CacheConfig'.
type serverConfigV26 struct {
quick.Config `json:"-"` // ignore interfaces
@@ -679,5 +679,260 @@ type serverConfigV26 struct {
Cache CacheConfig `json:"cache"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
}
type loggerConsole struct {
Enabled bool `json:"enabled"`
}
type loggerHTTP struct {
Enabled bool `json:"enabled"`
Endpoint string `json:"endpoint"`
}
type loggerConfig struct {
Console loggerConsole `json:"console"`
HTTP map[string]loggerHTTP `json:"http"`
}
// serverConfigV27 is just like version '26', stores additionally
// the logger field
//
// IMPORTANT NOTE: When updating this struct make sure that
// serverConfig.ConfigDiff() is updated as necessary.
type serverConfigV27 struct {
quick.Config `json:"-"` // ignore interfaces
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser BoolFlag `json:"browser"`
Worm BoolFlag `json:"worm"`
Domain string `json:"domain"`
// Storage class configuration
StorageClass storageClassConfig `json:"storageclass"`
// Cache configuration
Cache CacheConfig `json:"cache"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
// Logger configuration
Logger loggerConfig `json:"logger"`
}
// serverConfigV28 is just like version '27', additionally
// storing KMS config
//
// IMPORTANT NOTE: When updating this struct make sure that
// serverConfig.ConfigDiff() is updated as necessary.
type serverConfigV28 struct {
quick.Config `json:"-"` // ignore interfaces
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Worm BoolFlag `json:"worm"`
// Storage class configuration
StorageClass storageClassConfig `json:"storageclass"`
// Cache configuration
Cache CacheConfig `json:"cache"`
// KMS configuration
KMS crypto.KMSConfig `json:"kms"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
// Logger configuration
Logger loggerConfig `json:"logger"`
}
// serverConfigV29 is just like version '28'.
type serverConfigV29 serverConfigV28
// compressionConfig represents the compression settings.
type compressionConfig struct {
Enabled bool `json:"enabled"`
Extensions []string `json:"extensions"`
MimeTypes []string `json:"mime-types"`
}
// serverConfigV30 is just like version '29', stores additionally
// extensions and mimetypes fields for compression.
type serverConfigV30 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Worm BoolFlag `json:"worm"`
// Storage class configuration
StorageClass storageClassConfig `json:"storageclass"`
// Cache configuration
Cache CacheConfig `json:"cache"`
// KMS configuration
KMS crypto.KMSConfig `json:"kms"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
// Logger configuration
Logger loggerConfig `json:"logger"`
// Compression configuration
Compression compressionConfig `json:"compress"`
}
// serverConfigV31 is just like version '30', with OPA and OpenID configuration.
type serverConfigV31 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Worm BoolFlag `json:"worm"`
// Storage class configuration
StorageClass storageClassConfig `json:"storageclass"`
// Cache configuration
Cache CacheConfig `json:"cache"`
// KMS configuration
KMS crypto.KMSConfig `json:"kms"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
// Logger configuration
Logger loggerConfig `json:"logger"`
// Compression configuration
Compression compressionConfig `json:"compress"`
// OpenID configuration
OpenID struct {
// JWKS validator config.
JWKS validator.JWKSArgs `json:"jwks"`
} `json:"openid"`
// External policy enforcements.
Policy struct {
// OPA configuration.
OPA iampolicy.OpaArgs `json:"opa"`
// Add new external policy enforcements here.
} `json:"policy"`
}
type notifier struct {
AMQP map[string]target.AMQPArgs `json:"amqp"`
Elasticsearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
Kafka map[string]target.KafkaArgs `json:"kafka"`
MQTT map[string]target.MQTTArgs `json:"mqtt"`
MySQL map[string]target.MySQLArgs `json:"mysql"`
NATS map[string]target.NATSArgs `json:"nats"`
NSQ map[string]target.NSQArgs `json:"nsq"`
PostgreSQL map[string]target.PostgreSQLArgs `json:"postgresql"`
Redis map[string]target.RedisArgs `json:"redis"`
Webhook map[string]target.WebhookArgs `json:"webhook"`
}
// serverConfigV32 is just like version '31' with added nsq notifer.
type serverConfigV32 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Worm BoolFlag `json:"worm"`
// Storage class configuration
StorageClass storageClassConfig `json:"storageclass"`
// Cache configuration
Cache CacheConfig `json:"cache"`
// KMS configuration
KMS crypto.KMSConfig `json:"kms"`
// Notification queue configuration.
Notify notifier `json:"notify"`
// Logger configuration
Logger loggerConfig `json:"logger"`
// Compression configuration
Compression compressionConfig `json:"compress"`
// OpenID configuration
OpenID struct {
// JWKS validator config.
JWKS validator.JWKSArgs `json:"jwks"`
} `json:"openid"`
// External policy enforcements.
Policy struct {
// OPA configuration.
OPA iampolicy.OpaArgs `json:"opa"`
// Add new external policy enforcements here.
} `json:"policy"`
}
// serverConfigV33 is just like version '32', removes clientID from NATS and MQTT, and adds queueDir with MQTT.
type serverConfigV33 struct {
quick.Config `json:"-"` // ignore interfaces
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Worm BoolFlag `json:"worm"`
// Storage class configuration
StorageClass storageClassConfig `json:"storageclass"`
// Cache configuration
Cache CacheConfig `json:"cache"`
// KMS configuration
KMS crypto.KMSConfig `json:"kms"`
// Notification queue configuration.
Notify notifier `json:"notify"`
// Logger configuration
Logger loggerConfig `json:"logger"`
// Compression configuration
Compression compressionConfig `json:"compress"`
// OpenID configuration
OpenID struct {
// JWKS validator config.
JWKS validator.JWKSArgs `json:"jwks"`
} `json:"openid"`
// External policy enforcements.
Policy struct {
// OPA configuration.
OPA iampolicy.OpaArgs `json:"opa"`
// Add new external policy enforcements here.
} `json:"policy"`
}

cmd/config.go Normal file
View File

@@ -0,0 +1,201 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"encoding/json"
"path"
"runtime"
"strings"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/quick"
)
const (
minioConfigPrefix = "config"
// Minio configuration file.
minioConfigFile = "config.json"
// Minio backup file
minioConfigBackupFile = minioConfigFile + ".backup"
)
func saveServerConfig(ctx context.Context, objAPI ObjectLayer, config *serverConfig) error {
if err := quick.CheckData(config); err != nil {
return err
}
data, err := json.MarshalIndent(config, "", "\t")
if err != nil {
return err
}
configFile := path.Join(minioConfigPrefix, minioConfigFile)
if globalEtcdClient != nil {
return saveConfigEtcd(ctx, globalEtcdClient, configFile, data)
}
// Create a backup of the current config
oldData, err := readConfig(ctx, objAPI, configFile)
if err == nil {
backupConfigFile := path.Join(minioConfigPrefix, minioConfigBackupFile)
if err = saveConfig(ctx, objAPI, backupConfigFile, oldData); err != nil {
return err
}
} else {
if err != errConfigNotFound {
return err
}
}
// Save the new config in the std config path
return saveConfig(ctx, objAPI, configFile, data)
}
func readServerConfig(ctx context.Context, objAPI ObjectLayer) (*serverConfig, error) {
var configData []byte
var err error
configFile := path.Join(minioConfigPrefix, minioConfigFile)
if globalEtcdClient != nil {
configData, err = readConfigEtcd(ctx, globalEtcdClient, configFile)
} else {
configData, err = readConfig(ctx, objAPI, configFile)
}
if err != nil {
return nil, err
}
if runtime.GOOS == "windows" {
configData = bytes.Replace(configData, []byte("\r\n"), []byte("\n"), -1)
}
if err = quick.CheckDuplicateKeys(string(configData)); err != nil {
return nil, err
}
var config = &serverConfig{}
if err = json.Unmarshal(configData, config); err != nil {
return nil, err
}
if err = quick.CheckData(config); err != nil {
return nil, err
}
return config, nil
}
// ConfigSys - config system.
type ConfigSys struct{}
// Load - load config.json.
func (sys *ConfigSys) Load(objAPI ObjectLayer) error {
return sys.Init(objAPI)
}
// Init - initializes config system from config.json.
func (sys *ConfigSys) Init(objAPI ObjectLayer) error {
if objAPI == nil {
return errInvalidArgument
}
doneCh := make(chan struct{})
defer close(doneCh)
// Initializing configuration needs a retry mechanism for
// the following reasons:
// - Read quorum is lost just after the initialization
// of the object layer.
// - Write quorum may not be met when a configuration
// version upgrade is needed.
retryTimerCh := newRetryTimerSimple(doneCh)
for {
select {
case <-retryTimerCh:
err := initConfig(objAPI)
if err != nil {
if strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
logger.Info("Waiting for configuration to be initialized..")
continue
}
return err
}
return nil
}
}
}
// NewConfigSys - creates new config system object.
func NewConfigSys() *ConfigSys {
return &ConfigSys{}
}
// Initialize and load config from remote etcd or local config directory
func initConfig(objAPI ObjectLayer) error {
if objAPI == nil {
return errServerNotInitialized
}
configFile := path.Join(minioConfigPrefix, minioConfigFile)
if globalEtcdClient != nil {
if err := checkConfigEtcd(context.Background(), globalEtcdClient, getConfigFile()); err != nil {
if err == errConfigNotFound {
// Migrates all configs at old location.
if err = migrateConfig(); err != nil {
return err
}
// Migrates etcd ${HOME}/.minio/config.json to '/config/config.json'
if err = migrateConfigToMinioSys(objAPI); err != nil {
return err
}
} else {
return err
}
}
// Watch config for changes and reload it.
go watchConfigEtcd(objAPI, configFile, loadConfig)
} else {
if isFile(getConfigFile()) {
if err := migrateConfig(); err != nil {
return err
}
}
// Migrates ${HOME}/.minio/config.json or config.json.deprecated
// to '<export_path>/.minio.sys/config/config.json'
// ignore if the file doesn't exist.
if err := migrateConfigToMinioSys(objAPI); err != nil {
return err
}
// Migrates backend '<export_path>/.minio.sys/config/config.json' to latest version.
if err := migrateMinioSysConfig(objAPI); err != nil {
return err
}
}
return loadConfig(objAPI)
}
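Taken together, saving and reloading is a simple round trip. The sketch below is not part of the diff; it assumes an initialized ObjectLayer (objAPI) and a populated *serverConfig (srvCfg), both hypothetical names:
func roundTripConfig(objAPI ObjectLayer, srvCfg *serverConfig) (*serverConfig, error) {
	ctx := context.Background()
	// saveServerConfig also writes config.json.backup when a previous config existed.
	if err := saveServerConfig(ctx, objAPI, srvCfg); err != nil {
		return nil, err
	}
	// readServerConfig re-validates duplicate keys and the config data itself.
	return readServerConfig(ctx, objAPI)
}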

View File

@@ -17,90 +17,52 @@
package cmd
import (
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
)
// Writes S3 compatible copy part range error.
func writeCopyPartErr(w http.ResponseWriter, err error, url *url.URL) {
func writeCopyPartErr(w http.ResponseWriter, err error, url *url.URL, browser bool) {
switch err {
case errInvalidRange:
writeErrorResponse(w, ErrInvalidCopyPartRange, url)
writeErrorResponse(w, ErrInvalidCopyPartRange, url, browser)
return
case errInvalidRangeSource:
writeErrorResponse(w, ErrInvalidCopyPartRangeSource, url)
writeErrorResponse(w, ErrInvalidCopyPartRangeSource, url, browser)
return
default:
writeErrorResponse(w, ErrInternalError, url)
writeErrorResponse(w, ErrInternalError, url, browser)
return
}
}
// Parses x-amz-copy-source-range for CopyObjectPart API. Specifically written to
// differentiate the behavior between regular httpRange header v/s x-amz-copy-source-range.
// The range of bytes to copy from the source object. The range value must use the form
// bytes=first-last, where the first and last are the zero-based byte offsets to copy.
// For example, bytes=0-9 indicates that you want to copy the first ten bytes of the source.
// Parses x-amz-copy-source-range for CopyObjectPart API. Its behavior
// is different from regular HTTP range header. It only supports the
// form `bytes=first-last` where first and last are zero-based byte
// offsets. See
// http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
func parseCopyPartRange(rangeString string, resourceSize int64) (hrange *httpRange, err error) {
// Return error if given range string doesn't start with byte range prefix.
if !strings.HasPrefix(rangeString, byteRangePrefix) {
return nil, fmt.Errorf("'%s' does not start with '%s'", rangeString, byteRangePrefix)
// for full details. This function treats an empty rangeString as
// referring to the whole resource.
func parseCopyPartRangeSpec(rangeString string) (hrange *HTTPRangeSpec, err error) {
hrange, err = parseRequestRangeSpec(rangeString)
if err != nil {
return nil, err
}
// Trim byte range prefix.
byteRangeString := strings.TrimPrefix(rangeString, byteRangePrefix)
// Check if range string contains delimiter '-', else return error. eg. "bytes=8"
sepIndex := strings.Index(byteRangeString, "-")
if sepIndex == -1 {
if hrange.IsSuffixLength || hrange.Start < 0 || hrange.End < 0 {
return nil, errInvalidRange
}
offsetBeginString := byteRangeString[:sepIndex]
offsetBegin := int64(-1)
// Convert offsetBeginString only if it's not empty.
if len(offsetBeginString) > 0 {
if !validBytePos.MatchString(offsetBeginString) {
return nil, errInvalidRange
}
if offsetBegin, err = strconv.ParseInt(offsetBeginString, 10, 64); err != nil {
return nil, errInvalidRange
}
}
offsetEndString := byteRangeString[sepIndex+1:]
offsetEnd := int64(-1)
// Convert offsetEndString only if it's not empty.
if len(offsetEndString) > 0 {
if !validBytePos.MatchString(offsetEndString) {
return nil, errInvalidRange
}
if offsetEnd, err = strconv.ParseInt(offsetEndString, 10, 64); err != nil {
return nil, errInvalidRange
}
}
// rangeString contains first byte positions. eg. "bytes=2-" or
// rangeString contains last byte positions. eg. "bytes=-2"
if offsetBegin == -1 || offsetEnd == -1 {
return nil, errInvalidRange
}
// First byte position should not be greater than last byte
// position. eg. "bytes=5-2"
if offsetBegin > offsetEnd {
return nil, errInvalidRange
}
// First and last byte positions should not be >= resourceSize.
if offsetBegin >= resourceSize || offsetEnd >= resourceSize {
return nil, errInvalidRangeSource
}
// Success..
return &httpRange{offsetBegin, offsetEnd, resourceSize}, nil
return hrange, nil
}
// checkCopyPartRangeWithSize adds more check to the range string in case of
// copy object part. This API requires having specific start and end range values
// e.g. 'bytes=3-10'. Other use cases will be rejected.
func checkCopyPartRangeWithSize(rs *HTTPRangeSpec, resourceSize int64) (err error) {
if rs == nil {
return nil
}
if rs.IsSuffixLength || rs.Start >= resourceSize || rs.End >= resourceSize {
return errInvalidRangeSource
}
return nil
}
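As a usage sketch (not part of the diff), the two helpers compose like this for a hypothetical 100-byte source object; GetOffsetLength is the HTTPRangeSpec accessor used by the tests below:
rs, err := parseCopyPartRangeSpec("bytes=0-9") // only the 'bytes=first-last' form is accepted
if err != nil {
	return err
}
// Rejects suffix ranges and offsets at or beyond the source size.
if err = checkCopyPartRangeWithSize(rs, 100); err != nil {
	return err
}
start, length, _ := rs.GetOffsetLength(100) // start == 0, length == 10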

View File

@@ -19,35 +19,37 @@ package cmd
import "testing"
// Test parseCopyPartRange()
func TestParseCopyPartRange(t *testing.T) {
func TestParseCopyPartRangeSpec(t *testing.T) {
// Test success cases.
successCases := []struct {
rangeString string
offsetBegin int64
offsetEnd int64
length int64
}{
{"bytes=2-5", 2, 5, 4},
{"bytes=2-9", 2, 9, 8},
{"bytes=2-2", 2, 2, 1},
{"bytes=0000-0006", 0, 6, 7},
{"bytes=2-5", 2, 5},
{"bytes=2-9", 2, 9},
{"bytes=2-2", 2, 2},
{"bytes=0000-0006", 0, 6},
}
objectSize := int64(10)
for _, successCase := range successCases {
hrange, err := parseCopyPartRange(successCase.rangeString, 10)
rs, err := parseCopyPartRangeSpec(successCase.rangeString)
if err != nil {
t.Fatalf("expected: <nil>, got: %s", err)
}
if hrange.offsetBegin != successCase.offsetBegin {
t.Fatalf("expected: %d, got: %d", successCase.offsetBegin, hrange.offsetBegin)
start, length, err1 := rs.GetOffsetLength(objectSize)
if err1 != nil {
t.Fatalf("expected: <nil>, got: %s", err1)
}
if hrange.offsetEnd != successCase.offsetEnd {
t.Fatalf("expected: %d, got: %d", successCase.offsetEnd, hrange.offsetEnd)
if start != successCase.offsetBegin {
t.Fatalf("expected: %d, got: %d", successCase.offsetBegin, start)
}
if hrange.getLength() != successCase.length {
t.Fatalf("expected: %d, got: %d", successCase.length, hrange.getLength())
if start+length-1 != successCase.offsetEnd {
t.Fatalf("expected: %d, got: %d", successCase.offsetEnd, start+length-1)
}
}
@@ -59,15 +61,16 @@ func TestParseCopyPartRange(t *testing.T) {
"bytes=2-+5",
"bytes=2--5",
"bytes=-",
"",
"2-5",
"bytes = 2-5",
"bytes=2 - 5",
"bytes=0-0,-1",
"bytes=2-5 ",
"bytes=-1",
"bytes=1-",
}
for _, rangeString := range invalidRangeStrings {
if _, err := parseCopyPartRange(rangeString, 10); err == nil {
if _, err := parseCopyPartRangeSpec(rangeString); err == nil {
t.Fatalf("expected: an error, got: <nil> for range %s", rangeString)
}
}
@@ -78,8 +81,14 @@ func TestParseCopyPartRange(t *testing.T) {
"bytes=20-30",
}
for _, rangeString := range errorRangeString {
if _, err := parseCopyPartRange(rangeString, 10); err != errInvalidRangeSource {
t.Fatalf("expected: %s, got: %s", errInvalidRangeSource, err)
rs, err := parseCopyPartRangeSpec(rangeString)
if err == nil {
err1 := checkCopyPartRangeWithSize(rs, objectSize)
if err1 != errInvalidRangeSource {
t.Fatalf("expected: %s, got: %s", errInvalidRangeSource, err)
}
} else {
t.Fatalf("expected: %s, got: <nil>", errInvalidRangeSource)
}
}
}

21
cmd/crypto/config.go Normal file
View File

@@ -0,0 +1,21 @@
// Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package crypto
// KMSConfig contains the KMS configuration for Hashicorp Vault.
type KMSConfig struct {
AutoEncryption bool `json:"-"`
Vault VaultConfig `json:"vault"`
}

View File

@@ -32,7 +32,7 @@
// Input: ClientKey, bucket, object, metadata, object_data
// - IV := Random({0,1}²⁵⁶)
// - ObjectKey := SHA256(ClientKey || Random({0,1}²⁵⁶))
// - KeyEncKey := HMAC-SHA256(ClientKey, IV || bucket || object)
// - KeyEncKey := HMAC-SHA256(ClientKey, IV || 'SSE-C' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
// - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
// - metadata <- IV
@@ -43,7 +43,7 @@
// Input: ClientKey, bucket, object, metadata, enc_object_data
// - IV <- metadata
// - SealedKey <- metadata
// - KeyEncKey := HMAC-SHA256(ClientKey, IV || bucket || object)
// - KeyEncKey := HMAC-SHA256(ClientKey, IV || 'SSE-C' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
@@ -64,7 +64,7 @@
// Input: MasterKey, bucket, object, metadata, object_data
// - IV := Random({0,1}²⁵⁶)
// - ObjectKey := SHA256(MasterKey || Random({0,1}²⁵⁶))
// - KeyEncKey := HMAC-SHA256(MasterKey, IV || bucket || object)
// - KeyEncKey := HMAC-SHA256(MasterKey, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
// - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
// - metadata <- IV
@@ -75,7 +75,7 @@
// Input: MasterKey, bucket, object, metadata, enc_object_data
// - IV <- metadata
// - SealedKey <- metadata
// - KeyEncKey := HMAC-SHA256(MasterKey, IV || bucket || object)
// - KeyEncKey := HMAC-SHA256(MasterKey, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
@@ -92,7 +92,7 @@
// - Key, EncKey := Generate(KeyID)
// - IV := Random({0,1}²⁵⁶)
// - ObjectKey := SHA256(Key, Random({0,1}²⁵⁶))
// - KeyEncKey := HMAC-SHA256(Key, IV || bucket || object)
// - KeyEncKey := HMAC-SHA256(Key, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
// - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
// - metadata <- IV
@@ -108,7 +108,7 @@
// - IV <- metadata
// - SealedKey <- metadata
// - Key := Unseal(KeyID, EncKey)
// - KeyEncKey := HMAC-SHA256(Key, IV || bucket || object)
// - KeyEncKey := HMAC-SHA256(Key, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
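For concreteness, the KeyEncKey line above translates into the following Go sketch, assuming the SealAlgorithm constant carries the 'DAREv2-HMAC-SHA256' identifier and path.Join produces the canonical 'bucket/object' form (this mirrors ObjectKey.Seal later in this PR):
mac := hmac.New(sha256.New, clientKey[:]) // ClientKey (or MasterKey for SSE-S3)
mac.Write(iv[:])                          // IV
mac.Write([]byte("SSE-C"))                // domain
mac.Write([]byte("DAREv2-HMAC-SHA256"))   // seal algorithm
mac.Write([]byte(path.Join(bucket, object)))
keyEncKey := mac.Sum(nil) // 32-byte KeyEncKey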

View File

@@ -16,8 +16,56 @@ package crypto
import "errors"
// Error is the generic type for any error that occurs while decrypting
// an object. It indicates that the object itself or its metadata was
// modified accidentally or maliciously.
type Error struct{ msg string }
func (e Error) Error() string { return e.msg }
var (
// ErrInvalidEncryptionMethod indicates that the specified SSE encryption method
// is not supported.
ErrInvalidEncryptionMethod = errors.New("The encryption method is not supported")
// ErrInvalidCustomerAlgorithm indicates that the specified SSE-C algorithm
// is not supported.
ErrInvalidCustomerAlgorithm = errors.New("The SSE-C algorithm is not supported")
// ErrMissingCustomerKey indicates that the HTTP headers contains no SSE-C client key.
ErrMissingCustomerKey = errors.New("The SSE-C request is missing the customer key")
// ErrMissingCustomerKeyMD5 indicates that the HTTP headers contains no SSE-C client key
// MD5 checksum.
ErrMissingCustomerKeyMD5 = errors.New("The SSE-C request is missing the customer key MD5")
// ErrInvalidCustomerKey indicates that the SSE-C client key is not valid - e.g. not a
// base64-encoded string or not 256 bits long.
ErrInvalidCustomerKey = errors.New("The SSE-C client key is invalid")
// ErrSecretKeyMismatch indicates that the provided secret key (SSE-C client key / SSE-S3 KMS key)
// does not match the secret key used during encrypting the object.
ErrSecretKeyMismatch = errors.New("The secret key does not match the secret key used during upload")
// ErrCustomerKeyMD5Mismatch indicates that the SSE-C key MD5 does not match the
// computed MD5 sum. This means that the client provided either the wrong key for
// a certain MD5 checksum or the wrong MD5 for a certain key.
ErrCustomerKeyMD5Mismatch = errors.New("The provided SSE-C key MD5 does not match the computed MD5 of the SSE-C key")
// ErrIncompatibleEncryptionMethod indicates that both SSE-C and SSE-S3 headers were specified, which is incompatible.
// The client needs to remove either the SSE-S3 header or the SSE-C headers.
ErrIncompatibleEncryptionMethod = errors.New("Server side encryption specified with both SSE-C and SSE-S3 headers")
)
var (
errMissingInternalIV = Error{"The object metadata is missing the internal encryption IV"}
errMissingInternalSealAlgorithm = Error{"The object metadata is missing the internal seal algorithm"}
errInvalidInternalIV = Error{"The internal encryption IV is malformed"}
errInvalidInternalSealAlgorithm = Error{"The internal seal algorithm is invalid and not supported"}
)
var (
// errOutOfEntropy indicates that a source of randomness (PRNG) wasn't able
// to produce enough random data. This is a fatal error and should cause a panic.
errOutOfEntropy = errors.New("Unable to read enough randomness from the system")
)

View File

@@ -15,16 +15,72 @@
package crypto
import (
"bytes"
"crypto/md5"
"encoding/base64"
"net/http"
"strings"
)
// SSEHeader is the general AWS SSE HTTP header key.
const SSEHeader = "X-Amz-Server-Side-Encryption"
// SSEAlgorithmAES256 is the only supported value for the SSE-S3 or SSE-C algorithm header.
// For SSE-S3 see: https://docs.aws.amazon.com/AmazonS3/latest/dev/SSEUsingRESTAPI.html
// For SSE-C see: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
const SSEAlgorithmAES256 = "AES256"
const (
// SSEKmsID is the HTTP header key referencing the SSE-KMS
// key ID.
SSEKmsID = SSEHeader + "-Aws-Kms-Key-Id"
// SSEKmsContext is the HTTP header key referencing the
// SSE-KMS encryption context.
SSEKmsContext = SSEHeader + "-Context"
)
const (
// SSECAlgorithm is the HTTP header key referencing
// the SSE-C algorithm.
SSECAlgorithm = SSEHeader + "-Customer-Algorithm"
// SSECKey is the HTTP header key referencing the
// SSE-C client-provided key.
SSECKey = SSEHeader + "-Customer-Key"
// SSECKeyMD5 is the HTTP header key referencing
// the MD5 sum of the client-provided key.
SSECKeyMD5 = SSEHeader + "-Customer-Key-Md5"
)
const (
// SSECopyAlgorithm is the HTTP header key referencing
// the SSE-C algorithm for SSE-C copy requests.
SSECopyAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
// SSECopyKey is the HTTP header key referencing the SSE-C
// client-provided key for SSE-C copy requests.
SSECopyKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
// SSECopyKeyMD5 is the HTTP header key referencing the
// MD5 sum of the client key for SSE-C copy requests.
SSECopyKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5"
)
const (
// SSEAlgorithmAES256 is the only supported value for the SSE-S3 or SSE-C algorithm header.
// For SSE-S3 see: https://docs.aws.amazon.com/AmazonS3/latest/dev/SSEUsingRESTAPI.html
// For SSE-C see: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
SSEAlgorithmAES256 = "AES256"
// SSEAlgorithmKMS is the value of 'X-Amz-Server-Side-Encryption' for SSE-KMS.
// See: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html
SSEAlgorithmKMS = "aws:kms"
)
// RemoveSensitiveHeaders removes confidential encryption
// information - e.g. the SSE-C key - from the HTTP headers.
// It has the same semantics as RemoveSensitiveEntries.
func RemoveSensitiveHeaders(h http.Header) {
h.Del(SSECKey)
h.Del(SSECopyKey)
}
// S3 represents AWS SSE-S3. It provides functionality to handle
// SSE-S3 requests.
@@ -36,14 +92,129 @@ type s3 struct{}
// the S3 client requests SSE-S3.
func (s3) IsRequested(h http.Header) bool {
_, ok := h[SSEHeader]
return ok
return ok && strings.ToLower(h.Get(SSEHeader)) != SSEAlgorithmKMS // Return only true if the SSE header is specified and does not contain the SSE-KMS value
}
// Parse parses the SSE-S3 related HTTP headers and checks
// ParseHTTP parses the SSE-S3 related HTTP headers and checks
// whether they contain valid values.
func (s3) Parse(h http.Header) (err error) {
func (s3) ParseHTTP(h http.Header) (err error) {
if h.Get(SSEHeader) != SSEAlgorithmAES256 {
err = ErrInvalidEncryptionMethod
}
return
}
// S3KMS represents AWS SSE-KMS. It provides functionality to
// handle SSE-KMS requests.
var S3KMS = s3KMS{}
type s3KMS struct{}
// IsRequested returns true if the HTTP headers indicates that
// the S3 client requests SSE-KMS.
func (s3KMS) IsRequested(h http.Header) bool {
if _, ok := h[SSEKmsID]; ok {
return true
}
if _, ok := h[SSEKmsContext]; ok {
return true
}
if _, ok := h[SSEHeader]; ok {
return strings.ToUpper(h.Get(SSEHeader)) != SSEAlgorithmAES256 // Return only true if the SSE header is specified and does not contain the SSE-S3 value
}
return false
}
var (
// SSEC represents AWS SSE-C. It provides functionality to handle
// SSE-C requests.
SSEC = ssec{}
// SSECopy represents AWS SSE-C for copy requests. It provides
// functionality to handle SSE-C copy requests.
SSECopy = ssecCopy{}
)
type ssec struct{}
type ssecCopy struct{}
// IsRequested returns true if the HTTP headers contains
// at least one SSE-C header. SSE-C copy headers are ignored.
func (ssec) IsRequested(h http.Header) bool {
if _, ok := h[SSECAlgorithm]; ok {
return true
}
if _, ok := h[SSECKey]; ok {
return true
}
if _, ok := h[SSECKeyMD5]; ok {
return true
}
return false
}
// IsRequested returns true if the HTTP headers contains
// at least one SSE-C copy header. Regular SSE-C headers
// are ignored.
func (ssecCopy) IsRequested(h http.Header) bool {
if _, ok := h[SSECopyAlgorithm]; ok {
return true
}
if _, ok := h[SSECopyKey]; ok {
return true
}
if _, ok := h[SSECopyKeyMD5]; ok {
return true
}
return false
}
// ParseHTTP parses the SSE-C headers and returns the SSE-C client key
// on success. SSE-C copy headers are ignored.
func (ssec) ParseHTTP(h http.Header) (key [32]byte, err error) {
if h.Get(SSECAlgorithm) != SSEAlgorithmAES256 {
return key, ErrInvalidCustomerAlgorithm
}
if h.Get(SSECKey) == "" {
return key, ErrMissingCustomerKey
}
if h.Get(SSECKeyMD5) == "" {
return key, ErrMissingCustomerKeyMD5
}
clientKey, err := base64.StdEncoding.DecodeString(h.Get(SSECKey))
if err != nil || len(clientKey) != 32 { // The client key must be 256 bits long
return key, ErrInvalidCustomerKey
}
keyMD5, err := base64.StdEncoding.DecodeString(h.Get(SSECKeyMD5))
if md5Sum := md5.Sum(clientKey); err != nil || !bytes.Equal(md5Sum[:], keyMD5) {
return key, ErrCustomerKeyMD5Mismatch
}
copy(key[:], clientKey)
return key, nil
}
// ParseHTTP parses the SSE-C copy headers and returns the SSE-C client key
// on success. Regular SSE-C headers are ignored.
func (ssecCopy) ParseHTTP(h http.Header) (key [32]byte, err error) {
if h.Get(SSECopyAlgorithm) != SSEAlgorithmAES256 {
return key, ErrInvalidCustomerAlgorithm
}
if h.Get(SSECopyKey) == "" {
return key, ErrMissingCustomerKey
}
if h.Get(SSECopyKeyMD5) == "" {
return key, ErrMissingCustomerKeyMD5
}
clientKey, err := base64.StdEncoding.DecodeString(h.Get(SSECopyKey))
if err != nil || len(clientKey) != 32 { // The client key must be 256 bits long
return key, ErrInvalidCustomerKey
}
keyMD5, err := base64.StdEncoding.DecodeString(h.Get(SSECopyKeyMD5))
if md5Sum := md5.Sum(clientKey); err != nil || !bytes.Equal(md5Sum[:], keyMD5) {
return key, ErrCustomerKeyMD5Mismatch
}
copy(key[:], clientKey)
return key, nil
}
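A usage sketch for the parser above (not part of the diff); the client key is illustrative:
clientKey := bytes.Repeat([]byte{'k'}, 32) // illustrative 256-bit client key
keyMD5 := md5.Sum(clientKey)
h := http.Header{}
h.Set(SSECAlgorithm, SSEAlgorithmAES256)
h.Set(SSECKey, base64.StdEncoding.EncodeToString(clientKey))
h.Set(SSECKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
key, err := SSEC.ParseHTTP(h) // on success, key holds the [32]byte client key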

View File

@@ -16,28 +16,64 @@ package crypto
import (
"net/http"
"sort"
"testing"
)
var isRequestedTests = []struct {
var kmsIsRequestedTests = []struct {
Header http.Header
Expected bool
}{
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES256"}}, Expected: true}, // 0
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES-256"}}, Expected: true}, // 1
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{""}}, Expected: true}, // 2
{Header: http.Header{"X-Amz-Server-Side-Encryptio": []string{"AES256"}}, Expected: false}, // 3
{Header: http.Header{}, Expected: false}, // 0
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"aws:kms"}}, Expected: true}, // 1
{Header: http.Header{"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{"0839-9047947-844842874-481"}}, Expected: true}, // 2
{Header: http.Header{"X-Amz-Server-Side-Encryption-Context": []string{"7PpPLAK26ONlVUGOWlusfg=="}}, Expected: true}, // 3
{
Header: http.Header{
"X-Amz-Server-Side-Encryption": []string{""},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{""},
"X-Amz-Server-Side-Encryption-Context": []string{""},
},
Expected: true,
}, // 4
{
Header: http.Header{
"X-Amz-Server-Side-Encryption": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{""},
},
Expected: true,
}, // 5
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES256"}}, Expected: false}, // 6
}
func TestKMSIsRequested(t *testing.T) {
for i, test := range kmsIsRequestedTests {
if got := S3KMS.IsRequested(test.Header); got != test.Expected {
t.Errorf("Test %d: Wanted %v but got %v", i, test.Expected, got)
}
}
}
var s3IsRequestedTests = []struct {
Header http.Header
Expected bool
}{
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES256"}}, Expected: true}, // 0
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES-256"}}, Expected: true}, // 1
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{""}}, Expected: true}, // 2
{Header: http.Header{"X-Amz-Server-Side-Encryptio": []string{"AES256"}}, Expected: false}, // 3
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{SSEAlgorithmKMS}}, Expected: false}, // 4
}
func TestS3IsRequested(t *testing.T) {
for i, test := range isRequestedTests {
for i, test := range s3IsRequestedTests {
if got := S3.IsRequested(test.Header); got != test.Expected {
t.Errorf("Test %d: Wanted %v but got %v", i, test.Expected, got)
}
}
}
var parseTests = []struct {
var s3ParseTests = []struct {
Header http.Header
ExpectedErr error
}{
@@ -48,9 +84,353 @@ var parseTests = []struct {
}
func TestS3Parse(t *testing.T) {
for i, test := range parseTests {
if err := S3.Parse(test.Header); err != test.ExpectedErr {
for i, test := range s3ParseTests {
if err := S3.ParseHTTP(test.Header); err != test.ExpectedErr {
t.Errorf("Test %d: Wanted '%v' but got '%v'", i, test.ExpectedErr, err)
}
}
}
var ssecIsRequestedTests = []struct {
Header http.Header
Expected bool
}{
{Header: http.Header{}, Expected: false}, // 0
{Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"}}, Expected: true}, // 1
{Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}}, Expected: true}, // 2
{Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="}}, Expected: true}, // 3
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{""},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{""},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{""},
},
Expected: true,
}, // 4
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Expected: true,
}, // 5
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Expected: false,
}, // 6
}
func TestSSECIsRequested(t *testing.T) {
for i, test := range ssecIsRequestedTests {
if got := SSEC.IsRequested(test.Header); got != test.Expected {
t.Errorf("Test %d: Wanted %v but got %v", i, test.Expected, got)
}
}
}
var ssecCopyIsRequestedTests = []struct {
Header http.Header
Expected bool
}{
{Header: http.Header{}, Expected: false}, // 0
{Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"}}, Expected: true}, // 1
{Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}}, Expected: true}, // 2
{Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="}}, Expected: true}, // 3
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{""},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{""},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{""},
},
Expected: true,
}, // 4
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Expected: true,
}, // 5
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Expected: false,
}, // 6
}
func TestSSECopyIsRequested(t *testing.T) {
for i, test := range ssecCopyIsRequestedTests {
if got := SSECopy.IsRequested(test.Header); got != test.Expected {
t.Errorf("Test %d: Wanted %v but got %v", i, test.Expected, got)
}
}
}
var ssecParseTests = []struct {
Header http.Header
ExpectedErr error
}{
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: nil, // 0
},
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES-256"}, // invalid algorithm
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: ErrInvalidCustomerAlgorithm, // 1
},
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{""}, // no client key
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: ErrMissingCustomerKey, // 2
},
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRr.ZXltdXN0cHJvdmlkZWQ="}, // invalid key
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: ErrInvalidCustomerKey, // 3
},
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{""}, // no key MD5
},
ExpectedErr: ErrMissingCustomerKeyMD5, // 4
},
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"DzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}, // wrong client key
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: ErrCustomerKeyMD5Mismatch, // 5
},
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{".7PpPLAK26ONlVUGOWlusfg=="}, // wrong key MD5
},
ExpectedErr: ErrCustomerKeyMD5Mismatch, // 6
},
}
func TestSSECParse(t *testing.T) {
var zeroKey [32]byte
for i, test := range ssecParseTests {
key, err := SSEC.ParseHTTP(test.Header)
if err != test.ExpectedErr {
t.Errorf("Test %d: want error '%v' but got '%v'", i, test.ExpectedErr, err)
}
if err != nil && key != zeroKey {
t.Errorf("Test %d: parsing failed and client key is not zero key", i)
}
if err == nil && key == zeroKey {
t.Errorf("Test %d: parsed client key is zero key", i)
}
}
}
var ssecCopyParseTests = []struct {
Header http.Header
ExpectedErr error
}{
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: nil, // 0
},
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES-256"}, // invalid algorithm
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: ErrInvalidCustomerAlgorithm, // 1
},
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{""}, // no client key
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: ErrMissingCustomerKey, // 2
},
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRr.ZXltdXN0cHJvdmlkZWQ="}, // invalid key
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: ErrInvalidCustomerKey, // 3
},
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{""}, // no key MD5
},
ExpectedErr: ErrMissingCustomerKeyMD5, // 4
},
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"DzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}, // wrong client key
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: ErrCustomerKeyMD5Mismatch, // 5
},
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{".7PpPLAK26ONlVUGOWlusfg=="}, // wrong key MD5
},
ExpectedErr: ErrCustomerKeyMD5Mismatch, // 6
},
}
func TestSSECopyParse(t *testing.T) {
var zeroKey [32]byte
for i, test := range ssecCopyParseTests {
key, err := SSECopy.ParseHTTP(test.Header)
if err != test.ExpectedErr {
t.Errorf("Test %d: want error '%v' but got '%v'", i, test.ExpectedErr, err)
}
if err != nil && key != zeroKey {
t.Errorf("Test %d: parsing failed and client key is not zero key", i)
}
if err == nil && key == zeroKey {
t.Errorf("Test %d: parsed client key is zero key", i)
}
if _, ok := test.Header[SSECKey]; ok {
t.Errorf("Test %d: client key is not removed from HTTP headers after parsing", i)
}
}
}
var removeSensitiveHeadersTests = []struct {
Header, ExpectedHeader http.Header
}{
{
Header: http.Header{
SSECKey: []string{""},
SSECopyKey: []string{""},
},
ExpectedHeader: http.Header{},
},
{ // Standard SSE-C request headers
Header: http.Header{
SSECAlgorithm: []string{SSEAlgorithmAES256},
SSECKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedHeader: http.Header{
SSECAlgorithm: []string{SSEAlgorithmAES256},
SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
},
{ // Standard SSE-C + SSE-C-copy request headers
Header: http.Header{
SSECAlgorithm: []string{SSEAlgorithmAES256},
SSECKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
SSECopyKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
SSECopyKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedHeader: http.Header{
SSECAlgorithm: []string{SSEAlgorithmAES256},
SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
SSECopyKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
},
{ // Standard SSE-C + metadata request headers
Header: http.Header{
SSECAlgorithm: []string{SSEAlgorithmAES256},
SSECKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
"X-Amz-Meta-Test-1": []string{"Test-1"},
},
ExpectedHeader: http.Header{
SSECAlgorithm: []string{SSEAlgorithmAES256},
SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
"X-Amz-Meta-Test-1": []string{"Test-1"},
},
},
}
func TestRemoveSensitiveHeaders(t *testing.T) {
isEqual := func(x, y http.Header) bool {
if len(x) != len(y) {
return false
}
for k, v := range x {
u, ok := y[k]
if !ok || len(v) != len(u) {
return false
}
sort.Strings(v)
sort.Strings(u)
for j := range v {
if v[j] != u[j] {
return false
}
}
}
return true
}
areKeysEqual := func(h http.Header, metadata map[string]string) bool {
if len(h) != len(metadata) {
return false
}
for k := range h {
if _, ok := metadata[k]; !ok {
return false
}
}
return true
}
for i, test := range removeSensitiveHeadersTests {
metadata := make(map[string]string, len(test.Header))
for k := range test.Header {
metadata[k] = "" // set metadata key - we don't care about the value
}
RemoveSensitiveHeaders(test.Header)
if !isEqual(test.ExpectedHeader, test.Header) {
t.Errorf("Test %d: filtered headers do not match expected headers - got: %v , want: %v", i, test.Header, test.ExpectedHeader)
}
RemoveSensitiveEntries(metadata)
if !areKeysEqual(test.ExpectedHeader, metadata) {
t.Errorf("Test %d: filtered headers do not match expected headers - got: %v , want: %v", i, test.Header, test.ExpectedHeader)
}
}
}
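The symmetry the last assertion checks can be summed up in a short sketch (values illustrative):
h := http.Header{SSECKey: []string{"c2VjcmV0"}, SSECKeyMD5: []string{"bWQ1"}}
RemoveSensitiveHeaders(h) // drops SSECKey and SSECopyKey, keeps the MD5 sums
meta := map[string]string{SSECKey: "c2VjcmV0", SSECKeyMD5: "bWQ1"}
RemoveSensitiveEntries(meta) // same semantics for metadata maps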

View File

@@ -21,8 +21,9 @@ import (
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"io"
"path/filepath"
"path"
"github.com/minio/minio/cmd/logger"
sha256 "github.com/minio/sha256-simd"
@@ -34,50 +35,98 @@ import (
type ObjectKey [32]byte
// GenerateKey generates a unique ObjectKey from a 256 bit external key
// and a source of randomness. If random is nil the default PRNG of system
// (crypto/rand) is used.
// and a source of randomness. If random is nil the default PRNG of the
// system (crypto/rand) is used.
func GenerateKey(extKey [32]byte, random io.Reader) (key ObjectKey) {
if random == nil {
random = rand.Reader
}
var nonce [32]byte
if _, err := io.ReadFull(random, nonce[:]); err != nil {
logger.CriticalIf(context.Background(), errors.New("Unable to read enough randomness from the system"))
logger.CriticalIf(context.Background(), errOutOfEntropy)
}
sha := sha256.New()
sha.Write(extKey[:])
sha.Write(nonce[:])
sha.Sum(key[:0])
return
return key
}
// GenerateIV generates a new random 256 bit IV from the provided source
// of randomness. If random is nil the default PRNG of the system
// (crypto/rand) is used.
func GenerateIV(random io.Reader) (iv [32]byte) {
if random == nil {
random = rand.Reader
}
if _, err := io.ReadFull(random, iv[:]); err != nil {
logger.CriticalIf(context.Background(), errOutOfEntropy)
}
return iv
}
// SealedKey represents a sealed object key. It can be stored
// at an untrusted location.
type SealedKey struct {
Key [64]byte // The encrypted and authenticated object key.
IV [32]byte // The random IV used to encrypt the object-key.
Algorithm string // The sealing algorithm used to encrypt the object key.
}
// Seal encrypts the ObjectKey using the 256 bit external key and IV. The sealed
// key is also cryptographically bound to the object's path (bucket/object).
func (key ObjectKey) Seal(extKey, iv [32]byte, bucket, object string) []byte {
var sealedKey bytes.Buffer
// key is also cryptographically bound to the object's path (bucket/object) and the
// domain (SSE-C or SSE-S3).
func (key ObjectKey) Seal(extKey, iv [32]byte, domain, bucket, object string) SealedKey {
var (
sealingKey [32]byte
encryptedKey bytes.Buffer
)
mac := hmac.New(sha256.New, extKey[:])
mac.Write(iv[:])
mac.Write([]byte(filepath.Join(bucket, object)))
if n, err := sio.Encrypt(&sealedKey, bytes.NewReader(key[:]), sio.Config{Key: mac.Sum(nil)}); n != 64 || err != nil {
mac.Write([]byte(domain))
mac.Write([]byte(SealAlgorithm))
mac.Write([]byte(path.Join(bucket, object))) // use path.Join for canonical 'bucket/object'
mac.Sum(sealingKey[:0])
if n, err := sio.Encrypt(&encryptedKey, bytes.NewReader(key[:]), sio.Config{Key: sealingKey[:]}); n != 64 || err != nil {
logger.CriticalIf(context.Background(), errors.New("Unable to generate sealed key"))
}
return sealedKey.Bytes()
sealedKey := SealedKey{
IV: iv,
Algorithm: SealAlgorithm,
}
copy(sealedKey.Key[:], encryptedKey.Bytes())
return sealedKey
}
// Unseal decrypts a sealed key using the 256 bit external key and IV. Since the sealed key
// is cryptographically bound to the object's path the same bucket/object as during sealing
// Unseal decrypts a sealed key using the 256 bit external key. Since the sealed key
// may be cryptographically bound to the object's path the same bucket/object as during sealing
// must be provided. On success the ObjectKey contains the decrypted sealed key.
func (key *ObjectKey) Unseal(sealedKey []byte, extKey, iv [32]byte, bucket, object string) error {
var unsealedKey bytes.Buffer
mac := hmac.New(sha256.New, extKey[:])
mac.Write(iv[:])
mac.Write([]byte(filepath.Join(bucket, object)))
if n, err := sio.Decrypt(&unsealedKey, bytes.NewReader(sealedKey), sio.Config{Key: mac.Sum(nil)}); n != 32 || err != nil {
return err // TODO(aead): upgrade sio to use sio.Error
func (key *ObjectKey) Unseal(extKey [32]byte, sealedKey SealedKey, domain, bucket, object string) error {
var (
unsealConfig sio.Config
decryptedKey bytes.Buffer
)
switch sealedKey.Algorithm {
default:
return Error{fmt.Sprintf("The sealing algorithm '%s' is not supported", sealedKey.Algorithm)}
case SealAlgorithm:
mac := hmac.New(sha256.New, extKey[:])
mac.Write(sealedKey.IV[:])
mac.Write([]byte(domain))
mac.Write([]byte(SealAlgorithm))
mac.Write([]byte(path.Join(bucket, object))) // use path.Join for canonical 'bucket/object'
unsealConfig = sio.Config{MinVersion: sio.Version20, Key: mac.Sum(nil)}
case InsecureSealAlgorithm:
sha := sha256.New()
sha.Write(extKey[:])
sha.Write(sealedKey.IV[:])
unsealConfig = sio.Config{MinVersion: sio.Version10, Key: sha.Sum(nil)}
}
copy(key[:], unsealedKey.Bytes())
if n, err := sio.Decrypt(&decryptedKey, bytes.NewReader(sealedKey.Key[:]), unsealConfig); n != 32 || err != nil {
return ErrSecretKeyMismatch
}
copy(key[:], decryptedKey.Bytes())
return nil
}
@@ -89,5 +138,39 @@ func (key ObjectKey) DerivePartKey(id uint32) (partKey [32]byte) {
mac := hmac.New(sha256.New, key[:])
mac.Write(bin[:])
mac.Sum(partKey[:0])
return
return partKey
}
// SealETag seals the etag using the object key.
// It does not encrypt empty ETags because such ETags indicate
// that the S3 client hasn't sent an ETag = MD5(object) and
// the backend can pick an ETag value.
func (key ObjectKey) SealETag(etag []byte) []byte {
if len(etag) == 0 { // don't encrypt empty ETag - only if client sent ETag = MD5(object)
return etag
}
var buffer bytes.Buffer
mac := hmac.New(sha256.New, key[:])
mac.Write([]byte("SSE-etag"))
if _, err := sio.Encrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil)}); err != nil {
logger.CriticalIf(context.Background(), errors.New("Unable to encrypt ETag using object key"))
}
return buffer.Bytes()
}
// UnsealETag unseals the etag using the provided object key.
// It does not try to decrypt the ETag if len(etag) == 16
// because such ETags indicate that the S3 client hasn't sent
// an ETag = MD5(object) and the backend has picked an ETag value.
func (key ObjectKey) UnsealETag(etag []byte) ([]byte, error) {
if !IsETagSealed(etag) {
return etag, nil
}
var buffer bytes.Buffer
mac := hmac.New(sha256.New, key[:])
mac.Write([]byte("SSE-etag"))
if _, err := sio.Decrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil)}); err != nil {
return nil, err
}
return buffer.Bytes(), nil
}
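A round-trip sketch of the new Seal/Unseal API (not part of the diff); extKey stands in for an SSE-C client key or a KMS data key:
var extKey [32]byte // illustrative external key
key := GenerateKey(extKey, nil) // nil selects crypto/rand
sealed := key.Seal(extKey, GenerateIV(nil), "SSE-C", "bucket", "object")
var unsealed ObjectKey
// Unseal fails unless extKey, domain, bucket and object all match the Seal call.
if err := unsealed.Unseal(extKey, sealed, "SSE-C", "bucket", "object"); err != nil {
	return err
}
partKey := key.DerivePartKey(1) // per-part key for multipart uploads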

View File

@@ -20,6 +20,8 @@ import (
"encoding/hex"
"io"
"testing"
"github.com/minio/minio/cmd/logger"
)
var shortRandom = func(limit int64) io.Reader { return io.LimitReader(rand.Reader, limit) }
@@ -37,14 +39,18 @@ var generateKeyTests = []struct {
Random io.Reader
ShouldPass bool
}{
{ExtKey: [32]byte{}, Random: nil, ShouldPass: true}, // 0
{ExtKey: [32]byte{}, Random: rand.Reader, ShouldPass: true}, // 1
{ExtKey: [32]byte{}, Random: shortRandom(32), ShouldPass: true}, // 2
// {ExtKey: [32]byte{}, Random: shortRandom(31), ShouldPass: false}, // 3 See: https://github.com/minio/minio/issues/6064
{ExtKey: [32]byte{}, Random: nil, ShouldPass: true}, // 0
{ExtKey: [32]byte{}, Random: rand.Reader, ShouldPass: true}, // 1
{ExtKey: [32]byte{}, Random: shortRandom(32), ShouldPass: true}, // 2
{ExtKey: [32]byte{}, Random: shortRandom(31), ShouldPass: false}, // 3
}
func TestGenerateKey(t *testing.T) {
defer func(disableLog bool) { logger.Disable = disableLog }(logger.Disable)
logger.Disable = true
for i, test := range generateKeyTests {
i, test := i, test
func() {
defer recoverTest(i, test.ShouldPass, t)
key := GenerateKey(test.ExtKey, test.Random)
@@ -55,38 +61,64 @@ func TestGenerateKey(t *testing.T) {
}
}
var sealUnsealKeyTests = []struct {
SealExtKey, SealIV [32]byte
SealBucket, SealObject string
var generateIVTests = []struct {
Random io.Reader
ShouldPass bool
}{
{Random: nil, ShouldPass: true}, // 0
{Random: rand.Reader, ShouldPass: true}, // 1
{Random: shortRandom(32), ShouldPass: true}, // 2
{Random: shortRandom(31), ShouldPass: false}, // 3
}
UnsealExtKey, UnsealIV [32]byte
UnsealBucket, UnsealObject string
func TestGenerateIV(t *testing.T) {
defer func(disableLog bool) { logger.Disable = disableLog }(logger.Disable)
logger.Disable = true
for i, test := range generateIVTests {
i, test := i, test
func() {
defer recoverTest(i, test.ShouldPass, t)
iv := GenerateIV(test.Random)
if iv == [32]byte{} {
t.Errorf("Test %d: generated IV is zero IV", i) // check that we generate random and unique IV
}
}()
}
}
var sealUnsealKeyTests = []struct {
SealExtKey, SealIV [32]byte
SealDomain, SealBucket, SealObject string
UnsealExtKey [32]byte
UnsealDomain, UnsealBucket, UnsealObject string
ShouldPass bool
}{
{
SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealBucket: "bucket", SealObject: "object",
UnsealExtKey: [32]byte{}, UnsealIV: [32]byte{}, UnsealBucket: "bucket", UnsealObject: "object",
SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealDomain: "SSE-C", SealBucket: "bucket", SealObject: "object",
UnsealExtKey: [32]byte{}, UnsealDomain: "SSE-C", UnsealBucket: "bucket", UnsealObject: "object",
ShouldPass: true,
}, // 0
{
SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealBucket: "bucket", SealObject: "object",
UnsealExtKey: [32]byte{1}, UnsealIV: [32]byte{0}, UnsealBucket: "bucket", UnsealObject: "object",
SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealDomain: "SSE-C", SealBucket: "bucket", SealObject: "object",
UnsealExtKey: [32]byte{1}, UnsealDomain: "SSE-C", UnsealBucket: "bucket", UnsealObject: "object", // different ext-key
ShouldPass: false,
}, // 1
{
SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealBucket: "bucket", SealObject: "object",
UnsealExtKey: [32]byte{}, UnsealIV: [32]byte{1}, UnsealBucket: "bucket", UnsealObject: "object",
SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealDomain: "SSE-S3", SealBucket: "bucket", SealObject: "object",
UnsealExtKey: [32]byte{}, UnsealDomain: "SSE-C", UnsealBucket: "bucket", UnsealObject: "object", // different domain
ShouldPass: false,
}, // 2
{
SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealBucket: "bucket", SealObject: "object",
UnsealExtKey: [32]byte{}, UnsealIV: [32]byte{}, UnsealBucket: "Bucket", UnsealObject: "object",
SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealDomain: "SSE-C", SealBucket: "bucket", SealObject: "object",
UnsealExtKey: [32]byte{}, UnsealDomain: "SSE-C", UnsealBucket: "Bucket", UnsealObject: "object", // different bucket
ShouldPass: false,
}, // 3
{
SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealBucket: "bucket", SealObject: "object",
UnsealExtKey: [32]byte{}, UnsealIV: [32]byte{}, UnsealBucket: "bucket", UnsealObject: "Object",
SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealDomain: "SSE-C", SealBucket: "bucket", SealObject: "object",
UnsealExtKey: [32]byte{}, UnsealDomain: "SSE-C", UnsealBucket: "bucket", UnsealObject: "Object", // different object
ShouldPass: false,
}, // 4
}
@@ -94,13 +126,22 @@ var sealUnsealKeyTests = []struct {
func TestSealUnsealKey(t *testing.T) {
for i, test := range sealUnsealKeyTests {
key := GenerateKey(test.SealExtKey, rand.Reader)
sealedKey := key.Seal(test.SealExtKey, test.SealIV, test.SealBucket, test.SealObject)
if err := key.Unseal(sealedKey, test.UnsealExtKey, test.UnsealIV, test.UnsealBucket, test.UnsealObject); err == nil && !test.ShouldPass {
sealedKey := key.Seal(test.SealExtKey, test.SealIV, test.SealDomain, test.SealBucket, test.SealObject)
if err := key.Unseal(test.UnsealExtKey, sealedKey, test.UnsealDomain, test.UnsealBucket, test.UnsealObject); err == nil && !test.ShouldPass {
t.Errorf("Test %d should fail but passed successfully", i)
} else if err != nil && test.ShouldPass {
t.Errorf("Test %d should pass put failed: %v", i, err)
}
}
// Test legacy InsecureSealAlgorithm
var extKey, iv [32]byte
key := GenerateKey(extKey, rand.Reader)
sealedKey := key.Seal(extKey, iv, "SSE-S3", "bucket", "object")
sealedKey.Algorithm = InsecureSealAlgorithm
if err := key.Unseal(extKey, sealedKey, "SSE-S3", "bucket", "object"); err == nil {
t.Errorf("'%s' test succeeded but it should fail because the legacy algorithm was used", sealedKey.Algorithm)
}
}
var derivePartKeyTest = []struct {
@@ -125,3 +166,31 @@ func TestDerivePartKey(t *testing.T) {
}
}
}
var sealUnsealETagTests = []string{
"",
"90682b8e8cc7609c",
"90682b8e8cc7609c4671e1d64c73fc30",
"90682b8e8cc7609c4671e1d64c73fc307fb3104f",
}
func TestSealETag(t *testing.T) {
var key ObjectKey
for i := range key {
key[i] = byte(i)
}
for i, etag := range sealUnsealETagTests {
tag, err := hex.DecodeString(etag)
if err != nil {
t.Errorf("Test %d: failed to decode etag: %s", i, err)
}
sealedETag := key.SealETag(tag)
unsealedETag, err := key.UnsealETag(sealedETag)
if err != nil {
t.Errorf("Test %d: failed to decrypt etag: %s", i, err)
}
if !bytes.Equal(unsealedETag, tag) {
t.Errorf("Test %d: unsealed etag does not match: got %s - want %s", i, hex.EncodeToString(unsealedETag), etag)
}
}
}
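Condensed, the ETag round trip exercised above looks like this; IsETagSealed (used by UnsealETag) decides whether decryption is attempted at all:
etag, _ := hex.DecodeString("90682b8e8cc7609c4671e1d64c73fc30")
sealedETag := key.SealETag(etag) // empty ETags pass through unencrypted
plainETag, err := key.UnsealETag(sealedETag) // recovers the original 16-byte ETag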

138
cmd/crypto/kms.go Normal file
View File

@@ -0,0 +1,138 @@
// Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package crypto
import (
"bytes"
"context"
"crypto/hmac"
"crypto/rand"
"errors"
"fmt"
"io"
"sort"
"github.com/minio/minio/cmd/logger"
sha256 "github.com/minio/sha256-simd"
"github.com/minio/sio"
)
// Context is a list of key-value pairs cryptographically
// associated with a certain object.
type Context map[string]string
// WriteTo writes the context in a canonical form to w.
// It returns the number of bytes written and the first error
// encountered while writing to w, if any.
//
// WriteTo sorts the context keys and writes the sorted
// key-value pairs as canonical JSON object to w.
func (c Context) WriteTo(w io.Writer) (n int64, err error) {
sortedKeys := make(sort.StringSlice, 0, len(c))
for k := range c {
sortedKeys = append(sortedKeys, k)
}
sort.Sort(sortedKeys)
nn, err := io.WriteString(w, "{")
if err != nil {
return n + int64(nn), err
}
n += int64(nn)
for i, k := range sortedKeys {
s := fmt.Sprintf("\"%s\":\"%s\",", k, c[k])
if i == len(sortedKeys)-1 {
s = s[:len(s)-1] // remove last ','
}
nn, err = io.WriteString(w, s)
if err != nil {
return n + int64(nn), err
}
n += int64(nn)
}
nn, err = io.WriteString(w, "}")
return n + int64(nn), err
}
// KMS represents an active and authenticated connection
// to a Key-Management-Service. It supports data key
// generation and unsealing of KMS-generated data keys.
type KMS interface {
// GenerateKey generates a new random data key using
// the master key referenced by the keyID. It returns
// the plaintext key and the sealed plaintext key
// on success.
//
// The context is cryptographically bound to the
// generated key. The same context must be provided
// again to unseal the generated key.
GenerateKey(keyID string, context Context) (key [32]byte, sealedKey []byte, err error)
// UnsealKey unseals the sealedKey using the master key
// referenced by the keyID. The provided context must
// match the context used to generate the sealed key.
UnsealKey(keyID string, sealedKey []byte, context Context) (key [32]byte, err error)
}
type masterKeyKMS struct {
masterKey [32]byte
}
// NewKMS returns a basic KMS implementation from a single 256 bit master key.
//
// The KMS accepts any keyID but binds the keyID and context cryptographically
// to the generated keys.
func NewKMS(key [32]byte) KMS { return &masterKeyKMS{masterKey: key} }
func (kms *masterKeyKMS) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
if _, err = io.ReadFull(rand.Reader, key[:]); err != nil {
logger.CriticalIf(context.Background(), errOutOfEntropy)
}
var (
buffer bytes.Buffer
derivedKey = kms.deriveKey(keyID, ctx)
)
if n, err := sio.Encrypt(&buffer, bytes.NewReader(key[:]), sio.Config{Key: derivedKey[:]}); err != nil || n != 64 {
logger.CriticalIf(context.Background(), errors.New("KMS: unable to encrypt data key"))
}
sealedKey = buffer.Bytes()
return key, sealedKey, nil
}
func (kms *masterKeyKMS) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
var (
buffer bytes.Buffer
derivedKey = kms.deriveKey(keyID, ctx)
)
if n, err := sio.Decrypt(&buffer, bytes.NewReader(sealedKey), sio.Config{Key: derivedKey[:]}); err != nil || n != 32 {
return key, err // TODO(aead): upgrade sio to use sio.Error
}
copy(key[:], buffer.Bytes())
return key, nil
}
func (kms *masterKeyKMS) deriveKey(keyID string, context Context) (key [32]byte) {
if context == nil {
context = Context{}
}
mac := hmac.New(sha256.New, kms.masterKey[:])
mac.Write([]byte(keyID))
context.WriteTo(mac)
mac.Sum(key[:0])
return key
}
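A usage sketch for the single-master-key KMS above (key ID and context are illustrative):
kms := NewKMS([32]byte{}) // zero master key, for illustration only
kmsCtx := Context{"bucket": "photos", "object": "2019/01/image.png"}
key, sealedKey, err := kms.GenerateKey("my-minio-key", kmsCtx)
if err != nil {
	return err
}
// Unsealing requires the same key ID and the same canonically encoded context.
unsealed, err := kms.UnsealKey("my-minio-key", sealedKey, kmsCtx)
if err == nil && key == unsealed { // [32]byte arrays are comparable
	// success: the data key round-tripped through the KMS
}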

84
cmd/crypto/kms_test.go Normal file
View File

@@ -0,0 +1,84 @@
// Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package crypto
import (
"bytes"
"path"
"strings"
"testing"
)
var masterKeyKMSTests = []struct {
GenKeyID, UnsealKeyID string
GenContext, UnsealContext Context
ShouldFail bool
}{
{GenKeyID: "", UnsealKeyID: "", GenContext: Context{}, UnsealContext: nil, ShouldFail: false}, // 0
{GenKeyID: "ac47be7f", UnsealKeyID: "ac47be7f", GenContext: Context{}, UnsealContext: Context{}, ShouldFail: false}, // 1
{GenKeyID: "ac47be7f", UnsealKeyID: "ac47be7f", GenContext: Context{"bucket": "object"}, UnsealContext: Context{"bucket": "object"}, ShouldFail: false}, // 2
{GenKeyID: "", UnsealKeyID: "", GenContext: Context{"bucket": path.Join("bucket", "object")}, UnsealContext: Context{"bucket": path.Join("bucket", "object")}, ShouldFail: false}, // 3
{GenKeyID: "", UnsealKeyID: "", GenContext: Context{"a": "a", "0": "0", "b": "b"}, UnsealContext: Context{"b": "b", "a": "a", "0": "0"}, ShouldFail: false}, // 4
{GenKeyID: "ac47be7f", UnsealKeyID: "ac47be7e", GenContext: Context{}, UnsealContext: Context{}, ShouldFail: true}, // 5
{GenKeyID: "ac47be7f", UnsealKeyID: "ac47be7f", GenContext: Context{"bucket": "object"}, UnsealContext: Context{"Bucket": "object"}, ShouldFail: true}, // 6
{GenKeyID: "", UnsealKeyID: "", GenContext: Context{"bucket": path.Join("bucket", "Object")}, UnsealContext: Context{"bucket": path.Join("bucket", "object")}, ShouldFail: true}, // 7
{GenKeyID: "", UnsealKeyID: "", GenContext: Context{"a": "a", "0": "1", "b": "b"}, UnsealContext: Context{"b": "b", "a": "a", "0": "0"}, ShouldFail: true}, // 8
}
func TestMasterKeyKMS(t *testing.T) {
kms := NewKMS([32]byte{})
for i, test := range masterKeyKMSTests {
key, sealedKey, err := kms.GenerateKey(test.GenKeyID, test.GenContext)
if err != nil {
t.Errorf("Test %d: KMS failed to generate key: %v", i, err)
}
unsealedKey, err := kms.UnsealKey(test.UnsealKeyID, sealedKey, test.UnsealContext)
if err != nil && !test.ShouldFail {
t.Errorf("Test %d: KMS failed to unseal the generated key: %v", i, err)
}
if err == nil && test.ShouldFail {
t.Errorf("Test %d: KMS unsealed the generated successfully but should have failed", i)
}
if !test.ShouldFail && !bytes.Equal(key[:], unsealedKey[:]) {
t.Errorf("Test %d: The generated and unsealed key differ", i)
}
}
}
var contextWriteToTests = []struct {
Context Context
ExpectedJSON string
}{
{Context: Context{}, ExpectedJSON: "{}"}, // 0
{Context: Context{"a": "b"}, ExpectedJSON: `{"a":"b"}`}, // 1
{Context: Context{"a": "b", "c": "d"}, ExpectedJSON: `{"a":"b","c":"d"}`}, // 2
{Context: Context{"c": "d", "a": "b"}, ExpectedJSON: `{"a":"b","c":"d"}`}, // 3
{Context: Context{"0": "1", "-": "2", ".": "#"}, ExpectedJSON: `{"-":"2",".":"#","0":"1"}`}, // 4
}
func TestContextWriteTo(t *testing.T) {
for i, test := range contextWriteToTests {
var jsonContext strings.Builder
if _, err := test.Context.WriteTo(&jsonContext); err != nil {
t.Errorf("Test %d: Failed to encode context: %v", i, err)
continue
}
if s := jsonContext.String(); s != test.ExpectedJSON {
t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON)
}
}
}

cmd/crypto/metadata.go (new file, 244 lines)

@@ -0,0 +1,244 @@
// Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package crypto
import (
"context"
"encoding/base64"
"fmt"
"github.com/minio/minio/cmd/logger"
)
// IsMultiPart returns true if the object metadata indicates
// that it was uploaded using some form of server-side-encryption
// and the S3 multipart API.
func IsMultiPart(metadata map[string]string) bool {
if _, ok := metadata[SSEMultipart]; ok {
return true
}
return false
}
// RemoveSensitiveEntries removes confidential encryption
// information - e.g. the SSE-C key - from the metadata map.
// It has the same semantics as RemoveSensitiveHeaders.
func RemoveSensitiveEntries(metadata map[string]string) { // This function is tested in TestRemoveSensitiveHeaders for compatibility reasons
delete(metadata, SSECKey)
delete(metadata, SSECopyKey)
}
// RemoveSSEHeaders removes all crypto-specific SSE
// header entries from the metadata map.
func RemoveSSEHeaders(metadata map[string]string) {
delete(metadata, SSEHeader)
delete(metadata, SSECKeyMD5)
delete(metadata, SSECAlgorithm)
}
// RemoveInternalEntries removes all crypto-specific internal
// metadata entries from the metadata map.
func RemoveInternalEntries(metadata map[string]string) {
delete(metadata, SSEMultipart)
delete(metadata, SSEIV)
delete(metadata, SSESealAlgorithm)
delete(metadata, SSECSealedKey)
delete(metadata, S3SealedKey)
delete(metadata, S3KMSKeyID)
delete(metadata, S3KMSSealedKey)
}
// IsEncrypted returns true if the object metadata indicates
// that it was uploaded using some form of server-side-encryption.
//
// IsEncrypted only checks whether the metadata contains at least
// one entry indicating SSE-C or SSE-S3.
func IsEncrypted(metadata map[string]string) bool {
if _, ok := metadata[SSEIV]; ok {
return true
}
if _, ok := metadata[SSESealAlgorithm]; ok {
return true
}
if IsMultiPart(metadata) {
return true
}
if S3.IsEncrypted(metadata) {
return true
}
if SSEC.IsEncrypted(metadata) {
return true
}
return false
}
// IsEncrypted returns true if the object metadata indicates
// that the object was uploaded using SSE-S3.
func (s3) IsEncrypted(metadata map[string]string) bool {
if _, ok := metadata[S3SealedKey]; ok {
return true
}
if _, ok := metadata[S3KMSKeyID]; ok {
return true
}
if _, ok := metadata[S3KMSSealedKey]; ok {
return true
}
return false
}
// IsEncrypted returns true if the object metadata indicates
// that the object was uploaded using SSE-C.
func (ssec) IsEncrypted(metadata map[string]string) bool {
if _, ok := metadata[SSECSealedKey]; ok {
return true
}
return false
}
// CreateMultipartMetadata adds the multipart flag entry to metadata
// and returns the modified metadata. It allocates a new metadata map if
// metadata is nil.
func CreateMultipartMetadata(metadata map[string]string) map[string]string {
if metadata == nil {
metadata = map[string]string{}
}
metadata[SSEMultipart] = ""
return metadata
}
// CreateMetadata encodes the keyID, the sealed kms data key and the sealed key
// into the metadata and returns the modified metadata. It allocates a new
// metadata map if metadata is nil.
func (s3) CreateMetadata(metadata map[string]string, keyID string, kmsKey []byte, sealedKey SealedKey) map[string]string {
if sealedKey.Algorithm != SealAlgorithm {
logger.CriticalIf(context.Background(), fmt.Errorf("The seal algorithm '%s' is invalid for SSE-S3", sealedKey.Algorithm))
}
if metadata == nil {
metadata = map[string]string{}
}
metadata[S3KMSKeyID] = keyID
metadata[SSESealAlgorithm] = sealedKey.Algorithm
metadata[SSEIV] = base64.StdEncoding.EncodeToString(sealedKey.IV[:])
metadata[S3SealedKey] = base64.StdEncoding.EncodeToString(sealedKey.Key[:])
metadata[S3KMSSealedKey] = base64.StdEncoding.EncodeToString(kmsKey)
return metadata
}
// ParseMetadata extracts all SSE-S3 related values from the object metadata
// and checks whether they are well-formed. It returns the KMS key-ID, the
// sealed KMS key and the sealed object key on success.
func (s3) ParseMetadata(metadata map[string]string) (keyID string, kmsKey []byte, sealedKey SealedKey, err error) {
// Extract all required values from object metadata
b64IV, ok := metadata[SSEIV]
if !ok {
return keyID, kmsKey, sealedKey, errMissingInternalIV
}
algorithm, ok := metadata[SSESealAlgorithm]
if !ok {
return keyID, kmsKey, sealedKey, errMissingInternalSealAlgorithm
}
b64SealedKey, ok := metadata[S3SealedKey]
if !ok {
return keyID, kmsKey, sealedKey, Error{"The object metadata is missing the internal sealed key for SSE-S3"}
}
keyID, ok = metadata[S3KMSKeyID]
if !ok {
return keyID, kmsKey, sealedKey, Error{"The object metadata is missing the internal KMS key-ID for SSE-S3"}
}
b64KMSSealedKey, ok := metadata[S3KMSSealedKey]
if !ok {
return keyID, kmsKey, sealedKey, Error{"The object metadata is missing the internal sealed KMS data key for SSE-S3"}
}
// Check whether all extracted values are well-formed
iv, err := base64.StdEncoding.DecodeString(b64IV)
if err != nil || len(iv) != 32 {
return keyID, kmsKey, sealedKey, errInvalidInternalIV
}
if algorithm != SealAlgorithm {
return keyID, kmsKey, sealedKey, errInvalidInternalSealAlgorithm
}
encryptedKey, err := base64.StdEncoding.DecodeString(b64SealedKey)
if err != nil || len(encryptedKey) != 64 {
return keyID, kmsKey, sealedKey, Error{"The internal sealed key for SSE-S3 is invalid"}
}
kmsKey, err = base64.StdEncoding.DecodeString(b64KMSSealedKey)
if err != nil {
return keyID, kmsKey, sealedKey, Error{"The internal sealed KMS data key for SSE-S3 is invalid"}
}
sealedKey.Algorithm = algorithm
copy(sealedKey.IV[:], iv)
copy(sealedKey.Key[:], encryptedKey)
return keyID, kmsKey, sealedKey, nil
}
// CreateMetadata encodes the sealed key into the metadata and returns the modified metadata.
// It allocates a new metadata map if metadata is nil.
func (ssec) CreateMetadata(metadata map[string]string, sealedKey SealedKey) map[string]string {
if sealedKey.Algorithm != SealAlgorithm {
logger.CriticalIf(context.Background(), fmt.Errorf("The seal algorithm '%s' is invalid for SSE-C", sealedKey.Algorithm))
}
if metadata == nil {
metadata = map[string]string{}
}
metadata[SSESealAlgorithm] = SealAlgorithm
metadata[SSEIV] = base64.StdEncoding.EncodeToString(sealedKey.IV[:])
metadata[SSECSealedKey] = base64.StdEncoding.EncodeToString(sealedKey.Key[:])
return metadata
}
// ParseMetadata extracts all SSE-C related values from the object metadata
// and checks whether they are well-formed. It returns the sealed object key
// on success.
func (ssec) ParseMetadata(metadata map[string]string) (sealedKey SealedKey, err error) {
// Extract all required values from object metadata
b64IV, ok := metadata[SSEIV]
if !ok {
return sealedKey, errMissingInternalIV
}
algorithm, ok := metadata[SSESealAlgorithm]
if !ok {
return sealedKey, errMissingInternalSealAlgorithm
}
b64SealedKey, ok := metadata[SSECSealedKey]
if !ok {
return sealedKey, Error{"The object metadata is missing the internal sealed key for SSE-C"}
}
// Check whether all extracted values are well-formed
iv, err := base64.StdEncoding.DecodeString(b64IV)
if err != nil || len(iv) != 32 {
return sealedKey, errInvalidInternalIV
}
if algorithm != SealAlgorithm && algorithm != InsecureSealAlgorithm {
return sealedKey, errInvalidInternalSealAlgorithm
}
encryptedKey, err := base64.StdEncoding.DecodeString(b64SealedKey)
if err != nil || len(encryptedKey) != 64 {
return sealedKey, Error{"The internal sealed key for SSE-C is invalid"}
}
sealedKey.Algorithm = algorithm
copy(sealedKey.IV[:], iv)
copy(sealedKey.Key[:], encryptedKey)
return sealedKey, nil
}
// IsETagSealed returns true if the etag seems to be encrypted.
func IsETagSealed(etag []byte) bool { return len(etag) > 16 }
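The CreateMetadata/ParseMetadata pairs above are inverses of each other; a compact sketch of the SSE-S3 round-trip, with arbitrary values (the test file below covers the edge cases):

package crypto

import (
	"bytes"
	"testing"
)

func TestS3MetadataRoundTripSketch(t *testing.T) {
	sealed := SealedKey{Algorithm: SealAlgorithm, IV: [32]byte{1}, Key: [64]byte{2}}
	kmsKey := make([]byte, 48) // the sealed KMS data key is opaque at this layer
	// A nil map is allocated internally by CreateMetadata.
	meta := S3.CreateMetadata(nil, "key-1", kmsKey, sealed)
	keyID, gotKMSKey, gotSealed, err := S3.ParseMetadata(meta)
	if err != nil {
		t.Fatal(err)
	}
	if keyID != "key-1" || !bytes.Equal(gotKMSKey, kmsKey) || gotSealed != sealed {
		t.Fatal("SSE-S3 metadata round-trip mismatch")
	}
}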

cmd/crypto/metadata_test.go (new file, 439 lines)

@@ -0,0 +1,439 @@
// Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package crypto
import (
"bytes"
"encoding/base64"
"encoding/hex"
"testing"
"github.com/minio/minio/cmd/logger"
)
var isMultipartTests = []struct {
Metadata map[string]string
Multipart bool
}{
{Multipart: true, Metadata: map[string]string{SSEMultipart: ""}}, // 0
{Multipart: true, Metadata: map[string]string{"X-Minio-Internal-Encrypted-Multipart": ""}}, // 1
{Multipart: true, Metadata: map[string]string{SSEMultipart: "some-value"}}, // 2
{Multipart: false, Metadata: map[string]string{"": ""}}, // 3
{Multipart: false, Metadata: map[string]string{"X-Minio-Internal-EncryptedMultipart": ""}}, // 4
}
func TestIsMultipart(t *testing.T) {
for i, test := range isMultipartTests {
if isMultipart := IsMultiPart(test.Metadata); isMultipart != test.Multipart {
t.Errorf("Test %d: got '%v' - want '%v'", i, isMultipart, test.Multipart)
}
}
}
var isEncryptedTests = []struct {
Metadata map[string]string
Encrypted bool
}{
{Encrypted: true, Metadata: map[string]string{SSEMultipart: ""}}, // 0
{Encrypted: true, Metadata: map[string]string{SSEIV: ""}}, // 1
{Encrypted: true, Metadata: map[string]string{SSESealAlgorithm: ""}}, // 2
{Encrypted: true, Metadata: map[string]string{SSECSealedKey: ""}}, // 3
{Encrypted: true, Metadata: map[string]string{S3SealedKey: ""}}, // 4
{Encrypted: true, Metadata: map[string]string{S3KMSKeyID: ""}}, // 5
{Encrypted: true, Metadata: map[string]string{S3KMSSealedKey: ""}}, // 6
{Encrypted: false, Metadata: map[string]string{"": ""}}, // 7
{Encrypted: false, Metadata: map[string]string{"X-Minio-Internal-Server-Side-Encryption": ""}}, // 8
}
func TestIsEncrypted(t *testing.T) {
for i, test := range isEncryptedTests {
if isEncrypted := IsEncrypted(test.Metadata); isEncrypted != test.Encrypted {
t.Errorf("Test %d: got '%v' - want '%v'", i, isEncrypted, test.Encrypted)
}
}
}
var s3IsEncryptedTests = []struct {
Metadata map[string]string
Encrypted bool
}{
{Encrypted: false, Metadata: map[string]string{SSEMultipart: ""}}, // 0
{Encrypted: false, Metadata: map[string]string{SSEIV: ""}}, // 1
{Encrypted: false, Metadata: map[string]string{SSESealAlgorithm: ""}}, // 2
{Encrypted: false, Metadata: map[string]string{SSECSealedKey: ""}}, // 3
{Encrypted: true, Metadata: map[string]string{S3SealedKey: ""}}, // 4
{Encrypted: true, Metadata: map[string]string{S3KMSKeyID: ""}}, // 5
{Encrypted: true, Metadata: map[string]string{S3KMSSealedKey: ""}}, // 6
{Encrypted: false, Metadata: map[string]string{"": ""}}, // 7
{Encrypted: false, Metadata: map[string]string{"X-Minio-Internal-Server-Side-Encryption": ""}}, // 8
}
func TestS3IsEncrypted(t *testing.T) {
for i, test := range s3IsEncryptedTests {
if isEncrypted := S3.IsEncrypted(test.Metadata); isEncrypted != test.Encrypted {
t.Errorf("Test %d: got '%v' - want '%v'", i, isEncrypted, test.Encrypted)
}
}
}
var ssecIsEncryptedTests = []struct {
Metadata map[string]string
Encrypted bool
}{
{Encrypted: false, Metadata: map[string]string{SSEMultipart: ""}}, // 0
{Encrypted: false, Metadata: map[string]string{SSEIV: ""}}, // 1
{Encrypted: false, Metadata: map[string]string{SSESealAlgorithm: ""}}, // 2
{Encrypted: true, Metadata: map[string]string{SSECSealedKey: ""}}, // 3
{Encrypted: false, Metadata: map[string]string{S3SealedKey: ""}}, // 4
{Encrypted: false, Metadata: map[string]string{S3KMSKeyID: ""}}, // 5
{Encrypted: false, Metadata: map[string]string{S3KMSSealedKey: ""}}, // 6
{Encrypted: false, Metadata: map[string]string{"": ""}}, // 7
{Encrypted: false, Metadata: map[string]string{"X-Minio-Internal-Server-Side-Encryption": ""}}, // 8
}
func TestSSECIsEncrypted(t *testing.T) {
for i, test := range ssecIsEncryptedTests {
if isEncrypted := SSEC.IsEncrypted(test.Metadata); isEncrypted != test.Encrypted {
t.Errorf("Test %d: got '%v' - want '%v'", i, isEncrypted, test.Encrypted)
}
}
}
var s3ParseMetadataTests = []struct {
Metadata map[string]string
ExpectedErr error
DataKey []byte
KeyID string
SealedKey SealedKey
}{
{ExpectedErr: errMissingInternalIV, Metadata: map[string]string{}, DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{}}, // 0
{
ExpectedErr: errMissingInternalSealAlgorithm, Metadata: map[string]string{SSEIV: ""},
DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
}, // 1
{
ExpectedErr: Error{"The object metadata is missing the internal sealed key for SSE-S3"},
Metadata: map[string]string{SSEIV: "", SSESealAlgorithm: ""}, DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
}, // 2
{
ExpectedErr: Error{"The object metadata is missing the internal KMS key-ID for SSE-S3"},
Metadata: map[string]string{SSEIV: "", SSESealAlgorithm: "", S3SealedKey: ""}, DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
}, // 3
{
ExpectedErr: Error{"The object metadata is missing the internal sealed KMS data key for SSE-S3"},
Metadata: map[string]string{SSEIV: "", SSESealAlgorithm: "", S3SealedKey: "", S3KMSKeyID: ""},
DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
}, // 4
{
ExpectedErr: errInvalidInternalIV,
Metadata: map[string]string{SSEIV: "", SSESealAlgorithm: "", S3SealedKey: "", S3KMSKeyID: "", S3KMSSealedKey: ""},
DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
}, // 5
{
ExpectedErr: errInvalidInternalSealAlgorithm,
Metadata: map[string]string{
SSEIV: base64.StdEncoding.EncodeToString(make([]byte, 32)), SSESealAlgorithm: "", S3SealedKey: "", S3KMSKeyID: "", S3KMSSealedKey: "",
},
DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
}, // 6
{
ExpectedErr: Error{"The internal sealed key for SSE-S3 is invalid"},
Metadata: map[string]string{
SSEIV: base64.StdEncoding.EncodeToString(make([]byte, 32)), SSESealAlgorithm: SealAlgorithm, S3SealedKey: "",
S3KMSKeyID: "", S3KMSSealedKey: "",
},
DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
}, // 7
{
ExpectedErr: Error{"The internal sealed KMS data key for SSE-S3 is invalid"},
Metadata: map[string]string{
SSEIV: base64.StdEncoding.EncodeToString(make([]byte, 32)), SSESealAlgorithm: SealAlgorithm,
S3SealedKey: base64.StdEncoding.EncodeToString(make([]byte, 64)), S3KMSKeyID: "key-1",
S3KMSSealedKey: ".MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ=", // invalid base64
},
DataKey: []byte{}, KeyID: "key-1", SealedKey: SealedKey{},
}, // 8
{
ExpectedErr: nil,
Metadata: map[string]string{
SSEIV: base64.StdEncoding.EncodeToString(make([]byte, 32)), SSESealAlgorithm: SealAlgorithm,
S3SealedKey: base64.StdEncoding.EncodeToString(make([]byte, 64)), S3KMSKeyID: "", S3KMSSealedKey: "",
},
DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{Algorithm: SealAlgorithm},
}, // 9
{
ExpectedErr: nil,
Metadata: map[string]string{
SSEIV: base64.StdEncoding.EncodeToString(append([]byte{1}, make([]byte, 31)...)), SSESealAlgorithm: SealAlgorithm,
S3SealedKey: base64.StdEncoding.EncodeToString(append([]byte{1}, make([]byte, 63)...)), S3KMSKeyID: "key-1",
S3KMSSealedKey: base64.StdEncoding.EncodeToString(make([]byte, 48)),
},
DataKey: make([]byte, 48), KeyID: "key-1", SealedKey: SealedKey{Algorithm: SealAlgorithm, Key: [64]byte{1}, IV: [32]byte{1}},
}, // 10
}
func TestS3ParseMetadata(t *testing.T) {
for i, test := range s3ParseMetadataTests {
keyID, dataKey, sealedKey, err := S3.ParseMetadata(test.Metadata)
if err != test.ExpectedErr {
t.Errorf("Test %d: got error '%v' - want error '%v'", i, err, test.ExpectedErr)
}
if !bytes.Equal(dataKey, test.DataKey) {
t.Errorf("Test %d: got data key '%v' - want data key '%v'", i, dataKey, test.DataKey)
}
if keyID != test.KeyID {
t.Errorf("Test %d: got key-ID '%v' - want key-ID '%v'", i, keyID, test.KeyID)
}
if sealedKey.Algorithm != test.SealedKey.Algorithm {
t.Errorf("Test %d: got sealed key algorithm '%v' - want sealed key algorithm '%v'", i, sealedKey.Algorithm, test.SealedKey.Algorithm)
}
if !bytes.Equal(sealedKey.Key[:], test.SealedKey.Key[:]) {
t.Errorf("Test %d: got sealed key '%v' - want sealed key '%v'", i, sealedKey.Key, test.SealedKey.Key)
}
if !bytes.Equal(sealedKey.IV[:], test.SealedKey.IV[:]) {
t.Errorf("Test %d: got sealed key IV '%v' - want sealed key IV '%v'", i, sealedKey.IV, test.SealedKey.IV)
}
}
}
var ssecParseMetadataTests = []struct {
Metadata map[string]string
ExpectedErr error
SealedKey SealedKey
}{
{ExpectedErr: errMissingInternalIV, Metadata: map[string]string{}, SealedKey: SealedKey{}}, // 0
{ExpectedErr: errMissingInternalSealAlgorithm, Metadata: map[string]string{SSEIV: ""}, SealedKey: SealedKey{}}, // 1
{
ExpectedErr: Error{"The object metadata is missing the internal sealed key for SSE-C"},
Metadata: map[string]string{SSEIV: "", SSESealAlgorithm: ""}, SealedKey: SealedKey{},
}, // 2
{
ExpectedErr: errInvalidInternalIV,
Metadata: map[string]string{SSEIV: "", SSESealAlgorithm: "", SSECSealedKey: ""}, SealedKey: SealedKey{},
}, // 3
{
ExpectedErr: errInvalidInternalSealAlgorithm,
Metadata: map[string]string{
SSEIV: base64.StdEncoding.EncodeToString(make([]byte, 32)), SSESealAlgorithm: "", SSECSealedKey: "",
},
SealedKey: SealedKey{},
}, // 4
{
ExpectedErr: Error{"The internal sealed key for SSE-C is invalid"},
Metadata: map[string]string{
SSEIV: base64.StdEncoding.EncodeToString(make([]byte, 32)), SSESealAlgorithm: SealAlgorithm, SSECSealedKey: "",
},
SealedKey: SealedKey{},
}, // 5
{
ExpectedErr: nil,
Metadata: map[string]string{
SSEIV: base64.StdEncoding.EncodeToString(make([]byte, 32)), SSESealAlgorithm: SealAlgorithm,
SSECSealedKey: base64.StdEncoding.EncodeToString(make([]byte, 64)),
},
SealedKey: SealedKey{Algorithm: SealAlgorithm},
}, // 6
{
ExpectedErr: nil,
Metadata: map[string]string{
SSEIV: base64.StdEncoding.EncodeToString(append([]byte{1}, make([]byte, 31)...)), SSESealAlgorithm: InsecureSealAlgorithm,
SSECSealedKey: base64.StdEncoding.EncodeToString(append([]byte{1}, make([]byte, 63)...)),
},
SealedKey: SealedKey{Algorithm: InsecureSealAlgorithm, Key: [64]byte{1}, IV: [32]byte{1}},
}, // 7
}
func TestCreateMultipartMetadata(t *testing.T) {
metadata := CreateMultipartMetadata(nil)
if v, ok := metadata[SSEMultipart]; !ok || v != "" {
t.Errorf("Metadata is missing the correct value for '%s': got '%s' - want '%s'", SSEMultipart, v, "")
}
}
func TestSSECParseMetadata(t *testing.T) {
for i, test := range ssecParseMetadataTests {
sealedKey, err := SSEC.ParseMetadata(test.Metadata)
if err != test.ExpectedErr {
t.Errorf("Test %d: got error '%v' - want error '%v'", i, err, test.ExpectedErr)
}
if sealedKey.Algorithm != test.SealedKey.Algorithm {
t.Errorf("Test %d: got sealed key algorithm '%v' - want sealed key algorithm '%v'", i, sealedKey.Algorithm, test.SealedKey.Algorithm)
}
if !bytes.Equal(sealedKey.Key[:], test.SealedKey.Key[:]) {
t.Errorf("Test %d: got sealed key '%v' - want sealed key '%v'", i, sealedKey.Key, test.SealedKey.Key)
}
if !bytes.Equal(sealedKey.IV[:], test.SealedKey.IV[:]) {
t.Errorf("Test %d: got sealed key IV '%v' - want sealed key IV '%v'", i, sealedKey.IV, test.SealedKey.IV)
}
}
}
var s3CreateMetadataTests = []struct {
KeyID string
SealedDataKey []byte
SealedKey SealedKey
}{
{KeyID: "", SealedDataKey: make([]byte, 48), SealedKey: SealedKey{Algorithm: SealAlgorithm}},
{KeyID: "cafebabe", SealedDataKey: make([]byte, 48), SealedKey: SealedKey{Algorithm: SealAlgorithm}},
{KeyID: "deadbeef", SealedDataKey: make([]byte, 32), SealedKey: SealedKey{IV: [32]byte{0xf7}, Key: [64]byte{0xea}, Algorithm: SealAlgorithm}},
}
func TestS3CreateMetadata(t *testing.T) {
defer func(disableLog bool) { logger.Disable = disableLog }(logger.Disable)
logger.Disable = true
for i, test := range s3CreateMetadataTests {
metadata := S3.CreateMetadata(nil, test.KeyID, test.SealedDataKey, test.SealedKey)
keyID, kmsKey, sealedKey, err := S3.ParseMetadata(metadata)
if err != nil {
t.Errorf("Test %d: failed to parse metadata: %v", i, err)
continue
}
if keyID != test.KeyID {
t.Errorf("Test %d: Key-ID mismatch: got '%s' - want '%s'", i, keyID, test.KeyID)
}
if !bytes.Equal(kmsKey, test.SealedDataKey) {
t.Errorf("Test %d: sealed KMS data mismatch: got '%v' - want '%v'", i, kmsKey, test.SealedDataKey)
}
if sealedKey.Algorithm != test.SealedKey.Algorithm {
t.Errorf("Test %d: seal algorithm mismatch: got '%s' - want '%s'", i, sealedKey.Algorithm, test.SealedKey.Algorithm)
}
if !bytes.Equal(sealedKey.IV[:], test.SealedKey.IV[:]) {
t.Errorf("Test %d: IV mismatch: got '%v' - want '%v'", i, sealedKey.IV, test.SealedKey.IV)
}
if !bytes.Equal(sealedKey.Key[:], test.SealedKey.Key[:]) {
t.Errorf("Test %d: sealed key mismatch: got '%v' - want '%v'", i, sealedKey.Key, test.SealedKey.Key)
}
}
defer func() {
if err := recover(); err == nil || err != logger.ErrCritical {
t.Errorf("Expected '%s' panic for invalid seal algorithm but got '%s'", logger.ErrCritical, err)
}
}()
_ = S3.CreateMetadata(nil, "", []byte{}, SealedKey{Algorithm: InsecureSealAlgorithm})
}
var ssecCreateMetadataTests = []struct {
KeyID string
SealedDataKey []byte
SealedKey SealedKey
}{
{KeyID: "", SealedDataKey: make([]byte, 48), SealedKey: SealedKey{Algorithm: SealAlgorithm}},
{KeyID: "cafebabe", SealedDataKey: make([]byte, 48), SealedKey: SealedKey{Algorithm: SealAlgorithm}},
{KeyID: "deadbeef", SealedDataKey: make([]byte, 32), SealedKey: SealedKey{IV: [32]byte{0xf7}, Key: [64]byte{0xea}, Algorithm: SealAlgorithm}},
}
func TestSSECCreateMetadata(t *testing.T) {
defer func(disableLog bool) { logger.Disable = disableLog }(logger.Disable)
logger.Disable = true
for i, test := range ssecCreateMetadataTests {
metadata := SSEC.CreateMetadata(nil, test.SealedKey)
sealedKey, err := SSEC.ParseMetadata(metadata)
if err != nil {
t.Errorf("Test %d: failed to parse metadata: %v", i, err)
continue
}
if sealedKey.Algorithm != test.SealedKey.Algorithm {
t.Errorf("Test %d: seal algorithm mismatch: got '%s' - want '%s'", i, sealedKey.Algorithm, test.SealedKey.Algorithm)
}
if !bytes.Equal(sealedKey.IV[:], test.SealedKey.IV[:]) {
t.Errorf("Test %d: IV mismatch: got '%v' - want '%v'", i, sealedKey.IV, test.SealedKey.IV)
}
if !bytes.Equal(sealedKey.Key[:], test.SealedKey.Key[:]) {
t.Errorf("Test %d: sealed key mismatch: got '%v' - want '%v'", i, sealedKey.Key, test.SealedKey.Key)
}
}
defer func() {
if err := recover(); err == nil || err != logger.ErrCritical {
t.Errorf("Expected '%s' panic for invalid seal algorithm but got '%s'", logger.ErrCritical, err)
}
}()
_ = SSEC.CreateMetadata(nil, SealedKey{Algorithm: InsecureSealAlgorithm})
}
var isETagSealedTests = []struct {
ETag string
IsSealed bool
}{
{ETag: "", IsSealed: false}, // 0
{ETag: "90682b8e8cc7609c4671e1d64c73fc30", IsSealed: false}, // 1
{ETag: "f201040c9dc593e39ea004dc1323699bcd", IsSealed: true}, // 2 not valid ciphertext but looks like sealed ETag
{ETag: "20000f00fba2ee2ae4845f725964eeb9e092edfabc7ab9f9239e8344341f769a51ce99b4801b0699b92b16a72fa94972", IsSealed: true}, // 3
}
func TestIsETagSealed(t *testing.T) {
for i, test := range isETagSealedTests {
etag, err := hex.DecodeString(test.ETag)
if err != nil {
t.Errorf("Test %d: failed to decode etag: %s", i, err)
}
if sealed := IsETagSealed(etag); sealed != test.IsSealed {
t.Errorf("Test %d: got %v - want %v", i, sealed, test.IsSealed)
}
}
}
var removeInternalEntriesTests = []struct {
Metadata, Expected map[string]string
}{
{ // 0
Metadata: map[string]string{
SSEMultipart: "",
SSEIV: "",
SSESealAlgorithm: "",
SSECSealedKey: "",
S3SealedKey: "",
S3KMSKeyID: "",
S3KMSSealedKey: "",
},
Expected: map[string]string{},
},
{ // 1
Metadata: map[string]string{
SSEMultipart: "",
SSEIV: "",
"X-Amz-Meta-A": "X",
"X-Minio-Internal-B": "Y",
},
Expected: map[string]string{
"X-Amz-Meta-A": "X",
"X-Minio-Internal-B": "Y",
},
},
}
func TestRemoveInternalEntries(t *testing.T) {
isEqual := func(x, y map[string]string) bool {
if len(x) != len(y) {
return false
}
for k, v := range x {
if u, ok := y[k]; !ok || v != u {
return false
}
}
return true
}
for i, test := range removeInternalEntriesTests {
RemoveInternalEntries(test.Metadata)
if !isEqual(test.Metadata, test.Expected) {
t.Errorf("Test %d: got %v - want %v", i, test.Metadata, test.Expected)
}
}
}

(modified file)

@@ -18,6 +18,8 @@ import (
"context"
"errors"
"io"
"net/http"
"path"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/ioutil"
@@ -25,16 +27,103 @@ import (
)
const (
// SSEMultipart is the metadata key indicating that the object
// was uploaded using the S3 multipart API and stored using
// some form of server-side-encryption.
SSEMultipart = "X-Minio-Internal-Encrypted-Multipart"
// SSEIV is the metadata key referencing the random initialization
// vector (IV) used for SSE-S3 and SSE-C key derivation.
SSEIV = "X-Minio-Internal-Server-Side-Encryption-Iv"
// SSESealAlgorithm is the metadata key referencing the algorithm
// used by SSE-C and SSE-S3 to encrypt the object.
SSESealAlgorithm = "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm"
// SSECSealedKey is the metadata key referencing the sealed object-key for SSE-C.
SSECSealedKey = "X-Minio-Internal-Server-Side-Encryption-Sealed-Key"
// S3SealedKey is the metadata key referencing the sealed object-key for SSE-S3.
S3SealedKey = "X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key"
// S3KMSKeyID is the metadata key referencing the KMS key-id used to
// generate/decrypt the S3-KMS-Sealed-Key. It is only used for SSE-S3 + KMS.
S3KMSKeyID = "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id"
// S3KMSSealedKey is the metadata key referencing the encrypted key generated
// by KMS. It is only used for SSE-S3 + KMS.
S3KMSSealedKey = "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key"
)
const (
// SealAlgorithm is the encryption/sealing algorithm used to derive & seal
// the key-encryption-key and to en/decrypt the object data.
SealAlgorithm = "DAREv2-HMAC-SHA256"
// InsecureSealAlgorithm is the legacy encryption/sealing algorithm used
// to derive & seal the key-encryption-key and to en/decrypt the object data.
// This algorithm should not be used for new objects because its key derivation
// is not optimal. See: https://github.com/minio/minio/pull/6121
InsecureSealAlgorithm = "DARE-SHA256"
)
// String returns the SSE domain as string. For SSE-S3 the
// domain is "SSE-S3".
func (s3) String() string { return "SSE-S3" }
// UnsealObjectKey extracts and decrypts the sealed object key
// from the metadata using KMS and returns the decrypted object
// key.
func (sse s3) UnsealObjectKey(kms KMS, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
keyID, kmsKey, sealedKey, err := sse.ParseMetadata(metadata)
if err != nil {
return
}
unsealKey, err := kms.UnsealKey(keyID, kmsKey, Context{bucket: path.Join(bucket, object)})
if err != nil {
return
}
err = key.Unseal(unsealKey, sealedKey, sse.String(), bucket, object)
return
}
// String returns the SSE domain as string. For SSE-C the
// domain is "SSE-C".
func (ssec) String() string { return "SSE-C" }
// UnsealObjectKey extracts and decrypts the sealed object key
// from the metadata using the SSE-C client key taken from the HTTP headers
// and returns the decrypted object key.
func (sse ssec) UnsealObjectKey(h http.Header, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
clientKey, err := sse.ParseHTTP(h)
if err != nil {
return
}
return unsealObjectKey(clientKey, metadata, bucket, object)
}
// UnsealObjectKey extracts and decrypts the sealed object key
// from the metadata using the SSE-Copy client key taken from the HTTP headers
// and returns the decrypted object key.
func (sse ssecCopy) UnsealObjectKey(h http.Header, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
clientKey, err := sse.ParseHTTP(h)
if err != nil {
return
}
return unsealObjectKey(clientKey, metadata, bucket, object)
}
// unsealObjectKey decrypts and returns the sealed object key
// from the metadata using the SSE-C client key.
func unsealObjectKey(clientKey [32]byte, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
sealedKey, err := SSEC.ParseMetadata(metadata)
if err != nil {
return
}
err = key.Unseal(clientKey, sealedKey, SSEC.String(), bucket, object)
return
}
// EncryptSinglePart encrypts an io.Reader which must be
// the body of a single-part PUT request.
func EncryptSinglePart(r io.Reader, key ObjectKey) io.Reader {
@@ -45,6 +134,14 @@ func EncryptSinglePart(r io.Reader, key ObjectKey) io.Reader {
return r
}
// EncryptMultiPart encrypts an io.Reader which must be the body of
// a multi-part PUT request. It derives a unique encryption key from
// the partID and the object key.
func EncryptMultiPart(r io.Reader, partID int, key ObjectKey) io.Reader {
partKey := key.DerivePartKey(uint32(partID))
return EncryptSinglePart(r, ObjectKey(partKey))
}
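// A usage sketch (not part of this file): encryptParts shows how the
// per-part key derivation above composes. The helper name and its
// buffering are illustrative assumptions; it assumes the "bytes" and
// "io/ioutil" imports.
func encryptParts(key ObjectKey, parts [][]byte) ([][]byte, error) {
	encrypted := make([][]byte, 0, len(parts))
	for i, body := range parts {
		// Each part is sealed with a key derived from the object key
		// and the 1-based S3 part number.
		r := EncryptMultiPart(bytes.NewReader(body), i+1, key)
		ciphertext, err := ioutil.ReadAll(r)
		if err != nil {
			return nil, err
		}
		encrypted = append(encrypted, ciphertext)
	}
	return encrypted, nil
}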
// DecryptSinglePart decrypts an io.Writer which must be an object
// uploaded with the single-part PUT API. The offset and length
// specify the requested range.

cmd/crypto/sse_test.go (new file, 226 lines)

@@ -0,0 +1,226 @@
// Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package crypto
import (
"net/http"
"testing"
)
func TestS3String(t *testing.T) {
const Domain = "SSE-S3"
if domain := S3.String(); domain != Domain {
t.Errorf("S3's string method returns wrong domain: got '%s' - want '%s'", domain, Domain)
}
}
func TestSSECString(t *testing.T) {
const Domain = "SSE-C"
if domain := SSEC.String(); domain != Domain {
t.Errorf("SSEC's string method returns wrong domain: got '%s' - want '%s'", domain, Domain)
}
}
var ssecUnsealObjectKeyTests = []struct {
Headers http.Header
Bucket, Object string
Metadata map[string]string
ExpectedErr error
}{
{ // 0 - Valid HTTP headers and valid metadata entries for bucket/object
Headers: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Bucket: "bucket",
Object: "object",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
},
ExpectedErr: nil,
},
{ // 1 - Valid HTTP headers but invalid metadata entries for bucket/object2
Headers: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Bucket: "bucket",
Object: "object2",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
},
ExpectedErr: ErrSecretKeyMismatch,
},
{ // 2 - Valid HTTP headers but invalid metadata entries for bucket/object
Headers: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Bucket: "bucket",
Object: "object",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
},
ExpectedErr: errMissingInternalSealAlgorithm,
},
{ // 3 - Invalid HTTP headers for valid metadata entries for bucket/object
Headers: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
},
Bucket: "bucket",
Object: "object",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
},
ExpectedErr: ErrMissingCustomerKeyMD5,
},
}
func TestSSECUnsealObjectKey(t *testing.T) {
for i, test := range ssecUnsealObjectKeyTests {
if _, err := SSEC.UnsealObjectKey(test.Headers, test.Metadata, test.Bucket, test.Object); err != test.ExpectedErr {
t.Errorf("Test %d: got: %v - want: %v", i, err, test.ExpectedErr)
}
}
}
var sseCopyUnsealObjectKeyTests = []struct {
Headers http.Header
Bucket, Object string
Metadata map[string]string
ExpectedErr error
}{
{ // 0 - Valid HTTP headers and valid metadata entries for bucket/object
Headers: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Bucket: "bucket",
Object: "object",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
},
ExpectedErr: nil,
},
{ // 1 - Valid HTTP headers but invalid metadata entries for bucket/object2
Headers: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Bucket: "bucket",
Object: "object2",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
},
ExpectedErr: ErrSecretKeyMismatch,
},
{ // 2 - Valid HTTP headers but invalid metadata entries for bucket/object
Headers: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Bucket: "bucket",
Object: "object",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
},
ExpectedErr: errMissingInternalSealAlgorithm,
},
{ // 3 - Invalid HTTP headers for valid metadata entries for bucket/object
Headers: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
},
Bucket: "bucket",
Object: "object",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
},
ExpectedErr: ErrMissingCustomerKeyMD5,
},
}
func TestSSECopyUnsealObjectKey(t *testing.T) {
for i, test := range sseCopyUnsealObjectKeyTests {
if _, err := SSECopy.UnsealObjectKey(test.Headers, test.Metadata, test.Bucket, test.Object); err != test.ExpectedErr {
t.Errorf("Test %d: got: %v - want: %v", i, err, test.ExpectedErr)
}
}
}
var s3UnsealObjectKeyTests = []struct {
KMS KMS
Bucket, Object string
Metadata map[string]string
ExpectedErr error
}{
{ // 0 - Valid KMS key-ID and valid metadata entries for bucket/object
KMS: NewKMS([32]byte{}),
Bucket: "bucket",
Object: "object",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Iv": "hhVY0LKR1YtZbzAKxTWUfZt5enDfYX6Fxz1ma8Kiudc=",
"X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key": "IAAfALhsOeD5AE3s5Zgq3DZ5VFGsOa3B0ksVC86veDcaj+fXv2U0VadhPaOKYr9Emd5ssOsO0uIhIIrKiOy9rA==",
"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key": "IAAfAMRS2iw45FsfiF3QXajSYVWj1lxMpQm6DxDGPtADCX6fJQQ4atHBtfpgqJFyeQmIHsm0FBI+UlHw1Lv4ug==",
"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id": "test-key-1",
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
},
ExpectedErr: nil,
},
{ // 1 - Valid KMS key-ID for invalid metadata entries for bucket/object
KMS: NewKMS([32]byte{}),
Bucket: "bucket",
Object: "object",
Metadata: map[string]string{
"X-Minio-Internal-Server-Side-Encryption-Iv": "hhVY0LKR1YtZbzAKxTWUfZt5enDfYX6Fxz1ma8Kiudc=",
"X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key": "IAAfALhsOeD5AE3s5Zgq3DZ5VFGsOa3B0ksVC86veDcaj+fXv2U0VadhPaOKYr9Emd5ssOsO0uIhIIrKiOy9rA==",
"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key": "IAAfAMRS2iw45FsfiF3QXajSYVWj1lxMpQm6DxDGPtADCX6fJQQ4atHBtfpgqJFyeQmIHsm0FBI+UlHw1Lv4ug==",
"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id": "test-key-1",
},
ExpectedErr: errMissingInternalSealAlgorithm,
},
}
func TestS3UnsealObjectKey(t *testing.T) {
for i, test := range s3UnsealObjectKeyTests {
if _, err := S3.UnsealObjectKey(test.KMS, test.Metadata, test.Bucket, test.Object); err != test.ExpectedErr {
t.Errorf("Test %d: got: %v - want: %v", i, err, test.ExpectedErr)
}
}
}

cmd/crypto/vault.go (new file, 249 lines)

@@ -0,0 +1,249 @@
// Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package crypto
import (
"bytes"
"encoding/base64"
"errors"
"fmt"
"strings"
"time"
vault "github.com/hashicorp/vault/api"
"github.com/minio/minio/cmd/logger"
)
var (
// ErrKMSAuthLogin is raised when there is a failure authenticating to the KMS
ErrKMSAuthLogin = errors.New("Vault service did not return auth info")
)
// VaultKey represents vault encryption key-ring.
type VaultKey struct {
Name string `json:"name"` // The name of the encryption key-ring
Version int `json:"version"` // The key version
}
// VaultAuth represents vault authentication type.
// Currently the only supported authentication type is AppRole.
type VaultAuth struct {
Type string `json:"type"` // The authentication type
AppRole VaultAppRole `json:"approle"` // The AppRole authentication credentials
}
// VaultAppRole represents vault AppRole authentication credentials
type VaultAppRole struct {
ID string `json:"id"` // The AppRole access ID
Secret string `json:"secret"` // The AppRole secret
}
// VaultConfig represents vault configuration.
type VaultConfig struct {
Endpoint string `json:"endpoint"` // The vault API endpoint as URL
CAPath string `json:"-"` // The path to PEM-encoded certificate files used for mTLS. Currently not used in config file.
Auth VaultAuth `json:"auth"` // The vault authentication configuration
Key VaultKey `json:"key-id"` // The named key used for key-generation / decryption.
Namespace string `json:"-"` // The vault namespace of enterprise vault instances
}
// vaultService represents a connection to a vault KMS.
type vaultService struct {
config *VaultConfig
client *vault.Client
leaseDuration time.Duration
tokenRenewer *vault.Renewer
}
var _ KMS = (*vaultService)(nil) // compiler check that *vaultService implements KMS
// empty/default vault configuration used to check whether a particular configuration is empty.
var emptyVaultConfig = VaultConfig{}
// IsEmpty returns true if the vault config struct is an
// empty configuration.
func (v *VaultConfig) IsEmpty() bool { return *v == emptyVaultConfig }
// Verify returns a nil error if the vault configuration
// is valid. A valid configuration is either empty or
// contains valid non-default values.
func (v *VaultConfig) Verify() (err error) {
if v.IsEmpty() {
return // an empty configuration is valid
}
switch {
case v.Endpoint == "":
err = errors.New("crypto: missing hashicorp vault endpoint")
case strings.ToLower(v.Auth.Type) != "approle":
err = fmt.Errorf("crypto: invalid hashicorp vault authentication type: %s is not supported", v.Auth.Type)
case v.Auth.AppRole.ID == "":
err = errors.New("crypto: missing hashicorp vault AppRole ID")
case v.Auth.AppRole.Secret == "":
err = errors.New("crypto: missing hashicorp vault AppSecret ID")
case v.Key.Name == "":
err = errors.New("crypto: missing hashicorp vault key name")
case v.Key.Version < 0:
err = errors.New("crypto: invalid hashicorp vault key version: The key version must not be negative")
}
return
}
// NewVault initializes Hashicorp Vault KMS by authenticating
// to Vault with the credentials in config and gets a client
// token for future api calls.
func NewVault(config VaultConfig) (KMS, error) {
if config.IsEmpty() {
return nil, errors.New("crypto: the hashicorp vault configuration must not be empty")
}
if err := config.Verify(); err != nil {
return nil, err
}
vaultCfg := vault.Config{Address: config.Endpoint}
if err := vaultCfg.ConfigureTLS(&vault.TLSConfig{CAPath: config.CAPath}); err != nil {
return nil, err
}
client, err := vault.NewClient(&vaultCfg)
if err != nil {
return nil, err
}
if config.Namespace != "" {
client.SetNamespace(config.Namespace)
}
v := &vaultService{client: client, config: &config}
if err := v.authenticate(); err != nil {
return nil, err
}
return v, nil
}
// reauthenticate() retries the login at 1-minute
// intervals until it succeeds.
func (v *vaultService) reauthenticate() {
retryDelay := 1 * time.Minute
go func() {
for {
if err := v.authenticate(); err != nil {
time.Sleep(retryDelay)
continue
}
return
}
}()
}
// renewer starts the vault client's renewer, which automatically
// renews the secret periodically.
func (v *vaultService) renewer(secret *vault.Secret) {
renewer, err := v.client.NewRenewer(&vault.RenewerInput{
Secret: secret,
})
if err != nil {
logger.FatalIf(err, "crypto: hashicorp vault token renewer could not be started")
}
v.tokenRenewer = renewer
go renewer.Renew()
defer renewer.Stop()
for {
select {
case err := <-renewer.DoneCh():
if err != nil {
v.reauthenticate()
renewer.Stop()
return
}
// Renewal is now over
case renewal := <-renewer.RenewCh():
v.leaseDuration = time.Duration(renewal.Secret.Auth.LeaseDuration)
}
}
}
// authenticate logs the app in to vault and starts the auto-renewer
// before the secret expires.
func (v *vaultService) authenticate() (err error) {
payload := map[string]interface{}{
"role_id": v.config.Auth.AppRole.ID,
"secret_id": v.config.Auth.AppRole.Secret,
}
var secret *vault.Secret
secret, err = v.client.Logical().Write("auth/approle/login", payload)
if err != nil {
return
}
if secret.Auth == nil {
err = ErrKMSAuthLogin
return
}
v.client.SetToken(secret.Auth.ClientToken)
v.leaseDuration = time.Duration(secret.Auth.LeaseDuration)
go v.renewer(secret)
return
}
// GenerateKey returns a new plaintext key, generated by the KMS,
// and a sealed version of this plaintext key encrypted using the
// named key referenced by keyID. It also binds the generated key
// cryptographically to the provided context.
func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
var contextStream bytes.Buffer
ctx.WriteTo(&contextStream)
payload := map[string]interface{}{
"context": base64.StdEncoding.EncodeToString(contextStream.Bytes()),
}
s, err := v.client.Logical().Write(fmt.Sprintf("/transit/datakey/plaintext/%s", keyID), payload)
if err != nil {
return key, sealedKey, err
}
sealKey := s.Data["ciphertext"].(string)
plainKey, err := base64.StdEncoding.DecodeString(s.Data["plaintext"].(string))
if err != nil {
return key, sealedKey, err
}
copy(key[:], []byte(plainKey))
return key, []byte(sealKey), nil
}
// UnsealKey returns the decrypted sealedKey as a plaintext key.
// To do so it sends the sealedKey to the KMS, which decrypts
// it using the named key referenced by keyID and responds with
// the plaintext key.
//
// The context must be the same context as the one provided while
// generating the plaintext key / sealedKey.
func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
var contextStream bytes.Buffer
ctx.WriteTo(&contextStream)
payload := map[string]interface{}{
"ciphertext": string(sealedKey),
"context": base64.StdEncoding.EncodeToString(contextStream.Bytes()),
}
s, err := v.client.Logical().Write(fmt.Sprintf("/transit/decrypt/%s", keyID), payload)
if err != nil {
return key, err
}
base64Key := s.Data["plaintext"].(string)
plainKey, err := base64.StdEncoding.DecodeString(base64Key)
if err != nil {
return key, err
}
copy(key[:], []byte(plainKey))
return key, nil
}
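A hypothetical wiring of this Vault-backed KMS using the types above; the endpoint, AppRole credentials and key name below are placeholders, not real values:

package crypto

func newVaultKMSSketch() (KMS, error) {
	cfg := VaultConfig{
		Endpoint: "https://vault.example.com:8200",
		Auth: VaultAuth{
			Type:    "approle", // the only authentication type Verify() accepts
			AppRole: VaultAppRole{ID: "role-id", Secret: "secret-id"},
		},
		Key: VaultKey{Name: "my-minio-key", Version: 1},
	}
	// NewVault verifies the configuration, logs in via AppRole and
	// starts the token renewer before returning the KMS handle.
	return NewVault(cfg)
}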

(modified file)

@@ -18,7 +18,11 @@ package cmd
import (
"encoding/json"
"errors"
"path/filepath"
"strings"
"github.com/minio/minio/pkg/ellipses"
)
// CacheConfig represents cache config settings
@@ -41,6 +45,15 @@ func (cfg *CacheConfig) UnmarshalJSON(data []byte) (err error) {
if err = json.Unmarshal(data, _cfg); err != nil {
return err
}
if _cfg.Expiry < 0 {
return errors.New("config expiry value should not be negative")
}
if _cfg.MaxUse < 0 {
return errors.New("config max use value should not be null or negative")
}
if _, err = parseCacheDrives(_cfg.Drives); err != nil {
return err
}
@@ -52,12 +65,42 @@ func (cfg *CacheConfig) UnmarshalJSON(data []byte) (err error) {
// Parses given cacheDrivesEnv and returns a list of cache drives.
func parseCacheDrives(drives []string) ([]string, error) {
if len(drives) == 0 {
return drives, nil
}
var endpoints []string
for _, d := range drives {
if ellipses.HasEllipses(d) {
s, err := parseCacheDrivePaths(d)
if err != nil {
return nil, err
}
endpoints = append(endpoints, s...)
} else {
endpoints = append(endpoints, d)
}
}
for _, d := range endpoints {
if !filepath.IsAbs(d) {
return nil, uiErrInvalidCacheDrivesValue(nil).Msg("cache dir should be absolute path: %s", d)
}
}
return drives, nil
return endpoints, nil
}
// Parses all arguments and returns a slice of drive paths following the ellipses pattern.
func parseCacheDrivePaths(arg string) (ep []string, err error) {
patterns, perr := ellipses.FindEllipsesPatterns(arg)
if perr != nil {
return []string{}, uiErrInvalidCacheDrivesValue(nil).Msg(perr.Error())
}
for _, lbls := range patterns.Expand() {
ep = append(ep, strings.Join(lbls, ""))
}
return ep, nil
}
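// For illustration, the expansion behaves like this (the concrete paths
// are arbitrary examples, consistent with the tests further below):
//
//	parseCacheDrivePaths("/home/drive{1...3}")
//	  => ["/home/drive1", "/home/drive2", "/home/drive3"]
//
//	parseCacheDrives([]string{"/home/drive{1...3}", "/mnt/cache1"})
//	  => ["/home/drive1", "/home/drive2", "/home/drive3", "/mnt/cache1"]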
// Parses given cacheExcludesEnv and returns a list of cache exclude patterns.

(modified file)

@@ -41,12 +41,32 @@ func TestParseCacheDrives(t *testing.T) {
expectedPatterns []string
success bool
}{"C:/home/drive1;C:/home/drive2;C:/home/drive3", []string{"C:/home/drive1", "C:/home/drive2", "C:/home/drive3"}, true})
testCases = append(testCases, struct {
driveStr string
expectedPatterns []string
success bool
}{"C:/home/drive{1...3}", []string{"C:/home/drive1", "C:/home/drive2", "C:/home/drive3"}, true})
testCases = append(testCases, struct {
driveStr string
expectedPatterns []string
success bool
}{"C:/home/drive{1..3}", []string{}, false})
} else {
testCases = append(testCases, struct {
driveStr string
expectedPatterns []string
success bool
}{"/home/drive1;/home/drive2;/home/drive3", []string{"/home/drive1", "/home/drive2", "/home/drive3"}, true})
testCases = append(testCases, struct {
driveStr string
expectedPatterns []string
success bool
}{"/home/drive{1...3}", []string{"/home/drive1", "/home/drive2", "/home/drive3"}, true})
testCases = append(testCases, struct {
driveStr string
expectedPatterns []string
success bool
}{"/home/drive{1..3}", []string{}, false})
}
for i, testCase := range testCases {
drives, err := parseCacheDrives(strings.Split(testCase.driveStr, cacheEnvDelimiter))

(modified file)

@@ -30,7 +30,6 @@ import (
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/lock"
)
@@ -92,7 +91,7 @@ func newCacheFSObjects(dir string, expiry int, maxDiskUsagePct int) (*cacheFSObj
appendFileMap: make(map[string]*fsAppendFile),
}
go fsObjects.cleanupStaleMultipartUploads(context.Background(), globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
go fsObjects.cleanupStaleMultipartUploads(context.Background(), GlobalMultipartCleanupInterval, GlobalMultipartExpiry, GlobalServiceDoneCh)
cacheFS := cacheFSObjects{
FSObjects: fsObjects,
@@ -159,7 +158,7 @@ func (cfs *cacheFSObjects) purgeTrash() {
for {
select {
case <-globalServiceDoneCh:
case <-GlobalServiceDoneCh:
return
case <-ticker.C:
trashPath := path.Join(cfs.fsPath, minioMetaBucket, cacheTrashDir)
@@ -258,7 +257,7 @@ func (cfs *cacheFSObjects) IsOnline() bool {
}
// Caches the object to disk
func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) error {
func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) error {
if cfs.diskUsageHigh() {
select {
case cfs.purgeChan <- struct{}{}:
@@ -275,7 +274,7 @@ func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data
return pErr
}
}
_, err := cfs.PutObject(ctx, bucket, object, data, metadata)
_, err := cfs.PutObject(ctx, bucket, object, data, metadata, opts)
// if err is due to disk being offline , mark cache drive as offline
if IsErr(err, baseErrs...) {
cfs.setOnline(false)
@@ -284,8 +283,8 @@ func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data
}
// Returns the handle for the cached object
func (cfs *cacheFSObjects) Get(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error) {
return cfs.GetObject(ctx, bucket, object, startOffset, length, writer, etag)
func (cfs *cacheFSObjects) Get(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
return cfs.GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
}
// Deletes the cached object
@@ -295,13 +294,14 @@ func (cfs *cacheFSObjects) Delete(ctx context.Context, bucket, object string) (e
// convenience function to check if object is cached on this cacheFSObjects
func (cfs *cacheFSObjects) Exists(ctx context.Context, bucket, object string) bool {
_, err := cfs.GetObjectInfo(ctx, bucket, object)
_, err := cfs.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
return err == nil
}
// Identical to fs PutObject operation except that it uses ETag in metadata
// headers.
func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object string, r *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
data := r.Reader
fs := cfs.FSObjects
// Lock the object.
objectLock := fs.nsMutex.NewNSLock(bucket, object)
@@ -354,7 +354,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
}
// Validate input data size and it can never be less than zero.
if data.Size() < 0 {
if data.Size() < -1 {
logger.LogIf(ctx, errInvalidArgument)
return ObjectInfo{}, errInvalidArgument
}
@@ -438,7 +438,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
// Implements S3 compatible initiate multipart API. Operation here is identical
// to fs backend implementation - with the exception that cache FS uses the uploadID
// generated on the backend
func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string, uploadID string) (string, error) {
func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string, uploadID string, opts ObjectOptions) (string, error) {
if cfs.diskUsageHigh() {
select {
case cfs.purgeChan <- struct{}{}:

(modified file)

@@ -31,18 +31,14 @@ import (
"github.com/djherbis/atime"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/wildcard"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/wildcard"
)
// list of all errors that can be ignored in tree walk operation in disk cache
var cacheTreeWalkIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errVolumeNotFound, errFileNotFound)
const (
// disk cache needs to have cacheSizeMultiplier * object size space free for a cache entry to be created.
cacheSizeMultiplier = 100
// disk cache needs to have object size space free for a cache entry to be created.
cacheTrashDir = "trash"
cacheCleanupInterval = 10 // in minutes
)
@@ -61,18 +57,19 @@ type cacheObjects struct {
// file path patterns to exclude from cache
exclude []string
// Object functions pointing to the corresponding functions of backend implementation.
GetObjectFn func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error)
GetObjectInfoFn func(ctx context.Context, bucket, object string) (objInfo ObjectInfo, err error)
PutObjectFn func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error)
GetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
GetObjectFn func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
PutObjectFn func(ctx context.Context, bucket, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
DeleteObjectFn func(ctx context.Context, bucket, object string) error
ListObjectsFn func(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
ListObjectsV2Fn func(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error)
ListBucketsFn func(ctx context.Context) (buckets []BucketInfo, err error)
GetBucketInfoFn func(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error)
NewMultipartUploadFn func(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error)
PutObjectPartFn func(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error)
NewMultipartUploadFn func(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error)
PutObjectPartFn func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
AbortMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string) error
CompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)
CompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
DeleteBucketFn func(ctx context.Context, bucket string) error
}
@@ -92,21 +89,27 @@ type CacheObjectLayer interface {
ListBuckets(ctx context.Context) (buckets []BucketInfo, err error)
DeleteBucket(ctx context.Context, bucket string) error
// Object operations.
GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error)
GetObjectInfo(ctx context.Context, bucket, object string) (objInfo ObjectInfo, err error)
PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error)
GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
PutObject(ctx context.Context, bucket, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
DeleteObject(ctx context.Context, bucket, object string) error
// Multipart operations.
NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error)
PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error)
NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error)
PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error
CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)
CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
// Storage operations.
StorageInfo(ctx context.Context) CacheStorageInfo
}
// IsCacheable returns whether the object should be saved in the cache.
func (o ObjectInfo) IsCacheable() bool {
return !crypto.IsEncrypted(o.UserDefined)
}
// backendDownError returns true if err is due to backend failure or faulty disk if in server mode
func backendDownError(err error) bool {
_, backendDown := err.(BackendDown)
@@ -179,22 +182,99 @@ func (c cacheObjects) getMetadata(objInfo ObjectInfo) map[string]string {
return metadata
}
func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
if c.isCacheExclude(bucket, object) {
return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
}
// fetch the cacheFSObjects where the object is currently cached, or the nearest available cache drive
dcache, err := c.cache.getCachedFSLoc(ctx, bucket, object)
if err != nil {
return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
}
cacheReader, cacheErr := dcache.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
objInfo, err := c.GetObjectInfoFn(ctx, bucket, object, opts)
if backendDownError(err) && cacheErr == nil {
return cacheReader, nil
} else if err != nil {
if _, ok := err.(ObjectNotFound); ok {
// Delete cached entry if backend object was deleted.
dcache.Delete(ctx, bucket, object)
}
return nil, err
}
if !objInfo.IsCacheable() || filterFromCache(objInfo.UserDefined) {
return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
}
if cacheErr == nil {
if cacheReader.ObjInfo.ETag == objInfo.ETag && !isStaleCache(objInfo) {
// Object is not stale, so serve from cache
return cacheReader, nil
}
cacheReader.Close()
// Object is stale, so delete from cache
dcache.Delete(ctx, bucket, object)
}
// Since we got here, we are serving the request from the backend,
// and also adding the object to the cache.
if rs != nil {
// We don't cache partial objects.
return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
}
if !dcache.diskAvailable(objInfo.Size) {
return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
}
bkReader, bkErr := c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
if bkErr != nil {
return nil, bkErr
}
// Initialize pipe.
pipeReader, pipeWriter := io.Pipe()
teeReader := io.TeeReader(bkReader, pipeWriter)
hashReader, herr := hash.NewReader(pipeReader, bkReader.ObjInfo.Size, "", "", bkReader.ObjInfo.Size)
if herr != nil {
bkReader.Close()
return nil, herr
}
go func() {
opts := ObjectOptions{}
putErr := dcache.Put(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), c.getMetadata(bkReader.ObjInfo), opts)
// close the write end of the pipe, so the error gets
// propagated to getObjReader
pipeWriter.CloseWithError(putErr)
}()
cleanupBackend := func() { bkReader.Close() }
cleanupPipe := func() { pipeReader.Close() }
gr = NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, cleanupBackend, cleanupPipe)
return gr, nil
}
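The flow above tees every byte served to the client into a pipe whose read end is drained by a goroutine that populates the cache. A minimal, self-contained sketch of this write-through pattern using plain io interfaces (cacheWriteThrough and cachePut are illustrative names, not MinIO APIs):

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// cacheWriteThrough copies src to dst while feeding the same bytes into
// cachePut through a pipe, mirroring the TeeReader flow above.
func cacheWriteThrough(dst io.Writer, src io.Reader, cachePut func(io.Reader) error) error {
	pr, pw := io.Pipe()
	tee := io.TeeReader(src, pw)

	done := make(chan error, 1)
	go func() {
		err := cachePut(pr)
		// If the cache write fails early, close the read end so the
		// tee's writes error out instead of blocking forever.
		pr.CloseWithError(err)
		done <- err
	}()

	_, err := io.Copy(dst, tee)
	// Close the write end so the cache writer sees EOF (or the copy error).
	pw.CloseWithError(err)
	<-done
	return err
}

func main() {
	var cached strings.Builder
	err := cacheWriteThrough(os.Stdout, strings.NewReader("hello cache\n"),
		func(r io.Reader) error { _, cerr := io.Copy(&cached, r); return cerr })
	fmt.Printf("cached %q, err: %v\n", cached.String(), err)
}

The diff gets the analogous unblocking behavior from pipeWriter.CloseWithError(putErr) in the cache goroutine, so a cache failure surfaces on the reader side rather than hanging the response.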
// Uses the cached object to serve the request. If the object is not cached, it serves the request from the backend and also
// stores it in the cache for serving subsequent requests.
func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error) {
func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
GetObjectFn := c.GetObjectFn
GetObjectInfoFn := c.GetObjectInfoFn
if c.isCacheExclude(bucket, object) {
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
}
// fetch the cacheFSObjects where the object is currently cached, or the nearest available cache drive
dcache, err := c.cache.getCachedFSLoc(ctx, bucket, object)
if err != nil {
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
}
// stat object on backend
objInfo, err := GetObjectInfoFn(ctx, bucket, object)
objInfo, err := GetObjectInfoFn(ctx, bucket, object, opts)
backendDown := backendDownError(err)
if err != nil && !backendDown {
if _, ok := err.(ObjectNotFound); ok {
@@ -204,43 +284,43 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
return err
}
if !backendDown && filterFromCache(objInfo.UserDefined) {
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
if !backendDown && !objInfo.IsCacheable() {
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
}
cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object)
if !backendDown && filterFromCache(objInfo.UserDefined) {
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
}
cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object, opts)
if err == nil {
if backendDown {
// If the backend is down, serve the request from cache.
return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag)
return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag, opts)
}
if cachedObjInfo.ETag == objInfo.ETag && !isStaleCache(objInfo) {
return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag)
return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag, opts)
}
dcache.Delete(ctx, bucket, object)
}
if startOffset != 0 || length != objInfo.Size {
if startOffset != 0 || (length > 0 && length != objInfo.Size) {
// We don't cache partial objects.
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
}
if !dcache.diskAvailable(objInfo.Size * cacheSizeMultiplier) {
// cache only objects < 1/100th of disk capacity
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
if !dcache.diskAvailable(objInfo.Size) {
return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
}
// Initialize pipe.
pipeReader, pipeWriter := io.Pipe()
hashReader, err := hash.NewReader(pipeReader, objInfo.Size, "", "")
hashReader, err := hash.NewReader(pipeReader, objInfo.Size, "", "", objInfo.Size)
if err != nil {
return err
}
go func() {
if err = GetObjectFn(ctx, bucket, object, 0, objInfo.Size, io.MultiWriter(writer, pipeWriter), etag); err != nil {
pipeWriter.CloseWithError(err)
return
}
pipeWriter.Close() // Close writer explicitly signaling we wrote all data.
gerr := GetObjectFn(ctx, bucket, object, 0, objInfo.Size, io.MultiWriter(writer, pipeWriter), etag, opts)
pipeWriter.CloseWithError(gerr) // Close writer explicitly signaling we wrote all data.
}()
err = dcache.Put(ctx, bucket, object, hashReader, c.getMetadata(objInfo))
err = dcache.Put(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), c.getMetadata(objInfo), opts)
if err != nil {
return err
}
@@ -249,17 +329,17 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
}
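GetObject inverts that arrangement: the backend read runs in a goroutine that fans out to the client and the pipe via io.MultiWriter, while dcache.Put drains the pipe in the foreground. A stand-alone sketch of that shape (fanOutGet, backendGet, and cachePut are illustrative names):

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// fanOutGet streams one backend read to the client and, through a pipe, to a
// cache consumer running in the foreground.
func fanOutGet(client io.Writer, backendGet func(io.Writer) error, cachePut func(io.Reader) error) error {
	pr, pw := io.Pipe()
	go func() {
		// Propagate the backend result through the pipe: an error fails
		// the cache consumer's reads, a nil error closes it with EOF.
		pw.CloseWithError(backendGet(io.MultiWriter(client, pw)))
	}()
	err := cachePut(pr)
	// Unblock the producer in case the cache consumer stopped early.
	pr.CloseWithError(err)
	return err
}

func main() {
	var cached strings.Builder
	err := fanOutGet(os.Stdout,
		func(w io.Writer) error { _, e := io.Copy(w, strings.NewReader("object bytes\n")); return e },
		func(r io.Reader) error { _, e := io.Copy(&cached, r); return e })
	fmt.Printf("cached %q, err: %v\n", cached.String(), err)
}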
// Returns ObjectInfo from cache if available.
func (c cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string) (ObjectInfo, error) {
func (c cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
getObjectInfoFn := c.GetObjectInfoFn
if c.isCacheExclude(bucket, object) {
return getObjectInfoFn(ctx, bucket, object)
return getObjectInfoFn(ctx, bucket, object, opts)
}
// fetch the cacheFSObjects where the object is currently cached, or the nearest available cache drive
dcache, err := c.cache.getCachedFSLoc(ctx, bucket, object)
if err != nil {
return getObjectInfoFn(ctx, bucket, object)
return getObjectInfoFn(ctx, bucket, object, opts)
}
objInfo, err := getObjectInfoFn(ctx, bucket, object)
objInfo, err := getObjectInfoFn(ctx, bucket, object, opts)
if err != nil {
if _, ok := err.(ObjectNotFound); ok {
// Delete the cached entry if backend object was deleted.
@@ -270,14 +350,14 @@ func (c cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string)
return ObjectInfo{}, err
}
// when backend is down, serve from cache.
cachedObjInfo, cerr := dcache.GetObjectInfo(ctx, bucket, object)
cachedObjInfo, cerr := dcache.GetObjectInfo(ctx, bucket, object, opts)
if cerr == nil {
return cachedObjInfo, nil
}
return ObjectInfo{}, BackendDown{}
}
// when backend is up, do a sanity check on cached object
cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object)
cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object, opts)
if err != nil {
return objInfo, nil
}
@@ -291,40 +371,32 @@ func (c cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string)
// Returns function "listDir" of the type listDirFunc.
// isLeaf - is used by the listDir function to check if an entry is a leaf entry or a non-leaf entry.
// disks - list of cacheFSObjects
func listDirCacheFactory(isLeaf isLeafFunc, treeWalkIgnoredErrs []error, disks []*cacheFSObjects) listDirFunc {
listCacheDirs := func(bucket, prefixDir, prefixEntry string) (dirs []string, err error) {
func listDirCacheFactory(isLeaf isLeafFunc, disks []*cacheFSObjects) listDirFunc {
listCacheDirs := func(bucket, prefixDir, prefixEntry string) (dirs []string) {
var entries []string
for _, disk := range disks {
// ignore disk-caches that might be missing/offline
if disk == nil {
continue
}
fs := disk.FSObjects
entries, err = readDir(pathJoin(fs.fsPath, bucket, prefixDir))
// If for any reason the disk was deleted or went offline, continue
// and list from other disks if possible.
fs := disk.FSObjects
var err error
entries, err = readDir(pathJoin(fs.fsPath, bucket, prefixDir))
if err != nil {
if IsErrIgnored(err, treeWalkIgnoredErrs...) {
continue
}
return nil, err
continue
}
// Filter entries that have the prefix prefixEntry.
entries = filterMatchingPrefix(entries, prefixEntry)
dirs = append(dirs, entries...)
}
return dirs, nil
return dirs
}
// listDir - lists all the entries at a given prefix and given entry in the prefix.
listDir := func(bucket, prefixDir, prefixEntry string) (mergedEntries []string, delayIsLeaf bool, err error) {
var cacheEntries []string
cacheEntries, err = listCacheDirs(bucket, prefixDir, prefixEntry)
if err != nil {
return nil, false, err
}
listDir := func(bucket, prefixDir, prefixEntry string) (mergedEntries []string, delayIsLeaf bool) {
cacheEntries := listCacheDirs(bucket, prefixDir, prefixEntry)
for _, entry := range cacheEntries {
// Find elements in entries which are not in mergedEntries
idx := sort.SearchStrings(mergedEntries, entry)
@@ -335,7 +407,7 @@ func listDirCacheFactory(isLeaf isLeafFunc, treeWalkIgnoredErrs []error, disks [
mergedEntries = append(mergedEntries, entry)
sort.Strings(mergedEntries)
}
return mergedEntries, false, nil
return mergedEntries, false
}
return listDir
}
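The merge step above keeps one sorted, de-duplicated entry list across all cache drives via sort.SearchStrings. A stand-alone sketch of the idiom (mergeSorted is an illustrative name):

package main

import (
	"fmt"
	"sort"
)

// mergeSorted inserts entries into merged, keeping merged sorted and free of
// duplicates, the way listDir folds per-drive cache listings together.
func mergeSorted(merged, entries []string) []string {
	for _, entry := range entries {
		// SearchStrings returns the index where entry is, or would be inserted.
		idx := sort.SearchStrings(merged, entry)
		if idx < len(merged) && merged[idx] == entry {
			continue // already listed by another cache drive
		}
		merged = append(merged, entry)
		sort.Strings(merged)
	}
	return merged
}

func main() {
	m := mergeSorted(nil, []string{"b/", "a.txt"})
	m = mergeSorted(m, []string{"a.txt", "c.txt"}) // "a.txt" is deduplicated
	fmt.Println(m)                                 // [a.txt b/ c.txt]
}

Appending and re-sorting on every insert is simple but O(n log n) per entry; splicing the entry in at idx would keep each insert linear.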
@@ -371,7 +443,7 @@ func (c cacheObjects) listCacheObjects(ctx context.Context, bucket, prefix, mark
return fs.isObjectDir(bucket, object)
}
listDir := listDirCacheFactory(isLeaf, cacheTreeWalkIgnoredErrs, c.cache.cfs)
listDir := listDirCacheFactory(isLeaf, c.cache.cfs)
walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, isLeaf, isLeafDir, endWalkCh)
}
@@ -572,42 +644,43 @@ func (c cacheObjects) isCacheExclude(bucket, object string) bool {
}
// PutObject - caches the uploaded object for single Put operations
func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
putObjectFn := c.PutObjectFn
data := r.Reader
dcache, err := c.cache.getCacheFS(ctx, bucket, object)
if err != nil {
// disk cache could not be located, execute backend call.
return putObjectFn(ctx, bucket, object, r, metadata)
return putObjectFn(ctx, bucket, object, r, metadata, opts)
}
size := r.Size()
// fetch from backend if there is no space on the cache drive
if !dcache.diskAvailable(size * cacheSizeMultiplier) {
return putObjectFn(ctx, bucket, object, r, metadata)
if !dcache.diskAvailable(size) {
return putObjectFn(ctx, bucket, object, r, metadata, opts)
}
// fetch from backend if the cache exclude pattern or the cache-control
// directive is set to exclude
if c.isCacheExclude(bucket, object) || filterFromCache(metadata) {
dcache.Delete(ctx, bucket, object)
return putObjectFn(ctx, bucket, object, r, metadata)
return putObjectFn(ctx, bucket, object, r, metadata, opts)
}
objInfo = ObjectInfo{}
// Initialize pipe to stream data to backend
pipeReader, pipeWriter := io.Pipe()
hashReader, err := hash.NewReader(pipeReader, size, r.MD5HexString(), r.SHA256HexString())
hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
if err != nil {
return ObjectInfo{}, err
}
// Initialize pipe to stream data to cache
rPipe, wPipe := io.Pipe()
cHashReader, err := hash.NewReader(rPipe, size, r.MD5HexString(), r.SHA256HexString())
cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
if err != nil {
return ObjectInfo{}, err
}
oinfoCh := make(chan ObjectInfo)
errCh := make(chan error)
go func() {
oinfo, perr := putObjectFn(ctx, bucket, object, hashReader, metadata)
oinfo, perr := putObjectFn(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), metadata, opts)
if perr != nil {
pipeWriter.CloseWithError(perr)
wPipe.CloseWithError(perr)
@@ -620,14 +693,14 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *h
}()
go func() {
if err = dcache.Put(ctx, bucket, object, cHashReader, metadata); err != nil {
if err = dcache.Put(ctx, bucket, object, NewPutObjReader(cHashReader, nil, nil), metadata, opts); err != nil {
wPipe.CloseWithError(err)
return
}
}()
mwriter := io.MultiWriter(pipeWriter, wPipe)
_, err = io.Copy(mwriter, r)
_, err = io.Copy(mwriter, data)
if err != nil {
err = <-errCh
return objInfo, err
@@ -639,68 +712,69 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *h
}
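PutObject (and PutObjectPart below, which uses the same shape) writes the upload to the backend and the cache drive concurrently: one io.Copy feeds an io.MultiWriter over two pipes, with a goroutine draining each. A self-contained sketch under those assumptions (dualWrite, putBackend, and putCache are illustrative names):

package main

import (
	"fmt"
	"io"
	"strings"
)

// dualWrite streams src to both destinations at once via two pipes and a
// MultiWriter; a failure on either side tears down the producer copy.
func dualWrite(src io.Reader, putBackend, putCache func(io.Reader) error) error {
	bkR, bkW := io.Pipe()
	cR, cW := io.Pipe()

	backendErr := make(chan error, 1)
	cacheDone := make(chan struct{})
	go func() {
		err := putBackend(bkR)
		if err != nil {
			// Abort the producer and the cache copy on backend failure.
			bkW.CloseWithError(err)
			cW.CloseWithError(err)
		}
		backendErr <- err
	}()
	go func() {
		defer close(cacheDone)
		if err := putCache(cR); err != nil {
			cW.CloseWithError(err)
		}
	}()

	_, err := io.Copy(io.MultiWriter(bkW, cW), src)
	bkW.CloseWithError(err) // a nil error closes the pipe with EOF
	cW.CloseWithError(err)
	<-cacheDone
	if berr := <-backendErr; berr != nil {
		return berr
	}
	return err
}

func main() {
	var backend, cache strings.Builder
	err := dualWrite(strings.NewReader("payload"),
		func(r io.Reader) error { _, e := io.Copy(&backend, r); return e },
		func(r io.Reader) error { _, e := io.Copy(&cache, r); return e })
	fmt.Println(backend.String(), cache.String(), err)
}

Note that, as in the diff, the MultiWriter couples the two sinks: a cache-side write error also fails the producer copy, so the backend upload is not fully independent of cache health.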
// NewMultipartUpload - Starts a new multipart upload operation to backend and cache.
func (c cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
func (c cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
newMultipartUploadFn := c.NewMultipartUploadFn
if c.isCacheExclude(bucket, object) || filterFromCache(metadata) {
return newMultipartUploadFn(ctx, bucket, object, metadata)
return newMultipartUploadFn(ctx, bucket, object, metadata, opts)
}
dcache, err := c.cache.getCacheFS(ctx, bucket, object)
if err != nil {
// disk cache could not be located, execute backend call.
return newMultipartUploadFn(ctx, bucket, object, metadata)
return newMultipartUploadFn(ctx, bucket, object, metadata, opts)
}
uploadID, err = newMultipartUploadFn(ctx, bucket, object, metadata)
uploadID, err = newMultipartUploadFn(ctx, bucket, object, metadata, opts)
if err != nil {
return
}
// create new multipart upload in cache with same uploadID
dcache.NewMultipartUpload(ctx, bucket, object, metadata, uploadID)
dcache.NewMultipartUpload(ctx, bucket, object, metadata, uploadID, opts)
return uploadID, err
}
// PutObjectPart - uploads part to backend and cache simultaneously.
func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error) {
func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
data := r.Reader
putObjectPartFn := c.PutObjectPartFn
dcache, err := c.cache.getCacheFS(ctx, bucket, object)
if err != nil {
// disk cache could not be located, execute backend call.
return putObjectPartFn(ctx, bucket, object, uploadID, partID, data)
return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
}
if c.isCacheExclude(bucket, object) {
return putObjectPartFn(ctx, bucket, object, uploadID, partID, data)
return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
}
// make sure cache has at least cacheSizeMultiplier * size available
// make sure the cache has at least size bytes available
size := data.Size()
if !dcache.diskAvailable(size * cacheSizeMultiplier) {
if !dcache.diskAvailable(size) {
select {
case dcache.purgeChan <- struct{}{}:
default:
}
return putObjectPartFn(ctx, bucket, object, uploadID, partID, data)
return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
}
info = PartInfo{}
// Initialize pipe to stream data to backend
pipeReader, pipeWriter := io.Pipe()
hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString())
hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
if err != nil {
return
}
// Initialize pipe to stream data to cache
rPipe, wPipe := io.Pipe()
cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString())
cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
if err != nil {
return
}
pinfoCh := make(chan PartInfo)
errorCh := make(chan error)
go func() {
info, err = putObjectPartFn(ctx, bucket, object, uploadID, partID, hashReader)
info, err = putObjectPartFn(ctx, bucket, object, uploadID, partID, NewPutObjReader(hashReader, nil, nil), opts)
if err != nil {
close(pinfoCh)
pipeWriter.CloseWithError(err)
@@ -712,7 +786,7 @@ func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadI
pinfoCh <- info
}()
go func() {
if _, perr := dcache.PutObjectPart(ctx, bucket, object, uploadID, partID, cHashReader); perr != nil {
if _, perr := dcache.PutObjectPart(ctx, bucket, object, uploadID, partID, NewPutObjReader(cHashReader, nil, nil), opts); perr != nil {
wPipe.CloseWithError(perr)
return
}
@@ -754,25 +828,25 @@ func (c cacheObjects) AbortMultipartUpload(ctx context.Context, bucket, object,
}
// CompleteMultipartUpload - completes multipart upload operation on backend and cache.
func (c cacheObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
func (c cacheObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
completeMultipartUploadFn := c.CompleteMultipartUploadFn
if c.isCacheExclude(bucket, object) {
return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
}
dcache, err := c.cache.getCacheFS(ctx, bucket, object)
if err != nil {
// disk cache could not be located, execute backend call.
return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
}
// perform backend operation
objInfo, err = completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
objInfo, err = completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
if err != nil {
return
}
// complete the multipart upload in cache with the same uploadID
dcache.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts)
dcache.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
return
}
@@ -888,14 +962,17 @@ func newServerCacheObjects(config CacheConfig) (CacheObjectLayer, error) {
cache: dcache,
exclude: config.Exclude,
listPool: newTreeWalkPool(globalLookupTimeout),
GetObjectFn: func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
return newObjectLayerFn().GetObject(ctx, bucket, object, startOffset, length, writer, etag)
GetObjectFn: func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
return newObjectLayerFn().GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
},
GetObjectInfoFn: func(ctx context.Context, bucket, object string) (ObjectInfo, error) {
return newObjectLayerFn().GetObjectInfo(ctx, bucket, object)
GetObjectInfoFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
return newObjectLayerFn().GetObjectInfo(ctx, bucket, object, opts)
},
PutObjectFn: func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
return newObjectLayerFn().PutObject(ctx, bucket, object, data, metadata)
GetObjectNInfoFn: func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
return newObjectLayerFn().GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
},
PutObjectFn: func(ctx context.Context, bucket, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
return newObjectLayerFn().PutObject(ctx, bucket, object, data, metadata, opts)
},
DeleteObjectFn: func(ctx context.Context, bucket, object string) error {
return newObjectLayerFn().DeleteObject(ctx, bucket, object)
@@ -912,17 +989,17 @@ func newServerCacheObjects(config CacheConfig) (CacheObjectLayer, error) {
GetBucketInfoFn: func(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
return newObjectLayerFn().GetBucketInfo(ctx, bucket)
},
NewMultipartUploadFn: func(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, metadata)
NewMultipartUploadFn: func(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, metadata, opts)
},
PutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error) {
return newObjectLayerFn().PutObjectPart(ctx, bucket, object, uploadID, partID, data)
PutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
return newObjectLayerFn().PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
},
AbortMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string) error {
return newObjectLayerFn().AbortMultipartUpload(ctx, bucket, object, uploadID)
},
CompleteMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
return newObjectLayerFn().CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts)
CompleteMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
return newObjectLayerFn().CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
},
DeleteBucketFn: func(ctx context.Context, bucket string) error {
return newObjectLayerFn().DeleteBucket(ctx, bucket)


@@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"context"
"os"
"reflect"
"testing"
"time"
@@ -28,21 +27,15 @@ import (
)
// Initialize cache FS objects.
func initCacheFSObjects(disk string, cacheMaxUse int, t *testing.T) (*cacheFSObjects, error) {
newTestConfig(globalMinioDefaultRegion)
var err error
obj, err := newCacheFSObjects(disk, globalCacheExpiry, cacheMaxUse)
if err != nil {
t.Fatal(err)
}
return obj, nil
func initCacheFSObjects(disk string, cacheMaxUse int) (*cacheFSObjects, error) {
return newCacheFSObjects(disk, globalCacheExpiry, cacheMaxUse)
}
// initializes a diskCache struct for nDisks
func initDiskCaches(drives []string, cacheMaxUse int, t *testing.T) (*diskCache, error) {
var cfs []*cacheFSObjects
for _, d := range drives {
obj, err := initCacheFSObjects(d, cacheMaxUse, t)
obj, err := initCacheFSObjects(d, cacheMaxUse)
if err != nil {
return nil, err
}
@@ -131,11 +124,6 @@ func TestGetCacheFSMaxUse(t *testing.T) {
// test wildcard patterns for excluding entries from cache
func TestCacheExclusion(t *testing.T) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rootPath)
fsDirs, err := getRandomDisks(1)
if err != nil {
t.Fatal(err)
@@ -146,7 +134,7 @@ func TestCacheExclusion(t *testing.T) {
t.Fatal(err)
}
cobj := cobjects.(*cacheObjects)
globalServiceDoneCh <- struct{}{}
GlobalServiceDoneCh <- struct{}{}
testCases := []struct {
bucketName string
objectName string
@@ -204,17 +192,18 @@ func TestDiskCache(t *testing.T) {
objInfo.ContentType = contentType
objInfo.ETag = etag
objInfo.UserDefined = httpMeta
opts := ObjectOptions{}
byteReader := bytes.NewReader([]byte(content))
hashReader, err := hash.NewReader(byteReader, int64(size), "", "")
hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size))
if err != nil {
t.Fatal(err)
}
err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta)
err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), httpMeta, opts)
if err != nil {
t.Fatal(err)
}
cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName)
cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName, opts)
if err != nil {
t.Fatal(err)
}
@@ -231,7 +220,7 @@ func TestDiskCache(t *testing.T) {
t.Fatal("Cached content-type does not match")
}
writer := bytes.NewBuffer(nil)
err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "")
err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "", opts)
if err != nil {
t.Fatal(err)
}
@@ -278,23 +267,24 @@ func TestDiskCacheMaxUse(t *testing.T) {
objInfo.ContentType = contentType
objInfo.ETag = etag
objInfo.UserDefined = httpMeta
opts := ObjectOptions{}
byteReader := bytes.NewReader([]byte(content))
hashReader, err := hash.NewReader(byteReader, int64(size), "", "")
hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size))
if err != nil {
t.Fatal(err)
}
if !cache.diskAvailable(int64(size)) {
err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta)
err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), httpMeta, opts)
if err != errDiskFull {
t.Fatal("Cache max-use limit violated.")
}
} else {
err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta)
err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), httpMeta, opts)
if err != nil {
t.Fatal(err)
}
cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName)
cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName, opts)
if err != nil {
t.Fatal(err)
}
@@ -311,7 +301,7 @@ func TestDiskCacheMaxUse(t *testing.T) {
t.Fatal("Cached content-type does not match")
}
writer := bytes.NewBuffer(nil)
err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "")
err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "", opts)
if err != nil {
t.Fatal(err)
}


@@ -0,0 +1,182 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"testing"
)
var alphabets = []byte("abcdefghijklmnopqrstuvwxyz0123456789")
// DummyDataGen returns a reader that repeats the bytes in `alphabets`
// up to the desired length.
type DummyDataGen struct {
b []byte
idx, length int64
}
// NewDummyDataGen returns a ReadSeeker over the first `totalLength`
// bytes from the infinite stream consisting of repeated
// concatenations of `alphabets`.
//
// The skipOffset (generally = 0) can be used to skip a given number
// of bytes from the beginning of the infinite stream. This is useful
// for comparing byte streams that may be split up, because:
//
// Given the function:
//
// f := func(r io.Reader) string {
// b, _ := ioutil.ReadAll(r)
// return string(b)
// }
//
// for example, the following is true:
//
// f(NewDummyDataGen(100, 0)) == f(NewDummyDataGen(50, 0)) + f(NewDummyDataGen(50, 50))
func NewDummyDataGen(totalLength, skipOffset int64) io.ReadSeeker {
if totalLength < 0 {
panic("Negative length passed to DummyDataGen!")
}
if skipOffset < 0 {
panic("Negative rotations are not allowed")
}
skipOffset = skipOffset % int64(len(alphabets))
as := make([]byte, 2*len(alphabets))
copy(as, alphabets)
copy(as[len(alphabets):], alphabets)
b := as[skipOffset : skipOffset+int64(len(alphabets))]
return &DummyDataGen{
length: totalLength,
b: b,
}
}
func (d *DummyDataGen) Read(b []byte) (n int, err error) {
k := len(b)
numLetters := int64(len(d.b))
// Fill b by repeatedly copying from the (possibly rotated) alphabet,
// tracking the absolute stream position in d.idx.
for k > 0 && d.idx < d.length {
w := copy(b[len(b)-k:], d.b[d.idx%numLetters:])
k -= w
d.idx += int64(w)
n += w
}
if d.idx >= d.length {
// The final copy may overshoot the configured stream length;
// trim the excess bytes and report EOF.
extraBytes := d.idx - d.length
n -= int(extraBytes)
if n < 0 {
n = 0
}
err = io.EOF
}
return
}
func (d *DummyDataGen) Seek(offset int64, whence int) (int64, error) {
switch whence {
case io.SeekStart:
if offset < 0 {
return 0, errors.New("Invalid offset")
}
d.idx = offset
case io.SeekCurrent:
if d.idx+offset < 0 {
return 0, errors.New("Invalid offset")
}
d.idx += offset
case io.SeekEnd:
if d.length+offset < 0 {
return 0, errors.New("Invalid offset")
}
d.idx = d.length + offset
}
return d.idx, nil
}
func TestDummyDataGenerator(t *testing.T) {
readAll := func(r io.Reader) string {
b, _ := ioutil.ReadAll(r)
return string(b)
}
checkEq := func(a, b string) {
if a != b {
t.Fatalf("Unexpected equality failure")
}
}
checkEq(readAll(NewDummyDataGen(0, 0)), "")
checkEq(readAll(NewDummyDataGen(10, 0)), readAll(NewDummyDataGen(10, int64(len(alphabets)))))
checkEq(readAll(NewDummyDataGen(100, 0)), readAll(NewDummyDataGen(50, 0))+readAll(NewDummyDataGen(50, 50)))
r := NewDummyDataGen(100, 0)
r.Seek(int64(len(alphabets)), 0)
checkEq(readAll(r), readAll(NewDummyDataGen(100-int64(len(alphabets)), 0)))
}
// Compares all the bytes returned by the given readers. Any Read
// errors cause a `false` result. A string describing the error is
// also returned.
func cmpReaders(r1, r2 io.Reader) (bool, string) {
bufLen := 32 * 1024
b1, b2 := make([]byte, bufLen), make([]byte, bufLen)
for i := 0; true; i++ {
n1, e1 := io.ReadFull(r1, b1)
n2, e2 := io.ReadFull(r2, b2)
if n1 != n2 {
return false, fmt.Sprintf("Read %d != %d bytes from the readers", n1, n2)
}
if !bytes.Equal(b1[:n1], b2[:n2]) {
return false, fmt.Sprintf("After reading %d equal buffers (32Kib each), we got the following two strings:\n%v\n%v\n",
i, b1, b2)
}
// Check if stream has ended
if (e1 == io.ErrUnexpectedEOF && e2 == io.ErrUnexpectedEOF) || (e1 == io.EOF && e2 == io.EOF) {
break
}
if e1 != nil || e2 != nil {
return false, fmt.Sprintf("Got unexpected error values: %v == %v", e1, e2)
}
}
return true, ""
}
func TestCmpReaders(t *testing.T) {
{
r1 := bytes.NewBuffer([]byte("abc"))
r2 := bytes.NewBuffer([]byte("abc"))
ok, msg := cmpReaders(r1, r2)
if !(ok && msg == "") {
t.Fatalf("unexpected")
}
}
{
r1 := bytes.NewBuffer([]byte("abc"))
r2 := bytes.NewBuffer([]byte("abcd"))
ok, _ := cmpReaders(r1, r2)
if ok {
t.Fatalf("unexpected")
}
}
}
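The two helpers compose naturally; a hypothetical round-trip check (TestRoundTrip is illustrative and not part of this diff) could look like:

func TestRoundTrip(t *testing.T) {
	var buf bytes.Buffer
	// Stream 1 MiB of deterministic data into a buffer...
	if _, err := io.Copy(&buf, NewDummyDataGen(1<<20, 0)); err != nil {
		t.Fatal(err)
	}
	// ...and compare it byte-for-byte against a fresh generator.
	if ok, msg := cmpReaders(&buf, NewDummyDataGen(1<<20, 0)); !ok {
		t.Fatalf("streams differ: %s", msg)
	}
}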
