Compare commits

...

276 Commits

Author SHA1 Message Date
Cesar N
20960b6a2d Update console to v1.6.0 (#19933) 2024-06-13 15:53:53 -07:00
Shubhendu
3bd3470d0b Corrected names of node replication metrics (#19932)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-06-13 15:26:54 -07:00
Harshavardhana
ba39ed9af7 loadUser() if not able to load() credential return error (#19931) 2024-06-13 15:26:38 -07:00
jiuker
62e6dc950d fix: do not update metadata cache upon headObject() (#19929) 2024-06-13 08:42:02 -07:00
Harshavardhana
5a5046ce45 upgrade all deps and credits (#19930)
Signed-off-by: Harshavardhana <harsha@minio.io>
2024-06-13 08:34:20 -07:00
Klaus Post
ad04afe381 Fix SSEC multipart checksum replication (#19915)
* Multipart SSEC checksums were not transferred.
* Remove key mismatch logging. This key is user-controlled with SSEC.
* If the source is SSEC and the destination reports ErrSSEEncryptedObject, 
  assume replication is good.
2024-06-12 23:56:12 -07:00
Harshavardhana
ba9f0f2480 fix: attempt to fix CI/CD upgrade tests with docker-compose (#19926) 2024-06-12 22:08:11 -07:00
Harshavardhana
d06b63d056 load credential for in-flights requests as singleflight (#19920)
avoid concurrent callers of LoadUser() initiating object
read() requests when an on-going operation is already in progress.

this avoids many callers hitting the drives and causing I/O
spikes, and also allows credentials to load faster.
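A minimal sketch of the singleflight pattern described above, using golang.org/x/sync/singleflight; the actual MinIO implementation may differ, and `loadUser` and the key here are illustrative only:

```go
package main

import (
	"fmt"

	"golang.org/x/sync/singleflight"
)

var loadGroup singleflight.Group

// loadUser is a stand-in for the credential loader: concurrent callers for
// the same access key share a single in-flight load instead of each issuing
// object read() requests against the drives.
func loadUser(accessKey string) (string, error) {
	v, err, _ := loadGroup.Do(accessKey, func() (interface{}, error) {
		// the expensive read of the credential object would happen here
		return "credential-for-" + accessKey, nil
	})
	if err != nil {
		return "", err
	}
	return v.(string), nil
}

func main() {
	cred, _ := loadUser("AKIAEXAMPLE")
	fmt.Println(cred)
}
```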
2024-06-12 13:47:56 -07:00
Andreas Auernhammer
7ce28c3b1d kms: use GetClientCertificate callback for KES API keys (#19921)
This commit fixes an issue in the KES client configuration
that can cause the following error when connecting to KES:
```
ERROR Failed to connect to KMS: failed to generate data key with KMS key: tls: client certificate is required
```

The Go TLS stack seems to not send a client certificate if it
thinks the client certificate cannot be validated by the peer.
In case of an API key, we don't care about this since we use
public key pinning and the X.509 certificate is just a transport
encoding.

The `GetClientCertificate` callback, however, seems to always be
honored, so this error does not occur.
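A minimal sketch of the callback in question using only the standard crypto/tls API; the file names are placeholders and this is not MinIO's actual KES wiring:

```go
package main

import (
	"crypto/tls"
	"log"
)

func main() {
	// Certificate derived from the KES API key; paths are placeholders.
	cert, err := tls.LoadX509KeyPair("client.crt", "client.key")
	if err != nil {
		log.Fatal(err)
	}
	_ = &tls.Config{
		// Unlike the Certificates field, this callback is always consulted,
		// so the certificate is sent even if Go thinks the peer's CA list
		// would not accept it.
		GetClientCertificate: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
			return &cert, nil
		},
	}
}
```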

Signed-off-by: Andreas Auernhammer <github@aead.dev>
2024-06-12 07:31:26 -07:00
Harshavardhana
e3ac4035b9 decrement requests inqueue correctly after the request is processed (#19918) 2024-06-12 01:13:12 -07:00
Harshavardhana
d21b6daa49 fix: avoid crash when delete() returns an error in batch expiration (#19909) 2024-06-11 06:50:53 -07:00
Minio Trusted
76ebb16688 Update yaml files to latest version RELEASE.2024-06-11T03-13-30Z 2024-06-11 06:11:10 +00:00
Harshavardhana
55aa431578 fix: on Windows avoid ':' as part of the object name (#19907)
fixes #18865
avoid-colon
2024-06-10 20:13:30 -07:00
Harshavardhana
614981e566 allow purge expired STS while loading credentials (#19905)
the reason for this is to avoid STS mappings being
purged without a successful load of the other policies,
and to ensure that only successfully loaded credentials
are handled.

This also avoids unnecessary cache store which was
implemented earlier for optimization.
2024-06-10 11:45:50 -07:00
Harshavardhana
b8b956a05d add changes to Makefile to support dev build 2024-06-10 10:41:02 -07:00
Klaus Post
d2eed44c78 Fix replication checksum transfer (#19906)
Compression will be disabled by default if SSE-C is specified. So we can still honor SSE-C.
2024-06-10 10:40:33 -07:00
Anis Eleuch
789cbc6fb2 heal: Dangling check to evaluate object parts separately (#19797) 2024-06-10 08:51:27 -07:00
jiuker
0662c90b5c fix: copyObject restore with a specific version, update test cases (#19895) 2024-06-10 08:50:49 -07:00
Klaus Post
a2cab02554 Fix SSE-C checksums (#19896)
Compression will be disabled by default if SSE-C is specified. So we can still honor SSE-C.
2024-06-10 08:31:51 -07:00
Harshavardhana
6c7a21df6b turn-off unexpected debug logging in List() calls (#19903) 2024-06-09 21:34:26 -07:00
Ali Afsharzadeh
f933b0b708 Upgrade setup-helm action from v3 to v4 (#19897) 2024-06-09 02:13:09 -07:00
Shubhendu
9f305273a7 Added tests for replication of checksum headers (#19879)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-06-08 09:24:15 -07:00
Cesar N
cbd9efcb43 Update console to v1.5.0 (#19899) 2024-06-08 01:12:00 -07:00
Harshavardhana
29a25a538f fix: make sure we list freeVersions like DEL marker with --versions (#19878)
freeVersions() was being incorrectly skipped; list it as
valid objects properly.

Co-authored-by: Krishnan Parthasarathi <Krishnan Parthasarathi>
2024-06-07 15:18:44 -07:00
Harshavardhana
2dd8faaedc remove unnecessary log in Listing() 2024-06-07 14:52:55 -07:00
Klaus Post
f00187033d Two way streams for upcoming locking enhancements (#19796) 2024-06-07 08:51:52 -07:00
Aditya Manthramurthy
c5141d65ac Update docker build script to pull all changes (#19892) 2024-06-07 08:43:38 -07:00
Krishnan Parthasarathi
069c4015cd Don't tier directory objects (#19891)
Directory objects are used by applications that simulate the folder
structure of an on-disk filesystem. These are zero-byte objects with names
ending with '/'. They are only used to check whether a 'folder' exists in
the namespace.
2024-06-07 08:43:17 -07:00
Shubhendu
2f6e03fb60 Calculate correct object size while replication (#19888)
It was missing in case of `replicateObject` but was present for
`replicateAll` already

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-06-06 12:31:01 -07:00
Klaus Post
0fbb945e13 Disable caching of encrypted objects (#19890)
Don't write encrypted objects to cache, if configured.
2024-06-06 11:39:18 -07:00
Anis Eleuch
b94dd835c9 decom: Fix CurrentSize output when generating the status (#19883)
StartSize starts with the raw free space of all disks in the given pool;
however, during status reporting, CurrentSize was not showing the current
raw free space, as expected at least by `mc admin decom status` since it
was written.
2024-06-06 07:30:43 -07:00
Minio Trusted
44fc707423 Update yaml files to latest version RELEASE.2024-06-06T09-36-42Z 2024-06-06 13:36:05 +00:00
Poorna
5aaef9790f replication: pass checksum headers to replica (#19834) 2024-06-06 02:36:42 -07:00
Bala FA
7edc352d23 Add ILM metrics in metrics-v3 (#19539)
Signed-off-by: Bala.FA <bala@minio.io>
2024-06-06 02:36:25 -07:00
Poorna
850a84b08a simplify site replication multipart proxying (#19885) 2024-06-05 18:01:15 -07:00
Taran Pelkey
4148754ce0 Check both given and normalized group DN on LDAP policy detach requests (#19876) 2024-06-05 15:42:40 -07:00
Harshavardhana
2107722829 upgrade go-oidc to fix GO-2024-2631 (#19884) 2024-06-05 15:00:34 -07:00
jiuker
d326ba52e9 feat: support batchJob for windows (#19877) 2024-06-05 08:44:53 -07:00
Sveinn
91e1487de4 Add LDAP public key authentication to SFTP (#19833) 2024-06-05 00:51:13 -07:00
Minio Trusted
5ffb2a9605 Update yaml files to latest version RELEASE.2024-06-04T19-20-08Z 2024-06-04 22:25:53 +00:00
Harshavardhana
17fe91d6d1 chore: update all deps (#19875) 2024-06-04 12:20:08 -07:00
jiuker
90a9f2dd70 fix: log diskerror when detect the disk space failed (#19861) 2024-06-04 09:42:03 -07:00
Harshavardhana
d5e48cfd65 fix: remove DriveOPTimeout for REST callers as they don't work properly (#19873)
Go's net/http makes it notoriously difficult to apply streaming
deadlines per READ/WRITE on the net.Conn; if we add them, they
interfere with Go's internal requirements for an HTTP connection.

Remove this support for now.

fixes #19853
2024-06-04 08:12:57 -07:00
Anis Eleuch
d274566463 race: Fix rare race detected by testing (#19872)
Below is the race warning:

```
WARNING: DATA RACE
Write at 0x00c02d3d27c0 by goroutine 1210:
  github.com/minio/minio/cmd.(*healingTracker).bucketDone()
      github.com/minio/minio/cmd/background-newdisks-heal-ops.go:273 +0x13a
  github.com/minio/minio/cmd.(*erasureObjects).healErasureSet()
      github.com/minio/minio/cmd/global-heal.go:525 +0x2158
  github.com/minio/minio/cmd.healFreshDisk()
      github.com/minio/minio/cmd/background-newdisks-heal-ops.go:450 +0x107e
  github.com/minio/minio/cmd.monitorLocalDisksAndHeal.func1()
      github.com/minio/minio/cmd/background-newdisks-heal-ops.go:528 +0x150
  github.com/minio/minio/cmd.monitorLocalDisksAndHeal.gowrap2()
      github.com/minio/minio/cmd/background-newdisks-heal-ops.go:538 +0x82

Previous read at 0x00c02d3d27c0 by goroutine 1446:
  github.com/minio/minio/cmd.(*erasureObjects).healErasureSet.func5()
      github.com/minio/minio/cmd/global-heal.go:232 +0xfd
```
2024-06-04 08:12:32 -07:00
Shubhendu
39ac720826 Remove hardcoded override as not needed (#19868)
Fixes: https://github.com/minio/minio/issues/19867

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-06-04 06:24:37 -07:00
Shubhendu
21b6204692 Test proxying of DEL marker for bucket replication (#19870)
Make sure to avoid proxying for DEL markers

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-06-04 04:38:26 -07:00
Taran Pelkey
d98faeb26a Check if LDAP User has attached policy before creating Service Account (#19843)
Check if the LDAP user has a policy attached before creating
2024-06-03 12:58:48 -07:00
Klaus Post
0a63dc199c Add trace sizes to more trace types (#19864)
Add trace sizes to

* ILM traces
* Replication traces
* Healing traces
* Decommission traces
* Rebalance traces
* (s)ftp traces
* http traces.
2024-06-03 08:45:54 -07:00
Anis Eleuch
3ba857dfa1 race: Fix detected test race in the internal audit code (#19865) 2024-06-03 08:44:50 -07:00
Klaus Post
a8554c4022 Update madmin (#19862)
Make it include https://github.com/minio/madmin-go/pull/285
2024-06-03 05:00:14 -07:00
Harshavardhana
ba54b39c02 fix: crash when audit webhook queue_dir is not writable (#19854)
This is regression introduced in #19275 refactor
2024-06-01 20:03:39 -07:00
Anis Eleuch
2a75225569 kafka: _MINIO_KAFKA_DEBUG to enable sarama debug messages (#19849) 2024-06-01 08:02:59 -07:00
Klaus Post
e72429c79c Add sizes to traces (#19851)
added to storage and grid traces. Can provide more context for traces that aren't HTTP. Others may apply.
2024-05-31 22:17:37 -07:00
Klaus Post
c5b3f5553f Add per connection RPC metrics (#19852)
Provides individual and aggregate stats for each RPC connection.

Example:

```
  "rpc": {
   "collectedAt": "2024-05-31T14:33:29.1373103+02:00",
   "connected": 30,
   "disconnected": 0,
   "outgoingStreams": 69,
   "incomingStreams": 0,
   "outgoingBytes": 174822796,
   "incomingBytes": 175821566,
   "outgoingMessages": 768595,
   "incomingMessages": 768589,
   "outQueue": 0,
   "lastPongTime": "2024-05-31T12:33:28Z",
   "byDestination": {
    "http://127.0.0.1:9001": {
     "collectedAt": "2024-05-31T14:33:29.1373103+02:00",
     "connected": 5,
     "disconnected": 0,
     "outgoingStreams": 2,
     "incomingStreams": 0,
     "outgoingBytes": 38432543,
     "incomingBytes": 66604052,
     "outgoingMessages": 229496,
     "incomingMessages": 229575,
     "outQueue": 0,
     "lastPongTime": "2024-05-31T12:33:27Z"
    },
    "http://127.0.0.1:9002": {
     "collectedAt": "2024-05-31T14:33:29.1373103+02:00",
     "connected": 5,
     "disconnected": 0,
     "outgoingStreams": 6,
     "incomingStreams": 0,
     "outgoingBytes": 38215680,
     "incomingBytes": 66121283,
     "outgoingMessages": 228525,
     "incomingMessages": 228510,
     "outQueue": 0,
     "lastPongTime": "2024-05-31T12:33:27Z"
    },
...
```
2024-05-31 22:16:24 -07:00
Klaus Post
d3ae0aaad3 Add max buffering to SFTP (#19848)
Prevent OOM by adversarial use of SFTP upload by setting a 100MB max upload buffer.
2024-05-31 14:28:07 -07:00
Klaus Post
d67bccf861 Add xl-meta partial shard reconstruction (#19841)
Add partial shard reconstruction

* Add partial shard reconstruction
* Fix padding causing the last shard to be rejected
* Add md5 checks on single parts
* Move md5 verified to `verified/filename.ext`
* Move complete (without md5) to `complete/filename.ext.partno`

It's not pretty, but at least now the md5 gives some confidence it works correctly.
2024-05-31 07:49:23 -07:00
Anis Eleuch
1277ad69a6 heal: Remove .healing.bin when all ES drives are healing (#19846)
In the very rare case when all drives in an erasure set need to be healed,
remove .healing.bin from all drives, otherwise healing will be stuck in a
loop.

Also, fix a unit test that sometimes fails because the test itself was wrong.
2024-05-31 07:48:50 -07:00
Harshavardhana
8f93e81afb change service account embedded policy size limit (#19840)
Bonus: trim-off all the unnecessary spaces to allow
for real 2048 characters in policies for STS handlers
and re-use the code in all STS handlers.
2024-05-30 11:10:41 -07:00
Harshavardhana
4af31e654b avoid pre-populating buffers for deployments < 32GiB memory (#19839) 2024-05-30 04:58:12 -07:00
Harshavardhana
aad50579ba fix: wire up ILM sub-system properly for help (#19836) 2024-05-30 01:14:58 -07:00
Harshavardhana
38d059b0ae fix: single node multi-drive must register local drives properly (#19832)
since #19688 a regression was introduced during drive
lookups for single node multi-drive setups; drive replacement
would not work correctly without this PR.
2024-05-29 13:12:44 -07:00
Klaus Post
bd4eeb4522 Fix flipped EcM, EcN in metadata header (#19831)
Since this is a tuple encoded field we can just flip the struct members.
2024-05-29 12:14:09 -07:00
jiuker
03e3493288 fix: correct parse the tagging error for PostPolicyBucketHandler (#19825) 2024-05-29 11:50:46 -07:00
Harshavardhana
64baedf5a4 fix: hide prefixes for Hadoop properly (#19821) 2024-05-28 15:53:15 -07:00
Minio Trusted
2f64d5f77e Update yaml files to latest version RELEASE.2024-05-28T17-19-04Z 2024-05-28 19:23:04 +00:00
Anis Eleuch
f79a4ef4d0 policy: More defensive code validating svc:DurationSeconds (#19820)
This does not fix any current issue, but merging https://github.com/minio/madmin-go/pull/282
can lose the validation of the service account expiration time.

Add more defensive code for now. In the future, we should avoid doing
validation in another library.
2024-05-28 10:19:04 -07:00
Taran Pelkey
2d53854b19 Restrict access keys for users and groups to not allow '=' or ',' (#19749)
* initial commit

* Add UTF check

---------

Co-authored-by: Harshavardhana <harsha@minio.io>
2024-05-28 10:14:16 -07:00
Harshavardhana
e5c83535af chore: upgrade deps (#19819)
Signed-off-by: Harshavardhana <harsha@minio.io>
2024-05-28 02:27:44 -07:00
jiuker
c904ef966e feat: support tags for PostPolicy upload (#19816) 2024-05-27 21:44:00 -07:00
Minio Trusted
8f266e0772 Update yaml files to latest version RELEASE.2024-05-27T19-17-46Z 2024-05-27 23:52:43 +00:00
Harshavardhana
e0fe7cc391 fix: information disclosure bug in preconditions GET (#19810)
the precondition check was being honored before validating
whether anonymous access is allowed on the metadata of an
object, leading to disclosure of the following
headers.

```
Last-Modified
Etag
x-amz-version-id
Expires:
Cache-Control:
```

Although the information disclosed is minimal and opaque
in nature, it still reveals whether an object with a specific
name exists or not, even to callers without sufficient
permissions.
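A hypothetical handler skeleton illustrating the ordering this fix enforces; names like `ObjectInfo` and `writeError` are illustrative, not MinIO's actual code. The point is that the access check must come before any precondition evaluation, so no metadata headers are written for callers without permission:

```go
package main

import "net/http"

type ObjectInfo struct{ ETag string }

func writeError(w http.ResponseWriter, code int) { w.WriteHeader(code) }

func getObjectHandler(w http.ResponseWriter, r *http.Request, obj ObjectInfo, allowed bool) {
	if !allowed {
		writeError(w, http.StatusForbidden) // no ETag/Last-Modified leaks here
		return
	}
	if im := r.Header.Get("If-Match"); im != "" && im != obj.ETag {
		w.Header().Set("ETag", obj.ETag) // safe: caller is authorized
		w.WriteHeader(http.StatusPreconditionFailed)
		return
	}
	// ... serve the object ...
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		getObjectHandler(w, r, ObjectInfo{ETag: `"abc"`}, false)
	})
}
```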
2024-05-27 12:17:46 -07:00
Harshavardhana
9d20dec56a Revert "remove dataErrs from er.deleteIfDangling code"
This reverts commit 7d75b1e758.

This fails multipart tests; we still need this code to handle
existing challenges, so wait for the comprehensive fix.
2024-05-26 11:13:29 -07:00
Harshavardhana
597a785253 fix: authenticate LDAP via actual DN instead of normalized DN (#19805)
fix: authenticate LDAP via actual DN instead of normalized DN

Normalized DN is only for internal representation, not for
external communication, any communication to LDAP must be
based on actual user DN. LDAP servers do not understand
normalized DN.

fixes #19757
2024-05-25 06:43:06 -07:00
Harshavardhana
7d75b1e758 remove dataErrs from er.deleteIfDangling code
avoid this until a comprehensive change is
merged such as https://github.com/minio/minio/pull/19797
2024-05-24 18:20:04 -07:00
Aditya Manthramurthy
5f78691fcf ldap: Add user DN attributes list config param (#19758)
This change uses the updated ldap library in minio/pkg (bumped
up to v3). A new config parameter is added for LDAP configuration to
specify extra user attributes to load from the LDAP server and to store
them as additional claims for the user.

A test is added in sts_handlers.go that shows how to access the LDAP
attributes as a claim.

This is in preparation for adding SSH pubkey authentication to MinIO's SFTP
integration.
2024-05-24 16:05:23 -07:00
Shireesh Anjal
a591e06ae5 Add cluster scanner metrics in metrics-v3 (#19517)
endpoint: /minio/metrics/v3/cluster/scanner
metrics:
 - bucket_scans_finished (counter)
 - bucket_scans_started (counter)
 - directories_scanned (counter)
 - last_activity_nano_seconds (gauge)
 - objects_scanned (counter)
 - versions_scanned (counter)
2024-05-24 12:29:25 -07:00
Harshavardhana
443c93c634 compute time spent in ILM properly (#19806) 2024-05-24 12:28:51 -07:00
Shireesh Anjal
5659cddc84 Add cluster config metrics in metrics-v3 (#19507)
endpoint: /minio/metrics/v3/cluster/config
metrics:
- write_quorum
- rrs_parity
- standard_parity
2024-05-24 05:50:46 -07:00
Shireesh Anjal
2a03a34bde Upgrade madmin-go to v3.0.52 (#19798)
This will ensure that content of /proc/cmdline from each server is
captured in the health report.
2024-05-24 05:34:57 -07:00
Shubhendu
1654a9b7e6 Use point in time values for gauge metrics in graphs (#19690)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-05-24 04:11:51 -07:00
Shireesh Anjal
673a521711 Change endpoint of v3 notification metrics (#19804)
from /cluster/notification to /notification
2024-05-24 04:10:24 -07:00
Harshavardhana
2e23076688 move windows runners to in-house (#19800)
GitHub CI runners for Windows have gotten very slow;
move them to our own hosted runners.
2024-05-23 15:29:33 -07:00
Klaus Post
b92ac55250 Add multipart combination to xl-meta (#19780)
Add combination of multiple parts.

Parts will be reconstructed and saved separately and can manually be combined into the complete object.

Parts will be named `(version_id)-(filename).(partnum).(in)complete`.
2024-05-23 09:37:31 -07:00
Shireesh Anjal
7981509cc8 Add cluster and bucket replication metrics in metrics-v3 (#19546)
endpoint: /minio/metrics/v3/cluster/replication
metrics:
- average_active_workers
- average_queued_bytes
- average_queued_count
- average_transfer_rate
- current_active_workers
- current_transfer_rate
- last_minute_queued_bytes
- last_minute_queued_count
- max_active_workers
- max_queued_bytes
- max_queued_count
- max_transfer_rate
- recent_backlog_count

endpoint: /minio/metrics/v3/api/bucket/replication
metrics:
- last_hour_failed_bytes
- last_hour_failed_count
- last_minute_failed_bytes
- last_minute_failed_count
- latency_ms
- proxied_delete_tagging_requests_total
- proxied_get_requests_failures
- proxied_get_requests_total
- proxied_get_tagging_requests_failures
- proxied_get_tagging_requests_total
- proxied_head_requests_failures
- proxied_head_requests_total
- proxied_put_tagging_requests_failures
- proxied_put_tagging_requests_total
- sent_bytes
- sent_count
- total_failed_bytes
- total_failed_count
- proxied_delete_tagging_requests_failures
2024-05-23 00:41:18 -07:00
Krishnan Parthasarathi
6d5bc045bc Disallow ExpiredObjectAllVersions with object lock (#19792)
Relaxes restrictions on Expiration and NoncurrentVersionExpiration
placed by https://github.com/minio/minio/pull/19785.
ref: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-managing-lifecycle

> Object lifecycle management configurations continue functioning
normally on protected objects, including placing delete markers.
However, a locked version of an object cannot be deleted by a S3
Lifecycle expiration policy. Object Lock is maintained regardless of
the object's storage class and throughout S3 Lifecycle
transitions between storage classes.
2024-05-22 18:12:48 -07:00
Harshavardhana
d38e020b29 remove errant logs for disconnected remote (#19793)
Signed-off-by: Harshavardhana <harsha@minio.io>
2024-05-22 18:12:23 -07:00
Poorna
7d29030292 fix list results returned for spark max-keys=2 listing (#19791)
This PR continues fix #19725 for some unhandled cases
2024-05-22 16:16:34 -07:00
Shubhendu
7c7650b7c3 Add sufficient deadlines and countermeasures to handle hung node scenario (#19688)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
Signed-off-by: Harshavardhana <harsha@minio.io>
2024-05-22 16:07:14 -07:00
Harshavardhana
ca80eced24 usage of deadline conn at Accept() breaks websocket (#19789)
fortunately this is not wired up for use; however, if anyone
enables deadlines for the conn, then MinIO startups fail
sporadically.
2024-05-22 10:49:27 -07:00
Anis Eleuch
d0e0b81d8e Fix race in get/set of system/audit targets to avoid race errors (#19790) 2024-05-22 09:23:03 -07:00
jiuker
391baa1c9a test: add reject ilm rule test case (#19788) 2024-05-22 04:26:59 -07:00
Harshavardhana
ae14681c3e Revert "Fix two-way stream cancelation and pings (#19763)"
This reverts commit 4d698841f4.
2024-05-22 03:00:00 -07:00
Klaus Post
4d698841f4 Fix two-way stream cancelation and pings (#19763)
Do not log errors on oneway streams when sending ping fails. Instead, cancel the stream.

This also makes sure pings are sent when blocked on sending responses.
2024-05-22 01:25:25 -07:00
jiuker
9906b3ade9 fix: reject ilm rule when bucket LockEnabled (#19785) 2024-05-21 23:50:03 -07:00
Anis Eleuch
bf1769d3e0 xl: Avoid marking a drive offline after one part read failure (#19779)
This commit fixes one rare case of a multipart object that
can in theory be read, but for which the GetObject API returned an error.

It turned out that six-year-old code was marking a drive offline
when bitrot streaming failed to read a part on a disk, with any error.
This could affect reading a subsequent part: though enough shards were
available, the object could not be reconstructed because one drive had
been marked offline earlier.

This commit removes the code that marks the drive offline. It also
closes the bitrot streaming reader before setting it to nil.
2024-05-21 07:36:21 -07:00
Harshavardhana
63e1ad9f29 fix: the user-agent for Veeam 2024-05-20 11:54:52 -07:00
Klaus Post
2c7bcee53f Add cross-version remapped merges to xl-meta (#19765)
Adds `-xver` which can be used with `-export` and `-combine` to attempt to combine files across versions if data is suspected to be the same. Overlapping data is compared.

Bonus: Make `inspect` accept wildcards.
2024-05-19 08:31:54 -07:00
Harshavardhana
1fd90c93ff re-use StorageAPI while loading drive formats (#19770)
Bonus: safe settings for deployment ID to avoid races
2024-05-19 01:06:49 -07:00
Poorna
e947a844c9 Fix test scripts to use mc ready (#19768) 2024-05-18 11:19:01 -07:00
Poorna
4e2d39293a Fix build script to wait for server to come up (#19767) 2024-05-17 14:43:59 -07:00
Krishnan Parthasarathi
1228d6bf1a Return NumVersions in quorum when available (#19766)
Similar to https://github.com/minio/minio/pull/17925
2024-05-17 13:57:37 -07:00
Shireesh Anjal
fc4561c64c Start callhome immediately after enabling (#19764)
Currently, on enabling callhome (or restarting the server), the callhome
job gets scheduled. This means that one has to wait for 24hrs (the
default frequency duration) to see it in action and to figure out if it
is working as expected.

It will be a better user experience to perform the first callhome
execution immediately after enabling it (or on server start if already
enabled).

Also, generate audit event on callhome execution, setting the error
field in case the execution has failed.
2024-05-17 09:53:34 -07:00
Klaus Post
3b7747b42b Tweak multipart uploads (#19756)
* Store ModTime in the upload ID; return it when listing instead of the current time.
* Use this ModTime to expire and skip reading the file info.
* Consistent upload sorting in listing (since it now has the ModTime).
* Exclude healing disks to avoid returning an empty list.
2024-05-17 09:40:09 -07:00
Harshavardhana
e432e79324 avoid calling 'admin info' for disk, cpu, net metrics collection (#19762)
resource metrics collection was incorrectly making fan-out
liveness peer calls where it's not needed.
2024-05-17 08:15:13 -07:00
Harshavardhana
08d74819b6 handle racy updates to globalSite config (#19750)
```
==================
WARNING: DATA RACE
Read at 0x0000082be990 by goroutine 205:
  github.com/minio/minio/cmd.setCommonHeaders()

Previous write at 0x0000082be990 by main goroutine:
  github.com/minio/minio/cmd.lookupConfigs()
```
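A minimal sketch of one way to make such a global safe, guarding reads and writes with a sync.RWMutex; the actual fix in MinIO may use a different mechanism:

```go
package main

import "sync"

// Reads of a global config that can be reloaded by lookupConfigs() must go
// through a lock (or an atomic value) instead of touching the package-level
// variable directly.
type siteConfig struct{ Region string }

var (
	globalSiteMu sync.RWMutex
	globalSite   siteConfig
)

func setGlobalSite(c siteConfig) {
	globalSiteMu.Lock()
	globalSite = c
	globalSiteMu.Unlock()
}

func getGlobalSite() siteConfig {
	globalSiteMu.RLock()
	defer globalSiteMu.RUnlock()
	return globalSite
}

func main() {
	setGlobalSite(siteConfig{Region: "us-east-1"})
	_ = getGlobalSite()
}
```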
2024-05-16 16:13:47 -07:00
Poorna
aa3fde1784 Add ListObjectsV2 unit test (#19753)
for PR: #19725
2024-05-15 20:40:51 -07:00
Harshavardhana
0b3eb7f218 add more deadlines and pass around context under most situations (#19752) 2024-05-15 15:19:00 -07:00
Anis Eleuch
69c9496c71 Upgrade github.com/minio/pkg/v2 and other deps (#19747) 2024-05-15 11:04:40 -07:00
Klaus Post
b792b36495 Add Veeam storage class override (#19748)
Recent Veeam is very picky about storage class names. Add `_MINIO_VEEAM_FORCE_SC` env var.

It will override the storage class returned by the storage backend if it is non-standard
and we detect a Veeam client by checking the User Agent.

Applies to HeadObject/GetObject/ListObject*
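A rough sketch of the described behavior, assuming a simple substring match on the User-Agent; the real detection logic in MinIO may differ:

```go
package main

import (
	"net/http"
	"os"
	"strings"
)

// When the env var is set and the client looks like Veeam, the advertised
// storage class is overridden on responses.
func storageClassFor(r *http.Request, backendSC string) string {
	forced := os.Getenv("_MINIO_VEEAM_FORCE_SC")
	if forced != "" && strings.Contains(r.UserAgent(), "Veeam") {
		return forced
	}
	return backendSC
}

func main() {
	req, _ := http.NewRequest(http.MethodHead, "http://localhost:9000/bucket/object", nil)
	req.Header.Set("User-Agent", "Veeam/12.0")
	_ = storageClassFor(req, "STANDARD")
}
```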
2024-05-15 11:04:16 -07:00
Harshavardhana
d3db7d31a3 fix: add deadlines for all synchronous REST callers (#19741)
add deadlines that can be dynamically changed via
the drive max timeout values.

Bonus: optimize "file not found" case and hung drives/network - circuit break the check and return right
away instead of waiting.
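A minimal illustration of the approach, assuming a configurable drive max timeout and using context.WithTimeout; this is not MinIO's actual REST client code:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Wrap each synchronous storage REST call with a deadline derived from the
// drive max timeout, so a hung drive or network cannot block the caller
// indefinitely.
func callWithDeadline(parent context.Context, driveMaxTimeout time.Duration, call func(context.Context) error) error {
	ctx, cancel := context.WithTimeout(parent, driveMaxTimeout)
	defer cancel()
	return call(ctx)
}

func main() {
	err := callWithDeadline(context.Background(), 30*time.Second, func(ctx context.Context) error {
		select {
		case <-time.After(10 * time.Millisecond): // the simulated remote call
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	})
	fmt.Println(err)
}
```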
2024-05-15 09:52:29 -07:00
Shireesh Anjal
c05ca63158 Fix crash on /minio/metrics/v3?list (#19745)
An unchecked map access was causing panic.
2024-05-15 09:06:35 -07:00
Klaus Post
6d3e0c7db6 Tweak one way stream ping (#19743)
Do not log errors on oneway streams when sending ping fails. Instead cancel the stream.

This also makes sure pings are sent when blocked on sending responses.

I will do a separate PR that includes this and adds pings to two-way streams as well as tests for pings.
2024-05-15 08:39:21 -07:00
Shireesh Anjal
0e59e50b39 Capture ttfb api metrics only for GetObject (#19733)
as that is the only API where the TTFB metric is beneficial, and
capturing this for all APIs exponentially increases the response size in
large clusters.
2024-05-14 23:25:13 -07:00
Klaus Post
d4b391de1b Add PutObject Ring Buffer (#19605)
Replace the `io.Pipe` from streamingBitrotWriter -> CreateFile with a fixed size ring buffer.

This will add an output buffer for encoded shards to be written to disk - potentially via RPC.

This will remove blocking when `(*streamingBitrotWriter).Write` is called, and it writes hashes and data.

With current settings, the write looks like this:

```
Outbound
┌───────────────────┐             ┌────────────────┐               ┌───────────────┐                      ┌────────────────┐
│                   │   Parr.     │                │  (http body)  │               │                      │                │
│ Bitrot Hash       │     Write   │      Pipe      │      Read     │  HTTP buffer  │    Write (syscall)   │  TCP Buffer    │
│ Erasure Shard     │ ──────────► │  (unbuffered)  │ ────────────► │   (64K Max)   │ ───────────────────► │    (4MB)       │
│                   │             │                │               │  (io.Copy)    │                      │                │
└───────────────────┘             └────────────────┘               └───────────────┘                      └────────────────┘
```

We write a Hash (32 bytes). Since the pipe is unbuffered, it will block until the 32 bytes have 
been delivered to the TCP buffer, and the next Read hits the Pipe.

Then we write the shard data. This will typically be bigger than 64KB, so it will block until two blocks 
have been read from the pipe.

When we insert a ring buffer:

```
Outbound
┌───────────────────┐             ┌────────────────┐               ┌───────────────┐                      ┌────────────────┐
│                   │             │                │  (http body)  │               │                      │                │
│ Bitrot Hash       │     Write   │  Ring Buffer   │      Read     │  HTTP buffer  │    Write (syscall)   │  TCP Buffer    │
│ Erasure Shard     │ ──────────► │    (2MB)       │ ────────────► │   (64K Max)   │ ───────────────────► │    (4MB)       │
│                   │             │                │               │  (io.Copy)    │                      │                │
└───────────────────┘             └────────────────┘               └───────────────┘                      └────────────────┘
```

The hash+shard will fit within the ring buffer, so writes will not block - but will complete after a 
memcopy. Reads can fill the 64KB buffer if there is data for it.

If the network is congested, the ring buffer will become filled, and all syscalls will be on full buffers.
Only when the ring buffer is filled will erasure coding start blocking.

Since there is always "space" to write output data, we remove the parallel writing since we are 
always writing to memory now, and the goroutine synchronization overhead probably not worth taking. 

If the output were blocked in the existing, we would still wait for it to unblock in parallel write, so it would 
make no difference there - except now the ring buffer smoothes out the load.

There are some micro-optimizations we could look at later. The biggest is that, in most cases, 
we could encode directly to the ring buffer - if we are not at a boundary. Also, "force filling" the 
Read requests (i.e., blocking until a full read can be completed) could be investigated and maybe 
allow concurrent memory on read and write.
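A small standalone demonstration of the starting point described above: an unbuffered io.Pipe makes even a 32-byte hash write block until the reader side picks it up, which is exactly the hand-off the ring buffer removes for writes that fit in the buffer.

```go
package main

import (
	"fmt"
	"io"
	"time"
)

func main() {
	pr, pw := io.Pipe()
	done := make(chan struct{})
	go func() {
		pw.Write(make([]byte, 32)) // blocks until the reader below consumes it
		close(done)
		pw.Close()
	}()
	time.Sleep(50 * time.Millisecond)
	select {
	case <-done:
		fmt.Println("write completed before any read (not expected for io.Pipe)")
	default:
		fmt.Println("write is still blocked, waiting for a reader")
	}
	io.Copy(io.Discard, pr) // start reading: unblocks the writer and drains the pipe
	<-done
}
```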
2024-05-14 17:11:04 -07:00
Shubhendu
de4d3dac00 Added tests for IAM policies for bucket operations (#19734)
* Added tests for bucket access policies

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>

* move to correct category of tests

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>

---------

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-05-14 08:43:07 -07:00
Olli Janatuinen
534e7161df SFTP: Correctly inform client about unsupported commands (#19735) 2024-05-14 03:29:30 -07:00
Harshavardhana
9b219cd646 fix: return quorum based error, temporary failures must be ignored (#19732) 2024-05-14 03:29:17 -07:00
Shireesh Anjal
3bab4822f3 Add logger webhook metrics in metrics-v3 (#19515)
endpoint: /minio/metrics/v3/cluster/webhook
metrics:
- failed_messages (counter)
- online (gauge)
- queue_length (gauge)
- total_messages (counter)
2024-05-14 00:27:33 -07:00
coderwander
3c5f2d8916 fix some typos in struct name comments (#19513)
Signed-off-by: coderwander <770732124@qq.com>
2024-05-14 00:26:50 -07:00
Shireesh Anjal
5808190398 Add more metrics to v3/cluster/erasure-set (#19714)
Metrics being added:

- read_tolerance: No of drive failures that can be tolerated without
  disrupting read operations
- write_tolerance: No of drive failures that can be tolerated without
  disrupting write operations
- read_health: Health of the erasure set in a pool for read operations
  (1=healthy, 0=unhealthy)
- write_health: Health of the erasure set in a pool for write operations
  (1=healthy, 0=unhealthy)
2024-05-14 00:25:56 -07:00
Shireesh Anjal
b2a82248b1 Move /system/go to /debug/go (#19707) 2024-05-14 00:25:37 -07:00
dependabot[bot]
4e5fcca8b9 build(deps): bump golang.org/x/net (#23)
Bumps the go_modules group with 1 update in the /docs/debugging/s3-verify directory: [golang.org/x/net](https://github.com/golang/net).


Updates `golang.org/x/net` from 0.24.0 to 0.25.0
- [Commits](https://github.com/golang/net/compare/v0.24.0...v0.25.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
  dependency-group: go_modules
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-13 10:59:52 -07:00
Klaus Post
c36eaedb93 Re-add "Fix incorrect merging of slash-suffixed objects (#19729)
Adds regression test for #19699

Failures are a bit luck based, since it requires objects to be placed on different sets.

However this generates a failure prior to #19699

* Revert "Revert "Fix incorrect merging of slash-suffixed objects (#19699)""

This reverts commit f30417d9a8.

* Don't override when suffix doesn't match. Instead rely on quorum for each.
2024-05-13 09:30:24 -07:00
Poorna
7752b03add optimize max-keys=2 listing for spark workloads (#19725)
to return results appropriately for versioned buckets, especially
when underlying prefixes have been deleted
2024-05-13 07:57:42 -07:00
jiuker
01bfc78535 Optimization: reuse hashedSecret when LookupConfig (#19724) 2024-05-12 22:52:27 -07:00
Shireesh Anjal
074d70112d Consolidate drive health related metrics into single metric (#19706)
Instead of having "online" and "healing" as two metrics, replace with a
single metric "health" which can have following values:

0 = offline
1 = healthy
2 = healing
2024-05-12 10:23:50 -07:00
Harshavardhana
e8d14c0d90 verify preconditions during CompleteMultipart (#19713)
Bonus: hold the write lock properly to apply
optimistic concurrency during NewMultipartUpload()
2024-05-10 17:31:22 -07:00
Shireesh Anjal
60d7e8143a Move /cluster/audit to /audit (#19708)
As the audit metrics are server level and not 
overall cluster level.
2024-05-10 07:50:39 -07:00
Klaus Post
9667a170de Add usage cache cleanup and lower forced top compaction (#19719)
Lower forced compaction to 250K entries.

If there are more than 250K entries on the top level, force compact it and log an error.
2024-05-10 07:49:50 -07:00
Shubhendu
abae30f9e1 Added decom test with KES using sse-s3 and sse-kms (#19695)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-05-10 01:24:14 -07:00
Minio Trusted
f9311bc9d1 Update yaml files to latest version RELEASE.2024-05-10T01-41-38Z 2024-05-10 02:00:49 +00:00
Harshavardhana
b598402738 fix: unexpected credentials missing while passing 2024-05-09 18:41:38 -07:00
Harshavardhana
bd026b913f remove references for MINIO_SERVER_URL 2024-05-09 17:22:36 -07:00
Harshavardhana
72ff69d9bb add log-prefix name for specifying custom log-name (#19712) 2024-05-09 14:29:37 -07:00
Harshavardhana
f30417d9a8 Revert "Fix incorrect merging of slash-suffixed objects (#19699)"
This reverts commit 2f7a10ab31.
2024-05-09 12:32:05 -07:00
jiuker
47a4ad3cd7 fix: truncate Expiration to second when Add ServiceAccount (#19674)
Truncate Expiration to the second when adding a ServiceAccount.
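A minimal illustration of the truncation itself with the standard library; the surrounding service-account logic is omitted:

```go
package main

import (
	"fmt"
	"time"
)

// Drop sub-second precision from the expiration before storing/comparing it.
func main() {
	expiration := time.Now().Add(24 * time.Hour)
	fmt.Println(expiration.Truncate(time.Second))
}
```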
2024-05-09 11:08:04 -07:00
Klaus Post
2f7a10ab31 Fix incorrect merging of slash-suffixed objects (#19699)
If two objects share everything but one object has a slash suffix, those would be merged in listings, 
with secondary properties used for a tiebreak.

Example: An object with the key `prefix/obj` would be merged with an object named `prefix/obj/`. 
While this violates the [no object can be a prefix of another](https://min.io/docs/minio/linux/operations/concepts/thresholds.html#conflicting-objects), let's resolve these.

If we have an object with 'name' and a directory named 'name/' discard the directory only - but allow objects 
of 'name' and 'name/' (xldir) to be uniquely returned.

Regression from #15772
2024-05-09 11:05:45 -07:00
Harshavardhana
b534dc69ab deprecate unexpected healing failed counters (#19705)
simplify this to avoid verbose metrics, and make
room for valid metrics to be reported for alerting
etc.
2024-05-09 11:04:41 -07:00
Harshavardhana
7b7d2ea7d4 pass around correct endpoint while registering remote storage (#19710) 2024-05-09 11:03:54 -07:00
Aditya Manthramurthy
e00de1c302 ldap-import: Add additional logs (#19691)
These logs are being added to provide better debugging of LDAP
normalization on IAM import.
2024-05-09 10:52:53 -07:00
Harshavardhana
3549e583a6 results must be a single channel to avoid overwriting healing.bin (#19702) 2024-05-09 10:15:03 -07:00
Andi
f5e3eedf34 chore: use errors.New to replace fmt.Errorf with no parameters (#19568)
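The change in a nutshell, as a small illustrative snippet:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// Before: fmt.Errorf with no formatting verbs or arguments.
	errOld := fmt.Errorf("bucket not found")
	// After: errors.New is the idiomatic (and slightly cheaper) choice
	// when there is nothing to format.
	errNew := errors.New("bucket not found")
	fmt.Println(errOld, errNew)
}
```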
Signed-off-by: ChengenH <hce19970702@gmail.com>
2024-05-09 01:44:07 -07:00
Harshavardhana
519dbfebf6 upgrade to go1.22.x 2024-05-09 01:36:00 -07:00
Harshavardhana
9a267f9270 allow caller context during reloads() to cancel (#19687)
canceled callers might linger around longer and
can potentially overwhelm the system. Instead,
provide a caller context so that canceled callers
don't hold on to them.

Bonus: we have no reason to cache errors; we should
never cache errors, otherwise quorum errors can
creep in unexpectedly. On invalidation, the cache
should hit the actual resources instead.
2024-05-08 17:51:34 -07:00
Anis Eleuch
67bd71b7a5 grid: Fix a window of a disconnected node not marked as offline (#19703)
LastPong is saved as nanoseconds after a connection or reconnection but
saved as seconds when receiving a pong message. The code deciding if
a pong is too old can be skewed since it assumes LastPong is only in
seconds.
2024-05-08 17:50:13 -07:00
Klaus Post
ec49fff583 Accept multipart checksums with part count (#19680)
Accept multipart uploads where the combined checksum provides the expected part count.

It seems this was added by AWS to make the API more consistent, even if the 
data is entirely superfluous on multiple levels.

Improves AWS S3 compatibility.
2024-05-08 09:18:34 -07:00
Andreas Auernhammer
8b660e18f2 kms: add support for MinKMS and remove some unused/broken code (#19368)
This commit adds support for MinKMS. Now, there are three KMS
implementations in `internal/kms`: Builtin, MinIO KES and MinIO KMS.

Adding another KMS integration required some cleanup. In particular:
 - Various KMS APIs that haven't been and are not used have been
   removed. A lot of the code was broken anyway.
 - Metrics are now monitored by the `kms.KMS` itself. For basic
   metrics this is simpler than collecting metrics for external
   servers. In particular, each KES server returns its own metrics
   and no cluster-level view.
 - The builtin KMS now uses the same en/decryption implemented by
   MinKMS and KES. It still supports decryption of the previous
   ciphertext format. It's backwards compatible.
 - Data encryption keys now include a master key version since MinKMS
   supports multiple versions (~4 billion in total and 10000 concurrent)
   per key name.

Signed-off-by: Andreas Auernhammer <github@aead.dev>
2024-05-07 16:55:37 -07:00
Harshavardhana
981497799a return appropriate error upon reaching maxClients() (#19669) 2024-05-07 13:41:56 -07:00
Minio Trusted
b9bdc17465 Update yaml files to latest version RELEASE.2024-05-07T06-41-25Z 2024-05-07 16:59:52 +00:00
Olli Janatuinen
b413ff9fdb Support user certificate based authentication on SFTP (#19650) 2024-05-06 23:41:25 -07:00
Harshavardhana
6a15580817 fix: collect quorum errors for deletePrefix() (#19685)
do not return error for single drive being offline.
2024-05-06 22:44:46 -07:00
Cesar N
39633a5581 Set Console Redirect URL env variable (#19683) 2024-05-06 19:47:59 -07:00
Alex
1e83f15e2f Update Console version to v1.4.0 (#19684) 2024-05-06 19:47:37 -07:00
Harshavardhana
888d2bb1d8 support ETag value to be '*' (#19682)
This supports '*' to comply with AWS S3
behavior for

- 'If-Match: *'
- 'If-None-Match: *'
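A sketch of the '*' semantics being added, assuming the usual HTTP/S3 conditional rules (If-Match: * matches any existing object, If-None-Match: * fails whenever the object exists); the helper names are illustrative:

```go
package main

import "fmt"

func ifMatchSatisfied(headerVal, etag string, exists bool) bool {
	if headerVal == "*" {
		return exists
	}
	return exists && headerVal == etag
}

func ifNoneMatchSatisfied(headerVal, etag string, exists bool) bool {
	if headerVal == "*" {
		return !exists
	}
	return !exists || headerVal != etag
}

func main() {
	fmt.Println(ifMatchSatisfied("*", `"abc"`, true))     // true: the object exists
	fmt.Println(ifNoneMatchSatisfied("*", `"abc"`, true)) // false: would yield 412
}
```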
2024-05-06 17:08:42 -07:00
Klaus Post
847ee5ac45 Make WalkDir return errors (#19677)
If used, `opts.Marker` will cause many missed entries, since results are returned
unsorted and pools are serialized.

Switch to fully concurrent listing and merging across pools to return sorted entries.
2024-05-06 13:27:52 -07:00
jiuker
9a9a49aa84 fix: Ignore AWSAccessKeyId check for SignV2 policy condition (#19673) 2024-05-06 03:52:41 -07:00
Harshavardhana
a03ca80269 support 'mc support perf object' with root login disabled (#19672)
It is expected that whoever is using credentials with
the proper set of permissions must be able to run

`mc support perf object`

while root login is disabled.
2024-05-06 02:45:10 -07:00
Harshavardhana
523bd769f1 add support for specific error response for InvalidRange (#19668)
fixes #19648

AWS S3 returns the actual object size as part of the XML
response for the InvalidRange error; this is apparently used
by SDKs to retry the request without the range.
2024-05-05 09:56:21 -07:00
Harshavardhana
8ff70ea5a9 turn-off coloring if we have std{err,out} dumb terminals (#19667) 2024-05-03 17:17:57 -07:00
Harshavardhana
da3e7747ca avoid using 10MiB EC buffers in maxAPI calculations (#19665)
max requests per node has a more conservative value,
causing premature serialization of the calls; avoid it
for newer deployments.
2024-05-03 13:08:20 -07:00
Klaus Post
4afb59e63f fix: walk missing entries with opts.Marker set (#19661)
`opts.Marker` causes many missed entries if used, since results are returned unsorted and pools are serialized.

Switch to do fully concurrent listing and merging across pools to return sorted entries.

Returning errors on listings is impossible with the current API, so document that.

Return an error at once if no drives are found instead of just returning an empty listing and no error.
2024-05-03 10:26:51 -07:00
Harshavardhana
1526e7ece3 extend server config.yaml to support per pool set drive count (#19663)
This is to support deployments migrating from a multi-pooled
wider stripe to lower stripe. MINIO_STORAGE_CLASS_STANDARD
is still expected to be same for all pools. So you can satisfy
adding custom drive count based pools by adjusting the storage
class value.

```
version: v2
address: ':9000'
rootUser: 'minioadmin'
rootPassword: 'minioadmin'
console-address: ':9001'
pools: # Specify the nodes and drives with pools
  -
    args:
        - 'node{11...14}.example.net/data{1...4}'
  -
    args:
        - 'node{15...18}.example.net/data{1...4}'
  -
    args:
        - 'node{19...22}.example.net/data{1...4}'
  -
    args:
        - 'node{23...34}.example.net/data{1...10}'
    set-drive-count: 6
```
2024-05-03 08:54:03 -07:00
Krishnan Parthasarathi
6c07bfee8a With retention, skip actions expiring all versions (#19657)
ILM actions due to ExpiredObjectDeleteAllVersions and
DelMarkerExpiration are ignored when object locking is enabled on a
bucket.
Note: This applies to object versions which may not have retention
configured on them. This applies to all object versions in this bucket,
including those created before the retention config was applied.
2024-05-03 04:18:58 -07:00
Poorna
446c760820 replication: Avoid proxying if requested object is a deletemarker (#19656)
Fixes: #19654
2024-05-02 13:15:54 -07:00
Shireesh Anjal
04f92f1291 Change endpoint format for per-bucket metrics (#19655)
Per-bucket metrics endpoints always start with /bucket and the bucket
name is appended to the path. e.g. if the collector path is /bucket/api,
the endpoint for the bucket "mybucket" would be
/minio/metrics/v3/bucket/api/mybucket

Change the existing bucket api endpoint accordingly from /api/bucket to
/bucket/api
2024-05-02 10:37:57 -07:00
Klaus Post
4a60a7794d Use better gzip for log rotate (#19651)
Should be 2x faster with same usage.
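The commit doesn't name the implementation, but a common drop-in replacement with the same API as the standard library is github.com/klauspost/compress/gzip; a hedged sketch of using it unchanged:

```go
package main

import (
	"bytes"
	"log"

	gzip "github.com/klauspost/compress/gzip" // drop-in for compress/gzip
)

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("rotated log contents")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("compressed %d bytes", buf.Len())
}
```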
2024-05-02 04:38:40 -07:00
Bala FA
e5b16adb1c Add cluster IAM metrics in metrics-v3 (#19595)
Signed-off-by: Bala.FA <bala@minio.io>
2024-05-02 01:20:42 -07:00
Harshavardhana
402a3ac719 support compression after rotation of logs (#19647) 2024-05-01 15:38:07 -07:00
Aditya Manthramurthy
f3d61c51fc fix: Filter out cust. AssumeRole Token for audit (#19646)
The `Token` parameter is a sensitive value that should not be output in the Audit log for STS AssumeRoleWithCustomToken API.

Bonus: Add a simple tool that echoes audit logs to the console.
2024-05-01 14:31:13 -07:00
Klaus Post
0cde17ae5d Return listing when exceeding min disk errors (#19644)
When listing, with drives returning `errFileNotFound`, `errVolumeNotFound`, or `errUnformattedDisk`, 
we could end up with fewer than `minDisks` drives left.

This would result in a quorum never being reachable for any object. Therefore, the listing 
would continue, but no results would ever be produced.

Include `fnf` in the mindisk check since it is incremented on these errors. This will stop 
listing when minDisks are left.

Allow `opts.minDisks` to not return errVolumeNotFound or errFileNotFound and return that. 
That will allow for good results even if disks return something else.

We switch `errUnformattedDisk` to a regular error. If we have enough of those, we should just fail.
2024-05-01 10:59:08 -07:00
Harshavardhana
8c1bba681b add logrotate support for MinIO logs (#19641) 2024-05-01 10:57:52 -07:00
Klaus Post
dbfb5e797b Wait one minute after startup to restart decommissioning (#19645)
Typically not all drives are connected, so we delay 3 minutes before resuming.
This greatly reduces risk of starting to list unconnected drives, or drives we risk being disconnected soon.

This delay is not applied when starting with an admin call.
2024-05-01 08:18:21 -07:00
Harshavardhana
08ff702434 enhance ListSVCs() API to return more info to avoid InfoSvc() (#19642)
Console-UI-like applications rely on a combination of

ListServiceAccounts() and InfoServiceAccount() to populate
UI elements; however, individually these calls can be slow,
causing the entire UI to load sluggishly.
2024-05-01 05:41:13 -07:00
Klaus Post
0e2148264a Fix --sftp "mac-algos=..." overwrites cipher algorithms (#19643)
Setting MAC algorithms overwrites cipher algorithms.

Followup to #19636
2024-05-01 04:07:40 -07:00
Minio Trusted
a75f42344b Update yaml files to latest version RELEASE.2024-05-01T01-11-10Z 2024-05-01 02:45:52 +00:00
Krishnan Parthasarathi
7926401cbd ilm: Handle DeleteAllVersions action differently for DEL markers (#19481)
i.e., this rule element doesn't apply to DEL markers.

This is a breaking change to how ExpiredObjectDeleteAllVersions
functions today. This is necessary to avoid the following highly probable
footgun scenario in the future.

Scenario:
The user uses tag-based filtering to select an object's time to live (TTL). 
The application sometimes deletes objects, too, making its latest
version a DEL marker. The previous implementation skipped tag-based filters
if the newest version was a DEL marker, voiding the tag-based TTL. The user is
surprised to find objects that have expired sooner than expected.

* Add DelMarkerExpiration action

This ILM action removes all versions of an object if its
latest version is a DEL marker.

```xml
<DelMarkerObjectExpiration>
    <Days> 10 </Days>
</DelMarkerObjectExpiration>
```

1. Applies only to objects whose,
  • The latest version is a DEL marker.
  • satisfies the number of days criteria
2. Deletes all versions of this object
3. Associated rule can't have tag-based filtering

Includes,
- New bucket event type for deletion due to DelMarkerExpiration
2024-04-30 18:11:10 -07:00
Harshavardhana
8161411c5d fix: a crash in RemoveReplication target (#19640)
calling a remote target remove with a perfectly
well constructed ARN can lead to a crash for a bucket
with no replication configured.

This PR fixes, and adds a crash check for ImportMetadata
as well.
2024-04-30 18:09:56 -07:00
Klaus Post
f64dea2aac Allow custom SFTP algorithm selection (#19636)
Algorithms are comma-separated.
Note that the valid values do not in all cases represent the default values.

`--sftp=pub-key-algos=...` specifies the supported client public key
authentication algorithms. Note that this doesn't include certificate types
since those use the underlying algorithm. This list is sent to the client if
it supports the server-sig-algs extension. Order is irrelevant.

Valid values
```
ssh-ed25519
sk-ssh-ed25519@openssh.com
sk-ecdsa-sha2-nistp256@openssh.com
ecdsa-sha2-nistp256
ecdsa-sha2-nistp384
ecdsa-sha2-nistp521
rsa-sha2-256
rsa-sha2-512
ssh-rsa
ssh-dss
```

`--sftp=kex-algos=...` specifies the supported key-exchange algorithms in preference order.

Valid values:

```
curve25519-sha256
curve25519-sha256@libssh.org
ecdh-sha2-nistp256
ecdh-sha2-nistp384
ecdh-sha2-nistp521
diffie-hellman-group14-sha256
diffie-hellman-group16-sha512
diffie-hellman-group14-sha1
diffie-hellman-group1-sha1
```

`--sftp=cipher-algos=...` specifies the allowed cipher algorithms.
If unspecified then a sensible default is used.

Valid values:
```
aes128-ctr
aes192-ctr
aes256-ctr
aes128-gcm@openssh.com
aes256-gcm@openssh.com
chacha20-poly1305@openssh.com
arcfour256
arcfour128
arcfour
aes128-cbc
3des-cbc
```

`--sftp=mac-algos=...` specifies a default set of MAC algorithms in preference order.
This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed because they have
reached the end of their useful life.

Valid values:

```
hmac-sha2-256-etm@openssh.com
hmac-sha2-512-etm@openssh.com
hmac-sha2-256
hmac-sha2-512
hmac-sha1
hmac-sha1-96
```
2024-04-30 08:15:45 -07:00
Shubhendu
6579304d8c Suppress metrics with zero values (#19638)
This reduces the size of the data in the metrics listing
response. While graphing, we can default these metrics to
a zero value if not found.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-04-30 08:05:22 -07:00
jiuker
6bb10a81a6 avoid data race for testing (#19635) 2024-04-30 08:03:35 -07:00
Klaus Post
3cf8a7c888 Always unfreeze when connection dies (#19634)
Unfreeze as soon as the incoming connection is terminated and don't wait for everything to complete.

We don't want to keep the services frozen if something becomes stuck.
2024-04-29 10:39:04 -07:00
Minio Trusted
2e38bb5175 Update yaml files to latest version RELEASE.2024-04-28T17-53-50Z 2024-04-29 17:09:28 +00:00
Harshavardhana
a372c6a377 a bunch of fixes for error handling (#19627)
- handle errFileCorrupt properly
- micro-optimization of sending done() response quicker
  to close the goroutine.
- fix logger.Event() usage in a couple of places
- handle the rest of the client to return a different error other than
  lastErr() when the client is closed.
2024-04-28 10:53:50 -07:00
Harshavardhana
93b2f8a0c5 helm release v5.2.0
Signed-off-by: Harshavardhana <harsha@minio.io>
2024-04-28 03:14:37 -07:00
opencmit2
1a6568a25d helm support loadBalancerSourceRanges and externalTrafficPolicy (#19245)
Signed-off-by: JinXinWang <opencmit2@126.com>
2024-04-28 03:05:53 -07:00
Poorna
9e95703efc iam reload policy mapping of STS users properly (#19626) 2024-04-27 03:04:10 -07:00
Anis Eleuch
d8e05aca81 heal/list: Fix rare incomplete listing with flaky internode connections (#19625)
listPathRaw() counts errDiskNotFound as a valid error to indicate a
listing stream end. However, storage.WalkDir() is allowed to return
errDiskNotFound anytime since grid.ErrDisconnected is converted to
errDiskNotFound.

This affects fresh disk healing and should affect S3 listing as well.
2024-04-26 12:52:52 -07:00
Praveen raj Mani
410a1ac040 Handle failures in pool rebalancing (#19623) 2024-04-26 12:29:28 -07:00
Shireesh Anjal
4caa3422bd Add process metrics in metrics-v3 (#19612)
endpoint: /minio/metrics/v3/system/process
metrics:
- locks_read_total
- locks_write_total
- cpu_total_seconds
- go_routine_total
- io_rchar_bytes
- io_read_bytes
- io_wchar_bytes
- io_write_bytes
- start_time_seconds
- uptime_seconds
- file_descriptor_limit_total
- file_descriptor_open_total
- syscall_read_total
- syscall_write_total
- resident_memory_bytes
- virtual_memory_bytes
- virtual_memory_max_bytes

Since the standard process collector implements only a subset of these
metrics, remove it and implement our own custom process collector that
captures all the process metrics we need.
2024-04-26 09:07:23 -07:00
Dennis Marttinen
a658b976f5 helm: fix port types in CiliumNetworkPolicy (#19232) 2024-04-26 00:50:24 -07:00
Anis Eleuch
135874ebdc heal: Avoid marking a bucket as done when remote drives are offline (#19587) 2024-04-25 23:32:14 -07:00
Harshavardhana
f4f1c42cba deprecate usage of sha256-simd (#19621)
go1.21 already implements the necessary optimizations
2024-04-25 23:31:35 -07:00
Poorna
e7aa26dc29 fix: allow DeleteObject unversioned objects with insufficient read quorum (#19581)
Since the object is being permanently deleted, the lack of read quorum should not
matter as long as sufficient disks are online to complete the deletion with parity
requirements.

If several pools have the same object with insufficient read quorum, attempt to
delete object from all the pools where it exists
2024-04-25 17:31:12 -07:00
Harshavardhana
c54ffde568 add metrics ioerror counter for alerts on I/O errors (#19618) 2024-04-25 15:01:31 -07:00
Anis Eleuch
9a3c992d7a heal: Fix regression in healing a new fresh drive (#19615) 2024-04-25 14:55:41 -07:00
Aditya Manthramurthy
0c855638de fix: LDAP init. issue when LDAP server is down (#19619)
At server startup, LDAP configuration is validated against the LDAP
server. If the LDAP server is down at that point, we need to cleanly
disable LDAP configuration. Previously, LDAP would remain configured but
error out in strange ways because initialization did not complete
without errors.
2024-04-25 14:28:16 -07:00
Cesar N
943d815783 Update Console UI to v1.3.0 (#19617) 2024-04-25 12:12:03 -07:00
Ramon de Klein
4c0acba62d Fixes an internal error while force-deleting a bucket (#19614) 2024-04-25 09:27:27 -07:00
Aditya Manthramurthy
62c3cdee75 fix: IAM LDAP access key import bug (#19608)
When importing access keys (i.e. service accounts) for LDAP accounts,
we are requiring groups to exist under one of the configured group base
DNs. This is not correct. This change fixes this by only checking for
existence and storing the normalized form of the group DN - we do not
return an error if the group is not under a base DN.

Test is updated to illustrate an import failure that would happen
without this change.
2024-04-25 08:50:16 -07:00
Aditya Manthramurthy
3212d0c8cd fix: IAM import for LDAP should replace mappings (#19607)
Existing IAM import logic for LDAP creates new mappings when the
normalized form of the mapping key differs from the existing mapping key
in storage. This change effectively replaces the existing mapping key by
first deleting it and then recreating with the normalized form of the
mapping key.

For e.g. if an older deployment had a policy mapped to a user DN -

`UID=alice1,OU=people,OU=hwengg,DC=min,DC=io`

instead of adding a mapping for the normalized form -

`uid=alice1,ou=people,ou=hwengg,dc=min,dc=io`

we should replace the existing mapping.

This ensures that duplicates mappings won't remain after the import.

Some additional cleanup cases are also covered. If there are multiple
mappings for the name normalized key such as:

`UID=alice1,OU=people,OU=hwengg,DC=min,DC=io`
`uid=alice1,ou=people,ou=hwengg,DC=min,DC=io`
`uid=alice1,ou=people,ou=hwengg,dc=min,dc=io`

we check if the list of policies mapped to all these keys are exactly
the same, and if so remove all of them and create a single mapping with
the normalized key. However, if the policies mapped to such keys differ,
the import operation returns an error as the server cannot automatically
pick the "right" list of policies to map.
2024-04-25 08:49:53 -07:00
Harshavardhana
1d03bea965 support preserving renameData() on inlined content during overwrites (#19609)
extending #19548 to inlined-data as well.
2024-04-24 18:14:08 -07:00
Klaus Post
fbfeb59658 xl-meta: Allow combining multiple unversioned objects (#19604)
When inspecting files like `.minio.sys/pool.bin` that may be present on multiple sets, use signature to separate them.

Also fixes null versions to actually be useful with `-export -combine`.
2024-04-24 10:56:22 -07:00
Ramon de Klein
701da1282a Validates PostgreSQL table name (#19602) 2024-04-24 10:51:07 -07:00
jiuker
df93ff92ba fix: site-replication will reset group status when add user (#19594) 2024-04-24 08:54:24 -07:00
Shireesh Anjal
77d5331e85 Fix few wrongly defined metric types (#19586)
`minio_cluster_webhook_queue_length` was wrongly defined as `counter`
whereas it should be `gauge`

Following were wrongly defined as `gauge` when they should actually be
`counter`:

- minio_bucket_replication_sent_bytes
- minio_bucket_replication_received_bytes
- minio_bucket_replication_total_failed_bytes
- minio_bucket_replication_total_failed_count
2024-04-23 23:19:40 -07:00
Bala FA
14cdadfb56 Add cluster notification metrics in metrics-v3 (#19533)
Signed-off-by: Bala.FA <bala@minio.io>
2024-04-23 21:10:35 -07:00
Harshavardhana
f3a52cc195 simplify listener implementation setup customizations in right place (#19589) 2024-04-23 21:08:47 -07:00
Aditya Manthramurthy
7640cd24c9 fix: avoid some IAM import errors if LDAP enabled (#19591)
When LDAP is enabled, previously we were:

- rejecting creation of users and groups via the IAM import functionality

- throwing a `not a valid DN` error when non-LDAP group mappings are present

This change allows for these cases as we need to support situations
where the MinIO server contains users, groups and policy mappings
created before LDAP was enabled.
2024-04-23 18:23:08 -07:00
Shireesh Anjal
f7b665347e Add system CPU metrics to metrics-v3 (#19560)
endpoint: /minio/metrics/v3/system/cpu

metrics:
- minio_system_cpu_avg_idle
- minio_system_cpu_avg_iowait
- minio_system_cpu_load
- minio_system_cpu_load_perc
- minio_system_cpu_nice
- minio_system_cpu_steal
- minio_system_cpu_system
- minio_system_cpu_user
2024-04-23 16:56:12 -07:00
Harshavardhana
9693c382a8 make renameData() more defensive during overwrites (#19548)
Instead, upon any error in renameData(), we still
preserve the existing dataDir in some form for
recoverability in strange situations such as
out-of-disk-space errors.

Bonus: avoid running list and heal(); instead allow the
versions disparity to return the actual versions and
uuid to heal. Currently limit this to 100 versions
and fewer disparate objects.

An undo now restores the xl.meta from xl.meta.bkp
during overwrites on such flaky setups.

Bonus: save N depth syscalls by skipping the parents
upon overwrites and versioned updates.

Examples of flaky setups are stretch clusters with regular
packet drops etc.; we need to add some defensive code
around this to avoid dangling objects.
2024-04-23 10:15:52 -07:00
jiuker
ee1047bd52 fix: can't get total disksize for decom status (#19585) 2024-04-23 04:33:28 -07:00
Seiya
5ea5ab162b Remove leading zero strings in return value of (*xlMetaV2)getDataDirs() (#19567)
remove leading zero strings in return value of getDataDirs()
2024-04-22 22:07:37 -07:00
Klaus Post
b5a09ff96b Fix RenameData data race (#19579)
RenameData could start operating on inline data after timing out 
and the call returned due to WithDeadline.

This could cause a recycled buffer to be written over the inline data that was still in use.

Since no writes are in `RenameData` and the call is canceled, 
this doesn't present a corruption issue. But a race is a race and 
should be fixed.

Copy inline data to a fresh buffer.
2024-04-22 22:07:19 -07:00
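A minimal sketch of the fix pattern, assuming the race is between a caller reusing its buffer and code that still references the inline data; the `copyInline` helper name is hypothetical:

```go
package main

import "fmt"

// copyInline returns a private copy of the caller's buffer so that a caller
// which timed out (and may reuse its slice) can no longer race with code
// that is still working on the inline data.
func copyInline(inlineData []byte) []byte {
	fresh := make([]byte, len(inlineData))
	copy(fresh, inlineData)
	return fresh
}

func main() {
	caller := []byte("inline payload")
	safe := copyInline(caller)
	caller[0] = 'X'           // the caller reuses its buffer...
	fmt.Println(string(safe)) // ...but the copy still reads "inline payload"
}
```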
Harshavardhana
95c65f4e8f do not panic on rebalance during server restarts (#19563)
This PR takes a feasible approach to handle all the scenarios
where we must avoid returning a "panic."

Instead, we must return "errServerNotInitialized" when
bucketMetadataSys.Get() is called too early, allowing the caller to
retry their operation and wait.

Bonus: fix the way data-usage-cache stores the object.
Instead of storing usage-cache.bin with `.minio.sys/buckets`
as the bucket name, `buckets` must be relative to the bucket
`.minio.sys`, i.e. part of the object name.

Otherwise, there is no way to decommission entries at
`.minio.sys/buckets` and their final erasure set positions.

A bucket must never have a `/` in it. Adds code to read()
from existing data-usage.bin upon upgrade.
2024-04-22 10:49:30 -07:00
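A hedged sketch of the non-panicking pattern; the `bucketMetadataSys` shape and the `Get()` signature here are simplified assumptions:

```go
package main

import (
	"errors"
	"fmt"
)

var errServerNotInitialized = errors.New("server not initialized, please try again")

type bucketMetadataSys struct {
	initialized bool
	meta        map[string]string
}

// Get returns a retryable error instead of panicking while the subsystem is
// still loading, e.g. right after a restart during a rebalance.
func (sys *bucketMetadataSys) Get(bucket string) (string, error) {
	if sys == nil || !sys.initialized {
		return "", errServerNotInitialized
	}
	meta, ok := sys.meta[bucket]
	if !ok {
		return "", errors.New("bucket metadata not found")
	}
	return meta, nil
}

func main() {
	var sys *bucketMetadataSys // not initialized yet
	if _, err := sys.Get("testbucket"); errors.Is(err, errServerNotInitialized) {
		fmt.Println("caller can retry:", err)
	}
}
```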
Harshavardhana
6bfff7532e re-use transport and set stronger backwards compatible Ciphers (#19565)
This PR fixes a few things

- FIPS support was missing for remote transports, so
  MinIO could end up using non-FIPS ciphers in FIPS mode

- Avoids too many transports; they all do the same thing, so
  re-use them to make connection pooling work properly.

- globalTCPOptions must be set before setting transport
  to make sure the client conn deadlines are honored properly.

- GCS warm tier must re-use our transport

- Re-enable trailing headers support.
2024-04-21 04:43:18 -07:00
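A rough sketch of the shared-transport idea using the standard `net/http` and `crypto/tls` packages; the timeouts, pool sizes and cipher list below are illustrative assumptions, not MinIO's actual values:

```go
package main

import (
	"crypto/tls"
	"net"
	"net/http"
	"time"
)

// One shared transport: all remote clients (replication, tiering, internode)
// should reuse it so connection pooling actually works.
var sharedTransport = &http.Transport{
	DialContext: (&net.Dialer{
		Timeout:   5 * time.Second, // connection deadlines set before use
		KeepAlive: 30 * time.Second,
	}).DialContext,
	MaxIdleConnsPerHost: 64,
	IdleConnTimeout:     90 * time.Second,
	TLSClientConfig: &tls.Config{
		MinVersion: tls.VersionTLS12,
		// Restrict to widely supported AEAD suites; a FIPS build would
		// narrow this list further.
		CipherSuites: []uint16{
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
		},
	},
}

// newRemoteClient hands every remote client the same pooled transport.
func newRemoteClient() *http.Client {
	return &http.Client{Transport: sharedTransport, Timeout: time.Minute}
}

func main() { _ = newRemoteClient() }
```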
Harshavardhana
1aa8896ad6 Revert "cleanup: Simplify usage of MinIOSourceProxyRequest (#19553)"
This reverts commit 928c0181bf.

This change was not correct, reverting.

We track 3 states with the ProxyRequest header - if the replication process wants
to know whether an object is already replicated with a HEAD, it shouldn't proxy back
   - Poorna
2024-04-20 02:05:54 -07:00
Krishnan Parthasarathi
3e32ceb39f Disable trailing header support for MinIO tiers (#19561)
AWS S3 trailing header support was recently enabled on the warm tier
client connection to MinIO type remote tiers. With this enabled, we are
seeing the following error message at the HTTP transport layer.

> Unsolicited response received on idle HTTP channel starting with "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request"; err=<nil>

This is an interim fix until we identify the root cause for this behaviour in the
minio-go client package.
2024-04-19 19:32:25 -07:00
dependabot[bot]
ca1350b092 build(deps): bump golang.org/x/net from 0.19.0 to 0.23.0 in /docs/debugging/s3-verify (#19559)
build(deps): bump golang.org/x/net in /docs/debugging/s3-verify

Bumps [golang.org/x/net](https://github.com/golang/net) from 0.19.0 to 0.23.0.
- [Commits](https://github.com/golang/net/compare/v0.19.0...v0.23.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-19 14:24:47 -07:00
jiuker
9205434ed3 fix: ignore signaturev2 for policy header check (#19551) 2024-04-19 09:45:54 -07:00
Harshavardhana
cd50e9b4bc make LRU cache global for internode tokens (#19555) 2024-04-19 09:45:14 -07:00
Klaus Post
ec816f3840 Reduce parallelReader allocs (#19558) 2024-04-19 09:44:59 -07:00
Klaus Post
5f774951b1 Store object EC in metadata header (#19534)
Keep the EC in the header, so it can be retrieved easily for dynamic quorum calculations.

To avoid forcing a full metadata decode on every read, the value will be 0/0 for data written by previous versions.

Size is expected to increase by 2 bytes per version, since all valid values can be represented with 1 byte each.

Example:
```
λ xl-meta xl.meta
{
  "Versions": [
    {
      "Header": {
        "EcM": 4,
        "EcN": 8,
        "Flags": 6,
        "ModTime": "2024-04-17T11:46:25.325613+02:00",
        "Signature": "0a409875",
        "Type": 1,
        "VersionID": "8e03504e11234957b2727bc53eda0d55"
      },
...
```

Not used for operations yet.
2024-04-19 09:43:43 -07:00
Harshavardhana
2ca9befd2a add ILM + site-replication tests (#19554) 2024-04-19 05:48:19 -07:00
Harshavardhana
72f5cb577e optimize ftp/sftp upload() implementations to avoid CPU load (#19552) 2024-04-19 05:23:42 -07:00
Robert Lützner
928c0181bf cleanup: Simplify usage of MinIOSourceProxyRequest (#19553)
This replaces a convoluted condition that ultimately evaluated to

"is this HTTP header present in the request or not?"
2024-04-19 05:23:31 -07:00
Harshavardhana
03767d26da fix: get rid of large buffers (#19549)
these lead to runaway memory usage beyond
what Go's GC can handle; we have to
revisit this differently, so remove
them for now.
2024-04-19 04:26:59 -07:00
Sveinn
108e6f92d4 updating tests to use new mc --enc flags (#19508) 2024-04-19 01:43:09 -07:00
Harshavardhana
d653a59fc0 fix: flaky getHostIP test 2024-04-18 19:09:56 -07:00
Minio Trusted
01bfdf949a Update yaml files to latest version RELEASE.2024-04-18T19-09-19Z 2024-04-18 20:45:59 +00:00
Aditya Manthramurthy
98f7821eb3 fix: ldap: avoid unnecessary import errors (#19547)
Follow up for #19528

If there are multiple existing DN mappings for the same normalized DN
and they all have the same policy mapping value, we pick one of them
instead of returning an import error.
2024-04-18 12:09:19 -07:00
jiuker
2d3898e0d5 add ftp example to helm's values.yaml extraArgs field (#19541) 2024-04-18 08:57:22 -07:00
Aditya Manthramurthy
ae46ce9937 ldap: Normalize DNs when importing (#19528)
This is a change to IAM export/import functionality. For LDAP enabled
setups, it performs additional validations:

- for policy mappings on LDAP users and groups, it ensures that the
corresponding user or group DN exists and if so uses a normalized form
of these DNs for storage

- for access keys (service accounts), it updates (i.e. validates
existence and normalizes) the internally stored parent user DN and group
DNs.

This allows for a migration path for setups in which LDAP mappings have
been stored in previous versions of the server, where the name of the
mapping file stored on drives is not in a normalized form.

An administrator needs to execute:

`mc admin iam export ALIAS`

followed by

`mc admin iam import ALIAS /path/to/export/file`

The validations are stricter and return errors when multiple
mappings are found for the same user/group DN. This is to ensure the
mappings stored by the server are unambiguous and to reduce the
potential for confusion.

Bonus **bug fix**: IAM export of access keys (service accounts) did not
export key name, description and expiration. This is fixed in this
change too.
2024-04-18 08:15:02 -07:00
Anis Eleuch
dfc112c06b list: Fix rare listing continuation freeze (#19524)
Reading the list metacache is not protected by a lock; the code retries when it fails
to read the metacache object. However, it forgot to re-read the metacache object
from the drives, which is necessary, especially if the metacache object is inlined.

This commit will ensure that we always re-read the metacache object from the drives
when it is retrying.
2024-04-17 21:42:11 -07:00
Shireesh Anjal
ca5fab8656 Add cluster audit metrics in metrics-v3 (#19514)
endpoint: /minio/metrics/v3/cluster/audit
metrics:
- failed_messages (counter)
- total_messages (counter)
- target_queue_length (gauge)
2024-04-17 02:18:02 -07:00
Shireesh Anjal
6df76ca73c Add system memory metrics in v3 (#19486)
Following memory metrics will be added under /system/memory

- available
- buffers
- cache
- free
- shared
- total
- used
- used_perc
2024-04-16 22:10:25 -07:00
Harshavardhana
f65dd3e5a2 reload from drive tier-config when in-memory cache is not found (#19527)
avoid probing the tier target while reloading the tier config
2024-04-16 22:09:58 -07:00
Harshavardhana
a8d601b64a allow detaching any non-normalized DN (#19525) 2024-04-16 17:36:43 -07:00
Viktor Szépe
73b4794cf7 Improve typos configuration (#19489) 2024-04-16 17:36:28 -07:00
Klaus Post
e2709ea129 ftp: Return current time for prefixes/directories (#19519) 2024-04-16 17:35:55 -07:00
Allan Roger Reid
740ec80819 At server init, use the correct context when creating the KMS Master Key (#19526) 2024-04-16 17:34:45 -07:00
Harshavardhana
d95e054282 update all deps regular cadence (#19523) 2024-04-16 11:48:56 -07:00
Allan Roger Reid
7c1f9667d1 Use GetDuration() helper for MINIO_KMS_KEY_CACHE_INTERVAL as time.Duration (#19512)
Bonus: Use default duration of 10 seconds if invalid input < time.Second is specified
2024-04-16 08:43:39 -07:00
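A small sketch of such a duration helper with the documented 10-second fallback; `durationFromEnv` is a hypothetical name, not the actual `GetDuration()` implementation:

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// durationFromEnv parses a duration from the environment and falls back to
// the default when the value is missing, malformed, or below one second.
func durationFromEnv(key string, def time.Duration) time.Duration {
	v := os.Getenv(key)
	if v == "" {
		return def
	}
	d, err := time.ParseDuration(v)
	if err != nil || d < time.Second {
		return def
	}
	return d
}

func main() {
	interval := durationFromEnv("MINIO_KMS_KEY_CACHE_INTERVAL", 10*time.Second)
	fmt.Println("KMS key cache interval:", interval)
}
```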
Klaus Post
9246990496 fix: ListObjectVersions returning duplicates when resuming with null version id (#19518)
When resuming a versioned listing where `version-id-marker=null`, the `null` object would 
always be returned, causing duplicate entries to be returned.

Add a check against an empty version ID.
2024-04-16 08:41:27 -07:00
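One way the duplicate could be avoided, sketched with hypothetical types; the real listing code is more involved than this simple filter:

```go
package main

import "fmt"

type objVersion struct {
	Key       string
	VersionID string // empty string stands in for the "null" version
}

// resumeAfterNullMarker drops the null version of the marker key when a
// listing resumes with version-id-marker=null, since the caller already
// received that entry in the previous page.
func resumeAfterNullMarker(entries []objVersion, keyMarker string) []objVersion {
	out := make([]objVersion, 0, len(entries))
	for _, e := range entries {
		if e.Key == keyMarker && e.VersionID == "" {
			continue // already returned on the previous page
		}
		out = append(out, e)
	}
	return out
}

func main() {
	entries := []objVersion{
		{Key: "a.txt", VersionID: ""},
		{Key: "a.txt", VersionID: "0b1c3c11-7349-4a42-9ff1-3ac0ab295ea7"},
	}
	fmt.Println(resumeAfterNullMarker(entries, "a.txt"))
}
```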
Markus Wagner
0cf3d93360 removed hardcoded datasource uid (#19477) 2024-04-15 03:03:01 -07:00
Harshavardhana
cb06aee5ac convert multipart-cleanup from a blocking unlink() to a rename to trash (#19495)
unlinking() at two different locations on a disk when there
is a lot to purge can lead to huge IO waits; instead,
rely on rename() to .trash to avoid running multiple unlinks()
in parallel.
2024-04-15 03:02:39 -07:00
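A minimal sketch of the rename-to-trash idea; the `.trash` location and the `moveToTrash` helper are illustrative assumptions:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// moveToTrash renames a directory pending deletion into a per-drive .trash
// area instead of unlinking it inline; a background sweeper can then purge
// .trash without stalling foreground requests on unlink() I/O waits.
func moveToTrash(driveRoot, victim string) error {
	trash := filepath.Join(driveRoot, ".trash")
	if err := os.MkdirAll(trash, 0o755); err != nil {
		return err
	}
	return os.Rename(victim, filepath.Join(trash, filepath.Base(victim)))
}

func main() {
	fmt.Println(moveToTrash("/mnt/drive1", "/mnt/drive1/.minio.sys/multipart/upload-1234"))
}
```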
Shubhendu
1c70e9ed1b ILM expiry replication status only if enabled (#19503)
Report ILM expiry replication status only if at least one site has the
feature enabled.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-04-15 02:40:39 -07:00
jiuker
f3d6a2dd37 code clean for dynamicSleeper (#19499) 2024-04-15 02:40:19 -07:00
Harshavardhana
d1c58fc2eb remove older deploymentID fix behavior to speed up startup (#19497)
Since mid-2018 we have not had any deployments
without a deployment-id, so it is time to put this
code to rest; this PR removes this old code as
it's no longer valuable.

On setups with 1000s of drives these are all
quite expensive operations.
2024-04-15 01:25:46 -07:00
Allan Roger Reid
b8f05b1471 Keep an up-to-date copy of the KMS master key (#19492) 2024-04-15 00:42:50 -07:00
Klaus Post
e7baf78ee8 fix: list operations resuming when hitting different node (#19494)
The rest of the peer clients were not consistent across nodes. So, meta cache requests 
would not go to the same server if a continuation happens on a different node.
2024-04-12 11:13:36 -07:00
guangwu
87299eba10 fix: close sessionPolicyFile in the sts-assume-role example (#19428) 2024-04-12 09:09:55 -07:00
Shubhendu
d3a07c29ba Correct sample for node scrape configuration (#19491)
As node metrics should be scraped on a per-node basis, use a sample
configuration listing all the nodes as targets.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-04-12 08:49:30 -07:00
Aditya Manthramurthy
8d39b715dc Fix some CI warnings (#19482) 2024-04-12 02:25:58 -07:00
Harshavardhana
7e3166475d simplify common functions in replication (#19480) 2024-04-11 17:27:32 -07:00
Klaus Post
5206c0e883 Inspect: Add error if no results (#19476)
When no results match or another error occurs, add an error to the stream. Keep the "inspect-input.txt" as the only thing in the zip for reference.

Example:

```
λ mc support inspect --airgap myminio/testbucket/fjghfjh/**
mc: Using public key from C:\Users\klaus\mc\support_public.pem
File data successfully downloaded as inspect-data.enc

λ inspect inspect-data.enc
Using private key from support_private.pem
output written to inspect-data.zip
2024/04/11 14:10:51 next stream: GetRawData: No files matched the given pattern

λ unzip -l inspect-data.zip
Archive:  inspect-data.zip
  Length      Date    Time    Name
---------  ---------- -----   ----
      222  2024-04-11 14:10   inspect-input.txt
---------                     -------
      222                     1 file

λ
```

Modifies inspect to read until end of stream to report the error.

Bonus: Add legacy commandline params
2024-04-11 14:22:47 -07:00
Harshavardhana
41ec038523 remove permission denied error for being drive error (#19478) 2024-04-11 14:22:15 -07:00
Shireesh Anjal
08d3d06a06 Add drive metrics in metrics-v3 (#19452)
Add following metrics:

- used_inodes
- total_inodes
- healing
- online
- reads_per_sec
- reads_kb_per_sec
- reads_await
- writes_per_sec
- writes_kb_per_sec
- writes_await
- perc_util

To be able to calculate the `per_sec` values, we capture the IOStats-related 
data in the beginning (along with the time at which they were captured), 
and compare them against the current values subsequently. This is because
dividing by "time since server uptime" doesn't work in k8s environments.
2024-04-11 10:46:34 -07:00
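A small sketch of the delta-based rate calculation described above; the struct and field names are assumptions for illustration:

```go
package main

import (
	"fmt"
	"time"
)

type ioStat struct {
	readIOs    uint64
	readKBs    uint64
	capturedAt time.Time
}

// perSecond derives rates from two captures of cumulative drive counters.
// Using the delta between captures, rather than dividing by process uptime,
// keeps the numbers meaningful in k8s, where containers restart independently
// of the underlying drive counters.
func perSecond(prev, cur ioStat) (readsPerSec, readKBPerSec float64) {
	elapsed := cur.capturedAt.Sub(prev.capturedAt).Seconds()
	if elapsed <= 0 {
		return 0, 0
	}
	readsPerSec = float64(cur.readIOs-prev.readIOs) / elapsed
	readKBPerSec = float64(cur.readKBs-prev.readKBs) / elapsed
	return readsPerSec, readKBPerSec
}

func main() {
	prev := ioStat{readIOs: 1000, readKBs: 4096, capturedAt: time.Now().Add(-10 * time.Second)}
	cur := ioStat{readIOs: 1500, readKBs: 8192, capturedAt: time.Now()}
	r, kb := perSecond(prev, cur)
	fmt.Printf("reads_per_sec=%.1f reads_kb_per_sec=%.1f\n", r, kb)
}
```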
Harshavardhana
074febd9e1 remove SetDiskLoc() rely on the endpoint values instead (#19475)
the disk location never changes in the lifetime of a
MinIO cluster; even if it did, validate this close to the
disk instead of at the higher layer.

Return appropriate errors indicating an invalid drive, so
that the drive is not recognized as a valid
drive.
2024-04-11 10:45:28 -07:00
Harshavardhana
aa8d25797b update versioning tests to cover CopyObject() (#19472)
adds tests to cover #19468
2024-04-11 02:50:52 -07:00
Alex
8d7d4adb91 Updated Console UI to v1.2.0 (#19467) 2024-04-11 01:31:16 -07:00
Poorna
ffa91f9794 fix CopyObject with replace overwriting inline status (#19468)
Fixes #19450 - internal inline-data header can get overwritten
during copy with replace before this fix.
2024-04-10 23:42:51 -07:00
Harshavardhana
0c31e61343 allow protection from invalid config values (#19460)
we have had numerous reports of some config
values not having defaults, causing features
to misbehave because default values were not
set properly.

This PR tries to address all these concerns
once and for all.

Each new sub-system that gets added

- must check for invalid keys
- must have default values set
- must not "return err" when being saved into
  a global state() instead collate as part of
  other subsystem errors allow other sub-systems
  to independently initialize.
2024-04-10 18:10:30 -07:00
Harshavardhana
9b926f7dbe avoid busy loops in bad path component (#19466)
use the check in places where we are looking
for such bad path components.
2024-04-10 18:08:52 -07:00
Harshavardhana
35d8728990 handle missing LDAP normalization in SetPolicy() API (#19465) 2024-04-10 15:37:42 -07:00
Allan Roger Reid
f7ed9a75ba Allow specifying the local server with env variable _MINIO_SERVER_LOCAL (#19453)
* Allow specifying the local server, with env variable _MINIO_SERVER_LOCAL, in systems where the hostname cannot be resolved to local IP

* Limit scope of the _MINIO_SERVER_LOCAL solution to only containerized implementations
2024-04-10 09:34:59 -07:00
jiuker
9496c17e13 doc: add Content-Type to s3zip (#19455) 2024-04-10 09:28:27 -07:00
jiuker
ed64e91f06 fix: noHost for collectLocalMetric (#19457) 2024-04-10 09:28:08 -07:00
jiuker
a481825ae1 fix: unknown contentType for ArchiveFileHandler (#19451) 2024-04-09 03:41:25 -07:00
Harshavardhana
7bb0f32332 make if-none-match PUT/POST RFC compliant (#19448)
fixes #19442
2024-04-09 01:17:49 -07:00
Anis Eleuch
c6f8dc431e Add a warning when the total size of an object versions exceeds 1 TiB (#19435) 2024-04-08 10:45:03 -07:00
Alexander Thaller
78f177b8ee Allow setting readOnlyRootFilesystem in securityContext (#19437) 2024-04-08 09:31:05 -07:00
Anis Eleuch
787c44c39d batch-repl: Do not allow both source/target to be remote (#19434)
Return an error when the user specifies endpoints for both source
and target. This can generate many types of errors as the code considers
a deployment remote if its endpoint is specified.
2024-04-08 07:11:38 -07:00
Anis Eleuch
f06fee0364 heal: Add more per disk healing result in the audit (#19427)
HealObject() does not return an error in some cases, for example, when
an object is successfully reconstructed on one disk but fails on other
disks; another case is when a disk that does not have the object is
temporarily disconnected.

Add the after-heal drives result to the audit output for better
analysis.
2024-04-08 02:26:14 -07:00
Harshavardhana
c957e0d426 fix: increase the tiering part size to 128MiB (#19424)
also introduce an 8MiB read buffer for
bigger parts
2024-04-08 02:22:27 -07:00
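A hedged sketch of the buffered copy described here; the constants mirror the sizes in the commit message, while the `copyPart` helper itself is illustrative:

```go
package main

import (
	"bytes"
	"io"
)

const (
	tierPartSize   = 128 << 20 // 128MiB parts for multipart transfers to the tier
	tierReadBuffer = 8 << 20   // 8MiB read buffer for bigger parts
)

// copyPart streams at most one part to the tier target through a fixed 8MiB
// buffer, so large parts do not force proportionally large allocations.
func copyPart(dst io.Writer, src io.Reader) (int64, error) {
	buf := make([]byte, tierReadBuffer)
	// The wrapper around dst keeps io.CopyBuffer from bypassing buf through
	// a ReaderFrom fast path; io.LimitReader caps the copy at one part.
	return io.CopyBuffer(struct{ io.Writer }{dst}, io.LimitReader(src, tierPartSize), buf)
}

func main() {
	var sink bytes.Buffer
	_, _ = copyPart(&sink, bytes.NewReader(make([]byte, 1<<20)))
}
```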
Harshavardhana
04101d472f fix: add fallbackDisks for disk healing (#19425) 2024-04-08 02:22:13 -07:00
Minio Trusted
51fc145161 Update yaml files to latest version RELEASE.2024-04-06T05-26-02Z 2024-04-06 06:44:30 +00:00
447 changed files with 21212 additions and 9021 deletions

View File

@@ -3,12 +3,11 @@ name: Crosscompile
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true
@@ -21,11 +20,11 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3
- uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
check-latest: true

View File

@@ -3,8 +3,7 @@ name: FIPS Build Test
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -21,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v4

View File

@@ -3,8 +3,7 @@ name: Healing Functional Tests
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -21,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v4

View File

@@ -3,12 +3,11 @@ name: Linters and Tests
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true
@@ -21,8 +20,8 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
os: [ubuntu-latest, windows-latest]
go-version: [1.22.x]
os: [ubuntu-latest, Windows]
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
@@ -30,14 +29,15 @@ jobs:
go-version: ${{ matrix.go-version }}
check-latest: true
- name: Build on ${{ matrix.os }}
if: matrix.os == 'windows-latest'
if: matrix.os == 'Windows'
env:
CGO_ENABLED: 0
GO111MODULE: on
run: |
Set-MpPreference -DisableRealtimeMonitoring $true
netsh int ipv4 set dynamicport tcp start=60000 num=61000
go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
go test -v --timeout 50m ./...
go test -v --timeout 120m ./...
- name: Build on ${{ matrix.os }}
if: matrix.os == 'ubuntu-latest'
env:

View File

@@ -3,8 +3,7 @@ name: Functional Tests
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -21,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v4

View File

@@ -3,8 +3,7 @@ name: Helm Chart linting
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -23,7 +22,7 @@ jobs:
uses: actions/checkout@v4
- name: Install Helm
uses: azure/setup-helm@v3
uses: azure/setup-helm@v4
- name: Run helm lint
run: |

View File

@@ -3,8 +3,7 @@ name: IAM integration
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -62,7 +61,7 @@ jobs:
# are turned off - i.e. if ldap="", then ldap server is not enabled for
# the tests.
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
ldap: ["", "localhost:389"]
etcd: ["", "http://localhost:2379"]
openid: ["", "http://127.0.0.1:5556/dex"]
@@ -112,6 +111,12 @@ jobs:
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
go run docs/iam/access-manager-plugin.go &
make test-iam
- name: Test MinIO Old Version data to IAM import current version
if: matrix.ldap == 'ldaphost:389'
env:
_MINIO_LDAP_TEST_SERVER: ${{ matrix.ldap }}
run: |
make test-iam-ldap-upgrade-import
- name: Test LDAP for automatic site replication
if: matrix.ldap == 'localhost:389'
run: |

View File

@@ -4,7 +4,6 @@ on:
pull_request:
branches:
- master
- next
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -30,7 +29,7 @@ jobs:
- name: setup-go-step
uses: actions/setup-go@v5
with:
go-version: 1.21.x
go-version: 1.22.x
- name: github sha short
id: vars
@@ -56,6 +55,11 @@ jobs:
run: |
${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "erasure" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
# FIXME: renable this back when we have a valid way to add deadlines for PUT()s (internode CreateFile)
# - name: resiliency
# run: |
# ${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "resiliency" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
- name: The job must cleanup
if: ${{ always() }}
run: |

View File

@@ -0,0 +1,78 @@
version: '3.7'
# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: quay.io/minio/minio:${JOB_NAME}
command: server --console-address ":9001" http://minio{1...4}/rdata{1...2}
expose:
- "9000"
- "9001"
environment:
MINIO_CI_CD: "on"
MINIO_ROOT_USER: "minio"
MINIO_ROOT_PASSWORD: "minio123"
MINIO_KMS_SECRET_KEY: "my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw="
MINIO_DRIVE_MAX_TIMEOUT: "5s"
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 5s
timeout: 5s
retries: 5
# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
minio1:
<<: *minio-common
hostname: minio1
volumes:
- rdata1-1:/rdata1
- rdata1-2:/rdata2
minio2:
<<: *minio-common
hostname: minio2
volumes:
- rdata2-1:/rdata1
- rdata2-2:/rdata2
minio3:
<<: *minio-common
hostname: minio3
volumes:
- rdata3-1:/rdata1
- rdata3-2:/rdata2
minio4:
<<: *minio-common
hostname: minio4
volumes:
- rdata4-1:/rdata1
- rdata4-2:/rdata2
nginx:
image: nginx:1.19.2-alpine
hostname: nginx
volumes:
- ./nginx-4-node.conf:/etc/nginx/nginx.conf:ro
ports:
- "9000:9000"
- "9001:9001"
depends_on:
- minio1
- minio2
- minio3
- minio4
## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
rdata1-1:
rdata1-2:
rdata2-1:
rdata2-2:
rdata3-1:
rdata3-2:
rdata4-1:
rdata4-2:

View File

@@ -23,10 +23,9 @@ http {
# include /etc/nginx/conf.d/*.conf;
upstream minio {
server minio1:9000;
server minio2:9000;
server minio3:9000;
server minio4:9000;
server minio1:9000 max_fails=1 fail_timeout=10s;
server minio2:9000 max_fails=1 fail_timeout=10s;
server minio3:9000 max_fails=1 fail_timeout=10s;
}
upstream console {

View File

@@ -23,14 +23,14 @@ http {
# include /etc/nginx/conf.d/*.conf;
upstream minio {
server minio1:9000;
server minio2:9000;
server minio3:9000;
server minio4:9000;
server minio5:9000;
server minio6:9000;
server minio7:9000;
server minio8:9000;
server minio1:9000 max_fails=1 fail_timeout=10s;
server minio2:9000 max_fails=1 fail_timeout=10s;
server minio3:9000 max_fails=1 fail_timeout=10s;
server minio4:9000 max_fails=1 fail_timeout=10s;
server minio5:9000 max_fails=1 fail_timeout=10s;
server minio6:9000 max_fails=1 fail_timeout=10s;
server minio7:9000 max_fails=1 fail_timeout=10s;
server minio8:9000 max_fails=1 fail_timeout=10s;
}
upstream console {

View File

@@ -23,10 +23,10 @@ http {
# include /etc/nginx/conf.d/*.conf;
upstream minio {
server minio1:9000;
server minio2:9000;
server minio3:9000;
server minio4:9000;
server minio1:9000 max_fails=1 fail_timeout=10s;
server minio2:9000 max_fails=1 fail_timeout=10s;
server minio3:9000 max_fails=1 fail_timeout=10s;
server minio4:9000 max_fails=1 fail_timeout=10s;
}
upstream console {

View File

@@ -24,11 +24,6 @@ if [ ! -f ./mc ]; then
chmod +x mc
fi
(
cd ./docs/debugging/s3-check-md5
go install -v
)
export RELEASE=RELEASE.2023-08-29T23-07-35Z
docker-compose -f docker-compose-site1.yaml up -d
@@ -48,10 +43,10 @@ sleep 30s
sleep 5
s3-check-md5 -h
./s3-check-md5 -h
failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
if [ $failed_count_site1 -ne 0 ]; then
echo "failed with multipart on site1 uploads"
@@ -67,8 +62,8 @@ fi
sleep 5
failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
## we do not need to fail here, since we are going to test
## upgrading to master, healing and being able to recover
@@ -96,8 +91,8 @@ for i in $(seq 1 10); do
./mc admin heal -r --remove --json site2/ 2>&1 >/dev/null
done
failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
if [ $failed_count_site1 -ne 0 ]; then
echo "failed with multipart on site1 uploads"
@@ -109,6 +104,43 @@ if [ $failed_count_site2 -ne 0 ]; then
exit 1
fi
# Add user group test
./mc admin user add site1 site-replication-issue-user site-replication-issue-password
./mc admin group add site1 site-replication-issue-group site-replication-issue-user
max_wait_attempts=30
wait_interval=5
attempt=1
while true; do
diff <(./mc admin group info site1 site-replication-issue-group) <(./mc admin group info site2 site-replication-issue-group)
if [[ $? -eq 0 ]]; then
echo "Outputs are consistent."
break
fi
remaining_attempts=$((max_wait_attempts - attempt))
if ((attempt >= max_wait_attempts)); then
echo "Outputs remain inconsistent after $max_wait_attempts attempts. Exiting with error."
exit 1
else
echo "Outputs are inconsistent. Waiting for $wait_interval seconds (attempt $attempt/$max_wait_attempts)."
sleep $wait_interval
fi
((attempt++))
done
status=$(./mc admin group info site1 site-replication-issue-group --json | jq .groupStatus | tr -d '"')
if [[ $status == "enabled" ]]; then
echo "Success"
else
echo "Expected status: enabled, actual status: $status"
exit 1
fi
cleanup
## change working directory

View File

@@ -3,8 +3,7 @@ name: MinIO advanced tests
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -22,7 +21,7 @@ jobs:
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
steps:
- uses: actions/checkout@v4
@@ -36,6 +35,18 @@ jobs:
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
make test-decom
- name: Test ILM
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
make test-ilm
- name: Test PBAC
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
make test-pbac
- name: Test Config File
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0

View File

@@ -3,8 +3,7 @@ name: Root lockdown tests
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -21,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]
steps:

View File

@@ -16,7 +16,7 @@ docker volume rm $(docker volume ls -f dangling=true) || true
cd .github/workflows/mint
docker-compose -f minio-${MODE}.yaml up -d
sleep 30s
sleep 1m
docker system prune -f || true
docker volume prune -f || true
@@ -26,6 +26,9 @@ docker volume rm $(docker volume ls -q -f dangling=true) || true
[ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio2
[ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio6
# Pause one node, to check that all S3 calls work while one node goes wrong
[ "${MODE}" == "resiliency" ] && docker-compose -f minio-${MODE}.yaml pause minio4
docker run --rm --net=mint_default \
--name="mint-${MODE}-${JOB_NAME}" \
-e SERVER_ENDPOINT="nginx:9000" \
@@ -35,6 +38,18 @@ docker run --rm --net=mint_default \
-e MINT_MODE="${MINT_MODE}" \
docker.io/minio/mint:edge
# FIXME: enable this after fixing aws-sdk-java-v2 tests
# # unpause the node, to check that all S3 calls work while one node goes wrong
# [ "${MODE}" == "resiliency" ] && docker-compose -f minio-${MODE}.yaml unpause minio4
# [ "${MODE}" == "resiliency" ] && docker run --rm --net=mint_default \
# --name="mint-${MODE}-${JOB_NAME}" \
# -e SERVER_ENDPOINT="nginx:9000" \
# -e ACCESS_KEY="${ACCESS_KEY}" \
# -e SECRET_KEY="${SECRET_KEY}" \
# -e ENABLE_HTTPS=0 \
# -e MINT_MODE="${MINT_MODE}" \
# docker.io/minio/mint:edge
docker-compose -f minio-${MODE}.yaml down || true
sleep 10s

View File

@@ -3,8 +3,7 @@ name: Shell formatting checks
on:
pull_request:
branches:
- master
- next
- master
permissions:
contents: read

View File

@@ -1,5 +1,5 @@
---
name: Test GitHub Action
name: Spelling
on: [pull_request]
jobs:

View File

@@ -3,8 +3,7 @@ name: Upgrade old version tests
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -21,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]
steps:

View File

@@ -21,11 +21,11 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.21.9
go-version: 1.22.4
check-latest: true
- name: Get official govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
shell: bash
- name: Run govulncheck
run: govulncheck ./...
run: govulncheck -show verbose ./...
shell: bash

11
.gitignore vendored
View File

@@ -43,4 +43,13 @@ docs/debugging/inspect/inspect
docs/debugging/pprofgoparser/pprofgoparser
docs/debugging/reorder-disks/reorder-disks
docs/debugging/populate-hard-links/populate-hardlinks
docs/debugging/xattr/xattr
docs/debugging/xattr/xattr
hash-set
healing-bin
inspect
pprofgoparser
reorder-disks
s3-check-md5
s3-verify
xattr
xl-meta

View File

@@ -12,33 +12,27 @@ extend-ignore-re = [
"[0-9A-Za-z/+=]{64}",
"ZXJuZXQxDjAMBgNVBA-some-junk-Q4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF",
"eyJmb28iOiJiYXIifQ",
"eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.*",
"MIIDBTCCAe2gAwIBAgIQWHw7h.*",
'http\.Header\{"X-Amz-Server-Side-Encryptio":',
'sessionToken',
"ZoEoZdLlzVbOlT9rbhD7ZN7TLyiYXSAlB79uGEge",
]
[default.extend-words]
"encrypter" = "encrypter"
"requestor" = "requestor"
"KMS" = "KMS"
"kms" = "kms"
"Kms" = "Kms"
"Dur" = "Dur"
"EOF" = "EOF"
"hd" = "hd"
"ws" = "ws"
"guid" = "guid"
"lst" = "lst"
"pn" = "pn"
"Iy" = "Iy"
"ro" = "ro"
"thr" = "thr"
"requestor" = "requestor"
[default.extend-identifiers]
"bui" = "bui"
"toi" = "toi"
"ot" = "ot"
"dm2nd" = "dm2nd"
"HashiCorp" = "HashiCorp"
[type.go.extend-identifiers]
"bui" = "bui"
"dm2nd" = "dm2nd"
"ot" = "ot"
"ParseND" = "ParseND"
"ParseNDStream" = "ParseNDStream"
"pn" = "pn"
"TestGetPartialObjectMisAligned" = "TestGetPartialObjectMisAligned"
"thr" = "thr"
"toi" = "toi"

3088
CREDITS

File diff suppressed because it is too large

View File

@@ -45,20 +45,29 @@ lint-fix: getdeps ## runs golangci-lint suite of linters with automatic fixes
@$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml --fix
check: test
test: verifiers build build-debugging ## builds minio, runs linters, tests
test: verifiers build ## builds minio, runs linters, tests
@echo "Running unit tests"
@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -v -tags kqueue ./...
@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -v -tags kqueue,dev ./...
test-root-disable: install-race
@echo "Running minio root lockdown tests"
@env bash $(PWD)/buildscripts/disable-root.sh
test-ilm: install-race
@echo "Running ILM tests"
@env bash $(PWD)/docs/bucket/replication/setup_ilm_expiry_replication.sh
test-pbac: install-race
@echo "Running bucket policies tests"
@env bash $(PWD)/docs/iam/policies/pbac-tests.sh
test-decom: install-race
@echo "Running minio decom tests"
@env bash $(PWD)/docs/distributed/decom.sh
@env bash $(PWD)/docs/distributed/decom-encrypted.sh
@env bash $(PWD)/docs/distributed/decom-encrypted-sse-s3.sh
@env bash $(PWD)/docs/distributed/decom-compressed-sse-s3.sh
@env bash $(PWD)/docs/distributed/decom-encrypted-kes.sh
test-versioning: install-race
@echo "Running minio versioning tests"
@@ -75,11 +84,15 @@ test-race: verifiers build ## builds minio, runs linters, tests (race)
@echo "Running unit tests under -race"
@(env bash $(PWD)/buildscripts/race.sh)
test-iam: build ## verify IAM (external IDP, etcd backends)
test-iam: install-race ## verify IAM (external IDP, etcd backends)
@echo "Running tests for IAM (external IDP, etcd backends)"
@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -tags kqueue -v -run TestIAM* ./cmd
@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -tags kqueue,dev -v -run TestIAM* ./cmd
@echo "Running tests for IAM (external IDP, etcd backends) with -race"
@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd
@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue,dev -v -run TestIAM* ./cmd
test-iam-ldap-upgrade-import: install-race ## verify IAM (external LDAP IDP)
@echo "Running upgrade tests for IAM (LDAP backend)"
@env bash $(PWD)/buildscripts/minio-iam-ldap-upgrade-import-test.sh
test-sio-error:
@(env bash $(PWD)/docs/bucket/replication/sio-error.sh)
@@ -93,7 +106,10 @@ test-replication-3site:
test-delete-replication:
@(env bash $(PWD)/docs/bucket/replication/delete-replication.sh)
test-replication: install-race test-replication-2site test-replication-3site test-delete-replication test-sio-error ## verify multi site replication
test-delete-marker-proxying:
@(env bash $(PWD)/docs/bucket/replication/test_del_marker_proxying.sh)
test-replication: install-race test-replication-2site test-replication-3site test-delete-replication test-sio-error test-delete-marker-proxying ## verify multi site replication
@echo "Running tests for replicating three sites"
test-site-replication-ldap: install-race ## verify automatic site replication
@@ -114,36 +130,32 @@ test-site-replication-minio: install-race ## verify automatic site replication
@echo "Running tests for automatic site replication of SSE-C objects with compression enabled for site"
@(env bash $(PWD)/docs/site-replication/run-ssec-object-replication-with-compression.sh)
verify: ## verify minio various setups
verify: install-race ## verify minio various setups
@echo "Verifying build with race"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-build.sh)
verify-healing: ## verify healing and replacing disks with minio binary
verify-healing: install-race ## verify healing and replacing disks with minio binary
@echo "Verify healing build with race"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-healing.sh)
@(env bash $(PWD)/buildscripts/verify-healing-empty-erasure-set.sh)
@(env bash $(PWD)/buildscripts/heal-inconsistent-versions.sh)
verify-healing-with-root-disks: ## verify healing root disks
verify-healing-with-root-disks: install-race ## verify healing root disks
@echo "Verify healing with root drives"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-healing-with-root-disks.sh)
verify-healing-with-rewrite: ## verify healing to rewrite old xl.meta -> new xl.meta
verify-healing-with-rewrite: install-race ## verify healing to rewrite old xl.meta -> new xl.meta
@echo "Verify healing with rewrite"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/rewrite-old-new.sh)
verify-healing-inconsistent-versions: ## verify resolving inconsistent versions
verify-healing-inconsistent-versions: install-race ## verify resolving inconsistent versions
@echo "Verify resolving inconsistent versions build with race"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/resolve-right-versions.sh)
build-debugging:
@(env bash $(PWD)/docs/debugging/build.sh)
build: checks ## builds minio to $(PWD)
build: checks build-debugging ## builds minio to $(PWD)
@echo "Building minio binary to './minio'"
@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@@ -182,15 +194,15 @@ docker: build ## builds minio docker container
@echo "Building minio docker image '$(TAG)'"
@docker build -q --no-cache -t $(TAG) . -f Dockerfile
install-race: checks ## builds minio to $(PWD)
install-race: checks build-debugging ## builds minio to $(PWD)
@echo "Building minio binary with -race to './minio'"
@GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue,dev -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@echo "Installing minio binary with -race to '$(GOPATH)/bin/minio'"
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
@mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio
install: build ## builds minio and installs it to $GOPATH/bin.
@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
@mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio
@echo "Installation successful. To learn more, try \"minio --help\"."
clean: ## cleanup all generated assets

View File

@@ -210,10 +210,6 @@ For deployments behind a load balancer, proxy, or ingress rule where the MinIO h
For example, consider a MinIO deployment behind a proxy `https://minio.example.net`, `https://console.minio.example.net` with rules for forwarding traffic on port :9000 and :9001 to MinIO and the MinIO Console respectively on the internal network. Set `MINIO_BROWSER_REDIRECT_URL` to `https://console.minio.example.net` to ensure the browser receives a valid reachable URL.
Similarly, if your TLS certificates do not have the IP SAN for the MinIO server host, the MinIO Console may fail to validate the connection to the server. Use the `MINIO_SERVER_URL` environment variable and specify the proxy-accessible hostname of the MinIO server to allow the Console to use the MinIO server API using the TLS certificate.
For example: `export MINIO_SERVER_URL="https://minio.example.net"`
| Dashboard | Creating a bucket |
| ------------- | ------------- |
| ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic1.png?raw=true) | ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic2.png?raw=true) |

0
buildscripts/checkdeps.sh Normal file → Executable file
View File

View File

@@ -32,6 +32,7 @@ fi
set +e
export MC_HOST_minioadm=http://minioadmin:minioadmin@localhost:9100/
./mc ready minioadm
./mc ls minioadm/
@@ -56,7 +57,7 @@ done
set +e
sleep 10
./mc ready minioadm/
./mc ls minioadm/
if [ $? -ne 0 ]; then
@@ -81,11 +82,12 @@ minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &
sleep 20s
export MC_HOST_sitea=http://minioadmin:minioadmin@127.0.0.1:9001
export MC_HOST_siteb=http://minioadmin:minioadmin@127.0.0.1:9004
./mc ready sitea
./mc ready siteb
./mc admin replicate add sitea siteb
./mc admin user add sitea foobar foo12345
@@ -109,11 +111,12 @@ minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &
sleep 20s
export MC_HOST_sitea=http://foobar:foo12345@127.0.0.1:9001
export MC_HOST_siteb=http://foobar:foo12345@127.0.0.1:9004
./mc ready sitea
./mc ready siteb
./mc admin user add sitea foobar-admin foo12345
sleep 2s

View File

@@ -0,0 +1,127 @@
#!/bin/bash
# This script is used to test the migration of IAM content from old minio
# instance to new minio instance.
#
# To run it locally, start the LDAP server in github.com/minio/minio-iam-testing
# repo (e.g. make podman-run), and then run this script.
#
# This script assumes that LDAP server is at:
#
# `localhost:1389`
#
# if this is not the case, set the environment variable
# `_MINIO_LDAP_TEST_SERVER`.
OLD_VERSION=RELEASE.2024-03-26T22-10-45Z
OLD_BINARY_LINK=https://dl.min.io/server/minio/release/linux-amd64/archive/minio.${OLD_VERSION}
__init__() {
if which curl &>/dev/null; then
echo "curl is already installed"
else
echo "Installing curl:"
sudo apt install curl -y
fi
export GOPATH=/tmp/gopath
export PATH="${PATH}":"${GOPATH}"/bin
if which mc &>/dev/null; then
echo "mc is already installed"
else
echo "Installing mc:"
go install github.com/minio/mc@latest
fi
if [ ! -x ./minio.${OLD_VERSION} ]; then
echo "Downloading minio.${OLD_VERSION} binary"
curl -o minio.${OLD_VERSION} ${OLD_BINARY_LINK}
chmod +x minio.${OLD_VERSION}
fi
if [ -z "$_MINIO_LDAP_TEST_SERVER" ]; then
export _MINIO_LDAP_TEST_SERVER=localhost:1389
echo "Using default LDAP endpoint: $_MINIO_LDAP_TEST_SERVER"
fi
rm -rf /tmp/data
}
create_iam_content_in_old_minio() {
echo "Creating IAM content in old minio instance."
MINIO_CI_CD=1 ./minio.${OLD_VERSION} server /tmp/data/{1...4} &
sleep 5
set -x
mc alias set old-minio http://localhost:9000 minioadmin minioadmin
mc ready old-minio
mc idp ldap add old-minio \
server_addr=localhost:1389 \
server_insecure=on \
lookup_bind_dn=cn=admin,dc=min,dc=io \
lookup_bind_password=admin \
user_dn_search_base_dn=dc=min,dc=io \
user_dn_search_filter="(uid=%s)" \
group_search_base_dn=ou=swengg,dc=min,dc=io \
group_search_filter="(&(objectclass=groupOfNames)(member=%d))"
mc admin service restart old-minio
mc idp ldap policy attach old-minio readwrite --user=UID=dillon,ou=people,ou=swengg,dc=min,dc=io
mc idp ldap policy attach old-minio readwrite --group=CN=project.c,ou=groups,ou=swengg,dc=min,dc=io
mc idp ldap policy entities old-minio
mc admin cluster iam export old-minio
set +x
mc admin service stop old-minio
}
import_iam_content_in_new_minio() {
echo "Importing IAM content in new minio instance."
# Assume current minio binary exists.
MINIO_CI_CD=1 ./minio server /tmp/data/{1...4} &
sleep 5
set -x
mc alias set new-minio http://localhost:9000 minioadmin minioadmin
echo "BEFORE IMPORT mappings:"
mc ready new-minio
mc idp ldap policy entities new-minio
mc admin cluster iam import new-minio ./old-minio-iam-info.zip
echo "AFTER IMPORT mappings:"
mc idp ldap policy entities new-minio
set +x
# mc admin service stop new-minio
}
verify_iam_content_in_new_minio() {
output=$(mc idp ldap policy entities new-minio --json)
groups=$(echo "$output" | jq -r '.result.policyMappings[] | select(.policy == "readwrite") | .groups[]')
if [ "$groups" != "cn=project.c,ou=groups,ou=swengg,dc=min,dc=io" ]; then
echo "Failed to verify groups: $groups"
exit 1
fi
users=$(echo "$output" | jq -r '.result.policyMappings[] | select(.policy == "readwrite") | .users[]')
if [ "$users" != "uid=dillon,ou=people,ou=swengg,dc=min,dc=io" ]; then
echo "Failed to verify users: $users"
exit 1
fi
mc admin service stop new-minio
}
main() {
create_iam_content_in_old_minio
import_iam_content_in_new_minio
verify_iam_content_in_new_minio
}
(__init__ "$@" && main "$@")

22
buildscripts/minio-upgrade.sh Normal file → Executable file
View File

@@ -4,10 +4,22 @@ trap 'cleanup $LINENO' ERR
# shellcheck disable=SC2120
cleanup() {
MINIO_VERSION=dev docker-compose \
MINIO_VERSION=dev /tmp/gopath/bin/docker-compose \
-f "buildscripts/upgrade-tests/compose.yml" \
rm -s -f
down || true
MINIO_VERSION=dev /tmp/gopath/bin/docker-compose \
-f "buildscripts/upgrade-tests/compose.yml" \
rm || true
for volume in $(docker volume ls -q | grep upgrade); do
docker volume rm ${volume} || true
done
docker volume prune -f
docker system prune -f || true
docker volume prune -f || true
docker volume rm $(docker volume ls -q -f dangling=true) || true
}
verify_checksum_after_heal() {
@@ -60,6 +72,8 @@ __init__() {
go install github.com/docker/compose/v2/cmd@latest
mv -v /tmp/gopath/bin/cmd /tmp/gopath/bin/docker-compose
cleanup
TAG=minio/minio:dev make docker
MINIO_VERSION=RELEASE.2019-12-19T22-52-26Z docker-compose \
@@ -77,11 +91,11 @@ __init__() {
curl -s http://127.0.0.1:9000/minio-test/to-read/hosts | sha256sum
MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
MINIO_VERSION=dev /tmp/gopath/bin/docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
}
main() {
MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build
MINIO_VERSION=dev /tmp/gopath/bin/docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build
add_alias

View File

@@ -45,7 +45,8 @@ function verify_rewrite() {
"${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 10
"${WORK_DIR}/mc" ready minio/
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
@@ -77,7 +78,8 @@ function verify_rewrite() {
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 10
"${WORK_DIR}/mc" ready minio/
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
@@ -87,17 +89,12 @@ function verify_rewrite() {
exit 1
fi
(
cd ./docs/debugging/s3-check-md5
go install -v
)
if ! s3-check-md5 \
if ! ./s3-check-md5 \
-debug \
-versions \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
-endpoint "http://127.0.0.1:${start_port}/" 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
@@ -117,7 +114,7 @@ function verify_rewrite() {
go run ./buildscripts/heal-manual.go "127.0.0.1:${start_port}" "minio" "minio123"
sleep 1
if ! s3-check-md5 \
if ! ./s3-check-md5 \
-debug \
-versions \
-access-key minio \

View File

@@ -1,5 +1,3 @@
version: '3.7'
# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: minio/minio:${MINIO_VERSION}

View File

@@ -15,13 +15,14 @@ WORK_DIR="$PWD/.verify-$RANDOM"
export MINT_MODE=core
export MINT_DATA_DIR="$WORK_DIR/data"
export SERVER_ENDPOINT="127.0.0.1:9000"
export MC_HOST_verify="http://minio:minio123@${SERVER_ENDPOINT}/"
export MC_HOST_verify_ipv6="http://minio:minio123@[::1]:9000/"
export ACCESS_KEY="minio"
export SECRET_KEY="minio123"
export ENABLE_HTTPS=0
export GO111MODULE=on
export GOGC=25
export ENABLE_ADMIN=1
export MINIO_CI_CD=1
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
@@ -36,18 +37,21 @@ function start_minio_fs() {
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
"${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
sleep 10
"${WORK_DIR}/mc" ready verify
}
function start_minio_erasure() {
"${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
sleep 15
"${WORK_DIR}/mc" ready verify
}
function start_minio_erasure_sets() {
export MINIO_ENDPOINTS="${WORK_DIR}/erasure-disk-sets{1...32}"
"${MINIO[@]}" server >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
sleep 15
"${WORK_DIR}/mc" ready verify
}
function start_minio_pool_erasure_sets() {
@@ -57,7 +61,7 @@ function start_minio_pool_erasure_sets() {
"${MINIO[@]}" server --address ":9000" >"$WORK_DIR/pool-minio-9000.log" 2>&1 &
"${MINIO[@]}" server --address ":9001" >"$WORK_DIR/pool-minio-9001.log" 2>&1 &
sleep 40
"${WORK_DIR}/mc" ready verify
}
function start_minio_pool_erasure_sets_ipv6() {
@@ -67,7 +71,7 @@ function start_minio_pool_erasure_sets_ipv6() {
"${MINIO[@]}" server --address="[::1]:9000" >"$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
"${MINIO[@]}" server --address="[::1]:9001" >"$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &
sleep 40
"${WORK_DIR}/mc" ready verify_ipv6
}
function start_minio_dist_erasure() {
@@ -78,7 +82,7 @@ function start_minio_dist_erasure() {
"${MINIO[@]}" server --address ":900${i}" >"$WORK_DIR/dist-minio-900${i}.log" 2>&1 &
done
sleep 40
"${WORK_DIR}/mc" ready verify
}
function run_test_fs() {
@@ -222,7 +226,7 @@ function __init__() {
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
(cd "${MC_BUILD_DIR}" && go build -o "${WORK_DIR}/mc")
# remove mc source.
purge "${MC_BUILD_DIR}"

View File

@@ -0,0 +1,166 @@
#!/bin/bash -e
#
set -E
set -o pipefail
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
function start_minio_3_node() {
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MINIO_ERASURE_SET_DRIVE_COUNT=6
export MINIO_CI_CD=1
start_port=$1
args=""
for i in $(seq 1 3); do
args="$args http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/1/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/2/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/3/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/4/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/5/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/6/"
done
"${MINIO[@]}" --address ":$((start_port + 1))" $args >"${WORK_DIR}/dist-minio-server1.log" 2>&1 &
pid1=$!
disown ${pid1}
"${MINIO[@]}" --address ":$((start_port + 2))" $args >"${WORK_DIR}/dist-minio-server2.log" 2>&1 &
pid2=$!
disown $pid2
"${MINIO[@]}" --address ":$((start_port + 3))" $args >"${WORK_DIR}/dist-minio-server3.log" 2>&1 &
pid3=$!
disown $pid3
export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"
/tmp/mc ready myminio
# Wait for all drives to be online and formatted
while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].state | select(. != "ok")' | wc -l) -gt 0 ]; do sleep 1; done
# Wait for all drives to be healed
while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].healing | select(. != null) | select(. == true)' | wc -l) -gt 0 ]; do sleep 1; done
# Wait for Status: in MinIO output
while true; do
rv=$(check_online)
if [ "$rv" != "1" ]; then
# success
break
fi
# Check if we should retry
retry=$((retry + 1))
if [ $retry -le 20 ]; then
sleep 5
continue
fi
# Failure
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
pkill -9 minio
echo "FAILED"
purge "$WORK_DIR"
exit 1
done
if ! ps -p $pid1 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/dist-minio-server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
if ! ps -p $pid2 1>&2 >/dev/null; then
echo "server2 log:"
cat "${WORK_DIR}/dist-minio-server2.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
if ! ps -p $pid3 1>&2 >/dev/null; then
echo "server3 log:"
cat "${WORK_DIR}/dist-minio-server3.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
if ! pkill minio; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
sleep 1
if pgrep minio; then
# forcibly killing, to proceed further properly.
if ! pkill -9 minio; then
echo "no minio process running anymore, proceed."
fi
fi
}
function check_online() {
if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then
echo "1"
fi
}
function purge() {
echo rm -rf "$1"
}
function __init__() {
echo "Initializing environment"
mkdir -p "$WORK_DIR"
mkdir -p "$MINIO_CONFIG_DIR"
## version is purposefully set to '3' for minio to migrate configuration file
echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"
if [ ! -f /tmp/mc ]; then
wget --quiet -O /tmp/mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x /tmp/mc
fi
}
function perform_test() {
start_minio_3_node $2
echo "Testing Distributed Erasure setup healing of drives"
echo "Remove the contents of the disks belonging to '${1}' erasure set"
rm -rf ${WORK_DIR}/${1}/*/
set -x
start_minio_3_node $2
}
function main() {
# use same ports for all tests
start_port=$(shuf -i 10000-65000 -n 1)
perform_test "2" ${start_port}
perform_test "1" ${start_port}
perform_test "3" ${start_port}
}
(__init__ "$@" && main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"

View File

@@ -12,17 +12,26 @@ fi
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
GOPATH=/tmp/gopath
function start_minio_3_node() {
for i in $(seq 1 3); do
rm "${WORK_DIR}/dist-minio-server$i.log"
done
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MINIO_ERASURE_SET_DRIVE_COUNT=6
export MINIO_CI_CD=1
start_port=$2
first_time=$(find ${WORK_DIR}/ | grep format.json | wc -l)
start_port=$1
args=""
for i in $(seq 1 3); do
args="$args http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/1/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/2/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/3/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/4/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/5/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/6/"
for d in $(seq 1 3 5); do
args="$args http://127.0.0.1:$((start_port + 1))${WORK_DIR}/1/${d}/ http://127.0.0.1:$((start_port + 2))${WORK_DIR}/2/${d}/ http://127.0.0.1:$((start_port + 3))${WORK_DIR}/3/${d}/ "
d=$((d + 1))
args="$args http://127.0.0.1:$((start_port + 1))${WORK_DIR}/1/${d}/ http://127.0.0.1:$((start_port + 2))${WORK_DIR}/2/${d}/ http://127.0.0.1:$((start_port + 3))${WORK_DIR}/3/${d}/ "
done
"${MINIO[@]}" --address ":$((start_port + 1))" $args >"${WORK_DIR}/dist-minio-server1.log" 2>&1 &
@@ -37,7 +46,11 @@ function start_minio_3_node() {
pid3=$!
disown $pid3
sleep "$1"
export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"
/tmp/mc ready myminio
[ ${first_time} -eq 0 ] && upload_objects
[ ${first_time} -ne 0 ] && sleep 120
if ! ps -p $pid1 1>&2 >/dev/null; then
echo "server1 log:"
@@ -82,10 +95,23 @@ function start_minio_3_node() {
fi
}
function check_online() {
function check_heal() {
if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then
echo "1"
return 1
fi
for ((i = 0; i < 20; i++)); do
test -f ${WORK_DIR}/$1/1/.minio.sys/format.json
v1=$?
nextInES=$(($1 + 1)) && [ $nextInES -gt 3 ] && nextInES=1
foundFiles1=$(find ${WORK_DIR}/$1/1/ | grep -v .minio.sys | grep xl.meta | wc -l)
foundFiles2=$(find ${WORK_DIR}/$nextInES/1/ | grep -v .minio.sys | grep xl.meta | wc -l)
test $foundFiles1 -eq $foundFiles2
v2=$?
[ $v1 == 0 -a $v2 == 0 ] && return 0
sleep 10
done
return 1
}
function purge() {
@@ -99,20 +125,35 @@ function __init__() {
## version is purposefully set to '3' for minio to migrate configuration file
echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"
if [ ! -f /tmp/mc ]; then
wget --quiet -O /tmp/mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x /tmp/mc
fi
}
function upload_objects() {
/tmp/mc mb myminio/testbucket/
for ((i = 0; i < 20; i++)); do
echo "my content" | /tmp/mc pipe myminio/testbucket/file-$i
done
}
function perform_test() {
start_minio_3_node 120 $2
start_port=$2
start_minio_3_node $start_port
echo "Testing Distributed Erasure setup healing of drives"
echo "Remove the contents of the disks belonging to '${1}' erasure set"
echo "Remove the contents of the disks belonging to '${1}' node"
rm -rf ${WORK_DIR}/${1}/*/
set -x
start_minio_3_node 120 $2
start_minio_3_node $start_port
rv=$(check_online)
check_heal ${1}
rv=$?
if [ "$rv" == "1" ]; then
for i in $(seq 1 3); do
echo "server$i log:"

View File

@@ -25,7 +25,7 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// Data types used for returning dummy access control

View File

@@ -40,7 +40,7 @@ import (
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/kms"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
const (
@@ -735,7 +735,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
rpt.SetStatus(bucket, fileName, fmt.Errorf("An Object Lock configuration is present on this bucket, so the versioning state cannot be suspended."))
continue
}
if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
if rcfg, _ := getReplicationConfig(ctx, bucket); rcfg != nil && v.Suspended() {
rpt.SetStatus(bucket, fileName, fmt.Errorf("A replication configuration is present on this bucket, so the versioning state cannot be suspended."))
continue
}
@@ -783,7 +783,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
}
switch fileName {
case bucketNotificationConfig:
config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region, globalEventNotifier.targetList)
config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region(), globalEventNotifier.targetList)
if err != nil {
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
continue
@@ -837,9 +837,13 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
rpt.SetStatus(bucket, fileName, err)
continue
}
rcfg, err := globalBucketObjectLockSys.Get(bucket)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
// Validate the received bucket policy document
if err = bucketLifecycle.Validate(); err != nil {
if err = bucketLifecycle.Validate(rcfg); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
@@ -874,8 +878,10 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
}
kmsKey := encConfig.KeyID()
if kmsKey != "" {
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
_, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
_, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: kmsKey,
AssociatedData: kms.Context{"MinIO admin API": "ServerInfoHandler"}, // Context for a test key operation
})
if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) {
rpt.SetStatus(bucket, fileName, errKMSKeyNotFound)

View File

@@ -27,7 +27,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/config"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// validateAdminReq will validate request against and return whether it is allowed.

View File

@@ -37,7 +37,7 @@ import (
"github.com/minio/minio/internal/config/subnet"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv

View File

@@ -32,8 +32,8 @@ import (
cfgldap "github.com/minio/minio/internal/config/identity/ldap"
"github.com/minio/minio/internal/config/identity/openid"
"github.com/minio/mux"
"github.com/minio/pkg/v2/ldap"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/ldap"
"github.com/minio/pkg/v3/policy"
)
func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, isUpdate bool) {

View File

@@ -20,6 +20,7 @@ package cmd
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
@@ -27,7 +28,8 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/auth"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
xldap "github.com/minio/pkg/v3/ldap"
"github.com/minio/pkg/v3/policy"
)
// ListLDAPPolicyMappingEntities lists users/groups mapped to given/all policies.
@@ -222,9 +224,8 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
err error
)
// If we are creating svc account for request sender, ensure
// that targetUser is a real user (i.e. not derived
// credentials).
// If we are creating svc account for request sender, ensure that targetUser
// is a real user (i.e. not derived credentials).
if isSvcAccForRequestor {
if requestorIsDerivedCredential {
if requestorParentUser == "" {
@@ -237,12 +238,12 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
targetGroups = requestorGroups
// Deny if the target user is not LDAP
foundLDAPDN, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser)
foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if foundLDAPDN == "" {
if foundResult == nil {
err := errors.New("Specified user does not exist on LDAP server")
APIErr := errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err)
writeErrorResponseJSON(ctx, w, APIErr, r.URL)
@@ -265,7 +266,8 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
isDN := globalIAMSys.LDAPConfig.ParsesAsDN(targetUser)
opts.claims[ldapUserN] = targetUser // simple username
targetUser, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
var lookupResult *xldap.DNSearchResult
lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
if err != nil {
// if not found, check if DN
if strings.Contains(err.Error(), "User DN not found for:") {
@@ -279,7 +281,26 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
targetUser = lookupResult.NormDN
opts.claims[ldapUser] = targetUser // DN
opts.claims[ldapActualUser] = lookupResult.ActualDN
// Check if this user or their groups have a policy applied.
ldapPolicies, err := globalIAMSys.PolicyDBGet(targetUser, targetGroups...)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if len(ldapPolicies) == 0 {
err = fmt.Errorf("No policy set for user `%s` or any of their groups: `%s`", opts.claims[ldapActualUser], strings.Join(targetGroups, "`,`"))
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err), r.URL)
return
}
// Add LDAP attributes that were looked up into the claims.
for attribKey, attribValue := range lookupResult.Attributes {
opts.claims[ldapAttribPrefix+attribKey] = attribValue
}
}
newCred, updatedAt, err := globalIAMSys.NewServiceAccount(ctx, targetUser, targetGroups, opts)
@@ -386,15 +407,16 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Requ
}
}
targetAccount, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
dnResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if targetAccount == "" {
if dnResult == nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errNoSuchUser), r.URL)
return
}
targetAccount := dnResult.NormDN
listType := r.Form.Get("listType")
if listType != "sts-only" && listType != "svcacc-only" && listType != "" {
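Worth noting across the LDAP hunks above: GetValidatedDNForUsername (and LookupUserDN) now return a *xldap.DNSearchResult instead of a bare DN string, so "user not found" is signalled by a nil result rather than an empty string. A minimal sketch of the new contract, not standalone server code; resolveLDAPUser is a made-up helper name and "jdoe" is a placeholder username:

```
// Sketch only: follows the lookup contract shown in the hunks above.
func resolveLDAPUser(name string) (string, error) {
	res, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(name)
	if err != nil {
		return "", err // LDAP lookup/communication error
	}
	if res == nil {
		return "", errNoSuchUser // missing user is a nil result, not an empty DN
	}
	return res.NormDN, nil // normalized DN used for claims and policy mappings
}
```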

View File

@@ -27,8 +27,8 @@ import (
"strings"
"github.com/minio/mux"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/policy"
)
var (

View File

@@ -33,7 +33,7 @@ import (
"github.com/minio/madmin-go/v3"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// SiteReplicationAdd - PUT /minio/admin/v3/site-replication/add
@@ -347,6 +347,18 @@ func (a adminAPIHandlers) SiteReplicationStatus(w http.ResponseWriter, r *http.R
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Report the ILMExpiryStats only if at least one site has replication of ILM expiry enabled
var replicateILMExpiry bool
for _, site := range info.Sites {
if site.ReplicateILMExpiry {
replicateILMExpiry = true
break
}
}
if !replicateILMExpiry {
// explicitly send nil for ILMExpiryStats
info.ILMExpiryStats = nil
}
if err = json.NewEncoder(w).Encode(info); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

View File

@@ -32,7 +32,7 @@ import (
"github.com/minio/madmin-go/v3"
minio "github.com/minio/minio-go/v7"
"github.com/minio/pkg/v2/sync/errgroup"
"github.com/minio/pkg/v3/sync/errgroup"
)
func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) {

View File

@@ -29,6 +29,7 @@ import (
"sort"
"strconv"
"time"
"unicode/utf8"
"github.com/klauspost/compress/zip"
"github.com/minio/madmin-go/v3"
@@ -36,7 +37,8 @@ import (
"github.com/minio/minio/internal/cachevalue"
"github.com/minio/minio/internal/config/dns"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
xldap "github.com/minio/pkg/v3/ldap"
"github.com/minio/pkg/v3/policy"
"github.com/puzpuzpuz/xsync/v3"
)
@@ -271,7 +273,14 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
return
}
}
updatedAt, err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members)
if globalIAMSys.LDAPConfig.Enabled() {
// We don't allow internal group manipulation in this API when LDAP
// is enabled for now.
err = errIAMActionNotAllowed
} else {
updatedAt, err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members)
}
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -466,6 +475,11 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
return
}
if !utf8.ValidString(accessKey) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserValidUTF), r.URL)
return
}
checkDenyOnly := false
if accessKey == cred.AccessKey {
// Check that there is no explicit deny - otherwise it's allowed
@@ -507,6 +521,12 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
return
}
// We don't allow internal user creation with LDAP enabled for now.
if globalIAMSys.LDAPConfig.Enabled() {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errIAMActionNotAllowed), r.URL)
return
}
updatedAt, err := globalIAMSys.CreateUser(ctx, accessKey, ureq)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -687,12 +707,20 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
// In case of LDAP we need to resolve the targetUser to a DN and
// query their groups:
opts.claims[ldapUserN] = targetUser // simple username
targetUser, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
var lookupResult *xldap.DNSearchResult
lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
targetUser = lookupResult.NormDN
opts.claims[ldapUser] = targetUser // username DN
opts.claims[ldapActualUser] = lookupResult.ActualDN
// Add LDAP attributes that were looked up into the claims.
for attribKey, attribValue := range lookupResult.Attributes {
opts.claims[ldapAttribPrefix+attribKey] = attribValue
}
// NOTE: if not using LDAP, then internal IDP or open ID is
// being used - in the former, group info is enforced when
@@ -802,7 +830,11 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
}
condValues := getConditionValues(r, "", cred)
addExpirationToCondValues(updateReq.NewExpiration, condValues)
err = addExpirationToCondValues(updateReq.NewExpiration, condValues)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Permission checks:
//
@@ -1026,8 +1058,13 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
for _, svc := range serviceAccounts {
expiryTime := svc.Expiration
serviceAccountList = append(serviceAccountList, madmin.ServiceAccountInfo{
AccessKey: svc.AccessKey,
Expiration: &expiryTime,
Description: svc.Description,
ParentUser: svc.ParentUser,
Name: svc.Name,
AccountStatus: svc.Status,
AccessKey: svc.AccessKey,
ImpliedPolicy: svc.IsImpliedPolicy(),
Expiration: &expiryTime,
})
}
@@ -1199,9 +1236,9 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
}
bucketStorageCache.InitOnce(10*time.Second,
cachevalue.Opts{ReturnLastGood: true, NoWait: true},
func() (DataUsageInfo, error) {
ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
cachevalue.Opts{ReturnLastGood: true},
func(ctx context.Context) (DataUsageInfo, error) {
ctx, done := context.WithTimeout(ctx, 2*time.Second)
defer done()
return loadDataUsageFromBackend(ctx, objectAPI)
@@ -1556,7 +1593,12 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
}))
}
// SetPolicyForUserOrGroup - PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
// SetPolicyForUserOrGroup - sets a policy on a user or a group.
//
// PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
//
// Deprecated: This API is replaced by attach/detach policy APIs for specific
// type of users (builtin or LDAP).
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -1608,6 +1650,32 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
userType := regUser
if globalIAMSys.GetUsersSysType() == LDAPUsersSysType {
userType = stsUser
// Validate that the user or group exists in LDAP and use the normalized
// form of the entityName (which will be an LDAP DN).
var err error
if isGroup {
var foundGroupDN *xldap.DNSearchResult
var underBaseDN bool
if foundGroupDN, underBaseDN, err = globalIAMSys.LDAPConfig.GetValidatedGroupDN(nil, entityName); err != nil {
iamLogIf(ctx, err)
} else if foundGroupDN == nil || !underBaseDN {
err = errNoSuchGroup
}
entityName = foundGroupDN.NormDN
} else {
var foundUserDN *xldap.DNSearchResult
if foundUserDN, err = globalIAMSys.LDAPConfig.GetValidatedDNForUsername(entityName); err != nil {
iamLogIf(ctx, err)
} else if foundUserDN == nil {
err = errNoSuchUser
}
entityName = foundUserDN.NormDN
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
updatedAt, err := globalIAMSys.PolicyDBSet(ctx, entityName, policyName, userType, isGroup)
@@ -1763,9 +1831,20 @@ const (
userPolicyMappingsFile = "user_mappings.json"
groupPolicyMappingsFile = "group_mappings.json"
stsUserPolicyMappingsFile = "stsuser_mappings.json"
iamAssetsDir = "iam-assets"
iamAssetsDir = "iam-assets"
)
var iamExportFiles = []string{
allPoliciesFile,
allUsersFile,
allGroupsFile,
allSvcAcctsFile,
userPolicyMappingsFile,
groupPolicyMappingsFile,
stsUserPolicyMappingsFile,
}
// ExportIAMHandler - exports all iam info as a zipped file
func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -1804,16 +1883,7 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
return nil
}
iamFiles := []string{
allPoliciesFile,
allUsersFile,
allGroupsFile,
allSvcAcctsFile,
userPolicyMappingsFile,
groupPolicyMappingsFile,
stsUserPolicyMappingsFile,
}
for _, f := range iamFiles {
for _, f := range iamExportFiles {
iamFile := pathJoin(iamAssetsDir, f)
switch f {
case allPoliciesFile:
@@ -1898,7 +1968,7 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
_, policy, err := globalIAMSys.GetServiceAccount(ctx, acc.Credentials.AccessKey)
sa, policy, err := globalIAMSys.GetServiceAccount(ctx, acc.Credentials.AccessKey)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
@@ -1920,6 +1990,9 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
Claims: claims,
SessionPolicy: json.RawMessage(policyJSON),
Status: acc.Credentials.Status,
Name: sa.Name,
Description: sa.Description,
Expiration: &sa.Expiration,
}
}
@@ -2152,12 +2225,12 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
// If group does not exist, then check if the group has beginning and end space characters
// we will reject such group names.
if errors.Is(gerr, errNoSuchGroup) && hasSpaceBE(group) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminResourceInvalidArgument, err, allGroupsFile, group), r.URL)
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminResourceInvalidArgument, gerr, allGroupsFile, group), r.URL)
return
}
}
if _, gerr := globalIAMSys.AddUsersToGroup(ctx, group, grpInfo.Members); gerr != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allGroupsFile, group), r.URL)
writeErrorResponseJSON(ctx, w, importError(ctx, gerr, allGroupsFile, group), r.URL)
return
}
}
@@ -2184,6 +2257,16 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, allSvcAcctsFile, ""), r.URL)
return
}
// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
err := globalIAMSys.NormalizeLDAPAccessKeypairs(ctx, serviceAcctReqs)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, ""), r.URL)
return
}
}
for user, svcAcctReq := range serviceAcctReqs {
var sp *policy.Policy
var err error
@@ -2194,7 +2277,8 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
return
}
}
// service account access key cannot have space characters beginning and end of the string.
// service account access key cannot have space characters
// beginning and end of the string.
if hasSpaceBE(svcAcctReq.AccessKey) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL)
return
@@ -2220,20 +2304,14 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
updateReq = false
}
if updateReq {
opts := updateServiceAccountOpts{
secretKey: svcAcctReq.SecretKey,
status: svcAcctReq.Status,
name: svcAcctReq.Name,
description: svcAcctReq.Description,
expiration: svcAcctReq.Expiration,
sessionPolicy: sp,
}
_, err = globalIAMSys.UpdateServiceAccount(ctx, svcAcctReq.AccessKey, opts)
// If the service account exists, we remove it to ensure a
// clean import.
err := globalIAMSys.DeleteServiceAccount(ctx, svcAcctReq.AccessKey, true)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
delErr := fmt.Errorf("failed to delete existing service account(%s) before importing it: %w", svcAcctReq.AccessKey, err)
writeErrorResponseJSON(ctx, w, importError(ctx, delErr, allSvcAcctsFile, user), r.URL)
return
}
continue
}
opts := newServiceAccountOpts{
accessKey: user,
@@ -2246,18 +2324,6 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
allowSiteReplicatorAccount: false,
}
// In case of LDAP we need to resolve the targetUser to a DN and
// query their groups:
if globalIAMSys.LDAPConfig.Enabled() {
opts.claims[ldapUserN] = svcAcctReq.AccessKey // simple username
targetUser, _, err := globalIAMSys.LDAPConfig.LookupUserDN(svcAcctReq.AccessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
return
}
opts.claims[ldapUser] = targetUser // username DN
}
if _, _, err = globalIAMSys.NewServiceAccount(ctx, svcAcctReq.Parent, svcAcctReq.Groups, opts); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
return
@@ -2326,6 +2392,17 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, groupPolicyMappingsFile, ""), r.URL)
return
}
// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
isGroup := true
err := globalIAMSys.NormalizeLDAPMappingImport(ctx, isGroup, grpPolicyMap)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, ""), r.URL)
return
}
}
for g, pm := range grpPolicyMap {
if _, err := globalIAMSys.PolicyDBSet(ctx, g, pm.Policies, unknownIAMUserType, true); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, g), r.URL)
@@ -2355,6 +2432,16 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, stsUserPolicyMappingsFile, ""), r.URL)
return
}
// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
isGroup := true
err := globalIAMSys.NormalizeLDAPMappingImport(ctx, !isGroup, userPolicyMap)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, ""), r.URL)
return
}
}
for u, pm := range userPolicyMap {
// disallow setting policy mapping if user is a temporary user
ok, _, err := globalIAMSys.IsTempUser(u)
@@ -2366,6 +2453,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importError(ctx, errIAMActionNotAllowed, stsUserPolicyMappingsFile, u), r.URL)
return
}
if _, err := globalIAMSys.PolicyDBSet(ctx, u, pm.Policies, stsUser, false); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, u), r.URL)
return
@@ -2375,11 +2463,16 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
}
}
func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) {
if exp == nil {
return
func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) error {
if exp == nil || exp.IsZero() || exp.Equal(timeSentinel) {
return nil
}
condValues["DurationSeconds"] = []string{strconv.FormatInt(int64(exp.Sub(time.Now()).Seconds()), 10)}
dur := exp.Sub(time.Now())
if dur <= 0 {
return errors.New("unsupported expiration time")
}
condValues["DurationSeconds"] = []string{strconv.FormatInt(int64(dur.Seconds()), 10)}
return nil
}
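Since the helper above now returns an error, its callers (UpdateServiceAccount and commonAddServiceAccount, shown elsewhere in this diff) have to check it. A standalone sketch of the same validation, with timeSentinel stood in by an assumed zero-time value, showing both the accepted and the rejected path:

```
package main

import (
	"errors"
	"fmt"
	"strconv"
	"time"
)

// timeSentinel is an assumption here; it stands in for the server's sentinel value.
var timeSentinel = time.Unix(0, 0).UTC()

func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) error {
	if exp == nil || exp.IsZero() || exp.Equal(timeSentinel) {
		return nil // no expiration requested, nothing to add
	}
	dur := time.Until(*exp)
	if dur <= 0 {
		return errors.New("unsupported expiration time") // already in the past
	}
	condValues["DurationSeconds"] = []string{strconv.FormatInt(int64(dur.Seconds()), 10)}
	return nil
}

func main() {
	cond := map[string][]string{}
	in1h := time.Now().Add(time.Hour)
	fmt.Println(addExpirationToCondValues(&in1h, cond), cond["DurationSeconds"]) // <nil> [3599] (approx.)
	past := time.Now().Add(-time.Minute)
	fmt.Println(addExpirationToCondValues(&past, cond)) // unsupported expiration time
}
```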
func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials, newServiceAccountOpts, madmin.AddServiceAccountReq, string, APIError) {
@@ -2407,6 +2500,12 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err)
}
if createReq.Expiration != nil && !createReq.Expiration.IsZero() {
// truncate expiration at the second.
truncateTime := createReq.Expiration.Truncate(time.Second)
createReq.Expiration = &truncateTime
}
// service account access key cannot have space characters beginning and end of the string.
if hasSpaceBE(createReq.AccessKey) {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument)
@@ -2438,7 +2537,10 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials
}
condValues := getConditionValues(r, "", cred)
addExpirationToCondValues(createReq.Expiration, condValues)
err = addExpirationToCondValues(createReq.Expiration, condValues)
if err != nil {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", toAdminAPIErr(ctx, err)
}
// Check if action is allowed if creating access key for another user
// Check if action is explicitly denied if for self

View File

@@ -39,7 +39,7 @@ import (
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/signer"
"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)
const (

View File

@@ -36,6 +36,7 @@ import (
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
@@ -49,6 +50,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/madmin-go/v3/estream"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/dsync"
"github.com/minio/minio/internal/grid"
"github.com/minio/minio/internal/handlers"
@@ -57,9 +59,9 @@ import (
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/logger/message/log"
xnet "github.com/minio/pkg/v2/net"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/logger/message/log"
xnet "github.com/minio/pkg/v3/net"
"github.com/minio/pkg/v3/policy"
"github.com/secure-io/sio-go"
"github.com/zeebo/xxh3"
)
@@ -1429,7 +1431,7 @@ func getAggregatedBackgroundHealState(ctx context.Context, o ObjectLayer) (madmi
if globalIsDistErasure {
// Get heal status from other peers
peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus()
peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus(ctx)
var errCount int
for _, nerr := range nerrs {
if nerr.Err != nil {
@@ -1575,7 +1577,8 @@ func (a adminAPIHandlers) ClientDevNull(w http.ResponseWriter, r *http.Request)
// NetperfHandler - perform mesh style network throughput test
func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
if objectAPI == nil {
@@ -1596,6 +1599,15 @@ func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request)
ctx = lkctx.Context()
defer nsLock.Unlock(lkctx)
// Freeze all incoming S3 API calls before running speedtest.
globalNotificationSys.ServiceFreeze(ctx, true)
// Unfreeze as soon as request context is canceled or when the function returns.
go func() {
<-ctx.Done()
globalNotificationSys.ServiceFreeze(ctx, false)
}()
durationStr := r.Form.Get(peerRESTDuration)
duration, err := time.ParseDuration(durationStr)
if err != nil {
@@ -1616,18 +1628,73 @@ func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request)
}
}
func isAllowedRWAccess(r *http.Request, cred auth.Credentials, bucketName string) (rd, wr bool) {
owner := cred.AccessKey == globalActiveCred.AccessKey
// Set prefix value for "s3:prefix" policy conditionals.
r.Header.Set("prefix", "")
// Set delimiter value for "s3:delimiter" policy conditionals.
r.Header.Set("delimiter", SlashSeparator)
isAllowedAccess := func(bucketName string) (rd, wr bool) {
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.GetObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
ObjectName: "",
Claims: cred.Claims,
}) {
rd = true
}
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.PutObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
ObjectName: "",
Claims: cred.Claims,
}) {
wr = true
}
return rd, wr
}
return isAllowedAccess(bucketName)
}
// ObjectSpeedTestHandler - reports maximum speed of a cluster by performing PUT and
// GET operations on the server, supports auto tuning by default by automatically
// increasing concurrency and stopping when we have reached the limits on the
// system.
func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
objectAPI, creds := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
if objectAPI == nil {
return
}
if !globalAPIConfig.permitRootAccess() {
rd, wr := isAllowedRWAccess(r, creds, globalObjectPerfBucket)
if !rd || !wr {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, AdminError{
Code: "XMinioSpeedtestInsufficientPermissions",
Message: fmt.Sprintf("%s does not have read and write access to '%s' bucket", creds.AccessKey,
globalObjectPerfBucket),
StatusCode: http.StatusForbidden,
}), r.URL)
return
}
}
sizeStr := r.Form.Get(peerRESTSize)
durationStr := r.Form.Get(peerRESTDuration)
concurrentStr := r.Form.Get(peerRESTConcurrent)
@@ -1636,6 +1703,7 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
autotune := r.Form.Get("autotune") == "true"
noClear := r.Form.Get("noclear") == "true"
enableSha256 := r.Form.Get("enableSha256") == "true"
enableMultipart := r.Form.Get("enableMultipart") == "true"
size, err := strconv.Atoi(sizeStr)
if err != nil {
@@ -1691,8 +1759,11 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
// Freeze all incoming S3 API calls before running speedtest.
globalNotificationSys.ServiceFreeze(ctx, true)
// unfreeze all incoming S3 API calls after speedtest.
defer globalNotificationSys.ServiceFreeze(ctx, false)
// Unfreeze as soon as request context is canceled or when the function returns.
go func() {
<-ctx.Done()
globalNotificationSys.ServiceFreeze(ctx, false)
}()
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()
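The two changes above work together: the handler context is now cancellable, and the deferred unfreeze is replaced by a goroutine that waits on that context, so S3 traffic resumes as soon as the client aborts the speedtest rather than only when the handler returns. In rough outline (a sketch of the pattern from the hunks above, not new code):

```
// Sketch of the unfreeze-on-cancel pattern used by the perf handlers above.
ctx, cancel := context.WithCancel(r.Context())
defer cancel() // fires on every exit path, including client aborts

globalNotificationSys.ServiceFreeze(ctx, true) // freeze incoming S3 calls
go func() {
	<-ctx.Done()                                    // handler returned or request aborted
	globalNotificationSys.ServiceFreeze(ctx, false) // unfreeze immediately
}()
```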
@@ -1706,6 +1777,8 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
storageClass: storageClass,
bucketName: customBucket,
enableSha256: enableSha256,
enableMultipart: enableMultipart,
creds: creds,
})
var prevResult madmin.SpeedTestResult
for {
@@ -1792,7 +1865,8 @@ func validateObjPerfOptions(ctx context.Context, storageInfo madmin.StorageInfo,
// DriveSpeedtestHandler - reports throughput of drives available in the cluster
func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
if objectAPI == nil {
@@ -1802,8 +1876,11 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R
// Freeze all incoming S3 API calls before running speedtest.
globalNotificationSys.ServiceFreeze(ctx, true)
// unfreeze all incoming S3 API calls after speedtest.
defer globalNotificationSys.ServiceFreeze(ctx, false)
// Unfreeze as soon as request context is canceled or when the function returns.
go func() {
<-ctx.Done()
globalNotificationSys.ServiceFreeze(ctx, false)
}()
serial := r.Form.Get("serial") == "true"
blockSizeStr := r.Form.Get("blocksize")
@@ -2096,7 +2173,9 @@ func (a adminAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Req
return
}
if err := GlobalKMS.CreateKey(ctx, r.Form.Get("key-id")); err != nil {
if err := GlobalKMS.CreateKey(ctx, &kms.CreateKeyRequest{
Name: r.Form.Get("key-id"),
}); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -2117,22 +2196,12 @@ func (a adminAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Reques
return
}
stat, err := GlobalKMS.Stat(ctx)
stat, err := GlobalKMS.Status(ctx)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}
status := madmin.KMSStatus{
Name: stat.Name,
DefaultKeyID: stat.DefaultKey,
Endpoints: make(map[string]madmin.ItemState, len(stat.Endpoints)),
}
for _, endpoint := range stat.Endpoints {
status.Endpoints[endpoint] = madmin.ItemOnline // TODO(aead): Implement an online check for mTLS
}
resp, err := json.Marshal(status)
resp, err := json.Marshal(stat)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
@@ -2154,15 +2223,9 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
return
}
stat, err := GlobalKMS.Stat(ctx)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}
keyID := r.Form.Get("key-id")
if keyID == "" {
keyID = stat.DefaultKey
keyID = GlobalKMS.DefaultKey
}
response := madmin.KMSKeyStatus{
KeyID: keyID,
@@ -2170,7 +2233,10 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
kmsContext := kms.Context{"MinIO admin API": "KMSKeyStatusHandler"} // Context for a test key operation
// 1. Generate a new key using the KMS.
key, err := GlobalKMS.GenerateKey(ctx, keyID, kmsContext)
key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: keyID,
AssociatedData: kmsContext,
})
if err != nil {
response.EncryptionErr = err.Error()
resp, err := json.Marshal(response)
@@ -2183,7 +2249,11 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
}
// 2. Verify that we can indeed decrypt the (encrypted) key
decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext)
decryptedKey, err := GlobalKMS.Decrypt(ctx, &kms.DecryptRequest{
Name: key.KeyID,
Ciphertext: key.Ciphertext,
AssociatedData: kmsContext,
})
if err != nil {
response.DecryptionErr = err.Error()
resp, err := json.Marshal(response)
@@ -2277,7 +2347,7 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
notifyTarget := fetchLambdaInfo()
local := getLocalServerProperty(globalEndpoints, r, metrics)
servers := globalNotificationSys.ServerInfo(metrics)
servers := globalNotificationSys.ServerInfo(ctx, metrics)
servers = append(servers, local)
var poolsInfo map[int]map[int]madmin.ErasureSetInfo
@@ -2336,8 +2406,7 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
domain := globalDomainNames
services := madmin.Services{
KMS: fetchKMSStatus(),
KMSStatus: fetchKMSStatusV2(ctx),
KMSStatus: fetchKMSStatus(ctx),
LDAP: ldap,
Logger: log,
Audit: audit,
@@ -2347,7 +2416,7 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
return madmin.InfoMessage{
Mode: string(mode),
Domain: domain,
Region: globalSite.Region,
Region: globalSite.Region(),
SQSARN: globalEventNotifier.GetARNList(false),
DeploymentID: globalDeploymentID(),
Buckets: buckets,
@@ -2375,7 +2444,7 @@ func getKubernetesInfo(dctx context.Context) madmin.KubernetesInfo {
}
client := &http.Client{
Transport: globalHealthChkTransport,
Transport: globalRemoteTargetTransport,
Timeout: 10 * time.Second,
}
@@ -2947,66 +3016,25 @@ func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
return notify
}
// fetchKMSStatus fetches KMS-related status information.
func fetchKMSStatus() madmin.KMS {
kmsStat := madmin.KMS{}
if GlobalKMS == nil {
kmsStat.Status = "disabled"
return kmsStat
}
stat, err := GlobalKMS.Stat(context.Background())
if err != nil {
kmsStat.Status = string(madmin.ItemOffline)
return kmsStat
}
if len(stat.Endpoints) == 0 {
kmsStat.Status = stat.Name
return kmsStat
}
kmsStat.Status = string(madmin.ItemOnline)
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
// 1. Generate a new key using the KMS.
key, err := GlobalKMS.GenerateKey(context.Background(), "", kmsContext)
if err != nil {
kmsStat.Encrypt = fmt.Sprintf("Encryption failed: %v", err)
} else {
kmsStat.Encrypt = "success"
}
// 2. Verify that we can indeed decrypt the (encrypted) key
decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext)
switch {
case err != nil:
kmsStat.Decrypt = fmt.Sprintf("Decryption failed: %v", err)
case subtle.ConstantTimeCompare(key.Plaintext, decryptedKey) != 1:
kmsStat.Decrypt = "Decryption failed: decrypted key does not match generated key"
default:
kmsStat.Decrypt = "success"
}
return kmsStat
}
// fetchKMSStatusV2 fetches KMS-related status information for all instances
func fetchKMSStatusV2(ctx context.Context) []madmin.KMS {
// fetchKMSStatus fetches KMS-related status information for all instances
func fetchKMSStatus(ctx context.Context) []madmin.KMS {
if GlobalKMS == nil {
return []madmin.KMS{}
}
results := GlobalKMS.Verify(ctx)
stats := []madmin.KMS{}
for _, result := range results {
stats = append(stats, madmin.KMS{
Status: result.Status,
Endpoint: result.Endpoint,
Encrypt: result.Encrypt,
Decrypt: result.Decrypt,
Version: result.Version,
})
stat, err := GlobalKMS.Status(ctx)
if err != nil {
kmsLogIf(ctx, err, "failed to fetch KMS status information")
return []madmin.KMS{}
}
stats := make([]madmin.KMS, 0, len(stat.Endpoints))
for endpoint, state := range stat.Endpoints {
stats = append(stats, madmin.KMS{
Status: string(state),
Endpoint: endpoint,
})
}
return stats
}
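The KMS hunks above replace the positional Stat/GenerateKey/DecryptKey calls with request structs and a Status method. A sketch of a key health check against that API, assuming a configured GlobalKMS (a MinIO server global) and that the generated key still exposes Plaintext alongside KeyID and Ciphertext; imports omitted, not runnable standalone:

```
// Sketch only: generate-then-decrypt round trip using the request-struct KMS API.
func kmsKeyRoundTrip(ctx context.Context, keyID string) error {
	assoc := kms.Context{"purpose": "health-check"} // associated data for the test operation
	key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
		Name:           keyID,
		AssociatedData: assoc,
	})
	if err != nil {
		return err
	}
	plaintext, err := GlobalKMS.Decrypt(ctx, &kms.DecryptRequest{
		Name:           key.KeyID,
		Ciphertext:     key.Ciphertext,
		AssociatedData: assoc,
	})
	if err != nil {
		return err
	}
	if subtle.ConstantTimeCompare(key.Plaintext, plaintext) != 1 {
		return errors.New("decrypted key does not match generated key")
	}
	return nil
}
```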
@@ -3172,11 +3200,11 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
file = strings.ReplaceAll(file, string(os.PathSeparator), "/")
file = filepath.ToSlash(file)
// Reject attempts to traverse parent or absolute paths.
if strings.Contains(file, "..") || strings.Contains(volume, "..") {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
if hasBadPathComponent(volume) || hasBadPathComponent(file) {
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL)
return
}
@@ -3195,6 +3223,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
return
}
}
addErr := func(msg string) {}
// Write a version for making *incompatible* changes.
// The AdminClient will reject any version it does not know.
@@ -3234,6 +3263,11 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
bugLogIf(ctx, stream.AddError(err.Error()))
return
}
addErr = func(msg string) {
inspectZipW.Close()
encStream.Close()
stream.AddError(msg)
}
defer encStream.Close()
inspectZipW = zip.NewWriter(encStream)
@@ -3314,18 +3348,6 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
}
return nil
}
err := o.GetRawData(ctx, volume, file, rawDataFn)
if !errors.Is(err, errFileNotFound) {
adminLogIf(ctx, err)
}
// save the format.json as part of inspect by default
if !(volume == minioMetaBucket && file == formatConfigFile) {
err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
}
if !errors.Is(err, errFileNotFound) {
adminLogIf(ctx, err)
}
// save args passed to inspect command
var sb bytes.Buffer
@@ -3338,6 +3360,24 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
sb.WriteString("\n")
adminLogIf(ctx, embedFileInZip(inspectZipW, "inspect-input.txt", sb.Bytes(), 0o600))
err := o.GetRawData(ctx, volume, file, rawDataFn)
if err != nil {
if errors.Is(err, errFileNotFound) {
addErr("GetRawData: No files matched the given pattern")
return
}
embedFileInZip(inspectZipW, "GetRawData-err.txt", []byte(err.Error()), 0o600)
adminLogIf(ctx, err)
}
// save the format.json as part of inspect by default
if !(volume == minioMetaBucket && file == formatConfigFile) {
err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
}
if !errors.Is(err, errFileNotFound) {
adminLogIf(ctx, err)
}
scheme := "https"
if !globalIsTLS {
scheme = "http"

View File

@@ -63,8 +63,8 @@ const (
)
var (
errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long")
errHealStopSignalled = fmt.Errorf("heal stop signaled")
errHealIdleTimeout = errors.New("healing results were not consumed for too long")
errHealStopSignalled = errors.New("heal stop signaled")
errFnHealFromAPIErr = func(ctx context.Context, err error) error {
apiErr := toAdminAPIErr(ctx, err)
@@ -455,8 +455,8 @@ type healSequence struct {
// Number of total items healed against item type
healedItemsMap map[madmin.HealItemType]int64
// Number of total items where healing failed against endpoint and drive state
healFailedItemsMap map[string]int64
// Number of total items where healing failed against item type
healFailedItemsMap map[madmin.HealItemType]int64
// The time of the last scan/heal activity
lastHealActivity time.Time
@@ -497,7 +497,7 @@ func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
ctx: ctx,
scannedItemsMap: make(map[madmin.HealItemType]int64),
healedItemsMap: make(map[madmin.HealItemType]int64),
healFailedItemsMap: make(map[string]int64),
healFailedItemsMap: make(map[madmin.HealItemType]int64),
}
}
@@ -543,12 +543,12 @@ func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
// getHealFailedItemsMap - returns map of all items where heal failed against
// drive endpoint and status
func (h *healSequence) getHealFailedItemsMap() map[string]int64 {
func (h *healSequence) getHealFailedItemsMap() map[madmin.HealItemType]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()
// Make a copy before returning the value
retMap := make(map[string]int64, len(h.healFailedItemsMap))
retMap := make(map[madmin.HealItemType]int64, len(h.healFailedItemsMap))
for k, v := range h.healFailedItemsMap {
retMap[k] = v
}
@@ -556,29 +556,27 @@ func (h *healSequence) getHealFailedItemsMap() map[string]int64 {
return retMap
}
func (h *healSequence) countFailed(res madmin.HealResultItem) {
func (h *healSequence) countFailed(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()
for _, d := range res.After.Drives {
// For failed items we report the endpoint and drive state
// This will help users take corrective actions for drives
h.healFailedItemsMap[d.Endpoint+","+d.State]++
}
h.healFailedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}
func (h *healSequence) countHeals(healType madmin.HealItemType, healed bool) {
func (h *healSequence) countScanned(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()
if !healed {
h.scannedItemsMap[healType]++
} else {
h.healedItemsMap[healType]++
}
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}
func (h *healSequence) countHealed(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()
h.healedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}
@@ -734,7 +732,7 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
task.opts.ScanMode = madmin.HealNormalScan
}
h.countHeals(healType, false)
h.countScanned(healType)
if source.noWait {
select {
@@ -766,6 +764,11 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
// task queued, now wait for the response.
select {
case res := <-task.respCh:
if res.err == nil {
h.countHealed(healType)
} else {
h.countFailed(healType)
}
if !h.reportProgress {
if errors.Is(res.err, errSkipFile) { // this is only sent usually by nopHeal
return nil
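With the hunks above, the failure map is keyed by madmin.HealItemType rather than by "endpoint,state", and the old countHeals is split so scanning and completion are counted at different points. A compressed sketch of the resulting flow inside queueHealTask, using only names that appear in the diff:

```
// Sketch: per-item bookkeeping after the split of countHeals.
h.countScanned(healType) // every queued task counts as scanned

select {
case res := <-task.respCh:
	if res.err == nil {
		h.countHealed(healType) // healedItemsMap[healType]++
	} else {
		h.countFailed(healType) // healFailedItemsMap[healType]++, no longer per-drive
	}
	// ... existing progress/error handling continues here
}
```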

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@@ -18,7 +18,6 @@
package cmd
import (
"context"
"math"
"net/http"
"os"
@@ -31,6 +30,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/kms"
xnet "github.com/minio/pkg/v3/net"
)
// getLocalServerProperty - returns madmin.ServerProperties for only the
@@ -64,9 +64,11 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
if err := isServerResolvable(endpoint, 5*time.Second); err == nil {
network[nodeName] = string(madmin.ItemOnline)
} else {
network[nodeName] = string(madmin.ItemOffline)
// log once the error
peersLogOnceIf(context.Background(), err, nodeName)
if xnet.IsNetworkOrHostDown(err, false) {
network[nodeName] = string(madmin.ItemOffline)
} else if xnet.IsNetworkOrHostDown(err, true) {
network[nodeName] = "connection attempt timedout"
}
}
}
}

View File

@@ -67,7 +67,7 @@ type ObjectToDelete struct {
ReplicateDecisionStr string `xml:"-"`
}
// createBucketConfiguration container for bucket configuration request from client.
// createBucketLocationConfiguration container for bucket configuration request from client.
// Used for parsing the location from the request body for Makebucket.
type createBucketLocationConfiguration struct {
XMLName xml.Name `xml:"CreateBucketConfiguration" json:"-"`

View File

@@ -48,7 +48,7 @@ import (
levent "github.com/minio/minio/internal/config/lambda/event"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/hash"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// APIError structure
@@ -56,19 +56,23 @@ type APIError struct {
Code string
Description string
HTTPStatusCode int
ObjectSize string
RangeRequested string
}
// APIErrorResponse - error response format
type APIErrorResponse struct {
XMLName xml.Name `xml:"Error" json:"-"`
Code string
Message string
Key string `xml:"Key,omitempty" json:"Key,omitempty"`
BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"`
Resource string
Region string `xml:"Region,omitempty" json:"Region,omitempty"`
RequestID string `xml:"RequestId" json:"RequestId"`
HostID string `xml:"HostId" json:"HostId"`
XMLName xml.Name `xml:"Error" json:"-"`
Code string
Message string
Key string `xml:"Key,omitempty" json:"Key,omitempty"`
BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"`
Resource string
Region string `xml:"Region,omitempty" json:"Region,omitempty"`
RequestID string `xml:"RequestId" json:"RequestId"`
HostID string `xml:"HostId" json:"HostId"`
ActualObjectSize string `xml:"ActualObjectSize,omitempty" json:"ActualObjectSize,omitempty"`
RangeRequested string `xml:"RangeRequested,omitempty" json:"RangeRequested,omitempty"`
}
// APIErrorCode type of error status.
@@ -263,6 +267,7 @@ const (
ErrInvalidResourceName
ErrInvalidLifecycleQueryParameter
ErrServerNotInitialized
ErrBucketMetadataNotInitialized
ErrRequestTimedout
ErrClientDisconnected
ErrTooManyRequests
@@ -282,6 +287,7 @@ const (
ErrAdminNoSuchGroup
ErrAdminGroupNotEmpty
ErrAdminGroupDisabled
ErrAdminInvalidGroupName
ErrAdminNoSuchJob
ErrAdminNoSuchPolicy
ErrAdminPolicyChangeAlreadyApplied
@@ -420,6 +426,7 @@ const (
ErrAdminProfilerNotEnabled
ErrInvalidDecompressedSize
ErrAddUserInvalidArgument
ErrAddUserValidUTF
ErrAdminResourceInvalidArgument
ErrAdminAccountNotEligible
ErrAccountNotEligible
@@ -438,6 +445,8 @@ const (
ErrAdminNoAccessKey
ErrAdminNoSecretKey
ErrIAMNotInitialized
apiErrCodeEnd // This is used only for the testing code
)
@@ -451,9 +460,9 @@ func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError
if err != nil {
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
}
if globalSite.Region != "" {
if region := globalSite.Region(); region != "" {
if errCode == ErrAuthorizationHeaderMalformed {
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", region)
return apiErr
}
}
@@ -960,7 +969,7 @@ var errorCodes = errorCodeMap{
ErrReplicationRemoteConnectionError: {
Code: "XMinioAdminReplicationRemoteConnectionError",
Description: "Remote service connection error",
HTTPStatusCode: http.StatusNotFound,
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrReplicationBandwidthLimitError: {
Code: "XMinioAdminReplicationBandwidthLimitError",
@@ -1295,7 +1304,17 @@ var errorCodes = errorCodeMap{
},
ErrServerNotInitialized: {
Code: "XMinioServerNotInitialized",
Description: "Server not initialized, please try again.",
Description: "Server not initialized yet, please try again.",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrIAMNotInitialized: {
Code: "XMinioIAMNotInitialized",
Description: "IAM sub-system not initialized yet, please try again.",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrBucketMetadataNotInitialized: {
Code: "XMinioBucketMetadataNotInitialized",
Description: "Bucket metadata not initialized yet, please try again.",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrMalformedJSON: {
@@ -1468,7 +1487,7 @@ var errorCodes = errorCodeMap{
ErrTooManyRequests: {
Code: "TooManyRequests",
Description: "Deadline exceeded while waiting in incoming queue, please reduce your request rate",
HTTPStatusCode: http.StatusServiceUnavailable,
HTTPStatusCode: http.StatusTooManyRequests,
},
ErrUnsupportedMetadata: {
Code: "InvalidArgument",
@@ -2091,6 +2110,16 @@ var errorCodes = errorCodeMap{
Description: "Expected LDAP short username but was given full DN.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminInvalidGroupName: {
Code: "XMinioInvalidGroupName",
Description: "The group name is invalid.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAddUserValidUTF: {
Code: "XMinioInvalidUTF",
Description: "Invalid UTF-8 character detected.",
HTTPStatusCode: http.StatusBadRequest,
},
}
// toAPIErrorCode - Converts embedded errors. Convenience
@@ -2130,6 +2159,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrAdminNoSuchGroup
case errGroupNotEmpty:
apiErr = ErrAdminGroupNotEmpty
case errGroupNameContainsReservedChars:
apiErr = ErrAdminInvalidGroupName
case errNoSuchJob:
apiErr = ErrAdminNoSuchJob
case errNoPolicyToAttachOrDetach:
@@ -2144,6 +2175,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrEntityTooSmall
case errAuthentication:
apiErr = ErrAccessDenied
case auth.ErrContainsReservedChars:
apiErr = ErrAdminInvalidAccessKey
case auth.ErrInvalidAccessKeyLength:
apiErr = ErrAdminInvalidAccessKey
case auth.ErrInvalidSecretKeyLength:
@@ -2211,6 +2244,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrInvalidMaxParts
case ioutil.ErrOverread:
apiErr = ErrExcessData
case errServerNotInitialized:
apiErr = ErrServerNotInitialized
case errBucketMetadataNotInitialized:
apiErr = ErrBucketMetadataNotInitialized
}
// Compression errors
@@ -2402,10 +2439,9 @@ func toAPIError(ctx context.Context, err error) APIError {
apiErr := errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
switch apiErr.Code {
case "NotImplemented":
desc := fmt.Sprintf("%s (%v)", apiErr.Description, err)
apiErr = APIError{
Code: apiErr.Code,
Description: desc,
Description: fmt.Sprintf("%s (%v)", apiErr.Description, err),
HTTPStatusCode: apiErr.HTTPStatusCode,
}
case "XMinioBackendDown":
@@ -2417,12 +2453,24 @@ func toAPIError(ctx context.Context, err error) APIError {
switch e := err.(type) {
case kms.Error:
apiErr = APIError{
Description: e.Err.Error(),
Code: e.APICode,
HTTPStatusCode: e.HTTPStatusCode,
Description: e.Err,
HTTPStatusCode: e.Code,
}
case batchReplicationJobError:
apiErr = APIError(e)
apiErr = APIError{
Description: e.Description,
Code: e.Code,
HTTPStatusCode: e.HTTPStatusCode,
}
case InvalidRange:
apiErr = APIError{
Code: "InvalidRange",
Description: e.Error(),
HTTPStatusCode: errorCodes[ErrInvalidRange].HTTPStatusCode,
ObjectSize: strconv.FormatInt(e.ResourceSize, 10),
RangeRequested: fmt.Sprintf("%d-%d", e.OffsetBegin, e.OffsetEnd),
}
case InvalidArgument:
apiErr = APIError{
Code: "InvalidArgument",
@@ -2549,13 +2597,15 @@ func getAPIError(code APIErrorCode) APIError {
func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID, hostID string) APIErrorResponse {
reqInfo := logger.GetReqInfo(ctx)
return APIErrorResponse{
Code: err.Code,
Message: err.Description,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
Resource: resource,
Region: globalSite.Region,
RequestID: requestID,
HostID: hostID,
Code: err.Code,
Message: err.Description,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
Resource: resource,
Region: globalSite.Region(),
RequestID: requestID,
HostID: hostID,
ActualObjectSize: err.ObjectSize,
RangeRequested: err.RangeRequested,
}
}
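To illustrate what the two new response fields buy: for a 416 (InvalidRange) error the client now also sees the object's actual size and the range it asked for. A self-contained sketch that marshals a trimmed-down copy of the response type; field names follow the diff, the values are made up:

```
package main

import (
	"encoding/xml"
	"fmt"
)

// errorResponse is a standalone mirror of the relevant APIErrorResponse fields,
// used here only to show how the new fields serialize.
type errorResponse struct {
	XMLName          xml.Name `xml:"Error"`
	Code             string
	Message          string
	ActualObjectSize string `xml:"ActualObjectSize,omitempty"`
	RangeRequested   string `xml:"RangeRequested,omitempty"`
}

func main() {
	out, _ := xml.MarshalIndent(errorResponse{
		Code:             "InvalidRange",
		Message:          "The requested range is not satisfiable",
		ActualObjectSize: "1024",
		RangeRequested:   "2048-4096",
	}, "", "  ")
	fmt.Println(string(out))
}
```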

View File

@@ -19,6 +19,7 @@ package cmd
import (
"bytes"
"context"
"encoding/json"
"encoding/xml"
"fmt"
@@ -53,7 +54,7 @@ func setCommonHeaders(w http.ResponseWriter) {
// Set `x-amz-bucket-region` only if region is set on the server
// by default minio uses an empty region.
if region := globalSite.Region; region != "" {
if region := globalSite.Region(); region != "" {
w.Header().Set(xhttp.AmzBucketRegion, region)
}
w.Header().Set(xhttp.AcceptRanges, "bytes")
@@ -107,7 +108,7 @@ func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
}
// Write object header
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
// set common headers
setCommonHeaders(w)
@@ -135,7 +136,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
// Set tag count if object has tags
if len(objInfo.UserTags) > 0 {
tags, _ := tags.ParseObjectTags(objInfo.UserTags)
if tags.Count() > 0 {
if tags != nil && tags.Count() > 0 {
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(tags.Count())}
if opts.Tagging {
// This is MinIO only extension to return back tags along with the count.
@@ -212,7 +213,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
if objInfo.IsRemote() {
// Check if object is being restored. For more information on x-amz-restore header see
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_ResponseSyntax
w.Header()[xhttp.AmzStorageClass] = []string{objInfo.TransitionedObject.Tier}
w.Header()[xhttp.AmzStorageClass] = []string{filterStorageClass(ctx, objInfo.TransitionedObject.Tier)}
}
if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {

View File

@@ -35,7 +35,7 @@ import (
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
xxml "github.com/minio/xxml"
)
@@ -544,7 +544,7 @@ func cleanReservedKeys(metadata map[string]string) map[string]string {
}
// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse {
func generateListVersionsResponse(ctx context.Context, bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse {
versions := make([]ObjectVersion, 0, len(resp.Objects))
owner := &Owner{
@@ -573,7 +573,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
@@ -634,7 +634,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
}
// generates a ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
func generateListObjectsV1Response(ctx context.Context, bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
contents := make([]Object, 0, len(resp.Objects))
owner := &Owner{
ID: globalMinioDefaultOwnerID,
@@ -654,7 +654,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
@@ -683,7 +683,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
}
// generates a ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata metaCheckFn) ListObjectsV2Response {
func generateListObjectsV2Response(ctx context.Context, bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata metaCheckFn) ListObjectsV2Response {
contents := make([]Object, 0, len(objects))
var owner *Owner
if fetchOwner {
@@ -707,7 +707,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
@@ -789,8 +789,8 @@ func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) Initi
}
// generates CompleteMultipartUploadResponse for given bucket, key, location and ETag.
func generateCompleteMultpartUploadResponse(bucket, key, location string, oi ObjectInfo) CompleteMultipartUploadResponse {
cs := oi.decryptChecksums(0)
func generateCompleteMultipartUploadResponse(bucket, key, location string, oi ObjectInfo, h http.Header) CompleteMultipartUploadResponse {
cs := oi.decryptChecksums(0, h)
c := CompleteMultipartUploadResponse{
Location: location,
Bucket: bucket,
@@ -954,9 +954,9 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
switch err.Code {
case "InvalidRegion":
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region)
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region())
case "AuthorizationHeaderMalformed":
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region())
}
// Similar check to http.checkWriteHeaderCode

View File

@@ -24,7 +24,7 @@ import (
consoleapi "github.com/minio/console/api"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/mux"
"github.com/minio/pkg/v2/wildcard"
"github.com/minio/pkg/v3/wildcard"
"github.com/rs/cors"
)
@@ -64,7 +64,7 @@ func setObjectLayer(o ObjectLayer) {
globalObjLayerMutex.Unlock()
}
// objectAPIHandler implements and provides http handlers for S3 API.
// objectAPIHandlers implements and provides http handlers for S3 API.
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
}

File diff suppressed because one or more lines are too long

View File

@@ -41,7 +41,7 @@ import (
xjwt "github.com/minio/minio/internal/jwt"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/mcontext"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// Verify if request has JWT.
@@ -178,7 +178,7 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
logger.GetReqInfo(ctx).Cred = cred
logger.GetReqInfo(ctx).Owner = owner
logger.GetReqInfo(ctx).Region = globalSite.Region
logger.GetReqInfo(ctx).Region = globalSite.Region()
return cred, owner, ErrNone
}
@@ -368,7 +368,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeSigned, authTypePresigned:
region := globalSite.Region
region := globalSite.Region()
switch action {
case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
region = ""
@@ -384,7 +384,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
logger.GetReqInfo(ctx).Cred = cred
logger.GetReqInfo(ctx).Owner = owner
logger.GetReqInfo(ctx).Region = globalSite.Region
logger.GetReqInfo(ctx).Region = globalSite.Region()
// region is valid only for CreateBucketAction.
var region string
@@ -684,7 +684,7 @@ func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool,
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypePresigned, authTypeSigned:
region := globalSite.Region
region := globalSite.Region()
if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
return cred, owner, s3Err
}
@@ -745,7 +745,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
var cred auth.Credentials
var owner bool
region := globalSite.Region
region := globalSite.Region()
switch atype {
case authTypeUnknown:
return ErrSignatureVersionNotSupported

View File

@@ -28,7 +28,7 @@ import (
"time"
"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
type nullReader struct{}
@@ -403,7 +403,7 @@ func TestIsReqAuthenticated(t *testing.T) {
// Validates all testcases.
for i, testCase := range testCases {
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region, serviceS3)
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region(), serviceS3)
if s3Error != testCase.s3Error {
if _, err := io.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
@@ -443,7 +443,7 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
{Request: mustNewPresignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
}
for i, testCase := range testCases {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region); s3Error != testCase.ErrCode {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region()); s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
}
}

View File

@@ -25,7 +25,7 @@ import (
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)
// healTask represents what to heal along with options
@@ -133,19 +133,20 @@ func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer, bgSeq *
}
}
if bgSeq != nil {
// We increment relevant counter based on the heal result for prometheus reporting.
if err != nil {
bgSeq.countFailed(res)
} else {
bgSeq.countHeals(res.Type, false)
}
}
if task.respCh != nil {
task.respCh <- healResult{result: res, err: err}
continue
}
// when respCh is not set, the caller is not waiting, but we
// still update the relevant metrics on their behalf
if bgSeq != nil {
if err == nil {
bgSeq.countHealed(res.Type)
} else {
bgSeq.countFailed(res.Type)
}
}
case <-ctx.Done():
return
}

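The heal-routine hunk above moves metric accounting so counters are only updated when no caller is waiting on `respCh` (a waiting caller accounts for the result itself). A small sketch of that dispatch pattern with hypothetical types:

```go
// Sketch: a worker either hands the result back to a waiting caller over
// respCh, or, when no caller is waiting, records success/failure counters
// itself. Types are illustrative stand-ins.
package main

import (
	"fmt"
	"sync/atomic"
)

type result struct{ err error }

type task struct {
	respCh chan result // nil when the caller is fire-and-forget
}

type counters struct{ healed, failed atomic.Int64 }

func handle(t task, res result, c *counters) {
	if t.respCh != nil {
		// caller is waiting; it will account for the result itself
		t.respCh <- res
		return
	}
	// no caller waiting, so update metrics on its behalf
	if res.err == nil {
		c.healed.Add(1)
	} else {
		c.failed.Add(1)
	}
}

func main() {
	var c counters
	handle(task{}, result{}, &c) // fire-and-forget: counted here
	fmt.Println("healed:", c.healed.Load(), "failed:", c.failed.Load())
}
```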
View File

@@ -33,7 +33,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/config"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)
const (
@@ -141,14 +141,14 @@ func initHealingTracker(disk StorageAPI, healID string) *healingTracker {
return h
}
func (h healingTracker) getLastUpdate() time.Time {
func (h *healingTracker) getLastUpdate() time.Time {
h.mu.RLock()
defer h.mu.RUnlock()
return h.LastUpdate
}
func (h healingTracker) getBucket() string {
func (h *healingTracker) getBucket() string {
h.mu.RLock()
defer h.mu.RUnlock()
@@ -162,7 +162,7 @@ func (h *healingTracker) setBucket(bucket string) {
h.Bucket = bucket
}
func (h healingTracker) getObject() string {
func (h *healingTracker) getObject() string {
h.mu.RLock()
defer h.mu.RUnlock()
@@ -453,10 +453,6 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
if len(tracker.QueuedBuckets) > 0 {
return fmt.Errorf("not all buckets were healed: %v", tracker.QueuedBuckets)
}
if serverDebugLog {
tracker.printTo(os.Stdout)
fmt.Printf("\n")

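The `healingTracker` getters above change from value receivers to pointer receivers. Since the tracker embeds a mutex, a value receiver copies the lock, so the `RLock` taken in the getter no longer synchronizes with writers holding the original lock (`go vet` also flags copied locks). A minimal illustration with stand-in types, not the actual healingTracker:

```go
// Sketch: getters on a struct that embeds a mutex must use pointer
// receivers, otherwise each call locks a copy of the mutex.
package main

import (
	"fmt"
	"sync"
)

type tracker struct {
	mu     sync.RWMutex
	bucket string
}

// Correct: pointer receiver, locks the same mutex the writer uses.
func (t *tracker) getBucket() string {
	t.mu.RLock()
	defer t.mu.RUnlock()
	return t.bucket
}

func (t *tracker) setBucket(b string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.bucket = b
}

func main() {
	t := &tracker{}
	t.setBucket("images")
	fmt.Println(t.getBucket())
}
```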
View File

@@ -33,9 +33,9 @@ import (
"github.com/minio/minio/internal/bucket/versioning"
xhttp "github.com/minio/minio/internal/http"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/wildcard"
"github.com/minio/pkg/v2/workers"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/wildcard"
"github.com/minio/pkg/v3/workers"
"gopkg.in/yaml.v3"
)
@@ -432,7 +432,7 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
})
if err != nil {
stopFn(exp, err)
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", toExpire[i].Bucket, toExpire[i].Name, toExpire[i].VersionID, err, attempts))
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s due to %v (attempts=%d)", exp.Bucket, exp.Name, err, attempts))
} else {
stopFn(exp, err)
success = true
@@ -464,25 +464,25 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
copy(toDelCopy, toDel)
var failed int
errs := r.Expire(ctx, api, vc, toDel)
// reslice toDel in preparation for next retry
// attempt
// reslice toDel in preparation for next retry attempt
toDel = toDel[:0]
for i, err := range errs {
if err != nil {
stopFn(toDelCopy[i], err)
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID, err, attempts))
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID,
err, attempts))
failed++
if attempts == retryAttempts { // all retry attempts failed, record failure
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
ri.trackCurrentBucketObject(r.Bucket, *oi, false)
}
} else {
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
ri.trackCurrentBucketObject(r.Bucket, *oi, false, attempts)
}
if attempts != retryAttempts {
// retry
toDel = append(toDel, toDelCopy[i])
}
} else {
stopFn(toDelCopy[i], nil)
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
ri.trackCurrentBucketObject(r.Bucket, *oi, true)
ri.trackCurrentBucketObject(r.Bucket, *oi, true, attempts)
}
}
}
@@ -537,7 +537,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
ctx, cancel := context.WithCancel(ctx)
defer cancel()
results := make(chan ObjectInfo, workerSize)
results := make(chan itemOrErr[ObjectInfo], workerSize)
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
Marker: lastObject,
LatestOnly: false, // we need to visit all versions of the object to implement purge: retainVersions
@@ -584,11 +584,18 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
versionsCount int
toDel []expireObjInfo
)
failed := true
for result := range results {
if result.Err != nil {
failed = true
batchLogIf(ctx, result.Err)
continue
}
// Apply filter to find the matching rule to apply expiry
// actions accordingly.
// nolint:gocritic
if result.IsLatest {
if result.Item.IsLatest {
// send down filtered entries to be deleted using
// DeleteObjects method
if len(toDel) > 10 { // batch up to 10 objects/versions to be expired simultaneously.
@@ -609,7 +616,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
var match BatchJobExpireFilter
var found bool
for _, rule := range r.Rules {
if rule.Matches(result, now) {
if rule.Matches(result.Item, now) {
match = rule
found = true
break
@@ -619,18 +626,18 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
continue
}
prevObj = result
prevObj = result.Item
matchedFilter = match
versionsCount = 1
// Include the latest version
if matchedFilter.Purge.RetainVersions == 0 {
toDel = append(toDel, expireObjInfo{
ObjectInfo: result,
ObjectInfo: result.Item,
ExpireAll: true,
})
continue
}
} else if prevObj.Name == result.Name {
} else if prevObj.Name == result.Item.Name {
if matchedFilter.Purge.RetainVersions == 0 {
continue // including latest version in toDel suffices, skipping other versions
}
@@ -643,7 +650,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
continue // retain versions
}
toDel = append(toDel, expireObjInfo{
ObjectInfo: result,
ObjectInfo: result.Item,
})
}
// Send any remaining objects downstream
@@ -658,8 +665,8 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
<-expireDoneCh // waits for the expire goroutine to complete
wk.Wait() // waits for all expire workers to retire
ri.Complete = ri.ObjectsFailed == 0
ri.Failed = ri.ObjectsFailed > 0
ri.Complete = !failed && ri.ObjectsFailed == 0
ri.Failed = failed || ri.ObjectsFailed > 0
globalBatchJobsMetrics.save(job.ID, ri)
// Close the saverQuitCh - this also triggers saving in-memory state

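The batch-expire hunks above replace `chan ObjectInfo` with `chan itemOrErr[ObjectInfo]` and track a `failed` flag, so listing errors are surfaced instead of silently dropped and the job is not marked complete. A sketch of that wrapper pattern, using a local `itemOrErr` with the same shape:

```go
// Sketch: the walker sends a small generic wrapper so a listing error can
// flow down the same channel as the items, and the consumer can mark the
// job as failed instead of silently losing entries.
package main

import (
	"errors"
	"fmt"
)

type itemOrErr[T any] struct {
	Item T
	Err  error
}

func walk(out chan<- itemOrErr[string]) {
	defer close(out)
	out <- itemOrErr[string]{Item: "bucket/a.txt"}
	out <- itemOrErr[string]{Err: errors.New("drive offline")} // listing error
	out <- itemOrErr[string]{Item: "bucket/b.txt"}
}

func main() {
	ch := make(chan itemOrErr[string], 3)
	go walk(ch)

	failed := false
	for res := range ch {
		if res.Err != nil {
			failed = true // job cannot be marked Complete
			fmt.Println("walk error:", res.Err)
			continue
		}
		fmt.Println("expire candidate:", res.Item)
	}
	fmt.Println("failed:", failed)
}
```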
View File

@@ -48,10 +48,10 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/ioutil"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/pkg/v2/console"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/workers"
"github.com/minio/pkg/v3/console"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/policy"
"github.com/minio/pkg/v3/workers"
"gopkg.in/yaml.v3"
)
@@ -447,7 +447,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
} else {
stopFn(oi, nil)
}
ri.trackCurrentBucketObject(r.Target.Bucket, oi, success)
ri.trackCurrentBucketObject(r.Target.Bucket, oi, success, attempts)
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk after every 10secs.
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
@@ -690,6 +690,7 @@ type batchJobInfo struct {
StartTime time.Time `json:"startTime" msg:"st"`
LastUpdate time.Time `json:"lastUpdate" msg:"lu"`
RetryAttempts int `json:"retryAttempts" msg:"ra"`
Attempts int `json:"attempts" msg:"at"`
Complete bool `json:"complete" msg:"cmp"`
Failed bool `json:"failed" msg:"fld"`
@@ -833,13 +834,15 @@ func (ri *batchJobInfo) clone() *batchJobInfo {
ObjectsFailed: ri.ObjectsFailed,
BytesTransferred: ri.BytesTransferred,
BytesFailed: ri.BytesFailed,
Attempts: ri.Attempts,
}
}
func (ri *batchJobInfo) countItem(size int64, dmarker, success bool) {
func (ri *batchJobInfo) countItem(size int64, dmarker, success bool, attempt int) {
if ri == nil {
return
}
ri.Attempts++
if success {
if dmarker {
ri.DeleteMarkers++
@@ -847,7 +850,19 @@ func (ri *batchJobInfo) countItem(size int64, dmarker, success bool) {
ri.Objects++
ri.BytesTransferred += size
}
if attempt > 1 {
if dmarker {
ri.DeleteMarkersFailed--
} else {
ri.ObjectsFailed--
ri.BytesFailed += size
}
}
} else {
if attempt > 1 {
// Only count first attempt
return
}
if dmarker {
ri.DeleteMarkersFailed++
} else {
@@ -921,7 +936,7 @@ func (ri *batchJobInfo) trackMultipleObjectVersions(bucket string, info ObjectIn
}
}
func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo, success bool) {
func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo, success bool, attempt int) {
if ri == nil {
return
}
@@ -931,7 +946,7 @@ func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo,
ri.Bucket = bucket
ri.Object = info.Name
ri.countItem(info.Size, info.DeleteMarker, success)
ri.countItem(info.Size, info.DeleteMarker, success, attempt)
}
func (ri *batchJobInfo) trackCurrentBucketBatch(bucket string, batch []ObjectInfo) {
@@ -945,7 +960,7 @@ func (ri *batchJobInfo) trackCurrentBucketBatch(bucket string, batch []ObjectInf
ri.Bucket = bucket
for i := range batch {
ri.Object = batch[i].Name
ri.countItem(batch[i].Size, batch[i].DeleteMarker, true)
ri.countItem(batch[i].Size, batch[i].DeleteMarker, true, 1)
}
}
@@ -1057,8 +1072,8 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID)
var (
walkCh = make(chan ObjectInfo, 100)
slowCh = make(chan ObjectInfo, 100)
walkCh = make(chan itemOrErr[ObjectInfo], 100)
slowCh = make(chan itemOrErr[ObjectInfo], 100)
)
if !*r.Source.Snowball.Disable && r.Source.Type.isMinio() && r.Target.Type.isMinio() {
@@ -1084,7 +1099,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
if err := r.writeAsArchive(ctx, api, cl, batch); err != nil {
batchLogIf(ctx, err)
for _, b := range batch {
slowCh <- b
slowCh <- itemOrErr[ObjectInfo]{Item: b}
}
} else {
ri.trackCurrentBucketBatch(r.Source.Bucket, batch)
@@ -1095,12 +1110,12 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
}
}
for obj := range walkCh {
if obj.DeleteMarker || !obj.VersionPurgeStatus.Empty() || obj.Size >= int64(smallerThan) {
if obj.Item.DeleteMarker || !obj.Item.VersionPurgeStatus.Empty() || obj.Item.Size >= int64(smallerThan) {
slowCh <- obj
continue
}
batch = append(batch, obj)
batch = append(batch, obj.Item)
if len(batch) < *r.Source.Snowball.Batch {
continue
@@ -1153,8 +1168,13 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
prevObj := ""
skipReplicate := false
for result := range slowCh {
result := result
for res := range slowCh {
if res.Err != nil {
ri.Failed = true
batchLogIf(ctx, res.Err)
continue
}
result := res.Item
if result.Name != prevObj {
prevObj = result.Name
skipReplicate = result.DeleteMarker && s3Type
@@ -1183,7 +1203,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
} else {
stopFn(result, nil)
}
ri.trackCurrentBucketObject(r.Source.Bucket, result, success)
ri.trackCurrentBucketObject(r.Source.Bucket, result, success, attempts)
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk after every 10secs.
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
@@ -1231,6 +1251,7 @@ type batchReplicationJobError struct {
Code string
Description string
HTTPStatusCode int
ObjectSize int64
}
func (e batchReplicationJobError) Error() string {
@@ -1247,9 +1268,18 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
return errInvalidArgument
}
if r.Source.Bucket == "" {
if r.Source.Endpoint != "" && r.Target.Endpoint != "" {
return errInvalidArgument
}
if r.Source.Creds.Empty() && r.Target.Creds.Empty() {
return errInvalidArgument
}
if r.Source.Bucket == "" || r.Target.Bucket == "" {
return errInvalidArgument
}
var isRemoteToLocal bool
localBkt := r.Source.Bucket
if r.Source.Endpoint != "" {
@@ -1274,9 +1304,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
if err := r.Source.Snowball.Validate(); err != nil {
return err
}
if r.Source.Creds.Empty() && r.Target.Creds.Empty() {
return errInvalidArgument
}
if !r.Source.Creds.Empty() {
if err := r.Source.Creds.Validate(); err != nil {
@@ -1298,9 +1325,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
if r.Target.Endpoint != "" && !r.Target.Type.isMinio() && !r.Target.ValidPath() {
return errInvalidArgument
}
if r.Target.Bucket == "" {
return errInvalidArgument
}
if !r.Target.Creds.Empty() {
if err := r.Target.Creds.Validate(); err != nil {
@@ -1308,10 +1332,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
}
}
if r.Source.Creds.Empty() && r.Target.Creds.Empty() {
return errInvalidArgument
}
if err := r.Target.Type.Validate(); err != nil {
return err
}
@@ -1456,7 +1476,7 @@ func (j *BatchJobRequest) load(ctx context.Context, api ObjectLayer, name string
func batchReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
// TODO: support custom storage class for remote replication
putOpts, err = putReplicationOpts(ctx, "", objInfo)
putOpts, err = putReplicationOpts(ctx, "", objInfo, 0)
if err != nil {
return putOpts, err
}
@@ -1484,7 +1504,7 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
jobType = string(madmin.BatchJobReplicate)
}
resultCh := make(chan ObjectInfo)
resultCh := make(chan itemOrErr[ObjectInfo])
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@@ -1496,8 +1516,12 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
listResult := madmin.ListBatchJobsResult{}
for result := range resultCh {
if result.Err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, result.Err), r.URL)
return
}
req := &BatchJobRequest{}
if err := req.load(ctx, objectAPI, result.Name); err != nil {
if err := req.load(ctx, objectAPI, result.Item.Name); err != nil {
if !errors.Is(err, errNoSuchJob) {
batchLogIf(ctx, err)
}
@@ -1609,7 +1633,7 @@ func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request)
return
}
job.ID = fmt.Sprintf("%s:%d", shortuuid.New(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
job.ID = fmt.Sprintf("%s%s%d", shortuuid.New(), getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
job.User = user
job.Started = time.Now()
@@ -1702,7 +1726,7 @@ func newBatchJobPool(ctx context.Context, o ObjectLayer, workers int) *BatchJobP
}
func (j *BatchJobPool) resume() {
results := make(chan ObjectInfo, 100)
results := make(chan itemOrErr[ObjectInfo], 100)
ctx, cancel := context.WithCancel(j.ctx)
defer cancel()
if err := j.objLayer.Walk(ctx, minioMetaBucket, batchJobPrefix, results, WalkOptions{}); err != nil {
@@ -1710,12 +1734,16 @@ func (j *BatchJobPool) resume() {
return
}
for result := range results {
if result.Err != nil {
batchLogIf(j.ctx, result.Err)
continue
}
// ignore batch-replicate.bin and batch-rotate.bin entries
if strings.HasSuffix(result.Name, slashSeparator) {
if strings.HasSuffix(result.Item.Name, slashSeparator) {
continue
}
req := &BatchJobRequest{}
if err := req.load(ctx, j.objLayer, result.Name); err != nil {
if err := req.load(ctx, j.objLayer, result.Item.Name); err != nil {
batchLogIf(ctx, err)
continue
}

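The batch-replication hunks above thread an `attempt` number into `countItem` and add an `Attempts` counter. The intent, as reflected in the diff: every call records an attempt, a failure is counted only on the first attempt, and a later successful retry cancels the earlier failure. A compact stand-in sketch of that accounting (byte counters omitted):

```go
// Sketch of attempt-aware accounting. Field names mirror batchJobInfo but
// this is a simplified stand-in.
package main

import "fmt"

type jobInfo struct {
	Attempts, Objects, ObjectsFailed int
}

func (ri *jobInfo) countItem(success bool, attempt int) {
	ri.Attempts++
	if success {
		ri.Objects++
		if attempt > 1 {
			ri.ObjectsFailed-- // earlier attempt was counted as a failure
		}
		return
	}
	if attempt > 1 {
		return // only the first failed attempt is counted
	}
	ri.ObjectsFailed++
}

func main() {
	var ri jobInfo
	ri.countItem(false, 1)  // first attempt fails
	ri.countItem(true, 2)   // retry succeeds
	fmt.Printf("%+v\n", ri) // {Attempts:2 Objects:1 ObjectsFailed:0}
}
```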
View File

@@ -419,6 +419,12 @@ func (z *batchJobInfo) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "at":
z.Attempts, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Attempts")
return
}
case "cmp":
z.Complete, err = dc.ReadBool()
if err != nil {
@@ -492,9 +498,9 @@ func (z *batchJobInfo) DecodeMsg(dc *msgp.Reader) (err error) {
// EncodeMsg implements msgp.Encodable
func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 16
// map header, size 17
// write "v"
err = en.Append(0xde, 0x0, 0x10, 0xa1, 0x76)
err = en.Append(0xde, 0x0, 0x11, 0xa1, 0x76)
if err != nil {
return
}
@@ -553,6 +559,16 @@ func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "RetryAttempts")
return
}
// write "at"
err = en.Append(0xa2, 0x61, 0x74)
if err != nil {
return
}
err = en.WriteInt(z.Attempts)
if err != nil {
err = msgp.WrapError(err, "Attempts")
return
}
// write "cmp"
err = en.Append(0xa3, 0x63, 0x6d, 0x70)
if err != nil {
@@ -659,9 +675,9 @@ func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
// MarshalMsg implements msgp.Marshaler
func (z *batchJobInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 16
// map header, size 17
// string "v"
o = append(o, 0xde, 0x0, 0x10, 0xa1, 0x76)
o = append(o, 0xde, 0x0, 0x11, 0xa1, 0x76)
o = msgp.AppendInt(o, z.Version)
// string "jid"
o = append(o, 0xa3, 0x6a, 0x69, 0x64)
@@ -678,6 +694,9 @@ func (z *batchJobInfo) MarshalMsg(b []byte) (o []byte, err error) {
// string "ra"
o = append(o, 0xa2, 0x72, 0x61)
o = msgp.AppendInt(o, z.RetryAttempts)
// string "at"
o = append(o, 0xa2, 0x61, 0x74)
o = msgp.AppendInt(o, z.Attempts)
// string "cmp"
o = append(o, 0xa3, 0x63, 0x6d, 0x70)
o = msgp.AppendBool(o, z.Complete)
@@ -765,6 +784,12 @@ func (z *batchJobInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "at":
z.Attempts, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Attempts")
return
}
case "cmp":
z.Complete, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
@@ -839,6 +864,6 @@ func (z *batchJobInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *batchJobInfo) Msgsize() (s int) {
s = 3 + 2 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.JobID) + 3 + msgp.StringPrefixSize + len(z.JobType) + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.IntSize + 4 + msgp.BoolSize + 4 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size
s = 3 + 2 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.JobID) + 3 + msgp.StringPrefixSize + len(z.JobType) + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.IntSize + 3 + msgp.IntSize + 4 + msgp.BoolSize + 4 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size
return
}

View File

@@ -23,7 +23,7 @@ import (
"time"
"github.com/dustin/go-humanize"
"github.com/minio/pkg/v2/wildcard"
"github.com/minio/pkg/v3/wildcard"
"gopkg.in/yaml.v3"
)

View File

@@ -33,8 +33,8 @@ import (
"github.com/minio/minio/internal/crypto"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/kms"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/workers"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/workers"
)
// keyrotate:
@@ -95,6 +95,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
if e.Type == ssekms && spaces {
return crypto.ErrInvalidEncryptionKeyID
}
if e.Type == ssekms && GlobalKMS != nil {
ctx := kms.Context{}
if e.Context != "" {
@@ -113,7 +114,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
e.kmsContext[k] = v
}
ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation
if _, err := GlobalKMS.GenerateKey(GlobalContext, e.Key, ctx); err != nil {
if _, err := GlobalKMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{Name: e.Key, AssociatedData: ctx}); err != nil {
return err
}
}
@@ -356,7 +357,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
retryAttempts := ri.RetryAttempts
ctx, cancel := context.WithCancel(ctx)
results := make(chan ObjectInfo, 100)
results := make(chan itemOrErr[ObjectInfo], 100)
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
Marker: lastObject,
Filter: selectObj,
@@ -365,9 +366,14 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
// Do not need to retry if we can't list objects on source.
return err
}
for result := range results {
result := result
failed := false
for res := range results {
if res.Err != nil {
failed = true
batchLogIf(ctx, res.Err)
break
}
result := res.Item
sseKMS := crypto.S3KMS.IsEncrypted(result.UserDefined)
sseS3 := crypto.S3.IsEncrypted(result.UserDefined)
if !sseKMS && !sseS3 { // neither sse-s3 nor sse-kms disallowed
@@ -377,7 +383,6 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
go func() {
defer wk.Give()
for attempts := 1; attempts <= retryAttempts; attempts++ {
attempts := attempts
stopFn := globalBatchJobsMetrics.trace(batchJobMetricKeyRotation, job.ID, attempts)
success := true
if err := r.KeyRotate(ctx, api, result); err != nil {
@@ -387,8 +392,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
} else {
stopFn(result, nil)
}
ri.trackCurrentBucketObject(r.Bucket, result, success)
ri.RetryAttempts = attempts
ri.trackCurrentBucketObject(r.Bucket, result, success, attempts)
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk after every 10secs.
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
@@ -407,8 +411,8 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
}
wk.Wait()
ri.Complete = ri.ObjectsFailed == 0
ri.Failed = ri.ObjectsFailed > 0
ri.Complete = !failed && ri.ObjectsFailed == 0
ri.Failed = failed || ri.ObjectsFailed > 0
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk.
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
@@ -475,8 +479,5 @@ func (r *BatchJobKeyRotateV1) Validate(ctx context.Context, job BatchJobRequest,
}
}
if err := r.Flags.Retry.Validate(); err != nil {
return err
}
return nil
return r.Flags.Retry.Validate()
}

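The key-rotation hunk above (and the bucket-encryption and bucket-metadata hunks below) switch `GenerateKey`/`Decrypt` calls from positional arguments to request structs such as `kms.GenerateKeyRequest{Name, AssociatedData}`. A hedged sketch of that call style with stand-in types, not the internal `kms` package:

```go
// Sketch: a request struct keeps KMS call sites readable as optional
// fields grow. All types below are illustrative stand-ins.
package main

import (
	"context"
	"fmt"
)

type Context map[string]string

type GenerateKeyRequest struct {
	Name           string  // key to use; empty selects the default key
	AssociatedData Context // authenticated context bound to the data key
}

type DEK struct{ Plaintext, Ciphertext []byte }

type KMS interface {
	GenerateKey(ctx context.Context, req *GenerateKeyRequest) (DEK, error)
}

type fakeKMS struct{}

func (fakeKMS) GenerateKey(ctx context.Context, req *GenerateKeyRequest) (DEK, error) {
	return DEK{Plaintext: []byte("plain"), Ciphertext: []byte("sealed:" + req.Name)}, nil
}

func main() {
	var k KMS = fakeKMS{}
	dek, err := k.GenerateKey(context.Background(),
		&GenerateKeyRequest{Name: "my-key", AssociatedData: Context{"bucket": "photos"}})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", dek.Ciphertext)
}
```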
View File

@@ -26,15 +26,17 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/ringbuffer"
)
// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow io.WriteCloser
closeWithErr func(err error) error
closeWithErr func(err error)
h hash.Hash
shardSize int64
canClose *sync.WaitGroup
byteBuf []byte
}
func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
@@ -62,7 +64,10 @@ func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
}
func (b *streamingBitrotWriter) Close() error {
// Close the underlying writer.
// This will also flush the ring buffer if used.
err := b.iow.Close()
// Wait for all data to be written before returning else it causes race conditions.
// Race condition is because of io.PipeWriter implementation. i.e consider the following
// sequence of operations:
@@ -73,29 +78,34 @@ func (b *streamingBitrotWriter) Close() error {
if b.canClose != nil {
b.canClose.Wait()
}
// Recycle the buffer.
if b.byteBuf != nil {
globalBytePoolCap.Load().Put(b.byteBuf)
b.byteBuf = nil
}
return err
}
// newStreamingBitrotWriterBuffer returns streaming bitrot writer implementation.
// The output is written to the supplied writer w.
func newStreamingBitrotWriterBuffer(w io.Writer, algo BitrotAlgorithm, shardSize int64) io.Writer {
return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil, closeWithErr: func(err error) error {
// Similar to CloseWithError on pipes we always return nil.
return nil
}}
return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil, closeWithErr: func(err error) {}}
}
// Returns streaming bitrot writer implementation.
func newStreamingBitrotWriter(disk StorageAPI, origvolume, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
r, w := io.Pipe()
h := algo.New()
buf := globalBytePoolCap.Load().Get()
rb := ringbuffer.NewBuffer(buf[:cap(buf)]).SetBlocking(true)
bw := &streamingBitrotWriter{
iow: ioutil.NewDeadlineWriter(w, globalDriveConfig.GetMaxTimeout()),
closeWithErr: w.CloseWithError,
iow: ioutil.NewDeadlineWriter(rb.WriteCloser(), globalDriveConfig.GetMaxTimeout()),
closeWithErr: rb.CloseWithError,
h: h,
shardSize: shardSize,
canClose: &sync.WaitGroup{},
byteBuf: buf,
}
bw.canClose.Add(1)
go func() {
@@ -106,7 +116,7 @@ func newStreamingBitrotWriter(disk StorageAPI, origvolume, volume, filePath stri
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
totalFileSize = bitrotSumsTotalSize + length
}
r.CloseWithError(disk.CreateFile(context.TODO(), origvolume, volume, filePath, totalFileSize, r))
rb.CloseWithError(disk.CreateFile(context.TODO(), origvolume, volume, filePath, totalFileSize, rb))
}()
return bw
}

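The bitrot-streaming hunk above replaces the `io.Pipe` plumbing with a pooled byte slice wrapped in a blocking ring buffer, and recycles that buffer on `Close`. The ring-buffer details are elided here; this sketch only shows the buffer-ownership pattern (borrow from a pool at construction, return exactly once on close), with stand-in types:

```go
// Sketch: a writer borrows a large scratch buffer from a pool when created
// and returns it exactly once when closed, so streaming writes do not
// allocate a fresh buffer per shard.
package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() any { b := make([]byte, 0, 1<<20); return &b },
}

type pooledWriter struct {
	dst *bytes.Buffer // stand-in for the disk/ring-buffer sink
	buf *[]byte       // pooled scratch buffer, returned on Close
}

func newPooledWriter(dst *bytes.Buffer) *pooledWriter {
	return &pooledWriter{dst: dst, buf: bufPool.Get().(*[]byte)}
}

func (w *pooledWriter) Write(p []byte) (int, error) {
	*w.buf = append(*w.buf, p...) // stage into the pooled buffer
	return len(p), nil
}

func (w *pooledWriter) Close() error {
	if w.buf == nil {
		return nil
	}
	_, err := w.dst.Write(*w.buf) // flush staged data to the sink
	*w.buf = (*w.buf)[:0]
	bufPool.Put(w.buf) // recycle exactly once
	w.buf = nil
	return err
}

func main() {
	var sink bytes.Buffer
	w := newPooledWriter(&sink)
	fmt.Fprint(w, "shard data")
	_ = w.Close()
	fmt.Println(sink.String())
}
```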
View File

@@ -30,7 +30,7 @@ import (
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/grid"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)
// To abstract a node over network.

View File

@@ -30,7 +30,7 @@ import (
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
const (
@@ -85,7 +85,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
kmsKey := encConfig.KeyID()
if kmsKey != "" {
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
_, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
_, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{Name: kmsKey, AssociatedData: kmsContext})
if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) {
writeErrorResponse(ctx, w, toAPIError(ctx, errKMSKeyNotFound), r.URL)

View File

@@ -61,8 +61,8 @@ import (
"github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/sync/errgroup"
"github.com/minio/pkg/v3/policy"
"github.com/minio/pkg/v3/sync/errgroup"
)
const (
@@ -72,6 +72,8 @@ const (
xMinIOErrCodeHeader = "x-minio-error-code"
xMinIOErrDescHeader = "x-minio-error-desc"
postPolicyBucketTagging = "tagging"
)
// Check if there are buckets on server without corresponding entry in etcd backend and
@@ -227,7 +229,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
// Generate response.
encodedSuccessResponse := encodeResponse(LocationResponse{})
// Get current region.
region := globalSite.Region
region := globalSite.Region()
if region != globalMinioDefaultRegion {
encodedSuccessResponse = encodeResponse(LocationResponse{
Location: region,
@@ -1415,6 +1417,19 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
return
}
if formValues.Get(postPolicyBucketTagging) != "" {
tags, err := tags.ParseObjectXML(strings.NewReader(formValues.Get(postPolicyBucketTagging)))
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
return
}
tagsStr := tags.String()
opts.UserDefined[xhttp.AmzObjectTagging] = tagsStr
} else {
// avoid the user setting an invalid tag using `X-Amz-Tagging`
delete(opts.UserDefined, xhttp.AmzObjectTagging)
}
objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -1661,7 +1676,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
case rcfg.HasActiveRules("", true):
case rcfg != nil && rcfg.HasActiveRules("", true):
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}

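The POST-policy hunk above parses an optional `tagging` form field (an S3 `<Tagging>` XML document) and stores the result under the `X-Amz-Tagging` key, deleting any user-supplied header otherwise. A self-contained sketch of that parse-and-flatten step using a local struct instead of the minio-go tags package:

```go
// Sketch: validate the <Tagging> XML from the form field, then flatten it
// into the canonical key=value&key=value header form.
package main

import (
	"encoding/xml"
	"fmt"
	"net/url"
)

type tagging struct {
	XMLName xml.Name `xml:"Tagging"`
	TagSet  struct {
		Tags []struct {
			Key   string `xml:"Key"`
			Value string `xml:"Value"`
		} `xml:"Tag"`
	} `xml:"TagSet"`
}

func parseTagging(doc string) (string, error) {
	var t tagging
	if err := xml.Unmarshal([]byte(doc), &t); err != nil {
		return "", err // malformed XML -> reject the POST request
	}
	v := url.Values{}
	for _, tag := range t.TagSet.Tags {
		v.Set(tag.Key, tag.Value)
	}
	return v.Encode(), nil // value stored under the X-Amz-Tagging key
}

func main() {
	doc := `<Tagging><TagSet><Tag><Key>env</Key><Value>prod</Value></Tag></TagSet></Tagging>`
	s, err := parseTagging(doc)
	fmt.Println(s, err) // env=prod <nil>
}
```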
View File

@@ -32,7 +32,7 @@ import (
// Wrapper for calling RemoveBucket HTTP handler tests for both Erasure multiple disks and single node setup.
func TestRemoveBucketHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testRemoveBucketHandler, []string{"RemoveBucket"})
ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testRemoveBucketHandler, endpoints: []string{"RemoveBucket"}})
}
func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -78,7 +78,7 @@ func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, a
// Wrapper for calling GetBucketPolicy HTTP handler tests for both Erasure multiple disks and single node setup.
func TestGetBucketLocationHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testGetBucketLocationHandler, []string{"GetBucketLocation"})
ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testGetBucketLocationHandler, endpoints: []string{"GetBucketLocation"}})
}
func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -220,7 +220,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
// Wrapper for calling HeadBucket HTTP handler tests for both Erasure multiple disks and single node setup.
func TestHeadBucketHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testHeadBucketHandler, []string{"HeadBucket"})
ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testHeadBucketHandler, endpoints: []string{"HeadBucket"}})
}
func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -322,7 +322,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
// Wrapper for calling TestListMultipartUploadsHandler tests for both Erasure multiple disks and single node setup.
func TestListMultipartUploadsHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testListMultipartUploadsHandler, []string{"ListMultipartUploads"})
ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testListMultipartUploadsHandler, endpoints: []string{"ListMultipartUploads"}})
}
// testListMultipartUploadsHandler - Tests validate listing of multipart uploads.
@@ -558,7 +558,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
// Wrapper for calling TestListBucketsHandler tests for both Erasure multiple disks and single node setup.
func TestListBucketsHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testListBucketsHandler, []string{"ListBuckets"})
ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testListBucketsHandler, endpoints: []string{"ListBuckets"}})
}
// testListBucketsHandler - Tests validate listing of buckets.
@@ -649,7 +649,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
// Wrapper for calling DeleteMultipleObjects HTTP handler tests for both Erasure multiple disks and single node setup.
func TestAPIDeleteMultipleObjectsHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects", "PutBucketPolicy"})
ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testAPIDeleteMultipleObjectsHandler, endpoints: []string{"DeleteMultipleObjects", "PutBucketPolicy"}})
}
func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,

View File

@@ -28,7 +28,7 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
const (
@@ -64,7 +64,8 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
}
// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
rcfg, err := globalBucketObjectLockSys.Get(bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -76,7 +77,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
}
// Validate the received bucket policy document
if err = bucketLifecycle.Validate(); err != nil {
if err = bucketLifecycle.Validate(rcfg); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

View File

@@ -29,7 +29,7 @@ import (
// Test S3 Bucket lifecycle APIs with wrong credentials
func TestBucketLifecycleWrongCredentials(t *testing.T) {
ExecObjectLayerAPITest(t, testBucketLifecycleHandlersWrongCredentials, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testBucketLifecycleHandlersWrongCredentials, endpoints: []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"}})
}
// Test for authentication
@@ -145,7 +145,7 @@ func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType,
// Test S3 Bucket lifecycle APIs
func TestBucketLifecycle(t *testing.T) {
ExecObjectLayerAPITest(t, testBucketLifecycleHandlers, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testBucketLifecycleHandlers, endpoints: []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"}})
}
// Simple tests of bucket lifecycle: PUT, GET, DELETE.

View File

@@ -40,7 +40,7 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/s3select"
xnet "github.com/minio/pkg/v2/net"
xnet "github.com/minio/pkg/v3/net"
"github.com/zeebo/xxh3"
)
@@ -72,6 +72,7 @@ func NewLifecycleSys() *LifecycleSys {
}
func ilmTrace(startTime time.Time, duration time.Duration, oi ObjectInfo, event string) madmin.TraceInfo {
sz, _ := oi.GetActualSize()
return madmin.TraceInfo{
TraceType: madmin.TraceILM,
Time: startTime,
@@ -79,6 +80,7 @@ func ilmTrace(startTime time.Time, duration time.Duration, oi ObjectInfo, event
FuncName: event,
Duration: duration,
Path: pathJoin(oi.Bucket, oi.Name),
Bytes: sz,
Error: "",
Message: getSource(4),
Custom: map[string]string{"version-id": oi.VersionID},
@@ -277,6 +279,10 @@ func (es *expiryState) getWorkerCh(h uint64) chan<- expiryOp {
}
func (es *expiryState) ResizeWorkers(n int) {
if n == 0 {
n = 100
}
// Lock to avoid multiple resizes happening at the same time.
es.mu.Lock()
defer es.mu.Unlock()
@@ -538,6 +544,10 @@ func (t *transitionState) UpdateWorkers(n int) {
}
func (t *transitionState) updateWorkers(n int) {
if n == 0 {
n = 100
}
for t.numWorkers < n {
go t.worker(t.objAPI)
t.numWorkers++
@@ -573,6 +583,10 @@ func enqueueTransitionImmediate(obj ObjectInfo, src lcEventSrc) {
if lc, err := globalLifecycleSys.Get(obj.Bucket); err == nil {
switch event := lc.Eval(obj.ToLifecycleOpts()); event.Action {
case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
if obj.DeleteMarker || obj.IsDir {
// nothing to transition
return
}
globalTransitionState.queueTransitionTask(obj, event, src)
}
}
@@ -663,11 +677,12 @@ func genTransitionObjName(bucket string) (string, error) {
// is moved to the transition tier. Note that in the case of encrypted objects, entire encrypted stream is moved
// to the transition tier without decrypting or re-encrypting.
func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo, lae lcAuditEvent) (err error) {
timeILM := globalScannerMetrics.timeILM(lae.Action)
defer func() {
if err != nil {
return
}
globalScannerMetrics.timeILM(lae.Action)(1)
timeILM(1)
}()
opts := ObjectOptions{
@@ -721,12 +736,12 @@ func auditTierActions(ctx context.Context, tier string, bytes int64) func(err er
// getTransitionedObjectReader returns a reader from the transitioned tier.
func getTransitionedObjectReader(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, oi ObjectInfo, opts ObjectOptions) (gr *GetObjectReader, err error) {
tgtClient, err := globalTierConfigMgr.getDriver(oi.TransitionedObject.Tier)
tgtClient, err := globalTierConfigMgr.getDriver(ctx, oi.TransitionedObject.Tier)
if err != nil {
return nil, fmt.Errorf("transition storage class not configured")
return nil, fmt.Errorf("transition storage class not configured: %w", err)
}
fn, off, length, err := NewGetObjectReader(rs, oi, opts)
fn, off, length, err := NewGetObjectReader(rs, oi, opts, h)
if err != nil {
return nil, ErrorRespToObjectError(err, bucket, object)
}

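The `transitionObject` hunk above captures `timeILM := globalScannerMetrics.timeILM(lae.Action)` at function entry and calls it in the deferred block, instead of calling the helper inside the defer. Assuming the helper starts its timer when first invoked and returns a closure that records the elapsed time, the old form measured roughly zero duration. A sketch of the pattern with a stand-in helper:

```go
// Sketch: a start/stop timing helper must be invoked at function entry so
// the returned closure measures the real elapsed time when deferred.
package main

import (
	"fmt"
	"time"
)

// timeOp starts the clock now and returns a func that records the duration.
func timeOp(name string) func(n int) {
	start := time.Now()
	return func(n int) {
		fmt.Printf("%s: %d item(s) in %s\n", name, n, time.Since(start).Round(time.Millisecond))
	}
}

func transition() {
	done := timeOp("transition") // clock starts here, at entry
	defer done(1)                // records the real elapsed time

	time.Sleep(50 * time.Millisecond) // the actual work
}

func main() { transition() }
```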
View File

@@ -26,7 +26,7 @@ import (
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// Validate all the ListObjects query arguments, returns an APIErrorCode
@@ -124,7 +124,7 @@ func (api objectAPIHandlers) listObjectVersionsHandler(w http.ResponseWriter, r
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo, checkObjMeta)
response := generateListVersionsResponse(ctx, bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo, checkObjMeta)
// Write success response.
writeSuccessResponseXML(w, encodeResponseList(response))
@@ -202,7 +202,7 @@ func (api objectAPIHandlers) listObjectsV2Handler(ctx context.Context, w http.Re
if r.Header.Get(xMinIOExtract) == "true" && strings.Contains(prefix, archivePattern) {
// Initiate a list objects operation inside a zip file based in the input params
listObjectsV2Info, err = listObjectsV2InArchive(ctx, objectAPI, bucket, prefix, token, delimiter, maxKeys, fetchOwner, startAfter)
listObjectsV2Info, err = listObjectsV2InArchive(ctx, objectAPI, bucket, prefix, token, delimiter, maxKeys, startAfter, r.Header)
} else {
// Initiate a list objects operation based on the input params.
// On success would return back ListObjectsInfo object to be
@@ -219,7 +219,7 @@ func (api objectAPIHandlers) listObjectsV2Handler(ctx context.Context, w http.Re
return
}
response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
response := generateListObjectsV2Response(ctx, bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes, checkObjMeta)
@@ -231,7 +231,7 @@ func parseRequestToken(token string) (subToken string, nodeIndex int) {
if token == "" {
return token, -1
}
i := strings.Index(token, ":")
i := strings.Index(token, getKeySeparator())
if i < 0 {
return token, -1
}
@@ -318,7 +318,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
return
}
response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)
response := generateListObjectsV1Response(ctx, bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)
// Write success response.
writeSuccessResponseXML(w, encodeResponseList(response))

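The list-handlers hunk above changes `parseRequestToken` to split the continuation token on `getKeySeparator()` instead of a hard-coded `":"`, so the separator is defined in one place. A sketch of the function; the part of its body not shown in the diff is reconstructed here for illustration and may differ from the actual implementation, and the separator value is an assumption:

```go
// Sketch: continuation tokens embed a node index after a separator that
// now comes from a helper instead of a literal ":".
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func getKeySeparator() string { return ":" } // stand-in; real value may differ

func parseRequestToken(token string) (subToken string, nodeIndex int) {
	if token == "" {
		return token, -1
	}
	i := strings.Index(token, getKeySeparator())
	if i < 0 {
		return token, -1
	}
	nodeIndex, err := strconv.Atoi(token[i+len(getKeySeparator()):])
	if err != nil {
		return token, -1
	}
	return token[:i], nodeIndex
}

func main() {
	sub, idx := parseRequestToken("opaque-token:3")
	fmt.Println(sub, idx) // opaque-token 3
}
```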
View File

@@ -37,8 +37,8 @@ import (
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/sync/errgroup"
"github.com/minio/pkg/v3/policy"
"github.com/minio/pkg/v3/sync/errgroup"
)
// BucketMetadataSys captures all bucket metadata for a given cluster.
@@ -46,6 +46,7 @@ type BucketMetadataSys struct {
objAPI ObjectLayer
sync.RWMutex
initialized bool
metadataMap map[string]BucketMetadata
}
@@ -433,6 +434,8 @@ func (sys *BucketMetadataSys) GetConfigFromDisk(ctx context.Context, bucket stri
return loadBucketMetadata(ctx, objAPI, bucket)
}
var errBucketMetadataNotInitialized = errors.New("bucket metadata not initialized yet")
// GetConfig returns a specific configuration from the bucket metadata.
// The returned object may not be modified.
// reloaded will be true if metadata refreshed from disk
@@ -454,6 +457,10 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (met
}
meta, err = loadBucketMetadata(ctx, objAPI, bucket)
if err != nil {
if !sys.Initialized() {
// bucket metadata not yet initialized
return newBucketMetadata(bucket), reloaded, errBucketMetadataNotInitialized
}
return meta, reloaded, err
}
sys.Lock()
@@ -498,9 +505,10 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
}
errs := g.Wait()
for _, err := range errs {
for index, err := range errs {
if err != nil {
internalLogIf(ctx, err, logger.WarningKind)
internalLogOnceIf(ctx, fmt.Errorf("Unable to load bucket metadata, will be retried: %w", err),
"load-bucket-metadata-"+buckets[index].Name, logger.WarningKind)
}
}
@@ -583,6 +591,14 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa
}
}
// Initialized indicates if bucket metadata sys is initialized at least once.
func (sys *BucketMetadataSys) Initialized() bool {
sys.RLock()
defer sys.RUnlock()
return sys.initialized
}
// Loads bucket metadata for all buckets into BucketMetadataSys.
func (sys *BucketMetadataSys) init(ctx context.Context, buckets []BucketInfo) {
count := 100 // load 100 bucket metadata at a time.
@@ -596,6 +612,10 @@ func (sys *BucketMetadataSys) init(ctx context.Context, buckets []BucketInfo) {
buckets = buckets[count:]
}
sys.Lock()
sys.initialized = true
sys.Unlock()
if globalIsDistErasure {
go sys.refreshBucketsMetadataLoop(ctx, failedBuckets)
}

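The bucket-metadata hunks above add an `initialized` flag and the `errBucketMetadataNotInitialized` sentinel, so callers can tell a not-yet-loaded subsystem apart from a genuinely missing config. A small sketch of that guard with stand-in types:

```go
// Sketch: a sentinel error distinguishes "subsystem not loaded yet"
// (retryable) from "this bucket's config failed to load".
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNotInitialized = errors.New("bucket metadata not initialized yet")

type metadataSys struct {
	mu          sync.RWMutex
	initialized bool
	configs     map[string]string
}

func (s *metadataSys) Initialized() bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.initialized
}

func (s *metadataSys) GetConfig(bucket string) (string, error) {
	s.mu.RLock()
	cfg, ok := s.configs[bucket]
	s.mu.RUnlock()
	if ok {
		return cfg, nil
	}
	if !s.Initialized() {
		return "", errNotInitialized // caller may retry after startup
	}
	return "", fmt.Errorf("no config for bucket %q", bucket)
}

func main() {
	sys := &metadataSys{}
	_, err := sys.GetConfig("photos")
	fmt.Println(errors.Is(err, errNotInitialized)) // true
}
```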
View File

@@ -41,7 +41,7 @@ import (
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
"github.com/minio/sio"
)
@@ -490,7 +490,7 @@ func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kms
}
metadata := make(map[string]string)
key, err := GlobalKMS.GenerateKey(ctx, "", kmsContext)
key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{AssociatedData: kmsContext})
if err != nil {
return
}
@@ -519,7 +519,11 @@ func decryptBucketMetadata(input []byte, bucket string, meta map[string]string,
if err != nil {
return nil, err
}
extKey, err := GlobalKMS.DecryptKey(keyID, kmsKey, kmsContext)
extKey, err := GlobalKMS.Decrypt(context.TODO(), &kms.DecryptRequest{
Name: keyID,
Ciphertext: kmsKey,
AssociatedData: kmsContext,
})
if err != nil {
return nil, err
}

View File

@@ -26,7 +26,7 @@ import (
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
const (
@@ -66,8 +66,9 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
config.SetRegion(globalSite.Region)
if err = config.Validate(globalSite.Region, globalEventNotifier.targetList); err != nil {
region := globalSite.Region()
config.SetRegion(region)
if err = config.Validate(region, globalEventNotifier.targetList); err != nil {
arnErr, ok := err.(*event.ErrARNNotFound)
if ok {
for i, queue := range config.QueueList {
@@ -134,7 +135,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
return
}
config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalSite.Region, globalEventNotifier.targetList)
config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalSite.Region(), globalEventNotifier.targetList)
if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
if event.IsEventError(err) {

View File

@@ -28,7 +28,7 @@ import (
"github.com/minio/minio/internal/bucket/replication"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// BucketObjectLockSys - map of bucket and retention configuration.
@@ -44,7 +44,6 @@ func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention,
if errors.Is(err, errInvalidArgument) {
return r, err
}
logger.CriticalIf(context.Background(), err)
return r, err
}
return config.ToRetention(), nil

View File

@@ -27,7 +27,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
const (

View File

@@ -29,8 +29,8 @@ import (
"testing"
"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/policy/condition"
"github.com/minio/pkg/v3/policy"
"github.com/minio/pkg/v3/policy/condition"
)
func getAnonReadOnlyBucketPolicy(bucketName string) *policy.BucketPolicy {
@@ -107,7 +107,7 @@ func getAnonWriteOnlyObjectPolicy(bucketName, prefix string) *policy.BucketPolic
// Wrapper for calling Create Bucket and ensure we get one and only one success.
func TestCreateBucket(t *testing.T) {
ExecObjectLayerAPITest(t, testCreateBucket, []string{"MakeBucket"})
ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testCreateBucket, endpoints: []string{"MakeBucket"}})
}
// testCreateBucket - Test for calling Create Bucket and ensure we get one and only one success.
@@ -154,7 +154,7 @@ func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRoute
// Wrapper for calling Put Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
func TestPutBucketPolicyHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testPutBucketPolicyHandler, []string{"PutBucketPolicy"})
ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testPutBucketPolicyHandler, endpoints: []string{"PutBucketPolicy"}})
}
// testPutBucketPolicyHandler - Test for Bucket policy end point.
@@ -373,7 +373,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// Wrapper for calling Get Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
func TestGetBucketPolicyHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testGetBucketPolicyHandler, []string{"PutBucketPolicy", "GetBucketPolicy"})
ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testGetBucketPolicyHandler, endpoints: []string{"PutBucketPolicy", "GetBucketPolicy"}})
}
// testGetBucketPolicyHandler - Test for end point which fetches the access policy json of the given bucket.
@@ -577,7 +577,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// Wrapper for calling Delete Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
func TestDeleteBucketPolicyHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testDeleteBucketPolicyHandler, []string{"PutBucketPolicy", "DeleteBucketPolicy"})
ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testDeleteBucketPolicyHandler, endpoints: []string{"PutBucketPolicy", "DeleteBucketPolicy"}})
}
// testDeleteBucketPolicyHandler - Test for Delete bucket policy end point.

View File

@@ -32,7 +32,7 @@ import (
"github.com/minio/minio/internal/handlers"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// PolicySys - policy subsystem.

View File

@@ -49,8 +49,8 @@ var bucketStorageCache = cachevalue.New[DataUsageInfo]()
func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
bucketStorageCache.InitOnce(10*time.Second,
cachevalue.Opts{ReturnLastGood: true, NoWait: true},
func() (DataUsageInfo, error) {
ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
func(ctx context.Context) (DataUsageInfo, error) {
ctx, done := context.WithTimeout(ctx, 2*time.Second)
defer done()
return loadDataUsageFromBackend(ctx, objAPI)
@@ -59,8 +59,8 @@ func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
}
// GetBucketUsageInfo returns bucket usage info for a given bucket
func (sys *BucketQuotaSys) GetBucketUsageInfo(bucket string) (BucketUsageInfo, error) {
dui, err := bucketStorageCache.Get()
func (sys *BucketQuotaSys) GetBucketUsageInfo(ctx context.Context, bucket string) (BucketUsageInfo, error) {
dui, err := bucketStorageCache.GetWithCtx(ctx)
timedout := OperationTimedOut{}
if err != nil && !errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &timedout) {
if len(dui.BucketsUsage) > 0 {
@@ -118,7 +118,7 @@ func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string,
return BucketQuotaExceeded{Bucket: bucket}
}
bui, err := sys.GetBucketUsageInfo(bucket)
bui, err := sys.GetBucketUsageInfo(ctx, bucket)
if err != nil {
return err
}

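The quota hunk above moves the usage cache to a context-aware callback (`GetWithCtx`), so the backend load is bounded by the caller's context with a short timeout layered on top. A stand-in sketch of such a cached value, not the internal `cachevalue` package:

```go
// Sketch: a TTL-cached value whose refresh callback receives the caller's
// context, so a slow backend load is bounded by the request.
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type cachedValue[T any] struct {
	mu      sync.Mutex
	ttl     time.Duration
	last    time.Time
	val     T
	refresh func(ctx context.Context) (T, error)
}

func (c *cachedValue[T]) GetWithCtx(ctx context.Context) (T, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if time.Since(c.last) < c.ttl {
		return c.val, nil
	}
	v, err := c.refresh(ctx)
	if err != nil {
		return c.val, err // hand back the last good value with the error
	}
	c.val, c.last = v, time.Now()
	return v, nil
}

func main() {
	c := &cachedValue[int]{
		ttl: 10 * time.Second,
		refresh: func(ctx context.Context) (int, error) {
			ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
			defer cancel()
			_ = ctx // pretend to load usage data under this deadline
			return 42, nil
		},
	}
	v, err := c.GetWithCtx(context.Background())
	fmt.Println(v, err)
}
```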
View File

@@ -34,7 +34,7 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// PutBucketReplicationConfigHandler - PUT Bucket replication configuration.

View File

@@ -534,7 +534,7 @@ func getHealReplicateObjectInfo(oi ObjectInfo, rcfg replicationConfig) Replicate
rstate.ReplicateDecisionStr = dsc.String()
asz, _ := oi.GetActualSize()
return ReplicateObjectInfo{
r := ReplicateObjectInfo{
Name: oi.Name,
Size: oi.Size,
ActualSize: asz,
@@ -558,6 +558,10 @@ func getHealReplicateObjectInfo(oi ObjectInfo, rcfg replicationConfig) Replicate
SSEC: crypto.SSEC.IsEncrypted(oi.UserDefined),
UserTags: oi.UserTags,
}
if r.SSEC {
r.Checksum = oi.Checksum
}
return r
}
// ReplicationState - returns replication state using other internal replication metadata in ObjectInfo


@@ -19,6 +19,7 @@ package cmd
import (
"context"
"encoding/base64"
"encoding/binary"
"errors"
"fmt"
@@ -74,20 +75,18 @@ const (
ObjectLockRetentionTimestamp = "objectlock-retention-timestamp"
// ObjectLockLegalHoldTimestamp - the last time a legal hold metadata modification happened on this cluster for this object version
ObjectLockLegalHoldTimestamp = "objectlock-legalhold-timestamp"
// ReplicationWorkerMultiplier is suggested worker multiplier if traffic exceeds replication worker capacity
ReplicationWorkerMultiplier = 1.5
// ReplicationSsecChecksumHeader - the encrypted checksum of the SSE-C encrypted object.
ReplicationSsecChecksumHeader = "X-Minio-Replication-Ssec-Crc"
)
// gets replication config associated to a given bucket name.
func getReplicationConfig(ctx context.Context, bucketName string) (rc *replication.Config, err error) {
rCfg, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucketName)
if err != nil {
if errors.Is(err, BucketReplicationConfigNotFound{Bucket: bucketName}) || errors.Is(err, errInvalidArgument) {
return rCfg, err
}
logger.CriticalIf(ctx, err)
if err != nil && !errors.Is(err, BucketReplicationConfigNotFound{Bucket: bucketName}) {
return rCfg, err
}
return rCfg, err
return rCfg, nil
}
// validateReplicationDestination returns an error if the replication destination bucket is missing or not configured
@@ -261,10 +260,16 @@ func mustReplicate(ctx context.Context, bucket, object string, mopts mustReplica
if mopts.replicationRequest { // incoming replication request on target cluster
return
}
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
replLogOnceIf(ctx, err, bucket)
return
}
if cfg == nil {
return
}
opts := replication.ObjectOpts{
Name: object,
SSEC: crypto.SSEC.IsEncrypted(mopts.meta),
@@ -312,6 +317,7 @@ var standardHeaders = []string{
func hasReplicationRules(ctx context.Context, bucket string, objects []ObjectToDelete) bool {
c, err := getReplicationConfig(ctx, bucket)
if err != nil || c == nil {
replLogOnceIf(ctx, err, bucket)
return false
}
for _, obj := range objects {
@@ -331,6 +337,7 @@ func isStandardHeader(matchHeaderKey string) bool {
func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, delOpts ObjectOptions, gerr error) (dsc ReplicateDecision) {
rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil || rcfg == nil {
replLogOnceIf(ctx, err, bucket)
return
}
// If incoming request is a replication request, it does not need to be re-replicated.
@@ -758,12 +765,34 @@ func (m caseInsensitiveMap) Lookup(key string) (string, bool) {
return "", false
}
func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, err error) {
func getCRCMeta(oi ObjectInfo, partNum int, h http.Header) map[string]string {
meta := make(map[string]string)
cs := oi.decryptChecksums(partNum, h)
for k, v := range cs {
cksum := hash.NewChecksumString(k, v)
if cksum == nil {
continue
}
if cksum.Valid() {
meta[cksum.Type.Key()] = v
}
}
return meta
}
func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo, partNum int) (putOpts minio.PutObjectOptions, err error) {
meta := make(map[string]string)
isSSEC := crypto.SSEC.IsEncrypted(objInfo.UserDefined)
for k, v := range objInfo.UserDefined {
// In case of SSE-C objects copy the allowed internal headers as well
if !crypto.SSEC.IsEncrypted(objInfo.UserDefined) || !slices.Contains(maps.Keys(validSSEReplicationHeaders), k) {
if !isSSEC || !slices.Contains(maps.Keys(validSSEReplicationHeaders), k) {
if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
if strings.EqualFold(k, ReservedMetadataPrefixLower+"crc") {
for k, v := range getCRCMeta(objInfo, partNum, nil) {
meta[k] = v
}
}
continue
}
if isStandardHeader(k) {
@@ -777,6 +806,17 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put
}
}
if len(objInfo.Checksum) > 0 {
// Add encrypted CRC to metadata for SSE-C objects.
if isSSEC {
meta[ReplicationSsecChecksumHeader] = base64.StdEncoding.EncodeToString(objInfo.Checksum)
} else {
for k, v := range getCRCMeta(objInfo, 0, nil) {
meta[k] = v
}
}
}
if sc == "" && (objInfo.StorageClass == storageclass.STANDARD || objInfo.StorageClass == storageclass.RRS) {
sc = objInfo.StorageClass
}
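A small hedged sketch of how the checksum is carried for encrypted objects, mirroring the branch above: for SSE-C the target cannot recompute a plaintext checksum, so the source base64-encodes the stored checksum under ReplicationSsecChecksumHeader. The raw bytes and the header container here are illustrative only.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
)

func main() {
	// Stand-in for objInfo.Checksum on an SSE-C object; the target cannot
	// recompute a plaintext checksum, so the stored value is shipped verbatim.
	rawChecksum := []byte{0xde, 0xad, 0xbe, 0xef}

	const ssecChecksumHeader = "X-Minio-Replication-Ssec-Crc" // ReplicationSsecChecksumHeader

	meta := http.Header{}
	meta.Set(ssecChecksumHeader, base64.StdEncoding.EncodeToString(rawChecksum))
	fmt.Println(meta.Get(ssecChecksumHeader))
}
```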
@@ -993,7 +1033,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
object := ri.Name
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
if err != nil || cfg == nil {
replLogOnceIf(ctx, err, "get-replication-config-"+bucket)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
@@ -1196,17 +1236,23 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
// make sure we have the latest metadata for metrics calculation
rinfo.PrevReplicationStatus = objInfo.TargetReplicationStatus(tgt.ARN)
size, err := objInfo.GetActualSize()
if err != nil {
replLogIf(ctx, err)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return
// Set the encrypted size for SSE-C objects
var size int64
if crypto.SSEC.IsEncrypted(objInfo.UserDefined) {
size = objInfo.Size
} else {
size, err = objInfo.GetActualSize()
if err != nil {
replLogIf(ctx, err)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return
}
}
if tgt.Bucket == "" {
@@ -1234,7 +1280,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
// use core client to avoid doing multipart on PUT
c := &minio.Core{Client: tgt.Client}
putOpts, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo)
putOpts, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo, 0)
if err != nil {
replLogIf(ctx, fmt.Errorf("failure setting options for replication bucket:%s err:%w", bucket, err))
sendEvent(eventArgs{
@@ -1267,23 +1313,19 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
}
r := bandwidth.NewMonitoredReader(newCtx, globalBucketMonitor, gr, opts)
if objInfo.isMultipart() {
if rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object,
r, objInfo, putOpts); rinfo.Err != nil {
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed
replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL()))
}
}
rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object, r, objInfo, putOpts)
} else {
if _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); rinfo.Err != nil {
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed
replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL()))
}
}
_, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts)
}
if rinfo.Err != nil && minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
globalBucketTargetSys.markOffline(tgt.EndpointURL())
if rinfo.Err != nil {
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed
replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): to (target: %s): %w",
bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
}
if minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
globalBucketTargetSys.markOffline(tgt.EndpointURL())
}
}
return
}
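The consolidated error handling above boils down to one decision tree; a rough self-contained sketch of that tree follows (the apiError type is a stand-in for the minio-go error response, not MinIO's API):

```go
package main

import (
	"errors"
	"fmt"
	"net"
)

type apiError struct{ Code string }

func (e apiError) Error() string { return e.Code }

// classify mirrors the consolidated decision above: PreconditionFailed means
// the target already holds this version, network errors mark the target
// offline, anything else counts as a failed replication attempt.
func classify(err error) string {
	if err == nil {
		return "completed"
	}
	var api apiError
	if errors.As(err, &api) && api.Code == "PreconditionFailed" {
		return "skip (target already satisfied)"
	}
	var netErr net.Error
	if errors.As(err, &netErr) {
		return "failed (mark target offline)"
	}
	return "failed (log and retry later)"
}

func main() {
	fmt.Println(classify(nil))
	fmt.Println(classify(apiError{Code: "PreconditionFailed"}))
	fmt.Println(classify(errors.New("checksum mismatch")))
}
```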
@@ -1376,7 +1418,8 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
}
// Set the encrypted size for SSE-C objects
if crypto.SSEC.IsEncrypted(objInfo.UserDefined) {
isSSEC := crypto.SSEC.IsEncrypted(objInfo.UserDefined)
if isSSEC {
size = objInfo.Size
}
@@ -1436,6 +1479,13 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
return
}
} else {
// SSEC objects will refuse HeadObject without the decryption key.
// Ignore the error, since we know the object exists and versioning prevents overwriting existing versions.
if isSSEC && strings.Contains(cerr.Error(), errorCodes[ErrSSEEncryptedObject].Description) {
rinfo.ReplicationStatus = replication.Completed
rinfo.ReplicationAction = replicateNone
goto applyAction
}
// if target returns error other than NoSuchKey, defer replication attempt
if minio.IsNetworkOrHostDown(cerr, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
globalBucketTargetSys.markOffline(tgt.EndpointURL())
@@ -1463,6 +1513,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
return
}
}
applyAction:
rinfo.ReplicationStatus = replication.Completed
rinfo.Size = size
rinfo.ReplicationAction = rAction
@@ -1505,7 +1556,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
}
} else {
var putOpts minio.PutObjectOptions
putOpts, err = putReplicationOpts(ctx, tgt.StorageClass, objInfo)
putOpts, err = putReplicationOpts(ctx, tgt.StorageClass, objInfo, 0)
if err != nil {
replLogIf(ctx, fmt.Errorf("failed to set replicate options for object %s/%s(%s) (target %s) err:%w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), err))
sendEvent(eventArgs{
@@ -1537,27 +1588,19 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
}
r := bandwidth.NewMonitoredReader(newCtx, globalBucketMonitor, gr, opts)
if objInfo.isMultipart() {
if rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object,
r, objInfo, putOpts); rinfo.Err != nil {
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed
replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
} else {
rinfo.ReplicationStatus = replication.Completed
}
}
rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object, r, objInfo, putOpts)
} else {
if _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); rinfo.Err != nil {
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed
replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
} else {
rinfo.ReplicationStatus = replication.Completed
}
}
_, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts)
}
if rinfo.Err != nil && minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
globalBucketTargetSys.markOffline(tgt.EndpointURL())
if rinfo.Err != nil {
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed
replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w",
bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
}
if minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
globalBucketTargetSys.markOffline(tgt.EndpointURL())
}
}
}
return
@@ -1577,7 +1620,7 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
break
}
if minio.ToErrorResponse(err).Code == "PreconditionFailed" {
return err
return nil
}
attempts++
time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
@@ -1604,13 +1647,14 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
}()
var (
hr *hash.Reader
pInfo minio.ObjectPart
hr *hash.Reader
pInfo minio.ObjectPart
isSSEC = crypto.SSEC.IsEncrypted(objInfo.UserDefined)
)
var objectSize int64
for _, partInfo := range objInfo.Parts {
if crypto.SSEC.IsEncrypted(objInfo.UserDefined) {
if isSSEC {
hr, err = hash.NewReader(ctx, io.LimitReader(r, partInfo.Size), partInfo.Size, "", "", partInfo.ActualSize)
} else {
hr, err = hash.NewReader(ctx, io.LimitReader(r, partInfo.ActualSize), partInfo.ActualSize, "", "", partInfo.ActualSize)
@@ -1621,12 +1665,18 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
cHeader := http.Header{}
cHeader.Add(xhttp.MinIOSourceReplicationRequest, "true")
if !isSSEC {
crc := getCRCMeta(objInfo, partInfo.Number, nil) // No SSE-C keys here.
for k, v := range crc {
cHeader.Add(k, v)
}
}
popts := minio.PutObjectPartOptions{
SSE: opts.ServerSideEncryption,
CustomHeader: cHeader,
}
if crypto.SSEC.IsEncrypted(objInfo.UserDefined) {
if isSSEC {
objectSize += partInfo.Size
pInfo, err = c.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, hr, partInfo.Size, popts)
} else {
@@ -1636,22 +1686,33 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
if err != nil {
return err
}
if !crypto.SSEC.IsEncrypted(objInfo.UserDefined) && pInfo.Size != partInfo.ActualSize {
if !isSSEC && pInfo.Size != partInfo.ActualSize {
return fmt.Errorf("Part size mismatch: got %d, want %d", pInfo.Size, partInfo.ActualSize)
}
uploadedParts = append(uploadedParts, minio.CompletePart{
PartNumber: pInfo.PartNumber,
ETag: pInfo.ETag,
PartNumber: pInfo.PartNumber,
ETag: pInfo.ETag,
ChecksumCRC32: pInfo.ChecksumCRC32,
ChecksumCRC32C: pInfo.ChecksumCRC32C,
ChecksumSHA1: pInfo.ChecksumSHA1,
ChecksumSHA256: pInfo.ChecksumSHA256,
})
}
userMeta := map[string]string{
validSSEReplicationHeaders[ReservedMetadataPrefix+"Actual-Object-Size"]: objInfo.UserDefined[ReservedMetadataPrefix+"actual-size"],
}
if isSSEC && objInfo.UserDefined[ReplicationSsecChecksumHeader] != "" {
userMeta[ReplicationSsecChecksumHeader] = objInfo.UserDefined[ReplicationSsecChecksumHeader]
}
// Really big value, but it's okay on heavily loaded systems. This is just the tail-end timeout.
cctx, ccancel := context.WithTimeout(ctx, 10*time.Minute)
defer ccancel()
_, err = c.CompleteMultipartUpload(cctx, bucket, object, uploadID, uploadedParts, minio.PutObjectOptions{
UserMetadata: map[string]string{validSSEReplicationHeaders[ReservedMetadataPrefix+"Actual-Object-Size"]: objInfo.UserDefined[ReservedMetadataPrefix+"actual-size"]},
UserMetadata: userMeta,
Internal: minio.AdvancedPutOptions{
SourceMTime: objInfo.ModTime,
SourceETag: objInfo.ETag,
// always set this to distinguish between `mc mirror` replication and serverside
ReplicationRequest: true,
},
@@ -2185,12 +2246,12 @@ type proxyResult struct {
// get Reader from replication target if active-active replication is in place and
// this node returns a 404
func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, _ http.Header, opts ObjectOptions, proxyTargets *madmin.BucketTargets) (gr *GetObjectReader, proxy proxyResult, err error) {
func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions, proxyTargets *madmin.BucketTargets) (gr *GetObjectReader, proxy proxyResult, err error) {
tgt, oi, proxy := proxyHeadToRepTarget(ctx, bucket, object, rs, opts, proxyTargets)
if !proxy.Proxy {
return nil, proxy, nil
}
fn, _, _, err := NewGetObjectReader(nil, oi, opts)
fn, _, _, err := NewGetObjectReader(nil, oi, opts, h)
if err != nil {
return nil, proxy, err
}
@@ -2245,6 +2306,8 @@ func getProxyTargets(ctx context.Context, bucket, object string, opts ObjectOpti
}
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil || cfg == nil {
replLogOnceIf(ctx, err, bucket)
return &madmin.BucketTargets{}
}
topts := replication.ObjectOpts{Name: object}
@@ -2373,7 +2436,9 @@ func scheduleReplication(ctx context.Context, oi ObjectInfo, o ObjectLayer, dsc
SSEC: crypto.SSEC.IsEncrypted(oi.UserDefined),
UserTags: oi.UserTags,
}
if ri.SSEC {
ri.Checksum = oi.Checksum
}
if dsc.Synchronous() {
replicateObject(ctx, ri, o)
} else {
@@ -2739,7 +2804,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
s.workerCh <- struct{}{}
}()
// Allocate new results channel to receive ObjectInfo.
objInfoCh := make(chan ObjectInfo)
objInfoCh := make(chan itemOrErr[ObjectInfo])
cfg, err := getReplicationConfig(ctx, opts.bucket)
if err != nil {
replLogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed with %w", opts.bucket, opts.arn, err))
@@ -2853,17 +2918,19 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
ReplicationProxyRequest: "false",
},
})
sz := roi.Size
if err != nil {
if roi.DeleteMarker && isErrMethodNotAllowed(ErrorRespToObjectError(err, opts.bucket, roi.Name)) {
st.ReplicatedCount++
} else {
st.FailedCount++
}
sz = 0
} else {
st.ReplicatedCount++
st.ReplicatedSize += roi.Size
}
traceFn(err)
traceFn(sz, err)
select {
case <-ctx.Done():
return
@@ -2874,7 +2941,12 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
}
}(ctx, i)
}
for obj := range objInfoCh {
for res := range objInfoCh {
if res.Err != nil {
resyncStatus = ResyncFailed
replLogIf(ctx, res.Err)
return
}
select {
case <-s.resyncCancelCh:
resyncStatus = ResyncCanceled
@@ -2883,11 +2955,11 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
return
default:
}
if heal && lastCheckpoint != "" && lastCheckpoint != obj.Name {
if heal && lastCheckpoint != "" && lastCheckpoint != res.Item.Name {
continue
}
lastCheckpoint = ""
roi := getHealReplicateObjectInfo(obj, rcfg)
roi := getHealReplicateObjectInfo(res.Item, rcfg)
if !roi.ExistingObjResync.mustResync() {
continue
}
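A hedged sketch of the itemOrErr channel pattern that Walk now feeds into the resync loop, using illustrative types: the producer sends either an item or a terminal error on the same channel, so the consumer can abort cleanly when listing fails.

```go
package main

import (
	"errors"
	"fmt"
)

// itemOrErr carries either a listed item or a terminal listing error.
type itemOrErr[T any] struct {
	Item T
	Err  error
}

// walk stands in for ObjectLayer.Walk: it streams names, then reports a
// failure on the same channel instead of silently closing it.
func walk(out chan<- itemOrErr[string]) {
	defer close(out)
	for _, name := range []string{"a.txt", "b.txt"} {
		out <- itemOrErr[string]{Item: name}
	}
	out <- itemOrErr[string]{Err: errors.New("listing interrupted")}
}

func main() {
	ch := make(chan itemOrErr[string], 4)
	go walk(ch)
	for res := range ch {
		if res.Err != nil {
			fmt.Println("resync aborted:", res.Err) // mirrors ResyncFailed above
			return
		}
		fmt.Println("queue for resync:", res.Item)
	}
}
```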
@@ -2973,17 +3045,17 @@ func (s *replicationResyncer) start(ctx context.Context, objAPI ObjectLayer, opt
return nil
}
func (s *replicationResyncer) trace(resyncID string, path string) func(err error) {
func (s *replicationResyncer) trace(resyncID string, path string) func(sz int64, err error) {
startTime := time.Now()
return func(err error) {
return func(sz int64, err error) {
duration := time.Since(startTime)
if globalTrace.NumSubscribers(madmin.TraceReplicationResync) > 0 {
globalTrace.Publish(replicationResyncTrace(resyncID, startTime, duration, path, err))
globalTrace.Publish(replicationResyncTrace(resyncID, startTime, duration, path, err, sz))
}
}
}
func replicationResyncTrace(resyncID string, startTime time.Time, duration time.Duration, path string, err error) madmin.TraceInfo {
func replicationResyncTrace(resyncID string, startTime time.Time, duration time.Duration, path string, err error, sz int64) madmin.TraceInfo {
var errStr string
if err != nil {
errStr = err.Error()
@@ -2997,6 +3069,7 @@ func replicationResyncTrace(resyncID string, startTime time.Time, duration time.
Duration: duration,
Path: path,
Error: errStr,
Bytes: sz,
}
}
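The resync trace now also reports the bytes transferred; a minimal sketch of this closure-based trace pattern, with hypothetical names:

```go
package main

import (
	"fmt"
	"time"
)

// trace captures the start time and returns a completion callback that now
// also receives the number of bytes replicated for the object.
func trace(resyncID, path string) func(sz int64, err error) {
	start := time.Now()
	return func(sz int64, err error) {
		fmt.Printf("resync %s %s: took %v, %d bytes, err=%v\n",
			resyncID, path, time.Since(start), sz, err)
	}
}

func main() {
	done := trace("resync-1234", "mybucket/object.txt")
	time.Sleep(10 * time.Millisecond) // simulated replication work
	done(1024, nil)
}
```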
@@ -3138,7 +3211,7 @@ func saveResyncStatus(ctx context.Context, bucket string, brs BucketReplicationR
func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string, opts madmin.ReplDiffOpts) (chan madmin.DiffInfo, error) {
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
replLogIf(ctx, err)
replLogOnceIf(ctx, err, bucket)
return nil, err
}
tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
@@ -3147,7 +3220,7 @@ func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string,
return nil, err
}
objInfoCh := make(chan ObjectInfo, 10)
objInfoCh := make(chan itemOrErr[ObjectInfo], 10)
if err := objAPI.Walk(ctx, bucket, opts.Prefix, objInfoCh, WalkOptions{}); err != nil {
replLogIf(ctx, err)
return nil, err
@@ -3159,11 +3232,17 @@ func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string,
diffCh := make(chan madmin.DiffInfo, 4000)
go func() {
defer xioutil.SafeClose(diffCh)
for obj := range objInfoCh {
for res := range objInfoCh {
if res.Err != nil {
diffCh <- madmin.DiffInfo{Err: res.Err}
return
}
if contextCanceled(ctx) {
// Just consume input...
continue
}
obj := res.Item
// Ignore object prefixes which are excluded
// from versioning via the MinIO bucket versioning extension.
if globalBucketVersioningSys.PrefixSuspended(bucket, obj.Name) {
@@ -3231,7 +3310,11 @@ func QueueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, ret
if oi.ModTime.IsZero() {
return
}
rcfg, _ := getReplicationConfig(ctx, bucket)
rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
replLogOnceIf(ctx, err, bucket)
return
}
tgts, _ := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
queueReplicationHeal(ctx, bucket, oi, replicationConfig{
Config: rcfg,


@@ -428,7 +428,7 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
if arn.Type == madmin.ReplicationService {
// reject removal of remote target if replication configuration is present
rcfg, err := getReplicationConfig(ctx, bucket)
if err == nil {
if err == nil && rcfg != nil {
for _, tgtArn := range rcfg.FilterTargetArns(replication.ObjectOpts{OpType: replication.AllReplicationType}) {
if err == nil && (tgtArn == arnStr || rcfg.RoleArn == arnStr) {
sys.RLock()


@@ -28,7 +28,7 @@ import (
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
const (
@@ -82,7 +82,7 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
}, r.URL)
return
}
if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
if rc, _ := getReplicationConfig(ctx, bucket); rc != nil && v.Suspended() {
writeErrorResponse(ctx, w, APIError{
Code: "InvalidBucketState",
Description: "A replication configuration is present on this bucket, bucket wide versioning cannot be suspended.",


@@ -65,5 +65,5 @@ var (
MinioBannerName = "MinIO Object Storage Server"
// MinioLicense - MinIO server license.
MinioLicense = "GNU AGPLv3 <https://www.gnu.org/licenses/agpl-3.0.html>"
MinioLicense = "GNU AGPLv3 - https://www.gnu.org/licenses/agpl-3.0.html"
)


@@ -80,6 +80,9 @@ func runCallhome(ctx context.Context, objAPI ObjectLayer) bool {
ctx = lkctx.Context()
defer locker.Unlock(lkctx)
// Perform callhome once and then keep running it at regular intervals.
performCallhome(ctx)
callhomeTimer := time.NewTimer(globalCallhomeConfig.FrequencyDur())
defer callhomeTimer.Stop()
@@ -141,11 +144,14 @@ func performCallhome(ctx context.Context) {
select {
case hi, hasMore := <-healthInfoCh:
if !hasMore {
auditOptions := AuditLogOptions{Event: "callhome:diagnostics"}
// Received all data. Send to SUBNET and return
err := sendHealthInfo(ctx, healthInfo)
if err != nil {
internalLogIf(ctx, fmt.Errorf("Unable to perform callhome: %w", err))
auditOptions.Error = err.Error()
}
auditLogInternal(ctx, auditOptions)
return
}
healthInfo = hi


@@ -21,10 +21,8 @@ import (
"bufio"
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"encoding/gob"
"encoding/pem"
"errors"
"fmt"
"net"
@@ -49,7 +47,6 @@ import (
"github.com/minio/console/api/operations"
consoleoauth2 "github.com/minio/console/pkg/auth/idp/oauth2"
consoleCerts "github.com/minio/console/pkg/certs"
"github.com/minio/kms-go/kes"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/set"
@@ -58,11 +55,11 @@ import (
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/certs"
"github.com/minio/pkg/v2/console"
"github.com/minio/pkg/v2/ellipses"
"github.com/minio/pkg/v2/env"
xnet "github.com/minio/pkg/v2/net"
"github.com/minio/pkg/v3/certs"
"github.com/minio/pkg/v3/console"
"github.com/minio/pkg/v3/env"
xnet "github.com/minio/pkg/v3/net"
"golang.org/x/term"
)
// serverDebugLog will enable debug printing
@@ -73,6 +70,13 @@ var (
)
func init() {
if !term.IsTerminal(int(os.Stdout.Fd())) || !term.IsTerminal(int(os.Stderr.Fd())) {
color.TurnOff()
}
if env.Get("NO_COLOR", "") != "" || env.Get("TERM", "") == "dumb" {
color.TurnOff()
}
if runtime.GOOS == "windows" {
if mousetrap.StartedByExplorer() {
fmt.Printf("Don't double-click %s\n", os.Args[0])
@@ -131,6 +135,9 @@ func minioConfigToConsoleFeatures() {
os.Setenv("CONSOLE_LOG_QUERY_AUTH_TOKEN", value)
}
}
if value := env.Get(config.EnvBrowserRedirectURL, ""); value != "" {
os.Setenv("CONSOLE_BROWSER_REDIRECT_URL", value)
}
// pass the console subpath configuration
if globalBrowserRedirectURL != nil {
subPath := path.Clean(pathJoin(strings.TrimSpace(globalBrowserRedirectURL.Path), SlashSeparator))
@@ -169,7 +176,10 @@ func minioConfigToConsoleFeatures() {
os.Setenv("CONSOLE_STS_DURATION", valueSession)
}
os.Setenv("CONSOLE_MINIO_REGION", globalSite.Region)
os.Setenv("CONSOLE_MINIO_SITE_NAME", globalSite.Name())
os.Setenv("CONSOLE_MINIO_SITE_REGION", globalSite.Region())
os.Setenv("CONSOLE_MINIO_REGION", globalSite.Region())
os.Setenv("CONSOLE_CERT_PASSWD", env.Get("MINIO_CERT_PASSWD", ""))
// This section sets Browser (console) stored config
@@ -392,20 +402,38 @@ func buildServerCtxt(ctx *cli.Context, ctxt *serverCtxt) (err error) {
ctxt.certsDirSet = true
}
memAvailable := availableMemory()
if ctx.IsSet("memlimit") || ctx.GlobalIsSet("memlimit") {
memlimit := ctx.String("memlimit")
if memlimit == "" {
memlimit = ctx.GlobalString("memlimit")
}
mlimit, err := humanize.ParseBytes(memlimit)
if err != nil {
return err
}
if mlimit > memAvailable {
logger.Info("WARNING: maximum memory available (%s) smaller than specified --memlimit=%s, ignoring --memlimit value",
humanize.IBytes(memAvailable), memlimit)
}
ctxt.MemLimit = mlimit
} else {
ctxt.MemLimit = memAvailable
}
if memAvailable < ctxt.MemLimit {
ctxt.MemLimit = memAvailable
}
ctxt.FTP = ctx.StringSlice("ftp")
ctxt.SFTP = ctx.StringSlice("sftp")
ctxt.Interface = ctx.String("interface")
ctxt.UserTimeout = ctx.Duration("conn-user-timeout")
ctxt.ConnReadDeadline = ctx.Duration("conn-read-deadline")
ctxt.ConnWriteDeadline = ctx.Duration("conn-write-deadline")
ctxt.ConnClientReadDeadline = ctx.Duration("conn-client-read-deadline")
ctxt.ConnClientWriteDeadline = ctx.Duration("conn-client-write-deadline")
ctxt.ShutdownTimeout = ctx.Duration("shutdown-timeout")
ctxt.SendBufSize = ctx.Int("send-buf-size")
ctxt.RecvBufSize = ctx.Int("recv-buf-size")
ctxt.IdleTimeout = ctx.Duration("idle-timeout")
ctxt.ReadHeaderTimeout = ctx.Duration("read-header-timeout")
ctxt.MaxIdleConnsPerHost = ctx.Int("max-idle-conns-per-host")
ctxt.UserTimeout = ctx.Duration("conn-user-timeout")
ctxt.ShutdownTimeout = ctx.Duration("shutdown-timeout")
if conf := ctx.String("config"); len(conf) > 0 {
err = mergeServerCtxtFromConfigFile(conf, ctxt)
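A short sketch of the new --memlimit handling, assuming only the go-humanize calls already present in the hunk; the available-memory figure is hard-coded for illustration.

```go
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	available := uint64(8 << 30) // pretend 8 GiB of RAM are available

	limit, err := humanize.ParseBytes("16GiB") // e.g. the --memlimit flag value
	if err != nil {
		panic(err)
	}
	if limit > available {
		fmt.Printf("WARNING: available memory %s is smaller than --memlimit=%s, using available\n",
			humanize.IBytes(available), humanize.IBytes(limit))
		limit = available
	}
	fmt.Println("effective memory limit:", humanize.IBytes(limit))
}
```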
@@ -852,125 +880,28 @@ func loadRootCredentials() {
// Initialize KMS global variable after validating and loading the configuration.
// It depends on KMS env variables and global cli flags.
func handleKMSConfig() {
if env.IsSet(kms.EnvKMSSecretKey) && env.IsSet(kms.EnvKESEndpoint) {
logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKMSSecretKey, kms.EnvKESEndpoint))
present, err := kms.IsPresent()
if err != nil {
logger.Fatal(err, "Invalid KMS configuration specified")
}
if !present {
return
}
if env.IsSet(kms.EnvKMSSecretKey) {
KMS, err := kms.Parse(env.Get(kms.EnvKMSSecretKey, ""))
if err != nil {
logger.Fatal(err, "Unable to parse the KMS secret key inherited from the shell environment")
}
GlobalKMS = KMS
KMS, err := kms.Connect(GlobalContext, &kms.ConnectionOptions{
CADir: globalCertsCADir.Get(),
})
if err != nil {
logger.Fatal(err, "Failed to connect to KMS")
}
if env.IsSet(kms.EnvKESEndpoint) {
if env.IsSet(kms.EnvKESAPIKey) {
if env.IsSet(kms.EnvKESClientKey) {
logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKESAPIKey, kms.EnvKESClientKey))
}
if env.IsSet(kms.EnvKESClientCert) {
logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKESAPIKey, kms.EnvKESClientCert))
}
}
if !env.IsSet(kms.EnvKESKeyName) {
logger.Fatal(errors.New("Invalid KES configuration"), fmt.Sprintf("The mandatory environment variable %q not set", kms.EnvKESKeyName))
}
var endpoints []string
for _, endpoint := range strings.Split(env.Get(kms.EnvKESEndpoint, ""), ",") {
if strings.TrimSpace(endpoint) == "" {
continue
}
if !ellipses.HasEllipses(endpoint) {
endpoints = append(endpoints, endpoint)
continue
}
patterns, err := ellipses.FindEllipsesPatterns(endpoint)
if err != nil {
logger.Fatal(err, fmt.Sprintf("Invalid KES endpoint %q", endpoint))
}
for _, lbls := range patterns.Expand() {
endpoints = append(endpoints, strings.Join(lbls, ""))
}
}
rootCAs, err := certs.GetRootCAs(env.Get(kms.EnvKESServerCA, globalCertsCADir.Get()))
if err != nil {
logger.Fatal(err, fmt.Sprintf("Unable to load X.509 root CAs for KES from %q", env.Get(kms.EnvKESServerCA, globalCertsCADir.Get())))
}
var kmsConf kms.Config
if env.IsSet(kms.EnvKESAPIKey) {
key, err := kes.ParseAPIKey(env.Get(kms.EnvKESAPIKey, ""))
if err != nil {
logger.Fatal(err, fmt.Sprintf("Failed to parse KES API key from %q", env.Get(kms.EnvKESAPIKey, "")))
}
kmsConf = kms.Config{
Endpoints: endpoints,
DefaultKeyID: env.Get(kms.EnvKESKeyName, ""),
APIKey: key,
RootCAs: rootCAs,
}
} else {
loadX509KeyPair := func(certFile, keyFile string) (tls.Certificate, error) {
// Manually load the certificate and private key into memory.
// We need to check whether the private key is encrypted, and
// if so, decrypt it using the user-provided password.
certBytes, err := os.ReadFile(certFile)
if err != nil {
return tls.Certificate{}, fmt.Errorf("Unable to load KES client certificate as specified by the shell environment: %v", err)
}
keyBytes, err := os.ReadFile(keyFile)
if err != nil {
return tls.Certificate{}, fmt.Errorf("Unable to load KES client private key as specified by the shell environment: %v", err)
}
privateKeyPEM, rest := pem.Decode(bytes.TrimSpace(keyBytes))
if len(rest) != 0 {
return tls.Certificate{}, errors.New("Unable to load KES client private key as specified by the shell environment: private key contains additional data")
}
if x509.IsEncryptedPEMBlock(privateKeyPEM) {
keyBytes, err = x509.DecryptPEMBlock(privateKeyPEM, []byte(env.Get(kms.EnvKESClientPassword, "")))
if err != nil {
return tls.Certificate{}, fmt.Errorf("Unable to decrypt KES client private key as specified by the shell environment: %v", err)
}
keyBytes = pem.EncodeToMemory(&pem.Block{Type: privateKeyPEM.Type, Bytes: keyBytes})
}
certificate, err := tls.X509KeyPair(certBytes, keyBytes)
if err != nil {
return tls.Certificate{}, fmt.Errorf("Unable to load KES client certificate as specified by the shell environment: %v", err)
}
return certificate, nil
}
reloadCertEvents := make(chan tls.Certificate, 1)
certificate, err := certs.NewCertificate(env.Get(kms.EnvKESClientCert, ""), env.Get(kms.EnvKESClientKey, ""), loadX509KeyPair)
if err != nil {
logger.Fatal(err, "Failed to load KES client certificate")
}
certificate.Watch(context.Background(), 15*time.Minute, syscall.SIGHUP)
certificate.Notify(reloadCertEvents)
kmsConf = kms.Config{
Endpoints: endpoints,
DefaultKeyID: env.Get(kms.EnvKESKeyName, ""),
Certificate: certificate,
ReloadCertEvents: reloadCertEvents,
RootCAs: rootCAs,
}
}
KMS, err := kms.NewWithConfig(kmsConf)
if err != nil {
logger.Fatal(err, "Unable to initialize a connection to KES as specified by the shell environment")
}
// We check that the default key ID exists or try to create it otherwise.
// This implicitly checks that we can communicate to KES. We don't treat
// a policy error as failure condition since MinIO may not have the permission
// to create keys - just to generate/decrypt data encryption keys.
if err = KMS.CreateKey(context.Background(), env.Get(kms.EnvKESKeyName, "")); err != nil && !errors.Is(err, kes.ErrKeyExists) && !errors.Is(err, kes.ErrNotAllowed) {
logger.Fatal(err, "Unable to initialize a connection to KES as specified by the shell environment")
}
GlobalKMS = KMS
if _, err = KMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{}); errors.Is(err, kms.ErrKeyNotFound) {
err = KMS.CreateKey(GlobalContext, &kms.CreateKeyRequest{Name: KMS.DefaultKey})
}
if err != nil && !errors.Is(err, kms.ErrKeyExists) && !errors.Is(err, kms.ErrPermission) {
logger.Fatal(err, "Failed to connect to KMS")
}
GlobalKMS = KMS
}
func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) {
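A hedged sketch of the simplified KMS bootstrap above: connect once, then prove the default key is usable by generating a data key, creating the key only when it does not exist, and tolerating permission errors. The kmsClient interface and error values below are stand-ins, not MinIO's internal kms package.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"
)

var (
	errKeyNotFound = errors.New("kms: key not found")
	errKeyExists   = errors.New("kms: key already exists")
	errPermission  = errors.New("kms: not allowed")
)

// kmsClient is a stand-in for the connected KMS handle.
type kmsClient interface {
	GenerateKey(ctx context.Context, name string) error
	CreateKey(ctx context.Context, name string) error
}

// ensureDefaultKey verifies the default key is usable, creating it if absent
// and tolerating permission errors (the server may only be allowed to use keys).
func ensureDefaultKey(ctx context.Context, k kmsClient, defaultKey string) error {
	err := k.GenerateKey(ctx, defaultKey)
	if errors.Is(err, errKeyNotFound) {
		err = k.CreateKey(ctx, defaultKey)
	}
	if err != nil && !errors.Is(err, errKeyExists) && !errors.Is(err, errPermission) {
		return fmt.Errorf("failed to connect to KMS: %w", err)
	}
	return nil
}

type fakeKMS struct{ keys map[string]bool }

func (f *fakeKMS) GenerateKey(_ context.Context, name string) error {
	if !f.keys[name] {
		return errKeyNotFound
	}
	return nil
}

func (f *fakeKMS) CreateKey(_ context.Context, name string) error {
	f.keys[name] = true
	return nil
}

func main() {
	k := &fakeKMS{keys: map[string]bool{}}
	if err := ensureDefaultKey(context.Background(), k, "minio-default-key"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("default KMS key is ready")
}
```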


@@ -56,7 +56,7 @@ import (
"github.com/minio/minio/internal/crypto"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)
func initHelp() {
@@ -240,6 +240,11 @@ func initHelp() {
Description: "manage Browser HTTP specific features, such as Security headers, etc.",
Optional: true,
},
config.HelpKV{
Key: config.ILMSubSys,
Description: "manage ILM settings for expiration and transition workers",
Optional: true,
},
}
if globalIsErasure {
@@ -288,6 +293,7 @@ func initHelp() {
config.DriveSubSys: drive.HelpDrive,
config.CacheSubSys: cache.Help,
config.BrowserSubSys: browser.Help,
config.ILMSubSys: ilm.Help,
}
config.RegisterHelpSubSys(helpMap)
@@ -362,7 +368,7 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o
}
case config.IdentityOpenIDSubSys:
if _, err := openid.LookupConfig(s,
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil {
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region()); err != nil {
return err
}
case config.IdentityLDAPSubSys:
@@ -383,7 +389,7 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o
}
case config.IdentityPluginSubSys:
if _, err := idplugin.LookupConfig(s[config.IdentityPluginSubSys][config.Default],
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil {
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region()); err != nil {
return err
}
case config.SubnetSubSys:
@@ -530,10 +536,11 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
// but not federation.
globalBucketFederation = etcdCfg.PathPrefix == "" && etcdCfg.Enabled
globalSite, err = config.LookupSite(s[config.SiteSubSys][config.Default], s[config.RegionSubSys][config.Default])
siteCfg, err := config.LookupSite(s[config.SiteSubSys][config.Default], s[config.RegionSubSys][config.Default])
if err != nil {
configLogIf(ctx, fmt.Errorf("Invalid site configuration: %w", err))
}
globalSite.Update(siteCfg)
globalAutoEncryption = crypto.LookupAutoEncryption() // Enable auto-encryption if enabled
if globalAutoEncryption && GlobalKMS == nil {
@@ -566,6 +573,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
return errServerNotInitialized
}
var errs []error
setDriveCounts := objAPI.SetDriveCounts()
switch subSys {
case config.APISubSys:
@@ -574,7 +582,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
configLogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
}
globalAPIConfig.init(apiConfig, setDriveCounts)
globalAPIConfig.init(apiConfig, setDriveCounts, objAPI.Legacy())
autoGenerateRootCredentials() // Generate the KMS root credentials here since we don't know whether API root access is disabled until now.
setRemoteInstanceTransport(NewHTTPTransportWithTimeout(apiConfig.RemoteTransportDeadline))
case config.CompressionSubSys:
@@ -588,26 +596,29 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
case config.HealSubSys:
healCfg, err := heal.LookupConfig(s[config.HealSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to apply heal config: %w", err)
errs = append(errs, fmt.Errorf("Unable to apply heal config: %w", err))
} else {
globalHealConfig.Update(healCfg)
}
globalHealConfig.Update(healCfg)
case config.BatchSubSys:
batchCfg, err := batch.LookupConfig(s[config.BatchSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to apply batch config: %w", err)
errs = append(errs, fmt.Errorf("Unable to apply batch config: %w", err))
} else {
globalBatchConfig.Update(batchCfg)
}
globalBatchConfig.Update(batchCfg)
case config.ScannerSubSys:
scannerCfg, err := scanner.LookupConfig(s[config.ScannerSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to apply scanner config: %w", err)
errs = append(errs, fmt.Errorf("Unable to apply scanner config: %w", err))
} else {
// update dynamic scanner values.
scannerIdleMode.Store(scannerCfg.IdleMode)
scannerCycle.Store(scannerCfg.Cycle)
scannerExcessObjectVersions.Store(scannerCfg.ExcessVersions)
scannerExcessFolders.Store(scannerCfg.ExcessFolders)
configLogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))
}
// update dynamic scanner values.
scannerIdleMode.Store(scannerCfg.IdleMode)
scannerCycle.Store(scannerCfg.Cycle)
scannerExcessObjectVersions.Store(scannerCfg.ExcessVersions)
scannerExcessFolders.Store(scannerCfg.ExcessFolders)
configLogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))
case config.LoggerWebhookSubSys:
loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.LoggerWebhookSubSys)
if err != nil {
@@ -667,14 +678,12 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
configLogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err))
break
}
// if we validated all setDriveCounts and it was successful
// proceed to store the correct storage class globally.
if i == len(setDriveCounts)-1 {
if i == 0 {
globalStorageClass.Update(sc)
}
}
case config.SubnetSubSys:
subnetConfig, err := subnet.LookupConfig(s[config.SubnetSubSys][config.Default], globalProxyTransport)
subnetConfig, err := subnet.LookupConfig(s[config.SubnetSubSys][config.Default], globalRemoteTargetTransport)
if err != nil {
configLogIf(ctx, fmt.Errorf("Unable to parse subnet configuration: %w", err))
} else {
@@ -693,11 +702,11 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
}
}
case config.DriveSubSys:
if driveConfig, err := drive.LookupConfig(s[config.DriveSubSys][config.Default]); err != nil {
driveConfig, err := drive.LookupConfig(s[config.DriveSubSys][config.Default])
if err != nil {
configLogIf(ctx, fmt.Errorf("Unable to load drive config: %w", err))
} else {
err := globalDriveConfig.Update(driveConfig)
if err != nil {
if err = globalDriveConfig.Update(driveConfig); err != nil {
configLogIf(ctx, fmt.Errorf("Unable to update drive config: %v", err))
}
}
@@ -711,27 +720,32 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
case config.BrowserSubSys:
browserCfg, err := browser.LookupConfig(s[config.BrowserSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to apply browser config: %w", err)
errs = append(errs, fmt.Errorf("Unable to apply browser config: %w", err))
} else {
globalBrowserConfig.Update(browserCfg)
}
globalBrowserConfig.Update(browserCfg)
case config.ILMSubSys:
ilmCfg, err := ilm.LookupConfig(s[config.ILMSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to apply ilm config: %w", err)
errs = append(errs, fmt.Errorf("Unable to apply ilm config: %w", err))
} else {
if globalTransitionState != nil {
globalTransitionState.UpdateWorkers(ilmCfg.TransitionWorkers)
}
if globalExpiryState != nil {
globalExpiryState.ResizeWorkers(ilmCfg.ExpirationWorkers)
}
globalILMConfig.update(ilmCfg)
}
if globalTransitionState != nil {
globalTransitionState.UpdateWorkers(ilmCfg.TransitionWorkers)
}
if globalExpiryState != nil {
globalExpiryState.ResizeWorkers(ilmCfg.ExpirationWorkers)
}
globalILMConfig.update(ilmCfg)
}
globalServerConfigMu.Lock()
defer globalServerConfigMu.Unlock()
if globalServerConfig != nil {
globalServerConfig[subSys] = s[subSys]
}
if len(errs) > 0 {
return errors.Join(errs...)
}
return nil
}
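A minimal sketch of the errors.Join pattern introduced above, with hypothetical subsystem names: per-subsystem failures are collected instead of aborting on the first one, then returned as a single joined error.

```go
package main

import (
	"errors"
	"fmt"
)

// applyAll applies every subsystem config and joins the failures, so one bad
// subsystem no longer prevents the others from being applied.
func applyAll(steps map[string]func() error) error {
	var errs []error
	for name, step := range steps {
		if err := step(); err != nil {
			errs = append(errs, fmt.Errorf("Unable to apply %s config: %w", name, err))
		}
	}
	if len(errs) > 0 {
		return errors.Join(errs...)
	}
	return nil
}

func main() {
	err := applyAll(map[string]func() error{
		"heal":    func() error { return nil },
		"scanner": func() error { return errors.New("invalid delay value") },
	})
	fmt.Println(err)
}
```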
@@ -746,41 +760,33 @@ func autoGenerateRootCredentials() {
return
}
if manager, ok := GlobalKMS.(kms.KeyManager); ok {
stat, err := GlobalKMS.Stat(GlobalContext)
if err != nil {
kmsLogIf(GlobalContext, err, "Unable to generate root credentials using KMS")
return
}
aKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root access key")})
if errors.Is(err, kes.ErrNotAllowed) || errors.Is(err, errors.ErrUnsupported) {
return // If we don't have permission to compute the HMAC, don't change the cred.
}
if err != nil {
logger.Fatal(err, "Unable to generate root access key using KMS")
}
aKey, err := manager.HMAC(GlobalContext, stat.DefaultKey, []byte("root access key"))
if errors.Is(err, kes.ErrNotAllowed) {
return // If we don't have permission to compute the HMAC, don't change the cred.
}
if err != nil {
logger.Fatal(err, "Unable to generate root access key using KMS")
}
sKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root secret key")})
if err != nil {
// Here, we must have permission. Otherwise, we would have failed earlier.
logger.Fatal(err, "Unable to generate root secret key using KMS")
}
sKey, err := manager.HMAC(GlobalContext, stat.DefaultKey, []byte("root secret key"))
if err != nil {
// Here, we must have permission. Otherwise, we would have failed earlier.
logger.Fatal(err, "Unable to generate root secret key using KMS")
}
accessKey, err := auth.GenerateAccessKey(20, bytes.NewReader(aKey))
if err != nil {
logger.Fatal(err, "Unable to generate root access key")
}
secretKey, err := auth.GenerateSecretKey(32, bytes.NewReader(sKey))
if err != nil {
logger.Fatal(err, "Unable to generate root secret key")
}
accessKey, err := auth.GenerateAccessKey(20, bytes.NewReader(aKey))
if err != nil {
logger.Fatal(err, "Unable to generate root access key")
}
secretKey, err := auth.GenerateSecretKey(32, bytes.NewReader(sKey))
if err != nil {
logger.Fatal(err, "Unable to generate root secret key")
}
logger.Info("Automatically generated root access key and secret key with the KMS")
globalActiveCred = auth.Credentials{
AccessKey: accessKey,
SecretKey: secretKey,
}
logger.Info("Automatically generated root access key and secret key with the KMS")
globalActiveCred = auth.Credentials{
AccessKey: accessKey,
SecretKey: secretKey,
}
}


@@ -39,8 +39,8 @@ func TestServerConfig(t *testing.T) {
t.Fatalf("Init Test config failed")
}
if globalSite.Region != globalMinioDefaultRegion {
t.Errorf("Expecting region `us-east-1` found %s", globalSite.Region)
if globalSite.Region() != globalMinioDefaultRegion {
t.Errorf("Expecting region `us-east-1` found %s", globalSite.Region())
}
// Set new region and verify.
@@ -52,8 +52,8 @@ func TestServerConfig(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if site.Region != "us-west-1" {
t.Errorf("Expecting region `us-west-1` found %s", globalSite.Region)
if site.Region() != "us-west-1" {
t.Errorf("Expecting region `us-west-1` found %s", globalSite.Region())
}
if err := saveServerConfig(context.Background(), objLayer, globalServerConfig); err != nil {


@@ -33,8 +33,8 @@ import (
"github.com/minio/minio/internal/config/storageclass"
"github.com/minio/minio/internal/event/target"
"github.com/minio/minio/internal/logger"
xnet "github.com/minio/pkg/v2/net"
"github.com/minio/pkg/v2/quick"
xnet "github.com/minio/pkg/v3/net"
"github.com/minio/pkg/v3/quick"
)
// Save config file to corresponding backend


@@ -27,7 +27,7 @@ import (
"github.com/minio/minio/internal/config/policy/opa"
"github.com/minio/minio/internal/config/storageclass"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/quick"
"github.com/minio/pkg/v3/quick"
)
// FileLogger is introduced to workaround the dependency about logrus


@@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@@ -20,6 +20,7 @@ package cmd
import (
"container/ring"
"context"
"io"
"sync"
"sync/atomic"
@@ -28,8 +29,8 @@ import (
"github.com/minio/minio/internal/logger/target/console"
"github.com/minio/minio/internal/logger/target/types"
"github.com/minio/minio/internal/pubsub"
"github.com/minio/pkg/v2/logger/message/log"
xnet "github.com/minio/pkg/v2/net"
"github.com/minio/pkg/v3/logger/message/log"
xnet "github.com/minio/pkg/v3/net"
)
// number of log messages to buffer
@@ -49,10 +50,10 @@ type HTTPConsoleLoggerSys struct {
// NewConsoleLogger - creates new HTTPConsoleLoggerSys with all nodes subscribed to
// the console logging pub sub system
func NewConsoleLogger(ctx context.Context) *HTTPConsoleLoggerSys {
func NewConsoleLogger(ctx context.Context, w io.Writer) *HTTPConsoleLoggerSys {
return &HTTPConsoleLoggerSys{
pubsub: pubsub.New[log.Info, madmin.LogMask](8),
console: console.New(),
console: console.New(w),
logBuf: ring.New(defaultLogBufferCount),
}
}


@@ -36,12 +36,14 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/bucket/object/lock"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/config/heal"
"github.com/minio/minio/internal/event"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/pkg/v2/console"
"github.com/minio/pkg/v3/console"
uatomic "go.uber.org/atomic"
)
@@ -51,7 +53,7 @@ const (
dataScannerCompactLeastObject = 500 // Compact when there is less than this many objects in a branch.
dataScannerCompactAtChildren = 10000 // Compact when there are this many children in a branch.
dataScannerCompactAtFolders = dataScannerCompactAtChildren / 4 // Compact when this many subfolders in a single folder.
dataScannerForceCompactAtFolders = 1_000_000 // Compact when this many subfolders in a single folder (even top level).
dataScannerForceCompactAtFolders = 250_000 // Compact when this many subfolders in a single folder (even top level).
dataScannerStartDelay = 1 * time.Minute // Time to wait on startup and between cycles.
healDeleteDangling = true
@@ -62,11 +64,12 @@ var (
globalHealConfig heal.Config
// Sleeper values are updated when config is loaded.
scannerSleeper = newDynamicSleeper(2, time.Second, true) // Keep defaults same as config defaults
scannerCycle = uatomic.NewDuration(dataScannerStartDelay)
scannerIdleMode = uatomic.NewInt32(0) // default is throttled when idle
scannerExcessObjectVersions = uatomic.NewInt64(100)
scannerExcessFolders = uatomic.NewInt64(50000)
scannerSleeper = newDynamicSleeper(2, time.Second, true) // Keep defaults same as config defaults
scannerCycle = uatomic.NewDuration(dataScannerStartDelay)
scannerIdleMode = uatomic.NewInt32(0) // default is throttled when idle
scannerExcessObjectVersions = uatomic.NewInt64(100)
scannerExcessObjectVersionsTotalSize = uatomic.NewInt64(1024 * 1024 * 1024 * 1024) // 1 TB
scannerExcessFolders = uatomic.NewInt64(50000)
)
// initDataScanner will start the scanner in the background.
@@ -346,6 +349,7 @@ func scanDataFolder(ctx context.Context, disks []StorageAPI, basePath string, ca
// No useful information...
return cache, err
}
s.newCache.forceCompact(dataScannerCompactAtChildren)
s.newCache.Info.LastUpdate = UTCNow()
s.newCache.Info.NextCycle = cache.Info.NextCycle
return s.newCache, nil
@@ -951,10 +955,32 @@ func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi Obje
}
versionID := oi.VersionID
vcfg, _ := globalBucketVersioningSys.Get(i.bucket)
rCfg, _ := globalBucketObjectLockSys.Get(i.bucket)
replcfg, _ := getReplicationConfig(ctx, i.bucket)
lcEvt := evalActionFromLifecycle(ctx, *i.lifeCycle, rCfg, replcfg, oi)
var vc *versioning.Versioning
var lr objectlock.Retention
var rcfg *replication.Config
if !isMinioMetaBucketName(i.bucket) {
vc, err = globalBucketVersioningSys.Get(i.bucket)
if err != nil {
scannerLogOnceIf(ctx, err, i.bucket)
return
}
// Check if bucket is object locked.
lr, err = globalBucketObjectLockSys.Get(i.bucket)
if err != nil {
scannerLogOnceIf(ctx, err, i.bucket)
return
}
rcfg, err = getReplicationConfig(ctx, i.bucket)
if err != nil {
scannerLogOnceIf(ctx, err, i.bucket)
return
}
}
lcEvt := evalActionFromLifecycle(ctx, *i.lifeCycle, lr, rcfg, oi)
if i.debug {
if versionID != "" {
console.Debugf(applyActionsLogPrefix+" lifecycle: %q (version-id=%s), Initial scan: %v\n", i.objectPath(), versionID, lcEvt.Action)
@@ -968,11 +994,11 @@ func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi Obje
// This can happen when,
// - ExpireObjectAllVersions flag is enabled
// - NoncurrentVersionExpiration is applicable
case lifecycle.DeleteVersionAction, lifecycle.DeleteAllVersionsAction:
case lifecycle.DeleteVersionAction, lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction:
size = 0
case lifecycle.DeleteAction:
// On a non-versioned bucket, DeleteObject removes the only version permanently.
if !vcfg.PrefixEnabled(oi.Name) {
if !vc.PrefixEnabled(oi.Name) {
size = 0
}
}
@@ -1065,7 +1091,7 @@ func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fi
}
// Check if we have many versions after applyNewerNoncurrentVersionLimit.
if len(objInfos) > int(scannerExcessObjectVersions.Load()) {
if len(objInfos) >= int(scannerExcessObjectVersions.Load()) {
// Notify object accessed via a GET request.
sendEvent(eventArgs{
EventName: event.ObjectManyVersions,
@@ -1089,6 +1115,39 @@ func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fi
})
}
cumulativeSize := int64(0)
for _, objInfo := range objInfos {
cumulativeSize += objInfo.Size
}
// Check if the cumulative size of all versions of this object is high.
if cumulativeSize >= scannerExcessObjectVersionsTotalSize.Load() {
// Notify object accessed via a GET request.
sendEvent(eventArgs{
EventName: event.ObjectLargeVersions,
BucketName: i.bucket,
Object: ObjectInfo{
Name: i.objectPath(),
},
UserAgent: "Scanner",
Host: globalLocalNodeName,
RespElements: map[string]string{
"x-minio-versions-count": strconv.Itoa(len(objInfos)),
"x-minio-versions-size": strconv.FormatInt(cumulativeSize, 10),
},
})
auditLogInternal(context.Background(), AuditLogOptions{
Event: "scanner:largeversions",
APIName: "Scanner",
Bucket: i.bucket,
Object: i.objectPath(),
Tags: map[string]interface{}{
"x-minio-versions-count": strconv.Itoa(len(objInfos)),
"x-minio-versions-size": strconv.FormatInt(cumulativeSize, 10),
},
})
}
return objInfos, nil
}
@@ -1104,7 +1163,7 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, oi Object
// Note: objDeleted is true if and only if action ==
// lifecycle.DeleteAllVersionsAction
if action == lifecycle.DeleteAllVersionsAction {
if action.DeleteAll() {
return true, 0
}
@@ -1141,17 +1200,15 @@ func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, lr loc
console.Debugf(applyActionsLogPrefix+" lifecycle: Secondary scan: %v\n", event.Action)
}
if event.Action == lifecycle.NoneAction {
return event
}
if obj.IsLatest && event.Action == lifecycle.DeleteAllVersionsAction {
if lr.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
switch event.Action {
case lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction:
// Skip if bucket has object locking enabled; To prevent the
// possibility of violating an object retention on one of the
// noncurrent versions of this object.
if lr.LockEnabled {
return lifecycle.Event{Action: lifecycle.NoneAction}
}
}
switch event.Action {
case lifecycle.DeleteVersionAction, lifecycle.DeleteRestoredVersionAction:
// Defensive code, should never happen
if obj.VersionID == "" {
@@ -1176,36 +1233,29 @@ func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, lr loc
}
func applyTransitionRule(event lifecycle.Event, src lcEventSrc, obj ObjectInfo) bool {
if obj.DeleteMarker {
if obj.DeleteMarker || obj.IsDir {
return false
}
globalTransitionState.queueTransitionTask(obj, event, src)
return true
}
func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) bool {
var err error
defer func() {
if err != nil {
return
}
// Note: DeleteAllVersions action is not supported for
// transitioned objects
globalScannerMetrics.timeILM(lcEvent.Action)(1)
}()
if err = expireTransitionedObject(ctx, objLayer, &obj, lcEvent, src); err != nil {
func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) (ok bool) {
timeILM := globalScannerMetrics.timeILM(lcEvent.Action)
if err := expireTransitionedObject(ctx, objLayer, &obj, lcEvent, src); err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return false
}
ilmLogIf(ctx, err)
return false
}
timeILM(1)
// Notification already sent in *expireTransitionedObject*, just return 'true' here.
return true
}
func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) bool {
func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) (ok bool) {
traceFn := globalLifecycleSys.trace(obj)
opts := ObjectOptions{
Expiration: ExpirationOptions{Expire: true},
@@ -1227,17 +1277,19 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
dobj ObjectInfo
err error
)
timeILM := globalScannerMetrics.timeILM(lcEvent.Action)
defer func() {
if err != nil {
if !ok {
return
}
if lcEvent.Action != lifecycle.NoneAction {
numVersions := uint64(1)
if lcEvent.Action == lifecycle.DeleteAllVersionsAction {
if lcEvent.Action.DeleteAll() {
numVersions = uint64(obj.NumVersions)
}
globalScannerMetrics.timeILM(lcEvent.Action)(numVersions)
timeILM(numVersions)
}
}()
@@ -1262,8 +1314,11 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
if obj.DeleteMarker {
eventName = event.ObjectRemovedDeleteMarkerCreated
}
if lcEvent.Action.DeleteAll() {
switch lcEvent.Action {
case lifecycle.DeleteAllVersionsAction:
eventName = event.ObjectRemovedDeleteAllVersions
case lifecycle.DelMarkerDeleteAllVersionsAction:
eventName = event.ILMDelMarkerExpirationDelete
}
// Notify object deleted event.
sendEvent(eventArgs{
@@ -1288,7 +1343,7 @@ func applyLifecycleAction(event lifecycle.Event, src lcEventSrc, obj ObjectInfo)
switch action := event.Action; action {
case lifecycle.DeleteVersionAction, lifecycle.DeleteAction,
lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction,
lifecycle.DeleteAllVersionsAction:
lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction:
success = applyExpiryRule(event, src, obj)
case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
success = applyTransitionRule(event, src, obj)
@@ -1388,48 +1443,7 @@ func (d *dynamicSleeper) Timer(ctx context.Context) func() {
t := time.Now()
return func() {
doneAt := time.Now()
for {
// Grab current values
d.mu.RLock()
minWait, maxWait := d.minSleep, d.maxSleep
factor := d.factor
cycle := d.cycle
d.mu.RUnlock()
elapsed := doneAt.Sub(t)
// Don't sleep for really small amount of time
wantSleep := time.Duration(float64(elapsed) * factor)
if wantSleep <= minWait {
return
}
if maxWait > 0 && wantSleep > maxWait {
wantSleep = maxWait
}
timer := time.NewTimer(wantSleep)
select {
case <-ctx.Done():
if !timer.Stop() {
<-timer.C
}
if d.isScanner {
globalScannerMetrics.incTime(scannerMetricYield, wantSleep)
}
return
case <-timer.C:
if d.isScanner {
globalScannerMetrics.incTime(scannerMetricYield, wantSleep)
}
return
case <-cycle:
if !timer.Stop() {
// We expired.
<-timer.C
if d.isScanner {
globalScannerMetrics.incTime(scannerMetricYield, wantSleep)
}
return
}
}
}
d.Sleep(ctx, doneAt.Sub(t))
}
}
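The Timer body now delegates to d.Sleep; a rough sketch of the proportional-sleep idea that logic implements, under assumed parameters (names are illustrative, not the dynamicSleeper API):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// sleepProportional waits roughly factor x the measured work time, skipping
// very short waits and capping long ones, and returns early on ctx cancel.
func sleepProportional(ctx context.Context, elapsed time.Duration, factor float64, minWait, maxWait time.Duration) {
	want := time.Duration(float64(elapsed) * factor)
	if want <= minWait {
		return // not worth yielding after very short operations
	}
	if maxWait > 0 && want > maxWait {
		want = maxWait
	}
	select {
	case <-ctx.Done():
	case <-time.After(want):
	}
}

func main() {
	start := time.Now()
	time.Sleep(20 * time.Millisecond) // simulated scanner work
	sleepProportional(context.Background(), time.Since(start), 2.0, time.Millisecond, time.Second)
	fmt.Println("yielded proportionally to the work done")
}
```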


@@ -20,12 +20,15 @@ package cmd
import (
"context"
"encoding/xml"
"fmt"
"strings"
"sync"
"testing"
"time"
"github.com/google/uuid"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/versioning"
)
@@ -141,3 +144,96 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
}
}
}
func TestEvalActionFromLifecycle(t *testing.T) {
// Tests cover only ExpiredObjectDeleteAllVersions and DelMarkerExpiration actions
obj := ObjectInfo{
Name: "foo",
ModTime: time.Now().Add(-31 * 24 * time.Hour),
Size: 100 << 20,
VersionID: uuid.New().String(),
IsLatest: true,
NumVersions: 4,
}
delMarker := ObjectInfo{
Name: "foo-deleted",
ModTime: time.Now().Add(-61 * 24 * time.Hour),
Size: 0,
VersionID: uuid.New().String(),
IsLatest: true,
DeleteMarker: true,
NumVersions: 4,
}
deleteAllILM := `<LifecycleConfiguration>
<Rule>
<Expiration>
<Days>30</Days>
<ExpiredObjectAllVersions>true</ExpiredObjectAllVersions>
</Expiration>
<Filter></Filter>
<Status>Enabled</Status>
<ID>DeleteAllVersions</ID>
</Rule>
</LifecycleConfiguration>`
delMarkerILM := `<LifecycleConfiguration>
<Rule>
<ID>DelMarkerExpiration</ID>
<Filter></Filter>
<Status>Enabled</Status>
<DelMarkerExpiration>
<Days>60</Days>
</DelMarkerExpiration>
</Rule>
</LifecycleConfiguration>`
deleteAllLc, err := lifecycle.ParseLifecycleConfig(strings.NewReader(deleteAllILM))
if err != nil {
t.Fatalf("Failed to parse deleteAllILM test ILM policy %v", err)
}
delMarkerLc, err := lifecycle.ParseLifecycleConfig(strings.NewReader(delMarkerILM))
if err != nil {
t.Fatalf("Failed to parse delMarkerILM test ILM policy %v", err)
}
tests := []struct {
ilm lifecycle.Lifecycle
retention lock.Retention
obj ObjectInfo
want lifecycle.Action
}{
{
// with object locking
ilm: *deleteAllLc,
retention: lock.Retention{LockEnabled: true},
obj: obj,
want: lifecycle.NoneAction,
},
{
// without object locking
ilm: *deleteAllLc,
retention: lock.Retention{},
obj: obj,
want: lifecycle.DeleteAllVersionsAction,
},
{
// with object locking
ilm: *delMarkerLc,
retention: lock.Retention{LockEnabled: true},
obj: delMarker,
want: lifecycle.NoneAction,
},
{
// without object locking
ilm: *delMarkerLc,
retention: lock.Retention{},
obj: delMarker,
want: lifecycle.DelMarkerDeleteAllVersionsAction,
},
}
for i, test := range tests {
t.Run(fmt.Sprintf("TestEvalAction-%d", i), func(t *testing.T) {
if got := evalActionFromLifecycle(context.TODO(), test.ilm, test.retention, nil, test.obj); got.Action != test.want {
t.Fatalf("Expected %v but got %v", test.want, got)
}
})
}
}
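
For completeness, the same delete-marker rule can be exercised through the lifecycle package directly; the ObjectOpts fields and the Eval signature below are assumptions based on how the test drives evalActionFromLifecycle:

```go
// Hedged sketch: evaluate the DelMarkerExpiration rule against a delete marker
// older than 60 days. Expected action: DelMarkerDeleteAllVersionsAction.
evt := delMarkerLc.Eval(lifecycle.ObjectOpts{
	Name:         "foo-deleted",
	ModTime:      time.Now().Add(-61 * 24 * time.Hour),
	VersionID:    uuid.New().String(),
	IsLatest:     true,
	DeleteMarker: true,
	NumVersions:  4,
})
fmt.Println(evt.Action)
```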

View File

@@ -18,7 +18,6 @@
package cmd
import (
"bytes"
"context"
"errors"
"fmt"
@@ -36,7 +35,6 @@ import (
"github.com/klauspost/compress/zstd"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/hash"
"github.com/tinylib/msgp/msgp"
"github.com/valyala/bytebufferpool"
)
@@ -731,6 +729,53 @@ func (d *dataUsageCache) reduceChildrenOf(path dataUsageHash, limit int, compact
}
}
// forceCompact will force compact the cache of the top entry.
// If the number of children is more than limit*100, it will compact self.
// When above the limit, a cleanup will also be performed to remove any possible abandoned entries.
func (d *dataUsageCache) forceCompact(limit int) {
if d == nil || len(d.Cache) <= limit {
return
}
top := hashPath(d.Info.Name).Key()
topE := d.find(top)
if topE == nil {
scannerLogIf(GlobalContext, errors.New("forceCompact: root not found"))
return
}
// If off by 2 orders of magnitude, compact self and log error.
if len(topE.Children) > dataScannerForceCompactAtFolders {
// If we still have too many children, compact self.
scannerLogOnceIf(GlobalContext, fmt.Errorf("forceCompact: %q has %d children. Force compacting. Expect reduced scanner performance", d.Info.Name, len(topE.Children)), d.Info.Name)
d.reduceChildrenOf(hashPath(d.Info.Name), limit, true)
}
if len(d.Cache) <= limit {
return
}
// Check for abandoned entries.
found := make(map[string]struct{}, len(d.Cache))
// Mark all children recursively
var mark func(entry dataUsageEntry)
mark = func(entry dataUsageEntry) {
for k := range entry.Children {
found[k] = struct{}{}
if ch, ok := d.Cache[k]; ok {
mark(ch)
}
}
}
found[top] = struct{}{}
mark(*topE)
// Delete all entries not found.
for k := range d.Cache {
if _, ok := found[k]; !ok {
delete(d.Cache, k)
}
}
}
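
The abandoned-entry cleanup above is a small mark-and-sweep: mark every key reachable from the root through Children, then delete whatever was never marked. A self-contained sketch of the same idea with simplified types (names here are illustrative, not from the source):

```go
package main

import "fmt"

type entry struct{ children []string }

// sweepAbandoned drops every cache key that is not reachable from root.
func sweepAbandoned(cache map[string]entry, root string) {
	found := map[string]struct{}{root: {}}
	var mark func(string)
	mark = func(k string) {
		for _, c := range cache[k].children {
			if _, ok := found[c]; !ok {
				found[c] = struct{}{}
				mark(c)
			}
		}
	}
	mark(root)
	for k := range cache {
		if _, ok := found[k]; !ok {
			delete(cache, k) // abandoned entry
		}
	}
}

func main() {
	cache := map[string]entry{
		"root":     {children: []string{"a"}},
		"a":        {},
		"orphaned": {}, // unreachable from root, will be removed
	}
	sweepAbandoned(cache, "root")
	fmt.Println(len(cache)) // 2
}
```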
// StringAll returns a detailed string representation of all entries in the cache.
func (d *dataUsageCache) StringAll() string {
// Remove bloom filter from print.
@@ -1005,11 +1050,23 @@ func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string)
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, ObjectOptions{NoLock: true})
r, err := store.GetObjectNInfo(ctx, minioMetaBucket, pathJoin(bucketMetaPrefix, name), nil, http.Header{}, ObjectOptions{NoLock: true})
if err != nil {
switch err.(type) {
case ObjectNotFound, BucketNotFound:
return false, nil
r, err = store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, ObjectOptions{NoLock: true})
if err != nil {
switch err.(type) {
case ObjectNotFound, BucketNotFound:
return false, nil
case InsufficientReadQuorum, StorageErr:
return true, nil
}
return false, err
}
err = d.deserialize(r)
r.Close()
return err != nil, nil
case InsufficientReadQuorum, StorageErr:
return true, nil
}
@@ -1070,24 +1127,11 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
}
save := func(name string, timeout time.Duration) error {
hr, err := hash.NewReader(ctx, bytes.NewReader(buf.Bytes()), int64(buf.Len()), "", "", int64(buf.Len()))
if err != nil {
return err
}
// Abandon if it takes more than a minute, so we don't hold up the scanner.
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
_, err = store.PutObject(ctx,
dataUsageBucket,
name,
NewPutObjReader(hr),
ObjectOptions{NoLock: true})
if isErrBucketNotFound(err) {
return nil
}
return err
return saveConfig(ctx, store, pathJoin(bucketMetaPrefix, name), buf.Bytes())
}
defer save(name+".bkp", 5*time.Second) // Keep a backup as well

View File

@@ -79,12 +79,12 @@ func loadPrefixUsageFromBackend(ctx context.Context, objAPI ObjectLayer, bucket
prefixUsageCache.InitOnce(30*time.Second,
// No need to fail upon Update() error; fall back to the old value.
cachevalue.Opts{ReturnLastGood: true, NoWait: true},
func() (map[string]uint64, error) {
func(ctx context.Context) (map[string]uint64, error) {
m := make(map[string]uint64)
for _, pool := range z.serverPools {
for _, er := range pool.sets {
// Load bucket usage prefixes
ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
ctx, done := context.WithTimeout(ctx, 2*time.Second)
ok := cache.load(ctx, er, bucket+slashSeparator+dataUsageCacheName) == nil
done()
if ok {
@@ -107,7 +107,7 @@ func loadPrefixUsageFromBackend(ctx context.Context, objAPI ObjectLayer, bucket
},
)
return prefixUsageCache.Get()
return prefixUsageCache.GetWithCtx(ctx)
}
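
The cache callback now receives the caller's context and the result is read with GetWithCtx, while ReturnLastGood and NoWait keep serving the previous value during (or after a failed) refresh. A generic sketch of that behaviour, not the actual internal/cachevalue API:

```go
package cachedemo

import (
	"context"
	"sync"
	"time"
)

// cached serves a possibly stale value without blocking callers; the update
// callback receives the caller's context, and failures keep the last good value.
type cached[T any] struct {
	mu       sync.Mutex
	val      T
	fresh    time.Time
	ttl      time.Duration
	updating bool
	update   func(ctx context.Context) (T, error)
}

func (c *cached[T]) GetWithCtx(ctx context.Context) T {
	c.mu.Lock()
	defer c.mu.Unlock()
	if time.Since(c.fresh) < c.ttl || c.updating {
		return c.val // NoWait: return what we have, even if stale
	}
	c.updating = true
	go func() {
		// Note: a real implementation would bound or detach ctx before using it here.
		v, err := c.update(ctx)
		c.mu.Lock()
		defer c.mu.Unlock()
		c.updating = false
		if err == nil { // ReturnLastGood: on error, keep the previous value
			c.val, c.fresh = v, time.Now()
		}
	}()
	return c.val
}
```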
func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsageInfo, error) {

View File

@@ -22,7 +22,7 @@ import (
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// Data types used for returning dummy tagging XML.

View File

@@ -110,7 +110,7 @@ func kmsKeyIDFromMetadata(metadata map[string]string) string {
//
// DecryptETags uses a KMS bulk decryption API, if available, which
// is more efficient than decrypting ETags sequentially.
func DecryptETags(ctx context.Context, k kms.KMS, objects []ObjectInfo) error {
func DecryptETags(ctx context.Context, k *kms.KMS, objects []ObjectInfo) error {
const BatchSize = 250 // We process the objects in batches - 250 is a reasonable default.
var (
metadata = make([]map[string]string, 0, BatchSize)
@@ -267,7 +267,11 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
if err != nil {
return err
}
oldKey, err := GlobalKMS.DecryptKey(keyID, kmsKey, kms.Context{bucket: path.Join(bucket, object)})
oldKey, err := GlobalKMS.Decrypt(ctx, &kms.DecryptRequest{
Name: keyID,
Ciphertext: kmsKey,
AssociatedData: kms.Context{bucket: path.Join(bucket, object)},
})
if err != nil {
return err
}
@@ -276,7 +280,10 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
return err
}
newKey, err := GlobalKMS.GenerateKey(ctx, "", kms.Context{bucket: path.Join(bucket, object)})
newKey, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: GlobalKMS.DefaultKey,
AssociatedData: kms.Context{bucket: path.Join(bucket, object)},
})
if err != nil {
return err
}
@@ -312,7 +319,10 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
if _, ok := kmsCtx[bucket]; !ok {
kmsCtx[bucket] = path.Join(bucket, object)
}
newKey, err := GlobalKMS.GenerateKey(ctx, newKeyID, kmsCtx)
newKey, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: newKeyID,
AssociatedData: kmsCtx,
})
if err != nil {
return err
}
@@ -352,7 +362,9 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
if GlobalKMS == nil {
return crypto.ObjectKey{}, errKMSNotConfigured
}
key, err := GlobalKMS.GenerateKey(ctx, "", kms.Context{bucket: path.Join(bucket, object)})
key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
AssociatedData: kms.Context{bucket: path.Join(bucket, object)},
})
if err != nil {
return crypto.ObjectKey{}, err
}
@@ -379,7 +391,10 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
if _, ok := kmsCtx[bucket]; !ok {
kmsCtx[bucket] = path.Join(bucket, object)
}
key, err := GlobalKMS.GenerateKey(ctx, keyID, kmsCtx)
key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: keyID,
AssociatedData: kmsCtx,
})
if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) {
return crypto.ObjectKey{}, errKMSKeyNotFound
@@ -475,11 +490,10 @@ func EncryptRequest(content io.Reader, r *http.Request, bucket, object string, m
func decryptObjectMeta(key []byte, bucket, object string, metadata map[string]string) ([]byte, error) {
switch kind, _ := crypto.IsEncrypted(metadata); kind {
case crypto.S3:
KMS := GlobalKMS
if KMS == nil {
if GlobalKMS == nil {
return nil, errKMSNotConfigured
}
objectKey, err := crypto.S3.UnsealObjectKey(KMS, metadata, bucket, object)
objectKey, err := crypto.S3.UnsealObjectKey(GlobalKMS, metadata, bucket, object)
if err != nil {
return nil, err
}
@@ -1011,7 +1025,9 @@ func DecryptObjectInfo(info *ObjectInfo, r *http.Request) (encrypted bool, err e
if encrypted {
if crypto.SSEC.IsEncrypted(info.UserDefined) {
if !(crypto.SSEC.IsRequested(headers) || crypto.SSECopy.IsRequested(headers)) {
return encrypted, errEncryptedObject
if r.Header.Get(xhttp.MinIOSourceReplicationRequest) != "true" {
return encrypted, errEncryptedObject
}
}
}
@@ -1063,13 +1079,16 @@ func metadataEncrypter(key crypto.ObjectKey) objectMetaEncryptFn {
}
// metadataDecrypter reverses metadataEncrypter.
func (o *ObjectInfo) metadataDecrypter() objectMetaDecryptFn {
func (o *ObjectInfo) metadataDecrypter(h http.Header) objectMetaDecryptFn {
return func(baseKey string, input []byte) ([]byte, error) {
if len(input) == 0 {
return input, nil
}
key, err := decryptObjectMeta(nil, o.Bucket, o.Name, o.UserDefined)
var key []byte
if k, err := crypto.SSEC.ParseHTTP(h); err == nil {
key = k[:]
}
key, err := decryptObjectMeta(key, o.Bucket, o.Name, o.UserDefined)
if err != nil {
return nil, err
}
@@ -1081,13 +1100,13 @@ func (o *ObjectInfo) metadataDecrypter() objectMetaDecryptFn {
// decryptPartsChecksums will attempt to decode the stored per-part checksums, if set.
func (o *ObjectInfo) decryptPartsChecksums() {
func (o *ObjectInfo) decryptPartsChecksums(h http.Header) {
data := o.Checksum
if len(data) == 0 {
return
}
if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted {
decrypted, err := o.metadataDecrypter()("object-checksum", data)
decrypted, err := o.metadataDecrypter(h)("object-checksum", data)
if err != nil {
encLogIf(GlobalContext, err)
return
@@ -1143,15 +1162,17 @@ func (o *ObjectInfo) metadataEncryptFn(headers http.Header) (objectMetaEncryptFn
// decryptChecksums will attempt to decode the stored checksums and return them if set.
// If part > 0, the checksum for that part is returned.
func (o *ObjectInfo) decryptChecksums(part int) map[string]string {
func (o *ObjectInfo) decryptChecksums(part int, h http.Header) map[string]string {
data := o.Checksum
if len(data) == 0 {
return nil
}
if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted {
decrypted, err := o.metadataDecrypter()("object-checksum", data)
decrypted, err := o.metadataDecrypter(h)("object-checksum", data)
if err != nil {
encLogIf(GlobalContext, err)
if err != crypto.ErrSecretKeyMismatch {
encLogIf(GlobalContext, err)
}
return nil
}
data = decrypted

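The changes in this file let the SSE-C key supplied in request headers decrypt the stored checksum metadata, and let replication requests through without one. From a client's point of view the same customer key must accompany the request; a minio-go sketch (endpoint, credentials, bucket and object names are placeholders):

```go
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/minio/minio-go/v7/pkg/encrypt"
)

func main() {
	clnt, err := minio.New("localhost:9000", &minio.Options{
		Creds: credentials.NewStaticV4("minioadmin", "minioadmin", ""),
	})
	if err != nil {
		log.Fatal(err)
	}
	key := make([]byte, 32) // the customer-provided SSE-C key (all zeros, for illustration only)
	sse, err := encrypt.NewSSEC(key)
	if err != nil {
		log.Fatal(err)
	}
	// Without the matching SSE-C key in the request, the server cannot decrypt
	// object metadata such as the stored checksums.
	st, err := clnt.StatObject(context.Background(), "mybucket", "myobject",
		minio.StatObjectOptions{ServerSideEncryption: sse})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(st.ETag)
}
```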
View File

@@ -23,14 +23,13 @@ import (
"net/url"
"runtime"
"sort"
"strconv"
"strings"
"github.com/cespare/xxhash/v2"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/config"
"github.com/minio/pkg/v2/ellipses"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/ellipses"
"github.com/minio/pkg/v3/env"
)
// This file implements and supports ellipses pattern for
@@ -134,7 +133,7 @@ func possibleSetCountsWithSymmetry(setCounts []uint64, argPatterns []ellipses.Ar
// on each index. This function also determines the final set size;
// the final set size has an affinity towards choosing smaller
// indexes (total sets).
func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint64, argPatterns []ellipses.ArgPattern) (setIndexes [][]uint64, err error) {
func getSetIndexes(args []string, totalSizes []uint64, setDriveCount uint64, argPatterns []ellipses.ArgPattern) (setIndexes [][]uint64, err error) {
if len(totalSizes) == 0 || len(args) == 0 {
return nil, errInvalidArgument
}
@@ -142,7 +141,7 @@ func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint6
setIndexes = make([][]uint64, len(totalSizes))
for _, totalSize := range totalSizes {
// Check that totalSize is at least the smallest possible set size and the requested drive count
if totalSize < setSizes[0] || totalSize < customSetDriveCount {
if totalSize < setSizes[0] || totalSize < setDriveCount {
msg := fmt.Sprintf("Incorrect number of endpoints provided %s", args)
return nil, config.ErrInvalidNumberOfErasureEndpoints(nil).Msg(msg)
}
@@ -167,11 +166,11 @@ func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint6
var setSize uint64
// Custom set drive count allows overriding the automatic distribution.
// Only meant if you want to further optimize drive distribution.
if customSetDriveCount > 0 {
if setDriveCount > 0 {
msg := fmt.Sprintf("Invalid set drive count. Acceptable values for %d number drives are %d", commonSize, setCounts)
var found bool
for _, ss := range setCounts {
if ss == customSetDriveCount {
if ss == setDriveCount {
found = true
}
}
@@ -180,8 +179,7 @@ func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint6
}
// No automatic symmetry calculation expected; the user is on their own
setSize = customSetDriveCount
globalCustomErasureDriveCount = true
setSize = setDriveCount
} else {
// Returns possible set counts with symmetry.
setCounts = possibleSetCountsWithSymmetry(setCounts, argPatterns)
@@ -256,7 +254,7 @@ func getTotalSizes(argPatterns []ellipses.ArgPattern) []uint64 {
// Parses all arguments and returns an endpointSet, which is a collection
// of endpoints following the ellipses pattern; this is what the object
// layer uses to initialize itself.
func parseEndpointSet(customSetDriveCount uint64, args ...string) (ep endpointSet, err error) {
func parseEndpointSet(setDriveCount uint64, args ...string) (ep endpointSet, err error) {
argPatterns := make([]ellipses.ArgPattern, len(args))
for i, arg := range args {
patterns, perr := ellipses.FindEllipsesPatterns(arg)
@@ -266,7 +264,7 @@ func parseEndpointSet(customSetDriveCount uint64, args ...string) (ep endpointSe
argPatterns[i] = patterns
}
ep.setIndexes, err = getSetIndexes(args, getTotalSizes(argPatterns), customSetDriveCount, argPatterns)
ep.setIndexes, err = getSetIndexes(args, getTotalSizes(argPatterns), setDriveCount, argPatterns)
if err != nil {
return endpointSet{}, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
}
@@ -281,23 +279,14 @@ func parseEndpointSet(customSetDriveCount uint64, args ...string) (ep endpointSe
// specific set size.
// For example: {1...64} is divided into 4 sets each of size 16.
// This applies to even distributed setup syntax as well.
func GetAllSets(args ...string) ([][]string, error) {
var customSetDriveCount uint64
if v := env.Get(EnvErasureSetDriveCount, ""); v != "" {
driveCount, err := strconv.Atoi(v)
if err != nil {
return nil, config.ErrInvalidErasureSetSize(err)
}
customSetDriveCount = uint64(driveCount)
}
func GetAllSets(setDriveCount uint64, args ...string) ([][]string, error) {
var setArgs [][]string
if !ellipses.HasEllipses(args...) {
var setIndexes [][]uint64
// Check if we have more than one arg.
if len(args) > 1 {
var err error
setIndexes, err = getSetIndexes(args, []uint64{uint64(len(args))}, customSetDriveCount, nil)
setIndexes, err = getSetIndexes(args, []uint64{uint64(len(args))}, setDriveCount, nil)
if err != nil {
return nil, err
}
@@ -311,7 +300,7 @@ func GetAllSets(args ...string) ([][]string, error) {
}
setArgs = s.Get()
} else {
s, err := parseEndpointSet(customSetDriveCount, args...)
s, err := parseEndpointSet(setDriveCount, args...)
if err != nil {
return nil, err
}
@@ -336,8 +325,6 @@ const (
EnvErasureSetDriveCount = "MINIO_ERASURE_SET_DRIVE_COUNT"
)
var globalCustomErasureDriveCount = false
type node struct {
nodeName string
disks []string
@@ -366,8 +353,13 @@ func (el *endpointsList) add(arg string) error {
return nil
}
type poolArgs struct {
args []string
setDriveCount uint64
}
// buildDisksLayoutFromConfFile supports pools with and without ellipses transparently.
func buildDisksLayoutFromConfFile(pools [][]string) (layout disksLayout, err error) {
func buildDisksLayoutFromConfFile(pools []poolArgs) (layout disksLayout, err error) {
if len(pools) == 0 {
return layout, errInvalidArgument
}
@@ -375,7 +367,7 @@ func buildDisksLayoutFromConfFile(pools [][]string) (layout disksLayout, err err
for _, list := range pools {
var endpointsList endpointsList
for _, arg := range list {
for _, arg := range list.args {
switch {
case ellipses.HasList(arg):
patterns, err := ellipses.FindListPatterns(arg)
@@ -436,7 +428,7 @@ func buildDisksLayoutFromConfFile(pools [][]string) (layout disksLayout, err err
}
}
setArgs, err := GetAllSets(eps...)
setArgs, err := GetAllSets(list.setDriveCount, eps...)
if err != nil {
return layout, err
}
@@ -469,15 +461,21 @@ func mergeDisksLayoutFromArgs(args []string, ctxt *serverCtxt) (err error) {
var setArgs [][]string
v, err := env.GetInt(EnvErasureSetDriveCount, 0)
if err != nil {
return err
}
setDriveCount := uint64(v)
// None of the args have ellipses; use the old style.
if ok {
setArgs, err = GetAllSets(args...)
setArgs, err = GetAllSets(setDriveCount, args...)
if err != nil {
return err
}
ctxt.Layout = disksLayout{
legacy: true,
pools: []poolDisksLayout{{layout: setArgs}},
pools: []poolDisksLayout{{layout: setArgs, cmdline: strings.Join(args, " ")}},
}
return
}
@@ -487,7 +485,7 @@ func mergeDisksLayoutFromArgs(args []string, ctxt *serverCtxt) (err error) {
// TODO: support SNSD deployments to be decommissioned in future
return fmt.Errorf("all args must have ellipses for pool expansion (%w) args: %s", errInvalidArgument, args)
}
setArgs, err = GetAllSets(arg)
setArgs, err = GetAllSets(setDriveCount, arg)
if err != nil {
return err
}
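
With this change the erasure set drive count is resolved once from MINIO_ERASURE_SET_DRIVE_COUNT (or per pool from the config file) and passed down explicitly. An illustrative call shape, with an example endpoint pattern (64 drives, 16 per set, giving 4 sets):

```go
// Illustrative only: GetAllSets is internal to package cmd and the endpoint
// pattern below is an example, not taken from this diff.
setArgs, err := GetAllSets(16, "http://host{1...4}/mnt/drive{1...16}")
if err != nil {
	return err
}
_ = setArgs // 4 sets of 16 drives each
```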

Some files were not shown because too many files have changed in this diff.