Compare commits

...

1395 Commits

Author SHA1 Message Date
Alex
cfed671ea3 Update Console version to v0.42.1 (#18606)
Signed-off-by: Benjamin Perez <benjamin@bexsoft.net>
2023-12-06 20:16:00 -08:00
Harshavardhana
53ce92b9ca fix: use the right channel to feed the data in (#18605)
this PR fixes a regression in batch replication
where we weren't sending any data from the Walk()
results due to incorrect channels being used.
2023-12-06 18:17:03 -08:00
Shireesh Anjal
7350a29fec Capture percentage of cpu load and memory used (#18596)
By default the cpu load is the cumulative load of all cores. Capture the
percentage load (load * 100 / cpu-count) instead.

Also capture the percentage memory used (used * 100 / total)
2023-12-06 13:19:59 -08:00
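As a quick illustration of the arithmetic described in the entry above, the sketch below computes both percentages; variable names and sample values are purely illustrative.

```go
package main

import "fmt"

func main() {
	// Hypothetical sample values, only to illustrate the formulas above.
	load, cpuCount := 6.4, 8.0      // cumulative load across all cores, core count
	memUsed, memTotal := 12.0, 64.0 // memory used and total (same unit)

	cpuLoadPct := load * 100 / cpuCount    // load * 100 / cpu-count
	memUsedPct := memUsed * 100 / memTotal // used * 100 / total

	fmt.Printf("cpu load: %.1f%%, memory used: %.1f%%\n", cpuLoadPct, memUsedPct)
}
```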
jiuker
5cc2c62c66 fix: GetFreePort() will get the same port (#18604) 2023-12-06 10:36:42 -08:00
Harshavardhana
4bc5ed6c76 support LDAP service accounts via SFTP, FTP logins (#18599) 2023-12-06 04:31:35 -08:00
Minio Trusted
e99a597899 Update yaml files to latest version RELEASE.2023-12-06T09-09-22Z 2023-12-06 10:16:37 +00:00
Harshavardhana
73dde66dbe stick to go1.19 go.mod (#18600) 2023-12-06 01:09:22 -08:00
Harshavardhana
e30c0e7ca3 Revert "Heal buckets at node level (#18504)"
This reverts commit 708296ae1b.
2023-12-05 22:34:46 -08:00
Klaus Post
8fc200c0cc Truncate long traces for internode communication (#18593)
Prevent excessively long request traces.
2023-12-05 12:16:48 -08:00
Shubhendu
708296ae1b Heal buckets at node level (#18504) 2023-12-05 02:17:35 -08:00
Harshavardhana
fbb5e75e01 avoid run-away goroutine build-up in notification send, use channels (#18533)
use memory for async events when necessary and dequeue them as
needed; for all synchronous events, customers must enable

```
MINIO_API_SYNC_EVENTS=on
```

Async events can be lost, but it is up to the admin to
decide what they want; we will not create a run-away number
of goroutines per event, instead we will queue them properly.

Currently the max async workers is set to runtime.GOMAXPROCS(0),
which is more than sufficient in general; it could be made
configurable in the future but may not be needed.
2023-12-05 02:16:33 -08:00
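A minimal sketch of the queuing approach described in the entry above: a bounded in-memory queue drained by a fixed number of workers sized to runtime.GOMAXPROCS(0), instead of a goroutine per event. All names here are illustrative, not MinIO's actual implementation.

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

type event struct{ payload string }

func main() {
	queue := make(chan event, 1024) // bounded in-memory queue for async events
	workers := runtime.GOMAXPROCS(0)

	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for ev := range queue { // dequeue as needed
				fmt.Println("delivering:", ev.payload)
			}
		}()
	}

	for i := 0; i < 10; i++ {
		select {
		case queue <- event{payload: fmt.Sprintf("event-%d", i)}:
		default:
			// Queue full: in async mode the event may be dropped,
			// which is the trade-off noted above.
		}
	}
	close(queue)
	wg.Wait()
}
```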
Harshavardhana
f327b21557 handle crashes with ILM expiry changes (#18590) 2023-12-05 01:14:36 -08:00
Harshavardhana
45b7253f39 parallelize renameData() cleanup upon error (#18591) 2023-12-04 14:54:34 -08:00
Harshavardhana
05bb655efc avoid caching metrics for timeout errors per drive (#18584)
Bonus: combine the loop for drive/REST registration.
2023-12-04 11:54:13 -08:00
Harshavardhana
8fdfcfb562 upon RenameData() quorum error delete any partial success (#18586)
there is potential for danglingWrites when quorum failed, where
only some drives took a successful write; generally this is left
to the healing routine to pick up. However, it is better that
we delete it right away to avoid potential quorum issues on the
version signature when there are many versions of an object.
2023-12-04 11:33:39 -08:00
Harshavardhana
e7c144eeac avoid double MRF heal when there is versions disparity (#18585) 2023-12-04 11:13:50 -08:00
Harshavardhana
e98172d72d avoid hot-tier SLA to be tied to warm-tier SLA (#18581)
it is okay if the warm-tier cannot keep up; we should continue
to take I/O at the hot-tier, and only fail or block the hot-tier
when the disk is full.

Bonus: add a metrics counter for these missed tasks, so we will
know for sure if one of the nodes is lagging behind or is
losing too many tasks during transitioning.
2023-12-02 13:02:12 -08:00
Minio Trusted
f2d063e7b9 Update yaml files to latest version RELEASE.2023-12-02T10-51-33Z 2023-12-02 11:14:35 +00:00
Krishnan Parthasarathi
a50f26b7f5 Implement batch-expiration for objects (#17946)
Based on an initial PR from -
https://github.com/minio/minio/pull/17792

But fully completes it with newer finalized YAML spec.
2023-12-02 02:51:33 -08:00
Klaus Post
69294cf98a Disable DMA optimization on windows (#18575)
It appears that Windows can lock up when errors occur. Use regular copy here.
2023-12-01 16:13:19 -08:00
Krishnan Parthasarathi
c397fb6c7a Minor fixes to bucket replication (#18578) 2023-12-01 16:13:08 -08:00
Klaus Post
961b0b524e Do not require restart when a disk is unreachable during node boot (#18576)
A disk that is not able to initialize when an instance is started
will never have a handler registered, which means a user will
need to restart the node after fixing the disk;

This will also prevent showing the wrong 'upgrade is needed.'
error message in that case.

When the disk is still failing, print an error every 30 minutes;
Disk reconnection will be retried every 30 seconds.

Co-authored-by: Anis Elleuch <anis@min.io>
2023-12-01 12:01:14 -08:00
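The cadence described above (retry the reconnect every 30 seconds, log only every 30 minutes) could be sketched roughly as follows; `connectDisk` is a hypothetical stand-in, not MinIO's actual function.

```go
package main

import (
	"errors"
	"log"
	"time"
)

// monitorDisk keeps retrying until the disk initializes, logging at most
// once per logEvery so a persistently failing drive doesn't flood the logs.
func monitorDisk(connectDisk func() error, retryEvery, logEvery time.Duration) {
	var lastLogged time.Time
	for {
		if err := connectDisk(); err == nil {
			return // disk is back; its handler can now be registered
		} else if time.Since(lastLogged) >= logEvery {
			log.Printf("disk still offline: %v", err)
			lastLogged = time.Now()
		}
		time.Sleep(retryEvery)
	}
}

func main() {
	attempts := 0
	monitorDisk(func() error {
		if attempts++; attempts < 3 {
			return errors.New("drive not found")
		}
		return nil
	}, time.Second, time.Minute) // 30s / 30min in the real description
}
```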
Klaus Post
860fc200b0 Local and Remote hosts swapped in grid traces (#18574)
Local and Remote hosts swapped in grid trace

A bit counter-intuitive, but simple fix.
2023-12-01 08:04:08 -08:00
Harshavardhana
109a9e3f35 skip ILM expired objects from healing (#18569) 2023-12-01 07:56:24 -08:00
Klaus Post
5f971fea6e Fix Mux Connect Error (#18567)
`OpMuxConnectError` was not handled correctly.

Remove local checks for single request handlers so they can 
run before being registered locally.

Bonus: Only log IAM bootstrap on startup.
2023-12-01 00:18:04 -08:00
Harshavardhana
0d7abe3b9f allow hotfixes to generate deb, rpm packages (#18568)
```
using deb packager...
created package: minio-release/linux-amd64/minio_20231120224007.0.0.hotfix.e96ac7272_amd64.deb
using rpm packager...
created package: minio-release/linux-amd64/minio-20231120224007.0.0.hotfix.e96ac7272-1.x86_64.rpm
```
2023-11-30 15:25:51 -08:00
Klaus Post
94fbcd8ebe Add TLS cert checksum (#18557)
It allows validation of whether all certs match across clusters.
2023-11-30 12:13:50 -08:00
Harshavardhana
879d5dd236 site replication must heal policy mappings with correct userType (#18563) 2023-11-30 10:34:18 -08:00
jiuker
34187e047d feat: support elasticsearch notification endpoint compression codec (#18562) 2023-11-30 00:25:03 -08:00
Harshavardhana
0ee722f8c3 cleanup handling of STS isAllowed and simplifies the PolicyDBGet() (#18554) 2023-11-29 16:07:35 -08:00
Anis Eleuch
b7d11141e1 rename Force to Immediate for clarity (#18540) 2023-11-28 22:35:16 -08:00
Harshavardhana
e9babf3dac (chore): update all our deps (#18525) 2023-11-28 14:44:44 -08:00
Klaus Post
0bb81f2e9c Always remove subroute when queuing message on the connection. (#18550) 2023-11-28 11:22:29 -08:00
Klaus Post
bea0b050cd Improve env var config error reporting (#18549)
Improve env var config error

Env vars that were set on the current server but not on remotes were not reported in errors.

Add these.
2023-11-28 10:39:02 -08:00
Shubhendu
ce62980d4e Fixed transition rules getting overwritten while healing (#18542)
While healing the latest expiry-rule changes across sites,
if the target had pre-existing transition rules, they were getting
overwritten because the cloned expiry rules from the remote site were
written as-is. Fixed the same and added test cases as well.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-11-28 10:38:35 -08:00
Klaus Post
dc88865908 fix: shadowed error in getObjectFileInfo() (#18548)
This will result in `done <- err == nil` always returning true
for this path, which seems unintentional.
2023-11-28 09:47:41 -08:00
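For readers unfamiliar with the pitfall being fixed, here is a generic (non-MinIO) illustration of how a shadowed `err` makes `done <- err == nil` always send `true`:

```go
package main

import (
	"errors"
	"fmt"
)

func lookup() error { return errors.New("not found") }

func main() {
	done := make(chan bool, 1)

	var err error
	if true {
		err := lookup() // ":=" declares a new, shadowed err inside this block
		_ = err         // the outer err above is never assigned
	}
	done <- err == nil // always sends true, hiding the failure

	fmt.Println("success:", <-done) // prints "success: true"
}
```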
Krishnan Parthasarathi
9fbd931058 Skip versions expired by DeleteAllVersionsAction (#18537)
Object versions expired by DeleteAllVersionsAction must not be included
in data-usage accounting.
2023-11-28 08:39:21 -08:00
jiuker
b0264bdb90 preserve null version delete marker on suspended bucket version (#18547) 2023-11-28 08:31:33 -08:00
bestgopher
95d6f43cc8 fix(cmd/notification.go): no error when retry successful (#18530) 2023-11-27 22:41:03 -08:00
Anis Eleuch
9cb94eb4a9 cleaning up will delete instead of rename to trash with full disk err (#18534)
moveToTrash() function moves a folder to .trash, for example, when 
doing some object deletions: a data dir that has many parts will be 
renamed to the trash folder. However, ENOSPC is a valid error from 
rename(), and it can cripple a user trying to free some space in a 
full-disk situation.

Therefore, this commit will try to do a recursive delete in that case.
2023-11-27 17:36:02 -08:00
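A simplified sketch of the fallback this commit describes: attempt the rename into the trash folder, and if the disk is full (ENOSPC), free the space immediately with a recursive delete. Function and path names are illustrative.

```go
package main

import (
	"errors"
	"os"
	"path/filepath"
	"syscall"
)

func moveToTrash(path, trashDir string) error {
	dst := filepath.Join(trashDir, filepath.Base(path))
	err := os.Rename(path, dst)
	if errors.Is(err, syscall.ENOSPC) {
		// rename() itself can fail with ENOSPC on a full disk, so
		// delete recursively instead of deferring to the trash.
		return os.RemoveAll(path)
	}
	return err
}

func main() {
	_ = moveToTrash("/tmp/data-dir", "/tmp/.trash")
}
```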
Harshavardhana
bd0819330d avoid Walk() API listing objects without quorum (#18535)
This allows batch replication to not attempt
to copy objects that do not have read quorum.

This PR also allows walk() to provide custom
values for quorum under batch replication, and
key rotation.
2023-11-27 17:20:04 -08:00
Harshavardhana
8d9e83fd99 support passing signatureAge conditional (#18529)
this PR allows following policy

```
{
   "Version": "2012-10-17",
   "Statement": [
      {
         "Sid": "Deny a presigned URL request if the signature is more than 10 min old",
         "Effect": "Deny",
         "Action": "s3:*",
         "Resource": "arn:aws:s3:::DOC-EXAMPLE-BUCKET1/*",
         "Condition": {
            "NumericGreaterThan": {
               "s3:signatureAge": 600000
            }
         }
      }
   ]
}
```

This is to basically disable all pre-signed URLs that are older than 10 minutes.
2023-11-27 11:30:19 -08:00
jiuker
be02333529 feat: drive sub-sys to max timeout reload (#18501) 2023-11-27 09:15:06 -08:00
Harshavardhana
506f121576 remove frivolous logging in transition object (#18526)
AWS S3 closes keep-alive connections frequently
leading to frivolous logs filling up the MinIO
logs when the transition tier is an AWS S3 bucket.

Ignore such transient errors, let MinIO retry
it when it can.
2023-11-26 22:18:09 -08:00
Klaus Post
ca488cce87 Add detailed parameter tracing + custom prefix (#18518)
* Allow per handler custom prefix.
* Add automatic parameter extraction
2023-11-26 01:32:59 -08:00
Shireesh Anjal
11dc723324 Pass SUBNET URL to console (#18503)
When minio runs with MINIO_CI_CD=on, it is expected to communicate
with the locally running SUBNET. This already happens for MinIO
via the call-home functionality. However, the subnet-related functionality inside the
console continues to talk to the SUBNET production URL. Because of this,
the console cannot be tested with a locally running SUBNET.

Set the env variable CONSOLE_SUBNET_URL correctly in such cases. 
(The console already has code to use the value of this variable
as the subnet URL)
2023-11-24 09:59:35 -08:00
Shubhendu
dd6ea18901 fix: No shallow copy needed when looking at r.Form (#18499)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-11-24 09:46:55 -08:00
Praveen raj Mani
3369eeb920 Relax batch size limit for kafka events (#18513)
Fixes #18490
2023-11-24 09:07:38 -08:00
Harshavardhana
9032f49f25 DiskInfo() must return errDiskNotFound not internal errors (#18514) 2023-11-24 09:07:14 -08:00
Anis Eleuch
fbc6f3f6e8 snowball-repl: Add support of immediate tiering (#18508)
Also, fix a possible crash when some fields are not added to the batch
snowball yaml
2023-11-22 16:33:11 -08:00
Harshavardhana
fba883839d feat: bring new HDD related performance enhancements (#18239)
Optionally allows customers to enable:

- an external cache to cache GET/HEAD responses
- skipping disks that are slow to respond in GET/HEAD
  when we have already achieved a quorum
2023-11-22 13:46:17 -08:00
Krishnan Parthasarathi
a93214ea63 ilm: ObjectSizeLessThan and ObjectSizeGreaterThan (#18500) 2023-11-22 13:42:39 -08:00
Klaus Post
e6b0fc465b tweak healing to include version-id in healing result (#18225) 2023-11-22 12:30:31 -08:00
Anis Eleuch
70fbcfee4a Implement batch snowball (#18485) 2023-11-22 10:51:46 -08:00
Harshavardhana
0b074d0fae use the latest UBI image (#18497) 2023-11-22 09:34:49 -08:00
Sveinn
d67e4d5b17 fix: check for bucket existence before FTP upload (#18496) 2023-11-21 21:36:32 -08:00
Harshavardhana
891c60d83d fix: go mod was point to personal repos with replace remove it 2023-11-21 15:50:39 -08:00
Harshavardhana
fe3e49c4eb use Access(F_OK) do not need to check for permissions (#18492) 2023-11-21 15:08:41 -08:00
Shubhendu
58306a9d34 Replicate Expiry ILM configs while site replication (#18130)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-11-21 09:48:06 -08:00
jiuker
41091d9472 fix: close http body for es action (#18491) 2023-11-20 22:22:10 -08:00
Harshavardhana
a4cfb5e1ed return errors if dataDir is missing during HeadObject() (#18477)
Bonus: allow replication to attempt Deletes/Puts when
the remote returns quorum errors of some kind, this is
to ensure that MinIO can rewrite the namespace with the
latest version that exists on the source.
2023-11-20 21:33:47 -08:00
Klaus Post
51aa59a737 perf: websocket grid connectivity for all internode communication (#18461)
This PR adds a WebSocket grid feature that allows servers to communicate via 
a single two-way connection.

There are two request types:

* Single requests, which are `[]byte => ([]byte, error)`. This is for efficient small
  roundtrips with small payloads.

* Streaming requests which are `[]byte, chan []byte => chan []byte (and error)`,
  which allows for different combinations of full two-way streams with an initial payload.

Only a single stream is created between two machines - and there is, as such, no
server/client relation since both sides can initiate and handle requests. Which server
initiates the request is decided deterministically based on the server names.

Requests are made through a mux client and server, which handles message
passing, congestion, cancelation, timeouts, etc.

If a connection is lost, all requests are canceled, and the calling server will try
to reconnect. Registered handlers can operate directly on byte 
slices or use a higher-level generics abstraction.

There is no versioning of handlers/clients, and incompatible changes should
be handled by adding new handlers.

The request path can be changed to a new one for any protocol changes.

First, all servers create a "Manager." The manager must know its address 
as well as all remote addresses. This will manage all connections.
To get a connection to any remote, ask the manager to provide it given
the remote address:

```
func (m *Manager) Connection(host string) *Connection
```

All serverside handlers must also be registered on the manager. This will
make sure that all incoming requests are served. The number of in-flight 
requests and responses must also be given for streaming requests.

The "Connection" returned manages the mux-clients. Requests issued
to the connection will be sent to the remote.

* `func (c *Connection) Request(ctx context.Context, h HandlerID, req []byte) ([]byte, error)`
   performs a single request and returns the result. Any deadline provided on the request is
   forwarded to the server, and canceling the context will make the function return at once.

* `func (c *Connection) NewStream(ctx context.Context, h HandlerID, payload []byte) (st *Stream, err error)`
   will initiate a remote call and send the initial payload.

```Go
// A Stream is a two-way stream.
// All responses *must* be read by the caller.
// If the call is canceled through the context,
// the appropriate error will be returned.
type Stream struct {
	// Responses from the remote server.
	// Channel will be closed after an error or when the remote closes.
	// All responses *must* be read by the caller until either an error is returned or the channel is closed.
	// Canceling the context will cause the context cancellation error to be returned.
	Responses <-chan Response

	// Requests sent to the server.
	// If the handler is defined with 0 incoming capacity this will be nil.
	// Channel *must* be closed to signal the end of the stream.
	// If the request context is canceled, the stream will no longer process requests.
	Requests chan<- []byte
}

type Response struct {
	Msg []byte
	Err error
}
```

There are generic versions of the server/client handlers that allow the use of type
safe implementations for data types that support msgpack marshal/unmarshal.
2023-11-20 17:09:35 -08:00
Minio Trusted
8bedb419a9 Update yaml files to latest version RELEASE.2023-11-20T22-40-07Z 2023-11-21 00:54:16 +00:00
jiuker
f56a182b71 fix: close http body when webhook send (#18487) 2023-11-20 14:40:07 -08:00
Shubhendu
317b40ef90 Fixed broken docs link (#18486)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-11-20 12:04:49 -08:00
Shubhendu
e938ece492 Added guidelines for setting prometheus alerts (#18479)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-11-19 10:16:08 -08:00
Anis Eleuch
02331a612c batch-repl: Replicate missing metadata and standard headers (#18484)
- Replicate Expires when the source is local or remote
- Replicate metadata when the source is remote
2023-11-18 19:12:44 -08:00
Anis Eleuch
8317557f70 decom: Fix listing quorum to be equal to deletion quorum (#18476)
With an odd number of drives per erasure set, the write quorum is
half + 1; however the decommissioning listing will still list those
objects and does not consider them stale.

Fix it by using the (N+1)/2 formula.

Co-authored-by: Anis Elleuch <anis@min.io>
2023-11-17 21:09:09 -08:00
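A tiny worked example of the quorum arithmetic above, assuming an odd-sized set of 5 drives: the write quorum 5/2 + 1 and the corrected listing quorum (5+1)/2 both come out to 3.

```go
package main

import "fmt"

func main() {
	n := 5                       // drives per erasure set (odd)
	writeQuorum := n/2 + 1       // half + 1 = 3
	listingQuorum := (n + 1) / 2 // (N+1)/2 = 3, now matching the deletion quorum

	fmt.Println("write:", writeQuorum, "listing:", listingQuorum)
}
```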
Anis Eleuch
1bb7a2a295 Immediate transition ILM to avoid quick deferring to the scanner (#18475)
The immediate transition use case is mostly used to fill the warm
backend with a lot of data when a new deployment is created.

Currently, if the transition queue is complete, the transition will be
deferred to the scanner; change this behavior by blocking the PUT request
until the transition queue has a new place for a transition task.
2023-11-17 16:16:46 -08:00
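A minimal sketch of the new behavior: a bounded transition queue where the PUT path blocks on the channel send until a worker frees a slot, rather than handing the object off to the scanner. Names and sizes are illustrative, not MinIO's.

```go
package main

import "fmt"

type transitionTask struct{ object string }

func main() {
	queue := make(chan transitionTask, 4) // bounded transition queue
	done := make(chan struct{})

	// Transition workers draining the queue.
	go func() {
		defer close(done)
		for task := range queue {
			fmt.Println("transitioning", task.object)
		}
	}()

	// PUT path: a plain channel send blocks while the queue is full, so the
	// request waits for a free slot instead of deferring to the scanner.
	for i := 0; i < 16; i++ {
		queue <- transitionTask{object: fmt.Sprintf("obj-%d", i)}
	}
	close(queue)
	<-done
}
```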
jiuker
215ca58d6a fix: close the http.Body when WebhookTarget isActive (#18467) 2023-11-17 12:02:26 -08:00
Anis Eleuch
12f570a307 audit: Try to send audit even if the status is offline (#18458)
Currently, once the audit target goes offline, there is no code that tries
to reconnect to it; at the same time Send() quickly returns with
an error without really trying to send a message to the audit endpoint, so
the audit endpoint will never be online again.

This commit fixes that behavior; the current downside is that we miss printing some
logs when the audit target goes offline, however this information is
available in prometheus.

Later, we can refactor internal/logger so the http endpoint can send errors to
console target.
2023-11-17 10:40:28 -08:00
Shubhendu
e4b619ce1a Added graph for Erasure Set Tolerance value (#18472)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-11-17 10:38:15 -08:00
Harshavardhana
0a286153bb remove checking for BucketInfo() peer call for every PUT() (#18464)
we already validate if the bucket doesn't exist in RenameData()
which can handle this cleanly, instead of making a network call
and returning errors.
2023-11-17 05:29:50 -08:00
Anis Eleuch
22d59e757d Remove stale data in HEAD/GET object (#18460)
Currently if the object does not exist in the quorum disks of an erasure
set, the dangling code is never called because the returned error will
be errFileNotFound or errFileVersionNotFound.

With this commit, when errFileNotFound or errFileVersionNotFound is
returned while trying to calculate the quorum of a given object, the
code checks if a disk returned nil, which means a stale object exists in
that disk; that will trigger the deleteIfDangling() function.
2023-11-16 08:39:53 -08:00
Andreas Auernhammer
0daa2dbf59 health: split liveness and readiness handler (#18457)
This commit splits the liveness and readiness
handler into two separate handlers. In K8S, a
liveness probe is used to determine whether the
pod is in "live" state and functioning at all.
In contrast, the readiness probe is used to
determine whether the pod is ready to serve
requests.

A failing liveness probe causes pod restarts while
a failing readiness probe causes k8s to stop routing
traffic to the pod. Hence, a liveness probe should
be as robust as possible while a readiness probe
should be used for load balancing.

Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/

Signed-off-by: Andreas Auernhammer <github@aead.dev>
2023-11-16 01:51:27 -08:00
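As a generic illustration of the split (not MinIO's actual handlers or routes), liveness can simply confirm the process answers HTTP at all, while readiness additionally gates on the server being ready to take traffic:

```go
package main

import (
	"net/http"
	"sync/atomic"
)

var ready atomic.Bool // flipped to true once initialization completes

func livenessHandler(w http.ResponseWriter, _ *http.Request) {
	// As robust as possible: if we can answer at all, the pod is "live".
	w.WriteHeader(http.StatusOK)
}

func readinessHandler(w http.ResponseWriter, _ *http.Request) {
	if !ready.Load() {
		// Kubernetes stops routing traffic here, but does not restart the pod.
		w.WriteHeader(http.StatusServiceUnavailable)
		return
	}
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/health/live", livenessHandler)   // illustrative path
	http.HandleFunc("/health/ready", readinessHandler) // illustrative path
	ready.Store(true)
	_ = http.ListenAndServe(":8080", nil)
}
```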
Adrian Najera
96c2304ae8 allow MINIO_STS_DURATION to increase the IDP token expiration (#18396)
The share link duration is based on the IDP token expiration;
for the share link to last longer, you may now use the
MINIO_STS_DURATION environment variable.
2023-11-15 20:42:31 -08:00
Minio Trusted
343dd2f491 Update yaml files to latest version RELEASE.2023-11-15T20-43-25Z 2023-11-16 01:32:11 +00:00
Praveen raj Mani
38f35463b7 Load bucket configs during the metadata refresh (#18449)
This patch takes care of loading the bucket configs of failed buckets
during the periodic refresh. This makes sure the event notifiers and
remote bucket targets are properly initialized.
2023-11-15 12:43:25 -08:00
Harshavardhana
5573986e8e fix: relax free inode check for single drive deployments (#18437)
users might use MinIO on NFS or GPFS, which provide dynamic
inodes and may not even have a concept of free inodes.

To allow users to use MinIO on top of GPFS, relax the
free inode check.
2023-11-14 09:31:16 -08:00
Sveinn
f3367a1b20 Adding error handling for network errors in the SFTP layer (#18442) 2023-11-14 09:31:00 -08:00
Sveinn
a3c2f7b0e8 small fix for the PR template (#18443) 2023-11-14 09:29:11 -08:00
Sveinn
8fbec30998 Adding a missing return to fix SFTP Rmdir message (#18438) 2023-11-14 09:26:46 -08:00
Harshavardhana
a7466eeb0e fix: ignore dperf on unformatted/unavailable/unmounted drives (#18435) 2023-11-13 22:32:08 -08:00
Harshavardhana
8b1e819bf3 fix: make sure to purge all the completed in resume() (#18429)
currently, previously completed jobs would re-run even though
they are already complete, causing incorrect behavior.
2023-11-13 08:15:00 -08:00
Anis Eleuch
fe63664164 prom: Add drive failure tolerance per erasure set (#18424) 2023-11-13 00:59:48 -08:00
Minio Trusted
4598827dcb Update yaml files to latest version RELEASE.2023-11-11T08-14-41Z 2023-11-11 18:01:27 +00:00
Sveinn
9afdb05bf4 fix: file consistency issue on SFTP upload (#18422)
* creating a byte buffer for SFTP file segments
* Adding an error condition for when there are 
  remaining segments in the queue
* Simplification of the queue using a map
2023-11-11 00:14:41 -08:00
Krishnan Parthasarathi
9569a85cee Avoid allocs for MRF on-disk header (#18425) 2023-11-10 19:54:46 -08:00
Harshavardhana
54721b7c7b fix: batch replication from source allow out of band deletes (#18423)
it is possible that ILM or deletes got triggered on a batch
of objects that we are attempting to batch replicate; ignore
this scenario as valid behavior.
2023-11-10 16:12:35 -08:00
Harshavardhana
91d8bddbd1 use sendfile/splice implementation to perform DMA (#18411)
sendfile implementation to perform DMA on all platforms

Go stdlib already supports sendfile/splice implementations
for

- Linux
- Windows
- *BSD
- Solaris

Along with this change, however, O_DIRECT for reads must be
removed as well, since we need to use the sendfile() implementation.

The main reason to add O_DIRECT for reads was to reduce the
chances of the page cache causing OOMs for MinIO; however it would
seem that by avoiding buffer copies from user space to kernel space
this issue is not a problem anymore.

There is no Go-based memory allocation required, and the
page cache is not referenced back to MinIO. The page-cache
reference is fully owned by the kernel at this point, which
essentially should solve the problem of page-cache build-up.

With this we now also support SG - when the NIC supports Scatter/Gather
https://en.wikipedia.org/wiki/Gather/scatter_(vector_addressing)
2023-11-10 10:10:14 -08:00
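For context, the Go standard library's fast path looks roughly like this: when the destination is a *net.TCPConn and the source is an *os.File, io.Copy routes through ReadFrom and the kernel performs the sendfile/splice, so no Go-side buffer is involved. The sketch below is illustrative; paths and addresses are placeholders.

```go
package main

import (
	"io"
	"net"
	"os"
)

// serveFile streams a file to a TCP connection; io.Copy detects the
// (*net.TCPConn).ReadFrom fast path and lets the kernel do the copy.
func serveFile(conn net.Conn, path string) error {
	f, err := os.Open(path) // note: plain open, no O_DIRECT, as described above
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(conn, f)
	return err
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:9090")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	conn, err := ln.Accept()
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	_ = serveFile(conn, "/tmp/object-part")
}
```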
Harshavardhana
80adc87a14 converge WARM tier object name to hash of deployment+bucket (#18410)
this is to ensure that we can converge and save IOPs
when hot-tier accesses MinIO.
2023-11-10 02:15:13 -08:00
Taran Pelkey
117ad1b65b Loosen requirements to detach policies for LDAP (#18419) 2023-11-09 14:44:43 -08:00
Klaus Post
2229509362 fix: leaking offline disks in MarkOffline() thread (#18414)
`monitorAndConnectEndpoints` will continue to attempt to reconnect offline disks.

Since disks were never closed, a `MarkOffline` would continue to try to check these disks forever.

Close previous disks.
2023-11-09 09:33:32 -08:00
Anis Eleuch
6ef8e87492 Support case insensitive kafka SASL mechanism config values (#18398) 2023-11-08 20:04:01 -08:00
Krishnan Parthasarathi
0a25083fdb Tiered objects require ns locks unlike inlined (#18409) 2023-11-08 20:00:02 -08:00
Sveinn
15137d0327 refactor SFTP to use the new minio/pkg implementation (#18406) 2023-11-08 09:47:05 -08:00
Poorna
8c9974bc0f site replication: avoid propagating bucket b/w settings (#18399)
replication mode and bucket bandwidth are one-way and should not be
propagated to peer cluster.

Regression from #18062
2023-11-08 00:40:25 -08:00
jiuker
079b6c2b50 fix: add err when all bucket resync failed (#18401) 2023-11-08 00:40:08 -08:00
Minio Trusted
0924b34a17 Update yaml files to latest version RELEASE.2023-11-06T22-26-08Z 2023-11-08 08:04:46 +00:00
Harshavardhana
754f7a8a39 replace io.Discard usage to fix some NUMA copy() latencies (#18394)
replace io.Discard usage to fix NUMA copy() latencies

On NUMA systems, copying from the 8K buffer allocated via
io.Discard leads to large latency build-up, since every

```
copy(new8kbuf, largebuf)
```

can incur up to 1ms worth of latency on NUMA systems
due to memory sharding across NUMA nodes.
2023-11-06 14:26:08 -08:00
Harshavardhana
64bafe1dfe skip speedtest bucket from site-replication (#18393) 2023-11-06 11:52:33 -08:00
jiuker
c3e456e7e6 fix: no resyncid when site-replication cancel (#18392) 2023-11-06 01:53:31 -08:00
Harshavardhana
57aaeafd2f update dperf to include NUMA fixes (#18391) 2023-11-04 20:16:14 -07:00
Harshavardhana
3c2e1a87e2 fix: support dropping privileges with arbitrary users (#18386)
fixes #18380
2023-11-03 14:18:18 -07:00
vicmunoz
da95a2d13f fix: object versions metric help (#18388) 2023-11-03 11:43:52 -07:00
Shireesh Anjal
cc5e05fdeb Do not anonymize hostnames by default (#18387)
Anonymize them only if the parameter `anonymize` is set to `strict`.
2023-11-03 10:09:33 -07:00
Harshavardhana
a79c390cca update console v0.41.0 (#18385)
Signed-off-by: Harshavardhana <harsha@minio.io>
2023-11-02 18:47:09 -07:00
jiuker
8a56af439c fix: siteReplicationSys.startResync return no buckets return if error (#18374) 2023-11-02 16:00:03 -07:00
Shireesh Anjal
f6e581ce54 Capture network device info in health report (#18381) 2023-11-02 09:49:49 -07:00
Minio Trusted
8953f88780 Update yaml files to latest version RELEASE.2023-11-01T18-37-25Z 2023-11-01 21:57:51 +00:00
Harshavardhana
4b4a98d5e5 add support for older CPU via a new container image (#18370)
fixes #18365
2023-11-01 11:37:25 -07:00
Klaus Post
7472818d94 Fix hanging scanner saves (#18368)
Fix various regressions from #18029

* If context is canceled the token is never returned. This will lead to scanner being unable to save and deadlocking.
* Fix backup not being able to get any data (hr empty)
* Reduce backup timeout.
2023-11-01 09:09:28 -07:00
Minio Trusted
ad44fe8d3e Update yaml files to latest version RELEASE.2023-11-01T01-57-10Z 2023-11-01 10:39:41 +00:00
dependabot[bot]
55e713db0a build(deps): bump github.com/nats-io/nkeys from 0.4.5 to 0.4.6 (#18360)
Bumps [github.com/nats-io/nkeys](https://github.com/nats-io/nkeys) from 0.4.5 to 0.4.6.
- [Release notes](https://github.com/nats-io/nkeys/releases)
- [Changelog](https://github.com/nats-io/nkeys/blob/main/.goreleaser.yml)
- [Commits](https://github.com/nats-io/nkeys/compare/v0.4.5...v0.4.6)

---
updated-dependencies:
- dependency-name: github.com/nats-io/nkeys
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-31 18:57:10 -07:00
Taran Pelkey
33322e6638 Change behavior of service account empty policies (#18346)
* Fix embedded/implied policy behavior

* assume implied policy if passed as empty

* fix for all

* Fix failing tests

---------

Co-authored-by: Prakash Senthil Vel <23444145+prakashsvmx@users.noreply.github.com>
2023-10-31 12:30:36 -07:00
Daniel López Guimaraes
a1792ca0d1 fix: relax enforcing filename on PostPolicy (#18336)
The filename is not required to be in the form data.
2023-10-30 21:06:32 -07:00
Harshavardhana
ac8c43fe9c fix: allow missing hot-tier accounting (#18345) 2023-10-30 14:42:11 -07:00
Allan Roger Reid
4d40ee00e9 Add check for reverse proxy setups (#18310)
Add check for reverse proxy setups, to skip the check for paths being served by a different port on the same address.
2023-10-30 10:49:04 -07:00
Adrian Najera
06f59ad631 fix: expiration time for share link when using OpenID (#18297) 2023-10-30 10:21:34 -07:00
Harshavardhana
877e0cac03 fix: tiering statistics handling a bug in clone() implementation (#18342)
Tiering statistics have been broken for some time now; a regression
was introduced in 6f2406b0b6

Bonus: fixes an issue where objects that have not yet tiered were not
assumed to be of the 'STANDARD' storage class; this should be
conditional based on the object's metadata, not a default assumption.

This PR also does some cleanup in terms of implementation,

fixes #18070
2023-10-30 09:59:51 -07:00
Shubhendu
ef67c39910 Added graphs for KMS metrics (#18321)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-10-30 03:20:53 -07:00
Klaus Post
508710f4d1 Re-add duplicate upload id sanity check. (#18339)
https://github.com/minio/minio/pull/18307 partially removed the duplicate upload id check.

While I can't really see how ListDir can return duplicate entries, let's re-add it, since it is a cheap sanity check.
2023-10-29 08:33:30 -07:00
Andreas Auernhammer
3aa3d9cf14 switch minio container base image to ubi-micro (#18329)
This commit changes the container base image
from ubi-minimal to ubi-micro.

The docker build process happens now in two stages.
The build stage:
 - downloads the latest CA certificate bundle
 - downloads MinIO binary (for requested version/os/arch)
 - downloads MinIO binary signature and verifies it
   using minisign

Then it creates an image based on ubi-micro with just
the minio binary that was downloaded and verified during the
build stage.

The build stage is simplified to just verifying the
minisign signature.

Signed-off-by: Andreas Auernhammer <github@aead.dev>
2023-10-28 12:19:49 -07:00
Matthew Toohey
c2fedb4c3f fix: log targetID instead of Name when event error occurs (#18335) 2023-10-28 08:32:57 -07:00
Poorna
03dc65e12d Reload replication targets lazily if missing (#18333)
There can be rare situations where errors seen in bucket metadata
load on startup or subsequent metadata updates can result in missing
replication remotes.

Attempt a refresh of remote targets backed by a good replication config
lazily in 5 minute intervals if there ever occurs a situation where
remote targets go AWOL.
2023-10-27 21:08:53 -07:00
Harshavardhana
b8d62a8068 add MC_CONFIG_DIR to use mc from writable path (#18317)
`mc` will currently fail inside a MinIO pod without the
`--config-dir` option; instead use the ENV to
avoid passing flags.
2023-10-26 10:52:10 -07:00
jiuker
dbc2368a7b fix: parse the subsys env error (#18319) 2023-10-26 08:12:57 -07:00
Praveen raj Mani
54aed421b8 fix: update the user cache while adding service accounts with expiry (#18320) 2023-10-26 08:11:29 -07:00
jiuker
d5e8dac1cf fix: canceling the heal caused goroutine to leak. (#18322) 2023-10-26 07:53:06 -07:00
Poorna
96ec8fcba1 Preserve replica timestamps in multipart (#18318)
Also a backward compatibility fix to use x-amz-replica-status
if present as replication status.
2023-10-25 21:24:10 -07:00
Harshavardhana
0663eb69ed fix: do not preserve mtime during CopyObject() metadata updates (#18316)
mtime must be preserved only if destination mtime is set.

fixes #18314
2023-10-25 14:30:56 -07:00
Harshavardhana
0594d37230 update go mod and CREDITS (#18289) 2023-10-25 08:32:59 -07:00
Andreas Auernhammer
3cc30bcc18 upgrade container base image to ubi-9 (#18313)
This commit updates the container base image from
ubi:8.8 to ubi:9.2.

Signed-off-by: Andreas Auernhammer <github@aead.dev>
2023-10-25 08:32:18 -07:00
Minio Trusted
99c1a642a4 Update yaml files to latest version RELEASE.2023-10-25T06-33-25Z 2023-10-25 07:52:49 +00:00
Harshavardhana
c60f54e5be make ListMultipart/ListParts more reliable skip healing disks (#18312)
this PR also fixes old flaky tests, by properly marking disk offline-based tests.
2023-10-24 23:33:25 -07:00
Harshavardhana
483389f2e2 set diskMaxConcurrent to 32 if nrRequests is lower 2023-10-24 17:21:12 -07:00
Harshavardhana
c0f2f84285 avoid racy replicationCount checks (#18311)
resync status may not be up-to-date by
the time the resync is over due to how
the timer is triggered.

diff is sufficient to know if replication
happened or not.
2023-10-24 15:30:42 -07:00
Harshavardhana
069d118329 fix: listObjectParts to prefer local and single disks (#18309) 2023-10-24 13:51:57 -07:00
Harshavardhana
a7b1834772 fix: flaky and stupid tests in root lockdown (#18308) 2023-10-24 13:22:44 -07:00
Klaus Post
6415dec37a Improve multipart listing speed (#18307) 2023-10-24 12:06:06 -07:00
Klaus Post
74253e1ddc Fix BackendInfo() race (#18305)
`GetParityForSC` has a value receiver, so Config is copied before the lock is obtained.

Make it pointer receiver.

Fixes:

```
WARNING: DATA RACE
Read at 0x0000079cdd10 by goroutine 190:
  github.com/minio/minio/cmd.(*erasureServerPools).BackendInfo()
      github.com/minio/minio/cmd/erasure-server-pool.go:579 +0x6f
  github.com/minio/minio/cmd.(*erasureServerPools).LocalStorageInfo()
      github.com/minio/minio/cmd/erasure-server-pool.go:614 +0x3c6
  github.com/minio/minio/cmd.(*peerRESTServer).LocalStorageInfoHandler()
      github.com/minio/minio/cmd/peer-rest-server.go:347 +0x4ea
  github.com/minio/minio/cmd.(*peerRESTServer).LocalStorageInfoHandler-fm()
...

WARNING: DATA RACE
Read at 0x0000079cdd10 by goroutine 190:
  github.com/minio/minio/cmd.(*erasureServerPools).BackendInfo()
      github.com/minio/minio/cmd/erasure-server-pool.go:579 +0x6f
  github.com/minio/minio/cmd.(*erasureServerPools).LocalStorageInfo()
      github.com/minio/minio/cmd/erasure-server-pool.go:614 +0x3c6
  github.com/minio/minio/cmd.(*peerRESTServer).LocalStorageInfoHandler()
      github.com/minio/minio/cmd/peer-rest-server.go:347 +0x4ea
  github.com/minio/minio/cmd.(*peerRESTServer).LocalStorageInfoHandler-fm()
```
2023-10-24 08:15:41 -07:00
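The receiver pitfall this fixes can be reproduced generically (the type below is a stand-in, not MinIO's Config): with a value receiver the struct, including the field being read, is copied before the method body ever takes the lock, so the mutex cannot protect that read; `go vet`'s copylocks check also flags the copied mutex.

```go
package main

import "sync"

type config struct {
	mu     sync.Mutex
	parity int
}

// Racy: c (and c.parity) is copied when the method is called,
// before mu.Lock() runs, so the lock protects nothing here.
func (c config) parityValueReceiver() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.parity
}

// Safe: no copy; the lock actually guards the read of the shared field.
func (c *config) parityPointerReceiver() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.parity
}

func main() {
	c := &config{parity: 4}
	go func() {
		c.mu.Lock()
		c.parity = 2
		c.mu.Unlock()
	}()
	_ = c.parityPointerReceiver()
}
```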
Minio Trusted
01b3fb91e5 Update yaml files to latest version RELEASE.2023-10-24T04-42-36Z 2023-10-24 05:05:12 +00:00
Harshavardhana
2dc917e87f maxConcurrent must be set only once per node (#18303) 2023-10-23 21:42:36 -07:00
Aditya Manthramurthy
0a284a1a10 fix: SR: Add more info when IAM config differs (#18302)
Provide details on what IAM info mismatched when the validation fails
2023-10-23 21:16:40 -07:00
Harshavardhana
5c8339e1e8 fix: veeam SOS API to higher layers (#18287)
- support populating usage info from scanner info
- support populating the bucket quota via the quota
  settings for the bucket
2023-10-23 13:55:45 -07:00
Harshavardhana
fd37418da2 fix: allow server not initialized error to be retried (#18300)
Since relaxing the quorum error across pools
for ListBuckets() and GetBucketInfo(), we hit a
situation where loading IAM could potentially
return a "server not initialized" error for the
second pool.

We need to handle this: let the pool come online
and retry transparently - this PR fixes that.
2023-10-23 12:30:20 -07:00
Harshavardhana
bbfea29c2b use object modTime for the event sequencer ID (#18285)
always set modTime after lock is acquired in
completemultipart stage to make sure that the
modTime is not racy.
2023-10-20 19:28:05 -07:00
Harshavardhana
aa703dc903 relax write quorum requirement for ListBuckets()/HeadBucket() (#18288)
Also fix error handling for HeadBucket() to be pool specific
2023-10-20 17:50:21 -07:00
Krishnan Parthasarathi
8cd80fec8c Add unit test for lifecycle.FilterRules (#18284) 2023-10-19 21:33:28 -07:00
Harshavardhana
780882efcf do not check for query params to be signed headers (#18283)
x-amz-signed-headers is meant for HTTP headers only,
not for query params; using that to verify things
further can lead to failure.

The generated presigned URL with custom metadata
is already kosher (tamper proof).

fixes #18281
2023-10-19 21:32:49 -07:00
dependabot[bot]
c5636143c6 build(deps): bump github.com/nats-io/nats-server/v2 from 2.9.20 to 2.9.23 (#18282)
build(deps): bump github.com/nats-io/nats-server/v2

Bumps [github.com/nats-io/nats-server/v2](https://github.com/nats-io/nats-server) from 2.9.20 to 2.9.23.
- [Release notes](https://github.com/nats-io/nats-server/releases)
- [Changelog](https://github.com/nats-io/nats-server/blob/main/.goreleaser.yml)
- [Commits](https://github.com/nats-io/nats-server/compare/v2.9.20...v2.9.23)

---
updated-dependencies:
- dependency-name: github.com/nats-io/nats-server/v2
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-19 16:40:59 -07:00
Klaus Post
ba6218b354 fix: resource metrics "concurrent map iteration and map write" (#18273)
`resourceMetricsMap` has no protection against concurrent reads and writes.

Add a mutex and don't use maps from the last iteration.

Bug introduced in #18057

Fixes #18271
2023-10-18 13:28:50 -07:00
Harshavardhana
8e32de3ba9 cache DiskInfo() metrics call separately (#18270) 2023-10-18 11:17:32 -07:00
Klaus Post
e37508fb8f fix: linter errors in Windows specific code (#18276) 2023-10-18 11:08:15 -07:00
Klaus Post
b46a717425 Remove unused config migration (#18277)
None of the migration is called. Remove dead code.
2023-10-18 11:05:24 -07:00
Klaus Post
7926df0b80 Fix globalDeploymentID race (#18275)
globalDeploymentID was being read while it was being set.

Fixes race:

```
WARNING: DATA RACE
Write at 0x0000079605a0 by main goroutine:
  github.com/minio/minio/cmd.connectLoadInitFormats()
      github.com/minio/minio/cmd/prepare-storage.go:269 +0x14f0
  github.com/minio/minio/cmd.waitForFormatErasure()
      github.com/minio/minio/cmd/prepare-storage.go:294 +0x21d
...

Previous read at 0x0000079605a0 by goroutine 105:
  github.com/minio/minio/cmd.newContext()
      github.com/minio/minio/cmd/utils.go:817 +0x31e
  github.com/minio/minio/cmd.adminMiddleware.func1()
      github.com/minio/minio/cmd/admin-router.go:110 +0x96
  net/http.HandlerFunc.ServeHTTP()
      net/http/server.go:2136 +0x47
  github.com/minio/minio/cmd.setBucketForwardingMiddleware.func1()
      github.com/minio/minio/cmd/generic-handlers.go:460 +0xb1a
  net/http.HandlerFunc.ServeHTTP()
      net/http/server.go:2136 +0x47
...
```
2023-10-18 08:06:57 -07:00
Krishnan Parthasarathi
557df666fd Don't skip rules with ExpiredObjectDeleteMarker (#18256) 2023-10-16 22:46:46 -07:00
Harshavardhana
f91b257f50 choose different max_concurrent requests per drive based on HDD/NVMe (#18254)
currently the default for all drives is 512, which is a lot
for HDDs; recent testing has revealed that moving this to 32
for HDDs seems like a fair value.
2023-10-16 17:18:13 -07:00
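A minimal sketch of choosing the per-drive limit from the numbers quoted above; the rotational check is a placeholder (on Linux it would typically come from /sys/block/<dev>/queue/rotational), not MinIO's actual detection.

```go
package main

import "fmt"

// maxConcurrentFor picks a per-drive request limit: deep queues mostly add
// seek-bound latency on spinning disks, so HDDs get a much smaller value.
func maxConcurrentFor(rotational bool) int {
	if rotational {
		return 32 // HDD
	}
	return 512 // NVMe/SSD: the previous default
}

func main() {
	fmt.Println("hdd:", maxConcurrentFor(true), "nvme:", maxConcurrentFor(false))
}
```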
Aditya Manthramurthy
28a2d1eb3d Allow OpenID ARN resource ID to start with a - (#18255) 2023-10-16 13:50:51 -07:00
Minio Trusted
a0ae1489e5 Update yaml files to latest version RELEASE.2023-10-16T04-13-43Z 2023-10-16 05:58:49 +00:00
Harshavardhana
edfb310a59 fix: always load ENVs from files first as soon as server starts (#18247)
This is a regression from #18231, however reading from ENV files
must happen well before any parsing logic is invoked.
2023-10-15 21:13:43 -07:00
Minio Trusted
a2312028b9 Update yaml files to latest version RELEASE.2023-10-14T05-17-22Z 2023-10-14 06:27:13 +00:00
Poorna
78f1f69d57 fix site replication resync status (#18245)
To persist status changes on disk upon completion.

Adds new tests to handle this functionality.
2023-10-13 22:17:22 -07:00
Harshavardhana
e1e33077e8 fix: tests and resync replication status (#18244) 2023-10-13 17:03:34 -07:00
Aditya Manthramurthy
b3e7de010d Remove usage of errors.Join for go1.19 compat (#18243) 2023-10-13 15:14:16 -07:00
Satish Michael
f5b04865f4 Helm Chart: Added "MINIO_IDENTITY_OPENID_REDIRECT_URI" Env Var (#18236)
added redirect uri env

Signed-off-by: Satish Kumar Kadarkarai Main <michael.satish@gmail.com>
2023-10-13 07:46:54 -07:00
Shireesh Anjal
bf1c6edb76 Revert "Capture network device info in health report" (#18241)
Introducing a new version of healthinfo struct for adding this info is
not correct. It needs to be implemented differently without adding a new
version.

This reverts commit 8737025d940f80360ed4b3686b332db5156f6659.
2023-10-13 07:46:36 -07:00
jiuker
2ac7fee017 fix: missing fileName will upload failed when PostPolicyBucketHandler (#18240) 2023-10-13 07:31:23 -07:00
Klaus Post
128256e3ab Add event counters (#18232)
Export metric for global events sent and skipped for the lifetime of the server.
2023-10-12 15:39:22 -07:00
Shireesh Anjal
a66a7f3e97 Capture network device info in health report (#18213) 2023-10-12 15:33:31 -07:00
jiuker
20b79f8945 fix: env depend on the flag (#18231) 2023-10-12 15:32:38 -07:00
Klaus Post
9a877734b2 Fix various poolmeta races (#18230)
There is a fundamental race condition in `newErasureServerPools`, where setObjectLayer is 
called before the poolMeta has been loaded/populated.

We add a placeholder value to this field but disable all saving of the value, so we don't risk 
overwriting the value on disk. Once the value has been loaded or created, it is replaced with 
the proper value, which will also be saved.

Also fixes various accesses of `poolMeta` that were done without locks.

We make the `poolMeta.IsSuspended` return false, even if we shouldn't risk out-of-bounds 
reads anymore.
2023-10-12 15:30:42 -07:00
Harshavardhana
409c391850 implement helpers to get relevant info instead of FileInfo() (#18228) 2023-10-12 15:29:59 -07:00
Klaus Post
763ff085a6 Add CI tests for next branch (#18224) 2023-10-12 06:15:10 -07:00
Shubhendu
5b9656374c Error if target went offline (#18221)
If the target went offline while MinIO was down, error once
while trying to send a message. If the target goes offline while the
MinIO server is running, it is already detected through the ping() call,
which errors out if the target is offline.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-10-12 06:13:57 -07:00
dependabot[bot]
b32014549c build(deps): bump golang.org/x/net from 0.15.0 to 0.17.0 (#18219)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.15.0 to 0.17.0.
- [Commits](https://github.com/golang/net/compare/v0.15.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-10-12 01:54:36 -07:00
dependabot[bot]
9476d212bc build(deps): bump golang.org/x/net from 0.14.0 to 0.17.0 in /docs/debugging/s3-verify (#18218)
build(deps): bump golang.org/x/net in /docs/debugging/s3-verify

Bumps [golang.org/x/net](https://github.com/golang/net) from 0.14.0 to 0.17.0.
- [Commits](https://github.com/golang/net/compare/v0.14.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-10-12 00:39:12 -07:00
jiuker
000928d34e fix: should call func globalOSMetrics.time(s)() when updateOSMetrics (#18209) 2023-10-12 00:08:13 -07:00
Harshavardhana
6829ae5b13 completely remove drive caching layer from gateway days (#18217)
This has already been deprecated for close to a year now.
2023-10-11 21:18:17 -07:00
jiuker
f09756443d fix: a dynamic config will make a panic for addOrUpdateIDP (#18208) 2023-10-11 09:06:40 -07:00
jiuker
5512016885 fix: siteResyncMetrics init will make a deadlock when len(siteReplication) >= 3 (#18206) 2023-10-10 23:27:27 -07:00
Harshavardhana
21ecb941fe fix: avoid counting out of band deletes during disk heal (#18205) 2023-10-10 14:39:48 -07:00
Harshavardhana
77e94087cf fix: calling statfs() call moves the disk head (#18203)
if erasure upgrade is needed rely on the in-memory
values, instead of performing a "DiskInfo()" call.

https://brendangregg.com/blog/2016-09-03/sudden-disk-busy.html

for HDDs these are problematic; let's avoid this because
there is no value in "being" absolutely strict here
in terms of parity. We are okay to increase parity
based on the in-memory online/offline ratio.
2023-10-10 13:47:35 -07:00
Klaus Post
9ab1f25a47 fix : PutObjectExtract data races (#18199)
Several callers to putObjectTar may be fighting to set sc. Move the write out of the loop.

Use static resp, and request elements.

Fixes tests with -race:

```
WARNING: DATA RACE
Read at 0x00c01cd680e0 by goroutine 691354:
  github.com/minio/minio/cmd.objectAPIHandlers.PutObjectExtractHandler.func1()
      e:/gopath/src/github.com/minio/minio/cmd/object-handlers.go:2130 +0x149
  github.com/minio/minio/cmd.untar.func1()
      e:/gopath/src/github.com/minio/minio/cmd/untar.go:250 +0x2b6
  github.com/minio/minio/cmd.untar.func8()
      e:/gopath/src/github.com/minio/minio/cmd/untar.go:261 +0xa4

Previous write at 0x00c01cd680e0 by goroutine 691352:
  github.com/minio/minio/cmd.objectAPIHandlers.PutObjectExtractHandler.func1()
      e:/gopath/src/github.com/minio/minio/cmd/object-handlers.go:2131 +0x15d
  github.com/minio/minio/cmd.untar.func1()
      e:/gopath/src/github.com/minio/minio/cmd/untar.go:250 +0x2b6
  github.com/minio/minio/cmd.untar.func8()
      e:/gopath/src/github.com/minio/minio/cmd/untar.go:261 +0xa4
```
2023-10-10 08:36:44 -07:00
jiuker
aaab7aefbe fix: avoid nil panic upon error in GetObjectNInfo via InnerGetObjectNInfoFn (#18198) 2023-10-10 08:35:33 -07:00
Klaus Post
5b8599e52d Do not log invalid tag errors (#18200)
Eliminate logging on invalid tags:

```
API: PutObjectTagging(bucket=aws-sdk-go-test-aupmzek4341ee2, object=sgehiqp24fwt4hafffmtwzkrqnq325)
Time: 07:40:33 UTC 10/10/2023
DeploymentID: f122cbfa-42b1-428f-9002-39c644cace71
RequestID: 178CAF0DE0A67480
RemoteHost: 127.0.0.1
Host: 127.0.0.1:9001
UserAgent: aws-sdk-go/1.44.257 (go1.21.0; linux; amd64)
Error: Tags cannot be more than 10 (*tags.errTag)
       5: internal\logger\logger.go:259:logger.LogIf()
       4: cmd\api-errors.go:2350:cmd.toAPIErrorCode()
       3: cmd\api-errors.go:2375:cmd.toAPIError()
       2: cmd\object-handlers.go:2912:cmd.objectAPIHandlers.PutObjectTaggingHandler()
       1: net\http\server.go:2136:http.HandlerFunc.ServeHTTP()

API: PutObjectTagging(bucket=aws-sdk-go-test-aupmzek4341ee2, object=sgehiqp24fwt4hafffmtwzkrqnq325)
Time: 07:40:33 UTC 10/10/2023
DeploymentID: f122cbfa-42b1-428f-9002-39c644cace71
RequestID: 178CAF0DE0BEA514
RemoteHost: 127.0.0.1
Host: 127.0.0.1:9001
UserAgent: aws-sdk-go/1.44.257 (go1.21.0; linux; amd64)
Error: Cannot provide multiple Tags with the same key (*tags.errTag)
       5: internal\logger\logger.go:259:logger.LogIf()
       4: cmd\api-errors.go:2350:cmd.toAPIErrorCode()
       3: cmd\api-errors.go:2375:cmd.toAPIError()
       2: cmd\object-handlers.go:2912:cmd.objectAPIHandlers.PutObjectTaggingHandler()
       1: net\http\server.go:2136:http.HandlerFunc.ServeHTTP()

API: PutObjectTagging(bucket=aws-sdk-go-test-aupmzek4341ee2, object=sgehiqp24fwt4hafffmtwzkrqnq325)
Time: 07:40:33 UTC 10/10/2023
DeploymentID: f122cbfa-42b1-428f-9002-39c644cace71
RequestID: 178CAF0DE0E78970
RemoteHost: 127.0.0.1
Host: 127.0.0.1:9001
UserAgent: aws-sdk-go/1.44.257 (go1.21.0; linux; amd64)
Error: The TagKey you have provided is invalid (*tags.errTag)
       5: internal\logger\logger.go:259:logger.LogIf()
       4: cmd\api-errors.go:2350:cmd.toAPIErrorCode()
       3: cmd\api-errors.go:2375:cmd.toAPIError()
       2: cmd\object-handlers.go:2912:cmd.objectAPIHandlers.PutObjectTaggingHandler()
       1: net\http\server.go:2136:http.HandlerFunc.ServeHTTP()

API: PutObjectTagging(bucket=aws-sdk-go-test-aupmzek4341ee2, object=sgehiqp24fwt4hafffmtwzkrqnq325)
Time: 07:40:33 UTC 10/10/2023
DeploymentID: f122cbfa-42b1-428f-9002-39c644cace71
RequestID: 178CAF0DE1002AE8
RemoteHost: 127.0.0.1
Host: 127.0.0.1:9001
UserAgent: aws-sdk-go/1.44.257 (go1.21.0; linux; amd64)
Error: The TagValue you have provided is invalid (*tags.errTag)
       5: internal\logger\logger.go:259:logger.LogIf()
       4: cmd\api-errors.go:2350:cmd.toAPIErrorCode()
       3: cmd\api-errors.go:2375:cmd.toAPIError()
       2: cmd\object-handlers.go:2912:cmd.objectAPIHandlers.PutObjectTaggingHandler()
       1: net\http\server.go:2136:http.HandlerFunc.ServeHTTP()
```
2023-10-10 08:35:03 -07:00
Harshavardhana
74e0c9ab9b reduce unnecessary logging, simplify certain error handling (#18196)
remove a bunch of unnecessary logs
2023-10-10 00:33:42 -07:00
Harshavardhana
dcce83b288 avoid rebalance state for getObjectTags if any (#18197)
fixes #18190
2023-10-09 23:56:26 -07:00
Matthew Toohey
f731e7ea36 Fix current_send_in_progress metric always being zero (#18160) 2023-10-09 17:28:17 -07:00
Maxim Tkachenko
ec30bb89a4 simplify channel send() in WalkDir() (#18186) 2023-10-09 17:27:55 -07:00
Klaus Post
7cd08594f6 Use better host names for metric errors (#18188)
Typically hosts would end up like this:

```
   "hosts": [
        ":9000",
        ":9000",
        ":9000",
...
```

Also add host name to errors.
2023-10-09 17:27:11 -07:00
Aditya Manthramurthy
2b4531f069 fix: O_DIRECT is on only for multi-disk setups (#18194)
Disable it for single disk/unsupported platforms
2023-10-09 17:08:40 -07:00
Harshavardhana
11544a62aa fix: upon write failure on disk journal close the file properly (#18183)
close the file properly before dereferencing *os.File,
as this can otherwise silently leak fds in rare cases.

This PR fixes this properly.
2023-10-08 12:17:08 -07:00
Taran Pelkey
18550387d5 fix: DeleteServiceAccount API behavior (#18163) 2023-10-08 12:13:18 -07:00
Minio Trusted
efb03e19e6 Update yaml files to latest version RELEASE.2023-10-07T15-07-38Z 2023-10-08 07:09:45 +00:00
Praveen raj Mani
c27d0583d4 Send kafka notification messages in batches when queue_dir is enabled (#18164)
Fixes #18124
2023-10-07 08:07:38 -07:00
Klaus Post
0de2b9a1b2 Fix panic on double unfreezeServices (#18177)
Calling unfreezeServices twice results in panic:

```
panic: "POST /minio/peer/v32/signalservice?signal=4&sub-sys=": close of nil channel
goroutine 14703 [running]:
runtime/debug.Stack()
	runtime/debug/stack.go:24 +0x65
github.com/minio/minio/cmd.setCriticalErrorHandler.func1.1()
	github.com/minio/minio/cmd/generic-handlers.go:549 +0x8e
panic({0x27c3020, 0x4c9b370})
	runtime/panic.go:884 +0x212
github.com/minio/minio/cmd.unfreezeServices()
	github.com/minio/minio/cmd/service.go:112 +0xc7
github.com/minio/minio/cmd.(*peerRESTServer).SignalServiceHandler(0x0?, {0x4cb6af0, 0xc010b96420}, 0xc01affab00)
	github.com/minio/minio/cmd/peer-rest-server.go:837 +0x13a
net/http.HandlerFunc.ServeHTTP(...)
```

If the function was called a second time `val` would not be nil, but the returned channel `ch` would be, causing the panic.

Check the channel isn't nil and also use Swap for an atomic swap instead of 2 separate operations (though we are in a mutex).
2023-10-06 07:51:50 -06:00
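A schematic version of the described fix (not MinIO's exact code): hold the channel behind an atomic pointer, Swap it out, and close it only when the swapped value is non-nil, so a second unfreeze call becomes a no-op instead of a double close.

```go
package main

import "sync/atomic"

var frozen atomic.Pointer[chan struct{}]

func freezeServices() {
	ch := make(chan struct{})
	frozen.Store(&ch)
}

func unfreezeServices() {
	// Swap atomically returns the previous value and clears it, replacing
	// the old load-then-store pair with a single operation.
	if prev := frozen.Swap(nil); prev != nil && *prev != nil {
		close(*prev) // closed exactly once
	}
}

func main() {
	freezeServices()
	unfreezeServices()
	unfreezeServices() // no panic on the second call
}
```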
Poorna
9dc29d7687 Avoid ILM expiry on deleted versions that are yet to replicate (#18175)
Fixes #18167
2023-10-06 06:55:15 -06:00
Poorna
72871dbb9a delete replication: avoid overwriting replication decision (#18174)
from ObjectInfo unless version purge status is present. Otherwise
there is potential to make incorrect replication decision if Stat
returned an error
2023-10-05 21:09:45 -06:00
Aditya Manthramurthy
4bda4e4e2b fix: check for disk-level O_DIRECT support (#18173)
Disk level O_DIRECT support checking at xl storage initialization was
conditional on a config setting being enabled. (This never took effect
because config initialization happens after ObjectLayer is ready.) This
is not necessary as the config setting is dynamic - O_DIRECT should be
enabled via runtime config. So we need to do the disk level support
check regardless of the config setting.
2023-10-05 20:54:49 -06:00
Harshavardhana
1971c54a50 update buffer channels for both trace and listen events (#18171)
- Trace needs higher buffered channels than 4000 to ensure
  when we run `mc admin trace -a` it captures all information
  sufficiently.

- Listen event notification needs the event channel to be
  `apiRequestsMaxPerNode` * number of nodes
2023-10-05 18:16:04 -06:00
Cesar N
bb77b89da0 Update MinIO Console version (#18168)
Co-authored-by: cesnietor <>
2023-10-04 16:25:59 -07:00
Anis Eleuch
b336e9a79f fix: loading usage cache to not fail early when reading the backup fails (#18158)
Currently, the retry is not fully used when there is no backup copy of
the data usage; use 5 retry attempts when we don't have any valid data,
new or backup, unless we have seen an unrecognized error.
2023-10-02 19:22:35 -07:00
Harshavardhana
a2ab21e91c add max-keys=2 optimization for spark workloads (#18154)
A comment in the code provides a more detailed explanation
of what this PR entails and its assumptions.

This PR reduces the amount of listing() by an order
of magnitude; however there are other such calls that
still need further optimization, which shall be done
in subsequent PRs.
2023-10-02 07:52:59 -06:00
Sveinn
603437e70f Fix startup formatting (#18156)
Percent signs in root user names were being interpreted as formatting directives.

Before:
```
S3-API: http://192.168.50.21:9000  http://172.31.96.1:9000  http://127.0.0.1:9000
RootUser: "U4B6Zi!b75DXSPm%!!(MISSING)a(MISSING)vZb"
RootPass: "Q4#Q6y8G%!P(MISSING)x#npP4dudUobU#NBcGB7RMKV4ajYb"

Console: http://192.168.50.21:51915 http://172.31.96.1:51915 http://127.0.0.1:51915
RootUser: "U4B6Zi!b75DXSPm%!!(MISSING)a(MISSING)vZb"
RootPass: "Q4#Q6y8G%!P(MISSING)x#npP4dudUobU#NBcGB7RMKV4ajYb"

Command-line: https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart
FORMAT: %117s MESSAGE: $ mc alias set myminio http://192.168.50.21:9000 "U4B6Zi!b75DXSPm%avZb" "Q4#Q6y8G%%Px#npP4dudUobU#NBcGB7RMKV4ajYb"
   $ mc alias set myminio http://192.168.50.21:9000 "U4B6Zi!b75DXSPm%!a(MISSING)vZb" "Q4#Q6y8G%Px#npP4dudUobU#NBcGB7RMKV4ajYb"
```

After:

```
Status:         1 Online, 0 Offline.
S3-API: http://192.168.50.21:9000  http://172.31.96.1:9000  http://127.0.0.1:9000
RootUser: "U4B6Zi!b75DXSPm%avZb"
RootPass: "Q4#Q6y8G%%Px#npP4dudUobU#NBcGB7RMKV4ajYb"

Console: http://192.168.50.21:52421 http://172.31.96.1:52421 http://127.0.0.1:52421
RootUser: "U4B6Zi!b75DXSPm%avZb"
RootPass: "Q4#Q6y8G%%Px#npP4dudUobU#NBcGB7RMKV4ajYb"

Command-line: https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart
   $ mc alias set myminio http://192.168.50.21:9000 "U4B6Zi!b75DXSPm%avZb" "Q4#Q6y8G%%Px#npP4dudUobU#NBcGB7RMKV4ajYb"
```

No need for special Windows case. `mc` works just fine.
2023-10-02 07:39:47 -06:00
Harshavardhana
db3a9a5990 update missing mc command on multipart-tests 2023-09-30 20:29:45 -07:00
Harshavardhana
24c7e73b4e update helm chart images and release v5.0.14
Signed-off-by: Harshavardhana <harsha@minio.io>
2023-09-30 13:46:10 -07:00
Alik
c053e57068 Add parameters in Helm chart to load OIDC clientSecret from Secret Resource (#17784) 2023-09-30 13:44:38 -07:00
Shireesh Anjal
6d20ec3bea Add support for resource metrics (#18057)
Add a new endpoint for "resource" metrics `/v2/metrics/resource`

This should return system metrics related to drives, network, CPU and
memory. Except for drives, other metrics should have corresponding "avg"
and "max" values also.

Reuse the real-time feature to capture the required data,
introducing CPU and memory metrics in it.

Collect the data every minute and keep updating the average and max values
accordingly, returning the latest values when the API is called.
2023-09-30 13:40:20 -07:00
Harshavardhana
c50627ee3e Add tests for multipart upload overwrites on versioned buckets (#18142) 2023-09-30 03:13:56 -07:00
Minio Trusted
b3cd893f93 Update yaml files to latest version RELEASE.2023-09-30T07-02-29Z 2023-09-30 07:51:58 +00:00
Anis Eleuch
22d2dbc4e6 decom: Fix infinite retry when the decom is canceled (#18143)
Also, use rand.Float64() since it is thread-safe; otherwise go race
will complain.
2023-09-30 00:02:29 -07:00
Shireesh Anjal
2b5d9428b1 Use latest madmin-go (v3.0.21) (#18138)
This ensures that drive model is included in the partition data inside
the health diagnostics report.
2023-09-29 11:25:34 -07:00
Harshavardhana
d6446cb096 do not return an error in AbortMultipartUpload() (#18135)
returning an error is a bit undefined in AWS S3
as it may return an error or not depending on the
time from AbortMultipartUpload().
2023-09-29 10:28:19 -07:00
Harshavardhana
c34bdc33fb make sure to set Versioned field to ensure rename2 is not called (#18141)
without this, rename2() can rename the previous dataDir,
causing issues for different versions of the object; only the
latest version is preserved due to this bug.

Added healing code to ensure recovery of such content.
2023-09-29 09:08:24 -07:00
ferhat elmas
dd8547e51c chore: drop unnecessary linter (#18133) 2023-09-29 03:11:31 -07:00
Anis Eleuch
aec023f537 Avoid showing buckets without quorum in each pool (#18125) 2023-09-29 00:58:54 -07:00
Poorna
e101eeeda9 fix: tier addition validation (#18136) 2023-09-28 22:33:24 -07:00
Minio Trusted
f29522269d Update yaml files to latest version RELEASE.2023-09-27T15-22-50Z 2023-09-28 17:49:57 +00:00
Harshavardhana
3c470a6b8b fix: the inspect script to use scheme per deployment (#18118) 2023-09-27 08:22:50 -07:00
Poorna
6bc7d711b3 delete of a missing versionId return 204 (#18117) 2023-09-26 14:02:56 -07:00
Shubhendu
10d5dd3a67 fix: a regression with audit log sending (#18112)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-09-26 12:23:02 -07:00
Harshavardhana
d9f1df01eb return an error in CopyAligned upon premature EOF (#18110)
add a unit-test to capture this corner case
2023-09-26 11:20:06 -07:00
Harshavardhana
cdeab19673 fix: always check error upon w.Close() in Write() (#18111)
Not checking w.Close() can prematurely make us think that the
w.Write() actually succeeded; Write() may or may not return an
error, and sometimes the error from Write() only propagates
during a Close() call on the fd.

Since Fdatasync(w) on the FD would usually return such an error,
Close() error handling is less of a concern; however, it may
happen that fdatasync() did not return an error whereas Close()
would.
2023-09-26 11:04:00 -07:00
Anis Eleuch
22ee678136 tier: Avoid doing versioned operations since not required anymore (#18108)
Currently, setting a new tiering target returns an error when a bucket
is versioned and the tiering credentials do not have authorization to
specify a version-id when reading or removing a specific version;

Since tiering does not require versioning anymore; avoid doing versioned
operations when performing checklist ops while adding a new tiering
configuration.
2023-09-26 00:14:56 -07:00
Poorna
50a8f13e85 site replication: allow setting bandwidth default for bucket (#18062)
This can still be overridden at the bucket level
2023-09-25 15:50:52 -07:00
jiuker
6dec60b6e6 fix: check post policy like AWS S3 (#18074) 2023-09-25 12:35:25 -07:00
Harshavardhana
ac3a19138a fix: set scanning details locally to avoid cached values (#18092)
atomic variable results such as scanning must not use
cached values, instead rely on real-time information.
2023-09-25 08:26:29 -07:00
Klaus Post
21e8e071d7 Improve ListObject Compatibility (#18099)
Do not error out when a provided marker is before or after the prefix, but instead just ignore it if before and return an empty list when after.

Fixes #18093
2023-09-25 08:13:08 -07:00
Klaus Post
57f84a8b4c Add abandoned folder scanning to metrics (#18076)
Include object and versions heal scan times when checking non-empty abandoned folders.

Furthermore, don't add a delay between healing versions; instead wait once per object.
2023-09-24 22:15:31 -07:00
Minio Trusted
8a672e70a7 Update yaml files to latest version RELEASE.2023-09-23T03-47-50Z 2023-09-25 01:55:31 +00:00
Aditya Manthramurthy
22041bbcc4 fix: Update policy mapping properly in notification (#18088)
This is fixing a regression from an earlier change where STS account
loading was made lazy.
2023-09-22 20:47:50 -07:00
Harshavardhana
5afb459113 upgrade all dependencies (#18085) 2023-09-22 14:45:19 -07:00
Harshavardhana
91ebac0a00 fix: move abandoned parts check after healing not in ILM path (#18087) 2023-09-22 12:07:52 -07:00
mundry
5fcb1cfd31 fix: broken bucket versioning support in community helm chart (#18003) 2023-09-22 11:32:24 -07:00
Harshavardhana
3a90fb108c only look for metadata if batch replication asks for metadata filters (#18082)
This PR changes StatObject() from being a must-have for non-MinIO
sources to being a conditional API call.

- Calls StatObject() when needed
- Calls GetObjectTagging() when needed

Without these conditionals, these calls can cause a lot of delays,
so we avoid them when not needed in the more common scenario.
2023-09-22 11:31:57 -07:00
Anis Eleuch
4eeb48f8e0 Return cached online/offline status for audit/http loggers (#18083)
To avoid having delays in prometheus scrape and in 'mc admin info' command.
2023-09-21 16:58:24 -07:00
Harshavardhana
373d48c8a3 allow admin actions to have proper condition map (#18080)
upgrade minio/pkg to v2.0.2

fixes #18078
2023-09-21 13:22:09 -07:00
Harshavardhana
1472875670 fix: failed messages counting in audit_http metrics (#18075)
Retries must not each be counted as failed messages;
a failed message is a single counter, not one per retry.
This PR fixes that.

Also we do not need to retry 10-times, instead we should
retry at max 3 times with some jitter to deliver the
messages.
2023-09-21 11:24:56 -07:00
Shubhendu
74cfb207c1 Added check for mandatory MINIO_KMS_KES_KEY_NAME env var (#18077)
If MinIO is started with KMS enabled, MINIO_KMS_KES_KEY_NAME should
be set for the server to start.
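
A minimal sketch of such a startup check; the companion MINIO_KMS_KES_ENDPOINT check is an assumption for illustration:

```
package main

import (
	"log"
	"os"
)

func main() {
	// If KMS (KES) is configured, the key name must be present too.
	if os.Getenv("MINIO_KMS_KES_ENDPOINT") != "" && os.Getenv("MINIO_KMS_KES_KEY_NAME") == "" {
		log.Fatal("MINIO_KMS_KES_KEY_NAME must be set when KMS is enabled")
	}
	log.Println("KMS configuration ok")
}
```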

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-09-21 10:37:37 -07:00
Minio Trusted
6a096e7dc7 Update yaml files to latest version RELEASE.2023-09-20T22-49-55Z 2023-09-21 00:42:16 +00:00
Harshavardhana
9788d85ea3 remove logging for invalid metadata values (#18068) 2023-09-20 15:49:55 -07:00
Anis Eleuch
69c0e18685 perf net: Add the endpoint name related to the perf net error (#18063)
In a perf test, one node will run a speed test with all nodes. If there is
an error with a peer node, the peer node name is not included in the
error, which confuses the user.

This commit will add the peer endpoint string to the netperf error.
2023-09-19 22:41:06 -07:00
Aditya Manthramurthy
3cac927348 Load STS policy mappings periodically (#18061)
To ensure that policy mappings are current for service accounts
belonging to (non-derived) STS accounts (like an LDAP user's service
account) we periodically reload such mappings.

This is primarily to handle a case where a policy mapping update
notification is missed by a minio node. Such a node would continue to
have the stale mapping in memory because STS creds/mappings were never
periodically scanned from storage.
2023-09-19 17:57:42 -07:00
Harshavardhana
9081346c40 fix: more regressions listing policy mappings (#18060)
also relax ListServiceAccounts() returning error if
no service accounts exist.
2023-09-19 15:23:18 -07:00
Harshavardhana
fcfadb0e51 fix: regression in loading LDAP users policy mappings (#18055)
LDAP users are stored as STS users, we need to load
their policy mappings appropriately.

Fixes a regression caused by #17994
2023-09-19 10:31:56 -07:00
Harshavardhana
2add57cfed apply healing per object at 1024 cycles (#18050)
- we already have MRF for most recent failures
- we trigger healing during HEAD/GET operation

These are enough, also change the default max wait
from 5sec to 1sec for default scanner speed.
2023-09-19 09:24:22 -07:00
Anis Eleuch
c5279ec630 fix: building reorder-disks under darwin (#18053)
Also build debugging tools only in tests or with a specific target
2023-09-19 03:19:26 -07:00
Poorna
b73699fad8 replication: pass user tags while queueing (#18052)
Continues from #18032 - otherwise replication will fail on tag based rules.
2023-09-19 03:18:28 -07:00
Harshavardhana
b8ebe54e53 Revert "skip tiered objects to GLACIER in batch replication (#18044)"
This reverts commit fd421ddd6f.

MinIO already provides `filter` based on metadata that would work
in this scenario already.
2023-09-19 00:05:40 -07:00
Harshavardhana
c3d70e0795 cache usage, prefix-usage, and buckets for AccountInfo up to 10 secs (#18051)
AccountInfo is called quite frequently by Console UI
login attempts; when many users are logging in, it is important
that we provide them with better responsiveness.

- ListBuckets information is cached every second
- Bucket usage info is cached for up to 10 seconds
- Prefix usage (optional) info is cached for up to 10 secs

Failure to update after cache expiration would still
allow login, which would end up providing previously
cached information.

This allows for seamless responsiveness for the Console UI
logins, and overall responsiveness on a heavily loaded
system.
2023-09-18 22:13:03 -07:00
Harshavardhana
8c4561b8da add all missing go.mod for debugging tools (#18049) 2023-09-18 13:47:03 -07:00
Harshavardhana
fd421ddd6f skip tiered objects to GLACIER in batch replication (#18044)
Objects tiered to GLACIER are not readable until
they are restored, so we skip these as unreadable.
2023-09-18 10:25:31 -07:00
jiuker
9947c01c8e feat: SSE-KMS use uuid instead of read all data to md5. (#17958) 2023-09-18 10:00:54 -07:00
Eng Zer Jun
a00db4267c data-usage-cache: remove redundant nil check (#17970)
From the Go specification:

  "3. If the map is nil, the number of iterations is 0." [1]

Therefore, an additional nil check before the loop is unnecessary.

[1]: https://go.dev/ref/spec#For_range
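
For illustration, ranging over a nil map in Go is safe and performs zero iterations:

```
package main

import "fmt"

func main() {
	var m map[string]int // nil map
	n := 0
	for range m { // zero iterations, per the Go spec quoted above
		n++
	}
	fmt.Println(n) // prints 0
}
```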

Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
2023-09-16 19:09:29 -07:00
Harshavardhana
36385010f5 use optimized pathJoin instead of path.Join (#18042)
this avoids allocations in the scanner routine; they are tiny but
they add up over many cycles of the scanner.
2023-09-16 19:08:59 -07:00
Harshavardhana
fa6d082bfd reduce all major allocations in replication path (#18032)
- remove targetClient from being passed around via replicationObjectInfo{}
- remove cloning of object info unnecessarily
- remove objectInfo from replicationObjectInfo{} (only require necessary fields)
2023-09-16 02:28:06 -07:00
Minio Trusted
9fab91852a Update yaml files to latest version RELEASE.2023-09-16T01-01-47Z 2023-09-16 07:38:18 +00:00
Poorna
b733e6e83c site replication turn off retry login for admin API calls (#18039)
additionally also mark site offline if n/w is down
2023-09-15 18:01:47 -07:00
Harshavardhana
ce05bb69dc update console v0.39.0 (#18038)
Signed-off-by: Harshavardhana <harsha@minio.io>
2023-09-15 14:01:52 -07:00
Anis Eleuch
37aa5934a1 scanner: Fix loading data usage cache structure (#18037)
Return an empty data usage cache structure when the data usage cache
file does not exist, otherwise, the scanner won't work.
2023-09-15 13:11:08 -07:00
Harshavardhana
1647fc7edc fix: optimize listMultipartUploads to serve via local disks (#18034)
and remove unused getLoadBalancedDisks()
2023-09-15 08:34:03 -07:00
Harshavardhana
7b92687397 remove generating presignedURLs with range header for lambda (#18033) 2023-09-14 21:58:17 -07:00
Anis Eleuch
419e5baf16 fix: webhook notify endpoint with standard ports (#18016) 2023-09-14 20:10:44 -07:00
Alex
dc48cd841a Added MINIO_PROMETHEUS_AUTH_TOKEN env support (#18028)
Signed-off-by: Benjamin Perez <benjamin@bexsoft.net>
2023-09-14 17:28:21 -07:00
Anis Eleuch
b0e1776d6d Do not use a chain for S3 tiering to return better error messages (#18030)
When using a chain provider, if no provider returns a valid
access and secret key, an anonymous request is sent, which makes it hard
for users to figure out what is going on.

In the case of S3 tiering, when AWS IAM temporary account generation returns
an error, an anonymous login will be used because of the chain provider.
Avoid this and use the AWS IAM provider directly to get a good error
message.
2023-09-14 15:28:20 -07:00
Aditya Manthramurthy
7a7068ee47 Move IAM periodic ops to a single go routine (#18026)
This helps reduce disk operations as these periodic routines would not
run concurrently any more.

Also add expired STS purging periodic operation: Since we do not scan
the on-disk STS credentials (and instead only load them on-demand) a
separate routine is needed to purge expired credentials from storage.
Currently this runs about a quarter as often as IAM refresh.

Also fix a bug where with etcd, STS accounts could get loaded into the
iamUsersMap instead of the iamSTSAccountsMap.
2023-09-14 15:25:17 -07:00
Aditya Manthramurthy
cbc0ef459b Fix policy package import name (#18031)
We do not need to rename the import of minio/pkg/v2/policy as iampolicy
any more.
2023-09-14 14:50:16 -07:00
Harshavardhana
a2aabfabd9 add backups for usage-caches to rely on upon error (#18029)
This allows scanner to avoid lengthy scans, skip
things appropriately and also not lose metrics in
any manner.

Reduce longer deadlines for usage-cache loads/saves
to match the disk timeout, which is now 2 minutes per
IOP.
2023-09-14 11:53:52 -07:00
Harshavardhana
822cbd4b43 add couple of missing things from #18027 2023-09-13 23:26:48 -07:00
Ravind Kumar
3c19a9308d DOCS-987: Reorganizing list.md for better RST compatibility (#18027) 2023-09-13 23:23:37 -07:00
Harshavardhana
32890342ce introduce MINIO_BROWSER_REDIRECT env to enable/disable auto-redirect (#18025) 2023-09-13 18:43:57 -07:00
Aditya Manthramurthy
ed2c2a285f Load STS accounts into IAM cache lazily (#17994)
In situations with large number of STS credentials on disk, IAM load
time is high. To mitigate this, STS accounts will now be loaded into
memory only on demand - i.e. when the credential is used.

In each IAM cache (re)load we skip loading STS credentials and STS
policy mappings into memory. Since STS accounts only expire and cannot
be deleted, there is no risk of invalid credentials being reused,
because credential validity is checked when it is used.
2023-09-13 12:43:46 -07:00
Poorna
18e23bafd9 replication resync: report only the on-disk status (#18017)
Avoid reporting in-memory status since results can vary if different
nodes are queried, resync always runs at a single node.
2023-09-13 10:58:38 -07:00
Harshavardhana
8b8be2695f optimize mkdir calls to avoid base-dir Mkdir attempts (#18021)
Currently we have IOPs of these patterns

```
[OS] os.Mkdir play.min.io:9000 /disk1 2.718µs
[OS] os.Mkdir play.min.io:9000 /disk1/data 2.406µs
[OS] os.Mkdir play.min.io:9000 /disk1/data/.minio.sys 4.068µs
[OS] os.Mkdir play.min.io:9000 /disk1/data/.minio.sys/tmp 2.843µs
[OS] os.Mkdir play.min.io:9000 /disk1/data/.minio.sys/tmp/d89c8ceb-f8d1-4cc6-b483-280f87c4719f 20.152µs
```

It can be seen that we can save several levels of calls; for example,
if your drive is mounted at `/disk1/minio` you can simply
skip sending `Mkdir /disk1/` and `Mkdir /disk1/minio`.

Since they are expected to exist already, this PR adds a way
for us to ignore all paths up to the mount point or whichever
directory has been provided to the MinIO setup.
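
A rough sketch of the idea (hypothetical helper, not MinIO's actual storage code): create only the components below a base directory that is known to exist.

```
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// mkdirAllSkipBase creates path like os.MkdirAll, but does not issue
// Mkdir calls for the known-existing base (e.g. the drive mount point).
func mkdirAllSkipBase(base, path string, perm os.FileMode) error {
	rel, err := filepath.Rel(base, path)
	if err != nil {
		return err
	}
	cur := base
	for _, part := range strings.Split(rel, string(filepath.Separator)) {
		if part == "." || part == "" {
			continue
		}
		cur = filepath.Join(cur, part)
		if err := os.Mkdir(cur, perm); err != nil && !os.IsExist(err) {
			return err
		}
	}
	return nil
}

func main() {
	// Only .minio.sys and tmp get Mkdir calls; the base /tmp is assumed to exist.
	fmt.Println(mkdirAllSkipBase("/tmp", "/tmp/.minio.sys/tmp", 0o755))
}
```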
2023-09-13 08:14:36 -07:00
Poorna
96fbf18201 replication: queue existing objects to same workers as incoming (#18020)
Previously, existing objects were queued to a single worker and MRF re-queues
were also handled by the same worker; this does not fully use the available
bandwidth when there is no incoming workload.
2023-09-12 21:59:15 -07:00
Harshavardhana
c8a57a8fa2 fix: send content-md5 for AWS S3 proactively (#18018)
fixes #17977
2023-09-12 19:11:13 -07:00
Harshavardhana
b1c2dacab3 fix: allow dynamic ports for API only in non-distributed setups (#18019)
fixes #17998
2023-09-12 19:10:49 -07:00
Harshavardhana
65939913b4 update all dependencies (#18012) 2023-09-12 13:16:46 -07:00
Harshavardhana
08b3a466e8 fix: allow concurrent SFTP connections (#18013)
The current implementation did not fully support
concurrent SFTP connections; this PR properly
handles this.

fixes #17914
2023-09-12 12:41:52 -07:00
Harshavardhana
5aa7c38035 update pkg to v2.0.1 to extend admin actions (#18008) 2023-09-12 01:11:52 -07:00
Harshavardhana
1df5e31706 optimize MRF replication queue to avoid memory leaks (#18007) 2023-09-11 20:59:11 -07:00
Harshavardhana
9f7044aed0 fix: ignore transient errors in read path (#18006)
Errors such as

```
returned an error (context deadline exceeded) (*fmt.wrapError)
```

```
(msgp: too few bytes left to read object) (*fmt.wrapError)
```
2023-09-11 15:29:59 -07:00
Anis Eleuch
41de53996b heal: calculate the number of workers based on NRRequests (#17945) 2023-09-11 14:48:54 -07:00
Harshavardhana
9878031cfd fix: change DISK_ to DRIVE_ for some drive related envs (#18005) 2023-09-11 12:19:22 -07:00
Harshavardhana
e3fbcaeb72 allow scanner key cycle to be empty (#18001)
configs from 2020 server throws an
error due to deprecation of the keys
however an attempt is made to parse
them, we should have chosen existing
defaults - this PR fixes that.
2023-09-09 08:53:32 -07:00
Harshavardhana
ca6dd8be5e use go1.21.1 for vulncheck 2023-09-07 16:15:31 -07:00
Minio Trusted
fba0924b1d Update yaml files to latest version RELEASE.2023-09-07T02-05-02Z 2023-09-07 23:10:40 +00:00
Poorna
703ed46d79 fix: replication of tags while removing (#17989)
A tag removal was not being replicated prior to this change
2023-09-06 19:05:02 -07:00
Harshavardhana
f7ca6c63c2 fix: bucket quota clear and honor existing quota config (#17988) 2023-09-06 19:03:58 -07:00
Harshavardhana
ad69b9907f fix: report bucket metrics for only existing buckets (#17987) 2023-09-06 12:50:46 -07:00
Anis Eleuch
b9269151a4 fix: drive rotational calculation status for partitions (#17986)
Fix drive rotational calculation status

If a MinIO drive path is mounted to a partition and not a real disk,
getting the rotational status would fail because Linux does not expose
that status to partition; In other words,
/sys/block/drive-partition-name/queue/rotational does not exist;

To fix the issue, the code will search for the rotational status of the
disk that hosts the partition, and this can be calculated from the
real path of /sys/class/block/<drive-partition-name>
2023-09-06 12:37:57 -07:00
Shubhendu
bfddbb8b40 Embed file in ZIP with custom permissions (#17954)
This change enables embedding files in ZIP with custom permissions.
Also uses default creds for starting MinIO based on inspect data.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-09-06 09:24:01 -07:00
Poorna
13a2dc8485 replication resync: avoid blocking on results channel. (#17981)
continues fix in #17775
2023-09-05 20:22:39 -07:00
Harshavardhana
1e51424e8a use syscall.Rename() directly instead of os.Rename() (#17982) 2023-09-05 20:22:23 -07:00
Harshavardhana
5b114b43f7 refactor bandwidth throttling for replication target (#17980)
This refactor is to allow using the bandwidth throttling
for other purposes.
2023-09-05 20:21:59 -07:00
Poorna
812f5a02d7 metrics: fix panic in replication stats reporting (#17979) 2023-09-05 10:26:18 -07:00
Minio Trusted
19f70dbfbf Update yaml files to latest version RELEASE.2023-09-04T19-57-37Z 2023-09-04 21:20:49 +00:00
Aditya Manthramurthy
1c99fb106c Update to minio/pkg/v2 (#17967) 2023-09-04 12:57:37 -07:00
Krishnan Parthasarathi
71c32e9b48 Return successorModTime in quorum when available (#17925) 2023-09-04 08:24:17 -07:00
Harshavardhana
380a59520b add missing testdata for benchmarking 2023-09-02 14:40:38 -07:00
Harshavardhana
3995355150 avoid repeated large allocations for large parts (#17968)
Objects with 10,000 parts, when there are many of them, can
cause a large memory spike which can potentially
lead to OOM due to lack of GC.

with previous PR reducing the memory usage significantly
in #17963, this PR reduces this further by 80% under
repeated calls.

Scanner sub-system has no use for the slice of Parts(),
it is better left empty.

```
benchmark                            old ns/op     new ns/op     delta
BenchmarkToFileInfo/ToFileInfo-8     295658        188143        -36.36%

benchmark                            old allocs     new allocs     delta
BenchmarkToFileInfo/ToFileInfo-8     61             60             -1.64%

benchmark                            old bytes     new bytes     delta
BenchmarkToFileInfo/ToFileInfo-8     1097210       227255        -79.29%
```
2023-09-02 07:49:24 -07:00
Harshavardhana
8208bcb896 remove all unnecessary logging, logOnce when absolutely needed (#17965) 2023-09-01 16:19:18 -07:00
Poorna
d665e855de replication: remove check for empty version id (#17964) 2023-09-01 13:46:10 -07:00
Harshavardhana
18b3655c99 with xlv2 format we never had to fill in checksumInfo() (#17963)
- this PR avoids sending a large ChecksumInfo slice
  when it's not needed

- also, for a file with the XLV2 format there is no reason
  to allocate a Checksum slice while reading
2023-09-01 13:45:58 -07:00
Anis Eleuch
6a8d8f34a5 kafka: Do not require key when sending a message (#17962)
Keys are helpful to ensure strict ordering of messages; however, the
code currently uses a random request id for every log, hence using the request-id
as a Kafka key does not serve any purpose.

This commit removes the usage of the key, to also fix the audit issue from
internal subsystem that does not have a request ID.
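
A hedged sketch of the resulting producer call, using the sarama Kafka client purely for illustration; the topic name and client choice here are assumptions, not MinIO's actual target code:

```
package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // required for SyncProducer

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	// Note: no Key is set; with random request IDs a key adds no ordering value.
	msg := &sarama.ProducerMessage{
		Topic: "minio-audit", // hypothetical topic name
		Value: sarama.StringEncoder(`{"event":"audit"}`),
	}
	if _, _, err := producer.SendMessage(msg); err != nil {
		log.Println("send failed:", err)
	}
}
```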
2023-09-01 08:37:22 -07:00
Harshavardhana
b1c1f02132 use buffers for pathJoin, to re-use buffers. (#17960)
```
benchmark                        old ns/op     new ns/op     delta
BenchmarkPathJoin/PathJoin-8     79.6          55.3          -30.53%

benchmark                        old allocs     new allocs     delta
BenchmarkPathJoin/PathJoin-8     2              1              -50.00%

benchmark                        old bytes     new bytes     delta
BenchmarkPathJoin/PathJoin-8     48            24            -50.00%
```
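
A rough sketch of the buffer-reuse idea behind this change (hypothetical helper, not the exact MinIO implementation):

```
package main

import (
	"fmt"
	"sync"
)

// pathJoinBufPool recycles byte slices used to build joined paths.
var pathJoinBufPool = sync.Pool{
	New: func() interface{} { return make([]byte, 0, 256) },
}

// pathJoinBuf joins path elements with '/' using a pooled buffer,
// avoiding a fresh allocation per call.
func pathJoinBuf(elems ...string) string {
	buf := pathJoinBufPool.Get().([]byte)[:0]
	for i, e := range elems {
		if i > 0 && len(e) > 0 {
			buf = append(buf, '/')
		}
		buf = append(buf, e...)
	}
	s := string(buf)
	pathJoinBufPool.Put(buf) // string(buf) copies, so reusing the buffer is safe
	return s
}

func main() {
	fmt.Println(pathJoinBuf("bucket", "prefix", "object.txt"))
}
```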
2023-08-31 17:58:48 -07:00
Minio Trusted
ea93643e6a Update yaml files to latest version RELEASE.2023-08-31T15-31-16Z 2023-09-01 00:15:57 +00:00
Shubhendu
e47e625f73 Added replication graphs for site replication metrics (#17951)
This dashboard graphs the metrics when site replication is enabled
across MinIO instances.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-08-31 08:31:16 -07:00
yangw
b13fcaf666 fix: read atomic variable in clientDevNull round trip time (#17955) 2023-08-31 08:31:01 -07:00
Harshavardhana
9458485e43 avoid double logging from healing (#17950) 2023-08-30 18:46:04 -07:00
Shubhendu
0ce9e00ffa Added node scanner and node drives graphs (#17949)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-08-30 14:01:51 -07:00
Shubhendu
c778c381b5 Added new bucket replication graphs (#17947)
This PR adds new bucket replication graphs for better and granular
monitoring of bucket replication. Also arranged all replication graphs
together.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-08-30 11:57:41 -07:00
Harshavardhana
0d1fbef751 fix: a possible crash in event target Close() (#17948)
These are possible crashes when the configured
target is still in init() state and never finished,
but a delete-config was initiated.
2023-08-30 07:27:45 -07:00
Poorna
b48bbe08b2 Add additional info for replication metrics API (#17293)
to track the replication transfer rate across different nodes,
number of active workers in use and in-queue stats to get
an idea of the current workload.

This PR also adds replication metrics to the site replication
status API. For site replication, prometheus metrics are
no longer at the bucket level - but at the cluster level.

Add prometheus metric to track credential errors since uptime
2023-08-30 01:00:59 -07:00
Minio Trusted
cce90cb2b7 Update yaml files to latest version RELEASE.2023-08-29T23-07-35Z 2023-08-30 02:17:56 +00:00
Harshavardhana
07b1281046 add queue_dir to help message for logger/audit targets 2023-08-29 16:07:35 -07:00
Ravind Kumar
3515b99671 Clarify Community Helm Chart (#17944)
There is some consistent confusion between the Community Helm Chart in this repo and the MinIO Kubernetes Operator Helm Chart.

This change seeks to clarify the differences between the two charts and which ones are community maintained vs MinIO maintained.
2023-08-29 11:27:48 -07:00
Krishnan Parthasarathi
6a67c277eb Reuse types for key-value, notification and retry (#17936) 2023-08-29 11:27:23 -07:00
Harshavardhana
1067dd3011 update minio-go v7.0.63 (#17937)
Signed-off-by: Harshavardhana <harsha@minio.io>
2023-08-28 20:02:14 -07:00
Harshavardhana
7cafdc0512 fix: skip access checks further for known buckets (#17934) 2023-08-28 15:16:41 -07:00
Harshavardhana
8a57b6bced use renameat2 Linux extension syscall (#17757)
this is a faster and safer alternative
on newer kernel versions.
2023-08-27 09:57:11 -07:00
Andrea Longo
6f0ed2a091 Add equivalent mc ilm rule commands for some JSON rule examples (#17923) 2023-08-26 00:33:25 -07:00
Krishnan Parthasarathi
53abd25116 Don't log when object to be tiered is not found (#17924) 2023-08-25 23:34:16 -07:00
Harshavardhana
1ea7826c0e do not have to consider replicationTimestamp for healing and quorum (#17922)
replicationTimestamp might differ if there were retries
in replication and the retried attempt overwrote quorum
with enough shards carrying a newer timestamp, causing
the existing timestamps on xl.meta to be invalid; we
do not rely on this value for anything external.

This is purely a hint for debugging purposes; there
is no real value in it and, considering the object itself
is intact, we do not have to spend time healing this
situation.

we may consider healing this situation in future but
that needs to be decoupled to make sure that we do not
over calculate how much we have to heal.
2023-08-25 15:31:15 -07:00
Harshavardhana
97f4cf48f8 update wording on PR template 2023-08-25 11:57:37 -07:00
Anis Eleuch
0cde37be50 Reduce the number of calls to import bucket metadata (#17899)
For each bucket, save the bucket metadata 
once, call the site replication hook once
2023-08-25 07:59:16 -07:00
jiuker
6aeca54ece fix: replace context by timeout-context from parent-context when selfSpeedTest (#17906) 2023-08-25 07:58:38 -07:00
Harshavardhana
124e28578c remove strict persistence requirements for List() .metacache objects (#17917)
.metacache objects are transient in nature, and are better left to
use page-cache effectively to avoid using more IOPs on the disks.

this allows incoming calls not to be taxed heavily due to
multiple large batch listings.
2023-08-25 07:58:11 -07:00
Harshavardhana
62c9e500de remove mTime requirement from pre-condition checks (#17916)
given a versionId the mtime is always the same, it
can never be different than its original value.

versionIds also do not conflict, since they are uuid's
and unique practically forever.
2023-08-24 14:33:58 -07:00
jiuker
02cc18ff29 refactor the perf client for TTFB and TotalResponseTime (#17901) 2023-08-24 10:21:08 -07:00
Harshavardhana
ba4566e86d add missing IAM node metrics to cluster and node endpoint (#17908) 2023-08-24 09:26:37 -07:00
Krishnan Parthasarathi
87cb0081ec Retain current and upto NewerNoncurrentVersions versions (#17909)
applyNewerNoncurrentVersionLimit method should pass along versions
unaffected by NewerNoncurrentVersions rule for further ILM evaluation.
2023-08-24 09:26:29 -07:00
Poorna
4a6af93c83 mark replication target offline if network timeouts seen (#17907)
A regular target liveness check every 5 secs will toggle the state back
when the target returns online.
2023-08-24 09:24:26 -07:00
Minio Trusted
a2f0771fd3 Update yaml files to latest version RELEASE.2023-08-23T10-07-06Z 2023-08-23 23:17:59 +00:00
Harshavardhana
af564b8ba0 allow bootstrap to capture time-spent for each initializers (#17900) 2023-08-23 03:07:06 -07:00
Harshavardhana
adb8be069e tune-kafka targets to ensure timeout triggers on hung brokers (#17898)
hung brokers can cause slowness to the entire system
when many callers are hung, leading to large goroutine
build-up.
2023-08-22 20:26:35 -07:00
Klaus Post
7c8746732b Return cancelled storage calls as 499 (#17895)
Make upstream cancels more visible - right now they are just reported as "forbidden".
2023-08-22 11:10:41 -07:00
Klaus Post
f506117edb Reduce memory profiling rate (#17894)
Change profiling from every 4KB to every 128K, reducing the lock contention by a factor of 32.
2023-08-22 07:21:49 -07:00
Harshavardhana
1c5af7c31a serialize queueMRFHeal(), add timeouts and avoid normal build-ups (#17886)
we expect a certain level of IOPs and latency so this is okay.

fixes other miscellaneous bugs

- such as hanging on mrfCh <- when the context is canceled
- queuing MRF heal when the context is canceled
- remove unused saveStateCh channel
2023-08-21 16:44:50 -07:00
Harshavardhana
3a0125fa1f remove unexpected logging from peer calls (#17888)
also make sure RequestID is set for system logs
2023-08-21 14:25:24 -07:00
Daniel Valdivia
328cb0a076 Pass environment variable to control session length to console (#17885)
Signed-off-by: Daniel Valdivia <18384552+dvaldivia@users.noreply.github.com>
2023-08-21 11:55:43 -07:00
Shubhendu
c3c8441a1d Corrected the count of buckets and objects graphs (#17883)
In a distributed setup with a load balancer, any server may randomly
report the metrics `minio_cluster_bucket_total` and
`minio_cluster_usage_object_total`, and while graphing them, we should
take the max of the reported values.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-08-21 09:04:38 -07:00
jiuker
fa2a8d7209 fix: drain the req.body into io.Discard correctly (#17881) 2023-08-21 01:09:07 -07:00
jiuker
e3ea97c964 fix: replace req context by locker context (#17880) 2023-08-19 22:09:07 -07:00
Mathieu Parent
7219ae530e helm: allow to configure statement policy effect (#17700)
Signed-off-by: Mathieu Parent <mathieu.parent@insee.fr>
2023-08-19 07:39:11 -07:00
Andreas Auernhammer
8f8f8854f0 update minio/kes-go dep to v0.2.0 (#17850)
This commit updates the minio/kes-go dependency
to v0.2.0 and updates the existing code to work
with the new KES APIs.

The `SetPolicy` handler got removed since it
may not get implemented by KES at all and could
not have been used in the past since stateless KES
is read-only w.r.t. policies and identities.

Signed-off-by: Andreas Auernhammer <hi@aead.dev>
2023-08-19 07:37:53 -07:00
Anis Eleuch
4c6869cd9a ilm: Fix cleaning non current null versions (#17876) 2023-08-18 12:55:47 -07:00
Harshavardhana
bc7c0d8624 if object is a delete marker it must skip tags filter in ILM (#17861) 2023-08-18 09:36:23 -07:00
Harshavardhana
11dfc817f3 do not log client canceled events (#17838) 2023-08-17 14:53:43 -07:00
Harshavardhana
dde1a12819 fix: validate incoming uploadID to be base64 encoded (#17865)
Bonus fixes include

- do not have to write final xl.meta (renameData) does this
  already, saves some IOPs.

- make sure to purge the multipart directory properly using
  a recursive delete, otherwise this can easily pile up and
  rely on the stale uploads cleanup.

fixes #17863
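
A hedged sketch of such a validation; the exact base64 variant MinIO expects is an assumption here:

```
package main

import (
	"encoding/base64"
	"fmt"
)

// isValidUploadID reports whether the client-supplied uploadID decodes as
// base64, rejecting obviously forged or corrupted IDs early.
func isValidUploadID(id string) bool {
	_, err := base64.RawURLEncoding.DecodeString(id)
	return err == nil
}

func main() {
	good := base64.RawURLEncoding.EncodeToString([]byte("bucket/object/uuid"))
	fmt.Println(isValidUploadID(good))             // true
	fmt.Println(isValidUploadID("../../etc/pass")) // false: '.' and '/' are not base64url
}
```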
2023-08-17 09:37:55 -07:00
Minio Trusted
065fd094d1 Update yaml files to latest version RELEASE.2023-08-16T20-17-30Z 2023-08-16 21:22:41 +00:00
Harshavardhana
d09351bb10 update console release to v0.37.0
Signed-off-by: Harshavardhana <harsha@minio.io>
2023-08-16 13:17:30 -07:00
bh4t
25d38e030b Minor change to text under license page (#17864)
Made minor change to move the text and link 
regarding license details.
2023-08-16 11:28:34 -07:00
Harshavardhana
9ebd10d3f4 Revert "Include SuccessorModTime for FileInfo quorum (#17732)" (#17860)
This reverts commit bf3901342c.

This is to fix a regression caused when there are inconsistent
versions, but one version is in quorum. SuccessorModTime issue
must be fixed differently.
2023-08-16 07:51:33 -07:00
Harshavardhana
8a9b886011 update grafana dashboard with disk -> drive rename (#17857) 2023-08-15 16:04:20 -07:00
Cesar Celis Hernandez
21f0d6b549 Removing deprecated var from minio helm (#17856) 2023-08-15 13:33:55 -07:00
Harshavardhana
3ba927edae fix: batch status reporting after complete (#17852)
Batch status can perpetually wait after completion
due to a race between the MetricsHandler() returning
the active metrics in intervals of 1sec and the deletion
of metrics after job completion.

This PR ensures that we keep the 'status' around
for a while, i.e. up to 24hrs for all the batch jobs.
2023-08-15 12:22:30 -07:00
Harshavardhana
c4ca0a5a57 add two more drive metrics when metrics is available (#17854) 2023-08-15 10:55:47 -07:00
Klaus Post
406ea4f281 Fix distributed listing not able to resume (#17855)
Two fields in lifecycles made GOB encoding consistently fail with `gob: type lifecycle.Prefix has no exported fields`.

This meant that in distributed systems listings would never be able to continue and would restart on every call.

Fix issues and be sure to log these errors at least once per bucket. We may see some connectivity errors here, but we shouldn't hide them.
2023-08-15 07:45:25 -07:00
Harshavardhana
64aa7feabd allow specifying lower disks for Walk() (#17829)
useful when you may want Walk() with
reduced quorum requirements.
2023-08-14 21:32:39 -07:00
Poorna
875f4076ec site replication: avoid retries when peer is offline (#17853) 2023-08-14 21:31:41 -07:00
Harshavardhana
4643efe6be fix: add deadline worker pattern for local disk removers (#17845) 2023-08-14 12:28:13 -07:00
Harshavardhana
b760137e1d fix: add proxyByNode for batch jobs as part of their jobId (#17844) 2023-08-11 13:12:35 -07:00
Harshavardhana
5f56f441bf fix: apply common notification code with content-type (#17843) 2023-08-11 11:34:43 -07:00
Klaus Post
96a22bfcbb fix: wrapped io.EOF during ListObjects() (#17842)
When listing getObjectFileInfo can return `io.EOF` if file is being written.

When we wrap the error it will *not* retry upstream, since `io.EOF` is a valid return value.

Allow one retry before returning errors and canceling the listing.
2023-08-11 09:47:16 -07:00
Harshavardhana
6c59b33fb1 add community contribution credits and update PR template (#17840) 2023-08-10 22:08:38 -07:00
Poorna
dfaf735073 replication: fix queuing of large uploads (#17831)
Fixes regression from #17687
2023-08-10 15:48:42 -07:00
Harshavardhana
0d2b7bf94d upgrade console dependency to v0.36.0 (#17839) 2023-08-10 15:48:11 -07:00
Anis Eleuch
7fcfde7f07 s3: Pick a pool with >85% if all other pools are in suspended state (#17826) 2023-08-10 11:06:31 -07:00
jiuker
b1391d1991 feat: support perf client to show TX from client to server (#17718) 2023-08-10 07:14:46 -07:00
Harshavardhana
49c8e16410 update CI/CD to go1.21 (#17828) 2023-08-10 07:13:58 -07:00
Minio Trusted
0e93681589 Update yaml files to latest version RELEASE.2023-08-09T23-30-22Z 2023-08-10 00:09:18 +00:00
Harshavardhana
eb55034dfe optimize deletePrefix, use direct set location via object name (#17827)
* optimize deletePrefix, use direct set location via object name

instead of fanning out the calls for an object force delete
we can assume the set location and not do fan-out calls

* Apply suggestions from code review

Co-authored-by: Krishnan Parthasarathi <krisis@users.noreply.github.com>

---------

Co-authored-by: Krishnan Parthasarathi <krisis@users.noreply.github.com>
2023-08-09 16:30:22 -07:00
Harshavardhana
c45bc32d98 skip disks under scanning when healing disks (#17822)
Bonus:

- avoid DiskInfo() calls when blocks are missing;
  instead heal the object using the MRF operation.

- change the max_sleep to 250ms beyond that we will
  not stop healing.
2023-08-09 12:51:47 -07:00
Harshavardhana
6e860b6dc5 count all versions as part of DeleteAllVersionsAction (#17821) 2023-08-09 08:55:19 -07:00
Harshavardhana
b732a673dc reduce logging in bucket replication in retry scenarios (#17820) 2023-08-08 13:27:40 -07:00
Shubhendu
b6b6d6e8d8 Removed replication dashboard (#17815)
As all replication metrics are moved to the bucket level, all replication
graphs are also added under minio-bucket.json. Removing the independent
replication dashboard.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-08-08 08:13:45 -07:00
Yang Wu
23e4895dfc Create metrics slice when necessary (#17809) 2023-08-07 02:21:22 -07:00
Harshavardhana
8666c55ca6 fix: do not use PrefixEnabled() logic to ignore valid objects (#17677)
Valid objects with valid replication metadata, even after the
Prefix was disabled, must still honor the older metadata.

Ignoring them can lead to unexpected results, so allow it during
the READ phase always.
2023-08-05 13:56:01 -07:00
Anis Eleuch
a3f00c5d5e batch: Strict unmarshal yaml document to avoid user made typos (#17808)
// UnmarshalStrict is like Unmarshal except that any fields that are found
// in the data that do not have corresponding struct members, or mapping
// keys that are duplicates, will result in
// an error.
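
For illustration, a minimal sketch of strict YAML decoding with gopkg.in/yaml.v2; the job fields shown are hypothetical:

```
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type batchJob struct {
	APIVersion string `yaml:"apiVersion"`
	Bucket     string `yaml:"bucket"`
}

func main() {
	doc := []byte("apiVersion: v1\nbucketName: test\n") // typo: bucketName

	var job batchJob
	// UnmarshalStrict rejects unknown fields, surfacing the typo
	// instead of silently ignoring it.
	if err := yaml.UnmarshalStrict(doc, &job); err != nil {
		fmt.Println("invalid job definition:", err)
		return
	}
	fmt.Printf("%+v\n", job)
}
```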
2023-08-05 13:51:48 -07:00
Poorna
26c23b30f4 replication: set context timeout for NewMultipartUpload calls (#17807) 2023-08-05 12:27:07 -07:00
Anis Eleuch
a436fd513b track client disconnections properly for all ListObjects calls (#17804)
Currently ListObjects* calls were returning 200 OK for timed-out clients,
which makes debugging via `mc admin trace` very hard.
2023-08-04 15:57:27 -07:00
Minio Trusted
3bc34ffd94 Update yaml files to latest version RELEASE.2023-08-04T17-40-21Z 2023-08-04 19:12:46 +00:00
Harshavardhana
533cd8d6df fix: batch replication pull must preserve versionID (#17805)
batch replication pull must preserve versionID regardless
of destination bucket versioning configuration.

This is similar to the issue with decommissioning and rebalancing
2023-08-04 12:09:10 -07:00
Harshavardhana
cb089dcb52 error out by default beyond 10000 versions per object (#17803)
```
You've exceeded the limit on the number of versions you can create on this object
```
2023-08-04 10:40:21 -07:00
Harshavardhana
e0329cfdbb update console v0.35.1 2023-08-03 20:25:21 -07:00
Harshavardhana
239ccc9c40 fix: crash in globalTierJournal when TierConfig is not initialized (#17791) 2023-08-03 14:16:15 -07:00
Poorna
b762fbaf21 sts: validate if iam subsystem initialized in handlers (#17796) 2023-08-03 13:24:25 -07:00
Praveen raj Mani
0285df5a02 fix: prioritize audit_webhook and logger_webhook ENVs over the config KVS (#17783) 2023-08-03 02:47:07 -07:00
Harshavardhana
45fb375c41 allow healing to prefer local disks over remote (#17788) 2023-08-03 02:18:18 -07:00
Harshavardhana
4a4950fe41 fix: honor requested allow origin settings properly (#17789)
fixes #17778
2023-08-02 20:41:21 -07:00
Anis Eleuch
1664fd8bb1 Avoid logging errors twice during transitioned objects expiration (#17782) 2023-08-02 09:06:03 -07:00
Harshavardhana
21cdd2bf5d avoid overwriting metrics on success, save it in defer (#17780) 2023-08-01 22:19:56 -07:00
Harshavardhana
0153f96a20 add deadlines for readMetadata() in listing (#17776)
Bonus: also skip spending time looking for xl.json

- Listing()
- Delete()
2023-08-01 21:52:31 -07:00
Harshavardhana
a7a7533190 add new errors for Disks with timeouts (#17770) 2023-08-01 12:47:50 -07:00
Poorna
311380f8cb replication resync: fix queueing (#17775)
Assign resync of all versions of object to the same worker to avoid locking
contention. Fixes parallel resync implementation in #16707
2023-08-01 11:51:15 -07:00
Harshavardhana
b0f0e53bba fix: make sure to correctly initialize health checks (#17765)
health checks were missing for drives replaced since

- HealFormat() would replace the drives without a health check
- disconnected drives lose health checks when they reconnect via
  connectEndpoint(); the loop also loses health checks for local disks;
  merge these into a single code path.
- other than this, separately clean up health check variables to avoid
  overloading them with similar requirements.
- also ensure that we compete via context selector for disk monitoring
  such that the canceled disks don't linger around longer waiting for
  the ticker to trigger.
- allow disabling active monitoring.
2023-08-01 10:54:26 -07:00
Klaus Post
004f1e2f66 Fix trailing header signature mismatch (#17774)
Seems like clients may omit a newline at the end of the trailer chunk. Each header should end with a newline. Add that if missing.

Fixes #17662
2023-08-01 08:45:57 -07:00
Harshavardhana
2fa561f22e do not crash on invalid metric values (#17764)
```
minio[1032735]: panic: label value "\xc0.\xc0." is not valid UTF-8
minio[1032735]: goroutine 1781101 [running]:
minio[1032735]: github.com/prometheus/client_golang/prometheus.MustNewConstMetric(...)
```

log such errors for investigation
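
A hedged sketch of the kind of guard this implies: validate label values before building the metric and log the offending bytes instead of panicking (the helper name is hypothetical):

```
package main

import (
	"log"
	"unicode/utf8"
)

// safeLabelValue returns the value if it is valid UTF-8; otherwise it
// logs the offending bytes and reports false so metric construction
// can be skipped instead of panicking.
func safeLabelValue(v string) (string, bool) {
	if utf8.ValidString(v) {
		return v, true
	}
	log.Printf("skipping metric: label value %q is not valid UTF-8", v)
	return "", false
}

func main() {
	if _, ok := safeLabelValue("\xc0.\xc0."); !ok {
		log.Println("metric dropped for investigation")
	}
}
```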
2023-08-01 00:55:39 -07:00
Harshavardhana
81be718674 fix: optimize DiskInfo() call avoid metrics when not needed (#17763) 2023-07-31 15:20:48 -07:00
Rajat Shinde
8162fd1e20 fix: couple of typos in README.md (#17754) 2023-07-31 11:17:13 -07:00
Sho Ce
49a1e2f98e update-notifier.go: misleading version age message (#17750) 2023-07-31 08:36:19 -07:00
Klaus Post
684c46369c Send events for extracted objects (#17760)
Fixes #17759
2023-07-31 08:33:51 -07:00
Martin Zruban
715c9e3ca9 fix: allow mc executable inside minio containers (#17755) 2023-07-31 00:13:41 -07:00
Harshavardhana
73edd5b8fd introduce 'mc admin config set alias/ api odirect=on' (#17753)
change disable_odirect=off -> odirect=on to make it
easier to understand, instead of making it double
negative.
2023-07-31 00:12:53 -07:00
Harshavardhana
5e5bdf5432 capture total errors data availability and any timeout errors (#17748) 2023-07-29 23:26:26 -07:00
Harshavardhana
48a3e9bc82 update NTP package to fix ipv6 bug (#17752) 2023-07-29 17:43:50 -07:00
Harshavardhana
f13cfcb83e allow disabling O_DIRECT for write ops (#17751)
On really slow systems, O_DIRECT simply kills the drives;
allow for a way to disable it.
2023-07-29 15:17:56 -07:00
Anis Eleuch
9c0e8cd15b logger: Avoid slow calls in http logger Send() function (#17747)
Send() is synchronous and can affect the latency of S3 requests when the
logger buffer is full.

Avoid checking if the HTTP target is online or not and increase the
workers anyway since the buffer is already full.

Also, avoid logs flooding when the audit target is down.
2023-07-29 12:49:18 -07:00
Harshavardhana
ad2a70ba06 update console v0.34.0
Signed-off-by: Harshavardhana <harsha@minio.io>
2023-07-28 17:24:05 -07:00
Harshavardhana
731e03fe5a add ReadFileStream deadline for disk call (#17745)
timeout the reader side if hung via disk max timeout
2023-07-28 15:37:53 -07:00
jiuker
f9d029c8fa fix: prevCertificate save not correct in loop (#17744)
fix certs save not correct

Co-authored-by: guozhi.li <guozhi.li@daocloud.io>
2023-07-28 12:57:49 -07:00
Anis Eleuch
7057d00a28 s3: Return invalid bucket name the first thing in all S3 calls (#17742) 2023-07-28 10:49:20 -07:00
Harshavardhana
114fab4c70 export cluster health as prometheus metrics (#17741) 2023-07-28 01:16:53 -07:00
Harshavardhana
c2edbfae55 update all deps and add credits (#17740) 2023-07-27 12:43:25 -07:00
ruspaul013
a92cb66468 Get the signed headers in the order they were signed (#17690)
use pSignValues to get signed headers in order
2023-07-27 11:45:30 -07:00
ruspaul013
535f97ba61 check if metadata headers/url values are equal with signed headers (#17737) 2023-07-27 11:44:56 -07:00
drivebyer
14ebd82dbd fix: missing disk metrics when query metric api from peer (#17738) 2023-07-27 11:44:13 -07:00
Klaus Post
aea7b08a47 Update nats server dependency (#17734)
I don't see why we shouldn't.

Fixes #17730
2023-07-27 07:35:52 -07:00
Harshavardhana
47dcfcbdd4 introduce deadlines on READ operations (#17724) 2023-07-27 07:33:05 -07:00
Krishnan Parthasarathi
bf3901342c Include SuccessorModTime for FileInfo quorum (#17732) 2023-07-26 17:04:16 -07:00
Shubhendu
e1731d9403 Added bucket specific grafana dashboard (#17727)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-07-26 15:10:11 -07:00
Harshavardhana
b28bcad11b avoid Access() calls on known bucket paths (#17719) 2023-07-26 11:31:40 -07:00
Harshavardhana
a7c71e4c6b protect disk monitoring to avoid busy loop configuration (#17723) 2023-07-25 20:02:22 -07:00
Poorna
1a42693d68 replication: limit larger uploads to a subset of workers (#17687)
Limit large uploads (> 128MiB)  to a max of 10 workers, intent is to avoid
larger uploads from using all replication bandwidth, giving room for smaller
uploads to sync faster.
2023-07-25 20:02:02 -07:00
Harshavardhana
e7b60c4d65 Add slow drive timeouts to match with active disk monitoring (#17701)
allow active disk-monitoring to be configurable, and use
these to add deadlines in various call layers for various
syscalls.
2023-07-25 16:58:31 -07:00
Poorna
f95129894d Use decrypted object size while computing object size summary (#17717)
Corrects an issue with encrypted versioned objects being reported under
`unversioned` bin in the object version histogram
2023-07-24 17:13:25 -07:00
Harshavardhana
c32c71c836 allow DNS cache TTL to be configurable (#17709)
this is added for now as a hidden variable
2023-07-24 15:13:35 -07:00
Harshavardhana
14e1ace552 remove serializing WalkDir() across all buckets/prefixes on SSDs (#17707)
Slower drives get knocked off via active monitoring because they are
too slow; we do not need to block calls arbitrarily.

Serializing adds latency to already slow calls, so remove
it for SSDs/NVMEs.

Also, add a selection with context when writing to `out <-`
channel, to avoid any potential blocks.
2023-07-24 09:30:19 -07:00
drivebyer
a7fb3a3853 fix: Create metrics slice when necessary in getCacheMetrics() (#17711) 2023-07-24 08:40:21 -07:00
Klaus Post
2da4bd5f1a Revert "don't error when asked for 0-based range on empty objects (#17708) (#17713)
Revert "don't error when asked for 0-based range on empty objects (#17708)"

This reverts commit 7e76d66184.

There is no valid way to specify offsets in a 0-byte file. Blame it on the [RFC](https://datatracker.ietf.org/doc/html/rfc7233#section-4.4)

> The 416 (Range Not Satisfiable) status code indicates that none of the ranges in the 
> request's Range header field (Section 3.1) overlap the current extent of the selected resource...

A request for "bytes=0-" is a request for the first byte of a resource. If the resource is 0-length, 
the range [0,0] does not overlap the resource content and the server responds with an error.
2023-07-24 07:56:28 -07:00
flisk
7e76d66184 don't error when asked for 0-based range on empty objects (#17708)
In a reverse proxying setup, a proxy in front of MinIO may attempt to
request objects in slices for enhanced cache efficiency. Since such a
a proxy cannot have prior knowledge of how large a requested resource is,
it usually sends a header of the form:

        Range: 0-$slice_size

... and, depending on the size of the resource, expects either:

- an empty response, if $resource_size == 0
- a full response, if $resource_size <= $slice_size
- a partial response, if $resource_size > $slice_size

Prior to this change, MinIO would respond 416 Range Not Satisfiable if a
client tried to request a range on an empty resource. This behavior is
technically consistent with RFC9110[1] – However, it renders sliced
reverse proxying, such as implemented in Nginx, broken in the case of
empty files. Nginx itself seems to break this convention to enable
"useful" responses in these cases, and MinIO should probably do that
too.

[1]: https://www.rfc-editor.org/rfc/rfc9110#byte.ranges
2023-07-23 00:10:03 -07:00
Harshavardhana
7764f4a8e3 return tags as part of Head/Get calls (#17635)
AWS S3 only returns the tag
count; along with that we must return
the tags as well to avoid another metadata
call to the server.
2023-07-22 07:19:43 -07:00
Harshavardhana
e1094dde08 update MinIO replication dashboard with latest metrics 2023-07-21 17:30:04 -07:00
Minio Trusted
4894c67196 Update yaml files to latest version RELEASE.2023-07-21T21-12-44Z 2023-07-21 22:28:35 +00:00
Doom And Love
d004c45386 grafana-dashboard: Update scrape_jobs variable to be single select (#17696)
Set `includeAll` and `mult` to be false since this dashboard only works with a single value being selected
2023-07-21 14:12:44 -07:00
Kaan Kabalak
6624f970c0 Fix spelling of 'already' across repository (#17703) 2023-07-21 08:45:08 -07:00
Harshavardhana
de684dc122 update console release v0.33.0
Signed-off-by: Harshavardhana <harsha@minio.io>
2023-07-21 01:20:13 -07:00
Harshavardhana
331bdc2245 fix: remove CompleteMultipartUpload() 200 OK response for blocking calls (#17699)
sending whitespace character with CompleteMultipartUpload()
with 200 OK was an AWS S3 compatible implementation detail,
and it was expected that the client SDK must look for both
successful XML as well as error XML for 200 OK.

But this is not useful anymore on MinIO, since we do not
have any large delayed coalescing of parts anymore.
2023-07-20 22:14:38 -07:00
Harshavardhana
e12ab486a2 avoid using os.Getenv for internal code, use env.Get() instead (#17688) 2023-07-20 07:52:49 -07:00
Krishnan Parthasarathi
9eeee92d36 Add deletemarker_total metric (#17689) 2023-07-20 07:52:32 -07:00
Anis Eleuch
756d6aa729 fix: report correct pool/set/disk indexes for offline disks (#17695) 2023-07-20 07:48:21 -07:00
Harshavardhana
bddd53d6d2 fix: retry listing in decommissioning if it fails perpetually (#17682) 2023-07-19 13:09:37 -07:00
Harshavardhana
c0a5bdaed9 update grafana dashboard JSON with the new metrics (#17683) 2023-07-19 08:16:04 -07:00
jiuker
a99cd825ab fix: byHost realTime metrics API (#17681) 2023-07-18 23:50:30 -07:00
Harshavardhana
6426b74770 move bucket centric metrics to /minio/v2/metrics/bucket handlers (#17663)
Users/customers no longer have a reasonable number of buckets;
this is why we must avoid overpopulating cluster endpoints and instead
move bucket monitoring to a separate endpoint.

Some of this is a breaking change for a couple of metrics, but
it is imperative that we do it to improve the responsiveness of
our Prometheus cluster endpoint.

Bonus: Added new cluster metrics for usage, objects and histograms
2023-07-18 22:25:12 -07:00
Harshavardhana
4f257bf1e6 pick internode interface properly via globalLocalNodeName (#17680)
current code will not pick the right interface name
if --address or --interface is not provided.
2023-07-18 19:18:11 -07:00
Minio Trusted
73a056999c Update yaml files to latest version RELEASE.2023-07-18T17-49-40Z 2023-07-18 21:44:57 +00:00
Krishnan Parthasarathi
0120ff93bc admin-info: add DeleteMarkers count (#17659) 2023-07-18 10:49:40 -07:00
Anis Eleuch
49638fa533 s3: Delete Bucket should not recreate bucket if it does not exist (#17676)
Also return Bucket Not Found error in the same use case.
2023-07-18 09:32:19 -07:00
Harshavardhana
76510dac8a upgrade all deps to their latest releases (#17671) 2023-07-17 21:12:48 -07:00
Shubhendu
7a3a7b19e5 Added a start script to inspect command output (#17591)
Using this script, post decrypt we should be able to bring up the
MinIO instance with same configuration.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-07-17 14:15:28 -07:00
Harshavardhana
24e86d0c59 avoid passing around poolIdx, setIdx instead pass the relevant disks (#17660) 2023-07-17 09:52:05 -07:00
drivebyer
9b5c2c386a fix: return error when requested interface has no stats available (#17666) 2023-07-17 01:14:01 -07:00
jiuker
d118031ed6 fix: when Origin: null is set return back '*' for allow origins (#17651) 2023-07-15 12:15:06 -07:00
Anis Eleuch
341a89c00d return a descriptive error when loading any IAM item fails (#17654)
Sometimes IAM fails to load certain items, which could be a user,
a service account or a policy, but without enough information for
us to debug.

This commit will create a more descriptive error to make it easier to
debug in such situations.
2023-07-14 20:17:14 -07:00
Anis Eleuch
df29d25e6b return different status code for internode communication (#17655)
mc admin trace -a will be able to quickly show
401 Unauthorized header to pinpoint trivial issues
between nodes, such as wrong root 
credentials and skewed time.
2023-07-14 18:34:55 -07:00
Harshavardhana
3e196fa7b3 fix: ILM newer noncurrent version limit must return correct versions (#17652)
Objects/versions that are not expired via NewerNoncurrentVersions
must be properly returned to be applied under further ILM actions.

Otherwise this would cause legitimately expired objects to be missed
from expiration.
2023-07-14 16:42:35 -07:00
drivebyer
04c792476f fix: provide a possible slice cap for heal failed metrics items (#17647)
Signed-off-by: Wu <yang.wu@daocloud.io>
2023-07-14 11:02:45 -07:00
Harshavardhana
005a4a275a add more bootstrap messages to provide latency (#17650)
- simplify refreshing bucket metadata, wait() to
  depend on how fast the bucket metadata can load.

- simplify resync to start resync in single pass.
2023-07-14 04:00:29 -07:00
Harshavardhana
bdddf597f6 shuffle buckets randomly before being scanned (#17644)
this randomness is needed to avoid scanning
the same buckets across different erasure sets,
in the same order.

allow random buckets to be scanned instead
allowing a wider spread of ILM, replication
checks.

Additionally, do not loop over twice to fill
the channel; fill the channel regardless of
whether the bucket is new or old.
2023-07-14 02:25:40 -07:00
Aditya Manthramurthy
bb6921bf9c Send AuditLog via new middleware fn for admin APIs (#17632)
A new middleware function is added for admin handlers, including options
for modifying certain behaviors. This admin middleware:

- sets the handler context via reflection in the request and sends AuditLog
- checks for object API availability (skipping it if a flag is passed)
- enables gzip compression (skipping it if a flag is passed)
- enables header tracing (adding body tracing if a flag is passed)

While the new function is a middleware, due to the flags used for
conditional behavior modification it is used in each route registration
call.
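
A minimal sketch of what such a flag-driven admin middleware could look like; the names and flags here are illustrative, not the actual MinIO handlers:

```
package main

import (
	"log"
	"net/http"
)

// flags controlling per-route behavior (names are illustrative only).
type middlewareFlag int

const (
	noObjLayerFlag middlewareFlag = 1 << iota
	noGZFlag
)

// adminMiddleware wraps an admin handler with shared concerns such as
// audit logging and an object-layer readiness check, tweaked by flags.
func adminMiddleware(h http.HandlerFunc, flags middlewareFlag, objLayerReady func() bool) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		log.Printf("audit: %s %s", r.Method, r.URL.Path) // stand-in for AuditLog
		if flags&noObjLayerFlag == 0 && !objLayerReady() {
			http.Error(w, "server not initialized", http.StatusServiceUnavailable)
			return
		}
		h(w, r)
	}
}

func main() {
	ready := func() bool { return true }
	http.HandleFunc("/minio/admin/v3/info",
		adminMiddleware(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("ok"))
		}, 0, ready))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```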

To try to ensure that no regressions are introduced, the following
changes were done mechanically mostly with `sed` and regexp:

- Remove defer logger.AuditLog in admin handlers
- Replace newContext() calls with r.Context()
- Update admin routes registration calls

Bonus: remove unused NetSpeedtestHandler

Since the new adminMiddleware function checks for object layer presence
by default, we need to pass the `noObjLayerFlag` explicitly to admin
handlers that should work even when it is not available. The following
admin handlers do not require it:

- ServerInfoHandler
- StartProfilingHandler
- DownloadProfilingHandler
- ProfileHandler
- SiteReplicationDevNull
- SiteReplicationNetPerf
- TraceHandler

For these handlers adminMiddleware does not check for the object layer
presence (disabled by passing the `noObjLayerFlag`), and for all other
handlers, the pre-check ensures that the handler is not called when the
object layer is not available - the client would get a
ErrServerNotInitialized and can retry later.

This `noObjLayerFlag` is added based on existing behavior for these
handlers only.
2023-07-13 14:52:21 -07:00
Shireesh Anjal
bb63375f1b Do not consider subnet api key as secret (#17643)
As it is required by mc and console to communicate with subnet
2023-07-13 12:24:47 -07:00
Klaus Post
4f89e5bba9 Add active disk health checks (#17539)
Add check every 2 minutes to see if a write+read operation can complete.

If disk is unresponsive for 2 minutes or returns errFaultyDisk, take it offline.
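
A simplified sketch of such a periodic write+read probe with a deadline; helper names and paths are hypothetical, and the real check lives in MinIO's storage layer:

```
package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// probeDisk writes and reads back a tiny file under dir, failing if it
// cannot complete before the context deadline.
func probeDisk(ctx context.Context, dir string) error {
	done := make(chan error, 1)
	go func() {
		p := filepath.Join(dir, ".healthcheck")
		if err := os.WriteFile(p, []byte("ok"), 0o644); err != nil {
			done <- err
			return
		}
		_, err := os.ReadFile(p)
		done <- err
	}()
	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		return fmt.Errorf("disk unresponsive: %w", ctx.Err())
	}
}

func main() {
	// The real check runs on a ticker every 2 minutes; a failing or
	// timed-out probe marks the drive offline.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()
	if err := probeDisk(ctx, os.TempDir()); err != nil {
		fmt.Println("taking drive offline:", err)
		return
	}
	fmt.Println("drive healthy")
}
```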
2023-07-13 11:41:55 -07:00
jiuker
183428db03 feat: Implement 'mc support top net' (#17598) 2023-07-13 11:41:19 -07:00
Shireesh Anjal
fc6d873758 Use os.ReadFile instead of ioutil.ReadFile (#17649)
ioutil.ReadFile is deprecated and also doesn't work with certain kinds
of symlinks.
2023-07-13 09:07:10 -07:00
Poorna
5e2f8d7a42 replication: Simplify mrf requeueing and add backlog handler (#17171)
Simplify MRF queueing and add backlog handler

- Limit re-tries to 3 to avoid repeated re-queueing. Fall offs
to be re-tried when the scanner revisits this object or upon access.

- Change MRF to have each node process only its MRF entries.

- Collect MRF backlog by the node to allow for current backlog visibility
2023-07-12 23:51:33 -07:00
Shubhendu
9b9871cfbb Added endpoint and versions attributes to KMS details (#17350)
Now it would list details of all KMS instances with additional
attributes `endpoint` and `version`. In the case of k8s-based
deployment the list would consist of a single entry.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-07-12 23:50:38 -07:00
guangwu
f80b6926d3 chore: fix minor issues reported via staticcheck (#17639) 2023-07-12 20:33:11 -07:00
Shubhendu
6dc55fe5ed Corrected the API name for audit logging purpose (#17642)
This records the correct API name so that
any verification around audit logs, to figure out if required
APIs are called the required number of times, would be correct.
Here, in the case of policy attach, API `AttachDetachPolicyBuiltin`
would be called with `requestPath` as `/minio/admin/v3/idp/builtin/policy/attach`,
and in the case of detach policy the value would be `/minio/admin/v3/idp/builtin/policy/detach`.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-07-12 15:38:49 -07:00
Harshavardhana
2d1cda2061 fix: do not os.Exit(1) while writing goroutines during shutdown (#17640)
Also add jitter to the shutdown poll, to verify if the shutdown
sequence can finish before 500ms; this reduces the overall
time taken during "restart" of the service.

Provides speedup for `mc admin service restart` during
active I/O, also ensures that systemd doesn't treat the
returned 'error' as a failure, certain configurations in
systemd can cause it to 'auto-restart' the process by-itself
which can interfere with `mc admin service restart`.

It can be observed how now restarting the service is
much snappier.
2023-07-12 07:18:30 -07:00
Harshavardhana
a566bcf613 treat 0-byte objects to honor same quorum as delete marker (#17633)
on unversioned buckets it's possible that 0-byte objects
might lose quorum on flaky systems; allow them to be treated
the same as DELETE markers, since practically speaking they
have no content.
2023-07-11 21:53:49 -07:00
Minio Trusted
f6040dffaf Update yaml files to latest version RELEASE.2023-07-11T21-29-34Z 2023-07-11 23:25:30 +00:00
Klaus Post
9885a0a6af Fix hasSpaceFor in SNSD setup (#17630)
If a drive is offline or full we divide by 0.

Fixes #17629

Bonus: Reject when any valid disk exceeds minimum inode threshold.
2023-07-11 14:29:34 -07:00
Kaan Kabalak
f64d62b01d Fix style of logOnceIf calls w/unique identifiers (#17631) 2023-07-11 13:17:45 -07:00
Harshavardhana
82075e8e3a use strconv variants to improve on performance per 'op' (#17626)
```
BenchmarkItoa
BenchmarkItoa-8         	673628088	         1.946 ns/op	       0 B/op	       0 allocs/op
BenchmarkFormatInt
BenchmarkFormatInt-8    	592919769	         2.012 ns/op	       0 B/op	       0 allocs/op
BenchmarkSprint
BenchmarkSprint-8       	26149144	        49.06 ns/op	       2 B/op	       1 allocs/op
BenchmarkSprintBool
BenchmarkSprintBool-8   	26440180	        45.92 ns/op	       4 B/op	       1 allocs/op
BenchmarkFormatBool
BenchmarkFormatBool-8   	1000000000	         0.2558 ns/op	       0 B/op	       0 allocs/op
```
2023-07-11 07:46:58 -07:00
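For reference, the benchmark above compares roughly the following conversions; `strconv` avoids the interface boxing and reflection that `fmt.Sprint` pays for. This is a small illustrative comparison, not code from the PR:

```
package main

import (
	"fmt"
	"strconv"
)

func main() {
	n, b := 42, true

	// strconv: zero-allocation conversions, ~2 ns/op in the benchmark above.
	fmt.Println(strconv.Itoa(n))
	fmt.Println(strconv.FormatInt(int64(n), 10))
	fmt.Println(strconv.FormatBool(b)) // returns a constant "true"/"false" string

	// fmt.Sprint: ~45-50 ns/op with one allocation per call in the benchmark.
	fmt.Println(fmt.Sprint(n))
	fmt.Println(fmt.Sprint(b))
}
```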
Harshavardhana
5b7c83341b move per bucket metrics to peer location (#17627) 2023-07-11 07:46:24 -07:00
Harshavardhana
8522905d97 update helm linting workflow 2023-07-10 20:11:51 -07:00
Harshavardhana
524ed7ccd0 update go.mod pointing to wrong repo 2023-07-10 20:10:39 -07:00
Poorna
fb49aead9b replication: add validation API (#17520)
To check if replication is set up properly on a bucket.
2023-07-10 20:09:20 -07:00
Aditya Manthramurthy
85f5700e4e fix: missing audit logger call for some admin APIs (#17623) 2023-07-10 16:59:44 -07:00
Aditya Manthramurthy
43b3c093ef Fix: set request id in trace context properly (#17622) 2023-07-10 15:40:44 -07:00
Kaan Kabalak
bd6842d917 Further print log messages once per error (#17618) 2023-07-10 07:59:57 -07:00
Poorna
e8c98c3246 Avoid extra GetObjectInfo call in DeleteObject API (#17599)
Optimize DeleteObject API to avoid extra 
GetObjectInfo call on the replicating side.

For receiving side, it is just a regular
DeleteObject call.

Bonus: Fix a corner case where version purged is 
absent on target (either due to replication not yet
complete or target version already deleted in a
one-way replication or when replication was disabled). 

In such cases, mark version purge complete.
2023-07-10 07:57:56 -07:00
Harshavardhana
dfd7cca0d2 fix: allow cancel of decom only when its in progress (#17607) 2023-07-10 07:55:38 -07:00
Harshavardhana
af3d99e35f helm release v5.0.13
Signed-off-by: Harshavardhana <harsha@minio.io>
2023-07-09 00:13:05 -07:00
Harshavardhana
f6186965c3 honor DeleteAllVersions in list(), head() calls (#17604) 2023-07-08 15:42:10 -07:00
Ian Martin
90c2129f44 Add helm chart linting to CI workflow (#17606) 2023-07-08 15:41:12 -07:00
yaohwu
69e131ee69 Fix helm templating syntax in post-job (#17605) 2023-07-08 12:18:31 -07:00
Harshavardhana
28a01f0320 update missing license header in files (#17603) 2023-07-08 10:42:05 -07:00
Anis Eleuch
6d0bc5ab1e prometheus: Fix internode stats (#17594)
Internode calculation was done inside S3 handlers, fix it by moving it
to internode handlers.

Remove admin stats since it is not used.
2023-07-08 07:35:11 -07:00
Aditya Manthramurthy
7af78af1f0 fix: set request ID in tracing context key (#17602)
Since `addCustomerHeaders` middleware was after the `httpTracer`
middleware, the request ID was not set in the http tracing context. By
reordering these middleware functions, the request ID header becomes
available. We also avoid setting the tracing context key again in
`newContext`.

Bonus: All middleware functions are renamed with a "Middleware" suffix
to avoid confusion with http Handler functions.
2023-07-08 07:31:42 -07:00
Klaus Post
45a717a142 Avoid per request URL parsing (#17593)
Every request does a `url.Parse(c.url.String())` to clone a URL.

The host will also be static, so we rewrite that on creation.
2023-07-07 22:07:30 -07:00
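A minimal illustration of the idea: keep the parsed `url.URL` around and copy it by value per request instead of re-parsing its string form. The `client` type here is an assumption, not the actual REST client:

```
package main

import (
	"fmt"
	"net/url"
)

type client struct {
	url *url.URL // parsed once at construction, host rewritten up front
}

// newRequestURL returns a per-request URL without calling url.Parse again.
func (c *client) newRequestURL(path string) *url.URL {
	u := *c.url   // struct copy, no parsing
	u.Path = path // safe: the copy owns its own fields
	return &u
}

func main() {
	base, _ := url.Parse("https://node1:9000")
	c := &client{url: base}
	fmt.Println(c.newRequestURL("/minio/storage/v1/readfile"))
}
```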
Ian Martin
cb1ec0a0d9 fix: helm templating syntax in post-job (#17600) 2023-07-07 22:06:40 -07:00
Harshavardhana
abb1f22057 Revert "change ttfb_distribution metrics to histogramMetric (#17115)"
This reverts commit 9112ca4e29.
2023-07-07 13:57:37 -07:00
Harshavardhana
73efe436a5 update helm v5.0.12
Signed-off-by: Harshavardhana <harsha@minio.io>
2023-07-07 09:44:16 -07:00
Harshavardhana
f41edb23e2 add variadic delays in peer notification retries (#17592)
just adds more `jitter` in our retries to avoid
burst flooding for peer calls.
2023-07-07 07:47:38 -07:00
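A small sketch of jittered retry delays; the base/spread values and function names are illustrative, not the tuned ones used for peer notification:

```
package retry

import (
	"math/rand"
	"time"
)

// jitteredDelay spreads retries over [base, base+spread) so peers retrying
// at the same moment do not flood the remote node in lockstep.
func jitteredDelay(base, spread time.Duration) time.Duration {
	return base + time.Duration(rand.Int63n(int64(spread))) // spread must be > 0
}

// notifyWithRetry calls fn up to attempts times, sleeping a jittered delay
// between failed attempts.
func notifyWithRetry(attempts int, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(jitteredDelay(100*time.Millisecond, 250*time.Millisecond))
	}
	return err
}
```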
Minio Trusted
6335a48a53 Update yaml files to latest version RELEASE.2023-07-07T07-13-57Z 2023-07-07 07:53:18 +00:00
Klaus Post
e20aab25ec Check for progress before we reach the limit (#17552) 2023-07-07 00:13:57 -07:00
Anis Eleuch
66bea3942a CI/CD to stop one node per pool in the two pools mint test (#17518)
This is to make sure that all S3 ops work when there is enough quorum
2023-07-07 00:10:13 -07:00
Harshavardhana
08acd9c43d update all our deps (#17590) 2023-07-06 21:47:46 -07:00
Klaus Post
ff5988f4e0 Reduce allocations (#17584)
* Reduce allocations

* Add stringsHasPrefixFold which can compare string prefixes, while ignoring case and not allocating.
* Reuse all msgp.Readers
* Reuse metadata buffers when not reading data.

* Make type safe. Make buffer 4K instead of 8.

* Unslice
2023-07-06 16:02:08 -07:00
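A minimal, allocation-free case-insensitive prefix check in the spirit of the `stringsHasPrefixFold` helper mentioned above; this is a sketch for ASCII-dominated keys, not the exact MinIO implementation:

```
package strutil

import "strings"

// hasPrefixFold reports whether s begins with prefix, ignoring case.
// Slicing a string and strings.EqualFold do not allocate; for purely ASCII
// prefixes the byte-length slice is always rune-aligned.
func hasPrefixFold(s, prefix string) bool {
	return len(s) >= len(prefix) && strings.EqualFold(s[:len(prefix)], prefix)
}
```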
Harshavardhana
1bf23374a3 do not need to gzip 'mc' in our container (#17586)
fixes #17581
2023-07-06 15:13:17 -07:00
Alex
899b429094 Update Console to v0.32.0 (#17587)
Signed-off-by: Benjamin Perez <benjamin@bexsoft.net>
2023-07-06 15:13:07 -07:00
jiuker
c47ff44f5e fix: disable site network test if site replication is disabled (#17579) 2023-07-06 09:19:14 -07:00
Harshavardhana
8af0773baf remove deprecated Content-Security-Policy (#17580)
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/block-all-mixed-content
2023-07-06 09:18:38 -07:00
Alex
37cbd114de Updated console to v0.31.0 (#17578) 2023-07-06 00:37:56 -07:00
jiuker
2dbb1cff4a feat: support perf site replication (#17477) 2023-07-05 22:28:26 -07:00
Klaus Post
6efcf9c982 Do lockless last minute latency metrics (#17576)
Collect metrics in one second and accumulate lockless before sending upstream.
2023-07-05 10:40:45 -07:00
Harshavardhana
0bc34952eb fix: under FanOut API avoid repeated md5sum calculation (#17572)
md5sum calculation has a high CPU overhead, avoid calculating
it repeatedly for similar fanOut calls.

To fix following CPU profiler result
```
(pprof) top10
Showing nodes accounting for 678.68s, 84.67% of 801.54s total
Dropped 1072 nodes (cum <= 4.01s)
Showing top 10 nodes out of 156
      flat  flat%   sum%        cum   cum%
   332.54s 41.49% 41.49%    332.54s 41.49%  runtime/internal/syscall.Syscall6
   228.39s 28.49% 69.98%    228.39s 28.49%  crypto/md5.block
    48.07s  6.00% 75.98%     48.07s  6.00%  runtime.memmove
    28.91s  3.61% 79.59%     28.91s  3.61%  github.com/minio/highwayhash.updateAVX2
     8.25s  1.03% 80.61%      8.25s  1.03%  runtime.futex
     8.25s  1.03% 81.64%     10.81s  1.35%  runtime.step
     6.99s  0.87% 82.52%     22.35s  2.79%  runtime.pcvalue
     6.67s  0.83% 83.35%     38.90s  4.85%  runtime.mallocgc
     5.77s  0.72% 84.07%     32.61s  4.07%  runtime.gentraceback
     4.84s   0.6% 84.67%     10.49s  1.31%  runtime.lock2
```
2023-07-05 03:16:05 -07:00
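The gist of the fix, sketched below: hash the shared payload once and reuse the digest for every fan-out target instead of re-running `crypto/md5` per call. Target names are made up:

```
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func main() {
	payload := []byte("same body written to every fan-out destination")

	// Hash once...
	sum := md5.Sum(payload)
	etag := hex.EncodeToString(sum[:])

	// ...then reuse the result for each target instead of re-hashing.
	targets := []string{"bucket/a.json", "bucket/b.json", "bucket/c.json"}
	for _, t := range targets {
		fmt.Printf("put %s md5=%s\n", t, etag)
	}
}
```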
jiuker
f6b48ed02a fix: create user without policy that is required (#17554)
fixes #17492
2023-07-04 07:39:29 -07:00
Harshavardhana
e37c4efc6e fix: upon DNS refresh() failure use previous values (#17561)
DNS refresh() in the case of MinIO can safely re-use
the previous values on bare-metal setups, since
bare-metal arrangements rarely change DNS.

This PR simplifies that: we only ever need DNS caching
on bare-metal setups.

- On containerized setups do not enable DNS
  caching at all, as it may have adverse effects on
  the overall effectiveness of k8s DNS systems.

  k8s DNS systems are dynamic and expect applications
  to avoid managing DNS caching themselves, and instead
  provide cleaner container-native caching
  implementations that must be used.

- update IsDocker() detection, including podman runtime

- move to minio/dnscache fork for a simpler package
2023-07-03 12:30:51 -07:00
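A rough sketch of the kind of runtime detection involved, checking the commonly used docker and podman marker files; the actual IsDocker()/podman detection in MinIO is more thorough:

```
package runtimeinfo

import "os"

// isContainerized returns true when the usual docker or podman marker files
// exist; in that case DNS caching is left to the platform (e.g. k8s DNS).
func isContainerized() bool {
	for _, p := range []string{"/.dockerenv", "/run/.containerenv"} {
		if _, err := os.Stat(p); err == nil {
			return true
		}
	}
	return false
}
```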
Harshavardhana
22f5bc643c fix: honor older scanner settings only if newer has not changed (#17564) 2023-07-03 12:28:36 -07:00
Anis Eleuch
15fd5ce2fa fix: A typo in per pool make/delete bucket errs calculation (#17553) 2023-07-03 09:47:40 -07:00
Harshavardhana
7f782983ca fix: for FTP server driver allow implicit trust of TLS (#17541)
fixes #17535
2023-06-30 08:04:13 -07:00
Aditya Manthramurthy
9d628346eb fix: service account list for root user (#17547)
Fixes https://github.com/minio/minio/issues/17545
2023-06-30 08:02:12 -07:00
Aditya Manthramurthy
bde533a9c7 fix: OpenID config initialization (#17544)
This is due to a regression in the handling of the enable key in OpenID
configuration.
2023-06-29 23:38:26 -07:00
Minio Trusted
2fcb75d86d Update yaml files to latest version RELEASE.2023-06-29T05-12-28Z 2023-06-29 06:37:32 +00:00
Harshavardhana
aae6846413 feat: allow expiration of all versions via ILM Expiration action (#17521)
The following extension allows users to specify immediate purge of
all versions as soon as the latest version of the object has
expired.

```
<LifecycleConfiguration>
    <Rule>
        <ID>ClassADocRule</ID>
        <Filter>
           <Prefix>classA/</Prefix>
        </Filter>
        <Status>Enabled</Status>
        <Expiration>
             <Days>3650</Days>
	     <ExpiredObjectAllVersions>true</ExpiredObjectAllVersions>
        </Expiration>
    </Rule>
    ...
```
2023-06-28 22:12:28 -07:00
Harshavardhana
5317a0b755 fix: support LDAP settings properly in ftp/sftp (#17536)
Bonus: this PR also adds support for creating
buckets via ftp `mkdir`

fixes #17526
2023-06-28 13:15:21 -07:00
Harshavardhana
73de721a63 fix: handle copyObjectPart encryption properly (#17530)
- look for requested encryption while compressing
  not just via HTTP Headers, but also via multipart
  metadata

- look for SSE-S3 etag decryption not just via HTTP
  Headers, but also via multipart metadata

fixes #17519
2023-06-28 09:43:50 -07:00
Harshavardhana
d2f5c3621f fix: add additional decommission traces for ILM expired content (#17522)
current decommission traces were missing for

- Skipped ILM expired versions
- Skipped single DELETE marked version
- A success or failure in decommissioning DELETE marker
- allow additional info to be shared in DecomStatus() API
2023-06-27 11:59:40 -07:00
Harshavardhana
1818764840 fix: bug in passing Versioned field set for getHealReplicationInfo() (#17498)
Bonus: rejects prefix deletes on object-locked buckets earlier
2023-06-27 09:45:50 -07:00
Harshavardhana
d3e5e607a7 allow site-replication checks to work on non-distributed setups (#17524)
fixes #17523
2023-06-27 09:23:50 -07:00
Shireesh Anjal
c1943ea3af Capture realtime metrics in health report (#17516) 2023-06-27 01:39:18 -07:00
Harshavardhana
2a82c15bf1 update all our deps (#17497) 2023-06-26 15:36:56 -07:00
guangwu
87b6fb37d6 chore: pkg imported more than once (#17444) 2023-06-26 09:21:29 -07:00
Kaan Kabalak
21fbe88e1f Print certain log messages once per error (#17484) 2023-06-24 20:29:13 -07:00
Harshavardhana
1f8b9b4bd5 fix: do not listAndHeal() inline with PutObject() (#17499)
there is a possibility that slow drives can add latency
to the overall call, leading to a large latency spike.

this can happen if there are other parallel listObjects()
calls to the same drive, which in turn cause each other
to serialize.

this potentially improves performance and makes PutObject()
also non-blocking.
2023-06-24 19:31:04 -07:00
Minio Trusted
fcbed41cc3 Update yaml files to latest version RELEASE.2023-06-23T20-26-00Z 2023-06-24 07:25:49 +00:00
Klaus Post
216069d0da Remove 'null' version ID from directory object response (#17495)
Fixes #17494

Regression from #17132
2023-06-23 13:26:00 -07:00
Harshavardhana
eefa047974 fix: keep decommission in a go-routine (#17496)
This was removed by mistake in #17491
2023-06-23 12:29:32 -07:00
Anis Eleuch
d8dad5c9ea s3: Make/Delete buckets to use error quorum per pool (#17467) 2023-06-23 11:48:23 -07:00
Klaus Post
bf8a68879c fix: Time ILM Actions for scanner info (#17493)
ILM Actions were not timed; fix that.
2023-06-23 07:48:36 -07:00
Aditya Manthramurthy
f3248a4b37 Redact all secrets from config viewing APIs (#17380)
This change adds a `Secret` property to `HelpKV` to identify secrets
like passwords and auth tokens that should not be revealed by the server
in its configuration fetching APIs. Configuration reporting APIs now do
not return secrets.
2023-06-23 07:45:27 -07:00
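A simplified sketch of the idea: help metadata carries a `Secret` flag and the config reporting path masks any such key before returning it. The struct and helper below are illustrative, not the exact `HelpKV` definition or redaction code:

```
package config

// HelpKV describes one configuration key; Secret marks values such as
// passwords and auth tokens that must never be returned by the server.
type HelpKV struct {
	Key         string
	Description string
	Secret      bool
}

// redact replaces values of secret keys before the configuration is
// returned by the admin config-fetching APIs.
func redact(kvs map[string]string, help []HelpKV) {
	for _, h := range help {
		if h.Secret {
			if _, ok := kvs[h.Key]; ok {
				kvs[h.Key] = "*redacted*"
			}
		}
	}
}
```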
Harshavardhana
d315d012a4 decom: during multiple pool decom preserve current pool status (#17491)
removal of completed pools must retain pool status of other
pools in draining, to resume any remaining draining operations.
2023-06-23 07:44:18 -07:00
Harshavardhana
bd9bf3693f lambda: negative duration for presigned URL default to 1H (#17489)
fixes a bug where users created with Expiration set to
timeSentinel are not rejected while generating the
presigned URL for lambda processing.
2023-06-23 00:17:24 -07:00
Klaus Post
15daa2e74a tooling: Add xlmeta --combine switch that will combine inline data (#17488)
Will combine or write partial data of each version found in the inspect data.

Example:

```
> xl-meta -export -combine inspect-data.1228fb52.zip

(... metadata json...)
}
Attempting to combine version "994f1113-da94-4be1-8551-9dbc54b204bc".
Read shard 1 Data shards 9 Parity 4 (994f1113-da94-4be1-8551-9dbc54b204bc/shard-01-of-13.data)
Read shard 2 Data shards 9 Parity 4 (994f1113-da94-4be1-8551-9dbc54b204bc/shard-02-of-13.data)
Read shard 3 Data shards 9 Parity 4 (994f1113-da94-4be1-8551-9dbc54b204bc/shard-03-of-13.data)
Read shard 4 Data shards 9 Parity 4 (994f1113-da94-4be1-8551-9dbc54b204bc/shard-04-of-13.data)
Read shard 6 Data shards 9 Parity 4 (994f1113-da94-4be1-8551-9dbc54b204bc/shard-06-of-13.data)
Read shard 7 Data shards 9 Parity 4 (994f1113-da94-4be1-8551-9dbc54b204bc/shard-07-of-13.data)
Read shard 8 Data shards 8 Parity 5 (994f1113-da94-4be1-8551-9dbc54b204bc/shard-08-of-13.data)
Read shard 9 Data shards 8 Parity 5 (994f1113-da94-4be1-8551-9dbc54b204bc/shard-09-of-13.data)
Read shard 10 Data shards 8 Parity 5 (994f1113-da94-4be1-8551-9dbc54b204bc/shard-10-of-13.data)
Read shard 11 Data shards 8 Parity 5 (994f1113-da94-4be1-8551-9dbc54b204bc/shard-11-of-13.data)
Read shard 13 Data shards 8 Parity 5 (994f1113-da94-4be1-8551-9dbc54b204bc/shard-13-of-13.data)
Attempting to reconstruct using parity sets:
* Setup: Data shards: 9 - Parity blocks: 6
Have 6 complete remapped data shards and 6 complete parity shards. Could NOT reconstruct: too few shards given
* Setup: Data shards: 8 - Parity blocks: 5
Have 5 complete remapped data shards and 5 complete parity shards. Could reconstruct completely
0 bytes missing. Truncating 0 from the end.
Wrote output to 994f1113-da94-4be1-8551-9dbc54b204bc.complete
```

So far only inline data, but no real reason that external data can't also be included with some handling of blocks.

Supports only unencrypted data.
2023-06-22 12:41:24 -07:00
Harshavardhana
74759b05a5 make sure to set relevant config entries correctly (#17485)
Bonus: also allow skipping keys properly.
2023-06-22 10:04:02 -07:00
Aditya Manthramurthy
82ce78a17c Fix locking in policy attach API (#17426)
For policy attach/detach API to work correctly the server should hold a
lock before reading existing policy mapping and until after writing the
updated policy mapping. This is fixed in this change.

A site replication bug, where LDAP policy attach/detach were not
correctly propagated is also fixed in this change.

Bonus: Additionally, the server responds with the actual (or net)
changes performed in the attach/detach API call. For e.g. if a user
already has policy A applied, and a call to attach policies A and B is
performed, the server will respond that B was attached successfully.
2023-06-21 22:44:50 -07:00
Harshavardhana
9af6c6ceef under rebalance look for expired versions v/s remaining versions (#17482)
A continuation of PR #17479: rebalance behavior must
also match the decommission behavior.

Fixes a bug where rebalance would skip rebalancing object
versions after one of the versions returned "ObjectNotFound"
2023-06-21 13:23:20 -07:00
Harshavardhana
021372cc4c update helm release v5.0.11
Signed-off-by: Harshavardhana <harsha@minio.io>
2023-06-21 12:29:09 -07:00
Praveen raj Mani
b94ab07c2f Honor global root CAs for kafka audit tls (#17481)
honor global root CAs for kafka audit tls
2023-06-21 10:50:40 -07:00
Harshavardhana
7605d07bb2 add support for bucket level request count per API (#17468)
New metrics added to calculate API request count
per bucket, per API.  Captures errors, including
4xx, 5xx HTTP status codes separately.
2023-06-21 09:41:59 -07:00
Harshavardhana
ccc5801112 always look for expired versions v/s remaining versions (#17479)
while decommissioning, it can happen that the non-current
versions are all expired but a DELETE marker is the
latest version.

We should not decommission such objects; instead,
calculate the remaining versions, and if only one version
remains and that version is a DELETE marker, do not
schedule the object for decommissioning.
2023-06-21 08:49:28 -07:00
Praveen raj Mani
7c72b25ef0 Add an option to make bucket notifications synchronous (#17406)
With the current asynchronous behaviour of sending notification events
to the targets, we can't provide guaranteed delivery, as the systems
might restart in between.

For such event-driven use-cases, we can provide an option to enable
synchronous events where the APIs wait until the event is successfully
sent or persisted.

This commit adds 'MINIO_API_SYNC_EVENTS' env which when set to 'on'
will enable sending/persisting events to targets synchronously.
2023-06-20 17:38:59 -07:00
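A bare-bones sketch of the switch this env variable introduces; `sendSync` and `queueAsync` stand in for the real target send and persistent-queue paths and are assumptions of this sketch:

```
package events

import "os"

// sendEvent waits for delivery when MINIO_API_SYNC_EVENTS=on, and otherwise
// enqueues the event and returns immediately (best-effort delivery).
func sendEvent(ev interface{}, sendSync func(interface{}) error, queueAsync func(interface{})) error {
	if os.Getenv("MINIO_API_SYNC_EVENTS") == "on" {
		// The API call blocks until the event is sent or persisted.
		return sendSync(ev)
	}
	queueAsync(ev)
	return nil
}
```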
Harshavardhana
02c2ec3027 skip onlineDisks with parity mismatch (#17478) 2023-06-20 13:18:24 -07:00
Harshavardhana
65c31fab12 fix: do not crash rebalance code instead set the object layer (#17465)
fixes #17421
2023-06-20 09:28:23 -07:00
jiuker
b6b68be052 fix: replication check for duplicate endpoints detection with wrong route (#17474) 2023-06-20 09:27:54 -07:00
Harshavardhana
15911c85f6 safely ignore out of band deletions while decommissioning (#17473) 2023-06-20 08:31:42 -07:00
Aditya Manthramurthy
5a1612fe32 Bump up madmin-go and pkg deps (#17469) 2023-06-19 17:53:08 -07:00
Minio Trusted
bbb7ae156c Update yaml files to latest version RELEASE.2023-06-19T19-52-50Z 2023-06-19 22:53:03 +00:00
Harshavardhana
f9b8d1c699 fix: sio-error test to fail if commands fail (#17466) 2023-06-19 12:52:50 -07:00
Harshavardhana
1443b5927a allow quorum fileInfo to pick same parityBlocks (#17454)
Bonus: allow replication to proceed for 503 errors such as
with error code SlowDownRead
2023-06-18 18:20:15 -07:00
Anis Eleuch
35ef35b5c1 fix an integer divide by zero crash during rebalance (#17455)
A state is updated with a delete marker, which does not have parity or
data blocks defined; this can cause integer divide by zero panics.

This commit avoids those panics.
2023-06-18 11:14:53 -07:00
Harshavardhana
6806537eb3 event args list for fanOut notification must be sized same (#17450)
without this the fan-out API can crash if the client cancels
the ongoing request.
2023-06-18 07:09:20 -07:00
Harshavardhana
64de61d15d fallback on etags if they match when mtime is not same (#17424)
on "unversioned" buckets there are situations
when successive concurrent I/O can lead to
an inconsistent state() with mtime while the
etag might be the same for the object on disk.

in such a scenario it is possible for us to
allow reading of the object since etag matches
and if etag matches we are guaranteed that we
have enough copies the object will be readable
and same.

This PR allows fallback in such scenarios.
2023-06-17 19:18:20 -07:00
Harshavardhana
22b7c8cd8a upgrade pkg and dperf to latest packages (#17448)
- dperf improvements in benchmarking read and write tests
- upgrade mimedb to use latest content-types
2023-06-17 07:31:36 -07:00
Poorna
c4d0c49a5f ensure metadata updates go to same pool where version exists (#17451)
This PR also returns the replication status in
proxy calls, and defers the replication attempt if
HEAD on the object version returned an error different
from NoSuchKey.
2023-06-17 07:30:53 -07:00
Minio Trusted
142a5b0dcd Update yaml files to latest version RELEASE.2023-06-16T02-41-06Z 2023-06-16 05:36:19 +00:00
mungo312
25db1e4eca helm: fix permission denied errors in post-job when running as non-root (#17175)
Fixes #17174
2023-06-15 19:41:06 -07:00
Harshavardhana
47a48b6832 do not save any metadata from the headers in tar extract (#17436)
only preserve the same storage-class as the incoming
request; other than that, the rest of the metadata must
be deduced.
2023-06-15 17:44:07 -07:00
Harshavardhana
e98309eb75 update minio-go/v7 v7.0.57 (#17439) 2023-06-15 16:29:19 -07:00
Alex
87051872a7 Update console to v0.30.0 (#17438) 2023-06-15 15:30:56 -07:00
Anis Eleuch
a2aed12dcd decom: Fix a typo in routing decommissioning requests (#17435)
A specific node should do the decommissioning task; however, routing the
start-decommission request to that node was not working properly.

Co-authored-by: Anis Elleuch <anis@min.io>
2023-06-15 14:54:29 -07:00
Anis Eleuch
d8e6e76e89 site-repl: Better error msg when setting sync in a local cluster (#17407) 2023-06-15 12:44:22 -07:00
Anis Eleuch
8c33fdf5f4 s3-check-md5: Add --modified-since flag to skip some objects (#17410) 2023-06-15 12:44:09 -07:00
Harshavardhana
ad4e511026 do not save plain-text ETag when encryption is requested (#17427)
fixes an issue where bucket replication could cause
ETags for replicated SSE-S3 single-part PUT objects
to fail, as we would attempt a decryption during listing
or stat() operations.
2023-06-15 12:43:26 -07:00
Klaus Post
4a562d6732 fix: fanout error response - error must be string for marshaling (#17433)
Uses https://github.com/minio/minio-go/pull/1839
2023-06-15 09:21:53 -07:00
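A small illustration of why the fan-out response carries the error as a string: `encoding/json` cannot usefully marshal the `error` interface, so the message is copied into a plain string field. Field names here are made up:

```
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type fanOutResult struct {
	Key   string `json:"key"`
	Error string `json:"error,omitempty"` // a string field, not an error value
}

func main() {
	err := errors.New("quorum not reached")
	res := fanOutResult{Key: "bucket/object", Error: err.Error()}
	out, _ := json.Marshal(res)
	fmt.Println(string(out)) // {"key":"bucket/object","error":"quorum not reached"}
}
```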
Poorna
a9082e4f79 site replication: cancel ongoing op properly (#17428) 2023-06-15 08:05:08 -07:00
dependabot[bot]
6278679ffd build(deps): bump github.com/lestrrat-go/jwx from 1.2.25 to 1.2.26 (#17425)
Bumps [github.com/lestrrat-go/jwx](https://github.com/lestrrat-go/jwx) from 1.2.25 to 1.2.26.
- [Release notes](https://github.com/lestrrat-go/jwx/releases)
- [Changelog](https://github.com/lestrrat-go/jwx/blob/v1.2.26/Changes)
- [Commits](https://github.com/lestrrat-go/jwx/compare/v1.2.25...v1.2.26)

---
updated-dependencies:
- dependency-name: github.com/lestrrat-go/jwx
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-14 12:17:22 -07:00
jiuker
0474791cf8 fix: set time format right (#17402) 2023-06-14 07:49:13 -07:00
Harshavardhana
69f819e199 move to pkg@v1.7.3 to support aws:kms string in policy conditions (#17414) 2023-06-14 07:42:03 -07:00
Harshavardhana
f32efd5429 more compliance related fixes (#17408)
- lifecycle must return InvalidArgument for rule errors
- do not return `null` versionId in HTTP header
- reject mixed SSE uploads with correct error message
2023-06-13 13:52:33 -07:00
jiuker
22c247a988 fix: preserve multiple values for query params (#17392) 2023-06-13 11:38:46 -07:00
Shubhendu
35d71682f6 fix: do not allow removal of inbuilt policies unless they are already persisted (#17264)
Don't allow removal of inbuilt policies such as `readwrite, readonly, writeonly and diagnostics`
2023-06-13 11:06:17 -07:00
drivebyer
3d6b88a60e fix: syscall to record time on non-linux (#17383) 2023-06-13 11:04:50 -07:00
Harshavardhana
26a0803388 various compliance related fixes (#17401)
- getObjectTagging to be allowed for anonymous policies
- return correct errors for invalid retention period
- return sorted list of tags for an object
- putObjectTagging must return 200 OK not 204 OK
- return 409 ErrObjectLockConfigurationNotAllowed for existing buckets
2023-06-12 13:22:07 -07:00
Anis Eleuch
ae95384dd8 Revert "heal: Update object parity with the latest configured SC (#17187)" (#17404) 2023-06-12 11:54:51 -07:00
Anis Eleuch
0f0dcf0c5e tar: Avoid storing snowball extraction header in extract objects (#17389) 2023-06-12 09:42:06 -07:00
Klaus Post
6f2406b0b6 fix: protect ReplicationStats against concurrent map iteration and write crash (#17403) 2023-06-12 09:17:11 -07:00
Anis Eleuch
bb24346e04 listen: Only error out if not able to bind any interface (#17353) 2023-06-12 09:09:28 -07:00
Harshavardhana
be45ffd8a4 return 204 status code for DeleteBucketTagging (#17400) 2023-06-11 20:49:02 -07:00
Poorna Krishnamoorthy
f986b0c493 replication: perform bucket resync in parallel (#16707)
Default the number of parallel resync operations for a bucket
to 10 to speed up resync.
2023-06-11 16:09:55 -07:00
Harshavardhana
c9e87f0548 service accounts are allowed to have no expiration (#17397) 2023-06-11 10:34:59 -07:00
Harshavardhana
43468f4d47 return InvalidRequest when no parts are provided (#17395) 2023-06-10 21:59:51 -07:00
Minio Trusted
91987d6f7a Update yaml files to latest version RELEASE.2023-06-09T07-32-12Z 2023-06-10 05:38:08 +00:00
Harshavardhana
6b7c98bd0f make sure we pick up the right Go version in vulncheck (#17388) 2023-06-09 00:32:12 -07:00
Harshavardhana
b829e80ecb do not disable root for invalid API config values (#17386) 2023-06-08 15:50:06 -07:00
Klaus Post
6e38d0f3ab Add more bootstrap info in debug mode (#17362) 2023-06-08 08:39:47 -07:00
Anis Eleuch
38342b1df5 decom: Parallelize decommissining (#17364) 2023-06-07 14:27:51 -07:00
Harshavardhana
dbd4c2425e fix: kafka broker pings must not be greater than 1sec (#17376) 2023-06-07 11:47:00 -07:00
Harshavardhana
49ce85ee3d allow prefix/markers to have '/' in the beginning to throw an empty (#17373) 2023-06-07 11:25:26 -07:00
Anis Eleuch
eba378e4a1 vrf: Fix testing for loopback coming from the address (#17372) 2023-06-07 09:53:05 -07:00
Harshavardhana
442c50ff00 remove delimiter if not set by client, also fetchOwner is optional (#17366) 2023-06-06 21:31:47 -07:00
Harshavardhana
d1448adbda use slices package and remove some helpers (#17342) 2023-06-06 10:12:52 -07:00
jiuker
5a21b1f353 fix: Delete dir failed when .DS_Store in it (#17352) 2023-06-06 10:12:06 -07:00
Bartosz Marczyński
123a2fb3a8 helm: add /tmp mount to post jobs (#17301) 2023-06-06 06:50:33 -07:00
Harshavardhana
2f9e2147f5 allow quota enforcement to rely on older values (#17351)
PUT calls cannot afford large latency build-ups due to a
contentious usage.json, or worse, to fail with some
unexpected error; this can happen when this file is
concurrently being updated via the scanner or is being
healed during a disk replacement heal.

However, these updates are fairly quick in theory; stressed
clusters can quickly show visible latency, and this can add up,
leading to invalid errors returned during PUT.

It is perhaps okay for us to relax this error return requirement;
instead, make sure that we log that we are proceeding to take in
the requests while quota enforcement is using an older value for
the quota. These things will reconcile themselves eventually,
via the scanner making sure to overwrite the usage.json.

Bonus: make sure that storage-rest-client sets ExpectTimeouts to
'true', such that a DiskInfo() call with contextTimeout does
not prematurely disconnect the servers, leading to a longer
healthCheck back-off routine. This can easily pile up while also
causing active callers to disconnect, leading to quorum loss.

DiskInfo is actively used in the PUT and Multipart call paths for
upgrading parity when disks are down; it in turn shouldn't cause
more disks to go down.
2023-06-05 16:56:35 -07:00
Harshavardhana
75c6fc4f02 only allow decryption of etag for only sse-s3 (#17335) 2023-06-05 13:08:51 -07:00
Anis Eleuch
f9e07d6143 goroutines parser: Add --less flag to filter goroutines (#17339) 2023-06-04 14:20:46 -07:00
Anis Eleuch
1436858347 log: Add a log when saving pool.bin fails (#17338)
Co-authored-by: Anis Elleuch <anis@min.io>
2023-06-04 14:20:21 -07:00
jiuker
8030e12ba5 fix: expMovingAvg is too small when startTime is zero (#17346) 2023-06-03 13:41:51 -07:00
Minio Trusted
a485b923bf Update yaml files to latest version RELEASE.2023-06-02T23-17-26Z 2023-06-03 05:07:30 +00:00
Kaan Kabalak
0649aca219 Add expiration to ListServiceAccounts function (#17249) 2023-06-02 16:17:26 -07:00
Harshavardhana
b210ea79bc do not save MTime in newMultipartUpload() to avoid side-affects (#17340) 2023-06-02 14:38:09 -07:00
Poorna
68f80b5fe7 replication: ignore retention mode validation for replica (#17332) 2023-06-01 18:53:12 -07:00
Poorna
e95825a42e replication: use latest object info for metrics update (#17333) 2023-06-01 18:52:55 -07:00
Anis Eleuch
931712dc46 fix: converting 'server closed idle connection' to errDiskNotFound (#17330) 2023-06-01 15:40:28 -07:00
Harshavardhana
54e544e03e allow lookup()/head() operations on Veeam SOS objects (#17331) 2023-06-01 15:26:26 -07:00
Poorna
f86b9abf32 site removal: update site config and reload targets after update (#17327) 2023-06-01 10:19:56 -07:00
Anis Eleuch
9ef7eda33a heal: Avoid objects created after the heal disk start time (#17323) 2023-05-31 13:10:45 -07:00
Klaus Post
c9e26401fa Fix GetObject encrypted etag (#17302)
Co-authored-by: Harshavardhana <harsha@minio.io>
2023-05-31 13:10:25 -07:00
Harshavardhana
e53f49e9a9 add additional tools that help in debugging (#17325) 2023-05-31 13:06:08 -07:00
jiuker
14f6ac9222 fix: fail large content in DeleteMultipleObjects() early (#17321) 2023-05-31 10:58:14 -07:00
drivebyer
b8474295af fix: time() returned function not being called as expected in globalSync() (#17319) 2023-05-31 09:40:23 -07:00
Shireesh Anjal
817e85a3e0 fix: proxy not set on subnet logger webhook sometimes (#17320) 2023-05-31 08:09:09 -07:00
jiuker
fb5ce3b87a record err time when remote node is offline (#17262) 2023-05-30 10:07:26 -07:00
Klaus Post
6fe028b7c5 Revert s3 select simdjson reuse (#17310) 2023-05-30 10:02:22 -07:00
Harshavardhana
1cd7f1e38d fix: cleanup empty multipart folders upon stale upload cleanup (#17312) 2023-05-30 09:56:50 -07:00
jiuker
043fd8b536 fix: on windows use FindClose close handler (#17306) 2023-05-30 02:15:57 -07:00
Klaus Post
669acbb032 Fix Test LDAP for automatic site replication (#17305) 2023-05-29 08:13:58 -07:00
Minio Trusted
086d8f036e Update yaml files to latest version RELEASE.2023-05-27T05-56-19Z 2023-05-28 06:53:17 +00:00
Harshavardhana
394690dcfb check for upto 50%+ data disks to be offline (#17294) 2023-05-26 22:56:19 -07:00
Harshavardhana
398bca92ff update helm to v5.0.10
Signed-off-by: Harshavardhana <harsha@minio.io>
2023-05-26 17:05:49 -07:00
Harshavardhana
fb328b1a64 upgrade all dependencies (#17276) 2023-05-26 16:31:28 -07:00
Anis Eleuch
563f667e30 reorder-disks: Fix UID to UUID and add better error messages (#17292) 2023-05-26 15:03:31 -07:00
Klaus Post
c839b64f6a fix: compressed+encrypted block overhead (#17289) 2023-05-26 10:57:07 -07:00
Anis Eleuch
6425fec366 s3: Add x-minio-error-code header for S3 HEAD requests (#17283) 2023-05-26 10:13:18 -07:00
Harshavardhana
d5059840ef fix: for delete marked objects choose appropriate parity (#17287) 2023-05-26 09:57:44 -07:00
Aditya Manthramurthy
65cba212e8 Remove older policy attach behavior for LDAP (#17240) 2023-05-26 06:31:24 -07:00
Aditya Manthramurthy
7a69c9c75a Update builtin policy entities command (#17241) 2023-05-25 22:31:05 -07:00
Harshavardhana
4a425cbac1 cleanup scripts and apply shfmt (#17284) 2023-05-25 22:07:25 -07:00
Harshavardhana
615169c4ec update docker ubi image to 8.8 (#17281) 2023-05-25 18:19:09 -07:00
Harshavardhana
5cd9dcb844 rebalance 'null' delete markers properly (#17282) 2023-05-25 16:12:53 -07:00
Anis Eleuch
54c5c88fe6 Add number of offline disks in quorum errors (#16822) 2023-05-25 09:39:06 -07:00
jiuker
443250d135 fix: for Target isActive use net.Dial instead (#17251) 2023-05-25 09:24:11 -07:00
Harshavardhana
9b5829c16e avoid decommissioning DEL markers with single versions (#17274) 2023-05-25 09:18:49 -07:00
jiuker
d749aaab69 fix: ignore existing target status when adding new targets (#17250) 2023-05-24 22:57:37 -07:00
Krishnan Parthasarathi
62df731006 Add updatedAt for GetBucketLifecycleConfig (#17271) 2023-05-24 22:52:39 -07:00
Harshavardhana
d0a0eb9738 support fan-out objects via PostUpload() (#17233) 2023-05-24 22:51:07 -07:00
Klaus Post
66156b8230 Stricter partNumber checks (#17270)
Fixes #17269
2023-05-24 08:00:47 -07:00
Klaus Post
5677f73794 Add PostObject Checksum (#17244) 2023-05-23 07:58:33 -07:00
Harshavardhana
ef54200db7 offline drives more than 50% of total drives return error (#17252) 2023-05-23 07:57:57 -07:00
Harshavardhana
7875efbf61 update minio/dperf to latest release v0.4.4 (#17267) 2023-05-22 19:17:17 -07:00
Krishnan Parthasarathi
3e128c116e Add lifecycle event source to audit log tags (#17248) 2023-05-22 15:28:56 -07:00
Krishnan Parthasarathi
55a3310446 logger-http: Don't retry after a successful send (#17266) 2023-05-22 14:53:18 -07:00
Harshavardhana
fc03be7891 simplify bucket metadata lookups for versioning/object locking (#17253) 2023-05-22 12:05:14 -07:00
jiuker
b1b00a5055 fix: Avoid incrementing globalStats twice upon error (#17263) 2023-05-22 07:42:27 -07:00
Poorna
2920b0fc6d allow specification of path/virtual style bucket lookup in batch replication (#17201) 2023-05-21 15:16:31 -07:00
Anis Eleuch
a30a55f3b1 Add object parity in listing V2M and listing versions M (#17238) 2023-05-19 09:42:45 -07:00
Praveen raj Mani
ecfb18b26a Freeze the s3 APIs until the notification sub-system initializes completely (#17182) 2023-05-19 08:44:48 -07:00
jiuker
41fa8fa2d2 fix: increment counter when entry is skipped (#17237) 2023-05-19 08:36:52 -07:00
jiuker
e94e6adf91 fix: return proper error if OIDC Discoverydoc fails to respond (#17242) 2023-05-19 02:13:33 -07:00
jiuker
7d433f16c4 before return make globalScannerMetrics.incTime call (#17230) 2023-05-18 13:45:05 -07:00
Klaus Post
b06d7bf834 fix: leaking connections in JSON SQL with limited return (#17239) 2023-05-18 11:26:46 -07:00
Minio Trusted
b784e458cb Update yaml files to latest version RELEASE.2023-05-18T00-05-36Z 2023-05-18 17:56:05 +00:00
Aditya Manthramurthy
9d96b18df0 Add "name" and "description" params to service acc (#17172) 2023-05-17 17:05:36 -07:00
drivebyer
ad2ab6eb3e fix: Give accurate cap to slice (#17224) 2023-05-17 15:14:09 -07:00
jiuker
f037c9b286 Protect the read index from going out of bounds (#17226) 2023-05-17 12:09:41 -07:00
Praveen raj Mani
85912985b6 Check for only network errors in audit webhook for reachability (#17228) 2023-05-17 11:10:33 -07:00
Harshavardhana
876f51a708 remove minio-js from mint tests until next minio-js release 2023-05-17 09:09:54 -07:00
Harshavardhana
f7d29b4a53 cleanup of multipart per disk must cleanup itself only (#17223) 2023-05-17 01:45:58 -07:00
Harshavardhana
06557fe8be allow decommissioned pools to be removed while others are finishing (#17221) 2023-05-16 16:00:57 -07:00
Poorna
2131046427 replication: fix audit log reporting (#17222) 2023-05-16 15:35:08 -07:00
Klaus Post
aaf1abc993 simplify HardLimitReader by using LimitReader for internal usage (#17218) 2023-05-16 13:14:37 -07:00
jiuker
413549bcf5 fix: loadStatsFromDisk() should return nil for configNotFound (#17217) 2023-05-16 12:23:38 -07:00
jiuker
9a799065b3 fix: make slice cap of right size (#17192) 2023-05-16 08:10:07 -07:00
jiuker
fd2959fa3a fix: workers.New err must be returned (#17208) 2023-05-16 08:08:00 -07:00
Anis Eleuch
07927e032a Add a script to filter goroutines waiting for a given number of minutes (#17204) 2023-05-16 08:05:49 -07:00
jiuker
15bec32bb4 fix: tier handlers must write error only once (#17205) 2023-05-15 23:56:52 -07:00
Anis Eleuch
e2b7a08c10 heal: Update object parity with the latest configured SC (#17187) 2023-05-15 21:32:13 -07:00
Harshavardhana
ef2fc0f99e fix: reduce memory usage and temporary files (#17206) 2023-05-15 14:08:54 -07:00
Harshavardhana
d063596430 fix: veeam SOS API 'system.xml' strings (#17202) 2023-05-15 12:06:42 -07:00
jiuker
bd2dc6c670 fix: in healing tracker printTo when err (#17207) 2023-05-15 10:14:48 -07:00
Anis Eleuch
684399433b lock: Retry locking with an increasing random interval (#17200) 2023-05-13 08:42:21 -07:00
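A sketch of what an increasing, randomized retry interval looks like; the constants and function name below are illustrative, not the tuned values in the locker:

```
package lock

import (
	"math/rand"
	"time"
)

// retryInterval grows with the attempt number and adds random jitter so
// competing lockers do not retry in lockstep.
func retryInterval(attempt int) time.Duration {
	base := time.Duration(attempt+1) * 50 * time.Millisecond
	jitter := time.Duration(rand.Int63n(int64(50 * time.Millisecond)))
	return base + jitter
}
```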
Harshavardhana
b62791617c fix: notify systemd as soon as we wait on the OS signal (#17199) 2023-05-12 16:42:17 -07:00
Poorna
e07c2ab868 Use hash.NewLimitReader for internal multipart calls (#17191) 2023-05-12 11:19:08 -07:00
jiuker
203755793c fix: in printEndpointError count error once per init() (#17193) 2023-05-12 10:41:54 -07:00
Anis Eleuch
883c98e26f fix: remove objects when there are skipped versions due to ILM in decom (#17198) 2023-05-12 10:37:38 -07:00
Harshavardhana
f5a20a5d06 allow nodes offline in k8s setups when expanding pools (#17183) 2023-05-11 17:41:33 -07:00
Poorna
ef7177ebbd disallow bucket replication setup with site replication (#17189) 2023-05-11 15:48:40 -07:00
Harshavardhana
3637aad36e do not count ILM expired objects and other skipped objects (#17184) 2023-05-11 13:35:16 -07:00
Aditya Manthramurthy
77db9686fb Update console to v0.27.0 (#17188) 2023-05-11 12:18:17 -07:00
Shireesh Anjal
c326e5a34e Add metrics for webhook endpoint stats (#17179) 2023-05-11 11:24:37 -07:00
jiuker
c23c982593 xmlDecoder err use ErrMalformedXML when PutBucketACLHandler (#17185) 2023-05-11 11:11:15 -07:00
Shireesh Anjal
a3d666356c fix: error in capturing XFS error config in health report (#17176) 2023-05-10 15:20:48 -07:00
jiuker
3cdbc2f414 add validationErr to validateConfig When DeleteIdentityProviderCfg (#17173) 2023-05-10 09:37:30 -07:00
Harshavardhana
b92cdea578 fix: start using pkg/workers to spawn parallel workers (#17170) 2023-05-09 16:37:31 -07:00
jiuker
5e629a99af fix: for profiling duration parsing error reply use ErrInvalidRequest (#17169) 2023-05-09 14:27:49 -07:00
Klaus Post
99c4ffa34f fix: avoid audit log race protection deadlocks (#17168) 2023-05-09 08:11:32 -07:00
Harshavardhana
a7f266c907 allow JWT parsing on large session policy based tokens (#17167) 2023-05-09 00:53:08 -07:00
Praveen raj Mani
57acacd5a7 Support persistent queue store for loggers (#17121) 2023-05-08 21:20:31 -07:00
Han Cen
42fb3cd95e helm: fix pod annotations indentation (#17130) 2023-05-08 07:59:56 -07:00
mstein11
855ed642c3 helm: allow postjob to run without user 1000 (#17160) 2023-05-08 07:59:25 -07:00
jiuker
629503ff73 add Err to BucketExists when NoSuchBucket (#17155) 2023-05-08 07:51:59 -07:00
jiuker
e3a070e3de put *msgp.Reader back to pool (#17156) 2023-05-08 07:51:39 -07:00
Anton Lindholm
5b364bca1f helm-chart: Use minio service account for post-deploy job if available (#17077) 2023-05-06 23:12:56 -07:00
Poorna
c5c1426262 Validate if replication config being added is self referential (#17142) 2023-05-06 13:35:43 -07:00
Denis Krivenko
be18d435a2 helm: declare missing properties in values.yaml (#17153) 2023-05-06 13:34:58 -07:00
mstein11
7eea6cdb12 add etc-path to post-job.yaml in helm chart (#17148) 2023-05-06 13:34:38 -07:00
Harshavardhana
824c55b3a4 fix: few typos and wordings in minio-limits.md 2023-05-05 20:04:52 -07:00
Klaus Post
76913a9fd5 Signed trailers for signature v4 (#16484) 2023-05-05 19:53:12 -07:00
Minio Trusted
2f44dac14f Update yaml files to latest version RELEASE.2023-05-04T21-44-30Z 2023-05-05 06:21:31 +00:00
Harshavardhana
5569acd95c disallow EC:0 if not set during server startup (#17141) 2023-05-04 14:44:30 -07:00
Harshavardhana
1d0211d395 allow deletes on directory objects to perform permanent deletes (#17132) 2023-05-04 14:43:52 -07:00
Anis Eleuch
06cd0a636e Avoid calling KES Status when peers ping each other (#17140) 2023-05-04 11:28:33 -07:00
Klaus Post
7f7b489a3d snowball: use latest time when mtime is missing (#17133) 2023-05-04 07:29:33 -07:00
Klaus Post
bb6f4d7633 Remove redundant checkFormatJSON logging (#17134) 2023-05-04 07:28:37 -07:00
Alex
6e24dff26a Added MINIO_BROWSER_LOGIN_ANIMATION env support for WebUI console (#17123)
Signed-off-by: Benjamin Perez <benjamin@bexsoft.net>
2023-05-03 15:32:50 -07:00
Harshavardhana
e372e4e592 add nodeName to the log while taking drive offline (#17124) 2023-05-03 15:05:45 -07:00
Harshavardhana
9571b0825e add configurable VRF interface and user-timeout (#17108) 2023-05-03 14:12:25 -07:00
Poorna
90e2cc3d4c Add audit logging of site replication multipart proxying (#17122) 2023-05-03 11:19:45 -07:00
Alex
0c0820caef Updated Console version to v0.26.4 (#17119)
Signed-off-by: Benjamin Perez <benjamin@bexsoft.net>
2023-05-03 11:00:23 -07:00
Harshavardhana
9112ca4e29 change ttfb_distribution metrics to histogramMetric (#17115) 2023-05-03 07:31:00 -07:00
Dylan Piergies
8203cb9990 helm: apply templating to Ingress host and environment variable values (#17073) 2023-05-02 23:27:17 -07:00
Harshavardhana
d5bce978a8 upgrade helm to v5.0.9
Signed-off-by: Harshavardhana <harsha@minio.io>
2023-05-02 23:23:26 -07:00
Poorna
ec84bad882 batch replication now supports arbitrary S3 targets (#17113) 2023-05-02 22:52:35 -07:00
Harshavardhana
b53376a3a4 change directory objects to never create new versions (#17109) 2023-05-02 16:09:33 -07:00
Krishnan Parthasarathi
0ec722bc54 Add tags to NewerNoncurrentVersions audit event (#17110) 2023-05-02 12:56:33 -07:00
Anis Eleuch
4640b13c66 Use exponential backoff algo for internode reconnections (#17052) 2023-05-02 12:35:52 -07:00
Praveen raj Mani
1704abaf6b fix: store notification events immediately for persistent queues (#17112) 2023-05-02 07:53:13 -07:00
WGH
ab34f0065c Support systemd notify protocol (#17062) 2023-05-01 23:15:08 -07:00
Klaus Post
e8c0a50862 optimization use small blocks up to 64KB (#17107) 2023-05-01 09:47:49 -07:00
Denis Krivenko
b963f69f34 helm: align chart properties with naming convention (#17065) 2023-04-29 23:59:40 -07:00
Harshavardhana
02d8f3cdc8 fix: remove active healing on .minio.sys/ during startup (#17072) 2023-04-29 02:05:28 -07:00
Harshavardhana
7ae69accc0 allow root user to be disabled via config settings (#17089) 2023-04-28 12:24:14 -07:00
Minio Trusted
701b89f377 Update yaml files to latest version RELEASE.2023-04-28T18-11-17Z 2023-04-28 18:48:25 +00:00
Anis Eleuch
46d45a6923 grafana: Add TCP dial errors panel (#17101) 2023-04-28 11:11:17 -07:00
Klaus Post
7fad0c8b41 Remove checksums from HTTP range request, add part checksums (#17105) 2023-04-28 08:26:32 -07:00
jiuker
6e27264c6b update cleanupRoutine comment (#17102) 2023-04-28 01:11:51 -07:00
Anis Eleuch
5c83c9724f audit: Add request path and host to audit event (#17099) 2023-04-27 22:18:24 -07:00
Anis Eleuch
d5aff735be info: Add drives per set and sets count per pool information (#17100) 2023-04-27 15:24:03 -07:00
Poorna
98c26df53e fix: allow past retention headers to be copied in batch replication (#17095) 2023-04-27 13:43:18 -07:00
Anis Eleuch
2448a9e047 grafana: Remove minio_s3_requests_errors_total metric (#17094) 2023-04-27 10:55:30 -07:00
jiuker
b28d391a22 fix: add correct worker count before startHTTPLogger() (#17091) 2023-04-27 10:51:16 -07:00
jiuker
c8b92f6067 protect wg.Done from being called twice (#17075) 2023-04-27 07:55:36 -07:00
Krishnan Parthasarathi
e7cac8acef Add tags to auditLogLifecycle (#17081) 2023-04-26 17:49:00 -07:00
Anis Eleuch
31b5acc245 tcp: Increase user timeout to 10 minutes (#17087) 2023-04-26 17:48:31 -07:00
Harshavardhana
6105997299 remove unnecessary log when listing resume fails (#17086) 2023-04-26 14:53:25 -07:00
Harshavardhana
8c874884fc fix: do not copy context in DiskInfo cache (#17085) 2023-04-26 12:13:54 -07:00
Aditya Manthramurthy
ebfe81e5fd Fix put bucket policy error code (#17084) 2023-04-26 11:21:27 -07:00
Anis Eleuch
0b7ca094e4 Remove Expect 100-continue in internode communications (#17061) 2023-04-26 09:33:45 -07:00
Praveen raj Mani
72802a5972 Use 'minio/pkg/sync/errgroup' and 'minio/pkg/workers' (#17069) 2023-04-25 22:57:40 -07:00
Harshavardhana
b1f3935c5b allow ListObjects() when a prefix is an object (#17074) 2023-04-25 22:41:54 -07:00
Harshavardhana
dbd53af369 fix: initialize reverse proxy forwarder with right public certs (#17080) 2023-04-25 15:50:32 -07:00
Harshavardhana
b09fe0e50e fix: DeleteBucket for peers() must recreate bucket upon errors (#17079) 2023-04-25 14:16:35 -07:00
Krishnan Parthasarathi
fae9000304 heal: Pick maximally occurring modTime in quorum (#17071) 2023-04-25 10:13:57 -07:00
Harshavardhana
8fd07bcd51 simplify sort.Sort by using sort.Slice (#17066) 2023-04-24 13:28:18 -07:00
Anis Eleuch
6addc7a35d server-info: Return initializing state properly (#17070) 2023-04-24 09:10:02 -07:00
Harshavardhana
477230c82e avoid attempting to migrate old configs (#17004) 2023-04-21 13:56:08 -07:00
Harshavardhana
d1737199ed fix: delete DNS upon success, update failure message (#17059) 2023-04-21 12:12:31 -07:00
Jiffs Maverick
61101d82d9 Rename inodes metric in grafana dashboards (#17030) 2023-04-21 11:07:30 -07:00
Minio Trusted
cebb948da2 Update yaml files to latest version RELEASE.2023-04-20T17-56-55Z 2023-04-21 08:00:05 +00:00
Shireesh Anjal
c61c4b71b2 Upgrade madmin-go to 2.0.20 (#17054) 2023-04-20 10:56:55 -07:00
Harshavardhana
84f31ed45d simplify MRF, converge it to regular healing (#17026) 2023-04-19 07:47:42 -07:00
jiuker
8a81e317d6 verify maxPartID in object options helpers (#17015) 2023-04-18 22:34:30 -07:00
Anis Eleuch
224d9a752f fix: the race in healing tracker code (#17048) 2023-04-18 14:49:56 -07:00
Anis Eleuch
0db34e4b85 Listen bucket events to send empty events with new line (#17037) 2023-04-18 08:11:30 -07:00
Harshavardhana
8a9b9832fd add Dial timeout for Kafka broker pings (#17044) 2023-04-17 15:45:01 -07:00
Klaus Post
f66625be67 Snowball: Extract headers for metadata (#17042) 2023-04-17 12:16:54 -07:00
Harshavardhana
6825bd7e75 fix: inlined objects don't need to honor long locks (#17039) 2023-04-17 12:16:37 -07:00
Denis Krivenko
18515a4e3b Reformat helm chart templates (#16947) 2023-04-16 23:04:15 -07:00
Klaus Post
839b9c9271 Reduce allocations in Walkdir (#17036) 2023-04-15 10:25:25 -07:00
Harshavardhana
dd9ed85e22 implement support for FTP/SFTP server (#16952) 2023-04-15 07:34:02 -07:00
jiuker
e96c88e914 fix: DeleteBucketThrottle must delete ARN (#17034) 2023-04-15 02:14:26 -07:00
jiuker
6c1410f7f5 fix: Type of rejection for FIFO quota input (#17016) 2023-04-15 01:22:18 -07:00
Krishnan Parthasarathi
f92450d8b3 commonParity should pick readable FileInfo (#17032) 2023-04-14 16:23:28 -07:00
Klaus Post
c133979b8e Add part count to checksum (#17035) 2023-04-14 09:44:45 -07:00
Poorna
a9269cee29 heal: avoid logging version not found (#17031) 2023-04-13 19:45:52 -07:00
Harshavardhana
cf42ede92c upgrade helm to v5.0.8
Signed-off-by: Harshavardhana <harsha@minio.io>
2023-04-13 14:49:51 -07:00
Klaus Post
958a480e53 fix: lambda function expiration when cred.Expiration is set (#17029) 2023-04-13 08:10:57 -07:00
吴小白
62151a751d build: support loong64 (#17027) 2023-04-13 08:10:23 -07:00
Minio Trusted
f1ab9df2ee Update yaml files to latest version RELEASE.2023-04-13T03-08-07Z 2023-04-13 07:28:46 +00:00
Anis Eleuch
a42650c065 Add minio_bucket_usage_version_total metric to Prometheus (#17023) 2023-04-12 20:08:07 -07:00
Harshavardhana
bdad3730f7 fix: do not error out if the local bucket is missing (#17025) 2023-04-12 15:44:16 -07:00
Harshavardhana
a5835cecbf fix: regression in counting total requests (#17024) 2023-04-12 14:37:19 -07:00
Denis Krivenko
b19620b324 Remove redundant namespace from the helm chart (#16968) 2023-04-11 21:09:29 -07:00
Poorna
cd6dec49c0 Add trace support for ilm activity (#16993) 2023-04-11 19:22:32 -07:00
Poorna
d350654aee config: fix duplication of replication priority key (#17014) 2023-04-11 19:22:10 -07:00
Krishnan Parthasarathi
6877578bbc Update minio_node_bucket_scans_finished metrics (#17006) 2023-04-11 19:21:34 -07:00
Kunal Singh
10693fddfa docs: fix repetition in wordings (#16999) 2023-04-11 13:23:14 -07:00
Harshavardhana
f3682b6149 allow writes to pools with inconsistent xl.meta (#17008) 2023-04-11 11:17:46 -07:00
ferhat elmas
056ca0c68e refactor: reuse open file in storage interface (#16970) 2023-04-09 23:09:28 -07:00
Krishnan Parthasarathi
25f7a8e406 Indicate RenameData is called by healObject (#16997) 2023-04-09 10:25:37 -07:00
Anis Eleuch
1f1c267b6c Add used inodes to disk info (#16994) 2023-04-07 07:52:14 -07:00
ferhat elmas
eab1dc927b chore: fix automatic issue handling from linter in ci (#16969) 2023-04-07 07:51:31 -07:00
Anis Eleuch
fc94ea1ced trace: Fix <unknown> func name of requests rejected by max clients (#16977) 2023-04-07 07:51:12 -07:00
jiuker
67d2cf8f30 add Err to Get getRemoteTargetClient. (#16982) 2023-04-07 07:50:46 -07:00
Minio Trusted
2c85b84cbc Update yaml files to latest version RELEASE.2023-04-07T05-28-58Z 2023-04-07 06:07:27 +00:00
Harshavardhana
09a25ea7b7 lint: fix some lint issues on files 2023-04-06 22:42:10 -07:00
Klaus Post
260a63ca73 os/windows: do not return errFileNotFound in readDir's (#16988) 2023-04-06 22:28:58 -07:00
Shubhendu
d3f70ea340 Enable audit log for global handlers (#16964)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-04-06 21:03:39 -07:00
Aditya Manthramurthy
ceebd35ef7 Bump up console to 0.26.3 (#16995) 2023-04-06 20:18:14 -07:00
Anis Eleuch
91b6fe1af3 trace: Bootstrap to show the correct source line number (#16989) 2023-04-06 17:51:53 -07:00
Klaus Post
9803f68522 snowball: Restrict zstd window size (#16987) 2023-04-06 17:47:38 -07:00
Harshavardhana
47b7469a60 add buffer pool for proxy forwarder (#16942) 2023-04-06 15:54:12 -07:00
Shubhendu
4c204707fd Correctly remove null version during ILM rule application (#16971)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
Co-authored-by: Harshavardhana <harsha@minio.io>
2023-04-06 14:10:01 -07:00
Harshavardhana
8fd6be0827 avoid mknod in GitHub actions (#16991) 2023-04-06 12:02:41 -07:00
Harshavardhana
c06e0bfef9 set correct Host: value for replication event notification (#16984) 2023-04-06 10:20:53 -07:00
Klaus Post
8625a9dbb3 Make listing metadata permissions stricter (#16974) 2023-04-06 07:52:35 -07:00
Alex
2b71b659e0 Update Console to v0.26.2 (#16981)
Signed-off-by: Benjamin Perez <benjamin@bexsoft.net>
2023-04-06 01:31:09 -07:00
jiuker
0320ac43cb simplify bucketInfo return in GetBucketInfo peer call (#16983) 2023-04-06 01:30:50 -07:00
Anis Eleuch
111c7d4026 assumeRole return the correct http code for auth errors (#16967) 2023-04-05 22:19:31 -07:00
Klaus Post
62c3df0ca3 fix: directory listing on Go 1.20 windows (#16976) 2023-04-05 14:36:49 -07:00
Klaus Post
ae011663e8 mint: Ignore teardown errors (#16979) 2023-04-05 11:10:24 -07:00
Shireesh Anjal
12591cd241 Upgrade madmin-go to 2.0.18 (#16961) 2023-04-05 04:55:55 -07:00
Andrea Longo
0499e1c4b0 Remove ~ from Object Lambda curl example (#16966) 2023-04-04 12:15:28 -07:00
Poorna
3158f2d12e Add support for batch key rotation (#16844) 2023-04-04 10:56:54 -07:00
Praveen raj Mani
51f7f9aaa3 Generalize the event store using go generics (#16910) 2023-04-04 10:52:24 -07:00
jiuker
6e359c586e fix: close chan before return in scanner usage updates (#16960) 2023-04-04 10:51:05 -07:00
Poorna
699a24f7e5 batch: validate versioning on src/tgt buckets (#16955) 2023-04-04 10:50:11 -07:00
jiuker
5fa3665074 add isSysErrNotDir to openFileNoSync (#16958) 2023-04-04 08:00:08 -07:00
Harshavardhana
3b7781835e add lock metrics per node (#16943) 2023-04-03 21:23:24 -07:00
Shubhendu
5fe1b46bfd Enabled to send audit log while version deletion (#16954)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-04-03 11:58:04 -07:00
Harshavardhana
f65cce4317 move mint tests to separate folders to not confuse GitHub (#16940) 2023-03-31 14:38:10 -07:00
Allan Roger Reid
27d0d22e5d docs: Specify correct port in docker-compose README.md (#16939) 2023-03-31 12:20:56 -07:00
Poorna
407c9ddcbf feat: add batch replicate from remote MinIO to local cluster (#16922) 2023-03-31 10:48:36 -07:00
Anis Eleuch
d90d0c8931 Use one http response recorder per external http call (#16938) 2023-03-31 09:37:29 -07:00
Harshavardhana
216a471bbb on quorum DeleteObject() errors attempt an MRF (#16932) 2023-03-31 08:15:41 -07:00
jiuker
a7b7860e0e fix: potential data conflicts save site-resync metadata (#16926) 2023-03-30 20:59:45 -07:00
Klaus Post
d703daa480 Add metadata extension to ListObjectVersions (#16930) 2023-03-30 12:20:42 -07:00
Poorna
dc8fdcb9c9 fix: error checking in DeleteBucket (#16929) 2023-03-30 11:54:08 -07:00
Harshavardhana
7a6c4e438e allow more workers for ILM expiration (#16924) 2023-03-30 10:47:15 -07:00
jiuker
c468b4e2a8 fix: avoid out of slice index (#16925) 2023-03-30 08:03:48 -07:00
jiuker
b04956a676 fix: put *msgp.Reader back in pool (#16927) 2023-03-30 08:03:12 -07:00
Aditya Manthramurthy
518f6e4d39 fix: missing return after error response (#16920) 2023-03-29 16:21:13 -07:00
Allan Roger Reid
483b226cc1 fix: avoid logging when object/version not found in replication (#16919) 2023-03-29 15:02:45 -07:00
Harshavardhana
13151cbb2b [testing] add mint runner test (#16868) 2023-03-29 11:38:43 -07:00
Harshavardhana
8e02660a0d update all our deps (#16899) 2023-03-28 03:45:24 -07:00
jiuker
16feef2a2c fix: time.Parse RFC3339Nano (#16892) 2023-03-27 11:51:54 -07:00
jiuker
66ff17e452 fix: Avoid multiple write responses (#16894) 2023-03-27 09:15:23 -07:00
Harshavardhana
4c5edacae2 ignore operation timedout errors (#16891) 2023-03-26 03:16:51 -07:00
Anis Eleuch
8b4d0255b7 Set Console global Root CAs early to trust provided certs (#16890) 2023-03-25 09:58:38 -07:00
Minio Trusted
58c129f94a Update yaml files to latest version RELEASE.2023-03-24T21-41-23Z 2023-03-24 23:03:23 +00:00
Poorna
74040b457b Allow setting sync mode for site replication (#16876) 2023-03-24 14:41:23 -07:00
Anis Eleuch
c259a8ea38 Set tcp user timeout to clean sockets with data in the buffer (#16887) 2023-03-24 08:10:58 -07:00
mstmdev
2d51e42305 Remove the redundant conditional in the validateParity function (#16866) 2023-03-23 14:06:22 -07:00
Klaus Post
5e3bfd2148 Add user tags when listing with metadata (#16883) 2023-03-23 10:27:19 -07:00
Klaus Post
8b0ab6ead6 Revert "Make localLocker lock attempts cancellable (#16510)" (#16884) 2023-03-23 10:26:21 -07:00
Shubhendu
b1b0aadabf Revert query parameter src from diag upload if callhome enabled (#16881) 2023-03-23 00:24:58 -07:00
Harshavardhana
ac7d9c449a add missing expiration information from 'sts info' (#16878) 2023-03-22 16:47:02 -07:00
Anis Eleuch
1346561b9d return quorum error instead of insufficient storage error (#16874) 2023-03-22 16:22:37 -07:00
Minio Trusted
4bc52897b2 Update yaml files to latest version RELEASE.2023-03-22T06-36-24Z 2023-03-22 21:16:15 +00:00
dependabot[bot]
035791669e build(deps): bump github.com/nats-io/nats-streaming-server from 0.24.1 to 0.24.3 (#16752)
Signed-off-by: dependabot[bot] <support@github.com>
2023-03-21 23:36:24 -07:00
Krishnan Parthasarathi
6017b63a06 top-locks: Group by lock request ID (#16860) 2023-03-21 18:35:29 -07:00
Klaus Post
11d04279c8 Add lazy init of audit logger (#16842) 2023-03-21 10:50:40 -07:00
Harshavardhana
0448728228 fix: add deadline conns and dnsCache for remote transports (#16865) 2023-03-21 08:49:20 -07:00
Harshavardhana
12047702f5 fix: tweak the maintenance=true to satisfy baremetal first (#16864) 2023-03-21 08:48:38 -07:00
Harshavardhana
fb1492f531 check for quorum errors for DeleteBucket() (#16859) 2023-03-20 23:38:06 -07:00
Minio Trusted
d14ead7bec Update yaml files to latest version RELEASE.2023-03-20T20-16-18Z 2023-03-21 01:07:32 +00:00
Aditya Manthramurthy
05444a0f6a Use the official pub key to always verify binary (#16857) 2023-03-20 13:16:18 -07:00
Harshavardhana
b3c54ec81e reject object names with '\' on windows (#16856) 2023-03-20 13:16:00 -07:00
Harshavardhana
6c11dbffd5 add crash protection from backend modifications (#16846) 2023-03-20 09:08:42 -07:00
Harshavardhana
3b5dbf9046 allow bootstrapping to validate internode tokens (#16853) 2023-03-20 01:40:24 -07:00
Aditya Manthramurthy
09c733677a Add test for fixed post policy exploit (#16855) 2023-03-20 01:06:45 -07:00
Harshavardhana
8d6558b236 fix: convert '\' to '/' on windows (#16852) 2023-03-20 00:35:25 -07:00
Aditya Manthramurthy
67f4ba154a fix: post policy request security bypass (#16849) 2023-03-19 21:15:20 -07:00
Poorna
440ad20c1d Add support for batch job cancellation (#16843) 2023-03-17 23:42:43 -07:00
Krishnan Parthasarathi
31fba6f434 Save bootstrap trace events in a circular buffer (#16823) 2023-03-17 16:01:03 -07:00
Harshavardhana
280442e533 reduce 250ms to 50ms retry looking for metacache block (#16795) 2023-03-17 14:44:01 -07:00
Shubhendu
850a945a18 Added query parameter src to diag upload if callhome enabled (#16837)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-03-17 08:30:16 -07:00
Harshavardhana
46f9049fb4 simplify error responses for KMS (#16793) 2023-03-16 11:59:42 -07:00
Aditya Manthramurthy
58266c9e2c Add enable flag for LDAP IDP config (#16805) 2023-03-16 11:58:59 -07:00
Poorna
d1e775313d support decommissioning of tiered objects (#16751) 2023-03-16 07:48:05 -07:00
Anis Eleuch
a65df1e67b debug: new tool to reorder local erasure disks (#16816) 2023-03-15 13:53:17 -07:00
Harshavardhana
e700be8cd6 fix: return appropriate Location header for MakeBucket() (#16820) 2023-03-15 13:40:40 -07:00
Harshavardhana
3fdd574f54 update go dependencies (#16798) 2023-03-15 11:59:17 -07:00
Harshavardhana
e0f4dd6027 remove unnecessary logs from WalkDir(), PutObject() (#16818) 2023-03-15 11:52:23 -08:00
Harshavardhana
de02eca467 restore rotating root credentials properly (#16812) 2023-03-15 08:07:42 -07:00
Nitish Tiwari
50dbd2cacc Update audit log flow to use new headers with unit (#16797) 2023-03-13 22:50:19 -07:00
Minio Trusted
c2f9cc5824 Update yaml files to latest version RELEASE.2023-03-13T19-46-17Z 2023-03-13 22:11:07 +00:00
Harshavardhana
c7f7e67a10 Do not allow adding root user to IAM subsystem (#16803) 2023-03-13 12:46:17 -07:00
Klaus Post
628042e65e tests: Protect globalLocalDrives against races (#16800) 2023-03-13 06:04:20 -07:00
ferhat elmas
cde7eeb660 chore: drop go versions in static analysis (#16790) 2023-03-11 22:10:33 -08:00
Aditya Manthramurthy
6305b206e1 fix: site-repl should heal STS with virtual parent (#16792) 2023-03-10 16:21:51 -08:00
Klaus Post
d85da9236e Add Object Version count histogram (#16739) 2023-03-10 08:53:59 -08:00
Minio Trusted
9800760cb3 Update yaml files to latest version RELEASE.2023-03-09T23-16-13Z 2023-03-10 00:59:46 +00:00
Anis Elleuch
5c087bdcad fix: a cosmetic error reporting with a lock timeout (#16788) 2023-03-09 15:16:13 -08:00
Klaus Post
a547bf517d Remove locks on usage cache (#16786) 2023-03-09 15:15:46 -08:00
Harshavardhana
b984bf8d1a allow expiration of all versions during Listing() (#16757) 2023-03-09 15:15:30 -08:00
Daniel Valdivia
18f9cccfa7 Upgrade Console to v0.25.0 (#16782)
Signed-off-by: Daniel Valdivia <18384552+dvaldivia@users.noreply.github.com>
2023-03-08 12:15:13 -08:00
Poorna
fb6ab1cca2 fix: allow replication of 'null' delete markers (#16773) 2023-03-08 07:03:29 -08:00
Krishnan Parthasarathi
56c57e2c53 tier-stats: Avoid repeated logs (#16774) 2023-03-07 16:43:05 -08:00
Poorna
a6057c35cc Avoid peer notification when peer is offline, tune retries (#16737) 2023-03-07 08:13:28 -08:00
Harshavardhana
901887e6bf feat: add lambda transformation functions target (#16507) 2023-03-07 08:12:41 -08:00
Poorna
ee54643004 Avoid unnecessary replication heal attempts (#16769) 2023-03-07 07:43:38 -08:00
Harshavardhana
0a17acdb34 return error if policy changes on disabled groups (#16766) 2023-03-06 10:46:24 -08:00
Harshavardhana
72e5212842 fix: handle syscall.EROFS also for osIsPermission() (#16765) 2023-03-06 08:56:29 -08:00
ferhat elmas
714283fae2 cleanup ignored static analysis (#16767) 2023-03-06 08:56:10 -08:00
ferhat elmas
3423028713 cleanup Go linter settings (#16736) 2023-03-04 20:57:35 -08:00
jiuker
9d062b37d7 return underlying error with BackendDown{} error (#16738) 2023-03-03 23:56:53 -08:00
Harshavardhana
4636d3a9c3 upgrade all dependencies (#16753) 2023-03-03 18:22:40 -08:00
Aditya Manthramurthy
c95ede35c1 Switch windows CI back to go 1.19.x (#16755) 2023-03-03 15:19:28 -08:00
Aditya Manthramurthy
7415e1aa56 Switch to go1.20 in CI (#16743) 2023-03-03 10:15:03 -08:00
jiuker
f350953a19 calculate disk cache usage percent accurately (#16740) 2023-03-02 20:32:22 -08:00
Daniel Valdivia
958bba5b42 Attach creds, owner and region to madmin calls (#16658)
Signed-off-by: Daniel Valdivia <18384552+dvaldivia@users.noreply.github.com>
2023-03-02 14:35:24 -08:00
Poorna
0f2b95b497 fix a data race in IAM loading (#16742) 2023-03-02 14:32:54 -08:00
Klaus Post
d07089ceac Fix scanner deadlock on lost global lock (#16726) 2023-02-28 21:34:45 -08:00
Aditya Manthramurthy
47dfa62384 Update LDAP doc for new policy attach|detach cmds (#16723) 2023-02-27 21:04:27 -08:00
Minio Trusted
3a3265cf88 Update yaml files to latest version RELEASE.2023-02-27T18-10-45Z 2023-02-28 00:30:32 +00:00
Harshavardhana
0ff931dc76 fix: allow CORS to work by default (#16713) 2023-02-27 10:10:45 -08:00
Praveen raj Mani
4d708cebe9 Support adding service accounts with expiration (#16430)
Co-authored-by: Harshavardhana <harsha@minio.io>
2023-02-27 10:10:22 -08:00
dorman
4d7c8e3bb8 Remove redundant log (#16710)
Co-authored-by: z30001483 <zekaifeng2@huawei.com>
2023-02-27 09:59:47 -08:00
Aditya Manthramurthy
8cde38404d Add metrics for custom auth plugin (#16701) 2023-02-27 09:55:18 -08:00
Krishnan Parthasarathi
fe7bf6cbbc Support tier-add if tier backend not empty (#16715) 2023-02-27 09:26:26 -08:00
Shubhendu
8b4eb2304b Set logger webhook proxy on subnet proxy change (#16665)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-02-27 08:35:36 -08:00
Harshavardhana
ae029191a3 liveness returns "busy" if queued requests > available capacity (#16719) 2023-02-27 08:34:52 -08:00
Harshavardhana
bfedea9bad fix: disk healing should honor the right pool/set index (#16712) 2023-02-27 04:55:32 -08:00
Aditya Manthramurthy
7777d3b43a Remove globalSTSTLSConfig (#16709) 2023-02-26 23:37:00 -08:00
Aditya Manthramurthy
9ed4fc9687 Remove globalOpenIDConfig (#16708) 2023-02-25 21:01:37 -08:00
jiuker
b49b39e99d fix: errNoSuchPolicy should use errors.Is (#16656) 2023-02-25 00:01:37 -08:00
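The fix above is about comparing wrapped errors. A minimal, self-contained sketch of the difference — the sentinel name mirrors the commit subject, everything else is illustrative:

```
package main

import (
	"errors"
	"fmt"
)

var errNoSuchPolicy = errors.New("no such policy")

func main() {
	// Wrapping with %w preserves the sentinel for errors.Is, but a plain
	// == comparison against the wrapped error no longer matches.
	err := fmt.Errorf("loading IAM policy: %w", errNoSuchPolicy)
	fmt.Println(err == errNoSuchPolicy)          // false
	fmt.Println(errors.Is(err, errNoSuchPolicy)) // true
}
```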
jiuker
cd3a2de5a3 add isValidLocation to common parseLocation (#16690) 2023-02-25 08:09:20 +05:30
jiuker
6e8960ccdd fix: delete globalProfiler should lock (#16697) 2023-02-25 08:07:44 +05:30
Aditya Manthramurthy
e05f3d5d84 Remove globalLDAPConfig (#16706) 2023-02-25 08:07:22 +05:30
Anis Elleuch
94c6cb1323 tests: Add test for S3 API error codes (#16705) 2023-02-25 08:06:29 +05:30
Aditya Manthramurthy
3f81cd1b22 Update OpenID doc with info on redirection params (#16704) 2023-02-24 12:13:00 -08:00
Anis Elleuch
8da0f4c5bb Better error message when TLS certs do not have proper permissions (#16703) 2023-02-24 06:34:55 -08:00
Klaus Post
9acf1024e4 Remove bloom filter (#16682)
Removes the bloom filter since it has such limited usability, often gets saturated anyway, and adds a bunch of complexity to the scanner.

Also saves a tiny bit of CPU on each write operation.
2023-02-24 09:03:31 +05:30
Harshavardhana
b21d3f9b82 event target registration failures must be returned (#16700) 2023-02-23 21:59:14 +05:30
Minio Trusted
2bbf380262 Update yaml files to latest version RELEASE.2023-02-22T18-23-45Z 2023-02-22 21:50:11 +00:00
Harshavardhana
f678bcf7ba update dependencies to the latest releases (#16694) 2023-02-22 10:23:45 -08:00
Harshavardhana
a0f06eac2a add Veeam SOS API first implementation (#16688) 2023-02-22 19:54:57 +05:30
jiuker
83fe1a2732 log: add more info about BROWSER_URL (#16678) 2023-02-22 19:54:05 +05:30
Harshavardhana
5c98223c89 add correct HostId instead of deploymentId for error responses (#16686) 2023-02-22 15:41:09 +05:30
jiuker
663a0b7783 save correct bucketInfo on its indexes (#16685) 2023-02-22 14:08:34 +05:30
Anis Elleuch
6efe4d1df6 perf: Only remove generated data when no bucket name specified (#16610) 2023-02-21 21:21:40 -08:00
Daniel Valdivia
fb17f97cf3 move audit and logger message structure to minio/pkg (#16655)
Signed-off-by: Daniel Valdivia <18384552+dvaldivia@users.noreply.github.com>
2023-02-21 21:21:17 -08:00
Shubhendu
6b65ba1551 Added attribute proxy for mc admin config set ALIAS logger_webhook (#16657)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2023-02-21 21:19:46 -08:00
Poorna
9202c6e26a Reload bucket targets when replication config is refreshed (#16684) 2023-02-21 21:18:49 -08:00
Poorna
59a5456091 fix: STS error translation to API error (#16683) 2023-02-22 09:57:48 +05:30
Allan Roger Reid
8bfe972bab Set meaningful message from minio with env variable KMS_SECRET_KEY (#16584) 2023-02-22 07:13:01 +05:30
Klaus Post
fd6622458b Add detailed scanner trace output and notifications (#16668) 2023-02-21 09:33:33 -08:00
Poorna
8a08861dd9 fix: healing of replication config for endpoint changes (#16648) 2023-02-20 16:06:13 +05:30
Harshavardhana
82dcfd4e10 update dependencies to latest releases (#16651) 2023-02-20 16:05:20 +05:30
Harshavardhana
b66d7dc708 add missing x-amz-id-2 to event notification date (#16646) 2023-02-20 15:41:47 +05:30
Harshavardhana
eebdd2b31d update NOTICE to 2015-2023 copyright 2023-02-19 00:03:50 +05:30
Harshavardhana
b94733ab31 avoid locks when unnecessary in SiteReplicationMetaInfo() (#16650) 2023-02-18 05:35:22 -08:00
Minio Trusted
7f2c90a0ed Update yaml files to latest version RELEASE.2023-02-17T17-52-43Z 2023-02-17 19:07:39 +00:00
Klaus Post
84bb7d05a9 fix: healing deadlocks and ordering (#16643) 2023-02-17 23:22:43 +05:30
Harshavardhana
98a84d88e2 fix: trim and ignore './' directories (#16642) 2023-02-17 07:15:03 -08:00
jiuker
e470268c7c fix: a possible closer leak in SelectObjectHandler (#16598) 2023-02-17 01:44:40 -08:00
jiuker
3a6cd4f73d fix: sync.pool won't Put reader back (#16638) 2023-02-17 01:42:43 -08:00
Harshavardhana
6ea150fd68 fix: avoid printing certain errors under few locations (#16631) 2023-02-17 01:40:31 -08:00
Anis Elleuch
a7188bc9d0 fix: evaluate BypassGov policy action in deletion correctly (#16635) 2023-02-17 07:53:34 +05:30
Harshavardhana
e1e9ddd4a4 use kes.Status() for Status() call (#16629) 2023-02-16 22:12:24 +05:30
Krishnan Parthasarathi
a1dd08f2e6 Include tier name in MinIO/S3 target user-agent (#16630) 2023-02-15 22:09:46 -08:00
Krishnan Parthasarathi
d136ac0596 Don't close transition task channel on server exit (#16627) 2023-02-15 22:09:25 -08:00
Poorna
c33a237067 fix: under site replication disallow remote target modification (#16628) 2023-02-15 20:22:13 -08:00
Poorna
eb7d3da994 Fix site replication status reporting of quota (#16626) 2023-02-16 08:08:35 +05:30
Harshavardhana
37134e42d4 ignore io.EOF, io.ErrUnexpectedEOF on xl.meta reads in WalkDir() (#16625) 2023-02-15 07:12:48 -08:00
Klaus Post
626a4efaad Update reedsolomon to v1.11.7 (#16624) 2023-02-15 05:07:35 -08:00
Harshavardhana
0c1f8b4e0f add user-agent for all minio.Client usage (#16619) 2023-02-14 13:19:30 -08:00
Anis Elleuch
857674c3a0 heal: Do not mark buckets as done when there is no online disks (#16621) 2023-02-14 12:50:13 -08:00
Harshavardhana
15a75bd79b ignore preconditionFailed error in batch replication (#16615) 2023-02-14 07:22:08 -08:00
Andreas Auernhammer
74887c7372 kms: add support for KES API keys and switch to KES Go SDK (#16617)
Signed-off-by: Andreas Auernhammer <hi@aead.dev>
2023-02-14 07:19:20 -08:00
Harshavardhana
31188e9327 add parallel workers in batch replication (#16609) 2023-02-13 12:07:58 -08:00
Harshavardhana
ee6d96eb46 Periodically remove stale buckets from in-memory (#16597) 2023-02-13 08:09:52 -08:00
Minio Trusted
11fe2fd79a update helm v5.0.7
Signed-off-by: Harshavardhana <harsha@minio.io>
2023-02-13 16:07:23 +05:30
atalakey4work
c2863cc6ef helm: remove parentheses from podDisruptionBudget (#16605) 2023-02-13 02:36:06 -08:00
Anis Elleuch
d6d01067a0 fix: allow global leader lock context merge to be canceled (#16603)
The global leader lock was originally designed to be acquired only once,
until the node is killed. However, the code currently acquires it
repeatedly during the lifetime of the server, which results in a
goroutine leak.
2023-02-13 01:26:38 -08:00
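A minimal sketch of the pattern the note above points at, assuming nothing about MinIO's actual lock code: a context "merge" needs a goroutine to watch the second parent, so the merge must be cancellable or that goroutine can pile up when the merge is created repeatedly.

```
package main

import "context"

// mergeContext returns a context cancelled when either parent is done.
// The returned cancel func must be called so the watcher goroutine exits
// even if neither parent is ever cancelled.
func mergeContext(a, b context.Context) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(a)
	go func() {
		select {
		case <-b.Done():
			cancel() // propagate cancellation from the second parent
		case <-ctx.Done(): // parent a finished, or caller called cancel()
		}
	}()
	return ctx, cancel
}

func main() {
	ctx, cancel := mergeContext(context.Background(), context.Background())
	defer cancel() // releases the watcher goroutine
	_ = ctx
}
```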
Minio Trusted
1d3b18c3f4 update helm v5.0.6
Signed-off-by: Harshavardhana <harsha@minio.io>
2023-02-13 12:23:06 +05:30
jiuker
a15b6f21b8 remove incorrect use of WaitGroup (#16596) 2023-02-12 20:59:45 -08:00
Anis Elleuch
689179bf18 ServerInfo: return per erasure set information (#16583) 2023-02-11 18:31:56 +05:30
Minio Trusted
bf749eec61 Update yaml files to latest version RELEASE.2023-02-10T18-48-39Z 2023-02-10 20:55:45 +00:00
Klaus Post
d0f4cc89a5 Re-add Veeam Listing workaround (#16593) 2023-02-10 10:48:39 -08:00
Harshavardhana
d65debb6bc fix: comply with RFC6750 UserInfo endpoint requirements (#16592) 2023-02-10 22:20:25 +05:30
Harshavardhana
72daccd468 fix: scanner in healing cycle must use actual size (#16589) 2023-02-10 06:53:03 -08:00
Harshavardhana
b363400587 fix: username replacements for aws:username must use parentUser (#16591) 2023-02-10 06:52:31 -08:00
k0i
6b41f941b6 fix: README.md in docs/config (#16564) 2023-02-10 03:02:11 -08:00
jiuker
b9f5f9ba3f fix: aclHandlers convert XML parse error to relevant client error (#16587) 2023-02-10 02:59:10 -08:00
Anis Elleuch
c8ffa59d28 Periodically refresh buckets metadata from the backend disks (#16561)
fixes #16553
2023-02-09 10:29:20 -08:00
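A small sketch of a periodic refresh loop like the one the commit above describes; the interval and the refresh callback here are assumptions for illustration, not MinIO's actual values.

```
package main

import (
	"context"
	"log"
	"time"
)

// refreshLoop calls refresh on every tick until ctx is cancelled.
func refreshLoop(ctx context.Context, interval time.Duration, refresh func(context.Context) error) {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			if err := refresh(ctx); err != nil {
				log.Printf("bucket metadata refresh failed: %v", err)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	refreshLoop(ctx, time.Second, func(context.Context) error {
		log.Println("refreshing bucket metadata from backend disks")
		return nil
	})
}
```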
Minio Trusted
1141187bf2 Update yaml files to latest version RELEASE.2023-02-09T05-16-53Z 2023-02-09 06:21:34 +00:00
Poorna
52aeebebea Fix site replication meta info call to be non-blocking (#16526)
Co-authored-by: Harshavardhana <harsha@minio.io>
2023-02-08 21:16:53 -08:00
Aditya Manthramurthy
e101384aa4 Update console to v0.23.1 (#16574) 2023-02-08 20:01:53 -08:00
Harshavardhana
71f02adfca Revert "Print golang http errors in MinIO log format (#16465)"
This reverts commit 1fd7946dce.
2023-02-09 09:27:27 +05:30
Krishnan Parthasarathi
9de26531e4 tiering: UpdateWorkers may be called before Init (#16573) 2023-02-08 19:13:34 -08:00
Anis Elleuch
fadc46b906 Add the access key and parent user in the audit log (#16572) 2023-02-08 11:05:26 -08:00
Anis Elleuch
b1d98febfd New disk healing goes through the healing workers (#16568) 2023-02-08 09:25:29 -08:00
jiuker
1828fb212a fix: avoid goroutine leak after timeouts in PeerMetrics (#16569) 2023-02-08 09:11:16 -08:00
Harshavardhana
c97f50e274 replication syncs happen in 60sec intervals (#16571) 2023-02-08 08:23:26 -08:00
jiuker
be92046dfd Anonymous pools in health info output are double counted (#16566) 2023-02-08 06:58:50 -08:00
Krishnan Parthasarathi
990fc415f7 Ensure safety of transitionState at startup (#16563) 2023-02-07 23:11:42 -08:00
Harshavardhana
d8daabae9b make site replication healing safer (#16560) 2023-02-07 21:44:42 -08:00
Harshavardhana
84fe4fd156 fix: multiObjectDelete by passing versionId for authorization (#16562) 2023-02-08 08:01:00 +05:30
Harshavardhana
747d475e76 initialize subsystems that are not dependent on buckets first (#16559) 2023-02-07 12:46:47 -08:00
Anis Elleuch
095b518802 Show a better error msg when internal data encryption key is incorrect (#16549) 2023-02-07 05:22:54 -08:00
Harshavardhana
0319ae756a fix: pass proper username (simple) string as expected (#16555) 2023-02-07 03:43:08 -08:00
Harshavardhana
11c7ecb5cf support if-match/if-none-match with s3 uploads (#16551) 2023-02-06 18:58:29 -08:00
Cesar Celis Hernandez
422c396d73 Removing old action that is no longer needed (#16550) 2023-02-07 07:06:29 +05:30
Anis Elleuch
ffd57fde90 Convert 'server closed idle connection' to errDiskNotFound (#16548) 2023-02-06 10:42:08 -08:00
jiuker
a451d1cb8d fix: close resp.body checking for kubernetes version (#16547) 2023-02-06 10:41:41 -08:00
Harshavardhana
14cf8f1b22 upgrade deps for minio/pkg v1.6.1 to include groups conditions (#16538) 2023-02-06 09:27:29 -08:00
Harshavardhana
5996c8c4d5 feat: allow offline disks on a fresh start (#16541) 2023-02-06 09:26:09 -08:00
Harshavardhana
21885f9457 fix: liveness/readiness must return errors if KMS is unreachable (#16540) 2023-02-06 08:55:56 -08:00
Christian Niessner
6ac48aff46 helm: shared secrets handling for user and svcacct's (#16379) 2023-02-05 21:51:10 -08:00
Mathieu Parent
85ff76e7b0 fix(helm): use PodDisruptionBudget v1 for newer k8s versions (#16466) 2023-02-05 21:50:45 -08:00
Harshavardhana
e47a31f9fc fix: object size distribution in metrics for all objects (#16539) 2023-02-04 21:10:10 -08:00
Harshavardhana
517fcd423d add necessary tools to our docker release (#16536) 2023-02-03 19:40:25 -08:00
Harshavardhana
b780359598 helm version v5.0.5 2023-02-04 02:24:02 +05:30
Harshavardhana
aa8b9572b9 remove double ENABLED help output (#16528) 2023-02-03 05:52:52 -08:00
Cesar Celis Hernandez
8ca14e6267 Updating enterprise action (#16518) 2023-02-02 19:23:31 +05:30
Poorna
876e1a91b2 replication: Fix typo checking PreconditionFailed status code (#16517) 2023-02-02 19:22:02 +05:30
Klaus Post
0b7989aa4b Fix Kafka initialization crash (#16523) 2023-02-02 19:21:19 +05:30
Anis Elleuch
2278fc8f47 Print original error when IAM load is failed in some places (#16511) 2023-02-01 17:32:22 +05:30
Harshavardhana
a91f353621 support 'mc admin service restart' for windows (#16512) 2023-02-01 17:31:46 +05:30
Klaus Post
cdb1b48ad9 Make localLocker lock attempts cancellable (#16510) 2023-01-31 09:41:17 -08:00
Minio Trusted
a24037bfec Update yaml files to latest version RELEASE.2023-01-31T02-24-19Z 2023-01-31 07:49:15 +00:00
Kaan Kabalak
2d0f30f062 Fix typo in code comment (#16509) 2023-01-31 07:54:19 +05:30
Krishnan Parthasarathi
cea2ca8c8e Add restore-status header for multipart objects (#16508) 2023-01-31 07:53:45 +05:30
Klaus Post
f713436dd0 Fix truncated list response on deleted replicated objects (#16504) 2023-01-30 09:13:53 -08:00
Klaus Post
b923a62425 Check pool-index for invalid setups (#16501) 2023-01-30 18:33:07 +05:30
Harshavardhana
67fce4a5b3 fix: dangling delete() upon success should return 404 (#16494) 2023-01-27 12:43:45 -08:00
Poorna
eaa65b7ade fix replication healing on list to consider all versions (#16496) 2023-01-27 12:43:28 -08:00
Poorna
820d94447c replication: fix target bucket passed on GET proxy (#16495) 2023-01-27 10:24:51 -08:00
Poorna
ed20134a7b replication: detect proxy header presence correctly (#16489) 2023-01-27 01:29:32 -08:00
Harshavardhana
d19cbc81b5 fix: do not return IAM/Bucket metadata replication errors to client (#16486) 2023-01-26 11:11:54 -08:00
Anis Elleuch
1fd7946dce Print golang http errors in MinIO log format (#16465) 2023-01-26 22:46:16 +05:30
Klaus Post
027ff0f3a8 fix: set modTime to current in snowball if archive shows empty (#16482) 2023-01-26 22:20:35 +05:30
Jan Zhanal
8fa80874a6 doc: LDAP/AD - nested groups (#16483) 2023-01-26 22:17:59 +05:30
Harshavardhana
430669cfad do not fail groupadd if group already exists
fixes #16488
2023-01-26 21:32:02 +05:30
Harshavardhana
54b561898f fix: anonymize the x-amz-id-2 value from hostname (#16478) 2023-01-25 10:25:36 -08:00
Harshavardhana
65c104a589 add x-amz-id-2 to indicate the node that received the request (#16474) 2023-01-25 09:14:10 -08:00
Anis Elleuch
0a0416b6ea Better error when setting up replication with a service account alias (#16472) 2023-01-25 21:50:12 +05:30
Anis Elleuch
441babdc41 Rename peer S3 prefix to avoid collision in the future (#16473) 2023-01-25 06:46:30 -08:00
Minio Trusted
1bf1fafc86 Update yaml files to latest version RELEASE.2023-01-25T00-19-54Z 2023-01-25 02:09:27 +00:00
Aditya Manthramurthy
50d58e9b2d Bump up console to v0.23.0 (#16470) 2023-01-24 16:19:54 -08:00
Harshavardhana
e64b9f6751 fix: disallow SSE-C encrypted objects on replicated buckets (#16467) 2023-01-24 15:46:33 -08:00
Florian Schwab
d67a846ec4 allow restarting of decommissioning if completed, failed or canceled (#16464) 2023-01-24 07:07:59 -08:00
Poorna
ca2a1c3f60 replication: clone metrics while loading metrics cache (#16462) 2023-01-24 02:10:32 -08:00
Poorna
93fbb228bf Validate if parent user exists for service acct (#16443) 2023-01-24 08:17:18 +05:30
Harshavardhana
3683673fb0 add missing gorilla/mux migration, update credits (#16461) 2023-01-23 08:46:37 -08:00
Anis Elleuch
f37a5b6dae Add CPU info in the check update user-agent (#16447) 2023-01-23 08:07:55 -08:00
Harshavardhana
31b0decd46 migrate to minio/mux from gorilla/mux (#16456) 2023-01-23 16:42:47 +05:30
Harshavardhana
eb561e1c05 allow bootstrap platform checks to be pool specific (#16455) 2023-01-23 16:24:50 +05:30
Mathieu Parent
54c9ecff5b fix(helm): fsGroup is only on podSecurityContext (#16402) 2023-01-21 10:03:03 -08:00
Wessel Valkenburg (prevue.ch)
edcd72585d Update post-job.yaml in Helm chart (#16387) 2023-01-21 10:02:08 -08:00
Alex
1a17fc17bb GitHub Workflows security hardening (#15708) 2023-01-21 09:55:17 -08:00
Poorna
ddad231921 replication: Avoid logging PreConditionFailed error (#16450) 2023-01-21 07:33:04 +05:30
Anis Elleuch
e73894fa50 grafana: Show one metric for the total data growth (#16449) 2023-01-20 09:39:28 -08:00
Klaus Post
03b94f907f fix: deleted object names for directory objects (#16448) 2023-01-20 21:16:06 +05:30
Minio Trusted
3fa7218c44 Update yaml files to latest version RELEASE.2023-01-20T02-05-44Z 2023-01-20 08:11:58 +00:00
Shireesh Anjal
0f591d245d fix: incorrect anonymization of drive endpoint (#16442) 2023-01-20 07:35:44 +05:30
Poorna
1b02e046c2 Fix bandwidth monitoring to be per remote target (#16360) 2023-01-19 18:52:16 +05:30
Harshavardhana
d08e3cc895 add a way to avoid blocking queueHealTask() depending on caller (#16433) 2023-01-19 18:50:54 +05:30
Anis Elleuch
d98116559b Use async healing in PutObject call (#16431) 2023-01-19 00:54:22 -08:00
Krishnan Parthasarathi
71c95ad0d0 Signal stop-rebalance to all rebalancing pools (#16438) 2023-01-19 06:54:23 +05:30
Minio Trusted
5c1a4ba5f9 Update yaml files to latest version RELEASE.2023-01-18T04-36-38Z 2023-01-18 07:46:44 +00:00
Aditya Manthramurthy
698862ec5d Fix transports/timeouts related regressions (#16427) 2023-01-18 10:06:38 +05:30
Harshavardhana
b4ef5ff294 remove unnecessary code checking for supported features (#16423) 2023-01-17 19:37:47 +05:30
Harshavardhana
3db658e51e use correct xml package for custom MarshalXML() (#16421) 2023-01-17 05:08:33 +05:30
Shireesh Anjal
5a9f7516d6 Add monthly license update job (#16391) 2023-01-17 05:08:15 +05:30
Anis Elleuch
3039fd4519 Optimize background heal status to use LocalStorageInfo (#16414) 2023-01-17 05:02:00 +05:30
Harshavardhana
095fc0561d feat: allow decom of multiple pools (#16416) 2023-01-16 21:36:34 +05:30
Anis Elleuch
beb1924437 Properly restart fresh disk healing when failed in some places (#16413) 2023-01-14 05:06:46 +05:30
jiuker
c8e1154f1e fix: reading from erasureDisks must be protected via read lock() (#16407) 2023-01-13 04:16:23 -08:00
Poorna
b204c2dbec fix: enforce deny on DeleteVersionAction (#16409) 2023-01-13 04:16:00 -08:00
Poorna
b22b39de96 Avoid dangling deletes if disk not found (#16401) 2023-01-12 22:20:19 -08:00
Harshavardhana
c242e6c391 fix: calculate common parity properly (#16406) 2023-01-13 03:28:16 +05:30
Anis Elleuch
e05205756f metrics: Add more logs when unable to read bucket usage (#16405) 2023-01-13 02:32:00 +05:30
Daniel Valdivia
d03b244fcd Remove checks target from docker target (#16399)
Signed-off-by: Daniel Valdivia <18384552+dvaldivia@users.noreply.github.com>
2023-01-12 01:39:12 -08:00
Aditya Manthramurthy
5ef679d8f1 Simplify/speedup container image publishing script (#16403) 2023-01-12 01:35:47 -08:00
Minio Trusted
d33c527e39 Update yaml files to latest version RELEASE.2023-01-12T02-06-16Z 2023-01-12 07:27:07 +00:00
Aditya Manthramurthy
7bc95c47a3 Update console to 0.22.5 (#16400) 2023-01-11 18:06:16 -08:00
Anis Elleuch
475a88b555 fix: error out if an object is found after a full decom (#16277) 2023-01-12 05:52:51 +05:30
Allan Roger Reid
9815dac48f fix: allow bind on ipv6 loopback failures (#16388) 2023-01-11 08:47:39 +05:30
Anis Elleuch
1ece3d1dfe Add comment field to service accounts (#16380) 2023-01-10 21:57:52 +04:00
Anis Elleuch
2146ed4033 xl: Quit early when EC config is incorrect (#16390)
Co-authored-by: Anis Elleuch <anis@min.io>
2023-01-09 23:07:45 -08:00
Minio Trusted
52b88b52f0 Update yaml files to latest version RELEASE.2023-01-06T18-11-18Z 2023-01-08 07:51:31 +00:00
Anis Elleuch
ebd4388cca s3: Return XMinioInvalidObjectName if the object contains null char (#16372) 2023-01-06 10:11:18 -08:00
Harshavardhana
57fd02ee57 update console v0.22.4 (#16374)
Signed-off-by: Harshavardhana <harsha@minio.io>
2023-01-05 22:15:51 -08:00
Anis Elleuch
0333412148 fix: heal only once per disk per set among multiple disks (#16358) 2023-01-05 20:41:19 -08:00
Anis Elleuch
1c85652cff lint: Fix in darwin environment (#16368) 2023-01-05 10:12:01 -08:00
Harshavardhana
e0086c1be7 reduce startup delays on kubernetes (#16356) 2023-01-05 02:32:43 -08:00
Poorna
b29e159604 docs: Update replication setup commands (#16361) 2023-01-04 13:39:37 -08:00
Anis Elleuch
7883e55da2 Merge buckets list from different nodes in ListBuckets() call (#16357) 2023-01-04 08:53:58 -08:00
Harshavardhana
b197623ed2 remove unnecessary kernel-tuning docs (#16354) 2023-01-04 01:33:40 -08:00
Harshavardhana
a15a2556c3 converge listBuckets() as a peer call (#16346) 2023-01-03 23:39:40 -08:00
Harshavardhana
14d29b77ae update replication tests with latest 'mc' (#16348) 2023-01-03 22:54:39 -08:00
Harshavardhana
a2514ffeed update klauspost/compress dependency (#16343) 2023-01-03 10:41:14 -08:00
Harshavardhana
f1bbb7fef5 vectorize cluster-wide calls such as bucket operations (#16313) 2023-01-03 08:16:39 -08:00
Minio Trusted
72394a8319 Update yaml files to latest version RELEASE.2023-01-02T09-40-09Z 2023-01-03 10:16:34 +00:00
Harshavardhana
1cd8e1d8b6 remove the startup jitter before locks() (#16340) 2023-01-02 01:40:09 -08:00
jiuker
62cd918061 fix: close helmInfo file descriptor (#16319) 2023-01-01 23:26:59 -08:00
Klaus Post
6a04067514 fix: tweak read buffer size to reduce over-reading (#16338) 2023-01-01 08:14:20 -08:00
Taran Pelkey
49b3908635 fix: misplaced write response command in DetachPolicy() (#16333) 2022-12-30 20:04:03 -08:00
Harshavardhana
75faef888e disable builds for go1.18 (#16332) 2022-12-30 11:37:07 -08:00
Harshavardhana
b67d97b1ba add missing fields in audit logs for non-compressed handlers (#16328) 2022-12-30 10:20:19 -08:00
Anis Elleuch
b8943fdf19 doc: Update prometheus metrics list (#16329) 2022-12-29 15:08:22 -08:00
Harshavardhana
f93183f66e fix: a deadlock by refactoring listBuckets() under site replication (#16323) 2022-12-29 00:08:31 -08:00
Harshavardhana
2937711390 fix: DeleteObject() API with versionId under replication (#16325) 2022-12-28 22:48:33 -08:00
Wojtek Czekalski
aa56c6d51d helm: Make bucket existence check faster (#16321) 2022-12-27 10:32:39 -08:00
Anis Elleuch
27417459fb metrics: Show healing info for all nodes (#16315) 2022-12-26 08:35:32 -08:00
Harshavardhana
5b8fe2e89a allow locks with object affinity to spread across pools (#16312) 2022-12-23 20:55:45 -08:00
Anis Elleuch
acc9c033ed debug: Add X-Amz-Request-ID to lock/unlock calls (#16309) 2022-12-23 19:49:07 -08:00
Poorna
8528b265a9 Validate replication target update to avoid duplicate endpoints (#16311) 2022-12-23 15:44:48 -08:00
Han Cen
44250f1a52 helm: disallow empty containers in post job template (#16281) 2022-12-23 12:32:18 -08:00
Minio Trusted
f7560670d9 update helm v5.0.4
Signed-off-by: Harshavardhana <harsha@minio.io>
2022-12-23 12:29:40 -08:00
Russell Sim
3891885800 helm: fix creating users, via proper secretKey (#16310) 2022-12-23 12:28:43 -08:00
Harshavardhana
b882310e2b avoid locks for internal and invalid buckets in MakeBucket() (#16302) 2022-12-23 07:46:00 -08:00
Poorna
de0b43de32 persist replication stats with leader lock (#16282) 2022-12-22 14:25:13 -08:00
Harshavardhana
48152a56ac upgrade UBI image to 8.7 (#16301) 2022-12-22 10:56:05 -08:00
jiuker
29dd7f1d68 tier verification leaks fd, that must be closed (#16296)
Co-authored-by: Harshavardhana <harsha@minio.io>
2022-12-22 10:35:54 -08:00
Poorna
6423e4c767 Remove site replication config if it succeeded locally (#16279) 2022-12-22 01:31:20 -08:00
Harshavardhana
1dd8f0e8f3 update console v0.22.3 (#16292)
Signed-off-by: Harshavardhana <harsha@minio.io>
2022-12-21 23:47:51 -08:00
Krishnan Parthasarathi
2fa35def2c Fix DeleteObject when only free versions remain (#16289) 2022-12-21 16:24:07 -08:00
Anis Elleuch
34167c51d5 trace: Add bootstrap tracing events (#16286) 2022-12-21 15:52:29 -08:00
Harshavardhana
a5f8af4efb serialize replication stats() only when needed (#16280) 2022-12-20 00:07:53 -08:00
Harshavardhana
5a218f38a1 allow retries for transaction lock on startup (#16273) 2022-12-19 22:00:00 -08:00
Anis Elleuch
e57e946206 Do not save credentials in config.json (#16275) 2022-12-19 12:27:06 -08:00
Klaus Post
b4f71362e9 Avoid config migration on every startup (#16278) 2022-12-19 11:10:14 -08:00
Taran Pelkey
ed37b7a9d5 Add API to fetch policy user/group associations (#16239) 2022-12-19 10:37:03 -08:00
Minio Trusted
6511021fbe update helm v5.0.3 2022-12-19 00:53:02 -08:00
mruzicka
6197ba851b helm: Fix post job template (#16236) 2022-12-18 08:01:22 -08:00
Minio Trusted
3ae1f9d852 update helm v5.0.2 2022-12-17 23:57:10 -08:00
orblazer
0db1930f48 helm: add policy to svcacct (#16272) 2022-12-17 22:50:37 -08:00
Anis Elleuch
89db3fdb5d Do not return an error when version disparity is detected (#16269) 2022-12-16 08:52:12 -08:00
Harshavardhana
80fc3a8a52 use newDynamicTimeoutWithOpts() when appropriate (#16266) 2022-12-15 13:11:37 -08:00
Klaus Post
988a2e8fed Faster startup of large distributed systems with latency (#16259) 2022-12-15 08:31:21 -08:00
Harshavardhana
2433698372 fix: remove unnecessary logs for client conn errors (#16261) 2022-12-15 08:25:05 -08:00
Harshavardhana
5d7e8f79ed fix: remove scanner healing with unnecessary logs (#16260) 2022-12-14 16:39:18 -08:00
Harshavardhana
bad229e16e fix: support event name s3:Restore:* (#16257) 2022-12-14 05:12:07 -08:00
Poorna
d37e514733 Cleanup remote targets automatically on replication config removal. (#16221) 2022-12-14 03:24:06 -08:00
Harshavardhana
c73ea27ed7 do not log checksum mismatch error, client received the error (#16246) 2022-12-14 01:57:40 -08:00
Krishnan Parthasarathi
0159b56717 fix: rebalance to account for object's on-disk size (#16240) 2022-12-14 00:15:14 -08:00
Aditya Manthramurthy
9e6cc847f8 Add HTTP2 config option for policy plugin (#16225) 2022-12-13 14:28:48 -08:00
Taran Pelkey
709eb283d9 Add endpoints for managing IAM policies (#15897)
Co-authored-by: Taran <taran@minio.io>
Co-authored-by: taran-p <taran@minio.io>
Co-authored-by: Aditya Manthramurthy <donatello@users.noreply.github.com>
2022-12-13 12:13:23 -08:00
Anis Elleuch
76dde82b41 Implement STS account info API (#16115) 2022-12-13 08:38:50 -08:00
Anis Elleuch
939c0100a6 log: Do not interpret verbs in object names in console output (#16233) 2022-12-13 08:27:40 -08:00
Aditya Manthramurthy
2d60bf8c50 Refactor HTTP transports (#16222) 2022-12-12 20:31:21 -08:00
Harshavardhana
37e20f6ef2 feat: allow listening specific addrs for API port (#16223) 2022-12-12 18:48:46 -08:00
Minio Trusted
76905b7a67 Update yaml files to latest version RELEASE.2022-12-12T19-27-27Z 2022-12-13 01:10:31 +00:00
Aditya Manthramurthy
a469e6768d Add LDAP DNS SRV record lookup support (#16201) 2022-12-12 11:27:27 -08:00
Harshavardhana
2fc182d8e6 fix: iso8601TimeFormat padding issue for certain nanoseconds (#16207) 2022-12-12 10:28:30 -08:00
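The entry above concerns Go time formatting. A tiny illustration of the padding difference, with no claim that this is MinIO's exact format string: ".999" drops trailing zeros (variable width), while ".000" always emits three digits.

```
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2022, 12, 12, 10, 28, 30, 120_000_000, time.UTC) // 0.12 seconds
	fmt.Println(t.Format("2006-01-02T15:04:05.999Z")) // 2022-12-12T10:28:30.12Z  (unpadded)
	fmt.Println(t.Format("2006-01-02T15:04:05.000Z")) // 2022-12-12T10:28:30.120Z (padded)
}
```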
Shireesh Anjal
a2cbeaa9e6 Use different subnet public key during dev/test (#16216) 2022-12-12 10:28:15 -08:00
Harshavardhana
444ff20bc5 do not rename multipart failed transactions back to tmp (#16204) 2022-12-12 01:40:29 -08:00
Harshavardhana
20ef5e7a6a avoid double deletes() when no more versions (#16206) 2022-12-12 01:40:04 -08:00
Minio Trusted
c233c8e329 update console to v0.22.2 2022-12-09 21:10:13 -08:00
Aditya Manthramurthy
e06127566d Add IAM API to attach/detach policies for LDAP (#16182) 2022-12-09 13:08:33 -08:00
Harshavardhana
dfe73629a3 fix: delete marker discrepancies via DeleteObject() API (#16195) 2022-12-08 18:15:16 -08:00
Harshavardhana
b03dd1af17 remove hard limit for number of buckets (#16194) 2022-12-08 12:24:03 -08:00
Harshavardhana
4bc367c490 fix: translate tier add errors properly (#16191) 2022-12-08 11:18:07 -08:00
Klaus Post
3eb2d086b2 Replace filepathx with fork (#16192) 2022-12-08 10:42:44 -08:00
Klaus Post
70986b6e6e Add version id to healresult (#16193) 2022-12-08 07:49:10 -08:00
jiuker
8edc2faaa9 reuse sha256 in config GetSettings (#16188) 2022-12-08 03:03:24 -08:00
Klaus Post
ebe395788b feat: Encrypt s3zip file index (#16179) 2022-12-07 14:56:07 -08:00
Klaus Post
12fd6678ee Encrypt checksums with KMS on CompleteMultipartUpload (#16177) 2022-12-07 10:18:18 -08:00
Harshavardhana
90d35b70b4 remove unnecessary logs for truncated XML inputs (#16184) 2022-12-07 08:30:52 -08:00
Minio Trusted
9f71369b67 Update yaml files to latest version RELEASE.2022-12-07T00-56-37Z 2022-12-07 01:30:51 +00:00
Javier Adriel
04ae9058ed Populate end_session_endpoint (#16183) 2022-12-06 16:56:37 -08:00
Aditya Manthramurthy
a30cfdd88f Bump up madmin-go to v2 (#16162) 2022-12-06 13:46:50 -08:00
Anis Elleuch
1bae32dc96 xl: Delete older data-dir when replacing an existing version-id (#16176) 2022-12-06 13:43:18 -08:00
Anis Elleuch
932d2c3c62 Add X-Amz-Request-Id to internode calls (#16146) 2022-12-06 09:27:26 -08:00
Anis Elleuch
52f4124678 Remove go1.18 from Github workflow tests (#16180) 2022-12-06 09:11:20 -08:00
jiuker
8d8d07ac5c use readlock instead of writelock to get heal information (#16175) 2022-12-06 08:08:22 -08:00
Anis Elleuch
44735be38e s3: Return correct error when Version is invalid in policy document (#16178) 2022-12-06 08:07:24 -08:00
dorman
1ef1b2ba50 helm: modify the job create order (#15696) 2022-12-05 19:22:31 -08:00
Timofei Bredov
6fdbd778d5 Add minimal setup command to helm chart's readme (#16165) 2022-12-05 13:22:02 -08:00
Harshavardhana
419f351df3 avoid logging gzipped body in trace output (#16172) 2022-12-05 13:21:27 -08:00
Klaus Post
180d6b30ca Avoid hot loop when lock is cancelled (#16169) 2022-12-05 13:21:14 -08:00
Klaus Post
3fd9059b4e opt: Only stream big data usage caches (#16168) 2022-12-05 13:01:11 -08:00
Klaus Post
a713aee3d5 Run staticcheck on CI (#16170) 2022-12-05 11:18:50 -08:00
Harshavardhana
a9f5b58a01 fix: update the JSON keys for latest 'mc' release (#16171) 2022-12-05 10:28:22 -08:00
Andreas Auernhammer
d882ba2cb4 kms: add support for KES enclaves (#16139)
Signed-off-by: Andreas Auernhammer <hi@aead.dev>
2022-12-04 02:34:24 -08:00
Cesar Celis Hernandez
90e37a8745 Start PR on Enterprise when there is new MinIO Version (#16121) 2022-12-04 02:29:25 -08:00
jiuker
6086f45d25 fix: in disk cache readCacheFileStream should closed upon return (#16138) 2022-12-04 02:28:10 -08:00
Minio Trusted
d6351879f3 Update yaml files to latest version RELEASE.2022-12-02T19-19-22Z 2022-12-02 20:15:36 +00:00
Harshavardhana
5655272f5a ship mc along with MinIO container image (#16156) 2022-12-02 11:19:22 -08:00
Harshavardhana
9b35c72349 fix: a crash in KMS cert reload function (#16158) 2022-12-02 11:19:05 -08:00
Klaus Post
98cffbce03 s3zip: Limit over-read for single file (#16161) 2022-12-02 08:53:24 -08:00
Klaus Post
1cd875de1e Persist updated metadata (#16160) 2022-12-02 08:35:04 -08:00
Harshavardhana
5a8df7efb3 re-implement StorageInfo to be a peer call (#16155) 2022-12-01 14:31:35 -08:00
Anis Elleuch
c84e2939e4 trace: Publish storage layer errors (#16153) 2022-12-01 12:10:54 -08:00
Anis Elleuch
641ab24aec repl: resync orchestrator to use global shared lock (#16154) 2022-12-01 12:10:09 -08:00
Harshavardhana
71133105d7 re-order the top-level config keys for priority (#16150) 2022-12-01 07:50:08 -08:00
Harshavardhana
625677b189 update reedsolomon v1.11.3 (#16149) 2022-11-30 13:39:03 -08:00
Minio Trusted
76943ac05e Update yaml files to latest version RELEASE.2022-11-29T23-40-49Z 2022-11-30 21:13:07 +00:00
Aditya Manthramurthy
87cbd41265 feat: Allow at most one claim based OpenID IDP (#16145) 2022-11-29 15:40:49 -08:00
Harshavardhana
be92cf5959 change dependency from amqp -> amqp091 (RabbitMQ) official (#16142) 2022-11-28 16:05:06 -08:00
Klaus Post
cc1d8f0057 Check for abandoned data when healing (#16122) 2022-11-28 10:20:55 -08:00
Anis Elleuch
1f1dcdce65 move HTTP recorder to an internal library (#16128) 2022-11-28 10:20:27 -08:00
Shireesh Anjal
98a67a3776 Improvements in logger and audit webhooks (#16102) 2022-11-28 08:03:26 -08:00
Andreas Auernhammer
9b1e70e4f9 kms: fix possible deadlock due to nested RLock calls. (#16136)
Signed-off-by: Andreas Auernhammer <hi@aead.dev>
2022-11-28 07:31:07 -08:00
Harshavardhana
09d4f8cd0f avoid serializing decryptKey() every 15mins (#16135)
in an environment where the cert files are symlinks (e.g. Kubernetes),
we resort to reloading certs every 15 minutes; when the certs turn out
to be the same, we can avoid recreating the KES client instance. This
ensures the price of contending with the lock is paid only when
necessary.
2022-11-28 01:14:33 -08:00
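A rough sketch of the "skip the reload when nothing changed" idea described above; the fingerprint comparison, the cert path and the rebuild hook are illustrative names, not MinIO's or KES's real API.

```
package main

import (
	"crypto/sha256"
	"fmt"
	"os"
	"sync"
)

var (
	mu      sync.Mutex
	lastSum [sha256.Size]byte
)

// maybeReload re-creates the expensive client only when the certificate
// bytes on disk actually changed since the last check.
func maybeReload(certPath string, rebuild func() error) error {
	pem, err := os.ReadFile(certPath)
	if err != nil {
		return err
	}
	sum := sha256.Sum256(pem)

	mu.Lock()
	defer mu.Unlock()
	if sum == lastSum {
		return nil // same cert: skip the costly rebuild and lock contention
	}
	if err := rebuild(); err != nil {
		return err
	}
	lastSum = sum
	return nil
}

func main() {
	_ = maybeReload("/etc/minio/certs/client.crt", func() error {
		fmt.Println("rebuilding KES client")
		return nil
	})
}
```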
Minio Trusted
53cbc020b9 Update yaml files to latest version RELEASE.2022-11-26T22-43-32Z 2022-11-27 04:08:35 +00:00
Poorna
63fc6ba2cd preserve replicated ETag properly on target (#16129) 2022-11-26 14:43:32 -08:00
jiuker
ce53d7f6c2 add disk.Close() in healFreshDisk to indicate idiomatic flow of code (#16124) 2022-11-26 00:26:15 -08:00
jiuker
fe8eed963e fix: wrapped error will not equal in decommissioning (#16113) 2022-11-24 08:00:42 -08:00
Anis Elleuch
97eb7dbf5f notify: Return detailed err msg when connecting to target fails (#16118) 2022-11-24 07:59:19 -08:00
Shireesh Anjal
59f877fc64 fix: Timestamp not added in diagnostics report (#16114) 2022-11-23 07:11:22 -08:00
Klaus Post
f96fe9773c fix: duplicated shared prefix with custom delimiter when listing (#16111) 2022-11-22 08:51:04 -08:00
Anis Elleuch
04948b4d55 fix: checking for stale STS account under site replication (#16109) 2022-11-22 07:26:33 -08:00
Klaus Post
98ba622679 Reduce temporary file clean-up waits (#16110) 2022-11-22 07:23:36 -08:00
Harshavardhana
08103870a5 update single drive setup error message (#16098) 2022-11-18 14:47:38 -08:00
Anis Elleuch
993e586855 config: return XMinioConfigNotFound code for non existing config (#16065) 2022-11-18 10:28:14 -08:00
Harshavardhana
58ec835af0 fix: skip free version ID and marker in metadata equality (#16093) 2022-11-18 05:48:22 -08:00
Harshavardhana
6aea950d74 avoid partID lock validating uploadID exists prematurely (#16086) 2022-11-18 03:09:35 -08:00
Poorna
7198be5be9 bucket resync: persist reset id to bucket metadata (#16088) 2022-11-18 01:39:05 -08:00
Minio Trusted
3661aaf8a1 Update yaml files to latest version RELEASE.2022-11-17T23-20-09Z 2022-11-18 08:51:38 +00:00
Klaus Post
a22b4adf4c distribute replication ops based on names (#16083) 2022-11-17 15:20:09 -08:00
Klaus Post
b7bb122be8 fix: replication auto-scaling deadlock (#16084) 2022-11-17 07:35:02 -08:00
Krishnan Parthasarathi
8441a3bf5f fix: update metacache entry only once (#16072) 2022-11-16 11:25:00 -08:00
Harshavardhana
853c4de75a allow changing endpoints in distributed setups (#16071) 2022-11-16 07:59:10 -08:00
jiuker
3597af789e allow resultCh to be closed() after clusterMetaHealthInfo() (#16073) 2022-11-16 03:04:36 -08:00
Andreas Auernhammer
4c9cac0b47 update KES dependency to v0.22.0 (#16077)
Signed-off-by: Andreas Auernhammer <hi@aead.dev>
2022-11-16 03:03:04 -08:00
Minio Trusted
1a0b68498b update console release v0.21.3
Signed-off-by: Harshavardhana <harsha@minio.io>
2022-11-15 16:47:25 -08:00
Shireesh Anjal
5246e3be84 Send health diagnostics data as part of callhome (#16006) 2022-11-15 13:53:05 -08:00
Klaus Post
8a07000e58 fix: refactor getReplicationDiff for safe use (#16051) 2022-11-15 07:59:21 -08:00
Krishnan Parthasarathi
3bb82ef60d top-locks: Include lock-held duration (#16061) 2022-11-15 07:57:52 -08:00
Alexander Overvoorde
c8a221a9a7 Add missing argument for tpl in Helm chart (fix for bug in #16064) (#16068) 2022-11-15 07:56:58 -08:00
Harshavardhana
91f45c4aa6 avoid inconsistent versions healing when versions are large (#16066) 2022-11-14 18:35:26 -08:00
Alexander Overvoorde
7c5e4da90c helm: Allow tls.certSecret in chart to be template'd (#16064) 2022-11-14 09:47:59 -08:00
Poorna
d6bc141bd1 feat: Add support for site level resync (#15753) 2022-11-14 07:16:40 -08:00
jiuker
7ac64ad24a fix: use errors.Is for wrapped returns (#16062) 2022-11-14 07:15:46 -08:00
asoria-lf
14e52f29b0 helm: Add new job to create service accounts (#15939) 2022-11-13 09:28:07 -08:00
Philipp B
344ae9f84e helm: add extraContainer (#15660)
Signed-off-by: Philipp Born <git@pborn.eu>
2022-11-13 09:22:27 -08:00
Minio Trusted
f7db12c7ef helm release v5.0.1 2022-11-13 02:04:51 -08:00
Harshavardhana
962d1f1a71 choose default values upon incorrect storage_class value (#16058) 2022-11-12 10:18:21 -08:00
Harshavardhana
6d76db9d6c improve server startup error when pools are incorrect (#16056) 2022-11-11 19:40:45 -08:00
yanggang
00857f8f59 helm: fix positional parameter in template (#15983)
fixes #15901
2022-11-11 12:44:37 -08:00
Ray
66239f30ce configuring the nats target to reconnect forever (#16050) 2022-11-11 12:42:41 -08:00
jiuker
bf89f79694 save deploymentID to avoid mutating request entry in Audit (#16053) 2022-11-11 12:42:15 -08:00
elg0ch0
ce299b47ea helm: update bucket policy setting via 'mc anonymous' (#16055) 2022-11-11 11:34:01 -08:00
Minio Trusted
6dc7109a9f Update yaml files to latest version RELEASE.2022-11-11T03-44-20Z 2022-11-11 04:57:16 +00:00
jiuker
bdcb485740 netPerfRX Reset() should use write Lock() (#16043) 2022-11-10 19:44:20 -08:00
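A small illustration of the locking rule behind the fix above, using a stand-in counter type rather than MinIO's netPerfRX: anything that mutates shared state needs the write lock; RLock only protects concurrent readers.

```
package main

import "sync"

// counter is an illustrative type: methods that mutate state must take
// the write lock; the read lock alone would race with writers.
type counter struct {
	mu sync.RWMutex
	n  uint64
}

func (c *counter) Add(v uint64) {
	c.mu.Lock()
	c.n += v
	c.mu.Unlock()
}

func (c *counter) Reset() {
	c.mu.Lock() // write lock: Reset mutates, RLock would race with Add
	c.n = 0
	c.mu.Unlock()
}

func (c *counter) Value() uint64 {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.n
}

func main() {
	var c counter
	c.Add(5)
	c.Reset()
	_ = c.Value()
}
```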
Poorna
e32b948a49 fix: parsing multipart uploadID under site replicated setup (#16048)
continue the fix from #16034
2022-11-10 16:17:45 -08:00
Minio Trusted
4fe9cbb973 Update yaml files to latest version RELEASE.2022-11-10T18-20-21Z 2022-11-10 19:36:54 +00:00
Klaus Post
5b242f1d11 Add Audit target metrics (#16044) 2022-11-10 10:20:21 -08:00
Poorna
34d28dd79f replication: Avoid blocking on mrf save (#16045) 2022-11-10 10:20:02 -08:00
Krishnan Parthasarathi
6eef9b4a23 lifecycle: simplify Eval and HasActiveRules (#16036) 2022-11-10 07:17:45 -08:00
Aditya Manthramurthy
5f1999cc71 fix: avoid URL unsafe chars in multipart upload ID (#16034) 2022-11-09 16:41:16 -08:00
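One common way to keep an identifier free of URL-unsafe characters, offered only as an illustration of the problem the fix above addresses (not necessarily the encoding MinIO chose): use the URL-safe base64 alphabet, which avoids '/' and '+'.

```
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	raw := "bucket/object+some-unique-suffix"
	id := base64.RawURLEncoding.EncodeToString([]byte(raw))
	fmt.Println(id) // safe to embed in request paths and query strings
}
```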
Krishnan Parthasarathi
40a2c6b882 Return remote tier as StorageClass for transitioned objects (#16035) 2022-11-09 15:57:34 -08:00
Krishnan Parthasarathi
7ba281728f ilm: fix x-amz-expiration header evaluation (#16029) 2022-11-09 04:20:34 -08:00
jiuker
7b7356f04c close the reader under disk cache bitrot verification (#16024) 2022-11-09 04:20:11 -08:00
Klaus Post
bbc312fce6 Add notification queue metrics (#16026) 2022-11-08 16:36:47 -08:00
Harshavardhana
1b0dfb0f58 remove printing map() checksums (#16028) 2022-11-08 13:29:24 -08:00
Anis Elleuch
7260241511 Remove some logs caused by external apps (#16027) 2022-11-08 13:29:05 -08:00
Anis Elleuch
3b1a9b9fdf Use the same lock for the scanner and site replication healing (#15985) 2022-11-08 08:55:55 -08:00
yanggang
52769e1e71 remove io/util for advanced golang (#16011) 2022-11-08 07:58:02 -08:00
Harshavardhana
72afc2727a rebalance status must return appropriate error initially (#16022) 2022-11-08 07:56:45 -08:00
Minio Trusted
808739867c Update yaml files to latest version RELEASE.2022-11-08T05-27-07Z 2022-11-08 05:59:18 +00:00
Harshavardhana
752e18e795 upgrade console to v0.21.2 2022-11-07 21:27:07 -08:00
Aditya Manthramurthy
76d822bf1e Add LDAP policy entities API (#15908) 2022-11-07 14:35:09 -08:00
Klaus Post
ddeca9f12a fix: filter rest errors and logs returned (#16019) 2022-11-07 10:38:08 -08:00
Shireesh Anjal
19d0340ddf Update version of madmin-go to v1.7.3 (#15994) 2022-11-07 09:32:18 -08:00
Harshavardhana
21251d8c22 initialize streaming events without lazy initialization (#16016) 2022-11-07 08:01:24 -08:00
Harshavardhana
1f3db03bf0 allow changing argument for path for SNSD setup (#16013) 2022-11-07 00:11:58 -08:00
Harshavardhana
944c62daf4 skip flaky tests on windows OS (#16015) 2022-11-07 00:11:21 -08:00
Harshavardhana
9547b7d0e9 add deadlineConnections on remoteTransport (#16010) 2022-11-05 11:09:21 -07:00
Harshavardhana
76c4ea7682 force all internal MinIO operations to be under UTC (#16009) 2022-11-04 16:44:38 -07:00
Klaus Post
808ecfe0f2 merge versions across sets when listing (#16003) 2022-11-04 11:33:22 -07:00
Klaus Post
2894dd4d1a fix: hold lock while serializing replication stats (#16007) 2022-11-04 09:59:14 -07:00
yanggang
797fa7f97b update Elasticsearch dependency to 7.17.7 (#15992) 2022-11-04 08:23:33 -07:00
jiuker
fd8750e959 fix: http body must be drained in downloadBinary() (#16001) 2022-11-04 08:22:38 -07:00
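The classic drain-before-close idiom the fix above refers to, sketched with stand-in names: fully reading the response body before closing it lets Go's HTTP client reuse the underlying connection instead of dropping it.

```
package main

import (
	"fmt"
	"io"
	"net/http"
)

func fetch(url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer func() {
		io.Copy(io.Discard, resp.Body) // drain whatever we did not read
		resp.Body.Close()
	}()
	fmt.Println("status:", resp.Status)
	return nil
}

func main() {
	_ = fetch("https://example.com")
}
```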
Harshavardhana
7be65f66b8 support HS256 series of JWT signature for OpenID connect (#15993) 2022-11-03 16:41:53 -07:00
Poorna
4f5d38a4b1 site replication edit: validate endpoint belongs to deployment (#16000) 2022-11-03 16:23:45 -07:00
Anis Elleuch
7e73fc2870 Implement inspect data API v2 (#15474)
Co-authored-by: Klaus Post <klauspost@gmail.com>
2022-11-02 13:36:38 -07:00
yanggang
d2c9a9e395 add windows port allot by "netsh dynamicport" (#15986) 2022-11-02 09:10:26 -07:00
Harshavardhana
0d49b365ff converge SNSD deployments into single code (#15988) 2022-11-01 16:41:01 -07:00
Anis Elleuch
7721595aa9 config: Deprecated delay/max_wait/scanner and introduce speed (#15941) 2022-11-01 08:04:07 -07:00
Harshavardhana
fd6f6fc8df cleanup stale parent multipart directories (#15980) 2022-11-01 08:00:02 -07:00
Aditya Manthramurthy
4fb47cd568 fix: update admin IDP APIs to be more RESTful (#15896) 2022-10-31 14:52:26 -07:00
Klaus Post
ecc932d5dd Clean entire tmp-old on restart (#15979) 2022-10-31 07:27:50 -07:00
Harshavardhana
b57fbff7c1 ignore background healInfo in single drive setup (#15968) 2022-10-31 07:26:10 -07:00
Harshavardhana
4892a766a8 do not panic if webhook returns an error (#15970) 2022-10-30 16:45:53 -07:00
Minio Trusted
0303cd8625 Update yaml files to latest version RELEASE.2022-10-29T06-21-33Z 2022-10-29 09:29:37 +00:00
Poorna
d765b89a63 improve validation for replication resync API (#15964) 2022-10-28 23:21:33 -07:00
Harshavardhana
6e4acf0504 add a message of removal for gateway and hide the command (#15965) 2022-10-28 14:11:20 -07:00
Klaus Post
71954faa3a mark pubsub type safe via generics (#15961) 2022-10-28 10:55:42 -07:00
Shireesh Anjal
6d22e74d11 mark SUBNET config keys as sensitive info (#15962) 2022-10-28 10:54:44 -07:00
Poorna
dc92bb4646 xl-meta: update metadata version (#15958) 2022-10-28 02:48:43 -07:00
Klaus Post
0f0e154315 fix: inconsistent replication delete marker timestamps (#15956) 2022-10-27 09:46:52 -07:00
Harshavardhana
136d41775f remove numAvailableDisks check as it doesn't serve any purpose (#15954) 2022-10-27 09:05:24 -07:00
Harshavardhana
ec77d28e62 make subnet subsys dynamic and simplify callhome (#15927) 2022-10-27 00:20:01 -07:00
Klaus Post
86420a1f46 Store multipart checksums (#15953) 2022-10-26 18:14:58 -07:00
Poorna
7dd8b6c8ed ensure ILM expiry creates non null deleteMarker for versioned bucket (#15947) 2022-10-26 16:09:27 -07:00
杨刚
8afa6fefd8 add cross-build linux/amd64 (#15949) 2022-10-26 16:05:26 -07:00
Anis Elleuch
533c9d4fe3 fix: lockName to disallow parallel same erasure set healing (#15951) 2022-10-26 12:43:54 -07:00
Anis Elleuch
a35ef155fc return appropriate error status code in the lock handler (#15950) 2022-10-26 09:51:26 -07:00
Poorna
8dd3c41b2a allow MakeBucket errors to be handled lazily (#15945)
the remote error is not required to be passed back to the
client - this is mostly because healing should eventually
catch up on this and heal the bucket.
2022-10-25 23:32:37 -07:00
Krishnan Parthasarathi
4523da6543 feat: introduce pool-level rebalance (#15483) 2022-10-25 12:36:57 -07:00
Poorna
ce8456a1a9 proxy multipart to peers via multipart uploadID (#15926) 2022-10-25 10:52:29 -07:00
Minio Trusted
1673778633 add missing helm 5.0.0
Signed-off-by: Harshavardhana <harsha@minio.io>
2022-10-25 03:20:22 -07:00
Poorna
9ce1884732 reject editing bucket replication config when site replication is enabled (#15937) 2022-10-24 20:24:32 -07:00
Harshavardhana
23b329b9df remove gateway completely (#15929) 2022-10-24 17:44:15 -07:00
Krishnan Parthasarathi
0c34e51a75 Filter out tiering metadata during CopyObject (#15936) 2022-10-24 16:32:31 -07:00
Minio Trusted
1633b30979 update helm v4.1.0
Signed-off-by: Harshavardhana <harsha@minio.io>
2022-10-24 13:37:05 -07:00
Minio Trusted
630dabf4b9 Update yaml files to latest version RELEASE.2022-10-24T18-35-07Z 2022-10-24 19:29:30 +00:00
Anis Elleuch
fc6c794972 Audit dangling object removal (#15933) 2022-10-24 11:35:07 -07:00
Brien Dieterle
2e33b99c6b helm: move Prometheus TLSConfig to job spec (#15739) 2022-10-24 08:46:41 -07:00
Harshavardhana
3b7292b637 update console v0.21.1 2022-10-24 02:20:47 -07:00
Harshavardhana
e4f469ae7a update console dependency to master 2022-10-23 22:36:24 -07:00
Minio Trusted
c921dc75c7 Update yaml files to latest version RELEASE.2022-10-21T22-37-48Z 2022-10-22 00:04:37 +00:00
Klaus Post
86d543d0f6 Check for s3zip content offset (#15924) 2022-10-21 15:37:48 -07:00
Poorna
e4e90b53c1 fix: delete-marker replication check properly (#15923) 2022-10-21 14:45:06 -07:00
Anis Elleuch
58d776daa0 Set CONSOLE_MINIO_SERVER to 127.0.0.1 by default (#15887) 2022-10-21 14:42:28 -07:00
Krishnan Parthasarathi
f6b2e89109 Pass encrypted etag as is for immediate tiering (#15925) 2022-10-21 14:40:50 -07:00
Anis Elleuch
ac85c2af76 lifecycle: refactor rules filtering and tagging support (#15914) 2022-10-21 10:46:53 -07:00
Shireesh Anjal
5aba2aedb3 Do not freeze s3 traffic in healthinfo api (#15912) 2022-10-21 00:34:32 -07:00
Minio Trusted
bd77f1df4c Update yaml files to latest version RELEASE.2022-10-20T00-55-09Z 2022-10-20 06:38:33 +00:00
Harshavardhana
a8332efa94 fix: Delete() of bucket metadata should not parse the config (#15904) 2022-10-19 17:55:09 -07:00
Aditya Manthramurthy
3dbef72dc7 fix: AccountInfo API for roleARN based accounts (#15907) 2022-10-19 17:54:41 -07:00
Aditya Manthramurthy
2d16e74f38 Add LDAP IDP Configuration APIs (#15840) 2022-10-19 11:00:10 -07:00
Anis Elleuch
de5070446d Deprecate --listeners flag (#15900) 2022-10-19 08:45:50 -07:00
Harshavardhana
374abd1e7d add filter support for tags and metadata in batch replication (#15885) 2022-10-18 21:22:21 -07:00
Anis Elleuch
0506d9e83d storage: Return errDiskNotFound when a peer is during shutdown (#15868) 2022-10-18 13:50:46 -07:00
Klaus Post
bd3dfad8b9 Add concurrent Snowball extraction + options (#15836) 2022-10-18 13:50:21 -07:00
Harshavardhana
9fff315555 do not need to trace ignored objects (#15894) 2022-10-18 13:47:55 -07:00
Harshavardhana
07b6dce1a5 remove nancy, we rely on vulncheck from now on (#15893) 2022-10-18 10:45:44 -07:00
Anis Elleuch
18fb86b7be convert context.DeadlineExceed to offline disk in DiskInfo() (#15886) 2022-10-18 03:01:16 -07:00
Harshavardhana
58a8275e84 do not assume invalid buf to be non-xl.meta (#15843) 2022-10-17 09:39:21 -07:00
Minio Trusted
196fab6834 Update yaml files to latest version RELEASE.2022-10-15T19-57-03Z 2022-10-15 23:20:56 +00:00
Aditya Manthramurthy
85fc7cea97 Pass role ARN for OIDC providers to console (#15862) 2022-10-15 12:57:03 -07:00
Harshavardhana
328d660106 support CRC32 Checksums on single drive setup (#15873) 2022-10-15 11:58:47 -07:00
Harshavardhana
c68910005b validate bucket before attempting batch replication (#15861) 2022-10-15 11:58:31 -07:00
Harshavardhana
c79bcc8838 Revert "convert context.DeadlineExceed to offline disk in DiskInfo() (#15869)"
This reverts commit 0fe58dbb34.
2022-10-14 20:37:50 -07:00
Anis Elleuch
0fe58dbb34 convert context.DeadlineExceed to offline disk in DiskInfo() (#15869) 2022-10-14 19:32:13 -07:00
Harshavardhana
6cb2f56395 Revert "Revert "tests: Add context cancelation (#15374)""
This reverts commit 564a0afae1.
2022-10-14 03:08:40 -07:00
Harshavardhana
59e33b3b21 validate setBucketTarget properly as per BucketExists() call (#15860) 2022-10-13 17:46:49 -07:00
Poorna
0e3c92c027 attempt delete marker replication after object is replicated (#15857)
Ensure delete marker replication succeeds, especially since the
recent optimizations to heal on HEAD, LIST and GET can force
replication attempts on a delete marker before the underlying
object version has synced.
2022-10-13 17:45:23 -07:00
Anis Elleuch
db7a9b2c37 heal-info: Return the endpoint of a disk with unknown state (#15854) 2022-10-13 16:41:44 -07:00
Harshavardhana
44097faec1 support deleteMarkers and all versions in batch replication (#15858) 2022-10-13 14:42:10 -07:00
Anis Elleuch
ff5fca76ab Bump golang.org/x/text to 0.3.8 to fix CVE-2022-32149 (#15855) 2022-10-13 11:43:18 -07:00
Klaus Post
bf3da5081f Omit empty checksums in responses (#15850) 2022-10-13 00:49:46 -07:00
Harshavardhana
5532982857 do not disable IsKubernetes(), IsDocker() checks with MINIO_CI_CD (#15852) 2022-10-12 23:40:48 -07:00
Anis Elleuch
783dd875f7 refactor objectQuorumFromMeta() to search for parity quorum (#15844) 2022-10-12 16:42:45 -07:00
Harshavardhana
97112c69be fix: replication stats() to not crash under any situation (#15851)
Co-authored-by: Daniel Valdivia <18384552+dvaldivia@users.noreply.github.com>
2022-10-12 15:47:41 -07:00
hellivan
b0b573052a fix: unrecognized openid config parameters (#15847) 2022-10-12 12:19:44 -07:00
Javier Adriel
2939000342 Add metrics, version and apis handlers (#15839) 2022-10-12 12:08:03 -07:00
Harshavardhana
41e1654f9a remove spurious logging for object not found (#15842) 2022-10-12 04:28:21 -07:00
Harshavardhana
e3cb0278ce honor specified target prefix under batch replication (#15834) 2022-10-11 14:36:06 -07:00
Harshavardhana
0c81f1bdb3 indicate how long it took to bring the drive online (#15835) 2022-10-11 11:33:56 -07:00
Klaus Post
6220875803 Add missing server info fields (#15826) 2022-10-11 11:31:26 -07:00
Anis Elleuch
afd4279cd8 lock tests: Initialize different DRWMutex for each lock (#15833) 2022-10-10 15:14:32 -07:00
Minio Trusted
0c8dd8046a Update yaml files to latest version RELEASE.2022-10-08T20-11-00Z 2022-10-09 19:44:19 +00:00
Harshavardhana
3c4ef4338f marshal retention XML in expected format (#15821) 2022-10-08 13:11:00 -07:00
Aditya Manthramurthy
64cf887b28 use LDAP config from minio/pkg to share with console (#15810) 2022-10-07 22:12:36 -07:00
Harshavardhana
927a879052 authenticate the request first for headObject() (#15820) 2022-10-07 21:45:53 -07:00
Anis Elleuch
dfe0c96b87 preserve Version and DeleteMarker sort order in the list XML response (#15819) 2022-10-07 16:12:36 -07:00
Anis Elleuch
e856e10ac2 ignore VersionNotFound in addition to ObjectNotFound while replicating (#15814) 2022-10-07 16:11:41 -07:00
Krishnan Parthasarathi
6d6a731d6d Handle overlapping and conflicting ILM rules (#15812) 2022-10-07 14:36:23 -07:00
Harshavardhana
928feb0889 remove unused debug param from evalActionFromLifecycle (#15813) 2022-10-07 10:24:12 -07:00
Javier Adriel
b3febe2d24 Update pkg to v1.4.5 (#15808) 2022-10-06 16:58:54 -07:00
Poorna
b6b26dba87 fix: GetObjectRetention to parse in ISO8601 time format (#15809) 2022-10-06 13:53:56 -07:00
Minio Trusted
5c034e26bd fix: govulncheck must use go1.19.2 2022-10-06 12:42:59 -07:00
Harshavardhana
cef0fb1434 remove build asset caching (#15807) 2022-10-06 11:36:07 -07:00
Anis Elleuch
158d0e26a2 decom: Ignore object/version error during deletion (#15806) 2022-10-06 09:41:58 -07:00
Harshavardhana
78385bfbeb set bucket creation timestamp properly for legacy FS backend (#15800) 2022-10-06 02:46:31 -07:00
Harshavardhana
2a13cc28f2 feat: implement support batch replication (#15554) 2022-10-05 23:00:43 -07:00
Minio Trusted
4d761fda81 Update yaml files to latest version RELEASE.2022-10-05T14-58-27Z 2022-10-05 22:50:50 +00:00
Lenin Alevski
4bdf41a6c7 Removing unused getUpdateReaderFromFile function (#15794)
Signed-off-by: Lenin Alevski <alevsk.8772@gmail.com>
2022-10-05 07:58:27 -07:00
Klaus Post
3c605c93fe warn when 0 parity has been set as default parity (#15790) 2022-10-04 22:41:42 -07:00
Anis Elleuch
121f18a443 Use admin request check for ReplicationDiff handler (#15793) 2022-10-04 17:47:31 -07:00
Harshavardhana
538aeef27a fix: heal service accounts for LDAP users in site replication (#15785) 2022-10-04 10:41:47 -07:00
Poorna
be0d2537b7 site replication: fix typo in meta collection (#15792) 2022-10-04 10:19:17 -07:00
Javier Adriel
3307aa1260 Implement KMS handlers (#15737) 2022-10-04 10:05:09 -07:00
Harshavardhana
57cfdfd8fb remove 'perf' tests from health diagnostics (#15780) 2022-10-03 00:18:41 -07:00
Minio Trusted
dc6733dacc Update yaml files to latest version RELEASE.2022-10-02T19-29-29Z 2022-10-03 02:29:56 +00:00
Harshavardhana
f696a221af allow tagging policy condition for GetObject (#15777) 2022-10-02 12:29:29 -07:00
Harshavardhana
ed5b67720c rename deprecated 'mc policy' -> 'mc anonymous' (#15779) 2022-10-01 11:47:48 -07:00
Harshavardhana
2aac50571d fix: de-duplicate conflicting object names on namespace (#15772) 2022-09-30 15:44:21 -07:00
Shireesh Anjal
45edd27ad7 Re-load config after 'mc admin config reset' (#15771) 2022-09-30 10:55:53 -07:00
Minio Trusted
c302d1cfc8 update console v0.20.5 2022-09-30 02:13:12 -07:00
Anis Elleuch
6287e8c571 fix: race when accessing REST TCP dial values (#15770) 2022-09-29 09:27:58 -07:00
Anis Elleuch
f69a98ce49 fix: loading Audit kafka configuration loading (#15766) 2022-09-29 08:35:08 -07:00
Daryl White
d44f3526dc Update links to documentation site (#15750) 2022-09-28 21:28:45 -07:00
Harshavardhana
41b633f5ea support tagging based policy conditions (#15763) 2022-09-28 11:25:46 -07:00
Minio Trusted
4f1ff9c4d9 update console v0.20.4 2022-09-28 00:00:22 -07:00
Anis Elleuch
86bb48792c non-blocking initialization of bucket target notifications (#15571) 2022-09-27 17:23:28 -07:00
Harshavardhana
94dbb4a427 fix: generalize SC config and also skip healing sub-sys under SD (#15757) 2022-09-26 09:04:54 -07:00
Anis Elleuch
048a46ec2a Add RPC tcp timeout/errs and AVG duration to prometheus (#15747) 2022-09-26 09:04:26 -07:00
Minio Trusted
1480340830 Update yaml files to latest version RELEASE.2022-09-25T15-44-53Z 2022-09-26 04:54:15 +00:00
Harshavardhana
877bd95fa3 remove unused package internal/smart (#15758) 2022-09-25 08:44:53 -07:00
Poorna
8ea6fb368d Add auto configuration of replication workers (#15636) 2022-09-24 16:20:28 -07:00
Poorna
5fd5ddea23 relax retention date validation on replication target (#15752) 2022-09-23 21:19:03 -07:00
Harshavardhana
b04c0697e1 validate correct ETag for the parts sent during CompleteMultipart (#15751) 2022-09-23 21:17:08 -07:00
Harshavardhana
50a8ba6a6f fix: parse and save retainUntilDate in correct time format (#15741) 2022-09-23 08:49:27 -07:00
Minio Trusted
334f1ed45a Update yaml files to latest version RELEASE.2022-09-22T18-57-27Z 2022-09-23 06:01:11 +00:00
Anis Elleuch
20c89ebbb3 freeze before exit when _MINIO_DEBUG_NO_EXIT is defined (#15709)
this is to ensure k8s pods keep running when they reach a "crashloop" stage
2022-09-22 11:57:27 -07:00
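As an aside, a minimal Go sketch of the freeze-before-exit pattern described above — the `_MINIO_DEBUG_NO_EXIT` name comes from the commit title, everything else here is an illustrative assumption rather than MinIO's actual implementation:

```go
package main

import (
	"fmt"
	"os"
)

// exitOrFreeze exits with the given code unless _MINIO_DEBUG_NO_EXIT is set,
// in which case it blocks forever so an orchestrator such as Kubernetes does
// not keep restarting the pod and a debugger can still be attached.
func exitOrFreeze(code int) {
	if os.Getenv("_MINIO_DEBUG_NO_EXIT") != "" {
		fmt.Fprintln(os.Stderr, "fatal error: freezing process instead of exiting (debug mode)")
		select {} // block forever instead of exiting
	}
	os.Exit(code)
}

func main() {
	// ... normal startup would go here; on a fatal error, call:
	exitOrFreeze(1)
}
```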
Krishnan Parthasarathi
6f56ba80b3 lifecycle: Assign unique id to rules with empty id (#15731) 2022-09-22 10:51:54 -07:00
Anis Elleuch
6e84283c66 fix: ignoring O_DIRECT in case of erasure single disk (#15734)
fixes #15733 
fixes #15735
2022-09-22 10:41:06 -07:00
Harshavardhana
9d6fddcfdf persist the non-default creds in config (#15711) 2022-09-21 16:14:47 -07:00
Minio Trusted
a83105df9d remove deprecated GO111MODULE=on env 2022-09-21 10:34:45 -07:00
Minio Trusted
9528b55c25 update helm chart v4.0.15 2022-09-21 04:07:01 -07:00
jiuker
749ce107ee fix: context leak with replication endpoint heartbeat (#15721) 2022-09-21 03:08:45 -07:00
Minio Trusted
b2a67834ac upgrade console to v0.20.3 2022-09-21 02:29:50 -07:00
Poorna
aec2aa3497 site replication: clear config if remove --all specified (#15716) 2022-09-20 14:32:23 -07:00
Jan Šafařík
c7dcbfd6c1 helm: specify service account for the jobs (#15706) 2022-09-19 11:06:16 -07:00
Klaus Post
ff12080ff5 Remove deprecated io/ioutil (#15707) 2022-09-19 11:05:16 -07:00
Javier Adriel
0b6175b742 Implement KMS methods for keys, policies and identities (#15673) 2022-09-19 11:04:40 -07:00
Harshavardhana
cf49da387b enable cross compile for openbsd/amd64 (#15701) 2022-09-19 07:01:22 -07:00
Josh Kasuboski
ac714e7e3d helm: default additional labels/annotations as object (#15698) 2022-09-18 08:03:50 -07:00
mersl
79fb79b71c helm: adds support for policy conditions (#15599) 2022-09-18 08:01:58 -07:00
Minio Trusted
98874c3baf Update yaml files to latest version RELEASE.2022-09-17T00-09-45Z 2022-09-17 08:07:32 +00:00
884 changed files with 104804 additions and 58614 deletions

View File

@@ -1,10 +1,17 @@
.git
.github
docs
default.etcd
*.gz
*.tar.gz
*.bzip2
*.zip
browser/node_modules
node_modules
node_modules
docs/debugging/s3-verify/s3-verify
docs/debugging/xl-meta/xl-meta
docs/debugging/s3-check-md5/s3-check-md5
docs/debugging/hash-set/hash-set
docs/debugging/healing-bin/healing-bin
docs/debugging/inspect/inspect
docs/debugging/pprofgoparser/pprofgoparser
docs/debugging/reorder-disks/reorder-disks

View File

@@ -1,3 +1,9 @@
## Community Contribution License
All community contributions in this pull request are licensed to the project maintainers
under the terms of the [Apache 2 license](https://www.apache.org/licenses/LICENSE-2.0).
By creating this pull request I represent that I have the right to license the
contributions to the project maintainers under the Apache 2 license.
## Description

View File

@@ -1,5 +0,0 @@
# Config file for markdownlint-cli
MD033:
allowed_elements:
- details
- summary

View File

@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -20,11 +21,11 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.18.x, 1.19.x]
go-version: [1.21.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
- uses: actions/setup-go@bfdd3570ce990073878bf10f6b2d79082de49492 # v2
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3
- uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3
with:
go-version: ${{ matrix.go-version }}
check-latest: true

View File

@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -20,20 +21,28 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.18.5b7]
go-version: [1.21.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go-version }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Setup dockerfile for build test
run: |
echo "FROM us-docker.pkg.dev/google.com/api-project-999119582588/go-boringcrypto/golang:${{ matrix.go-version }}" > Dockerfile.fips.test
echo "COPY . /minio" >> Dockerfile.fips.test
echo "WORKDIR /minio" >> Dockerfile.fips.test
echo "RUN make" >> Dockerfile.fips.test
GO_VERSION=$(go version | cut -d ' ' -f 3 | sed 's/go//')
echo Detected go version $GO_VERSION
cat > Dockerfile.fips.test <<EOF
FROM golang:${GO_VERSION}
COPY . /minio
WORKDIR /minio
ENV GOEXPERIMENT=boringcrypto
RUN make
EOF
- name: Build
uses: docker/build-push-action@v3
@@ -48,4 +57,4 @@ jobs:
- name: Test binary
run: |
docker run --rm minio/fips-test:latest ./minio --version
docker run --rm -i minio/fips-test:latest /bin/bash -c 'go tool nm ./minio' | grep -q FIPS
docker run --rm -i minio/fips-test:latest /bin/bash -c 'go tool nm ./minio | grep FIPS | grep -q FIPS'

View File

@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -20,22 +21,14 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.18.x, 1.19.x]
go-version: [1.21.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go-version }}
check-latest: true
- uses: actions/cache@v2
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-${{ matrix.go-version }}-go-
- name: Build on ${{ matrix.os }}
if: matrix.os == 'ubuntu-latest'
env:

View File

@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -20,38 +21,21 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.18.x, 1.19.x]
go-version: [1.21.x]
os: [ubuntu-latest, windows-latest]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go-version }}
check-latest: true
- uses: actions/cache@v2
if: matrix.os == 'ubuntu-latest'
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-${{ matrix.go-version }}-go-
- uses: actions/cache@v2
if: matrix.os == 'windows-latest'
with:
path: |
%LocalAppData%\go-build
~/go/pkg/mod
key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-${{ matrix.go-version }}-go-
- name: Build on ${{ matrix.os }}
if: matrix.os == 'windows-latest'
env:
CGO_ENABLED: 0
GO111MODULE: on
run: |
netsh int ipv4 set dynamicport tcp start=60000 num=61000
go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
go test -v --timeout 50m ./...
- name: Build on ${{ matrix.os }}
@@ -63,9 +47,6 @@ jobs:
sudo apt install jq -y
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
nancy_version=$(curl --retry 10 -Ls -o /dev/null -w "%{url_effective}" https://github.com/sonatype-nexus-community/nancy/releases/latest | sed "s/https:\/\/github.com\/sonatype-nexus-community\/nancy\/releases\/tag\///")
curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-${nancy_version}-linux-amd64 && chmod +x nancy
go list -deps -json ./... | jq -s 'unique_by(.Module.Path)|.[]|select(has("Module"))|.Module' | ./nancy sleuth
make
make test
make test-race

View File

@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -20,31 +21,20 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.18.x, 1.19.x]
go-version: [1.21.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go-version }}
check-latest: true
- uses: actions/cache@v2
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-${{ matrix.go-version }}-go-
- name: Build on ${{ matrix.os }}
if: matrix.os == 'ubuntu-latest'
env:
CGO_ENABLED: 0
GO111MODULE: on
MINIO_KMS_KES_CERT_FILE: /home/runner/work/minio/minio/.github/workflows/root.cert
MINIO_KMS_KES_KEY_FILE: /home/runner/work/minio/minio/.github/workflows/root.key
MINIO_KMS_KES_ENDPOINT: "https://play.min.io:7373"
MINIO_KMS_KES_KEY_NAME: "my-minio-key"
MINIO_KMS_SECRET_KEY: "my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw="
MINIO_KMS_AUTO_ENCRYPTION: on
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0

31
.github/workflows/helm-lint.yml vendored Normal file
View File

@@ -0,0 +1,31 @@
name: Helm Chart linting
on:
pull_request:
branches:
- master
- next
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install Helm
uses: azure/setup-helm@v3
- name: Run helm lint
run: |
cd helm/minio
helm lint .

View File

@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -61,7 +62,7 @@ jobs:
# are turned off - i.e. if ldap="", then ldap server is not enabled for
# the tests.
matrix:
go-version: [1.18.x]
go-version: [1.21.x]
ldap: ["", "localhost:389"]
etcd: ["", "http://localhost:2379"]
openid: ["", "http://127.0.0.1:5556/dex"]
@@ -75,24 +76,16 @@ jobs:
openid: "http://127.0.0.1:5556/dex"
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go-version }}
check-latest: true
- uses: actions/cache@v2
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-${{ matrix.go-version }}-go-
- name: Test LDAP/OpenID/Etcd combo
env:
LDAP_TEST_SERVER: ${{ matrix.ldap }}
ETCD_SERVER: ${{ matrix.etcd }}
OPENID_TEST_SERVER: ${{ matrix.openid }}
_MINIO_LDAP_TEST_SERVER: ${{ matrix.ldap }}
_MINIO_ETCD_TEST_SERVER: ${{ matrix.etcd }}
_MINIO_OPENID_TEST_SERVER: ${{ matrix.openid }}
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
@@ -100,20 +93,20 @@ jobs:
- name: Test with multiple OpenID providers
if: matrix.openid == 'http://127.0.0.1:5556/dex'
env:
LDAP_TEST_SERVER: ${{ matrix.ldap }}
ETCD_SERVER: ${{ matrix.etcd }}
OPENID_TEST_SERVER: ${{ matrix.openid }}
OPENID_TEST_SERVER_2: "http://127.0.0.1:5557/dex"
_MINIO_LDAP_TEST_SERVER: ${{ matrix.ldap }}
_MINIO_ETCD_TEST_SERVER: ${{ matrix.etcd }}
_MINIO_OPENID_TEST_SERVER: ${{ matrix.openid }}
_MINIO_OPENID_TEST_SERVER_2: "http://127.0.0.1:5557/dex"
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
make test-iam
- name: Test with Access Management Plugin enabled
env:
LDAP_TEST_SERVER: ${{ matrix.ldap }}
ETCD_SERVER: ${{ matrix.etcd }}
OPENID_TEST_SERVER: ${{ matrix.openid }}
POLICY_PLUGIN_ENDPOINT: "http://127.0.0.1:8080"
_MINIO_LDAP_TEST_SERVER: ${{ matrix.ldap }}
_MINIO_ETCD_TEST_SERVER: ${{ matrix.etcd }}
_MINIO_OPENID_TEST_SERVER: ${{ matrix.openid }}
_MINIO_POLICY_PLUGIN_TEST_ENDPOINT: "http://127.0.0.1:8080"
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
sudo sysctl net.ipv6.conf.default.disable_ipv6=0

View File

@@ -1,30 +0,0 @@
name: Markdown Linter
on:
pull_request:
branches:
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
lint:
name: Lint all docs
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v2
- name: Lint all docs
run: |
npm install -g markdownlint-cli
markdownlint --fix '**/*.md' \
--config /home/runner/work/minio/minio/.github/markdown-lint-cfg.yaml \
--disable MD013 MD040 MD051

77
.github/workflows/mint.yml vendored Normal file
View File

@@ -0,0 +1,77 @@
name: Mint Tests
on:
pull_request:
branches:
- master
- next
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
mint-test:
runs-on: mint
timeout-minutes: 120
steps:
- name: cleanup #https://github.com/actions/checkout/issues/273
run: |
sudo -S rm -rf ${GITHUB_WORKSPACE}
mkdir ${GITHUB_WORKSPACE}
- name: checkout-step
uses: actions/checkout@v3
- name: setup-go-step
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: github sha short
id: vars
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
- name: build-minio
run: |
TAG="quay.io/minio/minio:${{ steps.vars.outputs.sha_short }}" make docker
- name: multipart uploads test
run: |
${GITHUB_WORKSPACE}/.github/workflows/multipart/migrate.sh "${{ steps.vars.outputs.sha_short }}"
- name: compress and encrypt
run: |
${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "compress-encrypt" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
- name: multiple pools
run: |
${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "pools" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
- name: standalone erasure
run: |
${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "erasure" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
- name: The job must cleanup
if: ${{ always() }}
run: |
export JOB_NAME=${{ steps.vars.outputs.sha_short }}
for mode in $(echo compress-encrypt pools erasure); do
docker-compose -f ${GITHUB_WORKSPACE}/.github/workflows/mint/minio-${mode}.yaml down || true
docker-compose -f ${GITHUB_WORKSPACE}/.github/workflows/mint/minio-${mode}.yaml rm || true
done
docker-compose -f ${GITHUB_WORKSPACE}/.github/workflows/multipart/docker-compose-site1.yaml rm -s -f || true
docker-compose -f ${GITHUB_WORKSPACE}/.github/workflows/multipart/docker-compose-site2.yaml rm -s -f || true
for volume in $(docker volume ls -q | grep minio); do
docker volume rm ${volume} || true
done
docker rmi -f quay.io/minio/minio:${{ steps.vars.outputs.sha_short }}
docker system prune -f || true
docker volume prune -f || true
docker volume rm $(docker volume ls -q -f dangling=true) || true

View File

@@ -0,0 +1,80 @@
version: '3.7'
# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: quay.io/minio/minio:${JOB_NAME}
command: server --console-address ":9001" http://minio{1...4}/cdata{1...2}
expose:
- "9000"
- "9001"
environment:
MINIO_CI_CD: "on"
MINIO_ROOT_USER: "minio"
MINIO_ROOT_PASSWORD: "minio123"
MINIO_COMPRESSION_ENABLE: "on"
MINIO_COMPRESSION_MIME_TYPES: "*"
MINIO_COMPRESSION_ALLOW_ENCRYPTION: "on"
MINIO_KMS_SECRET_KEY: "my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw="
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 5s
timeout: 5s
retries: 5
# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
minio1:
<<: *minio-common
hostname: minio1
volumes:
- cdata1-1:/cdata1
- cdata1-2:/cdata2
minio2:
<<: *minio-common
hostname: minio2
volumes:
- cdata2-1:/cdata1
- cdata2-2:/cdata2
minio3:
<<: *minio-common
hostname: minio3
volumes:
- cdata3-1:/cdata1
- cdata3-2:/cdata2
minio4:
<<: *minio-common
hostname: minio4
volumes:
- cdata4-1:/cdata1
- cdata4-2:/cdata2
nginx:
image: nginx:1.19.2-alpine
hostname: nginx
volumes:
- ./nginx-4-node.conf:/etc/nginx/nginx.conf:ro
ports:
- "9000:9000"
- "9001:9001"
depends_on:
- minio1
- minio2
- minio3
- minio4
## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
cdata1-1:
cdata1-2:
cdata2-1:
cdata2-2:
cdata3-1:
cdata3-2:
cdata4-1:
cdata4-2:

View File

@@ -0,0 +1,51 @@
version: '3.7'
# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: quay.io/minio/minio:${JOB_NAME}
command: server --console-address ":9001" edata{1...4}
expose:
- "9000"
- "9001"
environment:
MINIO_CI_CD: "on"
MINIO_ROOT_USER: "minio"
MINIO_ROOT_PASSWORD: "minio123"
MINIO_KMS_SECRET_KEY: "my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw="
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 5s
timeout: 5s
retries: 5
# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
minio1:
<<: *minio-common
hostname: minio1
volumes:
- edata1-1:/edata1
- edata1-2:/edata2
- edata1-3:/edata3
- edata1-4:/edata4
nginx:
image: nginx:1.19.2-alpine
hostname: nginx
volumes:
- ./nginx-1-node.conf:/etc/nginx/nginx.conf:ro
ports:
- "9000:9000"
- "9001:9001"
depends_on:
- minio1
## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
edata1-1:
edata1-2:
edata1-3:
edata1-4:

117
.github/workflows/mint/minio-pools.yaml vendored Normal file
View File

@@ -0,0 +1,117 @@
version: '3.7'
# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: quay.io/minio/minio:${JOB_NAME}
command: server --console-address ":9001" http://minio{1...4}/pdata{1...2} http://minio{5...8}/pdata{1...2}
expose:
- "9000"
- "9001"
environment:
MINIO_CI_CD: "on"
MINIO_ROOT_USER: "minio"
MINIO_ROOT_PASSWORD: "minio123"
MINIO_KMS_SECRET_KEY: "my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw="
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 5s
timeout: 5s
retries: 5
# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
minio1:
<<: *minio-common
hostname: minio1
volumes:
- pdata1-1:/pdata1
- pdata1-2:/pdata2
minio2:
<<: *minio-common
hostname: minio2
volumes:
- pdata2-1:/pdata1
- pdata2-2:/pdata2
minio3:
<<: *minio-common
hostname: minio3
volumes:
- pdata3-1:/pdata1
- pdata3-2:/pdata2
minio4:
<<: *minio-common
hostname: minio4
volumes:
- pdata4-1:/pdata1
- pdata4-2:/pdata2
minio5:
<<: *minio-common
hostname: minio5
volumes:
- pdata5-1:/pdata1
- pdata5-2:/pdata2
minio6:
<<: *minio-common
hostname: minio6
volumes:
- pdata6-1:/pdata1
- pdata6-2:/pdata2
minio7:
<<: *minio-common
hostname: minio7
volumes:
- pdata7-1:/pdata1
- pdata7-2:/pdata2
minio8:
<<: *minio-common
hostname: minio8
volumes:
- pdata8-1:/pdata1
- pdata8-2:/pdata2
nginx:
image: nginx:1.19.2-alpine
hostname: nginx
volumes:
- ./nginx-8-node.conf:/etc/nginx/nginx.conf:ro
ports:
- "9000:9000"
- "9001:9001"
depends_on:
- minio1
- minio2
- minio3
- minio4
- minio5
- minio6
- minio7
- minio8
## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
pdata1-1:
pdata1-2:
pdata2-1:
pdata2-2:
pdata3-1:
pdata3-2:
pdata4-1:
pdata4-2:
pdata5-1:
pdata5-2:
pdata6-1:
pdata6-2:
pdata7-1:
pdata7-2:
pdata8-1:
pdata8-2:

100
.github/workflows/mint/nginx-1-node.conf vendored Normal file
View File

@@ -0,0 +1,100 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 4096;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
# include /etc/nginx/conf.d/*.conf;
upstream minio {
server minio1:9000;
}
upstream console {
ip_hash;
server minio1:9001;
}
server {
listen 9000;
listen [::]:9000;
server_name localhost;
# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m; to restrict file size to a specific value
client_max_body_size 0;
# To disable buffering
proxy_buffering off;
proxy_request_buffering off;
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_connect_timeout 300;
# Default is HTTP/1, keepalive is only enabled in HTTP/1.1
proxy_http_version 1.1;
proxy_set_header Connection "";
chunked_transfer_encoding off;
proxy_pass http://minio;
}
}
server {
listen 9001;
listen [::]:9001;
server_name localhost;
# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m; to restrict file size to a specific value
client_max_body_size 0;
# To disable buffering
proxy_buffering off;
proxy_request_buffering off;
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-NginX-Proxy true;
# This is necessary to pass the correct IP to be hashed
real_ip_header X-Real-IP;
proxy_connect_timeout 300;
# To support websocket
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
chunked_transfer_encoding off;
proxy_pass http://console;
}
}
}

106
.github/workflows/mint/nginx-4-node.conf vendored Normal file
View File

@@ -0,0 +1,106 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 4096;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
# include /etc/nginx/conf.d/*.conf;
upstream minio {
server minio1:9000;
server minio2:9000;
server minio3:9000;
server minio4:9000;
}
upstream console {
ip_hash;
server minio1:9001;
server minio2:9001;
server minio3:9001;
server minio4:9001;
}
server {
listen 9000;
listen [::]:9000;
server_name localhost;
# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m; to restrict file size to a specific value
client_max_body_size 0;
# To disable buffering
proxy_buffering off;
proxy_request_buffering off;
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_connect_timeout 300;
# Default is HTTP/1, keepalive is only enabled in HTTP/1.1
proxy_http_version 1.1;
proxy_set_header Connection "";
chunked_transfer_encoding off;
proxy_pass http://minio;
}
}
server {
listen 9001;
listen [::]:9001;
server_name localhost;
# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m; to restrict file size to a specific value
client_max_body_size 0;
# To disable buffering
proxy_buffering off;
proxy_request_buffering off;
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-NginX-Proxy true;
# This is necessary to pass the correct IP to be hashed
real_ip_header X-Real-IP;
proxy_connect_timeout 300;
# To support websocket
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
chunked_transfer_encoding off;
proxy_pass http://console;
}
}
}

114
.github/workflows/mint/nginx-8-node.conf vendored Normal file
View File

@@ -0,0 +1,114 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 4096;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
# include /etc/nginx/conf.d/*.conf;
upstream minio {
server minio1:9000;
server minio2:9000;
server minio3:9000;
server minio4:9000;
server minio5:9000;
server minio6:9000;
server minio7:9000;
server minio8:9000;
}
upstream console {
ip_hash;
server minio1:9001;
server minio2:9001;
server minio3:9001;
server minio4:9001;
server minio5:9001;
server minio6:9001;
server minio7:9001;
server minio8:9001;
}
server {
listen 9000;
listen [::]:9000;
server_name localhost;
# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m; to restrict file size to a specific value
client_max_body_size 0;
# To disable buffering
proxy_buffering off;
proxy_request_buffering off;
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_connect_timeout 300;
# Default is HTTP/1, keepalive is only enabled in HTTP/1.1
proxy_http_version 1.1;
proxy_set_header Connection "";
chunked_transfer_encoding off;
proxy_pass http://minio;
}
}
server {
listen 9001;
listen [::]:9001;
server_name localhost;
# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m; to restrict file size to a specific value
client_max_body_size 0;
# To disable buffering
proxy_buffering off;
proxy_request_buffering off;
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-NginX-Proxy true;
# This is necessary to pass the correct IP to be hashed
real_ip_header X-Real-IP;
proxy_connect_timeout 300;
# To support websocket
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
chunked_transfer_encoding off;
proxy_pass http://console;
}
}
}

106
.github/workflows/mint/nginx.conf vendored Normal file
View File

@@ -0,0 +1,106 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 4096;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
# include /etc/nginx/conf.d/*.conf;
upstream minio {
server minio1:9000;
server minio2:9000;
server minio3:9000;
server minio4:9000;
}
upstream console {
ip_hash;
server minio1:9001;
server minio2:9001;
server minio3:9001;
server minio4:9001;
}
server {
listen 9000;
listen [::]:9000;
server_name localhost;
# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m; to restrict file size to a specific value
client_max_body_size 0;
# To disable buffering
proxy_buffering off;
proxy_request_buffering off;
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_connect_timeout 300;
# Default is HTTP/1, keepalive is only enabled in HTTP/1.1
proxy_http_version 1.1;
proxy_set_header Connection "";
chunked_transfer_encoding off;
proxy_pass http://minio;
}
}
server {
listen 9001;
listen [::]:9001;
server_name localhost;
# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m; to restrict file size to a specific value
client_max_body_size 0;
# To disable buffering
proxy_buffering off;
proxy_request_buffering off;
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-NginX-Proxy true;
# This is necessary to pass the correct IP to be hashed
real_ip_header X-Real-IP;
proxy_connect_timeout 300;
# To support websocket
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
chunked_transfer_encoding off;
proxy_pass http://console;
}
}
}

View File

@@ -0,0 +1,66 @@
version: '3.7'
# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: quay.io/minio/minio:${RELEASE}
command: server http://site1-minio{1...4}/data{1...2}
environment:
- MINIO_PROMETHEUS_AUTH_TYPE=public
- CI=true
# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
site1-minio1:
<<: *minio-common
hostname: site1-minio1
volumes:
- site1-data1-1:/data1
- site1-data1-2:/data2
site1-minio2:
<<: *minio-common
hostname: site1-minio2
volumes:
- site1-data2-1:/data1
- site1-data2-2:/data2
site1-minio3:
<<: *minio-common
hostname: site1-minio3
volumes:
- site1-data3-1:/data1
- site1-data3-2:/data2
site1-minio4:
<<: *minio-common
hostname: site1-minio4
volumes:
- site1-data4-1:/data1
- site1-data4-2:/data2
site1-nginx:
image: nginx:1.19.2-alpine
hostname: site1-nginx
volumes:
- ./nginx-site1.conf:/etc/nginx/nginx.conf:ro
ports:
- "9001:9001"
depends_on:
- site1-minio1
- site1-minio2
- site1-minio3
- site1-minio4
## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
site1-data1-1:
site1-data1-2:
site1-data2-1:
site1-data2-2:
site1-data3-1:
site1-data3-2:
site1-data4-1:
site1-data4-2:

View File

@@ -0,0 +1,66 @@
version: '3.7'
# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: quay.io/minio/minio:${RELEASE}
command: server http://site2-minio{1...4}/data{1...2}
environment:
- MINIO_PROMETHEUS_AUTH_TYPE=public
- CI=true
# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
site2-minio1:
<<: *minio-common
hostname: site2-minio1
volumes:
- site2-data1-1:/data1
- site2-data1-2:/data2
site2-minio2:
<<: *minio-common
hostname: site2-minio2
volumes:
- site2-data2-1:/data1
- site2-data2-2:/data2
site2-minio3:
<<: *minio-common
hostname: site2-minio3
volumes:
- site2-data3-1:/data1
- site2-data3-2:/data2
site2-minio4:
<<: *minio-common
hostname: site2-minio4
volumes:
- site2-data4-1:/data1
- site2-data4-2:/data2
site2-nginx:
image: nginx:1.19.2-alpine
hostname: site2-nginx
volumes:
- ./nginx-site2.conf:/etc/nginx/nginx.conf:ro
ports:
- "9002:9002"
depends_on:
- site2-minio1
- site2-minio2
- site2-minio3
- site2-minio4
## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
site2-data1-1:
site2-data1-2:
site2-data2-1:
site2-data2-2:
site2-data3-1:
site2-data3-2:
site2-data4-1:
site2-data4-2:

115
.github/workflows/multipart/migrate.sh vendored Executable file
View File

@@ -0,0 +1,115 @@
#!/bin/bash
set -x
## change working directory
cd .github/workflows/multipart/
function cleanup() {
docker-compose -f docker-compose-site1.yaml rm -s -f || true
docker-compose -f docker-compose-site2.yaml rm -s -f || true
for volume in $(docker volume ls -q | grep minio); do
docker volume rm ${volume} || true
done
docker system prune -f || true
docker volume prune -f || true
docker volume rm $(docker volume ls -q -f dangling=true) || true
}
cleanup
if [ ! -f ./mc ]; then
wget --quiet -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x mc
fi
(
cd /tmp
go install github.com/minio/minio/docs/debugging/s3-check-md5@latest
)
export RELEASE=RELEASE.2023-08-29T23-07-35Z
docker-compose -f docker-compose-site1.yaml up -d
docker-compose -f docker-compose-site2.yaml up -d
sleep 30s
./mc alias set site1 http://site1-nginx:9001 minioadmin minioadmin --api s3v4
./mc alias set site2 http://site2-nginx:9002 minioadmin minioadmin --api s3v4
./mc ready site1/
./mc ready site2/
./mc admin replicate add site1 site2
./mc mb site1/testbucket/
./mc cp -r --quiet /usr/bin site1/testbucket/
sleep 5
s3-check-md5 -h
failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
if [ $failed_count_site1 -ne 0 ]; then
echo "failed with multipart on site1 uploads"
exit 1
fi
if [ $failed_count_site2 -ne 0 ]; then
echo "failed with multipart on site2 uploads"
exit 1
fi
./mc cp -r --quiet /usr/bin site1/testbucket/
sleep 5
failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
## we do not need to fail here, since we are going to test
## upgrading to master, healing and being able to recover
## the last version.
if [ $failed_count_site1 -ne 0 ]; then
echo "failed with multipart on site1 uploads ${failed_count_site1}"
fi
if [ $failed_count_site2 -ne 0 ]; then
echo "failed with multipart on site2 uploads ${failed_count_site2}"
fi
export RELEASE=${1}
docker-compose -f docker-compose-site1.yaml up -d
docker-compose -f docker-compose-site2.yaml up -d
./mc ready site1/
./mc ready site2/
for i in $(seq 1 10); do
# mc admin heal -r --remove when used against a LB endpoint
# behaves flaky, let this run 10 times before giving up
./mc admin heal -r --remove --json site1/ 2>&1 >/dev/null
./mc admin heal -r --remove --json site2/ 2>&1 >/dev/null
done
failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
if [ $failed_count_site1 -ne 0 ]; then
echo "failed with multipart on site1 uploads"
exit 1
fi
if [ $failed_count_site2 -ne 0 ]; then
echo "failed with multipart on site2 uploads"
exit 1
fi
cleanup
## change working directory
cd ../../../

View File

@@ -0,0 +1,61 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 4096;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
# include /etc/nginx/conf.d/*.conf;
upstream minio {
server site1-minio1:9000;
server site1-minio2:9000;
server site1-minio3:9000;
server site1-minio4:9000;
}
server {
listen 9001;
listen [::]:9001;
server_name localhost;
# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m; to restrict file size to a specific value
client_max_body_size 0;
# To disable buffering
proxy_buffering off;
proxy_request_buffering off;
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_connect_timeout 300;
# Default is HTTP/1, keepalive is only enabled in HTTP/1.1
proxy_http_version 1.1;
proxy_set_header Connection "";
chunked_transfer_encoding off;
proxy_pass http://minio;
}
}
}

View File

@@ -0,0 +1,61 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 4096;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
# include /etc/nginx/conf.d/*.conf;
upstream minio {
server site2-minio1:9000;
server site2-minio2:9000;
server site2-minio3:9000;
server site2-minio4:9000;
}
server {
listen 9002;
listen [::]:9002;
server_name localhost;
# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m; to restrict file size to a specific value
client_max_body_size 0;
# To disable buffering
proxy_buffering off;
proxy_request_buffering off;
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_connect_timeout 300;
# Default is HTTP/1, keepalive is only enabled in HTTP/1.1
proxy_http_version 1.1;
proxy_set_header Connection "";
chunked_transfer_encoding off;
proxy_pass http://minio;
}
}
}

View File

@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -21,22 +22,14 @@ jobs:
strategy:
matrix:
go-version: [1.18.x, 1.19.x]
go-version: [1.21.x]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go-version }}
check-latest: true
- uses: actions/cache@v2
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-${{ matrix.go-version }}-go-
- name: Test Decom
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0

35
.github/workflows/root-disable.yml vendored Normal file
View File

@@ -0,0 +1,35 @@
name: Root lockdown tests
on:
pull_request:
branches:
- master
- next
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
build:
name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go-version }}
check-latest: true
- name: Start root lockdown tests
run: |
make test-root-disable

46
.github/workflows/run-mint.sh vendored Executable file
View File

@@ -0,0 +1,46 @@
#!/bin/bash
set -ex
export MODE="$1"
export ACCESS_KEY="$2"
export SECRET_KEY="$3"
export JOB_NAME="$4"
export MINT_MODE="full"
docker system prune -f || true
docker volume prune -f || true
docker volume rm $(docker volume ls -f dangling=true) || true
## change working directory
cd .github/workflows/mint
docker-compose -f minio-${MODE}.yaml up -d
sleep 30s
docker system prune -f || true
docker volume prune -f || true
docker volume rm $(docker volume ls -q -f dangling=true) || true
# Stop two nodes, one of each pool, to check that all S3 calls work while quorum is still there
[ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio2
[ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio6
docker run --rm --net=mint_default \
--name="mint-${MODE}-${JOB_NAME}" \
-e SERVER_ENDPOINT="nginx:9000" \
-e ACCESS_KEY="${ACCESS_KEY}" \
-e SECRET_KEY="${SECRET_KEY}" \
-e ENABLE_HTTPS=0 \
-e MINT_MODE="${MINT_MODE}" \
docker.io/minio/mint:edge
docker-compose -f minio-${MODE}.yaml down || true
sleep 10s
docker system prune -f || true
docker volume prune -f || true
docker volume rm $(docker volume ls -q -f dangling=true) || true
## change working directory
cd ../../../

23
.github/workflows/shfmt.yml vendored Normal file
View File

@@ -0,0 +1,23 @@
name: Shell formatting checks
on:
pull_request:
branches:
- master
- next
permissions:
contents: read
jobs:
build:
name: runner / shfmt
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: luizm/action-sh-checker@master
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SHFMT_OPTS: "-s"
with:
sh_checker_shellcheck_disable: true # disable for now

View File

@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- next
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@@ -20,11 +21,11 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.18.x, 1.19.x]
go-version: [1.21.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go-version }}

View File

@@ -3,9 +3,14 @@ on:
pull_request:
branches:
- master
push:
branches:
- master
permissions:
contents: read # to fetch code (actions/checkout)
jobs:
vulncheck:
name: Analysis
@@ -16,7 +21,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.19.x
go-version: 1.21.5
check-latest: true
- name: Get official govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest

7
.gitignore vendored
View File

@@ -25,16 +25,21 @@ mc.*
s3-check-md5*
xl-meta*
healing-*
inspect*
inspect*.zip
200M*
hash-set
minio.RELEASE*
mc
nancy
inspects/*
.bin/
*.gz
docs/debugging/s3-verify/s3-verify
docs/debugging/xl-meta/xl-meta
docs/debugging/s3-check-md5/s3-check-md5
docs/debugging/hash-set/hash-set
docs/debugging/healing-bin/healing-bin
docs/debugging/inspect/inspect
docs/debugging/pprofgoparser/pprofgoparser
docs/debugging/reorder-disks/reorder-disks
docs/debugging/populate-hard-links/populate-hardlinks

View File

@@ -1,46 +1,36 @@
linters-settings:
golint:
min-confidence: 0
gofumpt:
lang-version: "1.18"
# Choose whether or not to use the extra rules that are disabled
# by default
extra-rules: false
simplify: true
misspell:
locale: US
staticcheck:
checks: ['all', '-ST1005', '-ST1000', '-SA4000', '-SA9004', '-SA1019', '-SA1008', '-U1000', '-ST1016']
linters:
disable-all: true
enable:
- typecheck
- goimports
- misspell
- govet
- revive
- ineffassign
- gomodguard
- gofmt
- unconvert
- unused
- durationcheck
- gocritic
- gofumpt
- goimports
- gomodguard
- govet
- ineffassign
- misspell
- revive
- staticcheck
- tenv
- durationcheck
- typecheck
- unconvert
- unused
issues:
exclude-use-default: false
exclude:
- should have a package comment
- error strings should not be capitalized or end with punctuation or a newline
# todo fix these when we get enough time.
- "singleCaseSwitch: should rewrite switch statement to if statement"
- "unlambda: replace"
- "captLocal:"
- "ifElseChain:"
- "elseif:"
service:
golangci-lint-version: 1.43.0 # use the fixed version to not introduce new linters unexpectedly
- "empty-block:"
- "unused-parameter:"
- "dot-imports:"
- should have a package comment
- error strings should not be capitalized or end with punctuation or a newline

View File

@@ -1,6 +0,0 @@
CVE-2020-26160
CVE-2020-15136
CVE-2020-15115
CVE-2020-15114
CVE-2022-2835
CVE-2022-2837

4974
CREDITS

File diff suppressed because it is too large

View File

@@ -1,8 +1,6 @@
FROM minio/minio:latest
ENV PATH=/opt/bin:$PATH
COPY ./minio /opt/bin/minio
COPY ./minio /usr/bin/minio
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

View File

@@ -1,4 +1,31 @@
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.6
FROM golang:1.21-alpine as build
ARG TARGETARCH
ARG RELEASE
ENV GOPATH /go
ENV CGO_ENABLED 0
# Install curl and minisign
RUN apk add -U --no-cache ca-certificates && \
apk add -U --no-cache curl && \
go install aead.dev/minisign/cmd/minisign@v0.2.0
# Download minio binary and signature file
RUN curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
chmod +x /go/bin/minio
# Download mc binary and signature file
RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
chmod +x /go/bin/mc
# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
FROM registry.access.redhat.com/ubi9/ubi-micro:latest
ARG RELEASE
@@ -17,34 +44,18 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
MINIO_CONFIG_ENV_FILE=config.env \
PATH=/opt/bin:$PATH
MC_CONFIG_DIR=/tmp/.mc
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio /usr/bin/minio
COPY --from=build /go/bin/mc /usr/bin/mc
COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE
RUN \
microdnf clean all && \
microdnf update --nodocs && \
microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
microdnf install minisign --nodocs && \
mkdir -p /opt/bin && chmod -R 777 /opt/bin && \
curl -s -q https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.${RELEASE} -o /opt/bin/minio && \
curl -s -q https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.${RELEASE}.sha256sum -o /opt/bin/minio.sha256sum && \
curl -s -q https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.${RELEASE}.minisig -o /opt/bin/minio.minisig && \
microdnf clean all && \
chmod +x /opt/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/verify-minio.sh && \
/usr/bin/verify-minio.sh && \
microdnf clean all
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
EXPOSE 9000
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
CMD ["minio"]

View File

@@ -1,6 +1,31 @@
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.6
FROM golang:1.21-alpine as build
ARG TARGETARCH
ARG RELEASE
ENV GOPATH /go
ENV CGO_ENABLED 0
# Install curl and minisign
RUN apk add -U --no-cache ca-certificates && \
apk add -U --no-cache curl && \
go install aead.dev/minisign/cmd/minisign@v0.2.0
# Download minio binary and signature file
RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
chmod +x /go/bin/minio
# Download mc binary and signature file
RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
chmod +x /go/bin/mc
# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
FROM registry.access.redhat.com/ubi9/ubi-micro:latest
ARG RELEASE
@@ -19,34 +44,18 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
MINIO_CONFIG_ENV_FILE=config.env \
PATH=/opt/bin:$PATH
MC_CONFIG_DIR=/tmp/.mc
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio /usr/bin/minio
COPY --from=build /go/bin/mc /usr/bin/mc
COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE
RUN \
microdnf clean all && \
microdnf update --nodocs && \
microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
microdnf install minisign --nodocs && \
mkdir -p /opt/bin && chmod -R 777 /opt/bin && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /opt/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /opt/bin/minio.sha256sum && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /opt/bin/minio.minisig && \
microdnf clean all && \
chmod +x /opt/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/verify-minio.sh && \
/usr/bin/verify-minio.sh && \
microdnf clean all
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
EXPOSE 9000
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
CMD ["minio"]

View File

@@ -1,6 +1,25 @@
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.6
FROM golang:1.21-alpine as build
ARG TARGETARCH
ARG RELEASE
ENV GOPATH /go
ENV CGO_ENABLED 0
# Install curl and minisign
RUN apk add -U --no-cache ca-certificates && \
apk add -U --no-cache curl && \
go install aead.dev/minisign/cmd/minisign@v0.2.0
# Download minio binary and signature file
RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips -o /go/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.minisig -o /go/bin/minio.minisig && \
chmod +x /go/bin/minio
# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
FROM registry.access.redhat.com/ubi9/ubi-micro:latest
ARG RELEASE
@@ -18,35 +37,17 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_ROOT_PASSWORD_FILE=secret_key \
MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
MINIO_CONFIG_ENV_FILE=config.env \
PATH=/opt/bin:$PATH
MINIO_CONFIG_ENV_FILE=config.env
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio /usr/bin/minio
COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE
RUN \
microdnf clean all && \
microdnf update --nodocs && \
microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
microdnf install minisign --nodocs && \
mkdir -p /opt/bin && chmod -R 777 /opt/bin && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips -o /opt/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.sha256sum -o /opt/bin/minio.sha256sum && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.minisig -o /opt/bin/minio.minisig && \
microdnf clean all && \
chmod +x /opt/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/verify-minio.sh && \
/usr/bin/verify-minio.sh && \
microdnf clean all
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
EXPOSE 9000
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
CMD ["minio"]

View File

@@ -0,0 +1,61 @@
FROM golang:1.21-alpine as build
ARG TARGETARCH
ARG RELEASE
ENV GOPATH /go
ENV CGO_ENABLED 0
# Install curl and minisign
RUN apk add -U --no-cache ca-certificates && \
apk add -U --no-cache curl && \
go install aead.dev/minisign/cmd/minisign@v0.2.0
# Download minio binary and signature file
RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
chmod +x /go/bin/minio
# Download mc binary and signature file
RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
chmod +x /go/bin/mc
# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
FROM registry.access.redhat.com/ubi8/ubi-micro:latest
ARG RELEASE
LABEL name="MinIO" \
vendor="MinIO Inc <dev@min.io>" \
maintainer="MinIO Inc <dev@min.io>" \
version="${RELEASE}" \
release="${RELEASE}" \
summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_ROOT_USER_FILE=access_key \
MINIO_ROOT_PASSWORD_FILE=secret_key \
MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
MINIO_CONFIG_ENV_FILE=config.env \
MC_CONFIG_DIR=/tmp/.mc
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio /usr/bin/minio
COPY --from=build /go/bin/mc /usr/bin/mc
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
EXPOSE 9000
VOLUME ["/data"]
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
CMD ["minio"]

View File

@@ -6,7 +6,10 @@ GOARCH := $(shell go env GOARCH)
GOOS := $(shell go env GOOS)
VERSION ?= $(shell git describe --tags)
TAG ?= "minio/minio:$(VERSION)"
TAG ?= "quay.io/minio/minio:$(VERSION)"
GOLANGCI_DIR = .bin/golangci/$(GOLANGCI_VERSION)
GOLANGCI = $(GOLANGCI_DIR)/golangci-lint
all: build
@@ -19,36 +22,44 @@ help: ## print this help
getdeps: ## fetch necessary dependencies
@mkdir -p ${GOPATH}/bin
@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin
@echo "Installing msgp" && go install -v github.com/tinylib/msgp@f3635b96e4838a6c773babb65ef35297fe5fe2f9
@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOLANGCI_DIR)
@echo "Installing msgp" && go install -v github.com/tinylib/msgp@6ac204f0b4d48d17ab4fa442134c7fba13127a4e
@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest
crosscompile: ## cross compile minio
@(env bash $(PWD)/buildscripts/cross-compile.sh)
verifiers: getdeps lint check-gen
verifiers: lint check-gen
check-gen: ## check for updated autogenerated files
@go generate ./... >/dev/null
@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)
lint: ## runs golangci-lint suite of linters
lint: getdeps ## runs golangci-lint suite of linters
@echo "Running $@ check"
@${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml
@$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml
lint-fix: getdeps ## runs golangci-lint suite of linters with automatic fixes
@echo "Running $@ check"
@$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml --fix
check: test
test: verifiers build ## builds minio, runs linters, tests
test: verifiers build build-debugging ## builds minio, runs linters, tests
@echo "Running unit tests"
@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -tags kqueue ./...
test-decom: install
test-root-disable: install-race
@echo "Running minio root lockdown tests"
@env bash $(PWD)/buildscripts/disable-root.sh
test-decom: install-race
@echo "Running minio decom tests"
@env bash $(PWD)/docs/distributed/decom.sh
@env bash $(PWD)/docs/distributed/decom-encrypted.sh
@env bash $(PWD)/docs/distributed/decom-encrypted-sse-s3.sh
@env bash $(PWD)/docs/distributed/decom-compressed-sse-s3.sh
test-upgrade: build
test-upgrade: install-race
@echo "Running minio upgrade tests"
@(env bash $(PWD)/buildscripts/minio-upgrade.sh)
@@ -62,20 +73,30 @@ test-iam: build ## verify IAM (external IDP, etcd backends)
@echo "Running tests for IAM (external IDP, etcd backends) with -race"
@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd
test-replication: install ## verify multi site replication
@echo "Running tests for replicating three sites"
@(env bash $(PWD)/docs/bucket/replication/setup_3site_replication.sh)
test-sio-error:
@(env bash $(PWD)/docs/bucket/replication/sio-error.sh)
test-replication-2site:
@(env bash $(PWD)/docs/bucket/replication/setup_2site_existing_replication.sh)
test-site-replication-ldap: install ## verify automatic site replication
test-replication-3site:
@(env bash $(PWD)/docs/bucket/replication/setup_3site_replication.sh)
test-delete-replication:
@(env bash $(PWD)/docs/bucket/replication/delete-replication.sh)
test-replication: install-race test-replication-2site test-replication-3site test-delete-replication test-sio-error ## verify multi site replication
@echo "Running tests for replicating three sites"
test-site-replication-ldap: install-race ## verify automatic site replication
@echo "Running tests for automatic site replication of IAM (with LDAP)"
@(env bash $(PWD)/docs/site-replication/run-multi-site-ldap.sh)
test-site-replication-oidc: install ## verify automatic site replication
test-site-replication-oidc: install-race ## verify automatic site replication
@echo "Running tests for automatic site replication of IAM (with OIDC)"
@(env bash $(PWD)/docs/site-replication/run-multi-site-oidc.sh)
test-site-replication-minio: install ## verify automatic site replication
test-site-replication-minio: install-race ## verify automatic site replication
@echo "Running tests for automatic site replication of IAM (with MinIO IDP)"
@(env bash $(PWD)/docs/site-replication/run-multi-site-minio-idp.sh)
@@ -106,6 +127,9 @@ verify-healing-inconsistent-versions: ## verify resolving inconsistent versions
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/resolve-right-versions.sh)
build-debugging:
@(env bash $(PWD)/docs/debugging/build.sh)
build: checks ## builds minio to $(PWD)
@echo "Building minio binary to './minio'"
@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@@ -116,14 +140,23 @@ hotfix-vars:
$(eval VERSION := $(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD))
$(eval TAG := "minio/minio:$(VERSION)")
hotfix: hotfix-vars install ## builds minio binary with hotfix tags
@mv -f ./minio ./minio.$(VERSION)
@minisign -qQSm ./minio.$(VERSION) -s "${CRED_DIR}/minisign.key" < "${CRED_DIR}/minisign-passphrase"
@sha256sum < ./minio.$(VERSION) | sed 's, -,minio.$(VERSION),g' > minio.$(VERSION).sha256sum
hotfix: hotfix-vars clean install ## builds minio binary with hotfix tags
@wget -q -c https://github.com/minio/pkger/releases/download/v2.2.0/pkger_2.2.0_linux_amd64.deb
@wget -q -c https://raw.githubusercontent.com/minio/minio-service/v1.0.0/linux-systemd/distributed/minio.service
@sudo apt install ./pkger_2.2.0_linux_amd64.deb --yes
@mkdir -p minio-release/$(GOOS)-$(GOARCH)/archive
@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio
@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION)
@minisign -qQSm minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION) -s "${CRED_DIR}/minisign.key" < "${CRED_DIR}/minisign-passphrase"
@sha256sum < minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION) | sed 's, -,minio.$(VERSION),g' > minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION).sha256sum
@cp -af minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION)* minio-release/$(GOOS)-$(GOARCH)/archive/
@pkger -r $(VERSION) --ignore
hotfix-push: hotfix
@scp -q -r minio.$(VERSION)* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive/
@scp -q -r minio.$(VERSION)* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive/
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive
@echo "Published new hotfix binaries at https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.$(VERSION)"
docker-hotfix-push: docker-hotfix
@@ -133,10 +166,16 @@ docker-hotfix: hotfix-push checks ## builds minio docker container with hotfix t
@echo "Building minio docker image '$(TAG)'"
@docker build -q --no-cache -t $(TAG) --build-arg RELEASE=$(VERSION) . -f Dockerfile.hotfix
docker: build checks ## builds minio docker container
docker: build ## builds minio docker container
@echo "Building minio docker image '$(TAG)'"
@docker build -q --no-cache -t $(TAG) . -f Dockerfile
install-race: checks ## builds minio to $(PWD)
@echo "Building minio binary to './minio'"
@GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
install: build ## builds minio and installs it to $GOPATH/bin.
@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
@@ -152,3 +191,6 @@ clean: ## cleanup all generated assets
@rm -rvf build
@rm -rvf release
@rm -rvf .verify*
@rm -rvf minio-release
@rm -rvf minio.RELEASE*.hotfix.*
@rm -rvf pkger_*.deb
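The reworked `hotfix` recipe above stages the signed binary under `minio-release/$(GOOS)-$(GOARCH)/` and packages it with `pkger`; it expects `CRED_DIR` to hold the minisign key material. A minimal sketch of driving it, with the credential path as an assumption:

```sh
# Sketch only: CRED_DIR layout is an assumption taken from the recipe above
export CRED_DIR=$HOME/minio-credentials   # must contain minisign.key and minisign-passphrase
make hotfix                               # builds, signs, and packages the hotfix binary under minio-release/
```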

NOTICE

@@ -1,4 +1,4 @@
MinIO Project, (C) 2015-2021 MinIO, Inc.
MinIO Project, (C) 2015-2023 MinIO, Inc.
This product includes software developed at MinIO, Inc.
(https://min.io/).


@@ -14,7 +14,7 @@ Use the following commands to run a standalone MinIO server as a container.
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)
for more complete documentation.
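For reference, a minimal non-container sketch of a deployment that meets the 4-drive erasure-coding minimum on a single host; the drive paths and console port are illustrative:

```sh
# Single node, four drives (paths are illustrative); MinIO erasure-codes objects across them
minio server /mnt/drive{1...4} --console-address ":9001"
```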
### Stable
@@ -32,7 +32,7 @@ root credentials. You can use the Browser to create buckets, upload objects, and
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> NOTE: To deploy MinIO with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the container.
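A hedged sketch of such a mapping with `podman`; the host path, ports, and image tag are assumptions:

```sh
# Ports, host path, and image tag are illustrative
podman run -d \
  -p 9000:9000 -p 9001:9001 \
  -v /mnt/data:/data \
  quay.io/minio/minio server /data --console-address ":9001"
```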
@@ -40,7 +40,7 @@ see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to vi
Use the following commands to run a standalone MinIO server on macOS.
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
### Homebrew (recommended)
@@ -60,7 +60,7 @@ brew install minio/stable/minio
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html/> to view MinIO SDKs for supported languages.
### Binary Download
@@ -74,7 +74,7 @@ chmod +x minio
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
## GNU/Linux
@@ -86,8 +86,6 @@ chmod +x minio
./minio server /data
```
Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
The following table lists supported architectures. Replace the `wget` URL with the architecture for your Linux host.
| Architecture | URL |
@@ -99,9 +97,9 @@ The following table lists supported architectures. Replace the `wget` URL with t
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
## Microsoft Windows
@@ -119,23 +117,23 @@ minio.exe server D:\
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
## Install from Source
Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.18](https://golang.org/dl/#stable)
Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.19](https://golang.org/dl/#stable)
```sh
GO111MODULE=on go install github.com/minio/minio@latest
go install github.com/minio/minio@latest
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.
@@ -202,7 +200,7 @@ service iptables restart
MinIO Server comes with an embedded web based object browser. Point your web browser to <http://127.0.0.1:9000> to ensure your server has started successfully.
> NOTE: MinIO runs console on random port by default if you wish choose a specific port use `--console-address` to pick a specific interface and port.
> NOTE: MinIO runs console on random port by default, if you wish to choose a specific port use `--console-address` to pick a specific interface and port.
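For example, a minimal sketch pinning the Console to a fixed port (the port value is an assumption):

```sh
# Serve the S3 API on :9000 (default) and the embedded Console on :9001
./minio server /data --console-address ":9001"
```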
### Things to consider
@@ -222,7 +220,7 @@ For example: `export MINIO_SERVER_URL="https://minio.example.net"`
## Test using MinIO Client `mc`
`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://docs.min.io/docs/minio-client-quickstart-guide) for further instructions.
`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart) for further instructions.
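A short sketch of a first session with `mc`; the alias name, credentials, and bucket are illustrative defaults, not values mandated by this change:

```sh
# Register the deployment under an alias, then create a bucket and copy an object
mc alias set myminio http://127.0.0.1:9000 minioadmin minioadmin
mc mb myminio/mybucket
mc cp ./hosts.txt myminio/mybucket/
mc ls myminio/mybucket
```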
## Upgrading MinIO
@@ -230,7 +228,7 @@ Upgrades require zero downtime in MinIO, all upgrades are non-disruptive, all tr
> NOTE: requires internet access to update directly from <https://dl.min.io>, optionally you can host any mirrors at <https://my-artifactory.example.com/minio/>
- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://docs.min.io/minio/baremetal/reference/minio-mc-admin/mc-admin-update.html)
- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://min.io/docs/minio/linux/reference/minio-mc-admin/mc-admin-update.html)
```sh
mc admin update <minio alias, e.g., myminio>
@@ -243,19 +241,17 @@ mc admin update <minio alias, e.g., myminio>
### Upgrade Checklist
- Test all upgrades in a lower environment (DEV, QA, UAT) before applying to production. Performing blind upgrades in production environments carries significant risk.
- Read the release notes for MinIO *before* performing any upgrade, there is no forced requirement to upgrade to latest releases upon every releases. Some releases may not be relevant to your setup, avoid upgrading production environments unnecessarily.
- Read the release notes for MinIO *before* performing any upgrade, there is no forced requirement to upgrade to latest release upon every release. Some release may not be relevant to your setup, avoid upgrading production environments unnecessarily.
- If you plan to use `mc admin update`, MinIO process must have write access to the parent directory where the binary is present on the host system.
- `mc admin update` is not supported and should be avoided in kubernetes/container environments, please upgrade containers by upgrading relevant container images.
- **We do not recommend upgrading one MinIO server at a time, the product is designed to support parallel upgrades please follow our recommended guidelines.**
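A hedged sketch of the check-then-update flow the checklist above implies; `myminio` is an assumed alias:

```sh
# Inspect the running version first, then update all servers in the deployment in parallel
mc admin info myminio
mc admin update myminio
```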
## Explore Further
- [MinIO Erasure Code QuickStart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
- [Use `mc` with MinIO Server](https://docs.min.io/docs/minio-client-quickstart-guide)
- [Use `aws-cli` with MinIO Server](https://docs.min.io/docs/aws-cli-with-minio)
- [Use `s3cmd` with MinIO Server](https://docs.min.io/docs/s3cmd-with-minio)
- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/docs/golang-client-quickstart-guide)
- [The MinIO documentation website](https://docs.min.io)
- [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)
- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html)
- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html)
- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html)
## Contribute to MinIO Project
@@ -263,6 +259,6 @@ Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/ma
## License
- MinIO source is licensed under the GNU AGPLv3 license that can be found in the [LICENSE](https://github.com/minio/minio/blob/master/LICENSE) file.
- MinIO [Documentation](https://github.com/minio/minio/tree/master/docs) © 2021 by MinIO, Inc is licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/).
- MinIO source is licensed under the [GNU AGPLv3](https://github.com/minio/minio/blob/master/LICENSE).
- MinIO [documentation](https://github.com/minio/minio/tree/master/docs) is licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/).
- [License Compliance](https://github.com/minio/minio/blob/master/COMPLIANCE.md)


@@ -3,19 +3,19 @@
_init() {
shopt -s extglob
shopt -s extglob
## Minimum required versions for build dependencies
GIT_VERSION="1.0"
GO_VERSION="1.16"
OSX_VERSION="10.8"
KNAME=$(uname -s)
ARCH=$(uname -m)
case "${KNAME}" in
SunOS )
ARCH=$(isainfo -k)
;;
esac
## Minimum required versions for build dependencies
GIT_VERSION="1.0"
GO_VERSION="1.16"
OSX_VERSION="10.8"
KNAME=$(uname -s)
ARCH=$(uname -m)
case "${KNAME}" in
SunOS)
ARCH=$(isainfo -k)
;;
esac
}
## FIXME:
@@ -28,24 +28,23 @@ _init() {
## }
##
readlink() {
TARGET_FILE=$1
TARGET_FILE=$1
cd `dirname $TARGET_FILE`
TARGET_FILE=`basename $TARGET_FILE`
cd $(dirname $TARGET_FILE)
TARGET_FILE=$(basename $TARGET_FILE)
# Iterate down a (possible) chain of symlinks
while [ -L "$TARGET_FILE" ]
do
TARGET_FILE=$(env readlink $TARGET_FILE)
cd `dirname $TARGET_FILE`
TARGET_FILE=`basename $TARGET_FILE`
done
# Iterate down a (possible) chain of symlinks
while [ -L "$TARGET_FILE" ]; do
TARGET_FILE=$(env readlink $TARGET_FILE)
cd $(dirname $TARGET_FILE)
TARGET_FILE=$(basename $TARGET_FILE)
done
# Compute the canonicalized name by finding the physical path
# for the directory we're in and appending the target file.
PHYS_DIR=`pwd -P`
RESULT=$PHYS_DIR/$TARGET_FILE
echo $RESULT
# Compute the canonicalized name by finding the physical path
# for the directory we're in and appending the target file.
PHYS_DIR=$(pwd -P)
RESULT=$PHYS_DIR/$TARGET_FILE
echo $RESULT
}
## FIXME:
@@ -59,84 +58,86 @@ readlink() {
## }
##
check_minimum_version() {
IFS='.' read -r -a varray1 <<< "$1"
IFS='.' read -r -a varray2 <<< "$2"
IFS='.' read -r -a varray1 <<<"$1"
IFS='.' read -r -a varray2 <<<"$2"
for i in "${!varray1[@]}"; do
if [[ ${varray1[i]} -lt ${varray2[i]} ]]; then
return 0
elif [[ ${varray1[i]} -gt ${varray2[i]} ]]; then
return 1
fi
done
for i in "${!varray1[@]}"; do
if [[ ${varray1[i]} -lt ${varray2[i]} ]]; then
return 0
elif [[ ${varray1[i]} -gt ${varray2[i]} ]]; then
return 1
fi
done
return 0
return 0
}
assert_is_supported_arch() {
case "${ARCH}" in
x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x )
return
;;
*)
echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x]"
exit 1
esac
case "${ARCH}" in
x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64)
return
;;
*)
echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64]"
exit 1
;;
esac
}
assert_is_supported_os() {
case "${KNAME}" in
Linux | FreeBSD | OpenBSD | NetBSD | DragonFly | SunOS )
return
;;
Darwin )
osx_host_version=$(env sw_vers -productVersion)
if ! check_minimum_version "${OSX_VERSION}" "${osx_host_version}"; then
echo "OSX version '${osx_host_version}' is not supported. Minimum supported version: ${OSX_VERSION}"
exit 1
fi
return
;;
*)
echo "OS '${KNAME}' is not supported. Supported OS: [Linux, FreeBSD, OpenBSD, NetBSD, Darwin, DragonFly]"
exit 1
esac
case "${KNAME}" in
Linux | FreeBSD | OpenBSD | NetBSD | DragonFly | SunOS)
return
;;
Darwin)
osx_host_version=$(env sw_vers -productVersion)
if ! check_minimum_version "${OSX_VERSION}" "${osx_host_version}"; then
echo "OSX version '${osx_host_version}' is not supported. Minimum supported version: ${OSX_VERSION}"
exit 1
fi
return
;;
*)
echo "OS '${KNAME}' is not supported. Supported OS: [Linux, FreeBSD, OpenBSD, NetBSD, Darwin, DragonFly]"
exit 1
;;
esac
}
assert_check_golang_env() {
if ! which go >/dev/null 2>&1; then
echo "Cannot find go binary in your PATH configuration, please refer to Go installation document at https://golang.org/doc/install"
exit 1
fi
if ! which go >/dev/null 2>&1; then
echo "Cannot find go binary in your PATH configuration, please refer to Go installation document at https://golang.org/doc/install"
exit 1
fi
installed_go_version=$(go version | sed 's/^.* go\([0-9.]*\).*$/\1/')
if ! check_minimum_version "${GO_VERSION}" "${installed_go_version}"; then
echo "Go runtime version '${installed_go_version}' is unsupported. Minimum supported version: ${GO_VERSION} to compile."
exit 1
fi
installed_go_version=$(go version | sed 's/^.* go\([0-9.]*\).*$/\1/')
if ! check_minimum_version "${GO_VERSION}" "${installed_go_version}"; then
echo "Go runtime version '${installed_go_version}' is unsupported. Minimum supported version: ${GO_VERSION} to compile."
exit 1
fi
}
assert_check_deps() {
# support unusual Git versions such as: 2.7.4 (Apple Git-66)
installed_git_version=$(git version | perl -ne '$_ =~ m/git version (.*?)( |$)/; print "$1\n";')
if ! check_minimum_version "${GIT_VERSION}" "${installed_git_version}"; then
echo "Git version '${installed_git_version}' is not supported. Minimum supported version: ${GIT_VERSION}"
exit 1
fi
# support unusual Git versions such as: 2.7.4 (Apple Git-66)
installed_git_version=$(git version | perl -ne '$_ =~ m/git version (.*?)( |$)/; print "$1\n";')
if ! check_minimum_version "${GIT_VERSION}" "${installed_git_version}"; then
echo "Git version '${installed_git_version}' is not supported. Minimum supported version: ${GIT_VERSION}"
exit 1
fi
}
main() {
## Check for supported arch
assert_is_supported_arch
## Check for supported arch
assert_is_supported_arch
## Check for supported os
assert_is_supported_os
## Check for supported os
assert_is_supported_os
## Check for Go environment
assert_check_golang_env
## Check for Go environment
assert_check_golang_env
## Check for dependencies
assert_check_deps
## Check for dependencies
assert_check_deps
}
_init && main "$@"


@@ -5,33 +5,33 @@ set -e
[ -n "$BASH_XTRACEFD" ] && set -x
function _init() {
## All binaries are static make sure to disable CGO.
export CGO_ENABLED=0
## All binaries are static make sure to disable CGO.
export CGO_ENABLED=0
## List of architectures and OS to test cross compilation.
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips"
## List of architectures and OS to test cross compilation.
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips"
}
function _build() {
local osarch=$1
IFS=/ read -r -a arr <<<"$osarch"
os="${arr[0]}"
arch="${arr[1]}"
package=$(go list -f '{{.ImportPath}}')
printf -- "--> %15s:%s\n" "${osarch}" "${package}"
local osarch=$1
IFS=/ read -r -a arr <<<"$osarch"
os="${arr[0]}"
arch="${arr[1]}"
package=$(go list -f '{{.ImportPath}}')
printf -- "--> %15s:%s\n" "${osarch}" "${package}"
# go build -trimpath to build the binary.
export GOOS=$os
export GOARCH=$arch
export GO111MODULE=on
go build -trimpath -tags kqueue -o /dev/null
# go build -trimpath to build the binary.
export GOOS=$os
export GOARCH=$arch
export GO111MODULE=on
go build -trimpath -tags kqueue -o /dev/null
}
function main() {
echo "Testing builds for OS/Arch: ${SUPPORTED_OSARCH}"
for each_osarch in ${SUPPORTED_OSARCH}; do
_build "${each_osarch}"
done
echo "Testing builds for OS/Arch: ${SUPPORTED_OSARCH}"
for each_osarch in ${SUPPORTED_OSARCH}; do
_build "${each_osarch}"
done
}
_init && main "$@"

buildscripts/disable-root.sh Executable file

@@ -0,0 +1,121 @@
#!/bin/bash
set -x
export MINIO_CI_CD=1
killall -9 minio
rm -rf ${HOME}/tmp/dist
scheme="http"
nr_servers=4
addr="localhost"
args=""
for ((i = 0; i < $((nr_servers)); i++)); do
args="$args $scheme://$addr:$((9100 + i))/${HOME}/tmp/dist/path1/$i"
done
echo $args
for ((i = 0; i < $((nr_servers)); i++)); do
(minio server --address ":$((9100 + i))" $args 2>&1 >/tmp/log$i.txt) &
done
sleep 10s
if [ ! -f ./mc ]; then
wget --quiet -O ./mc https://dl.minio.io/client/mc/release/linux-amd64/./mc &&
chmod +x mc
fi
set +e
export MC_HOST_minioadm=http://minioadmin:minioadmin@localhost:9100/
./mc ls minioadm/
./mc admin config set minioadm/ api root_access=off
sleep 3s # let things settle a little
./mc ls minioadm/
if [ $? -eq 0 ]; then
echo "listing succeeded, 'minioadmin' was not disabled"
exit 1
fi
set -e
killall -9 minio
export MINIO_API_ROOT_ACCESS=on
for ((i = 0; i < $((nr_servers)); i++)); do
(minio server --address ":$((9100 + i))" $args 2>&1 >/tmp/log$i.txt) &
done
set +e
sleep 10
./mc ls minioadm/
if [ $? -ne 0 ]; then
echo "listing failed, 'minioadmin' should be enabled"
exit 1
fi
killall -9 minio
rm -rf /tmp/multisitea/
rm -rf /tmp/multisiteb/
echo "Setup site-replication and then disable root credentials"
minio server --address 127.0.0.1:9001 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_1.log 2>&1 &
minio server --address 127.0.0.1:9002 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_2.log 2>&1 &
minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_1.log 2>&1 &
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &
sleep 20s
export MC_HOST_sitea=http://minioadmin:minioadmin@127.0.0.1:9001
export MC_HOST_siteb=http://minioadmin:minioadmin@127.0.0.1:9004
./mc admin replicate add sitea siteb
./mc admin user add sitea foobar foo12345
./mc admin policy attach sitea/ consoleAdmin --user=foobar
./mc admin user info siteb foobar
killall -9 minio
echo "turning off root access, however site replication must continue"
export MINIO_API_ROOT_ACCESS=off
minio server --address 127.0.0.1:9001 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_1.log 2>&1 &
minio server --address 127.0.0.1:9002 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_2.log 2>&1 &
minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_1.log 2>&1 &
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &
sleep 20s
export MC_HOST_sitea=http://foobar:foo12345@127.0.0.1:9001
export MC_HOST_siteb=http://foobar:foo12345@127.0.0.1:9004
./mc admin user add sitea foobar-admin foo12345
sleep 2s
./mc admin user info siteb foobar-admin


@@ -6,88 +6,87 @@ set -x
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
echo "minio executable binary not found in current directory"
exit 1
fi
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
echo "minio executable binary not found in current directory"
exit 1
fi
function start_minio_4drive() {
start_port=$1
start_port=$1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
export MINIO_CI_CD=1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
export MINIO_CI_CD=1
mkdir ${WORK_DIR}
C_PWD=${PWD}
if [ ! -x "$PWD/mc" ]; then
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
mkdir ${WORK_DIR}
C_PWD=${PWD}
if [ ! -x "$PWD/mc" ]; then
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$C_PWD/mc")
# remove mc source.
purge "${MC_BUILD_DIR}"
fi
(cd "${MC_BUILD_DIR}" && go build -o "$C_PWD/mc")
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/disk{1...4}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 5
# remove mc source.
purge "${MC_BUILD_DIR}"
fi
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/disk{1...4}" > "${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 5
"${PWD}/mc" mb --with-versioning minio/bucket
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
for i in $(seq 1 4); do
"${PWD}/mc" cp /etc/hosts minio/bucket/testobj
"${PWD}/mc" mb --with-versioning minio/bucket
sudo chown -R root. "${WORK_DIR}/disk${i}"
for i in $(seq 1 4); do
"${PWD}/mc" cp /etc/hosts minio/bucket/testobj
"${PWD}/mc" cp /etc/hosts minio/bucket/testobj
sudo chown -R root. "${WORK_DIR}/disk${i}"
sudo chown -R ${USER}. "${WORK_DIR}/disk${i}"
done
"${PWD}/mc" cp /etc/hosts minio/bucket/testobj
for vid in $("${PWD}/mc" ls --json --versions minio/bucket/testobj | jq -r .versionId); do
"${PWD}/mc" cat --vid "${vid}" minio/bucket/testobj | md5sum
done
sudo chown -R ${USER}. "${WORK_DIR}/disk${i}"
done
for vid in $("${PWD}/mc" ls --json --versions minio/bucket/testobj | jq -r .versionId); do
"${PWD}/mc" cat --vid "${vid}" minio/bucket/testobj | md5sum
done
pkill minio
sleep 3
pkill minio
sleep 3
}
function main() {
start_port=$(shuf -i 10000-65000 -n 1)
start_port=$(shuf -i 10000-65000 -n 1)
start_minio_4drive ${start_port}
start_minio_4drive ${start_port}
}
function purge()
{
rm -rf "$1"
function purge() {
rm -rf "$1"
}
( main "$@" )
(main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"


@@ -27,7 +27,7 @@ import (
"os"
"time"
"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v3"
)
func main() {


@@ -4,89 +4,89 @@ trap 'cleanup $LINENO' ERR
# shellcheck disable=SC2120
cleanup() {
MINIO_VERSION=dev docker-compose \
-f "buildscripts/upgrade-tests/compose.yml" \
rm -s -f
docker volume prune -f
MINIO_VERSION=dev docker-compose \
-f "buildscripts/upgrade-tests/compose.yml" \
rm -s -f
docker volume prune -f
}
verify_checksum_after_heal() {
local sum1
sum1=$(curl -s "$2" | sha256sum);
mc admin heal --json -r "$1" >/dev/null; # test after healing
local sum1_heal
sum1_heal=$(curl -s "$2" | sha256sum);
local sum1
sum1=$(curl -s "$2" | sha256sum)
mc admin heal --json -r "$1" >/dev/null # test after healing
local sum1_heal
sum1_heal=$(curl -s "$2" | sha256sum)
if [ "${sum1_heal}" != "${sum1}" ]; then
echo "mismatch expected ${sum1_heal}, got ${sum1}"
exit 1;
fi
if [ "${sum1_heal}" != "${sum1}" ]; then
echo "mismatch expected ${sum1_heal}, got ${sum1}"
exit 1
fi
}
verify_checksum_mc() {
local expected
expected=$(mc cat "$1" | sha256sum)
local got
got=$(mc cat "$2" | sha256sum)
local expected
expected=$(mc cat "$1" | sha256sum)
local got
got=$(mc cat "$2" | sha256sum)
if [ "${expected}" != "${got}" ]; then
echo "mismatch - expected ${expected}, got ${got}"
exit 1;
fi
echo "matches - ${expected}, got ${got}"
if [ "${expected}" != "${got}" ]; then
echo "mismatch - expected ${expected}, got ${got}"
exit 1
fi
echo "matches - ${expected}, got ${got}"
}
add_alias() {
for i in $(seq 1 4); do
echo "... attempting to add alias $i"
until (mc alias set minio http://127.0.0.1:9000 minioadmin minioadmin); do
echo "...waiting... for 5secs" && sleep 5
done
done
for i in $(seq 1 4); do
echo "... attempting to add alias $i"
until (mc alias set minio http://127.0.0.1:9000 minioadmin minioadmin); do
echo "...waiting... for 5secs" && sleep 5
done
done
echo "Sleeping for nginx"
sleep 20
echo "Sleeping for nginx"
sleep 20
}
__init__() {
sudo apt install curl -y
export GOPATH=/tmp/gopath
export PATH=${PATH}:${GOPATH}/bin
sudo apt install curl -y
export GOPATH=/tmp/gopath
export PATH=${PATH}:${GOPATH}/bin
go install github.com/minio/mc@latest
go install github.com/minio/mc@latest
TAG=minio/minio:dev make docker
TAG=minio/minio:dev make docker
MINIO_VERSION=RELEASE.2019-12-19T22-52-26Z docker-compose \
-f "buildscripts/upgrade-tests/compose.yml" \
up -d --build
MINIO_VERSION=RELEASE.2019-12-19T22-52-26Z docker-compose \
-f "buildscripts/upgrade-tests/compose.yml" \
up -d --build
add_alias
add_alias
mc mb minio/minio-test/
mc cp ./minio minio/minio-test/to-read/
mc cp /etc/hosts minio/minio-test/to-read/hosts
mc policy set download minio/minio-test
mc mb minio/minio-test/
mc cp ./minio minio/minio-test/to-read/
mc cp /etc/hosts minio/minio-test/to-read/hosts
mc anonymous set download minio/minio-test
verify_checksum_mc ./minio minio/minio-test/to-read/minio
verify_checksum_mc ./minio minio/minio-test/to-read/minio
curl -s http://127.0.0.1:9000/minio-test/to-read/hosts | sha256sum
curl -s http://127.0.0.1:9000/minio-test/to-read/hosts | sha256sum
MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
}
main() {
MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build
MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build
add_alias
add_alias
verify_checksum_after_heal minio/minio-test http://127.0.0.1:9000/minio-test/to-read/hosts
verify_checksum_after_heal minio/minio-test http://127.0.0.1:9000/minio-test/to-read/hosts
verify_checksum_mc ./minio minio/minio-test/to-read/minio
verify_checksum_mc ./minio minio/minio-test/to-read/minio
verify_checksum_mc /etc/hosts minio/minio-test/to-read/hosts
verify_checksum_mc /etc/hosts minio/minio-test/to-read/hosts
cleanup
cleanup
}
( __init__ "$@" && main "$@" )
(__init__ "$@" && main "$@")


@@ -5,7 +5,6 @@ set -e
export GORACE="history_size=7"
export MINIO_API_REQUESTS_MAX=10000
## TODO remove `dsync` from race detector once this is merged and released https://go-review.googlesource.com/c/go/+/333529/
for d in $(go list ./... | grep -v dsync); do
CGO_ENABLED=1 go test -v -race --timeout 100m "$d"
for d in $(go list ./...); do
CGO_ENABLED=1 go test -v -race --timeout 100m "$d"
done


@@ -3,70 +3,70 @@
set -E
set -o pipefail
set -x
set -e
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
echo "minio executable binary not found in current directory"
exit 1
fi
function start_minio_5drive() {
start_port=$1
start_port=$1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
export MINIO_CI_CD=1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
export MINIO_CI_CD=1
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
# remove mc source.
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
"${WORK_DIR}/mc" cp --quiet -r "buildscripts/cicd-corpus/" "${WORK_DIR}/cicd-corpus/"
# remove mc source.
purge "${MC_BUILD_DIR}"
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/cicd-corpus/disk{1...5}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 5
"${WORK_DIR}/mc" cp --quiet -r "buildscripts/cicd-corpus/" "${WORK_DIR}/cicd-corpus/"
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/cicd-corpus/disk{1...5}" > "${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 5
"${WORK_DIR}/mc" stat minio/bucket/testobj
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${WORK_DIR}/mc" stat minio/bucket/testobj
pkill minio
sleep 3
pkill minio
sleep 3
}
function main() {
start_port=$(shuf -i 10000-65000 -n 1)
start_port=$(shuf -i 10000-65000 -n 1)
start_minio_5drive ${start_port}
start_minio_5drive ${start_port}
}
function purge()
{
rm -rf "$1"
function purge() {
rm -rf "$1"
}
( main "$@" )
(main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"


@@ -6,146 +6,151 @@ set -x
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO_OLD=( "$PWD/minio.RELEASE.2020-10-28T08-16-50Z" --config-dir "$MINIO_CONFIG_DIR" server )
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )
MINIO_OLD=("$PWD/minio.RELEASE.2020-10-28T08-16-50Z" --config-dir "$MINIO_CONFIG_DIR" server)
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
echo "minio executable binary not found in current directory"
exit 1
fi
function download_old_release() {
if [ ! -f minio.RELEASE.2020-10-28T08-16-50Z ]; then
curl --silent -O https://dl.minio.io/server/minio/release/linux-amd64/archive/minio.RELEASE.2020-10-28T08-16-50Z
chmod a+x minio.RELEASE.2020-10-28T08-16-50Z
fi
if [ ! -f minio.RELEASE.2020-10-28T08-16-50Z ]; then
curl --silent -O https://dl.minio.io/server/minio/release/linux-amd64/archive/minio.RELEASE.2020-10-28T08-16-50Z
chmod a+x minio.RELEASE.2020-10-28T08-16-50Z
fi
}
function verify_rewrite() {
start_port=$1
start_port=$1
export MINIO_ACCESS_KEY=minio
export MINIO_SECRET_KEY=minio123
export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
export MINIO_CI_CD=1
export MINIO_ACCESS_KEY=minio
export MINIO_SECRET_KEY=minio123
export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
export MINIO_CI_CD=1
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
# remove mc source.
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
"${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 10
# remove mc source.
purge "${MC_BUILD_DIR}"
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" > "${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 10
"${WORK_DIR}/mc" mb minio/healing-rewrite-bucket --quiet --with-lock
"${WORK_DIR}/mc" cp \
buildscripts/verify-build.sh \
minio/healing-rewrite-bucket/ \
--disable-multipart --quiet
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${WORK_DIR}/mc" cp \
buildscripts/verify-build.sh \
minio/healing-rewrite-bucket/ \
--disable-multipart --quiet
"${WORK_DIR}/mc" mb minio/healing-rewrite-bucket --quiet --with-lock
"${WORK_DIR}/mc" cp \
buildscripts/verify-build.sh \
minio/healing-rewrite-bucket/ \
--disable-multipart --quiet
"${WORK_DIR}/mc" cp \
buildscripts/verify-build.sh \
minio/healing-rewrite-bucket/ \
--disable-multipart --quiet
"${WORK_DIR}/mc" cp \
buildscripts/verify-build.sh \
minio/healing-rewrite-bucket/ \
--disable-multipart --quiet
kill ${pid}
sleep 3
"${WORK_DIR}/mc" cp \
buildscripts/verify-build.sh \
minio/healing-rewrite-bucket/ \
--disable-multipart --quiet
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 10
kill ${pid}
sleep 3
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" > "${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 10
go install github.com/minio/minio/docs/debugging/s3-check-md5@latest
if ! s3-check-md5 \
-debug \
-versions \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
mkdir -p inspects
(
cd inspects
"${WORK_DIR}/mc" admin inspect minio/healing-rewrite-bucket/verify-build.sh/**
)
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${WORK_DIR}/mc" mb play/inspects
"${WORK_DIR}/mc" mirror inspects play/inspects
go build ./docs/debugging/s3-check-md5/
if ! ./s3-check-md5 \
-debug \
-versions \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
mkdir -p inspects
(cd inspects; "${WORK_DIR}/mc" admin inspect minio/healing-rewrite-bucket/verify-build.sh/**)
purge "$WORK_DIR"
exit 1
fi
"${WORK_DIR}/mc" mb play/inspects
"${WORK_DIR}/mc" mirror inspects play/inspects
go run ./buildscripts/heal-manual.go "127.0.0.1:${start_port}" "minio" "minio123"
sleep 1
purge "$WORK_DIR"
exit 1
fi
if ! s3-check-md5 \
-debug \
-versions \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
mkdir -p inspects
(
cd inspects
"${WORK_DIR}/mc" admin inspect minio/healing-rewrite-bucket/verify-build.sh/**
)
go run ./buildscripts/heal-manual.go "127.0.0.1:${start_port}" "minio" "minio123"
sleep 1
"${WORK_DIR}/mc" mb play/inspects
"${WORK_DIR}/mc" mirror inspects play/inspects
if ! ./s3-check-md5 \
-debug \
-versions \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
mkdir -p inspects
(cd inspects; "${WORK_DIR}/mc" admin inspect minio/healing-rewrite-bucket/verify-build.sh/**)
purge "$WORK_DIR"
exit 1
fi
"${WORK_DIR}/mc" mb play/inspects
"${WORK_DIR}/mc" mirror inspects play/inspects
purge "$WORK_DIR"
exit 1
fi
kill ${pid}
kill ${pid}
}
function main() {
download_old_release
download_old_release
start_port=$(shuf -i 10000-65000 -n 1)
start_port=$(shuf -i 10000-65000 -n 1)
verify_rewrite ${start_port}
verify_rewrite ${start_port}
}
function purge()
{
rm -rf "$1"
function purge() {
rm -rf "$1"
}
( main "$@" )
(main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"


@@ -6,167 +6,172 @@ set -o pipefail
set -x
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
echo "minio executable binary not found in current directory"
exit 1
fi
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO_OLD=( "$PWD/minio.RELEASE.2021-11-24T23-19-33Z" --config-dir "$MINIO_CONFIG_DIR" server )
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )
MINIO_OLD=("$PWD/minio.RELEASE.2021-11-24T23-19-33Z" --config-dir "$MINIO_CONFIG_DIR" server)
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
function download_old_release() {
if [ ! -f minio.RELEASE.2021-11-24T23-19-33Z ]; then
curl --silent -O https://dl.minio.io/server/minio/release/linux-amd64/archive/minio.RELEASE.2021-11-24T23-19-33Z
chmod a+x minio.RELEASE.2021-11-24T23-19-33Z
fi
if [ ! -f minio.RELEASE.2021-11-24T23-19-33Z ]; then
curl --silent -O https://dl.minio.io/server/minio/release/linux-amd64/archive/minio.RELEASE.2021-11-24T23-19-33Z
chmod a+x minio.RELEASE.2021-11-24T23-19-33Z
fi
}
function start_minio_16drive() {
start_port=$1
start_port=$1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
export _MINIO_SHARD_DISKTIME_DELTA="5s" # do not change this as its needed for tests
export MINIO_CI_CD=1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
export _MINIO_SHARD_DISKTIME_DELTA="5s" # do not change this as its needed for tests
export MINIO_CI_CD=1
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
# remove mc source.
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
"${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 30
# remove mc source.
purge "${MC_BUILD_DIR}"
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" > "${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 30
shred --iterations=1 --size=5241856 - 1>"${WORK_DIR}/unaligned" 2>/dev/null
"${WORK_DIR}/mc" mb minio/healing-shard-bucket --quiet
"${WORK_DIR}/mc" cp \
"${WORK_DIR}/unaligned" \
minio/healing-shard-bucket/unaligned \
--disable-multipart --quiet
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
## "unaligned" object name gets consistently distributed
## to disks in following distribution order
##
## NOTE: if you change the name make sure to change the
## distribution order present here
##
## [15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
shred --iterations=1 --size=5241856 - 1>"${WORK_DIR}/unaligned" 2>/dev/null
"${WORK_DIR}/mc" mb minio/healing-shard-bucket --quiet
"${WORK_DIR}/mc" cp \
"${WORK_DIR}/unaligned" \
minio/healing-shard-bucket/unaligned \
--disable-multipart --quiet
## make sure to remove the "last" data shard
rm -rf "${WORK_DIR}/xl14/healing-shard-bucket/unaligned"
sleep 10
## Heal the shard
"${WORK_DIR}/mc" admin heal --quiet --recursive minio/healing-shard-bucket
## then remove any other data shard let's pick first disk
## - 1st data shard.
rm -rf "${WORK_DIR}/xl3/healing-shard-bucket/unaligned"
sleep 10
## "unaligned" object name gets consistently distributed
## to disks in following distribution order
##
## NOTE: if you change the name make sure to change the
## distribution order present here
##
## [15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
go install github.com/minio/minio/docs/debugging/s3-check-md5@latest
if ! s3-check-md5 \
-debug \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep CORRUPTED; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
## make sure to remove the "last" data shard
rm -rf "${WORK_DIR}/xl14/healing-shard-bucket/unaligned"
sleep 10
## Heal the shard
"${WORK_DIR}/mc" admin heal --quiet --recursive minio/healing-shard-bucket
## then remove any other data shard let's pick first disk
## - 1st data shard.
rm -rf "${WORK_DIR}/xl3/healing-shard-bucket/unaligned"
sleep 10
pkill minio
sleep 3
go build ./docs/debugging/s3-check-md5/
if ! ./s3-check-md5 \
-debug \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep CORRUPTED; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 30
pkill minio
sleep 3
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" > "${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 30
if ! s3-check-md5 \
-debug \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
mkdir -p inspects
(
cd inspects
"${WORK_DIR}/mc" support inspect minio/healing-shard-bucket/unaligned/**
)
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${WORK_DIR}/mc" mb play/inspects
"${WORK_DIR}/mc" mirror inspects play/inspects
if ! ./s3-check-md5 \
-debug \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
mkdir -p inspects
(cd inspects; "${WORK_DIR}/mc" support inspect minio/healing-shard-bucket/unaligned/**)
purge "$WORK_DIR"
exit 1
fi
"${WORK_DIR}/mc" mb play/inspects
"${WORK_DIR}/mc" mirror inspects play/inspects
"${WORK_DIR}/mc" admin heal --quiet --recursive minio/healing-shard-bucket
purge "$WORK_DIR"
exit 1
fi
if ! s3-check-md5 \
-debug \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
mkdir -p inspects
(
cd inspects
"${WORK_DIR}/mc" support inspect minio/healing-shard-bucket/unaligned/**
)
"${WORK_DIR}/mc" admin heal --quiet --recursive minio/healing-shard-bucket
"${WORK_DIR}/mc" mb play/inspects
"${WORK_DIR}/mc" mirror inspects play/inspects
if ! ./s3-check-md5 \
-debug \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
mkdir -p inspects
(cd inspects; "${WORK_DIR}/mc" support inspect minio/healing-shard-bucket/unaligned/**)
purge "$WORK_DIR"
exit 1
fi
"${WORK_DIR}/mc" mb play/inspects
"${WORK_DIR}/mc" mirror inspects play/inspects
purge "$WORK_DIR"
exit 1
fi
pkill minio
sleep 3
pkill minio
sleep 3
}
function main() {
download_old_release
download_old_release
start_port=$(shuf -i 10000-65000 -n 1)
start_port=$(shuf -i 10000-65000 -n 1)
start_minio_16drive ${start_port}
start_minio_16drive ${start_port}
}
function purge()
{
rm -rf "$1"
function purge() {
rm -rf "$1"
}
( main "$@" )
(main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"


@@ -9,11 +9,6 @@ x-minio-common: &minio-common
expose:
- "9000"
- "9001"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
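The healthcheck dropped from this compose file polled MinIO's liveness endpoint every 30 seconds; the same probe can still be run by hand against any node to confirm it is serving (a one-off check, not part of the compose file):

curl -f -m 20 -s -o /dev/null http://localhost:9000/minio/health/live && echo "node is live"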


@@ -6,8 +6,8 @@ set -E
set -o pipefail
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
echo "minio executable binary not found in current directory"
exit 1
fi
WORK_DIR="$PWD/.verify-$RANDOM"
@@ -25,285 +25,270 @@ export ENABLE_ADMIN=1
export MINIO_CI_CD=1
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" )
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR")
FILE_1_MB="$MINT_DATA_DIR/datafile-1-MB"
FILE_65_MB="$MINT_DATA_DIR/datafile-65-MB"
FUNCTIONAL_TESTS="$WORK_DIR/functional-tests.sh"
function start_minio_fs()
{
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
"${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
sleep 10
function start_minio_fs() {
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
"${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
sleep 10
}
function start_minio_erasure()
{
"${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
sleep 15
function start_minio_erasure() {
"${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
sleep 15
}
function start_minio_erasure_sets()
{
export MINIO_ENDPOINTS="${WORK_DIR}/erasure-disk-sets{1...32}"
"${MINIO[@]}" server > "$WORK_DIR/erasure-minio-sets.log" 2>&1 &
sleep 15
function start_minio_erasure_sets() {
export MINIO_ENDPOINTS="${WORK_DIR}/erasure-disk-sets{1...32}"
"${MINIO[@]}" server >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
sleep 15
}
function start_minio_pool_erasure_sets()
{
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/pool-disk-sets{1...4} http://127.0.0.1:9001${WORK_DIR}/pool-disk-sets{5...8}"
"${MINIO[@]}" server --address ":9000" > "$WORK_DIR/pool-minio-9000.log" 2>&1 &
"${MINIO[@]}" server --address ":9001" > "$WORK_DIR/pool-minio-9001.log" 2>&1 &
function start_minio_pool_erasure_sets() {
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/pool-disk-sets{1...4} http://127.0.0.1:9001${WORK_DIR}/pool-disk-sets{5...8}"
"${MINIO[@]}" server --address ":9000" >"$WORK_DIR/pool-minio-9000.log" 2>&1 &
"${MINIO[@]}" server --address ":9001" >"$WORK_DIR/pool-minio-9001.log" 2>&1 &
sleep 40
sleep 40
}
function start_minio_pool_erasure_sets_ipv6()
{
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
export MINIO_ENDPOINTS="http://[::1]:9000${WORK_DIR}/pool-disk-sets-ipv6{1...4} http://[::1]:9001${WORK_DIR}/pool-disk-sets-ipv6{5...8}"
"${MINIO[@]}" server --address="[::1]:9000" > "$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
"${MINIO[@]}" server --address="[::1]:9001" > "$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &
function start_minio_pool_erasure_sets_ipv6() {
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
export MINIO_ENDPOINTS="http://[::1]:9000${WORK_DIR}/pool-disk-sets-ipv6{1...4} http://[::1]:9001${WORK_DIR}/pool-disk-sets-ipv6{5...8}"
"${MINIO[@]}" server --address="[::1]:9000" >"$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
"${MINIO[@]}" server --address="[::1]:9001" >"$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &
sleep 40
sleep 40
}
function start_minio_dist_erasure()
{
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/dist-disk1 http://127.0.0.1:9001${WORK_DIR}/dist-disk2 http://127.0.0.1:9002${WORK_DIR}/dist-disk3 http://127.0.0.1:9003${WORK_DIR}/dist-disk4"
for i in $(seq 0 3); do
"${MINIO[@]}" server --address ":900${i}" > "$WORK_DIR/dist-minio-900${i}.log" 2>&1 &
done
function start_minio_dist_erasure() {
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/dist-disk1 http://127.0.0.1:9001${WORK_DIR}/dist-disk2 http://127.0.0.1:9002${WORK_DIR}/dist-disk3 http://127.0.0.1:9003${WORK_DIR}/dist-disk4"
for i in $(seq 0 3); do
"${MINIO[@]}" server --address ":900${i}" >"$WORK_DIR/dist-minio-900${i}.log" 2>&1 &
done
sleep 40
sleep 40
}
function run_test_fs()
{
start_minio_fs
function run_test_fs() {
start_minio_fs
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
sleep 3
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
cat "$WORK_DIR/fs-minio.log"
fi
rm -f "$WORK_DIR/fs-minio.log"
if [ "$rv" -ne 0 ]; then
cat "$WORK_DIR/fs-minio.log"
fi
rm -f "$WORK_DIR/fs-minio.log"
return "$rv"
return "$rv"
}
function run_test_erasure_sets()
{
start_minio_erasure_sets
function run_test_erasure_sets() {
start_minio_erasure_sets
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
sleep 3
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
cat "$WORK_DIR/erasure-minio-sets.log"
fi
rm -f "$WORK_DIR/erasure-minio-sets.log"
if [ "$rv" -ne 0 ]; then
cat "$WORK_DIR/erasure-minio-sets.log"
fi
rm -f "$WORK_DIR/erasure-minio-sets.log"
return "$rv"
return "$rv"
}
function run_test_pool_erasure_sets()
{
start_minio_pool_erasure_sets
function run_test_pool_erasure_sets() {
start_minio_pool_erasure_sets
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
sleep 3
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
for i in $(seq 0 1); do
echo "server$i log:"
cat "$WORK_DIR/pool-minio-900$i.log"
done
fi
if [ "$rv" -ne 0 ]; then
for i in $(seq 0 1); do
echo "server$i log:"
cat "$WORK_DIR/pool-minio-900$i.log"
done
fi
for i in $(seq 0 1); do
rm -f "$WORK_DIR/pool-minio-900$i.log"
done
for i in $(seq 0 1); do
rm -f "$WORK_DIR/pool-minio-900$i.log"
done
return "$rv"
return "$rv"
}
function run_test_pool_erasure_sets_ipv6()
{
start_minio_pool_erasure_sets_ipv6
function run_test_pool_erasure_sets_ipv6() {
start_minio_pool_erasure_sets_ipv6
export SERVER_ENDPOINT="[::1]:9000"
export SERVER_ENDPOINT="[::1]:9000"
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
sleep 3
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
for i in $(seq 0 1); do
echo "server$i log:"
cat "$WORK_DIR/pool-minio-ipv6-900$i.log"
done
fi
if [ "$rv" -ne 0 ]; then
for i in $(seq 0 1); do
echo "server$i log:"
cat "$WORK_DIR/pool-minio-ipv6-900$i.log"
done
fi
for i in $(seq 0 1); do
rm -f "$WORK_DIR/pool-minio-ipv6-900$i.log"
done
for i in $(seq 0 1); do
rm -f "$WORK_DIR/pool-minio-ipv6-900$i.log"
done
return "$rv"
return "$rv"
}
function run_test_erasure()
{
start_minio_erasure
function run_test_erasure() {
start_minio_erasure
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
sleep 3
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
cat "$WORK_DIR/erasure-minio.log"
fi
rm -f "$WORK_DIR/erasure-minio.log"
if [ "$rv" -ne 0 ]; then
cat "$WORK_DIR/erasure-minio.log"
fi
rm -f "$WORK_DIR/erasure-minio.log"
return "$rv"
return "$rv"
}
function run_test_dist_erasure()
{
start_minio_dist_erasure
function run_test_dist_erasure() {
start_minio_dist_erasure
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
sleep 3
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
echo "server1 log:"
cat "$WORK_DIR/dist-minio-9000.log"
echo "server2 log:"
cat "$WORK_DIR/dist-minio-9001.log"
echo "server3 log:"
cat "$WORK_DIR/dist-minio-9002.log"
echo "server4 log:"
cat "$WORK_DIR/dist-minio-9003.log"
fi
if [ "$rv" -ne 0 ]; then
echo "server1 log:"
cat "$WORK_DIR/dist-minio-9000.log"
echo "server2 log:"
cat "$WORK_DIR/dist-minio-9001.log"
echo "server3 log:"
cat "$WORK_DIR/dist-minio-9002.log"
echo "server4 log:"
cat "$WORK_DIR/dist-minio-9003.log"
fi
rm -f "$WORK_DIR/dist-minio-9000.log" "$WORK_DIR/dist-minio-9001.log" "$WORK_DIR/dist-minio-9002.log" "$WORK_DIR/dist-minio-9003.log"
rm -f "$WORK_DIR/dist-minio-9000.log" "$WORK_DIR/dist-minio-9001.log" "$WORK_DIR/dist-minio-9002.log" "$WORK_DIR/dist-minio-9003.log"
return "$rv"
return "$rv"
}
function purge()
{
rm -rf "$1"
function purge() {
rm -rf "$1"
}
function __init__()
{
echo "Initializing environment"
mkdir -p "$WORK_DIR"
mkdir -p "$MINIO_CONFIG_DIR"
mkdir -p "$MINT_DATA_DIR"
function __init__() {
echo "Initializing environment"
mkdir -p "$WORK_DIR"
mkdir -p "$MINIO_CONFIG_DIR"
mkdir -p "$MINT_DATA_DIR"
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
fi
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
# remove mc source.
purge "${MC_BUILD_DIR}"
# remove mc source.
purge "${MC_BUILD_DIR}"
shred -n 1 -s 1M - 1>"$FILE_1_MB" 2>/dev/null
shred -n 1 -s 65M - 1>"$FILE_65_MB" 2>/dev/null
shred -n 1 -s 1M - 1>"$FILE_1_MB" 2>/dev/null
shred -n 1 -s 65M - 1>"$FILE_65_MB" 2>/dev/null
## version is purposefully set to '3' for minio to migrate configuration file
echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' > "$MINIO_CONFIG_DIR/config.json"
## version is purposefully set to '3' for minio to migrate configuration file
echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"
if ! wget -q -O "$FUNCTIONAL_TESTS" https://raw.githubusercontent.com/minio/mc/master/functional-tests.sh; then
echo "failed to download https://raw.githubusercontent.com/minio/mc/master/functional-tests.sh"
exit 1
fi
if ! wget -q -O "$FUNCTIONAL_TESTS" https://raw.githubusercontent.com/minio/mc/master/functional-tests.sh; then
echo "failed to download https://raw.githubusercontent.com/minio/mc/master/functional-tests.sh"
exit 1
fi
sed -i 's|-sS|-sSg|g' "$FUNCTIONAL_TESTS"
chmod a+x "$FUNCTIONAL_TESTS"
sed -i 's|-sS|-sSg|g' "$FUNCTIONAL_TESTS"
chmod a+x "$FUNCTIONAL_TESTS"
}
function main()
{
echo "Testing in FS setup"
if ! run_test_fs; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
function main() {
echo "Testing in FS setup"
if ! run_test_fs; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
echo "Testing in Erasure setup"
if ! run_test_erasure; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
echo "Testing in Erasure setup"
if ! run_test_erasure; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
echo "Testing in Distributed Erasure setup"
if ! run_test_dist_erasure; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
echo "Testing in Distributed Erasure setup"
if ! run_test_dist_erasure; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
echo "Testing in Erasure setup as sets"
if ! run_test_erasure_sets; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
echo "Testing in Erasure setup as sets"
if ! run_test_erasure_sets; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
echo "Testing in Distributed Eraure expanded setup"
if ! run_test_pool_erasure_sets; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
echo "Testing in Distributed Eraure expanded setup"
if ! run_test_pool_erasure_sets; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
echo "Testing in Distributed Erasure expanded setup with ipv6"
if ! run_test_pool_erasure_sets_ipv6; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
echo "Testing in Distributed Erasure expanded setup with ipv6"
if ! run_test_pool_erasure_sets_ipv6; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
purge "$WORK_DIR"
purge "$WORK_DIR"
}
( __init__ "$@" && main "$@" )
(__init__ "$@" && main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"


@@ -4,94 +4,93 @@ set -E
set -o pipefail
set -x
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
echo "minio executable binary not found in current directory"
exit 1
fi
WORK_DIR="$(mktemp -d)"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
function start_minio() {
start_port=$1
start_port=$1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
unset MINIO_CI_CD
unset CI
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
unset MINIO_CI_CD
unset CI
args=()
for i in $(seq 1 4); do
args+=("http://localhost:$[${start_port}+$i]${WORK_DIR}/mnt/disk$i/ ")
done
args=()
for i in $(seq 1 4); do
args+=("http://localhost:$((start_port + i))${WORK_DIR}/mnt/disk$i/ ")
done
for i in $(seq 1 4); do
"${MINIO[@]}" --address ":$[$start_port+$i]" ${args[@]} 2>&1 >"${WORK_DIR}/server$i.log" &
done
for i in $(seq 1 4); do
"${MINIO[@]}" --address ":$((start_port + i))" ${args[@]} 2>&1 >"${WORK_DIR}/server$i.log" &
done
# Wait until all nodes return 403
for i in $(seq 1 4); do
while [ "$(curl -m 1 -s -o /dev/null -w "%{http_code}" http://localhost:$[$start_port+$i])" -ne "403" ]; do
echo -n ".";
sleep 1;
done
done
# Wait until all nodes return 403
for i in $(seq 1 4); do
while [ "$(curl -m 1 -s -o /dev/null -w "%{http_code}" http://localhost:$((start_port + i)))" -ne "403" ]; do
echo -n "."
sleep 1
done
done
}
}
# Prepare fake disks with losetup
function prepare_block_devices() {
mkdir -p ${WORK_DIR}/disks/ ${WORK_DIR}/mnt/
for i in 1 2 3 4; do
dd if=/dev/zero of=${WORK_DIR}/disks/img.$i bs=1M count=2048
mkfs.ext4 -F ${WORK_DIR}/disks/img.$i
sudo mknod /dev/minio-loopdisk$i b 7 $[256-$i]
sudo losetup /dev/minio-loopdisk$i ${WORK_DIR}/disks/img.$i
mkdir -p ${WORK_DIR}/mnt/disk$i/
sudo mount /dev/minio-loopdisk$i ${WORK_DIR}/mnt/disk$i/
sudo chown "$(id -u):$(id -g)" /dev/minio-loopdisk$i ${WORK_DIR}/mnt/disk$i/
done
set -e
mkdir -p ${WORK_DIR}/disks/ ${WORK_DIR}/mnt/
sudo modprobe loop
for i in 1 2 3 4; do
dd if=/dev/zero of=${WORK_DIR}/disks/img.${i} bs=1M count=2000
device=$(sudo losetup --find --show ${WORK_DIR}/disks/img.${i})
sudo mkfs.ext4 -F ${device}
mkdir -p ${WORK_DIR}/mnt/disk${i}/
sudo mount ${device} ${WORK_DIR}/mnt/disk${i}/
sudo chown "$(id -u):$(id -g)" ${device} ${WORK_DIR}/mnt/disk${i}/
done
set +e
}
# Start a distributed MinIO setup, unmount one disk and check if it is formatted
function main() {
start_port=$(shuf -i 10000-65000 -n 1)
start_minio ${start_port}
start_port=$(shuf -i 10000-65000 -n 1)
start_minio ${start_port}
# Unmount the disk, after the unmount the device id
# /tmp/xxx/mnt/disk4 will be the same as '/' and it
# will be detected as root disk
while [ "$u" != "0" ]; do
sudo umount ${WORK_DIR}/mnt/disk4/
u=$?
sleep 1
done
# Unmount the disk, after the unmount the device id
# /tmp/xxx/mnt/disk4 will be the same as '/' and it
# will be detected as root disk
while [ "$u" != "0" ]; do
sudo umount ${WORK_DIR}/mnt/disk4/
u=$?
sleep 1
done
# Wait until MinIO self heal kicks in
sleep 60
# Wait until MinIO self heal kicks in
sleep 60
if [ -f ${WORK_DIR}/mnt/disk4/.minio.sys/format.json ]; then
echo "A root disk is formatted unexpectedely"
cat "${WORK_DIR}/server4.log"
exit -1
fi
if [ -f ${WORK_DIR}/mnt/disk4/.minio.sys/format.json ]; then
echo "A root disk is formatted unexpectedely"
cat "${WORK_DIR}/server4.log"
exit -1
fi
}
function cleanup() {
pkill minio
sudo umount ${WORK_DIR}/mnt/disk{1..3}/
sudo rm /dev/minio-loopdisk*
rm -rf "$WORK_DIR"
pkill minio
sudo umount ${WORK_DIR}/mnt/disk{1..3}/
sudo rm /dev/minio-loopdisk*
rm -rf "$WORK_DIR"
}
( prepare_block_devices )
( main "$@" )
(prepare_block_devices)
(main "$@")
rv=$?
cleanup
exit "$rv"


@@ -5,139 +5,135 @@ set -E
set -o pipefail
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
echo "minio executable binary not found in current directory"
exit 1
fi
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
function start_minio_3_node() {
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MINIO_ERASURE_SET_DRIVE_COUNT=6
export MINIO_CI_CD=1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MINIO_ERASURE_SET_DRIVE_COUNT=6
export MINIO_CI_CD=1
start_port=$2
args=""
for i in $(seq 1 3); do
args="$args http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/"
done
"${MINIO[@]}" --address ":$[$start_port+1]" $args > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
pid1=$!
disown ${pid1}
"${MINIO[@]}" --address ":$[$start_port+2]" $args > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
pid2=$!
disown $pid2
"${MINIO[@]}" --address ":$[$start_port+3]" $args > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
pid3=$!
disown $pid3
sleep "$1"
if ! ps -p $pid1 1>&2 > /dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/dist-minio-server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
if ! ps -p $pid2 1>&2 > /dev/null; then
echo "server2 log:"
cat "${WORK_DIR}/dist-minio-server2.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
if ! ps -p $pid3 1>&2 > /dev/null; then
echo "server3 log:"
cat "${WORK_DIR}/dist-minio-server3.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
if ! pkill minio; then
start_port=$2
args=""
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
args="$args http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/1/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/2/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/3/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/4/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/5/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/6/"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
sleep 1;
if pgrep minio; then
# forcibly killing, to proceed further properly.
if ! pkill -9 minio; then
echo "no minio process running anymore, proceed."
"${MINIO[@]}" --address ":$((start_port + 1))" $args >"${WORK_DIR}/dist-minio-server1.log" 2>&1 &
pid1=$!
disown ${pid1}
"${MINIO[@]}" --address ":$((start_port + 2))" $args >"${WORK_DIR}/dist-minio-server2.log" 2>&1 &
pid2=$!
disown $pid2
"${MINIO[@]}" --address ":$((start_port + 3))" $args >"${WORK_DIR}/dist-minio-server3.log" 2>&1 &
pid3=$!
disown $pid3
sleep "$1"
if ! ps -p $pid1 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/dist-minio-server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
fi
}
if ! ps -p $pid2 1>&2 >/dev/null; then
echo "server2 log:"
cat "${WORK_DIR}/dist-minio-server2.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
if ! ps -p $pid3 1>&2 >/dev/null; then
echo "server3 log:"
cat "${WORK_DIR}/dist-minio-server3.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
if ! pkill minio; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
sleep 1
if pgrep minio; then
# forcibly killing, to proceed further properly.
if ! pkill -9 minio; then
echo "no minio process running anymore, proceed."
fi
fi
}
function check_online() {
if grep -q 'Unable to initialize sub-systems' ${WORK_DIR}/dist-minio-*.log; then
echo "1"
fi
if grep -q 'Unable to initialize sub-systems' ${WORK_DIR}/dist-minio-*.log; then
echo "1"
fi
}
function purge()
{
rm -rf "$1"
function purge() {
rm -rf "$1"
}
function __init__()
{
echo "Initializing environment"
mkdir -p "$WORK_DIR"
mkdir -p "$MINIO_CONFIG_DIR"
function __init__() {
echo "Initializing environment"
mkdir -p "$WORK_DIR"
mkdir -p "$MINIO_CONFIG_DIR"
## version is purposefully set to '3' for minio to migrate configuration file
echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' > "$MINIO_CONFIG_DIR/config.json"
## version is purposefully set to '3' for minio to migrate configuration file
echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"
}
function perform_test() {
start_minio_3_node 120 $2
start_minio_3_node 120 $2
echo "Testing Distributed Erasure setup healing of drives"
echo "Remove the contents of the disks belonging to '${1}' erasure set"
echo "Testing Distributed Erasure setup healing of drives"
echo "Remove the contents of the disks belonging to '${1}' erasure set"
rm -rf ${WORK_DIR}/${1}/*/
rm -rf ${WORK_DIR}/${1}/*/
start_minio_3_node 120 $2
start_minio_3_node 120 $2
rv=$(check_online)
if [ "$rv" == "1" ]; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
pkill -9 minio
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
rv=$(check_online)
if [ "$rv" == "1" ]; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
pkill -9 minio
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
}
function main()
{
# use same ports for all tests
start_port=$(shuf -i 10000-65000 -n 1)
function main() {
# use same ports for all tests
start_port=$(shuf -i 10000-65000 -n 1)
perform_test "2" ${start_port}
perform_test "1" ${start_port}
perform_test "3" ${start_port}
perform_test "2" ${start_port}
perform_test "1" ${start_port}
perform_test "3" ${start_port}
}
( __init__ "$@" && main "$@" )
(__init__ "$@" && main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"


@@ -22,10 +22,10 @@ import (
"io"
"net/http"
"github.com/gorilla/mux"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/bucket/policy"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
)
// Data types used for returning dummy access control
@@ -90,8 +90,8 @@ func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.
if aclHeader == "" {
acl := &accessControlPolicy{}
if err = xmlDecoder(r.Body, acl, r.ContentLength); err != nil {
if err == io.EOF {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingSecurityHeader),
if terr, ok := err.(*xml.SyntaxError); ok && terr.Msg == io.EOF.Error() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML),
r.URL)
return
}


@@ -25,16 +25,14 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"time"
"github.com/gorilla/mux"
jsoniter "github.com/json-iterator/go"
"github.com/klauspost/compress/zip"
"github.com/minio/kes"
"github.com/minio/madmin-go"
"github.com/minio/kes-go"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/bucket/lifecycle"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
@@ -42,8 +40,8 @@ import (
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/bucket/policy"
iampolicy "github.com/minio/pkg/iam/policy"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
)
const (
@@ -57,11 +55,9 @@ const (
// specified in the quota configuration will be applied by default
// to enforce total quota for the specified bucket.
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketQuotaConfig")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SetBucketQuotaAdminAction)
if objectAPI == nil {
return
}
@@ -74,7 +70,7 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
return
}
data, err := ioutil.ReadAll(r.Body)
data, err := io.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
@@ -86,11 +82,6 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
return
}
if quotaConfig.Type == "fifo" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, data)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -103,15 +94,12 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
Quota: data,
UpdatedAt: updatedAt,
}
if quotaConfig.Quota == 0 {
if quotaConfig.Size == 0 || quotaConfig.Quota == 0 {
bucketMeta.Quota = nil
}
// Call site replication hook.
if err = globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta))
// Write success response.
writeSuccessResponseHeadersOnly(w)
@@ -119,11 +107,9 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
// GetBucketQuotaConfigHandler - gets bucket quota configuration
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketQuotaConfig")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.GetBucketQuotaAdminAction)
if objectAPI == nil {
return
}
@@ -154,20 +140,14 @@ func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *
// SetRemoteTargetHandler - sets a remote target for bucket
func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetBucketTarget")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := pathClean(vars["bucket"])
update := r.Form.Get("update") == "true"
if globalIsGateway {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Get current object layer instance.
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketTargetAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SetBucketTargetAction)
if objectAPI == nil {
return
}
@@ -178,7 +158,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
return
}
cred, _, _, s3Err := validateAdminSignature(ctx, r, "")
cred, _, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
@@ -207,12 +187,28 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
if update {
ops = madmin.GetTargetUpdateOps(r.Form)
} else {
target.Arn = globalBucketTargetSys.getRemoteARN(bucket, &target)
var exists bool // true if arn exists
target.Arn, exists = globalBucketTargetSys.getRemoteARN(bucket, &target, "")
if exists && target.Arn != "" { // return pre-existing ARN
data, err := json.Marshal(target.Arn)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, data)
return
}
}
if target.Arn == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
if globalSiteReplicationSys.isEnabled() && !update {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrRemoteTargetDenyAddError, err), r.URL)
return
}
if update {
// overlay the updates on existing target
tgt := globalBucketTargetSys.GetRemoteBucketTargetByArn(ctx, bucket, target.Arn)
@@ -223,10 +219,14 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
for _, op := range ops {
switch op {
case madmin.CredentialsUpdateType:
tgt.Credentials = target.Credentials
tgt.TargetBucket = target.TargetBucket
tgt.Secure = target.Secure
tgt.Endpoint = target.Endpoint
if !globalSiteReplicationSys.isEnabled() {
// credentials update is possible only in bucket replication. User will never
// know the site replicator creds.
tgt.Credentials = target.Credentials
tgt.TargetBucket = target.TargetBucket
tgt.Secure = target.Secure
tgt.Endpoint = target.Endpoint
}
case madmin.SyncUpdateType:
tgt.ReplicationSync = target.ReplicationSync
case madmin.ProxyUpdateType:
@@ -283,19 +283,14 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
// ListRemoteTargetsHandler - lists remote target(s) for a bucket or gets a target
// for a particular ARN type
func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListBucketTargets")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := pathClean(vars["bucket"])
arnType := vars["type"]
if globalIsGateway {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Get current object layer instance.
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.GetBucketTargetAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.GetBucketTargetAction)
if objectAPI == nil {
return
}
@@ -322,19 +317,14 @@ func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *htt
// RemoveRemoteTargetHandler - removes a remote target for bucket with specified ARN
func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveBucketTarget")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := pathClean(vars["bucket"])
arn := vars["arn"]
if globalIsGateway {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Get current object layer instance.
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketTargetAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SetBucketTargetAction)
if objectAPI == nil {
return
}
@@ -370,16 +360,11 @@ func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *ht
// ExportBucketMetadataHandler - exports all bucket metadata as a zipped file
func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ExportBucketMetadata")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
ctx := r.Context()
bucket := pathClean(r.Form.Get("bucket"))
if globalIsGateway {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Get current object layer instance.
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ExportBucketMetadataAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ExportBucketMetadataAction)
if objectAPI == nil {
return
}
@@ -407,7 +392,8 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
// of bucket metadata
zipWriter := zip.NewWriter(w)
defer zipWriter.Close()
rawDataFn := func(r io.Reader, filename string, sz int) error {
rawDataFn := func(r io.Reader, filename string, sz int) {
header, zerr := zip.FileInfoHeader(dummyFileInfo{
name: filename,
size: int64(sz),
@@ -416,20 +402,13 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
isDir: false,
sys: nil,
})
if zerr != nil {
logger.LogIf(ctx, zerr)
return nil
if zerr == nil {
header.Method = zip.Deflate
zwriter, zerr := zipWriter.CreateHeader(header)
if zerr == nil {
io.Copy(zwriter, r)
}
}
header.Method = zip.Deflate
zwriter, zerr := zipWriter.CreateHeader(header)
if zerr != nil {
logger.LogIf(ctx, zerr)
return nil
}
if _, err := io.Copy(zwriter, r); err != nil {
logger.LogIf(ctx, err)
}
return nil
}
cfgFiles := []string{
@@ -461,12 +440,9 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketLifecycleConfig:
config, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
config, _, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
if err != nil {
if errors.Is(err, BucketLifecycleNotFound{Bucket: bucket}) {
continue
@@ -480,10 +456,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketQuotaConfigFile:
config, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket)
if err != nil {
@@ -498,10 +471,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketSSEConfig:
config, _, err := globalBucketMetadataSys.GetSSEConfig(bucket)
if err != nil {
@@ -516,10 +486,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketTaggingConfig:
config, _, err := globalBucketMetadataSys.GetTaggingConfig(bucket)
if err != nil {
@@ -534,10 +501,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case objectLockConfig:
config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)
if err != nil {
@@ -553,10 +517,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketVersioningConfig:
config, _, err := globalBucketMetadataSys.GetVersioningConfig(bucket)
if err != nil {
@@ -572,10 +533,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketReplicationConfig:
config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
if err != nil {
@@ -590,11 +548,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketTargetsFile:
config, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket)
if err != nil {
@@ -610,10 +564,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
}
}
}
@@ -658,20 +609,14 @@ func (i *importMetaReport) SetStatus(bucket, fname string, err error) {
// 2. Replication config - is omitted from import as remote target credentials are not available from exported data for security reasons.
// 3. lifecycle config - if transition rules are present, tier name needs to have been defined.
func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ImportBucketMetadata")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
if globalIsGateway {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Get current object layer instance.
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ImportBucketMetadataAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ImportBucketMetadataAction)
if objectAPI == nil {
return
}
data, err := ioutil.ReadAll(r.Body)
data, err := io.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
@@ -682,12 +627,31 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
bucketMap := make(map[string]struct{}, 1)
rpt := importMetaReport{
madmin.BucketMetaImportErrs{
Buckets: make(map[string]madmin.BucketStatus, len(zr.File)),
},
}
bucketMap := make(map[string]*BucketMetadata, len(zr.File))
updatedAt := UTCNow()
for _, file := range zr.File {
slc := strings.Split(file.Name, slashSeparator)
if len(slc) != 2 { // expecting bucket/configfile in the zipfile
rpt.SetStatus(file.Name, "", fmt.Errorf("malformed zip - expecting format bucket/<config.json>"))
continue
}
bucket := slc[0]
meta, err := readBucketMetadata(ctx, objectAPI, bucket)
if err == nil {
bucketMap[bucket] = &meta
} else if err != errConfigNotFound {
rpt.SetStatus(bucket, "", err)
}
}
// import object lock config if any - order of import matters here.
for _, file := range zr.File {
slc := strings.Split(file.Name, slashSeparator)
@@ -696,8 +660,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
bucket, fileName := slc[0], slc[1]
switch fileName {
case objectLockConfig:
if fileName == objectLockConfig {
reader, err := file.Open()
if err != nil {
rpt.SetStatus(bucket, fileName, err)
@@ -716,16 +679,17 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
}
if _, ok := bucketMap[bucket]; !ok {
opts := MakeBucketOptions{
LockEnabled: config.ObjectLockEnabled == "Enabled",
LockEnabled: config.Enabled(),
}
err = objectAPI.MakeBucketWithLocation(ctx, bucket, opts)
err = objectAPI.MakeBucket(ctx, bucket, opts)
if err != nil {
if _, ok := err.(BucketExists); !ok {
rpt.SetStatus(bucket, fileName, err)
continue
}
}
bucketMap[bucket] = struct{}{}
v := newBucketMetadata(bucket)
bucketMap[bucket] = &v
}
// Deny object locking configuration settings on existing buckets without object lock enabled.
@@ -734,27 +698,9 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, configData)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
bucketMap[bucket].ObjectLockConfigXML = configData
bucketMap[bucket].ObjectLockConfigUpdatedAt = updatedAt
rpt.SetStatus(bucket, fileName, nil)
// Call site replication hook.
//
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeObjectLockConfig,
Bucket: bucket,
ObjectLockConfig: &cfgStr,
UpdatedAt: updatedAt,
}); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
}
}
@@ -766,8 +712,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
bucket, fileName := slc[0], slc[1]
switch fileName {
case bucketVersioningConfig:
if fileName == bucketVersioningConfig {
reader, err := file.Open()
if err != nil {
rpt.SetStatus(bucket, fileName, err)
@@ -779,13 +724,14 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
if _, ok := bucketMap[bucket]; !ok {
if err = objectAPI.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{}); err != nil {
if err = objectAPI.MakeBucket(ctx, bucket, MakeBucketOptions{}); err != nil {
if _, ok := err.(BucketExists); !ok {
rpt.SetStatus(bucket, fileName, err)
continue
}
}
bucketMap[bucket] = struct{}{}
v := newBucketMetadata(bucket)
bucketMap[bucket] = &v
}
if globalSiteReplicationSys.isEnabled() && v.Suspended() {
@@ -808,10 +754,8 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketVersioningConfig, configData); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
bucketMap[bucket].VersioningConfigXML = configData
bucketMap[bucket].VersioningConfigUpdatedAt = updatedAt
rpt.SetStatus(bucket, fileName, nil)
}
}
@@ -829,16 +773,18 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
bucket, fileName := slc[0], slc[1]
// create bucket if it does not exist yet.
if _, ok := bucketMap[bucket]; !ok {
err = objectAPI.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{})
err = objectAPI.MakeBucket(ctx, bucket, MakeBucketOptions{})
if err != nil {
if _, ok := err.(BucketExists); !ok {
rpt.SetStatus(bucket, "", err)
continue
}
}
bucketMap[bucket] = struct{}{}
v := newBucketMetadata(bucket)
bucketMap[bucket] = &v
}
if _, ok := bucketMap[bucket]; !ok {
continue
@@ -857,12 +803,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketNotificationConfig, configData); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
rulesMap := config.ToRulesMap()
globalEventNotifier.AddRulesMap(bucket, rulesMap)
bucketMap[bucket].NotificationConfigXML = configData
rpt.SetStatus(bucket, fileName, nil)
case bucketPolicyConfig:
// Error out if Content-Length is beyond allowed size.
@@ -871,13 +812,13 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
bucketPolicyBytes, err := ioutil.ReadAll(io.LimitReader(reader, sz))
bucketPolicyBytes, err := io.ReadAll(io.LimitReader(reader, sz))
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
bucketPolicy, err := policy.ParseConfig(bytes.NewReader(bucketPolicyBytes), bucket)
bucketPolicy, err := policy.ParseBucketPolicyConfig(bytes.NewReader(bucketPolicyBytes), bucket)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
@@ -885,7 +826,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
// Version in policy must not be empty
if bucketPolicy.Version == "" {
rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrMalformedPolicy.String()))
rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyInvalidVersion.String()))
continue
}
@@ -895,22 +836,9 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, configData)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
bucketMap[bucket].PolicyConfigJSON = configData
bucketMap[bucket].PolicyConfigUpdatedAt = updatedAt
rpt.SetStatus(bucket, fileName, nil)
// Call site replication hook.
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypePolicy,
Bucket: bucket,
Policy: bucketPolicyBytes,
UpdatedAt: updatedAt,
}); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
case bucketLifecycleConfig:
bucketLifecycle, err := lifecycle.ParseLifecycleConfig(io.LimitReader(reader, sz))
if err != nil {
@@ -936,10 +864,8 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, configData); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
bucketMap[bucket].LifecycleConfigXML = configData
bucketMap[bucket].LifecycleConfigUpdatedAt = updatedAt
rpt.SetStatus(bucket, fileName, nil)
case bucketSSEConfig:
// Parse bucket encryption xml
@@ -974,29 +900,9 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
// Store the bucket encryption configuration in the object layer
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, configData)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
bucketMap[bucket].EncryptionConfigXML = configData
bucketMap[bucket].EncryptionConfigUpdatedAt = updatedAt
rpt.SetStatus(bucket, fileName, nil)
// Call site replication hook.
//
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket,
SSEConfig: &cfgStr,
UpdatedAt: updatedAt,
}); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
case bucketTaggingConfig:
tags, err := tags.ParseBucketXML(io.LimitReader(reader, sz))
if err != nil {
@@ -1010,70 +916,59 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, configData)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
bucketMap[bucket].TaggingConfigXML = configData
bucketMap[bucket].TaggingConfigUpdatedAt = updatedAt
rpt.SetStatus(bucket, fileName, nil)
// Call site replication hook.
//
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeTags,
Bucket: bucket,
Tags: &cfgStr,
UpdatedAt: updatedAt,
}); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
case bucketQuotaConfigFile:
data, err := ioutil.ReadAll(reader)
data, err := io.ReadAll(reader)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
quotaConfig, err := parseBucketQuota(bucket, data)
_, err = parseBucketQuota(bucket, data)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
if quotaConfig.Type == "fifo" {
rpt.SetStatus(bucket, fileName, fmt.Errorf("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore"))
continue
}
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, data)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
bucketMap[bucket].QuotaConfigJSON = data
bucketMap[bucket].QuotaConfigUpdatedAt = updatedAt
rpt.SetStatus(bucket, fileName, nil)
bucketMeta := madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeQuotaConfig,
Bucket: bucket,
Quota: data,
UpdatedAt: updatedAt,
}
if quotaConfig.Quota == 0 {
bucketMeta.Quota = nil
}
// Call site replication hook.
if err = globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
}
}
enc := func(b []byte) *string {
if b == nil {
return nil
}
v := base64.StdEncoding.EncodeToString(b)
return &v
}
for bucket, meta := range bucketMap {
err := globalBucketMetadataSys.save(ctx, *meta)
if err != nil {
rpt.SetStatus(bucket, "", err)
continue
}
// Call site replication hook.
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Bucket: bucket,
Quota: meta.QuotaConfigJSON,
Policy: meta.PolicyConfigJSON,
Versioning: enc(meta.VersioningConfigXML),
Tags: enc(meta.TaggingConfigXML),
ObjectLockConfig: enc(meta.ObjectLockConfigXML),
SSEConfig: enc(meta.EncryptionConfigXML),
UpdatedAt: updatedAt,
}); err != nil {
rpt.SetStatus(bucket, "", err)
continue
}
}
rptData, err := json.Marshal(rpt.BucketMetaImportErrs)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -1084,28 +979,15 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
}
// ReplicationDiffHandler - POST returns info on unreplicated versions for a remote target ARN
// to the connected HTTP client. This is a MinIO only extension
// to the connected HTTP client.
func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ReplicationDiff")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
ctx := r.Context()
vars := mux.Vars(r)
bucket := vars["bucket"]
if globalIsGateway {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Get current object layer instance.
objectAPI := newObjectLayerFn()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ReplicationDiff)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
// check if user has permissions to perform this operation
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketVersionsAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
@@ -1158,3 +1040,62 @@ func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http.
}
}
}
// ReplicationMRFHandler - POST returns info on entries in the MRF backlog for a node or all nodes
func (a adminAPIHandlers) ReplicationMRFHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ReplicationDiff)
if objectAPI == nil {
return
}
// Check if bucket exists.
if bucket != "" {
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
q := r.Form
node := q.Get("node")
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()
mrfCh, err := globalNotificationSys.GetReplicationMRF(ctx, bucket, node)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
enc := json.NewEncoder(w)
for {
select {
case entry, ok := <-mrfCh:
if !ok {
return
}
if err := enc.Encode(entry); err != nil {
return
}
if len(mrfCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
}
case <-keepAliveTicker.C:
if len(mrfCh) > 0 {
continue
}
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
case <-ctx.Done():
return
}
}
}


@@ -23,18 +23,18 @@ import (
"fmt"
"net/http"
"github.com/minio/kes"
"github.com/minio/madmin-go"
"github.com/minio/kes-go"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/config"
iampolicy "github.com/minio/pkg/iam/policy"
"github.com/minio/pkg/v2/policy"
)
// validateAdminReq will validate request against and return whether it is allowed.
// If any of the supplied actions are allowed it will be successful.
// If nil ObjectLayer is returned, the operation is not permitted.
// When nil ObjectLayer has been returned an error has always been sent to w.
func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, actions ...iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, actions ...policy.AdminAction) (ObjectLayer, auth.Credentials) {
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
@@ -78,13 +78,19 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
var apiErr APIError
switch e := err.(type) {
case iampolicy.Error:
case policy.Error:
apiErr = APIError{
Code: "XMinioMalformedIAMPolicy",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case config.Error:
case config.ErrConfigNotFound:
apiErr = APIError{
Code: "XMinioConfigNotFoundError",
Description: e.Error(),
HTTPStatusCode: http.StatusNotFound,
}
case config.ErrConfigGeneric:
apiErr = APIError{
Code: "XMinioConfigError",
Description: e.Error(),
@@ -124,6 +130,18 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errDecommissionRebalanceAlreadyRunning):
apiErr = APIError{
Code: "XMinioDecommissionNotAllowed",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errRebalanceDecommissionAlreadyRunning):
apiErr = APIError{
Code: "XMinioRebalanceNotAllowed",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errConfigNotFound):
apiErr = APIError{
Code: "XMinioConfigError",
@@ -136,15 +154,9 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
Description: err.Error(),
HTTPStatusCode: http.StatusForbidden,
}
case errors.Is(err, errIAMServiceAccount):
case errors.Is(err, errIAMServiceAccountNotAllowed):
apiErr = APIError{
Code: "XMinioIAMServiceAccount",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errIAMServiceAccountUsed):
apiErr = APIError{
Code: "XMinioIAMServiceAccountUsed",
Code: "XMinioIAMServiceAccountNotAllowed",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
@@ -156,10 +168,16 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
}
case errors.Is(err, errPolicyInUse):
apiErr = APIError{
Code: "XMinioAdminPolicyInUse",
Code: "XMinioIAMPolicyInUse",
Description: "The policy cannot be removed, as it is in use",
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errSessionPolicyTooLarge):
apiErr = APIError{
Code: "XMinioIAMServiceAccountSessionPolicyTooLarge",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, kes.ErrKeyExists):
apiErr = APIError{
Code: "XMinioKMSKeyExists",
@@ -192,24 +210,6 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errTierBackendInUse):
apiErr = APIError{
Code: "XMinioAdminTierBackendInUse",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errTierBackendNotEmpty):
apiErr = APIError{
Code: "XMinioAdminTierBackendNotEmpty",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errTierInsufficientCreds):
apiErr = APIError{
Code: "XMinioAdminTierInsufficientCreds",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errIsTierPermError(err):
apiErr = APIError{
Code: "XMinioAdminTierInsufficientPermissions",
@@ -226,12 +226,10 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
// toAdminAPIErrCode - converts errErasureWriteQuorum error to admin API
// specific error.
func toAdminAPIErrCode(ctx context.Context, err error) APIErrorCode {
switch err {
case errErasureWriteQuorum:
if errors.Is(err, errErasureWriteQuorum) {
return ErrAdminConfigNoQuorum
default:
return toAPIErrorCode(ctx, err)
}
return toAPIErrorCode(ctx, err)
}
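The switch-to-errors.Is change above matters once sentinel errors get wrapped; a tiny standalone illustration of the difference, using a stand-in sentinel rather than the real one:

package main

import (
	"errors"
	"fmt"
)

// Stand-in sentinel; the real errErasureWriteQuorum lives in the cmd package.
var errWriteQuorum = errors.New("write quorum not met")

func main() {
	wrapped := fmt.Errorf("renameData: %w", errWriteQuorum)

	// A plain switch on the error value only matches the exact sentinel.
	switch wrapped {
	case errWriteQuorum:
		fmt.Println("switch: matched")
	default:
		fmt.Println("switch: not matched") // this branch runs for wrapped errors
	}

	// errors.Is unwraps, so wrapped sentinels still map to the quorum error code.
	fmt.Println("errors.Is:", errors.Is(wrapped, errWriteQuorum)) // true
}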
// wraps export error for more context


@@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@@ -26,27 +26,25 @@ import (
"strconv"
"strings"
"github.com/gorilla/mux"
"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/config/cache"
"github.com/minio/minio/internal/config/etcd"
xldap "github.com/minio/minio/internal/config/identity/ldap"
"github.com/minio/minio/internal/config/identity/openid"
idplugin "github.com/minio/minio/internal/config/identity/plugin"
polplugin "github.com/minio/minio/internal/config/policy/plugin"
"github.com/minio/minio/internal/config/storageclass"
"github.com/minio/minio/internal/config/subnet"
"github.com/minio/minio/internal/logger"
iampolicy "github.com/minio/pkg/iam/policy"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
)
// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteConfigKV")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -71,7 +69,7 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
return
}
cfg, err := readServerConfig(ctx, objectAPI)
cfg, err := readServerConfig(ctx, objectAPI, nil)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -81,19 +79,34 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(cfg, subSys); err != nil {
if err = validateConfig(ctx, cfg, subSys); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
// Check if subnet proxy being deleted and if so the value of proxy of subnet
// target of logger webhook configuration also should be deleted
loggerWebhookProxyDeleted := setLoggerWebhookSubnetProxy(subSys, cfg)
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// freshly retrieve the config so that default values are loaded for reset config
if cfg, err = getValidConfig(objectAPI); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
dynamic := config.SubSystemsDynamic.Contains(subSys)
if dynamic {
applyDynamic(ctx, objectAPI, cfg, subSys, r, w)
if subSys == config.SubnetSubSys && loggerWebhookProxyDeleted {
// Logger webhook proxy deleted, apply the dynamic changes
applyDynamic(ctx, objectAPI, cfg, config.LoggerWebhookSubSys, r, w)
}
}
}
@@ -110,13 +123,32 @@ func applyDynamic(ctx context.Context, objectAPI ObjectLayer, cfg config.Config,
w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
}
type badConfigErr struct {
Err error
}
// Error - return the error message
func (bce badConfigErr) Error() string {
return bce.Err.Error()
}
// Unwrap the error to its underlying error.
func (bce badConfigErr) Unwrap() error {
return bce.Err
}
type setConfigResult struct {
Cfg config.Config
SubSys string
Dynamic bool
LoggerWebhookCfgUpdated bool
}
// SetConfigKVHandler - PUT /minio/admin/v3/set-config-kv
func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetConfigKV")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -135,45 +167,67 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
return
}
cfg, err := readServerConfig(ctx, objectAPI)
result, err := setConfigKV(ctx, objectAPI, kvBytes)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
switch err.(type) {
case badConfigErr:
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
default:
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
}
return
}
dynamic, err := cfg.ReadConfig(bytes.NewReader(kvBytes))
if result.Dynamic {
applyDynamic(ctx, objectAPI, result.Cfg, result.SubSys, r, w)
// If logger webhook config updated (proxy due to callhome), explicitly dynamically
// apply the config
if result.LoggerWebhookCfgUpdated {
applyDynamic(ctx, objectAPI, result.Cfg, config.LoggerWebhookSubSys, r, w)
}
}
writeSuccessResponseHeadersOnly(w)
}
func setConfigKV(ctx context.Context, objectAPI ObjectLayer, kvBytes []byte) (result setConfigResult, err error) {
result.Cfg, err = readServerConfig(ctx, objectAPI, nil)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
subSys, _, _, err := config.GetSubSys(string(kvBytes))
result.Dynamic, err = result.Cfg.ReadConfig(bytes.NewReader(kvBytes))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(cfg, subSys); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
result.SubSys, _, _, err = config.GetSubSys(string(kvBytes))
if err != nil {
return
}
tgts, err := config.ParseConfigTargetID(bytes.NewReader(kvBytes))
if err != nil {
return
}
ctx = context.WithValue(ctx, config.ContextKeyForTargetFromConfig, tgts)
if verr := validateConfig(ctx, result.Cfg, result.SubSys); verr != nil {
err = badConfigErr{Err: verr}
return
}
// Check if subnet proxy being set and if so set the same value to proxy of subnet
// target of logger webhook configuration
result.LoggerWebhookCfgUpdated = setLoggerWebhookSubnetProxy(result.SubSys, result.Cfg)
// Update the actual server config on disk.
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
if err = saveServerConfig(ctx, objectAPI, result.Cfg); err != nil {
return
}
// Write to the config input KV to history.
if err = saveServerConfigHistory(ctx, objectAPI, kvBytes); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if dynamic {
applyDynamic(ctx, objectAPI, cfg, subSys, r, w)
}
writeSuccessResponseHeadersOnly(w)
// Write the config input KV to history.
err = saveServerConfigHistory(ctx, objectAPI, kvBytes)
return
}
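A compact sketch of the error-wrapping pattern setConfigKV relies on, with placeholder logic: validation failures come back as badConfigErr so the HTTP handler can answer with the bad-config API error (the handler above uses a type switch; errors.As works equally well thanks to Unwrap), while any other failure falls through to the generic admin error mapping.

package main

import (
	"errors"
	"fmt"
)

type badConfigErr struct{ Err error }

func (b badConfigErr) Error() string { return b.Err.Error() }
func (b badConfigErr) Unwrap() error { return b.Err }

// setConfigKV is a placeholder that only mimics the error classification.
func setConfigKV(kv string) error {
	if kv == "" {
		return badConfigErr{Err: errors.New("empty config KV")}
	}
	return nil
}

func main() {
	err := setConfigKV("")

	var bce badConfigErr
	switch {
	case errors.As(err, &bce):
		fmt.Println("reply with ErrAdminConfigBadJSON:", bce.Err)
	case err != nil:
		fmt.Println("reply with the generic admin API error:", err)
	default:
		fmt.Println("config saved, apply dynamic subsystems if needed")
	}
}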
// GetConfigKVHandler - GET /minio/admin/v3/get-config-kv?key={key}
@@ -182,12 +236,12 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
// 1. `subsys:target` -> request for config of a single subsystem and target pair.
// 2. `subsys:` -> request for config of a single subsystem and the default target.
// 3. `subsys` -> request for config of all targets for the given subsystem.
//
// This is a reporting API and config secrets are redacted in the response.
func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetConfigKV")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -209,7 +263,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
}
}
subSysConfigs, err := cfg.GetSubsysInfo(subSys, target)
subSysConfigs, err := cfg.GetSubsysInfo(subSys, target, true)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -217,7 +271,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
var s strings.Builder
for _, subSysConfig := range subSysConfigs {
subSysConfig.AddString(&s, false)
subSysConfig.WriteTo(&s, false)
}
password := cred.SecretKey
@@ -231,11 +285,9 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
}
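A tiny illustration of the three key formats the get-config-kv endpoint accepts, using identity_openid as an example subsystem and assuming the usual /minio/admin/v3 prefix:

package main

import "fmt"

func main() {
	keys := []string{
		"identity_openid:dex1", // single subsystem, named target
		"identity_openid:",     // single subsystem, default target
		"identity_openid",      // all targets for the subsystem
	}
	for _, key := range keys {
		fmt.Printf("GET /minio/admin/v3/get-config-kv?key=%s\n", key)
	}
}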
func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ClearConfigHistoryKV")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -266,11 +318,9 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *
// RestoreConfigHistoryKVHandler - restores a config with KV settings for the given KV id.
func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RestoreConfigHistoryKV")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -288,7 +338,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
return
}
cfg, err := readServerConfig(ctx, objectAPI)
cfg, err := readServerConfig(ctx, objectAPI, nil)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -299,7 +349,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
return
}
if err = validateConfig(cfg, ""); err != nil {
if err = validateConfig(ctx, cfg, ""); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
@@ -314,11 +364,9 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
// ListConfigHistoryKVHandler - lists all the KV ids.
func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListConfigHistoryKV")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -354,11 +402,9 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h
// HelpConfigKVHandler - GET /minio/admin/v3/help-config-kv?subSys={subSys}&key={key}
func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "HelpConfigKV")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -381,11 +427,9 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req
// SetConfigHandler - PUT /minio/admin/v3/config
func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetConfig")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -410,7 +454,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
return
}
if err = validateConfig(cfg, ""); err != nil {
if err = validateConfig(ctx, cfg, ""); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
@@ -431,13 +475,13 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
}
// GetConfigHandler - GET /minio/admin/v3/config
// Get config.json of this minio setup.
//
// This endpoint is mainly for exporting and backing up the configuration.
// Secrets are not redacted.
func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetConfig")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -446,21 +490,15 @@ func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Reques
var s strings.Builder
hkvs := config.HelpSubSysMap[""]
var count int
for _, hkv := range hkvs {
count += len(cfg[hkv.Key])
}
for _, hkv := range hkvs {
// We ignore the error below, as we cannot get one.
cfgSubsysItems, _ := cfg.GetSubsysInfo(hkv.Key, "")
cfgSubsysItems, _ := cfg.GetSubsysInfo(hkv.Key, "", false)
for _, item := range cfgSubsysItems {
off := item.Config.Get(config.Enable) == config.EnableOff
switch hkv.Key {
case config.EtcdSubSys:
off = !etcd.Enabled(item.Config)
case config.CacheSubSys:
off = !cache.Enabled(item.Config)
case config.StorageClassSubSys:
off = !storageclass.Enabled(item.Config)
case config.PolicyPluginSubSys:
@@ -470,11 +508,11 @@ func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Reques
case config.IdentityLDAPSubSys:
off = !xldap.Enabled(item.Config)
case config.IdentityTLSSubSys:
off = !globalSTSTLSConfig.Enabled
off = !globalIAMSys.STSTLSConfig.Enabled
case config.IdentityPluginSubSys:
off = !idplugin.Enabled(item.Config)
}
item.AddString(&s, off)
item.WriteTo(&s, off)
}
}
@@ -487,3 +525,18 @@ func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Reques
writeSuccessResponseJSON(w, econfigData)
}
// setLoggerWebhookSubnetProxy - Sets the logger webhook's subnet proxy value to
// one being set for subnet proxy
func setLoggerWebhookSubnetProxy(subSys string, cfg config.Config) bool {
if subSys == config.SubnetSubSys || subSys == config.LoggerWebhookSubSys {
subnetWebhookCfg := cfg[config.LoggerWebhookSubSys][subnet.LoggerWebhookName]
loggerWebhookSubnetProxy := subnetWebhookCfg.Get(logger.Proxy)
subnetProxy := cfg[config.SubnetSubSys][config.Default].Get(logger.Proxy)
if loggerWebhookSubnetProxy != subnetProxy {
subnetWebhookCfg.Set(logger.Proxy, subnetProxy)
return true
}
}
return false
}
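A simplified model of that synchronisation using plain maps and hypothetical values, kept separate from the real config.Config type: when the subnet proxy differs from the copy stored on the logger webhook target, the copy is updated and the caller is told to re-apply the dynamic logger webhook configuration.

package main

import "fmt"

// syncProxy mirrors the intent of setLoggerWebhookSubnetProxy with plain maps.
func syncProxy(subnetProxy string, webhook map[string]string) bool {
	if webhook["proxy"] != subnetProxy {
		webhook["proxy"] = subnetProxy
		return true // caller should re-apply the dynamic logger webhook config
	}
	return false
}

func main() {
	webhook := map[string]string{"proxy": ""}
	fmt.Println(syncProxy("http://proxy.example.com:8080", webhook)) // true: copied over
	fmt.Println(syncProxy("http://proxy.example.com:8080", webhook)) // false: already in sync
}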


@@ -20,32 +20,25 @@ package cmd
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"github.com/gorilla/mux"
"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/config"
cfgldap "github.com/minio/minio/internal/config/identity/ldap"
"github.com/minio/minio/internal/config/identity/openid"
"github.com/minio/minio/internal/logger"
iampolicy "github.com/minio/pkg/iam/policy"
"github.com/minio/mux"
"github.com/minio/pkg/v2/ldap"
"github.com/minio/pkg/v2/policy"
)
// List of implemented ID config types.
var idCfgTypes = set.CreateStringSet("openid")
// SetIdentityProviderCfg:
//
// PUT <admin-prefix>/id-cfg?type=openid&name=dex1
func (a adminAPIHandlers) SetIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetIdentityCfg")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, isUpdate bool) {
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
@@ -56,6 +49,14 @@ func (a adminAPIHandlers) SetIdentityProviderCfg(w http.ResponseWriter, r *http.
return
}
// Ensure body content type is opaque to ensure that request body has not
// been interpreted as form data.
contentType := r.Header.Get("Content-Type")
if contentType != "application/octet-stream" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
@@ -64,43 +65,49 @@ func (a adminAPIHandlers) SetIdentityProviderCfg(w http.ResponseWriter, r *http.
return
}
cfgType := mux.Vars(r)["type"]
if !idCfgTypes.Contains(cfgType) {
// TODO: change this to invalid type error when implementation
// is complete.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
idpCfgType := mux.Vars(r)["type"]
if !madmin.ValidIDPConfigTypes.Contains(idpCfgType) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigInvalidIDPType), r.URL)
return
}
var cfgDataBuilder strings.Builder
switch cfgType {
case "openid":
fmt.Fprintf(&cfgDataBuilder, "identity_openid")
var subSys string
switch idpCfgType {
case madmin.OpenidIDPCfg:
subSys = madmin.IdentityOpenIDSubSys
case madmin.LDAPIDPCfg:
subSys = madmin.IdentityLDAPSubSys
}
// Ensure body content type is opaque.
contentType := r.Header.Get("Content-Type")
if contentType != "application/octet-stream" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}
// Subsystem configuration name could be empty.
cfgName := mux.Vars(r)["name"]
cfgTarget := madmin.Default
if cfgName != "" {
fmt.Fprintf(&cfgDataBuilder, "%s%s", config.SubSystemSeparator, cfgName)
cfgTarget = cfgName
if idpCfgType == madmin.LDAPIDPCfg && cfgName != madmin.Default {
// LDAP does not support multiple configurations. So cfgName must be
// empty or `madmin.Default`.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigLDAPNonDefaultConfigName), r.URL)
return
}
}
fmt.Fprintf(&cfgDataBuilder, "%s%s", config.KvSpaceSeparator, string(reqBytes))
cfgData := cfgDataBuilder.String()
subSys, _, _, err := config.GetSubSys(cfgData)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
// Check that this is a valid Create vs Update API call.
s := globalServerConfig.Clone()
if apiErrCode := handleCreateUpdateValidation(s, subSys, cfgTarget, isUpdate); apiErrCode != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL)
return
}
cfg, err := readServerConfig(ctx, objectAPI)
cfgData := ""
{
tgtSuffix := ""
if cfgTarget != madmin.Default {
tgtSuffix = config.SubSystemSeparator + cfgTarget
}
cfgData = subSys + tgtSuffix + config.KvSpaceSeparator + string(reqBytes)
}
cfg, err := readServerConfig(ctx, objectAPI, nil)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -114,11 +121,21 @@ func (a adminAPIHandlers) SetIdentityProviderCfg(w http.ResponseWriter, r *http.
// IDP config is not dynamic. Sanity check.
if dynamic {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), "", r.URL)
return
}
if err = validateConfig(cfg, subSys); err != nil {
if err = validateConfig(ctx, cfg, subSys); err != nil {
var validationErr ldap.Validation
if errors.As(err, &validationErr) {
// If we got an LDAP validation error, we need to send appropriate
// error message back to client (likely mc).
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigLDAPValidation),
validationErr.FormatError(), r.URL)
return
}
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
@@ -138,86 +155,98 @@ func (a adminAPIHandlers) SetIdentityProviderCfg(w http.ResponseWriter, r *http.
writeSuccessResponseHeadersOnly(w)
}
// GetIdentityProviderCfg:
//
// GET <admin-prefix>/id-cfg?type=openid&name=dex_test
//
// GetIdentityProviderCfg returns a list of configured IDPs on the server if
// name is empty. If name is non-empty, returns the configuration details for
// the IDP of the given type and configuration name. The configuration name for
// the default ("un-named") configuration target is `_`.
func (a adminAPIHandlers) GetIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetIdentityProviderCfg")
func handleCreateUpdateValidation(s config.Config, subSys, cfgTarget string, isUpdate bool) APIErrorCode {
if cfgTarget != madmin.Default {
// This cannot give an error at this point.
subSysTargets, _ := s.GetAvailableTargets(subSys)
subSysTargetsSet := set.CreateStringSet(subSysTargets...)
if isUpdate && !subSysTargetsSet.Contains(cfgTarget) {
return ErrAdminConfigIDPCfgNameDoesNotExist
}
if !isUpdate && subSysTargetsSet.Contains(cfgTarget) {
return ErrAdminConfigIDPCfgNameAlreadyExists
}
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
return ErrNone
}
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
// For the default configuration name, since it will always be an available
// target, we need to check if a configuration value has been set previously
// to figure out if this is a valid create or update API call.
// This cannot really error (FIXME: improve the type for GetConfigInfo)
var cfgInfos []madmin.IDPCfgInfo
switch subSys {
case madmin.IdentityOpenIDSubSys:
cfgInfos, _ = globalIAMSys.OpenIDConfig.GetConfigInfo(s, cfgTarget)
case madmin.IdentityLDAPSubSys:
cfgInfos, _ = globalIAMSys.LDAPConfig.GetConfigInfo(s, cfgTarget)
}
if len(cfgInfos) > 0 && !isUpdate {
return ErrAdminConfigIDPCfgNameAlreadyExists
}
if len(cfgInfos) == 0 && isUpdate {
return ErrAdminConfigIDPCfgNameDoesNotExist
}
return ErrNone
}
// AddIdentityProviderCfg: adds a new IDP config for openid/ldap.
//
// PUT <admin-prefix>/idp-cfg/openid/dex1 -> create named config `dex1`
//
// PUT <admin-prefix>/idp-cfg/openid/_ -> create (default) named config `_`
func (a adminAPIHandlers) AddIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
addOrUpdateIDPHandler(ctx, w, r, false)
}
// UpdateIdentityProviderCfg: updates an existing IDP config for openid/ldap.
//
// POST <admin-prefix>/idp-cfg/openid/dex1 -> update named config `dex1`
//
// POST <admin-prefix>/idp-cfg/openid/_ -> update (default) named config `_`
func (a adminAPIHandlers) UpdateIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
addOrUpdateIDPHandler(ctx, w, r, true)
}
// ListIdentityProviderCfg:
//
// GET <admin-prefix>/idp-cfg/openid -> lists openid provider configs.
func (a adminAPIHandlers) ListIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
cfgType := mux.Vars(r)["type"]
cfgName := r.Form.Get("name")
password := cred.SecretKey
if !idCfgTypes.Contains(cfgType) {
// TODO: change this to invalid type error when implementation
// is complete.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
idpCfgType := mux.Vars(r)["type"]
if !madmin.ValidIDPConfigTypes.Contains(idpCfgType) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigInvalidIDPType), r.URL)
return
}
// If no cfgName is provided, we list.
if cfgName == "" {
a.listIdentityProviders(ctx, w, r, cfgType, password)
return
}
var cfgList []madmin.IDPListItem
var err error
switch idpCfgType {
case madmin.OpenidIDPCfg:
cfg := globalServerConfig.Clone()
cfgList, err = globalIAMSys.OpenIDConfig.GetConfigList(cfg)
case madmin.LDAPIDPCfg:
cfg := globalServerConfig.Clone()
cfgList, err = globalIAMSys.LDAPConfig.GetConfigList(cfg)
cfg := globalServerConfig.Clone()
cfgInfos, err := globalOpenIDConfig.GetConfigInfo(cfg, cfgName)
if err != nil {
if err == openid.ErrProviderConfigNotFound {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
return
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
res := madmin.IDPConfig{
Type: cfgType,
Name: cfgName,
Info: cfgInfos,
}
data, err := json.Marshal(res)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
func (a adminAPIHandlers) listIdentityProviders(ctx context.Context, w http.ResponseWriter, r *http.Request, cfgType, password string) {
// var subSys string
switch cfgType {
case "openid":
// subSys = config.IdentityOpenIDSubSys
default:
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
cfg := globalServerConfig.Clone()
cfgList, err := globalOpenIDConfig.GetConfigList(cfg)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -238,33 +267,37 @@ func (a adminAPIHandlers) listIdentityProviders(ctx context.Context, w http.Resp
writeSuccessResponseJSON(w, econfigData)
}
// DeleteIdentityProviderCfg:
// GetIdentityProviderCfg:
//
// DELETE <admin-prefix>/id-cfg?type=openid&name=dex_test
func (a adminAPIHandlers) DeleteIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteIdentityProviderCfg")
// GET <admin-prefix>/idp-cfg/openid/dex_test
func (a adminAPIHandlers) GetIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
cfgType := mux.Vars(r)["type"]
idpCfgType := mux.Vars(r)["type"]
cfgName := mux.Vars(r)["name"]
if !idCfgTypes.Contains(cfgType) {
// TODO: change this to invalid type error when implementation
// is complete.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
password := cred.SecretKey
if !madmin.ValidIDPConfigTypes.Contains(idpCfgType) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigInvalidIDPType), r.URL)
return
}
cfg := globalServerConfig.Clone()
cfgInfos, err := globalOpenIDConfig.GetConfigInfo(cfg, cfgName)
var cfgInfos []madmin.IDPCfgInfo
var err error
switch idpCfgType {
case madmin.OpenidIDPCfg:
cfgInfos, err = globalIAMSys.OpenIDConfig.GetConfigInfo(cfg, cfgName)
case madmin.LDAPIDPCfg:
cfgInfos, err = globalIAMSys.LDAPConfig.GetConfigInfo(cfg, cfgName)
}
if err != nil {
if err == openid.ErrProviderConfigNotFound {
if errors.Is(err, openid.ErrProviderConfigNotFound) || errors.Is(err, cfgldap.ErrProviderConfigNotFound) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
return
}
@@ -273,39 +306,127 @@ func (a adminAPIHandlers) DeleteIdentityProviderCfg(w http.ResponseWriter, r *ht
return
}
hasEnv := false
for _, ci := range cfgInfos {
if ci.IsCfg && ci.IsEnv {
hasEnv = true
break
}
res := madmin.IDPConfig{
Type: idpCfgType,
Name: cfgName,
Info: cfgInfos,
}
if hasEnv {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigEnvOverridden), r.URL)
return
}
var subSys string
switch cfgType {
case "openid":
subSys = config.IdentityOpenIDSubSys
default:
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
cfg, err = readServerConfig(ctx, objectAPI)
data, err := json.Marshal(res)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = cfg.DelKVS(fmt.Sprintf("%s:%s", subSys, cfgName)); err != nil {
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(cfg, subSys); err != nil {
writeSuccessResponseJSON(w, econfigData)
}
// DeleteIdentityProviderCfg:
//
// DELETE <admin-prefix>/idp-cfg/openid/dex_test
func (a adminAPIHandlers) DeleteIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
idpCfgType := mux.Vars(r)["type"]
cfgName := mux.Vars(r)["name"]
if !madmin.ValidIDPConfigTypes.Contains(idpCfgType) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigInvalidIDPType), r.URL)
return
}
cfgCopy := globalServerConfig.Clone()
var subSys string
switch idpCfgType {
case madmin.OpenidIDPCfg:
subSys = config.IdentityOpenIDSubSys
cfgInfos, err := globalIAMSys.OpenIDConfig.GetConfigInfo(cfgCopy, cfgName)
if err != nil {
if errors.Is(err, openid.ErrProviderConfigNotFound) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
return
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
hasEnv := false
for _, ci := range cfgInfos {
if ci.IsCfg && ci.IsEnv {
hasEnv = true
break
}
}
if hasEnv {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigEnvOverridden), r.URL)
return
}
case madmin.LDAPIDPCfg:
subSys = config.IdentityLDAPSubSys
cfgInfos, err := globalIAMSys.LDAPConfig.GetConfigInfo(cfgCopy, cfgName)
if err != nil {
if errors.Is(err, openid.ErrProviderConfigNotFound) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
return
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
hasEnv := false
for _, ci := range cfgInfos {
if ci.IsCfg && ci.IsEnv {
hasEnv = true
break
}
}
if hasEnv {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigEnvOverridden), r.URL)
return
}
default:
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
cfg, err := readServerConfig(ctx, objectAPI, nil)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
cfgKey := fmt.Sprintf("%s:%s", subSys, cfgName)
if cfgName == madmin.Default {
cfgKey = subSys
}
if err = cfg.DelKVS(cfgKey); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(ctx, cfg, subSys); err != nil {
var validationErr ldap.Validation
if errors.As(err, &validationErr) {
// If we got an LDAP validation error, we need to send appropriate
// error message back to client (likely mc).
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigLDAPValidation),
validationErr.FormatError(), r.URL)
return
}
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}


@@ -0,0 +1,177 @@
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"encoding/json"
"io"
"net/http"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
)
// ListLDAPPolicyMappingEntities lists users/groups mapped to given/all policies.
//
// GET <admin-prefix>/idp/ldap/policy-entities?[query-params]
//
// Query params:
//
// user=... -> repeatable query parameter, specifying users to query for
// policy mapping
//
// group=... -> repeatable query parameter, specifying groups to query for
// policy mapping
//
// policy=... -> repeatable query parameter, specifying policy to query for
// user/group mapping
//
// When all query parameters are omitted, returns mappings for all policies.
func (a adminAPIHandlers) ListLDAPPolicyMappingEntities(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
// Check authorization.
objectAPI, cred := validateAdminReq(ctx, w, r,
policy.ListGroupsAdminAction, policy.ListUsersAdminAction, policy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
}
// Validate API arguments.
q := madmin.PolicyEntitiesQuery{
Users: r.Form["user"],
Groups: r.Form["group"],
Policy: r.Form["policy"],
}
// Query IAM
res, err := globalIAMSys.QueryLDAPPolicyEntities(r.Context(), q)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Encode result and send response.
data, err := json.Marshal(res)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
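A small sketch of how the repeatable query parameters described above can be assembled, with a hypothetical host, hypothetical DNs, and request signing left out:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Add("user", "uid=alice,ou=people,dc=example,dc=org")  // hypothetical DN
	q.Add("group", "cn=admins,ou=groups,dc=example,dc=org") // hypothetical DN
	q.Add("policy", "readwrite")

	u := url.URL{
		Scheme:   "http",
		Host:     "localhost:9000", // placeholder host
		Path:     "/minio/admin/v3/idp/ldap/policy-entities",
		RawQuery: q.Encode(),
	}
	// Omitting all three parameters returns mappings for every policy.
	fmt.Println(u.String())
}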
// AttachDetachPolicyLDAP attaches or detaches policies from an LDAP entity
// (user or group).
//
// POST <admin-prefix>/idp/ldap/policy/{operation}
func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
// Check authorization.
objectAPI, cred := validateAdminReq(ctx, w, r, policy.UpdatePolicyAssociationAction)
if objectAPI == nil {
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
// Ensure body content type is opaque to ensure that request body has not
// been interpreted as form data.
contentType := r.Header.Get("Content-Type")
if contentType != "application/octet-stream" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}
// Validate operation
operation := mux.Vars(r)["operation"]
if operation != "attach" && operation != "detach" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
return
}
isAttach := operation == "attach"
// Validate API arguments in body.
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
var par madmin.PolicyAssociationReq
err = json.Unmarshal(reqBytes, &par)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
if err := par.IsValid(); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
// Call IAM subsystem
updatedAt, addedOrRemoved, _, err := globalIAMSys.PolicyDBUpdateLDAP(ctx, isAttach, par)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
respBody := madmin.PolicyAssociationResp{
UpdatedAt: updatedAt,
}
if isAttach {
respBody.PoliciesAttached = addedOrRemoved
} else {
respBody.PoliciesDetached = addedOrRemoved
}
data, err := json.Marshal(respBody)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
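For orientation, a client-side sketch of the request this handler expects: a JSON madmin.PolicyAssociationReq (field names as assumed from madmin-go/v3), encrypted with the admin credential's secret key and posted as application/octet-stream. The URL, credentials and DN below are placeholders, and real clients would go through madmin-go, which also signs the request.

package main

import (
	"bytes"
	"encoding/json"
	"log"
	"net/http"

	"github.com/minio/madmin-go/v3"
)

func attachPolicyLDAP(adminURL, secretKey string) error {
	req := madmin.PolicyAssociationReq{
		Policies: []string{"readwrite"},
		User:     "uid=alice,ou=people,dc=example,dc=org", // hypothetical LDAP DN
	}
	body, err := json.Marshal(req)
	if err != nil {
		return err
	}
	// The handler decrypts the body with the same credential's secret key.
	encrypted, err := madmin.EncryptData(secretKey, body)
	if err != nil {
		return err
	}
	httpReq, err := http.NewRequest(http.MethodPost,
		adminURL+"/idp/ldap/policy/attach", bytes.NewReader(encrypted))
	if err != nil {
		return err
	}
	httpReq.Header.Set("Content-Type", "application/octet-stream")
	resp, err := http.DefaultClient.Do(httpReq) // signing elided for brevity
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func main() {
	if err := attachPolicyLDAP("http://localhost:9000/minio/admin/v3", "secretkey"); err != nil {
		log.Fatal(err)
	}
}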


@@ -19,20 +19,25 @@ package cmd
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"github.com/gorilla/mux"
"github.com/minio/minio/internal/logger"
iampolicy "github.com/minio/pkg/iam/policy"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
)
var (
errRebalanceDecommissionAlreadyRunning = errors.New("Rebalance cannot be started, decommission is already in progress")
errDecommissionRebalanceAlreadyRunning = errors.New("Decommission cannot be started, rebalance is already in progress")
)
func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "StartDecommission")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DecommissionAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.DecommissionAdminAction)
if objectAPI == nil {
return
}
@@ -43,23 +48,53 @@ func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Reque
return
}
pools, ok := objectAPI.(*erasureServerPools)
if !ok {
z, ok := objectAPI.(*erasureServerPools)
if !ok || len(z.serverPools) == 1 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if z.IsDecommissionRunning() {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errDecommissionAlreadyRunning), r.URL)
return
}
if z.IsRebalanceStarted() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceAlreadyStarted), r.URL)
return
}
vars := mux.Vars(r)
v := vars["pool"]
idx := globalEndpoints.GetPoolIdx(v)
if idx == -1 {
// We didn't find any matching pools, invalid input
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
return
pools := strings.Split(v, ",")
poolIndices := make([]int, 0, len(pools))
for _, pool := range pools {
idx := globalEndpoints.GetPoolIdx(pool)
if idx == -1 {
// We didn't find any matching pools, invalid input
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
return
}
var pool *erasureSets
for pidx := range z.serverPools {
if pidx == idx {
pool = z.serverPools[idx]
break
}
}
if pool == nil {
// We didn't find any matching pools, invalid input
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
return
}
poolIndices = append(poolIndices, idx)
}
if ep := globalEndpoints[idx].Endpoints[0]; !ep.IsLocal {
if len(poolIndices) > 0 && !globalEndpoints[poolIndices[0]].Endpoints[0].IsLocal {
ep := globalEndpoints[poolIndices[0]].Endpoints[0]
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == ep.Host {
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
@@ -69,18 +104,16 @@ func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Reque
}
}
if err := pools.Decommission(r.Context(), idx); err != nil {
if err := z.Decommission(r.Context(), poolIndices...); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
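A toy illustration of the new comma-separated pool argument handling, with hypothetical pool names: every entry must resolve to a known pool index, otherwise the whole request is rejected.

package main

import (
	"fmt"
	"strings"
)

// poolIndices resolves a comma-separated pool argument against known pools.
func poolIndices(arg string, known map[string]int) ([]int, error) {
	var out []int
	for _, pool := range strings.Split(arg, ",") {
		idx, ok := known[pool]
		if !ok {
			return nil, fmt.Errorf("unknown pool %q", pool) // request rejected as invalid argument
		}
		out = append(out, idx)
	}
	return out, nil
}

func main() {
	known := map[string]int{
		"http://minio{1...4}/data{1...4}": 0, // hypothetical pool definitions
		"http://minio{5...8}/data{1...4}": 1,
	}
	fmt.Println(poolIndices("http://minio{1...4}/data{1...4},http://minio{5...8}/data{1...4}", known))
}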
func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "CancelDecommission")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DecommissionAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.DecommissionAdminAction)
if objectAPI == nil {
return
}
@@ -124,11 +157,9 @@ func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Requ
}
func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "StatusPool")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerInfoAdminAction, iampolicy.DecommissionAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ServerInfoAdminAction, policy.DecommissionAdminAction)
if objectAPI == nil {
return
}
@@ -167,11 +198,9 @@ func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
}
func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListPools")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerInfoAdminAction, iampolicy.DecommissionAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ServerInfoAdminAction, policy.DecommissionAdminAction)
if objectAPI == nil {
return
}
@@ -200,3 +229,134 @@ func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
logger.LogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus))
}
func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.RebalanceAdminAction)
if objectAPI == nil {
return
}
// NB rebalance-start admin API is always coordinated from first pool's
// first node. The following is required to serialize (the effects of)
// concurrent rebalance-start commands.
if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == ep.Host {
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
return
}
}
}
}
pools, ok := objectAPI.(*erasureServerPools)
if !ok || len(pools.serverPools) == 1 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if pools.IsDecommissionRunning() {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errRebalanceDecommissionAlreadyRunning), r.URL)
return
}
if pools.IsRebalanceStarted() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceAlreadyStarted), r.URL)
return
}
bucketInfos, err := objectAPI.ListBuckets(ctx, BucketOptions{})
if err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
buckets := make([]string, 0, len(bucketInfos))
for _, bInfo := range bucketInfos {
buckets = append(buckets, bInfo.Name)
}
var id string
if id, err = pools.initRebalanceMeta(ctx, buckets); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Rebalance routine is run on the first node of any pool participating in rebalance.
pools.StartRebalance()
b, err := json.Marshal(struct {
ID string `json:"id"`
}{ID: id})
if err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, b)
// Notify peers to load rebalance.bin and start rebalance routine if they happen to be
// participating pool's leader node
globalNotificationSys.LoadRebalanceMeta(ctx, true)
}
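A client-side sketch that decodes the small JSON body a successful rebalance start returns; the literal body below is a stand-in for the HTTP response.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Stand-in for the response body written by the handler above.
	body := strings.NewReader(`{"id":"6f1a1fc4"}`)

	var res struct {
		ID string `json:"id"`
	}
	if err := json.NewDecoder(body).Decode(&res); err != nil {
		panic(err)
	}
	fmt.Println("rebalance started, id:", res.ID)
}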
func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.RebalanceAdminAction)
if objectAPI == nil {
return
}
// Proxy rebalance-status to first pool first node, so that users see a
// consistent view of rebalance progress even though different rebalancing
// pools may temporarily have out of date info on the others.
if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == ep.Host {
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
return
}
}
}
}
pools, ok := objectAPI.(*erasureServerPools)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
rs, err := rebalanceStatus(ctx, pools)
if err != nil {
if errors.Is(err, errRebalanceNotStarted) || errors.Is(err, errConfigNotFound) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceNotStarted), r.URL)
return
}
logger.LogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
logger.LogIf(r.Context(), json.NewEncoder(w).Encode(rs))
}
func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.RebalanceAdminAction)
if objectAPI == nil {
return
}
pools, ok := objectAPI.(*erasureServerPools)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Cancel any ongoing rebalance operation
globalNotificationSys.StopRebalance(r.Context())
writeSuccessResponseHeadersOnly(w)
logger.LogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
}


@@ -20,28 +20,28 @@ package cmd
import (
"bytes"
"context"
"encoding/gob"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"strings"
"sync/atomic"
"time"
"github.com/gorilla/mux"
"github.com/minio/madmin-go"
"github.com/dustin/go-humanize"
"github.com/minio/madmin-go/v3"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/bucket/policy"
iampolicy "github.com/minio/pkg/iam/policy"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
)
// SiteReplicationAdd - PUT /minio/admin/v3/site-replication/add
func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SiteReplicationAdd")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.SiteReplicationAddAction)
if objectAPI == nil {
return
}
@@ -52,7 +52,8 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
return
}
status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites)
opts := getSRAddOptions(r)
status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites, opts)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -68,16 +69,19 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
writeSuccessResponseJSON(w, body)
}
func getSRAddOptions(r *http.Request) (opts madmin.SRAddOptions) {
opts.ReplicateILMExpiry = r.Form.Get("replicateILMExpiry") == "true"
return
}
// SRPeerJoin - PUT /minio/admin/v3/site-replication/join
//
// used internally to tell current cluster to enable SR with
// the provided peer clusters and service account.
func (a adminAPIHandlers) SRPeerJoin(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SRPeerJoin")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.SiteReplicationAddAction)
if objectAPI == nil {
return
}
@@ -97,11 +101,9 @@ func (a adminAPIHandlers) SRPeerJoin(w http.ResponseWriter, r *http.Request) {
// SRPeerBucketOps - PUT /minio/admin/v3/site-replication/bucket-ops?bucket=x&operation=y
func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SRPeerBucketOps")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationOperationAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationOperationAction)
if objectAPI == nil {
return
}
@@ -115,37 +117,23 @@ func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request
default:
err = errSRInvalidRequest(errInvalidArgument)
case madmin.MakeWithVersioningBktOp:
_, isLockEnabled := r.Form["lockEnabled"]
_, isVersioningEnabled := r.Form["versioningEnabled"]
_, isForceCreate := r.Form["forceCreate"]
createdAtStr := strings.TrimSpace(r.Form.Get("createdAt"))
createdAt, cerr := time.Parse(time.RFC3339Nano, createdAtStr)
createdAt, cerr := time.Parse(time.RFC3339Nano, strings.TrimSpace(r.Form.Get("createdAt")))
if cerr != nil {
createdAt = timeSentinel
}
opts := MakeBucketOptions{
Location: r.Form.Get("location"),
LockEnabled: isLockEnabled,
VersioningEnabled: isVersioningEnabled,
ForceCreate: isForceCreate,
LockEnabled: r.Form.Get("lockEnabled") == "true",
VersioningEnabled: r.Form.Get("versioningEnabled") == "true",
ForceCreate: r.Form.Get("forceCreate") == "true",
CreatedAt: createdAt,
}
err = globalSiteReplicationSys.PeerBucketMakeWithVersioningHandler(ctx, bucket, opts)
case madmin.ConfigureReplBktOp:
err = globalSiteReplicationSys.PeerBucketConfigureReplHandler(ctx, bucket)
case madmin.DeleteBucketBktOp:
_, noRecreate := r.Form["noRecreate"]
case madmin.DeleteBucketBktOp, madmin.ForceDeleteBucketBktOp:
err = globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, DeleteBucketOptions{
Force: false,
NoRecreate: noRecreate,
SRDeleteOp: getSRBucketDeleteOp(true),
})
case madmin.ForceDeleteBucketBktOp:
_, noRecreate := r.Form["noRecreate"]
err = globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, DeleteBucketOptions{
Force: true,
NoRecreate: noRecreate,
Force: operation == madmin.ForceDeleteBucketBktOp,
SRDeleteOp: getSRBucketDeleteOp(true),
})
case madmin.PurgeDeletedBucketOp:
@@ -160,11 +148,9 @@ func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request
// SRPeerReplicateIAMItem - PUT /minio/admin/v3/site-replication/iam-item
func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SRPeerReplicateIAMItem")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationOperationAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationOperationAction)
if objectAPI == nil {
return
}
@@ -183,7 +169,7 @@ func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http.
if item.Policy == nil {
err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, nil, item.UpdatedAt)
} else {
policy, perr := iampolicy.ParseConfig(bytes.NewReader(item.Policy))
policy, perr := policy.ParseConfig(bytes.NewReader(item.Policy))
if perr != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, perr), r.URL)
return
@@ -212,13 +198,11 @@ func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http.
}
}
// SRPeerReplicateBucketItem - PUT /minio/admin/v3/site-replication/bucket-meta
// SRPeerReplicateBucketItem - PUT /minio/admin/v3/site-replication/peer/bucket-meta
func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SRPeerReplicateBucketItem")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationOperationAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationOperationAction)
if objectAPI == nil {
return
}
@@ -229,15 +213,20 @@ func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *ht
return
}
if item.Bucket == "" {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errSRInvalidRequest(errInvalidArgument)), r.URL)
return
}
var err error
switch item.Type {
default:
err = errSRInvalidRequest(errInvalidArgument)
err = globalSiteReplicationSys.PeerBucketMetadataUpdateHandler(ctx, item)
case madmin.SRBucketMetaTypePolicy:
if item.Policy == nil {
err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, nil, item.UpdatedAt)
} else {
bktPolicy, berr := policy.ParseConfig(bytes.NewReader(item.Policy), item.Bucket)
bktPolicy, berr := policy.ParseBucketPolicyConfig(bytes.NewReader(item.Policy), item.Bucket)
if berr != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, berr), r.URL)
return
@@ -258,7 +247,7 @@ func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *ht
return
}
if err = globalSiteReplicationSys.PeerBucketQuotaConfigHandler(ctx, item.Bucket, quotaConfig, item.UpdatedAt); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
@@ -270,6 +259,8 @@ func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *ht
err = globalSiteReplicationSys.PeerBucketObjectLockConfigHandler(ctx, item.Bucket, item.ObjectLockConfig, item.UpdatedAt)
case madmin.SRBucketMetaTypeSSEConfig:
err = globalSiteReplicationSys.PeerBucketSSEConfigHandler(ctx, item.Bucket, item.SSEConfig, item.UpdatedAt)
case madmin.SRBucketMetaLCConfig:
err = globalSiteReplicationSys.PeerBucketLCConfigHandler(ctx, item.Bucket, item.ExpiryLCConfig, item.UpdatedAt)
}
if err != nil {
logger.LogIf(ctx, err)
@@ -280,11 +271,9 @@ func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *ht
// SiteReplicationInfo - GET /minio/admin/v3/site-replication/info
func (a adminAPIHandlers) SiteReplicationInfo(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SiteReplicationInfo")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationInfoAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationInfoAction)
if objectAPI == nil {
return
}
@@ -302,11 +291,9 @@ func (a adminAPIHandlers) SiteReplicationInfo(w http.ResponseWriter, r *http.Req
}
func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SiteReplicationGetIDPSettings")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationAddAction)
if objectAPI == nil {
return
}
@@ -319,7 +306,7 @@ func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Re
}
func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) error {
data, err := ioutil.ReadAll(body)
data, err := io.ReadAll(body)
if err != nil {
return SRError{
Cause: err,
@@ -341,11 +328,9 @@ func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptio
// SiteReplicationStatus - GET /minio/admin/v3/site-replication/status
func (a adminAPIHandlers) SiteReplicationStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SiteReplicationStatus")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationInfoAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationInfoAction)
if objectAPI == nil {
return
}
@@ -357,6 +342,7 @@ func (a adminAPIHandlers) SiteReplicationStatus(w http.ResponseWriter, r *http.R
opts.Users = true
opts.Policies = true
opts.Groups = true
opts.ILMExpiryRules = true
}
info, err := globalSiteReplicationSys.SiteReplicationStatus(ctx, objectAPI, opts)
if err != nil {
@@ -372,11 +358,9 @@ func (a adminAPIHandlers) SiteReplicationStatus(w http.ResponseWriter, r *http.R
// SiteReplicationMetaInfo - GET /minio/admin/v3/site-replication/metainfo
func (a adminAPIHandlers) SiteReplicationMetaInfo(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SiteReplicationMetaInfo")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationInfoAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationInfoAction)
if objectAPI == nil {
return
}
@@ -396,10 +380,9 @@ func (a adminAPIHandlers) SiteReplicationMetaInfo(w http.ResponseWriter, r *http
// SiteReplicationEdit - PUT /minio/admin/v3/site-replication/edit
func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SiteReplicationEdit")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
ctx := r.Context()
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
objectAPI, cred := validateAdminReq(ctx, w, r, policy.SiteReplicationAddAction)
if objectAPI == nil {
return
}
@@ -409,7 +392,9 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site)
opts := getSREditOptions(r)
status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site, opts)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -424,14 +409,19 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req
writeSuccessResponseJSON(w, body)
}
func getSREditOptions(r *http.Request) (opts madmin.SREditOptions) {
opts.DisableILMExpiryReplication = r.Form.Get("disableILMExpiryReplication") == "true"
opts.EnableILMExpiryReplication = r.Form.Get("enableILMExpiryReplication") == "true"
return
}
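
For reference, a minimal sketch (not part of this change set) of the form-boolean convention getSREditOptions relies on: the ILM-expiry flags arrive as ordinary query parameters, so building the query and calling ParseForm reproduces what the handler reads. The host and the choice of flag are illustrative.

```
package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	q := url.Values{}
	q.Set("enableILMExpiryReplication", "true") // or disableILMExpiryReplication=true

	// Illustrative endpoint; a real call would be signed like any other admin API request.
	r, err := http.NewRequest(http.MethodPut,
		"https://minio.example.net/minio/admin/v3/site-replication/edit?"+q.Encode(),
		strings.NewReader(""))
	if err != nil {
		panic(err)
	}
	// ParseForm populates r.Form from the query string, which is what the handler reads.
	if err := r.ParseForm(); err != nil {
		panic(err)
	}
	fmt.Println(r.Form.Get("enableILMExpiryReplication") == "true") // true
}
```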
// SRPeerEdit - PUT /minio/admin/v3/site-replication/peer/edit
//
// used internally to tell current cluster to update endpoint for peer
func (a adminAPIHandlers) SRPeerEdit(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SRPeerEdit")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationAddAction)
if objectAPI == nil {
return
}
@@ -449,25 +439,49 @@ func (a adminAPIHandlers) SRPeerEdit(w http.ResponseWriter, r *http.Request) {
}
}
// SRStateEdit - PUT /minio/admin/v3/site-replication/state/edit
//
// used internally to tell current cluster to update site replication state
func (a adminAPIHandlers) SRStateEdit(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationOperationAction)
if objectAPI == nil {
return
}
var state madmin.SRStateEditReq
if err := parseJSONBody(ctx, r.Body, &state, ""); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err := globalSiteReplicationSys.PeerStateEditReq(ctx, state); err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
func getSRStatusOptions(r *http.Request) (opts madmin.SRStatusOptions) {
q := r.Form
opts.Buckets = q.Get("buckets") == "true"
opts.Policies = q.Get("policies") == "true"
opts.Groups = q.Get("groups") == "true"
opts.Users = q.Get("users") == "true"
opts.ILMExpiryRules = q.Get("ilm-expiry-rules") == "true"
opts.PeerState = q.Get("peer-state") == "true"
opts.Entity = madmin.GetSREntityType(q.Get("entity"))
opts.EntityValue = q.Get("entityvalue")
opts.ShowDeleted = q.Get("showDeleted") == "true"
opts.Metrics = q.Get("metrics") == "true"
return
}
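
As a rough illustration (not from the diff), the query parameters getSRStatusOptions recognises can be assembled with net/url; the entity name "bucket" is an assumption about how madmin.GetSREntityType resolves a bucket-scoped query, and the host is illustrative.

```
package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("buckets", "true")
	q.Set("ilm-expiry-rules", "true") // newly recognised by this change
	q.Set("peer-state", "true")       // newly recognised by this change
	q.Set("entity", "bucket")         // assumed value; resolved by madmin.GetSREntityType
	q.Set("entityvalue", "mybucket")
	q.Set("showDeleted", "true")

	u := url.URL{
		Scheme:   "https",
		Host:     "minio.example.net", // illustrative host
		Path:     "/minio/admin/v3/site-replication/status",
		RawQuery: q.Encode(),
	}
	fmt.Println(u.String())
}
```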
// SiteReplicationRemove - PUT /minio/admin/v3/site-replication/remove
func (a adminAPIHandlers) SiteReplicationRemove(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SiteReplicationRemove")
ctx := r.Context()
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationRemoveAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationRemoveAction)
if objectAPI == nil {
return
}
@@ -496,10 +510,9 @@ func (a adminAPIHandlers) SiteReplicationRemove(w http.ResponseWriter, r *http.R
//
// used internally to tell current cluster to update endpoint for peer
func (a adminAPIHandlers) SRPeerRemove(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SRPeerRemove")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationRemoveAction)
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationRemoveAction)
if objectAPI == nil {
return
}
@@ -516,3 +529,85 @@ func (a adminAPIHandlers) SRPeerRemove(w http.ResponseWriter, r *http.Request) {
return
}
}
// SiteReplicationResyncOp - PUT /minio/admin/v3/site-replication/resync/op
func (a adminAPIHandlers) SiteReplicationResyncOp(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationResyncAction)
if objectAPI == nil {
return
}
var peerSite madmin.PeerInfo
if err := parseJSONBody(ctx, r.Body, &peerSite, ""); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
vars := mux.Vars(r)
op := madmin.SiteResyncOp(vars["operation"])
var (
status madmin.SRResyncOpStatus
err error
)
switch op {
case madmin.SiteResyncStart:
status, err = globalSiteReplicationSys.startResync(ctx, objectAPI, peerSite)
case madmin.SiteResyncCancel:
status, err = globalSiteReplicationSys.cancelResync(ctx, objectAPI, peerSite)
default:
err = errSRInvalidRequest(errInvalidArgument)
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
body, err := json.Marshal(status)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, body)
}
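
The handler above dispatches on the "operation" query variable captured by the router. A minimal sketch of that capture, written against gorilla/mux (the upstream of the minio/mux fork imported elsewhere in this change) and httptest; the path and values are illustrative.

```
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.Methods(http.MethodPut).
		Path("/minio/admin/v3/site-replication/resync/op").
		Queries("operation", "{operation:.*}").
		HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			// The captured query value is read via mux.Vars, as in SiteReplicationResyncOp.
			fmt.Fprintln(w, "operation =", mux.Vars(req)["operation"])
		})

	req := httptest.NewRequest(http.MethodPut,
		"/minio/admin/v3/site-replication/resync/op?operation=start", nil)
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)
	fmt.Print(rec.Body.String()) // operation = start
}
```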
// SiteReplicationDevNull - everything goes to io.Discard
// [POST] /minio/admin/v3/site-replication/devnull
func (a adminAPIHandlers) SiteReplicationDevNull(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
globalSiteNetPerfRX.Connect()
defer globalSiteNetPerfRX.Disconnect()
connectTime := time.Now()
for {
n, err := io.CopyN(xioutil.Discard, r.Body, 128*humanize.KiByte)
atomic.AddUint64(&globalSiteNetPerfRX.RX, uint64(n))
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
// If there is a disconnection before globalNetPerfMinDuration (we give a margin of error of 1 sec),
// it would mean the network is not stable. Logging here will help in debugging network issues.
if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) {
logger.LogIf(ctx, err)
}
}
if err != nil {
if errors.Is(err, io.EOF) {
w.WriteHeader(http.StatusNoContent)
} else {
w.WriteHeader(http.StatusBadRequest)
}
break
}
}
}
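
A hedged client-side sketch (not part of the diff) of how a peer could push a fixed amount of data at this devnull endpoint and derive an uplink rate; authentication, request signing and TLS setup required by a real MinIO deployment are omitted, and the host is illustrative.

```
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// zeroReader yields an endless stream of zero bytes.
type zeroReader struct{}

func (zeroReader) Read(p []byte) (int, error) {
	for i := range p {
		p[i] = 0
	}
	return len(p), nil
}

func main() {
	const total = 64 << 20 // 64 MiB
	body := io.LimitReader(zeroReader{}, total)

	start := time.Now()
	resp, err := http.Post(
		"https://minio.example.net/minio/admin/v3/site-replication/devnull",
		"application/octet-stream", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	elapsed := time.Since(start).Seconds()
	fmt.Printf("sent %d bytes in %.2fs (%.1f MiB/s)\n",
		total, elapsed, float64(total)/(1<<20)/elapsed)
}
```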
// SiteReplicationNetPerf - everything goes to io.Discard
// [POST] /minio/admin/v3/site-replication/netperf
func (a adminAPIHandlers) SiteReplicationNetPerf(w http.ResponseWriter, r *http.Request) {
durationStr := r.Form.Get(peerRESTDuration)
duration, _ := time.ParseDuration(durationStr)
if duration < globalNetPerfMinDuration {
duration = globalNetPerfMinDuration
}
result := siteNetperf(r.Context(), duration)
logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result))
}
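
On the calling side the result arrives gob-encoded. A hedged sketch of decoding it: SiteNetPerfResult below is a hypothetical, trimmed-down stand-in for the madmin result type, and the "duration" query name is assumed from peerRESTDuration.

```
package main

import (
	"encoding/gob"
	"fmt"
	"net/http"
)

// SiteNetPerfResult is a hypothetical, trimmed-down result shape; gob matches
// fields by name, so fields of the real type that are missing here are ignored.
type SiteNetPerfResult struct {
	TX uint64 // bytes sent per second
	RX uint64 // bytes received per second
}

func main() {
	resp, err := http.Post(
		"https://minio.example.net/minio/admin/v3/site-replication/netperf?duration=10s",
		"application/octet-stream", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var result SiteNetPerfResult
	if err := gob.NewDecoder(resp.Body).Decode(&result); err != nil {
		panic(err)
	}
	fmt.Printf("TX: %d B/s, RX: %d B/s\n", result.TX, result.RX)
}
```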


@@ -27,12 +27,12 @@ import (
"context"
"fmt"
"runtime"
"sync"
"testing"
"time"
"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v3"
minio "github.com/minio/minio-go/v7"
"github.com/minio/pkg/v2/sync/errgroup"
)
func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) {
@@ -129,18 +129,21 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
secretKeys[i] = secretKey
}
wg := sync.WaitGroup{}
g := errgroup.Group{}
for i := 0; i < userCount; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
uClient := s.getUserClient(c, accessKeys[i], secretKeys[i], "")
err := s.adm.RemoveUser(ctx, accessKeys[i])
if err != nil {
c.Fatalf("unable to remove user: %v", err)
g.Go(func(i int) func() error {
return func() error {
uClient := s.getUserClient(c, accessKeys[i], secretKeys[i], "")
err := s.adm.RemoveUser(ctx, accessKeys[i])
if err != nil {
return err
}
c.mustNotListObjects(ctx, uClient, bucket)
return nil
}
c.mustNotListObjects(ctx, uClient, bucket)
}(i)
}(i), i)
}
if errs := g.Wait(); len(errs) > 0 {
c.Fatalf("unable to remove users: %v", errs)
}
wg.Wait()
}
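
The rewrite above replaces a WaitGroup plus Fatalf-inside-goroutines with an error group that hands failures back to the test goroutine. A minimal sketch of the same pattern, shown with golang.org/x/sync/errgroup rather than the minio/pkg errgroup used in the test:

```
package main

import (
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func removeUser(id int) error {
	if id == 3 {
		return errors.New("simulated failure")
	}
	return nil
}

func main() {
	var g errgroup.Group
	for i := 0; i < 10; i++ {
		i := i // capture the loop variable per iteration (pre-Go 1.22 semantics)
		g.Go(func() error {
			return removeUser(i)
		})
	}
	// Wait blocks for all goroutines and returns the first non-nil error.
	if err := g.Wait(); err != nil {
		fmt.Println("removal failed:", err)
	}
}
```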

File diff suppressed because it is too large


@@ -24,23 +24,22 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"testing"
"time"
"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
cr "github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/s3utils"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/signer"
"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/v2/env"
)
const (
@@ -122,7 +121,7 @@ var iamTestSuites = func() []*TestSuiteIAM {
}()
const (
EnvTestEtcdBackend = "ETCD_SERVER"
EnvTestEtcdBackend = "_MINIO_ETCD_TEST_SERVER"
)
func (s *TestSuiteIAM) setUpEtcd(c *check, etcdServer string) {
@@ -145,7 +144,7 @@ func (s *TestSuiteIAM) setUpEtcd(c *check, etcdServer string) {
func (s *TestSuiteIAM) SetUpSuite(c *check) {
// If etcd backend is specified and etcd server is not present, the test
// is skipped.
etcdServer := os.Getenv(EnvTestEtcdBackend)
etcdServer := env.Get(EnvTestEtcdBackend, "")
if s.withEtcdBackend && etcdServer == "" {
c.Skip("Skipping etcd backend IAM test as no etcd server is configured.")
}
@@ -388,7 +387,7 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
req.ContentLength = int64(len(buf))
sum := sha256.Sum256(buf)
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum[:]))
req.Body = ioutil.NopCloser(bytes.NewReader(buf))
req.Body = io.NopCloser(bytes.NewReader(buf))
req = signer.SignV4(*req, accessKey, secretKey, "", "")
// 3.1 Execute the request.
@@ -825,7 +824,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
if set.CreateStringSet(groups...).Contains(group) {
c.Fatalf("created group still present!")
}
groupInfo, err = s.adm.GetGroupDescription(ctx, group)
_, err = s.adm.GetGroupDescription(ctx, group)
if err == nil {
c.Fatalf("group appears to exist")
}
@@ -877,7 +876,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
// Create an madmin client with user creds
userAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: cr.NewStaticV4(accessKey, secretKey, ""),
Creds: credentials.NewStaticV4(accessKey, secretKey, ""),
Secure: s.secure,
})
if err != nil {
@@ -984,9 +983,9 @@ func (s *TestSuiteIAM) SetUpAccMgmtPlugin(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
pluginEndpoint := os.Getenv("POLICY_PLUGIN_ENDPOINT")
pluginEndpoint := env.Get("_MINIO_POLICY_PLUGIN_ENDPOINT", "")
if pluginEndpoint == "" {
c.Skip("POLICY_PLUGIN_ENDPOINT not given - skipping.")
c.Skip("_MINIO_POLICY_PLUGIN_ENDPOINT not given - skipping.")
}
configCmds := []string{
@@ -1072,7 +1071,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
// Create an madmin client with user creds
userAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: cr.NewStaticV4(accessKey, secretKey, ""),
Creds: credentials.NewStaticV4(accessKey, secretKey, ""),
Secure: s.secure,
})
if err != nil {
@@ -1136,7 +1135,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
c.assertSvcAccDeletion(ctx, s, userAdmClient, accessKey, bucket)
// 6. Check that service account **can** be created for some other user.
// This is possible because of the policy enforced in the plugin.
// This is possible because the policy is enforced in the plugin.
c.mustCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient)
}
@@ -1203,6 +1202,47 @@ func (c *check) mustNotListObjects(ctx context.Context, client *minio.Client, bu
}
}
func (c *check) mustPutObjectWithTags(ctx context.Context, client *minio.Client, bucket, object string) {
c.Helper()
_, err := client.PutObject(ctx, bucket, object, bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{
UserTags: map[string]string{
"security": "public",
"virus": "true",
},
})
if err != nil {
c.Fatalf("user was unable to upload the object: %v", err)
}
}
func (c *check) mustGetObject(ctx context.Context, client *minio.Client, bucket, object string) {
c.Helper()
r, err := client.GetObject(ctx, bucket, object, minio.GetObjectOptions{})
if err != nil {
c.Fatalf("user was unable to download the object: %v", err)
}
defer r.Close()
_, err = io.Copy(io.Discard, r)
if err != nil {
c.Fatalf("user was unable to download the object: %v", err)
}
}
func (c *check) mustHeadObject(ctx context.Context, client *minio.Client, bucket, object string, tagCount int) {
c.Helper()
oinfo, err := client.StatObject(ctx, bucket, object, minio.StatObjectOptions{})
if err != nil {
c.Fatalf("user was unable to download the object: %v", err)
}
if oinfo.UserTagCount != tagCount {
c.Fatalf("expected tagCount: %d, got %d", tagCount, oinfo.UserTagCount)
}
}
func (c *check) mustListObjects(ctx context.Context, client *minio.Client, bucket string) {
c.Helper()
res := client.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
@@ -1220,6 +1260,52 @@ func (c *check) mustListBuckets(ctx context.Context, client *minio.Client) {
}
}
func (c *check) mustNotDelete(ctx context.Context, client *minio.Client, bucket string, vid string) {
c.Helper()
err := client.RemoveObject(ctx, bucket, "some-object", minio.RemoveObjectOptions{VersionID: vid})
if err == nil {
c.Fatalf("user must not be allowed to delete")
}
err = client.RemoveObject(ctx, bucket, "some-object", minio.RemoveObjectOptions{})
if err != nil {
c.Fatal("user must be able to create delete marker")
}
}
func (c *check) mustDownload(ctx context.Context, client *minio.Client, bucket string) {
c.Helper()
rd, err := client.GetObject(ctx, bucket, "some-object", minio.GetObjectOptions{})
if err != nil {
c.Fatalf("download did not succeed got %#v", err)
}
if _, err = io.Copy(io.Discard, rd); err != nil {
c.Fatalf("download did not succeed got %#v", err)
}
}
func (c *check) mustUploadReturnVersions(ctx context.Context, client *minio.Client, bucket string) []string {
c.Helper()
versions := []string{}
for i := 0; i < 5; i++ {
ui, err := client.PutObject(ctx, bucket, "some-object", bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{})
if err != nil {
c.Fatalf("upload did not succeed got %#v", err)
}
versions = append(versions, ui.VersionID)
}
return versions
}
func (c *check) mustUpload(ctx context.Context, client *minio.Client, bucket string) {
c.Helper()
_, err := client.PutObject(ctx, bucket, "some-object", bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{})
if err != nil {
c.Fatalf("upload did not succeed got %#v", err)
}
}
func (c *check) mustNotUpload(ctx context.Context, client *minio.Client, bucket string) {
c.Helper()
_, err := client.PutObject(ctx, bucket, "some-object", bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{})
@@ -1242,7 +1328,11 @@ func (c *check) assertSvcAccAppearsInListing(ctx context.Context, madmClient *ma
if err != nil {
c.Fatalf("unable to list svc accounts: %v", err)
}
if !set.CreateStringSet(listResp.Accounts...).Contains(svcAK) {
var accessKeys []string
for _, item := range listResp.Accounts {
accessKeys = append(accessKeys, item.AccessKey)
}
if !set.CreateStringSet(accessKeys...).Contains(svcAK) {
c.Fatalf("service account did not appear in listing!")
}
}

File diff suppressed because it is too large


@@ -21,18 +21,19 @@ import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"sort"
"sync"
"testing"
"time"
"github.com/gorilla/mux"
"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/auth"
"github.com/minio/mux"
)
// adminErasureTestBed - encapsulates subsystems that need to be setup for
@@ -72,9 +73,9 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro
// Initialize boot time
globalBootTime = UTCNow()
globalEndpoints = mustGetPoolEndpoints(erasureDirs...)
globalEndpoints = mustGetPoolEndpoints(0, erasureDirs...)
initAllSubsystems()
initAllSubsystems(ctx)
initConfigSubsystem(ctx, objLayer)
@@ -107,7 +108,7 @@ func initTestErasureObjLayer(ctx context.Context) (ObjectLayer, []string, error)
if err != nil {
return nil, nil, err
}
endpoints := mustGetPoolEndpoints(erasureDirs...)
endpoints := mustGetPoolEndpoints(0, erasureDirs...)
globalPolicySys = NewPolicySys()
objLayer, err := newErasureServerPools(ctx, endpoints)
if err != nil {
@@ -220,7 +221,7 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
adminTestBed.router.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
resp, _ := ioutil.ReadAll(rec.Body)
resp, _ := io.ReadAll(rec.Body)
t.Errorf("Expected to receive %d status code but received %d. Body (%s)",
http.StatusOK, rec.Code, string(resp))
}
@@ -381,3 +382,146 @@ func TestExtractHealInitParams(t *testing.T) {
}
}
}
type byResourceUID struct{ madmin.LockEntries }
func (b byResourceUID) Less(i, j int) bool {
toUniqLock := func(entry madmin.LockEntry) string {
return fmt.Sprintf("%s/%s", entry.Resource, entry.ID)
}
return toUniqLock(b.LockEntries[i]) < toUniqLock(b.LockEntries[j])
}
func TestTopLockEntries(t *testing.T) {
locksHeld := make(map[string][]lockRequesterInfo)
var owners []string
for i := 0; i < 4; i++ {
owners = append(owners, fmt.Sprintf("node-%d", i))
}
// Simulate DeleteObjects of 10 objects in a single request. i.e same lock
// request UID, but 10 different resource names associated with it.
var lris []lockRequesterInfo
uuid := mustGetUUID()
for i := 0; i < 10; i++ {
resource := fmt.Sprintf("bucket/delete-object-%d", i)
lri := lockRequesterInfo{
Name: resource,
Writer: true,
UID: uuid,
Owner: owners[i%len(owners)],
Group: true,
Quorum: 3,
}
lris = append(lris, lri)
locksHeld[resource] = []lockRequesterInfo{lri}
}
// Add a few concurrent read locks to the mix
for i := 0; i < 50; i++ {
resource := fmt.Sprintf("bucket/get-object-%d", i)
lri := lockRequesterInfo{
Name: resource,
UID: mustGetUUID(),
Owner: owners[i%len(owners)],
Quorum: 2,
}
lris = append(lris, lri)
locksHeld[resource] = append(locksHeld[resource], lri)
// concurrent read lock, same resource different uid
lri.UID = mustGetUUID()
lris = append(lris, lri)
locksHeld[resource] = append(locksHeld[resource], lri)
}
var peerLocks []*PeerLocks
for _, owner := range owners {
peerLocks = append(peerLocks, &PeerLocks{
Addr: owner,
Locks: locksHeld,
})
}
var exp madmin.LockEntries
for _, lri := range lris {
lockType := func(lri lockRequesterInfo) string {
if lri.Writer {
return "WRITE"
}
return "READ"
}
exp = append(exp, madmin.LockEntry{
Resource: lri.Name,
Type: lockType(lri),
ServerList: owners,
Owner: lri.Owner,
ID: lri.UID,
Quorum: lri.Quorum,
})
}
testCases := []struct {
peerLocks []*PeerLocks
expected madmin.LockEntries
}{
{
peerLocks: peerLocks,
expected: exp,
},
}
// printEntries := func(entries madmin.LockEntries) {
// for i, entry := range entries {
// fmt.Printf("%d: %s %s %s %s %v %d\n", i, entry.Resource, entry.ID, entry.Owner, entry.Type, entry.ServerList, entry.Elapsed)
// }
// }
check := func(exp, got madmin.LockEntries) (int, bool) {
if len(exp) != len(got) {
return 0, false
}
sort.Slice(exp, byResourceUID{exp}.Less)
sort.Slice(got, byResourceUID{got}.Less)
// printEntries(exp)
// printEntries(got)
for i, e := range exp {
if !e.Timestamp.Equal(got[i].Timestamp) {
return i, false
}
// Skip checking elapsed since it's time sensitive.
// if e.Elapsed != got[i].Elapsed {
// return false
// }
if e.Resource != got[i].Resource {
return i, false
}
if e.Type != got[i].Type {
return i, false
}
if e.Source != got[i].Source {
return i, false
}
if e.Owner != got[i].Owner {
return i, false
}
if e.ID != got[i].ID {
return i, false
}
if len(e.ServerList) != len(got[i].ServerList) {
return i, false
}
for j := range e.ServerList {
if e.ServerList[j] != got[i].ServerList[j] {
return i, false
}
}
}
return 0, true
}
for i, tc := range testCases {
got := topLockEntries(tc.peerLocks, false)
if idx, ok := check(tc.expected, got); !ok {
t.Fatalf("%d: mismatch at %d \n expected %#v but got %#v", i, idx, tc.expected[idx], got[idx])
}
}
}


@@ -27,7 +27,7 @@ import (
"sync"
"time"
"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
)
@@ -100,14 +100,14 @@ type allHealState struct {
}
// newHealState - initialize global heal state management
func newHealState(cleanup bool) *allHealState {
func newHealState(ctx context.Context, cleanup bool) *allHealState {
hstate := &allHealState{
healSeqMap: make(map[string]*healSequence),
healLocalDisks: make(map[Endpoint]bool),
healStatus: make(map[string]healingTracker),
}
if cleanup {
go hstate.periodicHealSeqsClean(GlobalContext)
go hstate.periodicHealSeqsClean(ctx)
}
return hstate
}
@@ -176,11 +176,12 @@ func (ahs *allHealState) getHealLocalDiskEndpoints() Endpoints {
return endpoints
}
func (ahs *allHealState) markDiskForHealing(ep Endpoint) {
// Set, in the memory, the state of the disk as currently healing or not
func (ahs *allHealState) setDiskHealingStatus(ep Endpoint, healing bool) {
ahs.Lock()
defer ahs.Unlock()
ahs.healLocalDisks[ep] = true
ahs.healLocalDisks[ep] = healing
}
func (ahs *allHealState) pushHealLocalDisks(healLocalDisks ...Endpoint) {
@@ -222,8 +223,8 @@ func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
// getHealSequenceByToken - Retrieve a heal sequence by token. The second
// argument returns if a heal sequence actually exists.
func (ahs *allHealState) getHealSequenceByToken(token string) (h *healSequence, exists bool) {
ahs.Lock()
defer ahs.Unlock()
ahs.RLock()
defer ahs.RUnlock()
for _, healSeq := range ahs.healSeqMap {
if healSeq.clientToken == token {
return healSeq, true
@@ -235,8 +236,8 @@ func (ahs *allHealState) getHealSequenceByToken(token string) (h *healSequence,
// getHealSequence - Retrieve a heal sequence by path. The second
// argument returns if a heal sequence actually exists.
func (ahs *allHealState) getHealSequence(path string) (h *healSequence, exists bool) {
ahs.Lock()
defer ahs.Unlock()
ahs.RLock()
defer ahs.RUnlock()
h, exists = ahs.healSeqMap[path]
return h, exists
}
@@ -252,7 +253,7 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
} else {
clientToken := he.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s@%d", he.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
clientToken = fmt.Sprintf("%s:%d", he.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
}
hsp = madmin.HealStopSuccess{
@@ -326,7 +327,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay
clientToken := h.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s@%d", h.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
clientToken = fmt.Sprintf("%s:%d", h.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
}
b, err := json.Marshal(madmin.HealStartSuccess{
@@ -396,6 +397,7 @@ type healSource struct {
bucket string
object string
versionID string
noWait bool // a non-blocking call, if task queue is full return right away.
opts *madmin.HealOpts // optional heal option overrides default setting
}
@@ -405,9 +407,6 @@ type healSequence struct {
// bucket, and object on which heal seq. was initiated
bucket, object string
// A channel of entities with heal result
respCh chan healResult
// Report healing progress
reportProgress bool
@@ -470,7 +469,6 @@ func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
clientToken := mustGetUUID()
return &healSequence{
respCh: make(chan healResult),
bucket: bucket,
object: objPrefix,
reportProgress: true,
@@ -685,13 +683,6 @@ func (h *healSequence) healSequenceStart(objAPI ObjectLayer) {
}
}
func (h *healSequence) logHeal(healType madmin.HealItemType) {
h.mutex.Lock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
h.mutex.Unlock()
}
func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
// Send heal request
task := healTask{
@@ -699,7 +690,6 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
object: source.object,
versionID: source.versionID,
opts: h.settings,
respCh: h.respCh,
}
if source.opts != nil {
task.opts = *source.opts
@@ -712,6 +702,24 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
h.lastHealActivity = UTCNow()
h.mutex.Unlock()
if source.noWait {
select {
case globalBackgroundHealRoutine.tasks <- task:
if serverDebugLog {
logger.Info("Task in the queue: %#v", task)
}
default:
// task queue is full, no more workers, we shall move on and heal later.
return nil
}
// Don't wait for result
return nil
}
// respCh must be set to wait for result.
// We make it size 1, so a result can always be written
// even if we aren't listening.
task.respCh = make(chan healResult, 1)
select {
case globalBackgroundHealRoutine.tasks <- task:
if serverDebugLog {
@@ -721,8 +729,9 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
return nil
}
// task queued, now wait for the response.
select {
case res := <-h.respCh:
case res := <-task.respCh:
if !h.reportProgress {
if errors.Is(res.err, errSkipFile) { // this is only sent usually by nopHeal
return nil
@@ -768,6 +777,11 @@ func (h *healSequence) healDiskMeta(objAPI ObjectLayer) error {
}
func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error {
if h.clientToken == bgHealingUUID {
// For background heal do nothing.
return nil
}
if err := h.healDiskMeta(objAPI); err != nil {
return err
}
@@ -853,13 +867,8 @@ func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly
if !h.settings.Recursive {
if h.object != "" {
// Check if an object named as the objPrefix exists,
// and if so heal it.
oi, err := objAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
if err == nil {
if err = h.healObject(bucket, h.object, oi.VersionID); err != nil {
return err
}
if err := h.healObject(bucket, h.object, ""); err != nil {
return err
}
}
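
The queueHealTask changes in this file lean on two small channel idioms: a per-task response channel with a buffer of one, so the worker can always deliver a result even if nobody is waiting, and a select with a default arm for the noWait path. A toy, self-contained sketch of both (names and queue size invented):

```
package main

import "fmt"

type task struct {
	name   string
	respCh chan error // buffered (size 1) so the worker never blocks on delivery
}

func main() {
	queue := make(chan task, 2)

	// Worker: drain the queue and always report a result.
	go func() {
		for t := range queue {
			t.respCh <- nil // never blocks: respCh has room for one result
		}
	}()

	// Blocking submit: queue the task, then wait for the worker's answer.
	t := task{name: "heal-object", respCh: make(chan error, 1)}
	queue <- t
	fmt.Println("blocking result:", <-t.respCh)

	// Non-blocking submit (the noWait path): drop the task if the queue is full.
	for i := 0; i < 5; i++ {
		select {
		case queue <- task{name: "mrf-heal", respCh: make(chan error, 1)}:
		default:
			fmt.Println("queue full, task", i, "deferred for a later heal")
		}
	}
	close(queue)
}
```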


@@ -19,20 +19,127 @@ package cmd
import (
"net/http"
"reflect"
"runtime"
"strings"
"github.com/gorilla/mux"
"github.com/klauspost/compress/gzhttp"
"github.com/klauspost/compress/gzip"
"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
)
const (
adminPathPrefix = minioReservedBucketPath + "/admin"
adminAPIVersion = madmin.AdminAPIVersion
adminAPIVersionPrefix = SlashSeparator + adminAPIVersion
adminPathPrefix = minioReservedBucketPath + "/admin"
adminAPIVersion = madmin.AdminAPIVersion
adminAPIVersionPrefix = SlashSeparator + adminAPIVersion
adminAPISiteReplicationDevNull = "/site-replication/devnull"
adminAPISiteReplicationNetPerf = "/site-replication/netperf"
adminAPIClientDevNull = "/speedtest/client/devnull"
adminAPIClientDevExtraTime = "/speedtest/client/devnull/extratime"
)
var gzipHandler = func() func(http.Handler) http.HandlerFunc {
gz, err := gzhttp.NewWrapper(gzhttp.MinSize(1000), gzhttp.CompressionLevel(gzip.BestSpeed))
if err != nil {
// Static params, so this is very unlikely.
logger.Fatal(err, "Unable to initialize server")
}
return gz
}()
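
For context, a minimal standalone sketch (not from this repository) of the same gzhttp wrapper applied to an ordinary handler: only responses of at least 1000 bytes are compressed, at gzip.BestSpeed.

```
package main

import (
	"log"
	"net/http"
	"strings"

	"github.com/klauspost/compress/gzhttp"
	"github.com/klauspost/compress/gzip"
)

func main() {
	gz, err := gzhttp.NewWrapper(gzhttp.MinSize(1000), gzhttp.CompressionLevel(gzip.BestSpeed))
	if err != nil {
		log.Fatal(err)
	}

	hello := func(w http.ResponseWriter, r *http.Request) {
		// Large enough to clear MinSize, so clients sending
		// Accept-Encoding: gzip get a compressed body.
		w.Write([]byte(strings.Repeat("hello ", 500)))
	}

	http.Handle("/hello", gz(http.HandlerFunc(hello)))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```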
// Set of handler options as bit flags
type hFlag uint8
const (
// this flag disables gzip compression of responses
noGZFlag = 1 << iota
// this flag enables tracing body and headers instead of just headers
traceAllFlag
// pass this flag to skip checking if object layer is available
noObjLayerFlag
)
// Has checks if the given flag is enabled in `h`.
func (h hFlag) Has(flag hFlag) bool {
// Use bitwise-AND and check if the result is non-zero.
return h&flag != 0
}
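
A tiny self-contained sketch of how these bit flags compose; the OR-accumulation mirrors what adminMiddleware does with its variadic flags argument.

```
package main

import "fmt"

type hFlag uint8

const (
	noGZFlag hFlag = 1 << iota
	traceAllFlag
	noObjLayerFlag
)

func (h hFlag) Has(flag hFlag) bool { return h&flag != 0 }

func main() {
	var opts hFlag
	for _, f := range []hFlag{traceAllFlag, noObjLayerFlag} {
		opts |= f
	}
	fmt.Println(opts.Has(traceAllFlag))   // true
	fmt.Println(opts.Has(noGZFlag))       // false
	fmt.Println(opts.Has(noObjLayerFlag)) // true
}
```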
func getHandlerName(f http.HandlerFunc) string {
name := runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
name = strings.TrimPrefix(name, "github.com/minio/minio/cmd.adminAPIHandlers.")
name = strings.TrimSuffix(name, "Handler-fm")
name = strings.TrimSuffix(name, "-fm")
return name
}
// adminMiddleware performs some common admin handler functionality for all
// handlers:
//
// - updates request context with `logger.ReqInfo` and api name based on the
// name of the function handler passed (this handler must be a method of
// `adminAPIHandlers`).
//
// - sets up call to send AuditLog
//
// Note that, while this is a middleware function (i.e. it takes a handler
// function and returns one), due to flags being passed based on required
// conditions, it is done per-"handler function registration" in the router.
//
// When no flags are passed, gzip compression, http tracing of headers and
// checking of object layer availability are all enabled. Use flags to modify
// this behavior.
func adminMiddleware(f http.HandlerFunc, flags ...hFlag) http.HandlerFunc {
// Collect all flags with bitwise-OR and assign operator
var handlerFlags hFlag
for _, flag := range flags {
handlerFlags |= flag
}
// Get name of the handler using reflection. NOTE: The passed in handler
// function must be a method of `adminAPIHandlers` for this extraction to
// work as expected.
handlerName := getHandlerName(f)
var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
// Update request context with `logger.ReqInfo`.
r = r.WithContext(newContext(r, w, handlerName))
defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r))
// Check if object layer is available, if not return error early.
if !handlerFlags.Has(noObjLayerFlag) {
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(r.Context(), w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
}
// Apply http tracing "middleware" based on presence of flag.
var f2 http.HandlerFunc
if handlerFlags.Has(traceAllFlag) {
f2 = httpTraceAll(f)
} else {
f2 = httpTraceHdrs(f)
}
// call the final handler
f2(w, r)
}
// Enable compression of responses based on presence of flag.
if !handlerFlags.Has(noGZFlag) {
handler = gzipHandler(handler)
}
return handler
}
// adminAPIHandlers provides HTTP handlers for MinIO admin API.
type adminAPIHandlers struct{}
@@ -46,234 +153,265 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
adminAPIVersionPrefix,
}
gz, err := gzhttp.NewWrapper(gzhttp.MinSize(1000), gzhttp.CompressionLevel(gzip.BestSpeed))
if err != nil {
// Static params, so this is very unlikely.
logger.Fatal(err, "Unable to initialize server")
}
for _, adminVersion := range adminVersions {
// Restart and stop MinIO service.
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/service").HandlerFunc(gz(httpTraceAll(adminAPI.ServiceHandler))).Queries("action", "{action:.*}")
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/service").HandlerFunc(adminMiddleware(adminAPI.ServiceHandler, traceAllFlag)).Queries("action", "{action:.*}")
// Update MinIO servers.
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update").HandlerFunc(gz(httpTraceAll(adminAPI.ServerUpdateHandler))).Queries("updateURL", "{updateURL:.*}")
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update").HandlerFunc(adminMiddleware(adminAPI.ServerUpdateHandler, traceAllFlag)).Queries("updateURL", "{updateURL:.*}")
// Info operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(gz(httpTraceAll(adminAPI.ServerInfoHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/inspect-data").HandlerFunc(httpTraceHdrs(adminAPI.InspectDataHandler)).Queries("volume", "{volume:.*}", "file", "{file:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(adminMiddleware(adminAPI.ServerInfoHandler, traceAllFlag, noObjLayerFlag))
adminRouter.Methods(http.MethodGet, http.MethodPost).Path(adminVersion + "/inspect-data").HandlerFunc(adminMiddleware(adminAPI.InspectDataHandler, noGZFlag, traceAllFlag))
// StorageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(gz(httpTraceAll(adminAPI.StorageInfoHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(adminMiddleware(adminAPI.StorageInfoHandler, traceAllFlag))
// DataUsageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(gz(httpTraceAll(adminAPI.DataUsageInfoHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(adminMiddleware(adminAPI.DataUsageInfoHandler, traceAllFlag))
// Metrics operation
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/metrics").HandlerFunc(gz(httpTraceAll(adminAPI.MetricsHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/metrics").HandlerFunc(adminMiddleware(adminAPI.MetricsHandler, traceAllFlag))
if globalIsDistErasure || globalIsErasure {
// Heal operations
// Heal processing endpoint.
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(gz(httpTraceAll(adminAPI.BackgroundHealStatusHandler)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(adminMiddleware(adminAPI.HealHandler, traceAllFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}").HandlerFunc(adminMiddleware(adminAPI.HealHandler, traceAllFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(adminMiddleware(adminAPI.HealHandler, traceAllFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(adminMiddleware(adminAPI.BackgroundHealStatusHandler, traceAllFlag))
// Pool operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/pools/list").HandlerFunc(gz(httpTraceAll(adminAPI.ListPools)))
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/pools/status").HandlerFunc(gz(httpTraceAll(adminAPI.StatusPool))).Queries("pool", "{pool:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/pools/list").HandlerFunc(adminMiddleware(adminAPI.ListPools, traceAllFlag))
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/pools/status").HandlerFunc(adminMiddleware(adminAPI.StatusPool, traceAllFlag)).Queries("pool", "{pool:.*}")
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/pools/decommission").HandlerFunc(gz(httpTraceAll(adminAPI.StartDecommission))).Queries("pool", "{pool:.*}")
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/pools/cancel").HandlerFunc(gz(httpTraceAll(adminAPI.CancelDecommission))).Queries("pool", "{pool:.*}")
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/pools/decommission").HandlerFunc(adminMiddleware(adminAPI.StartDecommission, traceAllFlag)).Queries("pool", "{pool:.*}")
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/pools/cancel").HandlerFunc(adminMiddleware(adminAPI.CancelDecommission, traceAllFlag)).Queries("pool", "{pool:.*}")
// Rebalance operations
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/rebalance/start").HandlerFunc(adminMiddleware(adminAPI.RebalanceStart, traceAllFlag))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/rebalance/status").HandlerFunc(adminMiddleware(adminAPI.RebalanceStatus, traceAllFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/rebalance/stop").HandlerFunc(adminMiddleware(adminAPI.RebalanceStop, traceAllFlag))
}
// Profiling operations - deprecated API
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(gz(httpTraceAll(adminAPI.StartProfilingHandler))).
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(adminMiddleware(adminAPI.StartProfilingHandler, traceAllFlag, noObjLayerFlag)).
Queries("profilerType", "{profilerType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(gz(httpTraceAll(adminAPI.DownloadProfilingHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(adminMiddleware(adminAPI.DownloadProfilingHandler, traceAllFlag, noObjLayerFlag))
// Profiling operations
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/profile").HandlerFunc(gz(httpTraceAll(adminAPI.ProfileHandler)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/profile").HandlerFunc(adminMiddleware(adminAPI.ProfileHandler, traceAllFlag, noObjLayerFlag))
// Config KV operations.
if enableConfigOps {
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-config-kv").HandlerFunc(gz(httpTraceHdrs(adminAPI.GetConfigKVHandler))).Queries("key", "{key:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/set-config-kv").HandlerFunc(gz(httpTraceHdrs(adminAPI.SetConfigKVHandler)))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/del-config-kv").HandlerFunc(gz(httpTraceHdrs(adminAPI.DelConfigKVHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-config-kv").HandlerFunc(adminMiddleware(adminAPI.GetConfigKVHandler)).Queries("key", "{key:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/set-config-kv").HandlerFunc(adminMiddleware(adminAPI.SetConfigKVHandler))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/del-config-kv").HandlerFunc(adminMiddleware(adminAPI.DelConfigKVHandler))
}
// Enable config help in all modes.
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/help-config-kv").HandlerFunc(gz(httpTraceAll(adminAPI.HelpConfigKVHandler))).Queries("subSys", "{subSys:.*}", "key", "{key:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/help-config-kv").HandlerFunc(adminMiddleware(adminAPI.HelpConfigKVHandler, traceAllFlag)).Queries("subSys", "{subSys:.*}", "key", "{key:.*}")
// Config KV history operations.
if enableConfigOps {
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-config-history-kv").HandlerFunc(gz(httpTraceAll(adminAPI.ListConfigHistoryKVHandler))).Queries("count", "{count:[0-9]+}")
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/clear-config-history-kv").HandlerFunc(gz(httpTraceHdrs(adminAPI.ClearConfigHistoryKVHandler))).Queries("restoreId", "{restoreId:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/restore-config-history-kv").HandlerFunc(gz(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler))).Queries("restoreId", "{restoreId:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-config-history-kv").HandlerFunc(adminMiddleware(adminAPI.ListConfigHistoryKVHandler, traceAllFlag)).Queries("count", "{count:[0-9]+}")
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/clear-config-history-kv").HandlerFunc(adminMiddleware(adminAPI.ClearConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/restore-config-history-kv").HandlerFunc(adminMiddleware(adminAPI.RestoreConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
}
// Config import/export bulk operations
if enableConfigOps {
// Get config
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/config").HandlerFunc(gz(httpTraceHdrs(adminAPI.GetConfigHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/config").HandlerFunc(adminMiddleware(adminAPI.GetConfigHandler))
// Set config
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/config").HandlerFunc(gz(httpTraceHdrs(adminAPI.SetConfigHandler)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/config").HandlerFunc(adminMiddleware(adminAPI.SetConfigHandler))
}
// -- IAM APIs --
// Add policy IAM
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(gz(httpTraceAll(adminAPI.AddCannedPolicy))).Queries("name", "{name:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(adminMiddleware(adminAPI.AddCannedPolicy, traceAllFlag)).Queries("name", "{name:.*}")
// Add user IAM
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountinfo").HandlerFunc(gz(httpTraceAll(adminAPI.AccountInfoHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountinfo").HandlerFunc(adminMiddleware(adminAPI.AccountInfoHandler, traceAllFlag))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(gz(httpTraceHdrs(adminAPI.AddUser))).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(adminMiddleware(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-status").HandlerFunc(gz(httpTraceHdrs(adminAPI.SetUserStatus))).Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-status").HandlerFunc(adminMiddleware(adminAPI.SetUserStatus)).Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
// Service accounts ops
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/add-service-account").HandlerFunc(gz(httpTraceHdrs(adminAPI.AddServiceAccount)))
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update-service-account").HandlerFunc(gz(httpTraceHdrs(adminAPI.UpdateServiceAccount))).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-service-account").HandlerFunc(gz(httpTraceHdrs(adminAPI.InfoServiceAccount))).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-service-accounts").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListServiceAccounts)))
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/delete-service-account").HandlerFunc(gz(httpTraceHdrs(adminAPI.DeleteServiceAccount))).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/add-service-account").HandlerFunc(adminMiddleware(adminAPI.AddServiceAccount))
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update-service-account").HandlerFunc(adminMiddleware(adminAPI.UpdateServiceAccount)).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-service-account").HandlerFunc(adminMiddleware(adminAPI.InfoServiceAccount)).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-service-accounts").HandlerFunc(adminMiddleware(adminAPI.ListServiceAccounts))
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/delete-service-account").HandlerFunc(adminMiddleware(adminAPI.DeleteServiceAccount)).Queries("accessKey", "{accessKey:.*}")
// STS accounts ops
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/temporary-account-info").HandlerFunc(adminMiddleware(adminAPI.TemporaryAccountInfo)).Queries("accessKey", "{accessKey:.*}")
// Info policy IAM latest
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(gz(httpTraceHdrs(adminAPI.InfoCannedPolicy))).Queries("name", "{name:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(adminMiddleware(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
// List policies latest
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-canned-policies").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListBucketPolicies))).Queries("bucket", "{bucket:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListCannedPolicies)))
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-canned-policies").HandlerFunc(adminMiddleware(adminAPI.ListBucketPolicies)).Queries("bucket", "{bucket:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(adminMiddleware(adminAPI.ListCannedPolicies))
// Builtin IAM policy associations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp/builtin/policy-entities").HandlerFunc(adminMiddleware(adminAPI.ListPolicyMappingEntities))
// Remove policy IAM
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-canned-policy").HandlerFunc(gz(httpTraceHdrs(adminAPI.RemoveCannedPolicy))).Queries("name", "{name:.*}")
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-canned-policy").HandlerFunc(adminMiddleware(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
// Set user or group policy
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-or-group-policy").
HandlerFunc(gz(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup))).
HandlerFunc(adminMiddleware(adminAPI.SetPolicyForUserOrGroup)).
Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")
// Attach/Detach policies to/from user or group
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp/builtin/policy/{operation}").HandlerFunc(adminMiddleware(adminAPI.AttachDetachPolicyBuiltin))
// Remove user IAM
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-user").HandlerFunc(gz(httpTraceHdrs(adminAPI.RemoveUser))).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-user").HandlerFunc(adminMiddleware(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
// List users
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-users").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListBucketUsers))).Queries("bucket", "{bucket:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-users").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListUsers)))
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-users").HandlerFunc(adminMiddleware(adminAPI.ListBucketUsers)).Queries("bucket", "{bucket:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-users").HandlerFunc(adminMiddleware(adminAPI.ListUsers))
// User info
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/user-info").HandlerFunc(gz(httpTraceHdrs(adminAPI.GetUserInfo))).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/user-info").HandlerFunc(adminMiddleware(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")
// Add/Remove members from group
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/update-group-members").HandlerFunc(gz(httpTraceHdrs(adminAPI.UpdateGroupMembers)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/update-group-members").HandlerFunc(adminMiddleware(adminAPI.UpdateGroupMembers))
// Get Group
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/group").HandlerFunc(gz(httpTraceHdrs(adminAPI.GetGroup))).Queries("group", "{group:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/group").HandlerFunc(adminMiddleware(adminAPI.GetGroup)).Queries("group", "{group:.*}")
// List Groups
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/groups").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListGroups)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/groups").HandlerFunc(adminMiddleware(adminAPI.ListGroups))
// Set Group Status
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-group-status").HandlerFunc(gz(httpTraceHdrs(adminAPI.SetGroupStatus))).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-group-status").HandlerFunc(adminMiddleware(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
// Export IAM info to zipped file
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/export-iam").HandlerFunc(httpTraceHdrs(adminAPI.ExportIAM))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/export-iam").HandlerFunc(adminMiddleware(adminAPI.ExportIAM, noGZFlag))
// Import IAM info
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-iam").HandlerFunc(httpTraceHdrs(adminAPI.ImportIAM))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-iam").HandlerFunc(adminMiddleware(adminAPI.ImportIAM, noGZFlag))
// IDentity Provider configuration APIs
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/idp-config").HandlerFunc(gz(httpTraceHdrs(adminAPI.SetIdentityProviderCfg))).Queries("type", "{type:.*}").Queries("name", "{name:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp-config").HandlerFunc(gz(httpTraceHdrs(adminAPI.GetIdentityProviderCfg))).Queries("type", "{type:.*}")
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/idp-config").HandlerFunc(gz(httpTraceHdrs(adminAPI.DeleteIdentityProviderCfg))).Queries("type", "{type:.*}").Queries("name", "{name:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.AddIdentityProviderCfg))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.UpdateIdentityProviderCfg))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp-config/{type}").HandlerFunc(adminMiddleware(adminAPI.ListIdentityProviderCfg))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.GetIdentityProviderCfg))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.DeleteIdentityProviderCfg))
// LDAP IAM operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp/ldap/policy-entities").HandlerFunc(adminMiddleware(adminAPI.ListLDAPPolicyMappingEntities))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp/ldap/policy/{operation}").HandlerFunc(adminMiddleware(adminAPI.AttachDetachPolicyLDAP))
// -- END IAM APIs --
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
gz(httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler))).Queries("bucket", "{bucket:.*}")
adminMiddleware(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
gz(httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler))).Queries("bucket", "{bucket:.*}")
adminMiddleware(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// Bucket replication operations
// GetBucketTargetHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
gz(httpTraceHdrs(adminAPI.ListRemoteTargetsHandler))).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
adminMiddleware(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
gz(httpTraceHdrs(adminAPI.SetRemoteTargetHandler))).Queries("bucket", "{bucket:.*}")
adminMiddleware(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
// RemoveRemoteTargetHandler
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
gz(httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler))).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
adminMiddleware(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
// ReplicationDiff - MinIO extension API
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/replication/diff").HandlerFunc(
gz(httpTraceHdrs(adminAPI.ReplicationDiffHandler))).Queries("bucket", "{bucket:.*}")
adminMiddleware(adminAPI.ReplicationDiffHandler)).Queries("bucket", "{bucket:.*}")
// ReplicationMRFHandler - MinIO extension API
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/replication/mrf").HandlerFunc(
adminMiddleware(adminAPI.ReplicationMRFHandler)).Queries("bucket", "{bucket:.*}")
// Batch job operations
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/start-job").HandlerFunc(
adminMiddleware(adminAPI.StartBatchJob))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-jobs").HandlerFunc(
adminMiddleware(adminAPI.ListBatchJobs))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/describe-job").HandlerFunc(
adminMiddleware(adminAPI.DescribeBatchJob))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/cancel-job").HandlerFunc(
adminMiddleware(adminAPI.CancelBatchJob))
// Bucket migration operations
// ExportBucketMetaHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/export-bucket-metadata").HandlerFunc(
gz(httpTraceHdrs(adminAPI.ExportBucketMetadataHandler)))
adminMiddleware(adminAPI.ExportBucketMetadataHandler))
// ImportBucketMetaHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-bucket-metadata").HandlerFunc(
gz(httpTraceHdrs(adminAPI.ImportBucketMetadataHandler)))
adminMiddleware(adminAPI.ImportBucketMetadataHandler))
// Remote Tier management operations
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.AddTierHandler)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.EditTierHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListTierHandler)))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.RemoveTierHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.VerifyTierHandler)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/tier").HandlerFunc(adminMiddleware(adminAPI.AddTierHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/tier/{tier}").HandlerFunc(adminMiddleware(adminAPI.EditTierHandler))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier").HandlerFunc(adminMiddleware(adminAPI.ListTierHandler))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/tier/{tier}").HandlerFunc(adminMiddleware(adminAPI.RemoveTierHandler))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier/{tier}").HandlerFunc(adminMiddleware(adminAPI.VerifyTierHandler))
// Tier stats
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier-stats").HandlerFunc(gz(httpTraceHdrs(adminAPI.TierStatsHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier-stats").HandlerFunc(adminMiddleware(adminAPI.TierStatsHandler))
// Cluster Replication APIs
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/add").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationAdd)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/remove").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationRemove)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/info").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationInfo)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/metainfo").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationMetaInfo)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/status").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationStatus)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/add").HandlerFunc(adminMiddleware(adminAPI.SiteReplicationAdd))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/remove").HandlerFunc(adminMiddleware(adminAPI.SiteReplicationRemove))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/info").HandlerFunc(adminMiddleware(adminAPI.SiteReplicationInfo))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/metainfo").HandlerFunc(adminMiddleware(adminAPI.SiteReplicationMetaInfo))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/status").HandlerFunc(adminMiddleware(adminAPI.SiteReplicationStatus))
adminRouter.Methods(http.MethodPost).Path(adminVersion + adminAPISiteReplicationDevNull).HandlerFunc(adminMiddleware(adminAPI.SiteReplicationDevNull, noObjLayerFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + adminAPISiteReplicationNetPerf).HandlerFunc(adminMiddleware(adminAPI.SiteReplicationNetPerf, noObjLayerFlag))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/join").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerJoin)))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/peer/bucket-ops").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerBucketOps))).Queries("bucket", "{bucket:.*}").Queries("operation", "{operation:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/iam-item").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerReplicateIAMItem)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/bucket-meta").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerReplicateBucketItem)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/peer/idp-settings").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerGetIDPSettings)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/edit").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationEdit)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/edit").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerEdit)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/remove").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerRemove)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/join").HandlerFunc(adminMiddleware(adminAPI.SRPeerJoin))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/peer/bucket-ops").HandlerFunc(adminMiddleware(adminAPI.SRPeerBucketOps)).Queries("bucket", "{bucket:.*}").Queries("operation", "{operation:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/iam-item").HandlerFunc(adminMiddleware(adminAPI.SRPeerReplicateIAMItem))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/bucket-meta").HandlerFunc(adminMiddleware(adminAPI.SRPeerReplicateBucketItem))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/peer/idp-settings").HandlerFunc(adminMiddleware(adminAPI.SRPeerGetIDPSettings))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/edit").HandlerFunc(adminMiddleware(adminAPI.SiteReplicationEdit))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/edit").HandlerFunc(adminMiddleware(adminAPI.SRPeerEdit))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/remove").HandlerFunc(adminMiddleware(adminAPI.SRPeerRemove))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/resync/op").HandlerFunc(adminMiddleware(adminAPI.SiteReplicationResyncOp)).Queries("operation", "{operation:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/state/edit").HandlerFunc(adminMiddleware(adminAPI.SRStateEdit))
if globalIsDistErasure {
// Top locks
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(gz(httpTraceHdrs(adminAPI.TopLocksHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(adminMiddleware(adminAPI.TopLocksHandler))
// Force unlocks paths
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/force-unlock").
Queries("paths", "{paths:.*}").HandlerFunc(gz(httpTraceHdrs(adminAPI.ForceUnlockHandler)))
Queries("paths", "{paths:.*}").HandlerFunc(adminMiddleware(adminAPI.ForceUnlockHandler))
}
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest").HandlerFunc(httpTraceHdrs(adminAPI.SpeedTestHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/object").HandlerFunc(httpTraceHdrs(adminAPI.ObjectSpeedTestHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/drive").HandlerFunc(httpTraceHdrs(adminAPI.DriveSpeedtestHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/net").HandlerFunc(httpTraceHdrs(adminAPI.NetperfHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest").HandlerFunc(adminMiddleware(adminAPI.ObjectSpeedTestHandler, noGZFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/object").HandlerFunc(adminMiddleware(adminAPI.ObjectSpeedTestHandler, noGZFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/drive").HandlerFunc(adminMiddleware(adminAPI.DriveSpeedtestHandler, noGZFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/net").HandlerFunc(adminMiddleware(adminAPI.NetperfHandler, noGZFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/site").HandlerFunc(adminMiddleware(adminAPI.SitePerfHandler, noGZFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + adminAPIClientDevNull).HandlerFunc(adminMiddleware(adminAPI.ClientDevNull, noGZFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + adminAPIClientDevExtraTime).HandlerFunc(adminMiddleware(adminAPI.ClientDevNullExtraTime, noGZFlag))
// HTTP Trace
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/trace").HandlerFunc(gz(http.HandlerFunc(adminAPI.TraceHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/trace").HandlerFunc(adminMiddleware(adminAPI.TraceHandler, noObjLayerFlag))
// Console Logs
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/log").HandlerFunc(gz(httpTraceAll(adminAPI.ConsoleLogHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/log").HandlerFunc(adminMiddleware(adminAPI.ConsoleLogHandler, traceAllFlag))
// -- KMS APIs --
//
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/kms/status").HandlerFunc(gz(httpTraceAll(adminAPI.KMSStatusHandler)))
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/kms/key/create").HandlerFunc(gz(httpTraceAll(adminAPI.KMSCreateKeyHandler))).Queries("key-id", "{key-id:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/kms/key/status").HandlerFunc(gz(httpTraceAll(adminAPI.KMSKeyStatusHandler)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/kms/status").HandlerFunc(adminMiddleware(adminAPI.KMSStatusHandler, traceAllFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/kms/key/create").HandlerFunc(adminMiddleware(adminAPI.KMSCreateKeyHandler, traceAllFlag)).Queries("key-id", "{key-id:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/kms/key/status").HandlerFunc(adminMiddleware(adminAPI.KMSKeyStatusHandler, traceAllFlag))
if !globalIsGateway {
// Keep obdinfo for backward compatibility with mc
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/obdinfo").
HandlerFunc(gz(httpTraceHdrs(adminAPI.HealthInfoHandler)))
// -- Health API --
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
HandlerFunc(gz(httpTraceHdrs(adminAPI.HealthInfoHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/bandwidth").
HandlerFunc(gz(httpTraceHdrs(adminAPI.BandwidthMonitorHandler)))
}
// Keep obdinfo for backward compatibility with mc
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/obdinfo").
HandlerFunc(adminMiddleware(adminAPI.HealthInfoHandler))
// -- Health API --
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
HandlerFunc(adminMiddleware(adminAPI.HealthInfoHandler))
}
// If none of the routes match add default error handler routes
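Note: the hunks above replace the per-route gz(httpTraceHdrs(handler)) wrapping with a single adminMiddleware(handler, flags...) wrapper that accepts option flags such as noGZFlag, noObjLayerFlag and traceAllFlag. The real wrapper is defined elsewhere in this changeset and is not reproduced here; the following is only a minimal sketch of the variadic flag-composition pattern. hFlag, adminMiddlewareSketch and the flag values below are hypothetical, the flag names merely mirror the ones used in the registrations above.

package main

import (
	"fmt"
	"log"
	"net/http"
)

// hFlag and its constants are illustrative only; MinIO's real flag type and
// values are defined elsewhere in this changeset.
type hFlag int

const (
	noGZFlag hFlag = 1 << iota
	noObjLayerFlag
	traceAllFlag
)

// adminMiddlewareSketch composes per-route behavior from option flags, the way
// the registrations above pass noGZFlag, noObjLayerFlag or traceAllFlag.
func adminMiddlewareSketch(h http.HandlerFunc, flags ...hFlag) http.HandlerFunc {
	var opts hFlag
	for _, f := range flags {
		opts |= f
	}
	return func(w http.ResponseWriter, r *http.Request) {
		if opts&traceAllFlag != 0 {
			log.Printf("trace(all): %s %s", r.Method, r.URL.Path) // stand-in for full request tracing
		} else {
			log.Printf("trace(hdrs): %s %s", r.Method, r.URL.Path) // stand-in for header-only tracing
		}
		// A real wrapper would also honor noGZFlag (skip response compression)
		// and noObjLayerFlag (allow the route before the object layer is ready).
		h(w, r)
	}
}

func main() {
	ok := func(w http.ResponseWriter, _ *http.Request) { fmt.Fprintln(w, "ok") }
	http.Handle("/minio/admin/v3/info", adminMiddlewareSketch(ok, traceAllFlag))
	log.Fatal(http.ListenAndServe(":8080", nil))
}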


@@ -20,17 +20,21 @@ package cmd
import (
"context"
"net/http"
"os"
"runtime"
"runtime/debug"
"strings"
"time"
"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
)
// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints from given list of endpoints
func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Request) madmin.ServerProperties {
var localEndpoints Endpoints
addr := globalLocalNodeName
if r != nil {
addr = r.Host
@@ -48,7 +52,6 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
if endpoint.IsLocal {
// Only proceed for local endpoints
network[nodeName] = string(madmin.ItemOnline)
localEndpoints = append(localEndpoints, endpoint)
continue
}
_, present := network[nodeName]
@@ -67,8 +70,23 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
var memstats runtime.MemStats
runtime.ReadMemStats(&memstats)
gcStats := debug.GCStats{
// If stats.PauseQuantiles is non-empty, ReadGCStats fills
// it with quantiles summarizing the distribution of pause time.
// For example, if len(stats.PauseQuantiles) is 5, it will be
// filled with the minimum, 25%, 50%, 75%, and maximum pause times.
PauseQuantiles: make([]time.Duration, 5),
}
debug.ReadGCStats(&gcStats)
// Truncate GC stats to max 5 entries.
if len(gcStats.PauseEnd) > 5 {
gcStats.PauseEnd = gcStats.PauseEnd[len(gcStats.PauseEnd)-5:]
}
if len(gcStats.Pause) > 5 {
gcStats.Pause = gcStats.Pause[len(gcStats.Pause)-5:]
}
props := madmin.ServerProperties{
State: string(madmin.ItemInitializing),
Endpoint: addr,
Uptime: UTCNow().Unix() - globalBootTime.Unix(),
Version: Version,
@@ -81,14 +99,54 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
Frees: memstats.Frees,
HeapAlloc: memstats.HeapAlloc,
},
GoMaxProcs: runtime.GOMAXPROCS(0),
NumCPU: runtime.NumCPU(),
RuntimeVersion: runtime.Version(),
GCStats: &madmin.GCStats{
LastGC: gcStats.LastGC,
NumGC: gcStats.NumGC,
PauseTotal: gcStats.PauseTotal,
Pause: gcStats.Pause,
PauseEnd: gcStats.PauseEnd,
},
MinioEnvVars: make(map[string]string, 10),
}
sensitive := map[string]struct{}{
config.EnvAccessKey: {},
config.EnvSecretKey: {},
config.EnvRootUser: {},
config.EnvRootPassword: {},
config.EnvMinIOSubnetAPIKey: {},
kms.EnvKMSSecretKey: {},
}
for _, v := range os.Environ() {
if !strings.HasPrefix(v, "MINIO") && !strings.HasPrefix(v, "_MINIO") {
continue
}
split := strings.SplitN(v, "=", 2)
key := split[0]
value := ""
if len(split) > 1 {
value = split[1]
}
// Do not send sensitive creds.
if _, ok := sensitive[key]; ok || strings.Contains(strings.ToLower(key), "password") || strings.HasSuffix(strings.ToLower(key), "key") {
props.MinioEnvVars[key] = "*** EXISTS, REDACTED ***"
continue
}
props.MinioEnvVars[key] = value
}
objLayer := newObjectLayerFn()
if objLayer != nil && !globalIsGateway {
// only need Disks information in server mode.
storageInfo, _ := objLayer.LocalStorageInfo(GlobalContext)
if objLayer != nil {
storageInfo := objLayer.LocalStorageInfo(GlobalContext)
props.State = string(madmin.ItemOnline)
props.Disks = storageInfo.Disks
} else {
props.State = string(madmin.ItemInitializing)
props.Disks = getOfflineDisks("", globalEndpoints)
}
return props
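The environment loop above publishes MINIO-prefixed variables in props.MinioEnvVars while masking anything credential-like: keys in the sensitive set, keys containing "password", and keys ending in "key". A stand-alone sketch of the same redaction rule follows; redactEnv is a hypothetical helper, not a MinIO function.

package main

import (
	"fmt"
	"os"
	"strings"
)

// redactEnv is a hypothetical helper mirroring the rules above: keep only
// MINIO/_MINIO variables and mask known-sensitive keys as well as any key
// containing "password" or ending in "key".
func redactEnv(environ []string, sensitive map[string]struct{}) map[string]string {
	out := make(map[string]string, len(environ))
	for _, kv := range environ {
		if !strings.HasPrefix(kv, "MINIO") && !strings.HasPrefix(kv, "_MINIO") {
			continue
		}
		k, v, _ := strings.Cut(kv, "=")
		lk := strings.ToLower(k)
		if _, found := sensitive[k]; found || strings.Contains(lk, "password") || strings.HasSuffix(lk, "key") {
			out[k] = "*** EXISTS, REDACTED ***"
			continue
		}
		out[k] = v
	}
	return out
}

func main() {
	os.Setenv("MINIO_ROOT_PASSWORD", "secret")
	os.Setenv("MINIO_BROWSER", "on")
	for k, v := range redactEnv(os.Environ(), map[string]struct{}{"MINIO_ROOT_PASSWORD": {}}) {
		fmt.Println(k, "=", v)
	}
}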


@@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@@ -28,8 +28,10 @@ import (
"strings"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/minio/minio/internal/ioutil"
"google.golang.org/api/googleapi"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/auth"
@@ -37,13 +39,15 @@ import (
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/config/dns"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/versioning"
levent "github.com/minio/minio/internal/config/lambda/event"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/hash"
"github.com/minio/pkg/bucket/policy"
"github.com/minio/pkg/v2/policy"
)
// APIError structure
@@ -83,6 +87,7 @@ const (
ErrInternalError
ErrInvalidAccessKeyID
ErrAccessKeyDisabled
ErrInvalidArgument
ErrInvalidBucketName
ErrInvalidDigest
ErrInvalidRange
@@ -131,7 +136,10 @@ const (
ErrReplicationNeedsVersioningError
ErrReplicationBucketNeedsVersioningError
ErrReplicationDenyEditError
ErrRemoteTargetDenyAddError
ErrReplicationNoExistingObjects
ErrReplicationValidationError
ErrReplicationPermissionCheckError
ErrObjectRestoreAlreadyInProgress
ErrNoSuchKey
ErrNoSuchUpload
@@ -144,13 +152,14 @@ const (
ErrMethodNotAllowed
ErrInvalidPart
ErrInvalidPartOrder
ErrMissingPart
ErrAuthorizationHeaderMalformed
ErrMalformedPOSTRequest
ErrPOSTFileRequired
ErrSignatureVersionNotSupported
ErrBucketNotEmpty
ErrAllAccessDisabled
ErrMalformedPolicy
ErrPolicyInvalidVersion
ErrMissingFields
ErrMissingCredTag
ErrCredMalformed
@@ -163,7 +172,6 @@ const (
ErrMalformedDate
ErrMalformedPresignedDate
ErrMalformedCredentialDate
ErrMalformedCredentialRegion
ErrMalformedExpires
ErrNegativeExpires
ErrAuthHeaderEmpty
@@ -176,11 +184,13 @@ const (
ErrBucketAlreadyOwnedByYou
ErrInvalidDuration
ErrBucketAlreadyExists
ErrTooManyBuckets
ErrMetadataTooLarge
ErrUnsupportedMetadata
ErrUnsupportedHostHeader
ErrMaximumExpires
ErrSlowDown
ErrSlowDownRead
ErrSlowDownWrite
ErrMaxVersionsExceeded
ErrInvalidPrefixMarker
ErrBadRequest
ErrKeyTooLongError
@@ -195,6 +205,9 @@ const (
ErrBucketTaggingNotFound
ErrObjectLockInvalidHeaders
ErrInvalidTagDirective
ErrPolicyAlreadyAttached
ErrPolicyNotAttached
ErrExcessData
// Add new error codes here.
// SSE-S3/SSE-KMS related API errors
@@ -206,6 +219,8 @@ const (
ErrSSEMultipartEncrypted
ErrSSEEncryptedObject
ErrInvalidEncryptionParameters
ErrInvalidEncryptionParametersSSEC
ErrInvalidSSECustomerAlgorithm
ErrInvalidSSECustomerKey
ErrMissingSSECustomerKey
@@ -215,6 +230,7 @@ const (
ErrIncompatibleEncryptionMethod
ErrKMSNotConfigured
ErrKMSKeyNotFoundException
ErrKMSDefaultKeyAlreadyConfigured
ErrNoAccessKey
ErrInvalidToken
@@ -238,18 +254,17 @@ const (
// Add new extended error codes here.
// MinIO extended errors.
ErrReadQuorum
ErrWriteQuorum
ErrStorageFull
ErrRequestBodyParse
ErrObjectExistsAsDirectory
ErrInvalidObjectName
ErrInvalidObjectNamePrefixSlash
ErrInvalidResourceName
ErrInvalidLifecycleQueryParameter
ErrServerNotInitialized
ErrOperationTimedOut
ErrRequestTimedout
ErrClientDisconnected
ErrOperationMaxedOut
ErrTooManyRequests
ErrInvalidRequest
ErrTransitionStorageClassNotFoundError
// MinIO storage class error codes
@@ -261,9 +276,13 @@ const (
ErrMalformedJSON
ErrAdminNoSuchUser
ErrAdminNoSuchUserLDAPWarn
ErrAdminNoSuchGroup
ErrAdminGroupNotEmpty
ErrAdminGroupDisabled
ErrAdminNoSuchJob
ErrAdminNoSuchPolicy
ErrAdminPolicyChangeAlreadyApplied
ErrAdminInvalidArgument
ErrAdminInvalidAccessKey
ErrAdminInvalidSecretKey
@@ -273,6 +292,11 @@ const (
ErrAdminNoSuchConfigTarget
ErrAdminConfigEnvOverridden
ErrAdminConfigDuplicateKeys
ErrAdminConfigInvalidIDPType
ErrAdminConfigLDAPNonDefaultConfigName
ErrAdminConfigLDAPValidation
ErrAdminConfigIDPCfgNameAlreadyExists
ErrAdminConfigIDPCfgNameDoesNotExist
ErrAdminCredentialsMismatch
ErrInsecureClientRequest
ErrObjectTampered
@@ -286,6 +310,11 @@ const (
ErrSiteReplicationBucketMetaError
ErrSiteReplicationIAMError
ErrSiteReplicationConfigMissing
ErrSiteReplicationIAMConfigMismatch
// Pool rebalance errors
ErrAdminRebalanceAlreadyStarted
ErrAdminRebalanceNotStarted
// Bucket Quota error codes
ErrAdminBucketQuotaExceeded
@@ -396,6 +425,12 @@ const (
ErrPostPolicyConditionInvalidFormat
ErrInvalidChecksum
// Lambda functions
ErrLambdaARNInvalid
ErrLambdaARNNotFound
apiErrCodeEnd // This is used only for the testing code
)
type errorCodeMap map[APIErrorCode]APIError
@@ -409,8 +444,7 @@ func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
}
if globalSite.Region != "" {
switch errCode {
case ErrAuthorizationHeaderMalformed:
if errCode == ErrAuthorizationHeaderMalformed {
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
return apiErr
}
@@ -505,6 +539,11 @@ var errorCodes = errorCodeMap{
Description: "Your proposed upload exceeds the maximum allowed object size.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrExcessData: {
Code: "ExcessData",
Description: "More data provided than indicated content length",
HTTPStatusCode: http.StatusBadRequest,
},
ErrPolicyTooLarge: {
Code: "PolicyTooLarge",
Description: "Policy exceeds the maximum allowed document size.",
@@ -530,6 +569,11 @@ var errorCodes = errorCodeMap{
Description: "Your account is disabled; please contact your administrator.",
HTTPStatusCode: http.StatusForbidden,
},
ErrInvalidArgument: {
Code: "InvalidArgument",
Description: "Invalid argument",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidBucketName: {
Code: "InvalidBucketName",
Description: "The specified bucket is not valid.",
@@ -655,6 +699,11 @@ var errorCodes = errorCodeMap{
Description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingPart: {
Code: "InvalidRequest",
Description: "You must specify at least one part",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidPartOrder: {
Code: "InvalidPartOrder",
Description: "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
@@ -685,11 +734,6 @@ var errorCodes = errorCodeMap{
Description: "The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrTooManyBuckets: {
Code: "TooManyBuckets",
Description: "You have attempted to create more buckets than allowed",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketNotEmpty: {
Code: "BucketNotEmpty",
Description: "The bucket you tried to delete is not empty",
@@ -705,9 +749,9 @@ var errorCodes = errorCodeMap{
Description: "All access to this resource has been disabled.",
HTTPStatusCode: http.StatusForbidden,
},
ErrMalformedPolicy: {
ErrPolicyInvalidVersion: {
Code: "MalformedPolicy",
Description: "Policy has invalid resource.",
Description: "The policy must contain a valid version string",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingFields: {
@@ -805,11 +849,21 @@ var errorCodes = errorCodeMap{
Description: "Request is not valid yet",
HTTPStatusCode: http.StatusForbidden,
},
ErrSlowDown: {
Code: "SlowDown",
ErrSlowDownRead: {
Code: "SlowDownRead",
Description: "Resource requested is unreadable, please reduce your request rate",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrSlowDownWrite: {
Code: "SlowDownWrite",
Description: "Resource requested is unwritable, please reduce your request rate",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrMaxVersionsExceeded: {
Code: "MaxVersionsExceeded",
Description: "You've exceeded the limit on the number of versions you can create on this object",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidPrefixMarker: {
Code: "InvalidPrefixMarker",
Description: "Invalid marker prefix combination",
@@ -910,6 +964,11 @@ var errorCodes = errorCodeMap{
Description: "No matching ExistingsObjects rule enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrRemoteTargetDenyAddError: {
Code: "XMinioAdminRemoteTargetDenyAdd",
Description: "Cannot add remote target endpoint since this server is in a cluster replication setup",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationDenyEditError: {
Code: "XMinioReplicationDenyEdit",
Description: "Cannot alter local replication config since this server is in a cluster replication setup",
@@ -965,6 +1024,16 @@ var errorCodes = errorCodeMap{
Description: "Versioning must be 'Enabled' on the bucket to add a replication target",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationValidationError: {
Code: "InvalidRequest",
Description: "Replication validation failed on target",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationPermissionCheckError: {
Code: "ReplicationPermissionCheck",
Description: "X-Minio-Source-Replication-Check cannot be specified in request. Request cannot be completed",
HTTPStatusCode: http.StatusBadRequest,
},
ErrNoSuchObjectLockConfiguration: {
Code: "NoSuchObjectLockConfiguration",
Description: "The specified object does not have a ObjectLock configuration",
@@ -1078,8 +1147,13 @@ var errorCodes = errorCodeMap{
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidEncryptionMethod: {
Code: "InvalidRequest",
Description: "The encryption method specified is not supported",
Code: "InvalidArgument",
Description: "Server Side Encryption with AWS KMS managed key requires HTTP header x-amz-server-side-encryption : aws:kms",
HTTPStatusCode: http.StatusBadRequest,
},
ErrIncompatibleEncryptionMethod: {
Code: "InvalidArgument",
Description: "Server Side Encryption with Customer provided key is incompatible with the encryption method specified",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidEncryptionKeyID: {
@@ -1107,6 +1181,11 @@ var errorCodes = errorCodeMap{
Description: "The encryption parameters are not applicable to this object.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidEncryptionParametersSSEC: {
Code: "InvalidRequest",
Description: "SSE-C encryption parameters are not supported on replicated bucket.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidSSECustomerAlgorithm: {
Code: "InvalidArgument",
Description: "Requests specifying Server Side Encryption with Customer provided keys must provide a valid encryption algorithm.",
@@ -1137,11 +1216,6 @@ var errorCodes = errorCodeMap{
Description: "The provided encryption parameters did not match the ones used originally.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrIncompatibleEncryptionMethod: {
Code: "InvalidArgument",
Description: "Server side encryption specified with both SSE-C and SSE-S3 headers",
HTTPStatusCode: http.StatusBadRequest,
},
ErrKMSNotConfigured: {
Code: "NotImplemented",
Description: "Server side encryption specified but KMS is not configured",
@@ -1152,6 +1226,11 @@ var errorCodes = errorCodeMap{
Description: "Invalid keyId",
HTTPStatusCode: http.StatusBadRequest,
},
ErrKMSDefaultKeyAlreadyConfigured: {
Code: "KMS.DefaultKeyAlreadyConfiguredException",
Description: "A default encryption already exists and cannot be changed on KMS",
HTTPStatusCode: http.StatusConflict,
},
ErrNoAccessKey: {
Code: "AccessDenied",
Description: "No AWSAccessKey was presented",
@@ -1216,26 +1295,52 @@ var errorCodes = errorCodeMap{
Description: "The JSON you provided was not well-formed or did not validate against our published format.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidLifecycleQueryParameter: {
Code: "XMinioInvalidLifecycleParameter",
Description: "The boolean value provided for withUpdatedAt query parameter was invalid.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminNoSuchUser: {
Code: "XMinioAdminNoSuchUser",
Description: "The specified user does not exist.",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminNoSuchUserLDAPWarn: {
Code: "XMinioAdminNoSuchUser",
Description: "The specified user does not exist. If you meant a user in LDAP, use `mc idp ldap`",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminNoSuchGroup: {
Code: "XMinioAdminNoSuchGroup",
Description: "The specified group does not exist.",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminNoSuchJob: {
Code: "XMinioAdminNoSuchJob",
Description: "The specified job does not exist.",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminGroupNotEmpty: {
Code: "XMinioAdminGroupNotEmpty",
Description: "The specified group is not empty - cannot remove it.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminGroupDisabled: {
Code: "XMinioAdminGroupDisabled",
Description: "The specified group is disabled.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminNoSuchPolicy: {
Code: "XMinioAdminNoSuchPolicy",
Description: "The canned policy does not exist.",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminPolicyChangeAlreadyApplied: {
Code: "XMinioAdminPolicyChangeAlreadyApplied",
Description: "The specified policy change is already in effect.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminInvalidArgument: {
Code: "XMinioAdminInvalidArgument",
Description: "Invalid arguments specified.",
@@ -1282,6 +1387,31 @@ var errorCodes = errorCodeMap{
Description: "JSON configuration provided has objects with duplicate keys",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminConfigInvalidIDPType: {
Code: "XMinioAdminConfigInvalidIDPType",
Description: fmt.Sprintf("Invalid IDP configuration type - must be one of %v", madmin.ValidIDPConfigTypes),
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminConfigLDAPNonDefaultConfigName: {
Code: "XMinioAdminConfigLDAPNonDefaultConfigName",
Description: "Only a single LDAP configuration is supported - config name must be empty or `_`",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminConfigLDAPValidation: {
Code: "XMinioAdminConfigLDAPValidation",
Description: "LDAP Configuration validation failed",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminConfigIDPCfgNameAlreadyExists: {
Code: "XMinioAdminConfigIDPCfgNameAlreadyExists",
Description: "An IDP configuration with the given name already exists",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminConfigIDPCfgNameDoesNotExist: {
Code: "XMinioAdminConfigIDPCfgNameDoesNotExist",
Description: "No such IDP configuration exists",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminConfigNotificationTargetsFailed: {
Code: "XMinioAdminNotificationTargetsTestFailed",
Description: "Configuration update failed due an unsuccessful attempt to connect to one or more notification servers",
@@ -1312,7 +1442,7 @@ var errorCodes = errorCodeMap{
Description: "Cannot respond to plain-text request from TLS-encrypted server",
HTTPStatusCode: http.StatusBadRequest,
},
ErrOperationTimedOut: {
ErrRequestTimedout: {
Code: "RequestTimeout",
Description: "A timeout occurred while trying to lock a resource, please reduce your request rate",
HTTPStatusCode: http.StatusServiceUnavailable,
@@ -1322,9 +1452,9 @@ var errorCodes = errorCodeMap{
Description: "Client disconnected before response was ready",
HTTPStatusCode: 499, // No official code, use nginx value.
},
ErrOperationMaxedOut: {
Code: "SlowDown",
Description: "A timeout exceeded while waiting to proceed with the request, please reduce your request rate",
ErrTooManyRequests: {
Code: "TooManyRequests",
Description: "Deadline exceeded while waiting in incoming queue, please reduce your request rate",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrUnsupportedMetadata: {
@@ -1332,6 +1462,11 @@ var errorCodes = errorCodeMap{
Description: "Your metadata headers are not supported.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrUnsupportedHostHeader: {
Code: "InvalidArgument",
Description: "Your Host header is malformed.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrObjectTampered: {
Code: "XMinioObjectTampered",
Description: errObjectTampered.Error(),
@@ -1346,7 +1481,7 @@ var errorCodes = errorCodeMap{
ErrSiteReplicationPeerResp: {
Code: "XMinioSiteReplicationPeerResp",
Description: "Error received when contacting a peer site",
HTTPStatusCode: http.StatusServiceUnavailable,
HTTPStatusCode: http.StatusBadRequest,
},
ErrSiteReplicationBackendIssue: {
Code: "XMinioSiteReplicationBackendIssue",
@@ -1378,6 +1513,21 @@ var errorCodes = errorCodeMap{
Description: "Site not found in site replication configuration",
HTTPStatusCode: http.StatusBadRequest,
},
ErrSiteReplicationIAMConfigMismatch: {
Code: "XMinioSiteReplicationIAMConfigMismatch",
Description: "IAM configuration mismatch between sites",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminRebalanceAlreadyStarted: {
Code: "XMinioAdminRebalanceAlreadyStarted",
Description: "Pool rebalance is already started",
HTTPStatusCode: http.StatusConflict,
},
ErrAdminRebalanceNotStarted: {
Code: "XMinioAdminRebalanceNotStarted",
Description: "Pool rebalance is not started",
HTTPStatusCode: http.StatusNotFound,
},
ErrMaximumExpires: {
Code: "AuthorizationQueryParametersError",
Description: "X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds",
@@ -1454,7 +1604,7 @@ var errorCodes = errorCodeMap{
HTTPStatusCode: http.StatusBadRequest,
},
ErrBusy: {
Code: "Busy",
Code: "ServerBusy",
Description: "The service is unavailable. Please retry.",
HTTPStatusCode: http.StatusServiceUnavailable,
},
@@ -1893,6 +2043,26 @@ var errorCodes = errorCodeMap{
Description: "Invalid checksum provided.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrLambdaARNInvalid: {
Code: "LambdaARNInvalid",
Description: "The specified lambda ARN is invalid",
HTTPStatusCode: http.StatusBadRequest,
},
ErrLambdaARNNotFound: {
Code: "LambdaARNNotFound",
Description: "The specified lambda ARN does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrPolicyAlreadyAttached: {
Code: "XMinioPolicyAlreadyAttached",
Description: "The specified policy is already attached.",
HTTPStatusCode: http.StatusConflict,
},
ErrPolicyNotAttached: {
Code: "XMinioPolicyNotAttached",
Description: "The specified policy is not found.",
HTTPStatusCode: http.StatusNotFound,
},
// Add your error structure here.
}
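The errorCodes table above is consumed by toAPIErrorCode and errorCodes.ToAPIErr (both appear later in this diff): a Go error is unwrapped, mapped to an APIErrorCode, and then resolved to the client-facing Code, Description and HTTP status. Below is a rough stand-alone sketch of that lookup flow; classify and its two-entry table are hypothetical simplifications, only the APIError field layout is taken from this file.

package main

import (
	"encoding/xml"
	"errors"
	"fmt"
	"net/http"
)

// APIError mirrors the field layout used by the errorCodes table in this file.
type APIError struct {
	Code           string
	Description    string
	HTTPStatusCode int
}

var errNoSuchKey = errors.New("key not found")

// classify is a hypothetical stand-in for toAPIErrorCode + errorCodes.ToAPIErr:
// map a Go error to a table entry carrying the wire-level code and status.
func classify(err error) APIError {
	table := map[string]APIError{
		"NoSuchKey":     {"NoSuchKey", "The specified key does not exist.", http.StatusNotFound},
		"InternalError": {"InternalError", "We encountered an internal error, please try again.", http.StatusInternalServerError},
	}
	if errors.Is(err, errNoSuchKey) {
		return table["NoSuchKey"]
	}
	return table["InternalError"]
}

func main() {
	apiErr := classify(errNoSuchKey)
	// S3-style handlers serialize the mapped error as XML with the mapped status.
	body, _ := xml.Marshal(struct {
		XMLName xml.Name `xml:"Error"`
		Code    string
		Message string
	}{Code: apiErr.Code, Message: apiErr.Description})
	fmt.Println(apiErr.HTTPStatusCode, string(body))
}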
@@ -1905,26 +2075,33 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
}
// Only return ErrClientDisconnected if the provided context is actually canceled.
// This way downstream context.Canceled will still report ErrOperationTimedOut
if contextCanceled(ctx) {
if ctx.Err() == context.Canceled {
return ErrClientDisconnected
}
// This way downstream context.Canceled will still report ErrRequestTimedout
if contextCanceled(ctx) && errors.Is(ctx.Err(), context.Canceled) {
return ErrClientDisconnected
}
// Unwrap the error first
err = unwrapAll(err)
switch err {
case errInvalidArgument:
apiErr = ErrAdminInvalidArgument
case errNoSuchPolicy:
apiErr = ErrAdminNoSuchPolicy
case errNoSuchUser:
apiErr = ErrAdminNoSuchUser
case errNoSuchUserLDAPWarn:
apiErr = ErrAdminNoSuchUserLDAPWarn
case errNoSuchServiceAccount:
apiErr = ErrAdminServiceAccountNotFound
case errNoSuchGroup:
apiErr = ErrAdminNoSuchGroup
case errGroupNotEmpty:
apiErr = ErrAdminGroupNotEmpty
case errNoSuchPolicy:
apiErr = ErrAdminNoSuchPolicy
case errNoSuchJob:
apiErr = ErrAdminNoSuchJob
case errNoPolicyToAttachOrDetach:
apiErr = ErrAdminPolicyChangeAlreadyApplied
case errSignatureMismatch:
apiErr = ErrSignatureDoesNotMatch
case errInvalidRange:
@@ -1941,9 +2118,17 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrAdminInvalidSecretKey
case errInvalidStorageClass:
apiErr = ErrInvalidStorageClass
case errErasureReadQuorum:
apiErr = ErrSlowDownRead
case errErasureWriteQuorum:
apiErr = ErrSlowDownWrite
case errMaxVersionsExceeded:
apiErr = ErrMaxVersionsExceeded
// SSE errors
case errInvalidEncryptionParameters:
apiErr = ErrInvalidEncryptionParameters
case errInvalidEncryptionParametersSSEC:
apiErr = ErrInvalidEncryptionParametersSSEC
case crypto.ErrInvalidEncryptionMethod:
apiErr = ErrInvalidEncryptionMethod
case crypto.ErrInvalidEncryptionKeyID:
@@ -1970,11 +2155,12 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrKMSNotConfigured
case errKMSKeyNotFound:
apiErr = ErrKMSKeyNotFoundException
case context.Canceled, context.DeadlineExceeded:
apiErr = ErrOperationTimedOut
case errDiskNotFound:
apiErr = ErrSlowDown
case errKMSDefaultKeyAlreadyConfigured:
apiErr = ErrKMSDefaultKeyAlreadyConfigured
case context.Canceled:
apiErr = ErrClientDisconnected
case context.DeadlineExceeded:
apiErr = ErrRequestTimedout
case objectlock.ErrInvalidRetentionDate:
apiErr = ErrInvalidRetentionDate
case objectlock.ErrPastObjectLockRetainDate:
@@ -1985,11 +2171,14 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrObjectLockInvalidHeaders
case objectlock.ErrMalformedXML:
apiErr = ErrMalformedXML
case errInvalidMaxParts:
apiErr = ErrInvalidMaxParts
case ioutil.ErrOverread:
apiErr = ErrExcessData
}
// Compression errors
switch err {
case errInvalidDecompressedSize:
if err == errInvalidDecompressedSize {
apiErr = ErrInvalidDecompressedSize
}
@@ -2000,7 +2189,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
// etcd specific errors, a key is always a bucket for us return
// ErrNoSuchBucket in such a case.
if err == dns.ErrNoEntriesFound {
if errors.Is(err, dns.ErrNoEntriesFound) {
return ErrNoSuchBucket
}
@@ -2050,11 +2239,9 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
case InvalidPart:
apiErr = ErrInvalidPart
case InsufficientWriteQuorum:
apiErr = ErrSlowDown
apiErr = ErrSlowDownWrite
case InsufficientReadQuorum:
apiErr = ErrSlowDown
case InvalidMarkerPrefixCombination:
apiErr = ErrNotImplemented
apiErr = ErrSlowDownRead
case InvalidUploadIDKeyCombination:
apiErr = ErrNotImplemented
case MalformedUploadID:
@@ -2067,10 +2254,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrContentSHA256Mismatch
case hash.ChecksumMismatch:
apiErr = ErrContentChecksumMismatch
case ObjectTooLarge:
apiErr = ErrEntityTooLarge
case ObjectTooSmall:
case hash.SizeTooSmall:
apiErr = ErrEntityTooSmall
case hash.SizeTooLarge:
apiErr = ErrEntityTooLarge
case NotImplemented:
apiErr = ErrNotImplemented
case PartTooBig:
@@ -2115,7 +2302,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrTransitionStorageClassNotFoundError
case InvalidObjectState:
apiErr = ErrInvalidObjectState
case PreConditionFailed:
apiErr = ErrPreconditionFailed
case BucketQuotaExceeded:
apiErr = ErrAdminBucketQuotaExceeded
case *event.ErrInvalidEventName:
@@ -2124,6 +2312,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrARNNotification
case *event.ErrARNNotFound:
apiErr = ErrARNNotification
case *levent.ErrInvalidARN:
apiErr = ErrLambdaARNInvalid
case *levent.ErrARNNotFound:
apiErr = ErrLambdaARNNotFound
case *event.ErrUnknownRegion:
apiErr = ErrRegionNotification
case *event.ErrInvalidFilterName:
@@ -2141,7 +2333,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
case *event.ErrUnsupportedConfiguration:
apiErr = ErrUnsupportedNotification
case OperationTimedOut:
apiErr = ErrOperationTimedOut
apiErr = ErrRequestTimedout
case BackendDown:
apiErr = ErrBackendDown
case ObjectNameTooLong:
@@ -2151,6 +2343,12 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
case dns.ErrBucketConflict:
apiErr = ErrBucketAlreadyExists
default:
if _, ok := err.(tags.Error); ok {
// tag errors are not exported, so we check their custom interface to avoid logging.
// The correct type is inserted by toAPIError.
apiErr = ErrInternalError
break
}
var ie, iw int
// This work-around is to handle the issue golang/go#30648
//nolint:gocritic
@@ -2187,39 +2385,29 @@ func toAPIError(ctx context.Context, err error) APIError {
}
apiErr := errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
e, ok := err.(dns.ErrInvalidBucketName)
if ok {
code := toAPIErrorCode(ctx, e)
apiErr = errorCodes.ToAPIErrWithErr(code, e)
}
if apiErr.Code == "NotImplemented" {
switch e := err.(type) {
case NotImplemented:
desc := e.Error()
if desc == "" {
desc = apiErr.Description
}
apiErr = APIError{
Code: apiErr.Code,
Description: desc,
HTTPStatusCode: apiErr.HTTPStatusCode,
}
return apiErr
switch apiErr.Code {
case "NotImplemented":
desc := fmt.Sprintf("%s (%v)", apiErr.Description, err)
apiErr = APIError{
Code: apiErr.Code,
Description: desc,
HTTPStatusCode: apiErr.HTTPStatusCode,
}
}
if apiErr.Code == "XMinioBackendDown" {
case "XMinioBackendDown":
apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
return apiErr
}
if apiErr.Code == "InternalError" {
case "InternalError":
// If we see an internal error try to interpret
// any underlying errors if possible depending on
// their internal error types. This code is only
// useful with gateway implementations.
// their internal error types.
switch e := err.(type) {
case kms.Error:
apiErr = APIError{
Description: e.Err.Error(),
Code: e.APICode,
HTTPStatusCode: e.HTTPStatusCode,
}
case batchReplicationJobError:
apiErr = APIError(e)
case InvalidArgument:
apiErr = APIError{
Code: "InvalidArgument",
@@ -2228,27 +2416,25 @@ func toAPIError(ctx context.Context, err error) APIError {
}
case *xml.SyntaxError:
apiErr = APIError{
Code: "MalformedXML",
Description: fmt.Sprintf("%s (%s)", errorCodes[ErrMalformedXML].Description,
e.Error()),
Code: "MalformedXML",
Description: fmt.Sprintf("%s (%s)", errorCodes[ErrMalformedXML].Description, e),
HTTPStatusCode: errorCodes[ErrMalformedXML].HTTPStatusCode,
}
case url.EscapeError:
apiErr = APIError{
Code: "XMinioInvalidObjectName",
Description: fmt.Sprintf("%s (%s)", errorCodes[ErrInvalidObjectName].Description,
e.Error()),
Code: "XMinioInvalidObjectName",
Description: fmt.Sprintf("%s (%s)", errorCodes[ErrInvalidObjectName].Description, e),
HTTPStatusCode: http.StatusBadRequest,
}
case versioning.Error:
apiErr = APIError{
Code: "IllegalVersioningConfigurationException",
Description: fmt.Sprintf("Versioning configuration specified in the request is invalid. (%s)", e.Error()),
Description: fmt.Sprintf("Versioning configuration specified in the request is invalid. (%s)", e),
HTTPStatusCode: http.StatusBadRequest,
}
case lifecycle.Error:
apiErr = APIError{
Code: "InvalidRequest",
Code: "InvalidArgument",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
@@ -2282,7 +2468,7 @@ func toAPIError(ctx context.Context, err error) APIError {
Description: e.Message,
HTTPStatusCode: e.StatusCode,
}
if globalIsGateway && strings.Contains(e.Message, "KMS is not configured") {
if strings.Contains(e.Message, "KMS is not configured") {
apiErr = APIError{
Code: "NotImplemented",
Description: e.Message,
@@ -2306,22 +2492,10 @@ func toAPIError(ctx context.Context, err error) APIError {
Description: e.Error(),
HTTPStatusCode: e.Response().StatusCode,
}
// Add more Gateway SDKs here if any in future.
// Add more other SDK related errors here if any in future.
default:
//nolint:gocritic
if errors.Is(err, errMalformedEncoding) {
apiErr = APIError{
Code: "BadRequest",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
} else if errors.Is(err, errChunkTooBig) {
apiErr = APIError{
Code: "BadRequest",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
} else if errors.Is(err, strconv.ErrRange) {
if errors.Is(err, errMalformedEncoding) || errors.Is(err, errChunkTooBig) || errors.Is(err, strconv.ErrRange) {
apiErr = APIError{
Code: "BadRequest",
Description: err.Error(),


@@ -20,8 +20,6 @@ package cmd
import (
"context"
"errors"
"os"
"path/filepath"
"testing"
"github.com/minio/minio/internal/crypto"
@@ -42,9 +40,8 @@ var toAPIErrorTests = []struct {
{err: ObjectNameInvalid{}, errCode: ErrInvalidObjectName},
{err: InvalidUploadID{}, errCode: ErrNoSuchUpload},
{err: InvalidPart{}, errCode: ErrInvalidPart},
{err: InsufficientReadQuorum{}, errCode: ErrSlowDown},
{err: InsufficientWriteQuorum{}, errCode: ErrSlowDown},
{err: InvalidMarkerPrefixCombination{}, errCode: ErrNotImplemented},
{err: InsufficientReadQuorum{}, errCode: ErrSlowDownRead},
{err: InsufficientWriteQuorum{}, errCode: ErrSlowDownWrite},
{err: InvalidUploadIDKeyCombination{}, errCode: ErrNotImplemented},
{err: MalformedUploadID{}, errCode: ErrNoSuchUpload},
{err: PartTooSmall{}, errCode: ErrEntityTooSmall},
@@ -67,11 +64,6 @@ var toAPIErrorTests = []struct {
}
func TestAPIErrCode(t *testing.T) {
disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
defer os.RemoveAll(disk)
initFSObjects(disk, t)
ctx := context.Background()
for i, testCase := range toAPIErrorTests {
errCode := toAPIErrorCode(ctx, testCase.err)
@@ -80,3 +72,19 @@ func TestAPIErrCode(t *testing.T) {
}
}
}
// Check if an API error is properly defined
func TestAPIErrCodeDefinition(t *testing.T) {
for errAPI := ErrNone + 1; errAPI < apiErrCodeEnd; errAPI++ {
errCode, ok := errorCodes[errAPI]
if !ok {
t.Fatal(errAPI, "error code is not defined in the API error code table")
}
if errCode.Code == "" {
t.Fatal(errAPI, "error code has an empty XML code")
}
if errCode.HTTPStatusCode == 0 {
t.Fatal(errAPI, "error code has a zero HTTP status code")
}
}
}


@@ -20,13 +20,14 @@ package cmd
import (
"bytes"
"encoding/json"
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/crypto"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
@@ -64,15 +65,31 @@ func setCommonHeaders(w http.ResponseWriter) {
// Encodes the response headers into XML format.
func encodeResponse(response interface{}) []byte {
var bytesBuffer bytes.Buffer
bytesBuffer.WriteString(xxml.Header)
buf, err := xxml.Marshal(response)
if err != nil {
var buf bytes.Buffer
buf.WriteString(xml.Header)
if err := xml.NewEncoder(&buf).Encode(response); err != nil {
logger.LogIf(GlobalContext, err)
return nil
}
bytesBuffer.Write(buf)
return bytesBuffer.Bytes()
return buf.Bytes()
}
// Use this encodeResponseList() to support control characters
// this function must be used only by ListObjects() for objects
// with control characters, this is a specialized extension
// to support AWS S3 compatible behavior.
//
// Do not use this function for anything other than ListObjects()
// variants, please open a github discussion if you wish to use
// this in other places.
func encodeResponseList(response interface{}) []byte {
var buf bytes.Buffer
buf.WriteString(xxml.Header)
if err := xxml.NewEncoder(&buf).Encode(response); err != nil {
logger.LogIf(GlobalContext, err)
return nil
}
return buf.Bytes()
}
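A usage sketch for the function above, assuming the minio/xxml encoder behaves like encoding/xml except that it preserves control characters in element data (the reason the comment restricts it to ListObjects() responses). listEntry is a toy type; real callers pass ListObjectsResponse and its variants.

package main

import (
	"bytes"
	"fmt"

	xxml "github.com/minio/xxml"
)

// listEntry is a toy stand-in; real callers of encodeResponseList pass
// ListObjectsResponse and its variants.
type listEntry struct {
	Key string
}

func main() {
	// Object keys uploaded by S3 clients may contain control characters; the
	// xxml encoder is used (as in encodeResponseList above) so such keys are
	// preserved in the listing output.
	var buf bytes.Buffer
	buf.WriteString(xxml.Header)
	if err := xxml.NewEncoder(&buf).Encode(listEntry{Key: "logs/\x01part"}); err != nil {
		fmt.Println("encode error:", err)
		return
	}
	fmt.Println(buf.String())
}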
// Encodes the response headers into JSON format.
@@ -116,16 +133,15 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
w.Header().Set(xhttp.Expires, objInfo.Expires.UTC().Format(http.TimeFormat))
}
if globalCacheConfig.Enabled {
w.Header().Set(xhttp.XCache, objInfo.CacheStatus.String())
w.Header().Set(xhttp.XCacheLookup, objInfo.CacheLookupStatus.String())
}
// Set tag count if object has tags
if len(objInfo.UserTags) > 0 {
tags, _ := url.ParseQuery(objInfo.UserTags)
if len(tags) > 0 {
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(len(tags))}
tags, _ := tags.ParseObjectTags(objInfo.UserTags)
if tags.Count() > 0 {
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(tags.Count())}
if opts.Tagging {
// This is MinIO only extension to return back tags along with the count.
w.Header()[xhttp.AmzObjectTagging] = []string{objInfo.UserTags}
}
}
}
@@ -136,7 +152,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
continue
}
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
// Do not need to send any internal metadata
// values to client.
continue
@@ -149,7 +165,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
var isSet bool
for _, userMetadataPrefix := range userMetadataKeyPrefixes {
if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(userMetadataPrefix)) {
if !stringsHasPrefixFold(k, userMetadataPrefix) {
continue
}
w.Header()[strings.ToLower(k)] = []string{v}
@@ -186,7 +202,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
}
// Set the relevant version ID as part of the response header.
if objInfo.VersionID != "" {
if objInfo.VersionID != "" && objInfo.VersionID != nullVersionID {
w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
}


@@ -37,8 +37,8 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
maxkeys = maxObjectList
}
prefix = trimLeadingSlash(values.Get("prefix"))
marker = trimLeadingSlash(values.Get("marker"))
prefix = values.Get("prefix")
marker = values.Get("marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
return
@@ -57,8 +57,8 @@ func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimit
maxkeys = maxObjectList
}
prefix = trimLeadingSlash(values.Get("prefix"))
marker = trimLeadingSlash(values.Get("key-marker"))
prefix = values.Get("prefix")
marker = values.Get("key-marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
versionIDMarker = values.Get("version-id-marker")
@@ -87,8 +87,8 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
maxkeys = maxObjectList
}
prefix = trimLeadingSlash(values.Get("prefix"))
startAfter = trimLeadingSlash(values.Get("start-after"))
prefix = values.Get("prefix")
startAfter = values.Get("start-after")
delimiter = values.Get("delimiter")
fetchOwner = values.Get("fetch-owner") == "true"
encodingType = values.Get("encoding-type")
@@ -118,8 +118,8 @@ func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadID
maxUploads = maxUploadsList
}
prefix = trimLeadingSlash(values.Get("prefix"))
keyMarker = trimLeadingSlash(values.Get("key-marker"))
prefix = values.Get("prefix")
keyMarker = values.Get("key-marker")
uploadIDMarker = values.Get("upload-id-marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")


@@ -29,20 +29,21 @@ import (
"strings"
"time"
"github.com/minio/minio/internal/amztime"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/handlers"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy"
xxml "github.com/minio/xxml"
)
const (
// RFC3339, a subset of the ISO8601 timestamp format, e.g. 2014-04-29T18:30:38Z
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
maxObjectList = 1000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
maxDeleteList = 1000 // Limit number of objects deleted in a delete call.
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
maxObjectList = 1000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
maxDeleteList = 1000 // Limit number of objects deleted in a delete call.
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
)
// LocationResponse - format for location response.
@@ -84,14 +85,13 @@ type ListVersionsResponse struct {
VersionIDMarker string `xml:"VersionIdMarker"`
MaxKeys int
Delimiter string
Delimiter string `xml:"Delimiter,omitempty"`
// A flag that indicates whether or not ListObjects returned all of the results
// that satisfied the search criteria.
IsTruncated bool
CommonPrefixes []CommonPrefix
DeleteMarkers []DeleteMarkerVersion `xml:"DeleteMarker,omitempty"`
Versions []ObjectVersion `xml:"Version,omitempty"`
Versions []ObjectVersion
// Encoding type used to encode object keys in the response.
EncodingType string `xml:"EncodingType,omitempty"`
@@ -115,7 +115,7 @@ type ListObjectsResponse struct {
NextMarker string `xml:"NextMarker,omitempty"`
MaxKeys int
Delimiter string
Delimiter string `xml:"Delimiter,omitempty"`
// A flag that indicates whether or not ListObjects returned all of the results
// that satisfied the search criteria.
IsTruncated bool
@@ -146,7 +146,7 @@ type ListObjectsV2Response struct {
KeyCount int
MaxKeys int
Delimiter string
Delimiter string `xml:"Delimiter,omitempty"`
// A flag that indicates whether or not ListObjects returned all of the results
// that satisfied the search criteria.
IsTruncated bool
@@ -166,10 +166,10 @@ type Part struct {
Size int64
// Checksum values
ChecksumCRC32 string
ChecksumCRC32C string
ChecksumSHA1 string
ChecksumSHA256 string
ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"`
ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"`
ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
}
// ListPartsResponse - format for list parts response.
@@ -205,7 +205,7 @@ type ListMultipartUploadsResponse struct {
UploadIDMarker string `xml:"UploadIdMarker"`
NextKeyMarker string
NextUploadIDMarker string `xml:"NextUploadIdMarker"`
Delimiter string
Delimiter string `xml:"Delimiter,omitempty"`
Prefix string
EncodingType string `xml:"EncodingType,omitempty"`
MaxUploads int
@@ -256,6 +256,19 @@ type ObjectVersion struct {
Object
IsLatest bool
VersionID string `xml:"VersionId"`
isDeleteMarker bool
}
// MarshalXML - marshal ObjectVersion
func (o ObjectVersion) MarshalXML(e *xxml.Encoder, start xxml.StartElement) error {
if o.isDeleteMarker {
start.Name.Local = "DeleteMarker"
} else {
start.Name.Local = "Version"
}
type objectVersionWrapper ObjectVersion
return e.EncodeElement(objectVersionWrapper(o), start)
}
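The marshaler above renames the element to Version or DeleteMarker at encode time and re-encodes through the local objectVersionWrapper type so the encoder does not call the custom MarshalXML again. Below is a minimal stand-alone illustration of that defined-type trick using the standard encoding/xml package; item and itemAlias are hypothetical names.

package main

import (
	"encoding/xml"
	"fmt"
	"os"
)

// item is a hypothetical listing entry used only to illustrate the technique.
type item struct {
	Deleted bool `xml:"-"`
	Key     string
}

// MarshalXML picks the element name at encode time, then encodes through a new
// defined type so the encoder does not call this method again recursively.
func (i item) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if i.Deleted {
		start.Name.Local = "DeleteMarker"
	} else {
		start.Name.Local = "Version"
	}
	type itemAlias item // defined type drops the MarshalXML method
	return e.EncodeElement(itemAlias(i), start)
}

func main() {
	enc := xml.NewEncoder(os.Stdout)
	enc.Indent("", "  ")
	_ = enc.Encode([]item{{Key: "a.txt"}, {Deleted: true, Key: "b.txt"}})
	fmt.Println()
}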
// DeleteMarkerVersion container for delete marker metadata
@@ -302,12 +315,12 @@ func (s *Metadata) Set(k, v string) {
}
type xmlKeyEntry struct {
XMLName xml.Name
XMLName xxml.Name
Value string `xml:",chardata"`
}
// MarshalXML - StringMap marshals into XML.
func (s *Metadata) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
func (s *Metadata) MarshalXML(e *xxml.Encoder, start xxml.StartElement) error {
if s == nil {
return nil
}
@@ -322,7 +335,7 @@ func (s *Metadata) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
for _, item := range s.Items {
if err := e.Encode(xmlKeyEntry{
XMLName: xml.Name{Local: item.Key},
XMLName: xxml.Name{Local: item.Key},
Value: item.Value,
}); err != nil {
return err
@@ -332,6 +345,13 @@ func (s *Metadata) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeToken(start.End())
}
// ObjectInternalInfo contains some internal information about a given
// object, it will be printed in listing calls when metadata is enabled.
type ObjectInternalInfo struct {
K int // Data blocks
M int // Parity blocks
}
// Object container for object metadata
type Object struct {
Key string
@@ -340,13 +360,16 @@ type Object struct {
Size int64
// Owner of the object.
Owner Owner
Owner *Owner `xml:"Owner,omitempty"`
// The class of storage used to store the object.
StorageClass string
// UserMetadata user-defined metadata
UserMetadata *Metadata `xml:"UserMetadata,omitempty"`
UserTags string `xml:"UserTags,omitempty"`
Internal *ObjectInternalInfo `xml:"Internal,omitempty"`
}
// CopyObjectResponse container returns ETag and LastModified of the successfully copied object
@@ -390,10 +413,10 @@ type CompleteMultipartUploadResponse struct {
Key string
ETag string
ChecksumCRC32 string
ChecksumCRC32C string
ChecksumSHA1 string
ChecksumSHA256 string
ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"`
ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"`
ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
}
// DeleteError structure.
@@ -469,7 +492,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
for _, bucket := range buckets {
listbuckets = append(listbuckets, Bucket{
Name: bucket.Name,
CreationDate: bucket.Created.UTC().Format(iso8601TimeFormat),
CreationDate: amztime.ISO8601Format(bucket.Created.UTC()),
})
}
@@ -479,39 +502,72 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
return data
}
// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
versions := make([]ObjectVersion, 0, len(resp.Objects))
deleteMarkers := make([]DeleteMarkerVersion, 0, len(resp.Objects))
func cleanReservedKeys(metadata map[string]string) map[string]string {
m := cloneMSS(metadata)
owner := Owner{
switch kind, _ := crypto.IsEncrypted(metadata); kind {
case crypto.S3:
m[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionAES
case crypto.S3KMS:
m[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionKMS
m[xhttp.AmzServerSideEncryptionKmsID] = kmsKeyIDFromMetadata(metadata)
if kmsCtx, ok := metadata[crypto.MetaContext]; ok {
m[xhttp.AmzServerSideEncryptionKmsContext] = kmsCtx
}
case crypto.SSEC:
m[xhttp.AmzServerSideEncryptionCustomerAlgorithm] = xhttp.AmzEncryptionAES
}
var toRemove []string
for k := range cleanMinioInternalMetadataKeys(m) {
if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
// Do not need to send any internal metadata
// values to client.
toRemove = append(toRemove, k)
continue
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
toRemove = append(toRemove, k)
continue
}
}
for _, k := range toRemove {
delete(m, k)
delete(m, strings.ToLower(k))
}
return m
}
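cleanReservedKeys above copies the metadata map, normalizes the server-side-encryption headers, and drops internal keys plus the unencrypted content-length/MD5 pair flagged by the linked advisory. A small before/after sketch of the same filtering idea follows; dropReserved and the literal header strings are placeholders rather than MinIO's xhttp constants.

package main

import (
	"fmt"
	"strings"
)

// dropReserved is a hypothetical stand-in for cleanReservedKeys: copy the map
// while removing internal-prefix keys and the advisory-listed unencrypted
// content-length/MD5 keys.
func dropReserved(meta map[string]string) map[string]string {
	out := make(map[string]string, len(meta))
	for k, v := range meta {
		lk := strings.ToLower(k)
		if strings.HasPrefix(lk, "x-minio-internal-") {
			continue
		}
		if lk == "x-amz-meta-x-amz-unencrypted-content-length" || lk == "x-amz-meta-x-amz-unencrypted-content-md5" {
			continue
		}
		out[k] = v
	}
	return out
}

func main() {
	meta := map[string]string{
		"X-Minio-Internal-actual-size":                "1048576",
		"X-Amz-Meta-X-Amz-Unencrypted-Content-Length": "1048576",
		"X-Amz-Meta-Project":                          "analytics",
	}
	fmt.Println(dropReserved(meta)) // only the user-defined metadata key remains
}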
// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse {
versions := make([]ObjectVersion, 0, len(resp.Objects))
owner := &Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
data := ListVersionsResponse{}
var lastObjMetaName string
var tagErr, metaErr APIErrorCode = -1, -1
for _, object := range resp.Objects {
if object.Name == "" {
continue
}
if object.DeleteMarker {
deleteMarker := DeleteMarkerVersion{
Key: s3EncodeName(object.Name, encodingType),
LastModified: object.ModTime.UTC().Format(iso8601TimeFormat),
Owner: owner,
VersionID: object.VersionID,
}
if deleteMarker.VersionID == "" {
deleteMarker.VersionID = nullVersionID
}
deleteMarker.IsLatest = object.IsLatest
deleteMarkers = append(deleteMarkers, deleteMarker)
continue
// Cache checks for the same object
if metadata != nil && lastObjMetaName != object.Name {
tagErr = metadata(object.Name, policy.GetObjectTaggingAction)
metaErr = metadata(object.Name, policy.GetObjectAction)
lastObjMetaName = object.Name
}
content := ObjectVersion{}
content.Key = s3EncodeName(object.Name, encodingType)
content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
content.LastModified = amztime.ISO8601Format(object.ModTime.UTC())
if object.ETag != "" {
content.ETag = "\"" + object.ETag + "\""
}
@@ -521,18 +577,41 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
if tagErr == ErrNone {
content.UserTags = object.UserTags
}
if metaErr == ErrNone {
content.UserMetadata = &Metadata{}
switch kind, _ := crypto.IsEncrypted(object.UserDefined); kind {
case crypto.S3:
content.UserMetadata.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
case crypto.S3KMS:
content.UserMetadata.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
case crypto.SSEC:
content.UserMetadata.Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, xhttp.AmzEncryptionAES)
}
for k, v := range cleanReservedKeys(object.UserDefined) {
content.UserMetadata.Set(k, v)
}
content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat))
content.Internal = &ObjectInternalInfo{
K: object.DataBlocks,
M: object.ParityBlocks,
}
}
content.Owner = owner
content.VersionID = object.VersionID
if content.VersionID == "" {
content.VersionID = nullVersionID
}
content.IsLatest = object.IsLatest
content.isDeleteMarker = object.DeleteMarker
versions = append(versions, content)
}
data.Name = bucket
data.Versions = versions
data.DeleteMarkers = deleteMarkers
data.EncodingType = encodingType
data.Prefix = s3EncodeName(prefix, encodingType)
data.KeyMarker = s3EncodeName(marker, encodingType)
@@ -557,7 +636,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
// generates an ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
contents := make([]Object, 0, len(resp.Objects))
owner := Owner{
owner := &Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
@@ -569,7 +648,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
continue
}
content.Key = s3EncodeName(object.Name, encodingType)
content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
content.LastModified = amztime.ISO8601Format(object.ModTime.UTC())
if object.ETag != "" {
content.ETag = "\"" + object.ETag + "\""
}
@@ -604,12 +683,16 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
}
// generates a ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata metaCheckFn) ListObjectsV2Response {
contents := make([]Object, 0, len(objects))
owner := Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
var owner *Owner
if fetchOwner {
owner = &Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
}
data := ListObjectsV2Response{}
for _, object := range objects {
@@ -618,7 +701,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
continue
}
content.Key = s3EncodeName(object.Name, encodingType)
content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
content.LastModified = amztime.ISO8601Format(object.ModTime.UTC())
if object.ETag != "" {
content.ETag = "\"" + object.ETag + "\""
}
@@ -629,27 +712,28 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
content.StorageClass = globalMinioDefaultStorageClass
}
content.Owner = owner
if metadata {
content.UserMetadata = &Metadata{}
switch kind, _ := crypto.IsEncrypted(object.UserDefined); kind {
case crypto.S3:
content.UserMetadata.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
case crypto.S3KMS:
content.UserMetadata.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
case crypto.SSEC:
content.UserMetadata.Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, xhttp.AmzEncryptionAES)
if metadata != nil {
if metadata(object.Name, policy.GetObjectTaggingAction) == ErrNone {
content.UserTags = object.UserTags
}
for k, v := range CleanMinioInternalMetadataKeys(object.UserDefined) {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
// Do not need to send any internal metadata
// values to client.
continue
if metadata(object.Name, policy.GetObjectAction) == ErrNone {
content.UserMetadata = &Metadata{}
switch kind, _ := crypto.IsEncrypted(object.UserDefined); kind {
case crypto.S3:
content.UserMetadata.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
case crypto.S3KMS:
content.UserMetadata.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
case crypto.SSEC:
content.UserMetadata.Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, xhttp.AmzEncryptionAES)
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue
for k, v := range cleanReservedKeys(object.UserDefined) {
content.UserMetadata.Set(k, v)
}
content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat))
content.Internal = &ObjectInternalInfo{
K: object.DataBlocks,
M: object.ParityBlocks,
}
content.UserMetadata.Set(k, v)
}
}
contents = append(contents, content)
@@ -677,11 +761,13 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
return data
}
type metaCheckFn = func(name string, action policy.Action) (s3Err APIErrorCode)
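
The metaCheckFn alias lets the list handlers hand a per-object authorization callback to the response generators. Below is a minimal, self-contained sketch of the intended usage pattern; the apiErrorCode and action types are simplified stand-ins for MinIO's APIErrorCode and policy.Action, and the object name is hypothetical.

```go
package main

import "fmt"

type apiErrorCode int

const (
	errNone apiErrorCode = iota
	errAccessDenied
)

type action string

const (
	getObjectTaggingAction action = "s3:GetObjectTagging"
	getObjectAction        action = "s3:GetObject"
)

// metaCheckFn mirrors the alias above: the listing code asks once per object
// and per action whether tags or user metadata may be included in the response.
type metaCheckFn = func(name string, a action) apiErrorCode

func main() {
	// Hypothetical requester: allowed to read metadata, but not tags.
	check := metaCheckFn(func(name string, a action) apiErrorCode {
		if a == getObjectTaggingAction {
			return errAccessDenied
		}
		return errNone
	})
	fmt.Println("include tags:", check("photos/cat.png", getObjectTaggingAction) == errNone)
	fmt.Println("include metadata:", check("photos/cat.png", getObjectAction) == errNone)
}
```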
// generates CopyObjectResponse from etag and lastModified time.
func generateCopyObjectResponse(etag string, lastModified time.Time) CopyObjectResponse {
return CopyObjectResponse{
ETag: "\"" + etag + "\"",
LastModified: lastModified.UTC().Format(iso8601TimeFormat),
LastModified: amztime.ISO8601Format(lastModified.UTC()),
}
}
@@ -689,7 +775,7 @@ func generateCopyObjectResponse(etag string, lastModified time.Time) CopyObjectR
func generateCopyObjectPartResponse(etag string, lastModified time.Time) CopyObjectPartResponse {
return CopyObjectPartResponse{
ETag: "\"" + etag + "\"",
LastModified: lastModified.UTC().Format(iso8601TimeFormat),
LastModified: amztime.ISO8601Format(lastModified.UTC()),
}
}
@@ -704,7 +790,7 @@ func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) Initi
// generates CompleteMultipartUploadResponse for given bucket, key, location and ETag.
func generateCompleteMultpartUploadResponse(bucket, key, location string, oi ObjectInfo) CompleteMultipartUploadResponse {
cs := oi.decryptChecksums()
cs := oi.decryptChecksums(0)
c := CompleteMultipartUploadResponse{
Location: location,
Bucket: bucket,
@@ -749,7 +835,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
newPart.PartNumber = part.PartNumber
newPart.ETag = "\"" + part.ETag + "\""
newPart.Size = part.Size
newPart.LastModified = part.LastModified.UTC().Format(iso8601TimeFormat)
newPart.LastModified = amztime.ISO8601Format(part.LastModified.UTC())
newPart.ChecksumCRC32 = part.ChecksumCRC32
newPart.ChecksumCRC32C = part.ChecksumCRC32C
newPart.ChecksumSHA1 = part.ChecksumSHA1
@@ -783,7 +869,7 @@ func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMult
newUpload := Upload{}
newUpload.UploadID = upload.UploadID
newUpload.Key = s3EncodeName(upload.Object, encodingType)
newUpload.Initiated = upload.Initiated.UTC().Format(iso8601TimeFormat)
newUpload.Initiated = amztime.ISO8601Format(upload.Initiated.UTC())
listMultipartUploadsResponse.Uploads[index] = newUpload
}
return listMultipartUploadsResponse
@@ -879,12 +965,14 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
// Generate error response.
errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path,
w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
w.Header().Get(xhttp.AmzRequestID), w.Header().Get(xhttp.AmzRequestHostID))
encodedErrorResponse := encodeResponse(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
}
func writeErrorResponseHeadersOnly(w http.ResponseWriter, err APIError) {
w.Header().Set(xMinIOErrCodeHeader, err.Code)
w.Header().Set(xMinIOErrDescHeader, "\""+err.Description+"\"")
writeResponse(w, err.HTTPStatusCode, nil, mimeNone)
}
@@ -897,7 +985,7 @@ func writeErrorResponseString(ctx context.Context, w http.ResponseWriter, err AP
// useful for admin APIs.
func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
// Generate error response.
errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path, w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path, w.Header().Get(xhttp.AmzRequestID), w.Header().Get(xhttp.AmzRequestHostID))
encodedErrorResponse := encodeResponseJSON(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}
@@ -916,7 +1004,7 @@ func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, er
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
RequestID: w.Header().Get(xhttp.AmzRequestID),
HostID: globalDeploymentID,
HostID: globalDeploymentID(),
}
encodedErrorResponse := encodeResponseJSON(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)


@@ -22,12 +22,12 @@ import (
"net"
"net/http"
"github.com/gorilla/mux"
"github.com/klauspost/compress/gzhttp"
"github.com/minio/console/restapi"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/wildcard"
"github.com/minio/mux"
"github.com/minio/pkg/v2/wildcard"
"github.com/rs/cors"
)
@@ -61,18 +61,6 @@ func newObjectLayerFn() ObjectLayer {
return globalObjectAPI
}
func newCachedObjectLayerFn() CacheObjectLayer {
globalObjLayerMutex.RLock()
defer globalObjLayerMutex.RUnlock()
return globalCacheObjectAPI
}
func setCacheObjectLayer(c CacheObjectLayer) {
globalObjLayerMutex.Lock()
globalCacheObjectAPI = c
globalObjLayerMutex.Unlock()
}
func setObjectLayer(o ObjectLayer) {
globalObjLayerMutex.Lock()
globalObjectAPI = o
@@ -82,7 +70,6 @@ func setObjectLayer(o ObjectLayer) {
// objectAPIHandler implements and provides http handlers for S3 API.
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
CacheAPI func() CacheObjectLayer
}
// getHost tries its best to return the request host.
@@ -189,7 +176,6 @@ func registerAPIRouter(router *mux.Router) {
// Initialize API.
api := objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
CacheAPI: newCachedObjectLayerFn,
}
// API Router
@@ -245,10 +231,10 @@ func registerAPIRouter(router *mux.Router) {
router.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
HandlerFunc(collectAPIStats("copyobjectpart", maxClients(gz(httpTraceAll(api.CopyObjectPartHandler))))).
Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
Queries("partNumber", "{partNumber:.*}", "uploadId", "{uploadId:.*}")
// PutObjectPart
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjectpart", maxClients(gz(httpTraceHdrs(api.PutObjectPartHandler))))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
collectAPIStats("putobjectpart", maxClients(gz(httpTraceHdrs(api.PutObjectPartHandler))))).Queries("partNumber", "{partNumber:.*}", "uploadId", "{uploadId:.*}")
// ListObjectParts
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("listobjectparts", maxClients(gz(httpTraceAll(api.ListObjectPartsHandler))))).Queries("uploadId", "{uploadId:.*}")
@@ -285,7 +271,10 @@ func registerAPIRouter(router *mux.Router) {
// GetObjectLegalHold
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobjectlegalhold", maxClients(gz(httpTraceAll(api.GetObjectLegalHoldHandler))))).Queries("legal-hold", "")
// GetObject - note gzip compression is *not* added due to Range requests.
// GetObject with lambda ARNs
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobject", maxClients(gz(httpTraceHdrs(api.GetObjectLambdaHandler))))).Queries("lambdaArn", "{lambdaArn:.*}")
// GetObject
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobject", maxClients(gz(httpTraceHdrs(api.GetObjectHandler)))))
// CopyObject
@@ -389,6 +378,9 @@ func registerAPIRouter(router *mux.Router) {
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectsv2", maxClients(gz(httpTraceAll(api.ListObjectsV2Handler))))).Queries("list-type", "2")
// ListObjectVersions
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectversions", maxClients(gz(httpTraceAll(api.ListObjectVersionsMHandler))))).Queries("versions", "", "metadata", "true")
// ListObjectVersions
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectversions", maxClients(gz(httpTraceAll(api.ListObjectVersionsHandler))))).Queries("versions", "")
// GetBucketPolicyStatus
@@ -431,8 +423,9 @@ func registerAPIRouter(router *mux.Router) {
router.Methods(http.MethodHead).HandlerFunc(
collectAPIStats("headbucket", maxClients(gz(httpTraceAll(api.HeadBucketHandler)))))
// PostPolicy
router.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(
collectAPIStats("postpolicybucket", maxClients(gz(httpTraceHdrs(api.PostPolicyBucketHandler)))))
router.Methods(http.MethodPost).MatcherFunc(func(r *http.Request, _ *mux.RouteMatch) bool {
return isRequestPostPolicySignatureV4(r)
}).HandlerFunc(collectAPIStats("postpolicybucket", maxClients(gz(httpTraceHdrs(api.PostPolicyBucketHandler)))))
// DeleteMultipleObjects
router.Methods(http.MethodPost).HandlerFunc(
collectAPIStats("deletemultipleobjects", maxClients(gz(httpTraceAll(api.DeleteMultipleObjectsHandler))))).Queries("delete", "")
@@ -454,10 +447,16 @@ func registerAPIRouter(router *mux.Router) {
// MinIO extension API for replication.
//
// GetBucketReplicationMetrics
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketreplicationmetrics", maxClients(gz(httpTraceAll(api.GetBucketReplicationMetricsV2Handler))))).Queries("replication-metrics", "2")
// deprecated handler
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketreplicationmetrics", maxClients(gz(httpTraceAll(api.GetBucketReplicationMetricsHandler))))).Queries("replication-metrics", "")
// ValidateBucketReplicationCreds
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("checkbucketreplicationconfiguration", maxClients(gz(httpTraceAll(api.ValidateBucketReplicationCredsHandler))))).Queries("replication-check", "")
// Register rejected bucket APIs
for _, r := range rejectedBucketAPIs {
router.Methods(r.methods...).
@@ -513,8 +512,7 @@ func corsHandler(handler http.Handler) http.Handler {
"x-amz*",
"*",
}
return cors.New(cors.Options{
opts := cors.Options{
AllowOriginFunc: func(origin string) bool {
for _, allowedOrigin := range globalAPIConfig.getCorsAllowOrigins() {
if wildcard.MatchSimple(allowedOrigin, origin) {
@@ -535,5 +533,6 @@ func corsHandler(handler http.Handler) http.Handler {
AllowedHeaders: commonS3Headers,
ExposedHeaders: commonS3Headers,
AllowCredentials: true,
}).Handler(handler)
}
return cors.New(opts).Handler(handler)
}


@@ -94,14 +94,8 @@ func s3URLEncode(s string) string {
}
// s3EncodeName encodes string in response when encodingType is specified in AWS S3 requests.
func s3EncodeName(name string, encodingType string) (result string) {
// Quick path to exit
if encodingType == "" {
return name
}
encodingType = strings.ToLower(encodingType)
switch encodingType {
case "url":
func s3EncodeName(name, encodingType string) string {
if strings.ToLower(encodingType) == "url" {
return s3URLEncode(name)
}
return name
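
The rewrite collapses the old quick-path and switch into a single check: anything other than encoding-type=url (case-insensitive) returns the key unchanged. A standalone sketch of that control flow follows, using url.PathEscape as a stand-in encoder rather than MinIO's s3URLEncode, which applies the exact S3 character rules.

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// s3EncodeName encodes the key only when the client asked for encoding-type=url.
func s3EncodeName(name, encodingType string) string {
	if strings.ToLower(encodingType) == "url" {
		return url.PathEscape(name) // stand-in for MinIO's s3URLEncode
	}
	return name
}

func main() {
	fmt.Println(s3EncodeName("a b+c.txt", ""))    // returned as-is
	fmt.Println(s3EncodeName("a b+c.txt", "URL")) // percent-encoded form
}
```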

File diff suppressed because one or more lines are too long


@@ -25,7 +25,7 @@ import (
"encoding/hex"
"errors"
"io"
"io/ioutil"
"mime"
"net/http"
"net/url"
"strconv"
@@ -40,8 +40,8 @@ import (
xhttp "github.com/minio/minio/internal/http"
xjwt "github.com/minio/minio/internal/jwt"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/bucket/policy"
iampolicy "github.com/minio/pkg/iam/policy"
"github.com/minio/minio/internal/mcontext"
"github.com/minio/pkg/v2/policy"
)
// Verify if request has JWT.
@@ -74,8 +74,11 @@ func isRequestPresignedSignatureV2(r *http.Request) bool {
// Verify if request has AWS Post policy Signature Version '4'.
func isRequestPostPolicySignatureV4(r *http.Request) bool {
return strings.Contains(r.Header.Get(xhttp.ContentType), "multipart/form-data") &&
r.Method == http.MethodPost
mediaType, _, err := mime.ParseMediaType(r.Header.Get(xhttp.ContentType))
if err != nil {
return false
}
return mediaType == "multipart/form-data" && r.Method == http.MethodPost
}
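
The old check only did a substring match on the Content-Type header; the new version parses it. A small standalone sketch (not MinIO code) of the difference using Go's mime.ParseMediaType; the sample header values are made up.

```go
package main

import (
	"fmt"
	"mime"
	"strings"
)

// isMultipartFormData accepts "multipart/form-data" with any parameters
// (e.g. a boundary) but rejects media types that merely contain the string.
func isMultipartFormData(contentType string) bool {
	mediaType, _, err := mime.ParseMediaType(contentType)
	if err != nil {
		return false
	}
	return mediaType == "multipart/form-data"
}

func main() {
	for _, ct := range []string{
		"multipart/form-data; boundary=9a8c7b",
		"multipart/form-data-extended",
	} {
		fmt.Printf("%-35s substring=%v parsed=%v\n",
			ct, strings.Contains(ct, "multipart/form-data"), isMultipartFormData(ct))
	}
}
```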
// Verify if the request has AWS Streaming Signature Version '4'. This is only valid for 'PUT' operation.
@@ -84,7 +87,21 @@ func isRequestSignStreamingV4(r *http.Request) bool {
r.Method == http.MethodPut
}
// Verify if the request has AWS Streaming Signature Version '4'. This is only valid for 'PUT' operation.
func isRequestSignStreamingTrailerV4(r *http.Request) bool {
return r.Header.Get(xhttp.AmzContentSha256) == streamingContentSHA256Trailer &&
r.Method == http.MethodPut
}
// Verify if the request has AWS Streaming Signature Version '4', with unsigned content and trailer.
func isRequestUnsignedTrailerV4(r *http.Request) bool {
return r.Header.Get(xhttp.AmzContentSha256) == unsignedPayloadTrailer &&
r.Method == http.MethodPut && strings.Contains(r.Header.Get(xhttp.ContentEncoding), streamingContentEncoding)
}
// Authorization type.
//
//go:generate stringer -type=authType -trimprefix=authType $GOFILE
type authType int
// List of all supported auth types.
@@ -99,10 +116,12 @@ const (
authTypeSignedV2
authTypeJWT
authTypeSTS
authTypeStreamingSignedTrailer
authTypeStreamingUnsignedTrailer
)
// Get request authentication type.
func getRequestAuthType(r *http.Request) authType {
func getRequestAuthType(r *http.Request) (at authType) {
if r.URL != nil {
var err error
r.Form, err = url.ParseQuery(r.URL.RawQuery)
@@ -117,6 +136,10 @@ func getRequestAuthType(r *http.Request) authType {
return authTypePresignedV2
} else if isRequestSignStreamingV4(r) {
return authTypeStreamingSigned
} else if isRequestSignStreamingTrailerV4(r) {
return authTypeStreamingSignedTrailer
} else if isRequestUnsignedTrailerV4(r) {
return authTypeStreamingUnsignedTrailer
} else if isRequestSignatureV4(r) {
return authTypeSigned
} else if isRequestPresignedSignatureV4(r) {
@@ -133,7 +156,7 @@ func getRequestAuthType(r *http.Request) authType {
return authTypeUnknown
}
func validateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, map[string]interface{}, bool, APIErrorCode) {
func validateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, bool, APIErrorCode) {
var cred auth.Credentials
var owner bool
s3Err := ErrAccessDenied
@@ -142,37 +165,38 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
// We only support admin credentials to access admin APIs.
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
if s3Err != ErrNone {
return cred, nil, owner, s3Err
return cred, owner, s3Err
}
// we only support V4 (no presign) with auth body
s3Err = isReqAuthenticated(ctx, r, region, serviceS3)
}
if s3Err != ErrNone {
reqInfo := (&logger.ReqInfo{}).AppendTags("requestHeaders", dumpRequest(r))
ctx := logger.SetReqInfo(ctx, reqInfo)
logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description), logger.Application)
return cred, nil, owner, s3Err
return cred, owner, s3Err
}
return cred, cred.Claims, owner, ErrNone
logger.GetReqInfo(ctx).Cred = cred
logger.GetReqInfo(ctx).Owner = owner
logger.GetReqInfo(ctx).Region = globalSite.Region
return cred, owner, ErrNone
}
// checkAdminRequestAuth checks for authentication and authorization for the incoming
// request. It only accepts V2 and V4 requests. Presigned, JWT and anonymous requests
// are automatically rejected.
func checkAdminRequestAuth(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, region)
func checkAdminRequestAuth(ctx context.Context, r *http.Request, action policy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
cred, owner, s3Err := validateAdminSignature(ctx, r, region)
if s3Err != ErrNone {
return cred, s3Err
}
if globalIAMSys.IsAllowed(iampolicy.Args{
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.Action(action),
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
Action: policy.Action(action),
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: claims,
Claims: cred.Claims,
}) {
// Request is allowed return the appropriate access key.
return cred, ErrNone
@@ -204,7 +228,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{},
// that clients cannot decode the token using the temp
// secret keys and generate an entirely new claim by essentially
// hijacking the policies. We need to make sure that this is
// based an admin credential such that token cannot be decoded
// based on admin credential such that token cannot be decoded
// on the client side and is treated like an opaque value.
claims, err := auth.ExtractClaims(token, secret)
if err != nil {
@@ -223,7 +247,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{},
}
// Check if a session policy is set. If so, decode it here.
sp, spok := claims.Lookup(iampolicy.SessionPolicyName)
sp, spok := claims.Lookup(policy.SessionPolicyName)
if spok {
// Looks like subpolicy is set and is a string, if set then its
// base64 encoded, decode it. Decoding fails reject such
@@ -254,7 +278,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
return nil, ErrNoAccessKey
}
if token == "" && cred.IsTemp() {
if token == "" && cred.IsTemp() && !cred.IsServiceAccount() {
// Temporary credentials should always have x-amz-security-token
return nil, ErrInvalidToken
}
@@ -264,7 +288,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
return nil, ErrInvalidToken
}
if cred.IsTemp() && subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
if !cred.IsServiceAccount() && cred.IsTemp() && subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
// validate token for temporary credentials only.
return nil, ErrInvalidToken
}
@@ -294,24 +318,38 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
//
// returns APIErrorCode if any to be replied to the client.
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
_, _, s3Err = checkRequestAuthTypeCredential(ctx, r, action, bucketName, objectName)
logger.GetReqInfo(ctx).BucketName = bucketName
logger.GetReqInfo(ctx).ObjectName = objectName
_, _, s3Err = checkRequestAuthTypeCredential(ctx, r, action)
return s3Err
}
// Check request auth type verifies the incoming http request
// - validates the request signature
// - validates the policy action if anonymous tests bucket policies if any,
// for authenticated requests validates IAM policies.
//
// returns APIErrorCode if any to be replied to the client.
// Additionally returns the accessKey used in the request, and if this request is by an admin.
func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err APIErrorCode) {
// checkRequestAuthTypeWithVID is similar to checkRequestAuthType
// passes versionID additionally.
func checkRequestAuthTypeWithVID(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName, versionID string) (s3Err APIErrorCode) {
logger.GetReqInfo(ctx).BucketName = bucketName
logger.GetReqInfo(ctx).ObjectName = objectName
logger.GetReqInfo(ctx).VersionID = versionID
_, _, s3Err = checkRequestAuthTypeCredential(ctx, r, action)
return s3Err
}
func authenticateRequest(ctx context.Context, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
if logger.GetReqInfo(ctx) == nil {
logger.LogIf(ctx, errors.New("unexpected context.Context does not have a logger.ReqInfo"), logger.Minio)
return ErrAccessDenied
}
var cred auth.Credentials
var owner bool
switch getRequestAuthType(r) {
case authTypeUnknown, authTypeStreamingSigned:
return cred, owner, ErrSignatureVersionNotSupported
return ErrSignatureVersionNotSupported
case authTypePresignedV2, authTypeSignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return cred, owner, s3Err
return s3Err
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeSigned, authTypePresigned:
@@ -321,106 +359,162 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
region = ""
}
if s3Err = isReqAuthenticated(ctx, r, region, serviceS3); s3Err != ErrNone {
return cred, owner, s3Err
return s3Err
}
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return cred, owner, s3Err
return s3Err
}
// LocationConstraint is valid only for CreateBucketAction.
var locationConstraint string
logger.GetReqInfo(ctx).Cred = cred
logger.GetReqInfo(ctx).Owner = owner
logger.GetReqInfo(ctx).Region = globalSite.Region
// region is valid only for CreateBucketAction.
var region string
if action == policy.CreateBucketAction {
// To extract region from XML in request body, get copy of request body.
payload, err := ioutil.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
payload, err := io.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
return cred, owner, ErrMalformedXML
return ErrMalformedXML
}
// Populate payload to extract location constraint.
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
var s3Error APIErrorCode
locationConstraint, s3Error = parseLocationConstraint(r)
if s3Error != ErrNone {
return cred, owner, s3Error
r.Body = io.NopCloser(bytes.NewReader(payload))
region, s3Err = parseLocationConstraint(r)
if s3Err != ErrNone {
return s3Err
}
// Populate payload again to handle it in HTTP handler.
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
}
if cred.AccessKey != "" {
logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
r.Body = io.NopCloser(bytes.NewReader(payload))
}
logger.GetReqInfo(ctx).Region = region
return s3Err
}
func authorizeRequest(ctx context.Context, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
reqInfo := logger.GetReqInfo(ctx)
if reqInfo == nil {
return ErrAccessDenied
}
cred := reqInfo.Cred
owner := reqInfo.Owner
region := reqInfo.Region
bucket := reqInfo.BucketName
object := reqInfo.ObjectName
versionID := reqInfo.VersionID
if action != policy.ListAllMyBucketsAction && cred.AccessKey == "" {
// Anonymous checks are not meant for ListBuckets action
if globalPolicySys.IsAllowed(policy.Args{
// Anonymous checks are not meant for ListAllBuckets action
if globalPolicySys.IsAllowed(policy.BucketPolicyArgs{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: action,
BucketName: bucketName,
ConditionValues: getConditionValues(r, locationConstraint, "", nil),
BucketName: bucket,
ConditionValues: getConditionValues(r, region, auth.AnonymousCredentials),
IsOwner: false,
ObjectName: objectName,
ObjectName: object,
}) {
// Request is allowed return the appropriate access key.
return cred, owner, ErrNone
return ErrNone
}
if action == policy.ListBucketVersionsAction {
// In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission
// verify as a fallback.
if globalPolicySys.IsAllowed(policy.Args{
if globalPolicySys.IsAllowed(policy.BucketPolicyArgs{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, locationConstraint, "", nil),
BucketName: bucket,
ConditionValues: getConditionValues(r, region, auth.AnonymousCredentials),
IsOwner: false,
ObjectName: objectName,
ObjectName: object,
}) {
// Request is allowed return the appropriate access key.
return cred, owner, ErrNone
return ErrNone
}
}
return cred, owner, ErrAccessDenied
return ErrAccessDenied
}
if globalIAMSys.IsAllowed(iampolicy.Args{
if action == policy.DeleteObjectAction && versionID != "" {
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.Action(policy.DeleteObjectVersionAction),
BucketName: bucket,
ConditionValues: getConditionValues(r, "", cred),
ObjectName: object,
IsOwner: owner,
Claims: cred.Claims,
DenyOnly: true,
}) { // Request is not allowed if Deny action on DeleteObjectVersionAction
return ErrAccessDenied
}
}
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
ObjectName: objectName,
Action: action,
BucketName: bucket,
ConditionValues: getConditionValues(r, "", cred),
ObjectName: object,
IsOwner: owner,
Claims: cred.Claims,
}) {
// Request is allowed return the appropriate access key.
return cred, owner, ErrNone
return ErrNone
}
if action == policy.ListBucketVersionsAction {
// In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission
// verify as a fallback.
if globalIAMSys.IsAllowed(iampolicy.Args{
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
ObjectName: objectName,
Action: policy.ListBucketAction,
BucketName: bucket,
ConditionValues: getConditionValues(r, "", cred),
ObjectName: object,
IsOwner: owner,
Claims: cred.Claims,
}) {
// Request is allowed return the appropriate access key.
return cred, owner, ErrNone
return ErrNone
}
}
return cred, owner, ErrAccessDenied
return ErrAccessDenied
}
// Check request auth type verifies the incoming http request
// - validates the request signature
// - validates the policy action if anonymous tests bucket policies if any,
// for authenticated requests validates IAM policies.
//
// returns APIErrorCode if any to be replied to the client.
// Additionally returns the accessKey used in the request, and if this request is by an admin.
func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action policy.Action) (cred auth.Credentials, owner bool, s3Err APIErrorCode) {
s3Err = authenticateRequest(ctx, r, action)
reqInfo := logger.GetReqInfo(ctx)
if reqInfo == nil {
return cred, owner, ErrAccessDenied
}
cred = reqInfo.Cred
owner = reqInfo.Owner
if s3Err != ErrNone {
return cred, owner, s3Err
}
return cred, owner, authorizeRequest(ctx, r, action)
}
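
The credential/owner pair is no longer threaded through return values: authenticateRequest stores it in the request-scoped logger.ReqInfo and authorizeRequest reads it back. A heavily simplified, self-contained sketch of that split is below; the reqInfo type, the "root" access key, and the allow rule are all made up for illustration and are not MinIO's types.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// reqInfo is a toy stand-in for logger.ReqInfo: request-scoped state that the
// authentication step fills in and the authorization step later consumes.
type reqInfo struct {
	accessKey string
	owner     bool
	bucket    string
	object    string
}

type reqInfoKey struct{}

func authenticate(ctx context.Context, accessKey string) error {
	ri, ok := ctx.Value(reqInfoKey{}).(*reqInfo)
	if !ok {
		return errors.New("context is missing reqInfo")
	}
	ri.accessKey = accessKey
	ri.owner = accessKey == "root" // toy rule standing in for signature checks
	return nil
}

func authorize(ctx context.Context, action string) error {
	ri, ok := ctx.Value(reqInfoKey{}).(*reqInfo)
	if !ok {
		return errors.New("context is missing reqInfo")
	}
	if ri.accessKey == "" {
		return fmt.Errorf("%s on %s/%s: access denied", action, ri.bucket, ri.object)
	}
	return nil
}

func main() {
	ctx := context.WithValue(context.Background(), reqInfoKey{},
		&reqInfo{bucket: "mybucket", object: "a.txt"})
	if err := authenticate(ctx, "root"); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("authorize:", authorize(ctx, "s3:GetObject")) // authorize: <nil>
}
```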
// Verify if request has valid AWS Signature Version '2'.
@@ -473,7 +567,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
// The verification happens implicit during reading.
reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1)
reader, err := hash.NewReader(ctx, r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1)
if err != nil {
return toAPIErrorCode(ctx, err)
}
@@ -483,13 +577,15 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
// List of all support S3 auth types.
var supportedS3AuthTypes = map[authType]struct{}{
authTypeAnonymous: {},
authTypePresigned: {},
authTypePresignedV2: {},
authTypeSigned: {},
authTypeSignedV2: {},
authTypePostPolicy: {},
authTypeStreamingSigned: {},
authTypeAnonymous: {},
authTypePresigned: {},
authTypePresignedV2: {},
authTypeSigned: {},
authTypeSignedV2: {},
authTypePostPolicy: {},
authTypeStreamingSigned: {},
authTypeStreamingSignedTrailer: {},
authTypeStreamingUnsignedTrailer: {},
}
// Validate if the authType is valid and supported.
@@ -498,25 +594,27 @@ func isSupportedS3AuthType(aType authType) bool {
return ok
}
// setAuthHandler to validate authorization header for the incoming request.
func setAuthHandler(h http.Handler) http.Handler {
// setAuthMiddleware to validate authorization header for the incoming request.
func setAuthMiddleware(h http.Handler) http.Handler {
// handler for validating incoming authorization headers.
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
tc, ok := r.Context().Value(contextTraceReqKey).(*traceCtxt)
tc, ok := r.Context().Value(mcontext.ContextTraceKey).(*mcontext.TraceCtxt)
aType := getRequestAuthType(r)
if aType == authTypeSigned || aType == authTypeSignedV2 || aType == authTypeStreamingSigned {
switch aType {
case authTypeSigned, authTypeSignedV2, authTypeStreamingSigned, authTypeStreamingSignedTrailer:
// Verify if date headers are set, if not reject the request
amzDate, errCode := parseAmzDateHeader(r)
if errCode != ErrNone {
if ok {
tc.funcName = "handler.Auth"
tc.responseRecorder.LogErrBody = true
tc.FuncName = "handler.Auth"
tc.ResponseRecorder.LogErrBody = true
}
// All our internal APIs are sensitive towards Date
// header, for all requests where Date header is not
// present we will reject such clients.
defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r))
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(errCode), r.URL)
atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
return
@@ -526,25 +624,33 @@ func setAuthHandler(h http.Handler) http.Handler {
curTime := UTCNow()
if curTime.Sub(amzDate) > globalMaxSkewTime || amzDate.Sub(curTime) > globalMaxSkewTime {
if ok {
tc.funcName = "handler.Auth"
tc.responseRecorder.LogErrBody = true
tc.FuncName = "handler.Auth"
tc.ResponseRecorder.LogErrBody = true
}
defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r))
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrRequestTimeTooSkewed), r.URL)
atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
return
}
}
if isSupportedS3AuthType(aType) || aType == authTypeJWT || aType == authTypeSTS {
h.ServeHTTP(w, r)
return
case authTypeJWT, authTypeSTS:
h.ServeHTTP(w, r)
return
default:
if isSupportedS3AuthType(aType) {
h.ServeHTTP(w, r)
return
}
}
if ok {
tc.funcName = "handler.Auth"
tc.responseRecorder.LogErrBody = true
tc.FuncName = "handler.Auth"
tc.ResponseRecorder.LogErrBody = true
}
defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r))
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL)
atomic.AddUint64(&globalHTTPStats.rejectedRequestsAuth, 1)
})
@@ -582,17 +688,17 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
return ErrAccessDenied
}
conditions := getConditionValues(r, "", cred.AccessKey, cred.Claims)
conditions := getConditionValues(r, "", cred)
conditions["object-lock-mode"] = []string{string(retMode)}
conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
conditions["object-lock-retain-until-date"] = []string{retDate.UTC().Format(time.RFC3339)}
if retDays > 0 {
conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
}
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
byPassSet = globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.BypassGovernanceRetentionAction,
Action: policy.BypassGovernanceRetentionAction,
BucketName: bucketName,
ObjectName: objectName,
ConditionValues: conditions,
@@ -600,10 +706,10 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
Claims: cred.Claims,
})
}
if globalIAMSys.IsAllowed(iampolicy.Args{
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.PutObjectRetentionAction,
Action: policy.PutObjectRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
ObjectName: objectName,
@@ -621,42 +727,42 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
// isPutActionAllowed - check if PUT operation is allowed on the resource, this
// call verifies bucket policies and IAM policies, supports multi user
// checks etc.
func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action iampolicy.Action) (s3Err APIErrorCode) {
func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
var cred auth.Credentials
var owner bool
region := globalSite.Region
switch atype {
case authTypeUnknown:
return ErrSignatureVersionNotSupported
case authTypeSignedV2, authTypePresignedV2:
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
region := globalSite.Region
case authTypeStreamingSigned, authTypePresigned, authTypeSigned, authTypeStreamingSignedTrailer, authTypeStreamingUnsignedTrailer:
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return s3Err
}
if cred.AccessKey != "" {
logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
}
logger.GetReqInfo(ctx).Cred = cred
logger.GetReqInfo(ctx).Owner = owner
logger.GetReqInfo(ctx).Region = region
// Do not check for PutObjectRetentionAction permission,
// if mode and retain until date are not set.
// Can happen when bucket has default lock config set
if action == iampolicy.PutObjectRetentionAction &&
if action == policy.PutObjectRetentionAction &&
r.Header.Get(xhttp.AmzObjectLockMode) == "" &&
r.Header.Get(xhttp.AmzObjectLockRetainUntilDate) == "" {
return ErrNone
}
if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.Args{
if globalPolicySys.IsAllowed(policy.BucketPolicyArgs{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.Action(action),
Action: action,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", "", nil),
ConditionValues: getConditionValues(r, "", auth.AnonymousCredentials),
IsOwner: false,
ObjectName: objectName,
}) {
@@ -665,12 +771,12 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
return ErrAccessDenied
}
if globalIAMSys.IsAllowed(iampolicy.Args{
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: action,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
ConditionValues: getConditionValues(r, "", cred),
ObjectName: objectName,
IsOwner: owner,
Claims: cred.Claims,


@@ -21,7 +21,6 @@ import (
"bytes"
"context"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
@@ -29,7 +28,7 @@ import (
"time"
"github.com/minio/minio/internal/auth"
iampolicy "github.com/minio/pkg/iam/policy"
"github.com/minio/pkg/v2/policy"
)
type nullReader struct{}
@@ -44,7 +43,7 @@ func TestGetRequestAuthType(t *testing.T) {
req *http.Request
authT authType
}
nopCloser := ioutil.NopCloser(io.LimitReader(&nullReader{}, 1024))
nopCloser := io.NopCloser(io.LimitReader(&nullReader{}, 1024))
testCases := []testCase{
// Test case - 1
// Check for generic signature v4 header.
@@ -360,7 +359,10 @@ func mustNewSignedBadMD5Request(method string, urlStr string, contentLength int6
// Tests is requested authenticated function, tests replies for s3 errors.
func TestIsReqAuthenticated(t *testing.T) {
objLayer, fsDir, err := prepareFS()
ctx, cancel := context.WithCancel(GlobalContext)
defer cancel()
objLayer, fsDir, err := prepareFS(ctx)
if err != nil {
t.Fatal(err)
}
@@ -369,15 +371,10 @@ func TestIsReqAuthenticated(t *testing.T) {
t.Fatalf("unable initialize config file, %s", err)
}
initAllSubsystems()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
initAllSubsystems(ctx)
initConfigSubsystem(ctx, objLayer)
globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)
creds, err := auth.CreateCredentials("myuser", "mypassword")
if err != nil {
t.Fatalf("unable create credential, %s", err)
@@ -385,6 +382,8 @@ func TestIsReqAuthenticated(t *testing.T) {
globalActiveCred = creds
globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)
// List of test cases for validating http request authentication.
testCases := []struct {
req *http.Request
@@ -406,7 +405,7 @@ func TestIsReqAuthenticated(t *testing.T) {
for i, testCase := range testCases {
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region, serviceS3)
if s3Error != testCase.s3Error {
if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
if _, err := io.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
}
}
@@ -414,7 +413,10 @@ func TestIsReqAuthenticated(t *testing.T) {
}
func TestCheckAdminRequestAuthType(t *testing.T) {
objLayer, fsDir, err := prepareFS()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, fsDir, err := prepareFS(ctx)
if err != nil {
t.Fatal(err)
}
@@ -440,9 +442,8 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
{Request: mustNewPresignedV2Request(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
}
ctx := context.Background()
for i, testCase := range testCases {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, iampolicy.AllAdminActions, globalSite.Region); s3Error != testCase.ErrCode {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region); s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
}
}
@@ -452,7 +453,7 @@ func TestValidateAdminSignature(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, fsDir, err := prepareFS()
objLayer, fsDir, err := prepareFS(ctx)
if err != nil {
t.Fatal(err)
}
@@ -462,18 +463,18 @@ func TestValidateAdminSignature(t *testing.T) {
t.Fatalf("unable initialize config file, %s", err)
}
initAllSubsystems()
initAllSubsystems(ctx)
initConfigSubsystem(ctx, objLayer)
globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)
creds, err := auth.CreateCredentials("admin", "mypassword")
if err != nil {
t.Fatalf("unable create credential, %s", err)
}
globalActiveCred = creds
globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)
testCases := []struct {
AccessKey string
SecretKey string
@@ -492,7 +493,7 @@ func TestValidateAdminSignature(t *testing.T) {
if err := signRequestV4(req, testCase.AccessKey, testCase.SecretKey); err != nil {
t.Fatalf("Unable to inititalized new signed http request %s", err)
}
_, _, _, s3Error := validateAdminSignature(ctx, req, globalMinioDefaultRegion)
_, _, s3Error := validateAdminSignature(ctx, req, globalMinioDefaultRegion)
if s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i+1, testCase.ErrCode, s3Error)
}

cmd/authtype_string.go (new file, 34 lines)

@@ -0,0 +1,34 @@
// Code generated by "stringer -type=authType -trimprefix=authType auth-handler.go"; DO NOT EDIT.
package cmd
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[authTypeUnknown-0]
_ = x[authTypeAnonymous-1]
_ = x[authTypePresigned-2]
_ = x[authTypePresignedV2-3]
_ = x[authTypePostPolicy-4]
_ = x[authTypeStreamingSigned-5]
_ = x[authTypeSigned-6]
_ = x[authTypeSignedV2-7]
_ = x[authTypeJWT-8]
_ = x[authTypeSTS-9]
_ = x[authTypeStreamingSignedTrailer-10]
_ = x[authTypeStreamingUnsignedTrailer-11]
}
const _authType_name = "UnknownAnonymousPresignedPresignedV2PostPolicyStreamingSignedSignedSignedV2JWTSTSStreamingSignedTrailerStreamingUnsignedTrailer"
var _authType_index = [...]uint8{0, 7, 16, 25, 36, 46, 61, 67, 75, 78, 81, 103, 127}
func (i authType) String() string {
if i < 0 || i >= authType(len(_authType_index)-1) {
return "authType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _authType_name[_authType_index[i]:_authType_index[i+1]]
}
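
This generated file gives authType a human-readable String() form. A toy replica below shows how the stringer output works: the constant name minus the authType prefix is sliced out of one packed string by index. Only three constants are included here for illustration; the index table is recomputed for that subset.

```go
package main

import (
	"fmt"
	"strconv"
)

type authType int

const (
	authTypeUnknown authType = iota
	authTypeAnonymous
	authTypeSigned
)

// Packed name table and offsets, in the same style as the generated code above.
const _authType_name = "UnknownAnonymousSigned"

var _authType_index = [...]uint8{0, 7, 16, 22}

func (i authType) String() string {
	if i < 0 || i >= authType(len(_authType_index)-1) {
		return "authType(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _authType_name[_authType_index[i]:_authType_index[i+1]]
}

func main() {
	fmt.Println(authTypeSigned) // Signed
	fmt.Println(authType(42))   // authType(42)
}
```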


@@ -19,10 +19,14 @@ package cmd
import (
"context"
"fmt"
"runtime"
"strconv"
"time"
"github.com/minio/madmin-go"
"github.com/minio/minio/internal/pubsub"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
)
// healTask represents what to heal along with options
@@ -54,16 +58,46 @@ type healRoutine struct {
func activeListeners() int {
// Bucket notification and http trace are not costly, it is okay to ignore them
// while counting the number of concurrent connections
return int(globalHTTPListen.NumSubscribers(pubsub.MaskAll)) + int(globalTrace.NumSubscribers(pubsub.MaskAll))
return int(globalHTTPListen.Subscribers()) + int(globalTrace.Subscribers())
}
func waitForLowIO(maxIO int, maxWait time.Duration, currentIO func() int) {
// No need to wait run at full speed.
if maxIO <= 0 {
return
}
const waitTick = 100 * time.Millisecond
tmpMaxWait := maxWait
for currentIO() >= maxIO {
if tmpMaxWait > 0 {
if tmpMaxWait < waitTick {
time.Sleep(tmpMaxWait)
return
}
time.Sleep(waitTick)
tmpMaxWait -= waitTick
}
if tmpMaxWait <= 0 {
return
}
}
}
func currentHTTPIO() int {
httpServer := newHTTPServerFn()
if httpServer == nil {
return 0
}
return httpServer.GetRequestCount() - activeListeners()
}
func waitForLowHTTPReq() {
var currentIO func() int
if httpServer := newHTTPServerFn(); httpServer != nil {
currentIO = httpServer.GetRequestCount
}
globalHealConfig.Wait(currentIO, activeListeners)
maxIO, maxWait, _ := globalHealConfig.Clone()
waitForLowIO(maxIO, maxWait, currentHTTPIO)
}
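
The old waitForLowHTTPReq delegated waiting to globalHealConfig.Wait; the new waitForLowIO polls the request counter itself in 100ms ticks until the load drops below maxIO or maxWait is exhausted. Below is a standalone demo of that behavior; the in-flight counter is simulated here, not MinIO's HTTP request counter.

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// waitForLowIO mirrors the helper above: sleep in 100ms ticks while the
// observed I/O count is at or above maxIO, giving up once maxWait elapses.
func waitForLowIO(maxIO int, maxWait time.Duration, currentIO func() int) {
	if maxIO <= 0 {
		return // no limit configured, run at full speed
	}
	const waitTick = 100 * time.Millisecond
	tmpMaxWait := maxWait
	for currentIO() >= maxIO {
		if tmpMaxWait > 0 {
			if tmpMaxWait < waitTick {
				time.Sleep(tmpMaxWait)
				return
			}
			time.Sleep(waitTick)
			tmpMaxWait -= waitTick
		}
		if tmpMaxWait <= 0 {
			return
		}
	}
}

func main() {
	var inflight int64 = 5
	go func() {
		time.Sleep(300 * time.Millisecond)
		atomic.StoreInt64(&inflight, 0) // simulated load drops after 300ms
	}()
	start := time.Now()
	waitForLowIO(3, time.Second, func() int { return int(atomic.LoadInt64(&inflight)) })
	fmt.Printf("waited ~%v before healing resumed\n", time.Since(start).Round(50*time.Millisecond))
}
```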
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
@@ -89,8 +123,7 @@ func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer) {
var err error
switch task.bucket {
case nopHeal:
task.respCh <- healResult{err: errSkipFile}
continue
err = errSkipFile
case SlashSeparator:
res, err = healDiskFormat(ctx, objAPI, task.opts)
default:
@@ -101,7 +134,10 @@ func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer) {
}
}
task.respCh <- healResult{result: res, err: err}
if task.respCh != nil {
task.respCh <- healResult{result: res, err: err}
}
case <-ctx.Done():
return
}
@@ -110,9 +146,19 @@ func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer) {
func newHealRoutine() *healRoutine {
workers := runtime.GOMAXPROCS(0) / 2
if envHealWorkers := env.Get("_MINIO_HEAL_WORKERS", ""); envHealWorkers != "" {
if numHealers, err := strconv.Atoi(envHealWorkers); err != nil {
logger.LogIf(context.Background(), fmt.Errorf("invalid _MINIO_HEAL_WORKERS value: %w", err))
} else {
workers = numHealers
}
}
if workers == 0 {
workers = 4
}
return &healRoutine{
tasks: make(chan healTask),
workers: workers,


@@ -26,12 +26,15 @@ import (
"os"
"sort"
"strings"
"sync"
"time"
"github.com/dustin/go-humanize"
"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
)
const (
@@ -43,7 +46,8 @@ const (
// healingTracker is used to persist healing information during a heal.
type healingTracker struct {
disk StorageAPI `msg:"-"`
disk StorageAPI `msg:"-"`
mu *sync.RWMutex `msg:"-"`
ID string
PoolIndex int
@@ -79,6 +83,10 @@ type healingTracker struct {
// Filled during heal.
HealedBuckets []string
// ID of the current healing operation
HealID string
// Add future tracking capabilities
// Be sure that they are included in toHealingDisk
}
@@ -108,21 +116,76 @@ func loadHealingTracker(ctx context.Context, disk StorageAPI) (*healingTracker,
}
h.disk = disk
h.ID = diskID
h.mu = &sync.RWMutex{}
return &h, nil
}
// newHealingTracker will create a new healing tracker for the disk.
func newHealingTracker(disk StorageAPI) *healingTracker {
diskID, _ := disk.GetDiskID()
h := healingTracker{
disk: disk,
ID: diskID,
Path: disk.String(),
Endpoint: disk.Endpoint().String(),
Started: time.Now().UTC(),
func newHealingTracker() *healingTracker {
return &healingTracker{
mu: &sync.RWMutex{},
}
}
func initHealingTracker(disk StorageAPI, healID string) *healingTracker {
h := newHealingTracker()
diskID, _ := disk.GetDiskID()
h.disk = disk
h.ID = diskID
h.HealID = healID
h.Path = disk.String()
h.Endpoint = disk.Endpoint().String()
h.Started = time.Now().UTC()
h.PoolIndex, h.SetIndex, h.DiskIndex = disk.GetDiskLoc()
return &h
return h
}
func (h healingTracker) getLastUpdate() time.Time {
h.mu.RLock()
defer h.mu.RUnlock()
return h.LastUpdate
}
func (h healingTracker) getBucket() string {
h.mu.RLock()
defer h.mu.RUnlock()
return h.Bucket
}
func (h *healingTracker) setBucket(bucket string) {
h.mu.Lock()
defer h.mu.Unlock()
h.Bucket = bucket
}
func (h healingTracker) getObject() string {
h.mu.RLock()
defer h.mu.RUnlock()
return h.Object
}
func (h *healingTracker) setObject(object string) {
h.mu.Lock()
defer h.mu.Unlock()
h.Object = object
}
func (h *healingTracker) updateProgress(success bool, bytes uint64) {
h.mu.Lock()
defer h.mu.Unlock()
if success {
h.ItemsHealed++
h.BytesDone += bytes
} else {
h.ItemsFailed++
h.BytesFailed += bytes
}
}
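
The new mu field means several heal workers can report progress on the same tracker concurrently. A trimmed-down standalone sketch (field names shortened, not the real healingTracker) showing the locked counter updates:

```go
package main

import (
	"fmt"
	"sync"
)

// tracker is a toy stand-in for healingTracker: the mutex protects the
// progress counters from concurrent updateProgress calls.
type tracker struct {
	mu          sync.RWMutex
	itemsHealed uint64
	itemsFailed uint64
	bytesDone   uint64
	bytesFailed uint64
}

func (t *tracker) updateProgress(success bool, bytes uint64) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if success {
		t.itemsHealed++
		t.bytesDone += bytes
	} else {
		t.itemsFailed++
		t.bytesFailed += bytes
	}
}

func main() {
	var t tracker
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			t.updateProgress(i%4 != 0, 1<<20) // a few simulated failures
		}(i)
	}
	wg.Wait()
	fmt.Println("healed:", t.itemsHealed, "failed:", t.itemsFailed)
}
```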
// update will update the tracker on the disk.
@@ -131,15 +194,18 @@ func (h *healingTracker) update(ctx context.Context) error {
if h.disk.Healing() == nil {
return fmt.Errorf("healingTracker: drive %q is not marked as healing", h.ID)
}
h.mu.Lock()
if h.ID == "" || h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
h.ID, _ = h.disk.GetDiskID()
h.PoolIndex, h.SetIndex, h.DiskIndex = h.disk.GetDiskLoc()
}
h.mu.Unlock()
return h.save(ctx)
}
// save will unconditionally save the tracker and will be created if not existing.
func (h *healingTracker) save(ctx context.Context) error {
h.mu.Lock()
if h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
// Attempt to get location.
if api := newObjectLayerFn(); api != nil {
@@ -150,6 +216,7 @@ func (h *healingTracker) save(ctx context.Context) error {
}
h.LastUpdate = time.Now().UTC()
htrackerBytes, err := h.MarshalMsg(nil)
h.mu.Unlock()
if err != nil {
return err
}
@@ -165,12 +232,14 @@ func (h *healingTracker) delete(ctx context.Context) error {
pathJoin(bucketMetaPrefix, healingTrackerFilename),
DeleteOptions{
Recursive: false,
Force: false,
Immediate: false,
},
)
}
func (h *healingTracker) isHealed(bucket string) bool {
h.mu.RLock()
defer h.mu.RUnlock()
for _, v := range h.HealedBuckets {
if v == bucket {
return true
@@ -181,6 +250,9 @@ func (h *healingTracker) isHealed(bucket string) bool {
// resume will reset progress to the numbers at the start of the bucket.
func (h *healingTracker) resume() {
h.mu.Lock()
defer h.mu.Unlock()
h.ItemsHealed = h.ResumeItemsHealed
h.ItemsFailed = h.ResumeItemsFailed
h.BytesDone = h.ResumeBytesDone
@@ -190,6 +262,9 @@ func (h *healingTracker) resume() {
// bucketDone should be called when a bucket is done healing.
// Adds the bucket to the list of healed buckets and updates resume numbers.
func (h *healingTracker) bucketDone(bucket string) {
h.mu.Lock()
defer h.mu.Unlock()
h.ResumeItemsHealed = h.ItemsHealed
h.ResumeItemsFailed = h.ItemsFailed
h.ResumeBytesDone = h.BytesDone
@@ -206,6 +281,9 @@ func (h *healingTracker) bucketDone(bucket string) {
// setQueuedBuckets will add buckets, but exclude any that is already in h.HealedBuckets.
// Order is preserved.
func (h *healingTracker) setQueuedBuckets(buckets []BucketInfo) {
h.mu.Lock()
defer h.mu.Unlock()
s := set.CreateStringSet(h.HealedBuckets...)
h.QueuedBuckets = make([]string, 0, len(buckets))
for _, b := range buckets {
@@ -216,17 +294,25 @@ func (h *healingTracker) setQueuedBuckets(buckets []BucketInfo) {
}
func (h *healingTracker) printTo(writer io.Writer) {
h.mu.RLock()
defer h.mu.RUnlock()
b, err := json.MarshalIndent(h, "", " ")
if err != nil {
writer.Write([]byte(err.Error()))
return
}
writer.Write(b)
}
// toHealingDisk converts the information to madmin.HealingDisk
func (h *healingTracker) toHealingDisk() madmin.HealingDisk {
h.mu.RLock()
defer h.mu.RUnlock()
return madmin.HealingDisk{
ID: h.ID,
HealID: h.HealID,
Endpoint: h.Endpoint,
PoolIndex: h.PoolIndex,
SetIndex: h.SetIndex,
@@ -261,10 +347,15 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)
go monitorLocalDisksAndHeal(ctx, z)
if env.Get("_MINIO_AUTO_DRIVE_HEALING", config.EnableOn) == config.EnableOn || env.Get("_MINIO_AUTO_DISK_HEALING", config.EnableOn) == config.EnableOn {
go monitorLocalDisksAndHeal(ctx, z)
}
}
func getLocalDisksToHeal() (disksToHeal Endpoints) {
globalLocalDrivesMu.RLock()
globalLocalDrives := globalLocalDrives
globalLocalDrivesMu.RUnlock()
for _, disk := range globalLocalDrives {
_, err := disk.GetDiskID()
if errors.Is(err, errUnformattedDisk) {
@@ -286,16 +377,14 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {
var newDiskHealingTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint) error {
logger.Info(fmt.Sprintf("Proceeding to heal '%s' - 'mc admin heal alias/ --verbose' to check the status.", endpoint))
disk, format, err := connectEndpoint(endpoint)
if err != nil {
return fmt.Errorf("Error: %w, %s", err, endpoint)
}
defer disk.Close()
poolIdx := globalEndpoints.GetLocalPoolIdx(disk.Endpoint())
if poolIdx < 0 {
return fmt.Errorf("unexpected pool index (%d) found in %s", poolIdx, disk.Endpoint())
return fmt.Errorf("unexpected pool index (%d) found for %s", poolIdx, disk.Endpoint())
}
// Calculate the set index where the current endpoint belongs
@@ -306,20 +395,36 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
return err
}
if setIdx < 0 {
return fmt.Errorf("unexpected set index (%d) found in %s", setIdx, disk.Endpoint())
return fmt.Errorf("unexpected set index (%d) found for %s", setIdx, disk.Endpoint())
}
// Prevent parallel erasure set healing
locker := z.NewNSLock(minioMetaBucket, fmt.Sprintf("new-drive-healing/%s/%d/%d", endpoint, poolIdx, setIdx))
locker := z.NewNSLock(minioMetaBucket, fmt.Sprintf("new-drive-healing/%d/%d", poolIdx, setIdx))
lkctx, err := locker.GetLock(ctx, newDiskHealingTimeout)
if err != nil {
return err
return fmt.Errorf("Healing of drive '%v' on %s pool, belonging to %s erasure set already in progress: %w",
disk, humanize.Ordinal(poolIdx+1), humanize.Ordinal(setIdx+1), err)
}
ctx = lkctx.Context()
defer locker.Unlock(lkctx.Cancel)
defer locker.Unlock(lkctx)
// Load healing tracker in this disk
tracker, err := loadHealingTracker(ctx, disk)
if err != nil {
// A healing tracker may be deleted if another disk in the
// same erasure set with same healing-id successfully finished
// healing.
if errors.Is(err, errFileNotFound) {
return nil
}
logger.LogIf(ctx, fmt.Errorf("Unable to load healing tracker on '%s': %w, re-initializing..", disk, err))
tracker = initHealingTracker(disk, mustGetUUID())
}
logger.Info(fmt.Sprintf("Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint))
buckets, _ := z.ListBuckets(ctx, BucketOptions{})
// Buckets data are dispersed in multiple zones/sets, make
// Buckets data are dispersed in multiple pools/sets, make
// sure to heal all bucket metadata configuration.
buckets = append(buckets, BucketInfo{
Name: pathJoin(minioMetaBucket, minioConfigPrefix),
@@ -337,16 +442,7 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
})
if serverDebugLog {
logger.Info("Healing drive '%v' on %s pool", disk, humanize.Ordinal(poolIdx+1))
}
// Load healing tracker in this disk
tracker, err := loadHealingTracker(ctx, disk)
if err != nil {
// So someone changed the drives underneath, healing tracker missing.
logger.LogIf(ctx, fmt.Errorf("Healing tracker missing on '%s', drive was swapped again on %s pool: %w",
disk, humanize.Ordinal(poolIdx+1), err))
tracker = newHealingTracker(disk)
logger.Info("Healing drive '%v' on %s pool, belonging to %s erasure set", disk, humanize.Ordinal(poolIdx+1), humanize.Ordinal(setIdx+1))
}
// Load bucket totals
@@ -369,9 +465,13 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
}
if tracker.ItemsFailed > 0 {
logger.Info("Healing drive '%s' failed (healed: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsFailed)
logger.Info("Healing of drive '%s' failed (healed: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsFailed)
} else {
logger.Info("Healing drive '%s' complete (healed: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsFailed)
logger.Info("Healing of drive '%s' complete (healed: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsFailed)
}
if len(tracker.QueuedBuckets) > 0 {
return fmt.Errorf("not all buckets were healed: %v", tracker.QueuedBuckets)
}
if serverDebugLog {
@@ -379,7 +479,29 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
logger.Info("\n")
}
logger.LogIf(ctx, tracker.delete(ctx))
if tracker.HealID == "" { // HealID was empty only before Feb 2023
logger.LogIf(ctx, tracker.delete(ctx))
return nil
}
// Remove .healing.bin from all disks with similar heal-id
disks, err := z.GetDisks(poolIdx, setIdx)
if err != nil {
return err
}
for _, disk := range disks {
t, err := loadHealingTracker(ctx, disk)
if err != nil {
if !errors.Is(err, errFileNotFound) {
logger.LogIf(ctx, err)
}
continue
}
if t.HealID == tracker.HealID {
t.delete(ctx)
}
}
return nil
}
@@ -415,10 +537,13 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools) {
for _, disk := range healDisks {
go func(disk Endpoint) {
globalBackgroundHealState.markDiskForHealing(disk)
err := healFreshDisk(ctx, z, disk)
if err != nil {
printEndpointError(disk, err, false)
globalBackgroundHealState.setDiskHealingStatus(disk, true)
if err := healFreshDisk(ctx, z, disk); err != nil {
globalBackgroundHealState.setDiskHealingStatus(disk, false)
timedout := OperationTimedOut{}
if !errors.Is(err, context.Canceled) && !errors.As(err, &timedout) {
printEndpointError(disk, err, false)
}
return
}
// Only upon success pop the healed disk.


@@ -182,6 +182,12 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
}
case "HealID":
z.HealID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HealID")
return
}
default:
err = dc.Skip()
if err != nil {
@@ -195,9 +201,9 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
// EncodeMsg implements msgp.Encodable
func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 22
// map header, size 23
// write "ID"
err = en.Append(0xde, 0x0, 0x16, 0xa2, 0x49, 0x44)
err = en.Append(0xde, 0x0, 0x17, 0xa2, 0x49, 0x44)
if err != nil {
return
}
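// Note (explanatory, not part of the generated file): 0xde is the MessagePack
// map16 marker and the next two bytes are the big-endian field count, so
// 0x00,0x16 encodes 22 fields and 0x00,0x17 encodes 23, which accounts for the
// new "HealID" field appended at the end of this map.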
@@ -430,15 +436,25 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
return
}
}
// write "HealID"
err = en.Append(0xa6, 0x48, 0x65, 0x61, 0x6c, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.HealID)
if err != nil {
err = msgp.WrapError(err, "HealID")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 22
// map header, size 23
// string "ID"
o = append(o, 0xde, 0x0, 0x16, 0xa2, 0x49, 0x44)
o = append(o, 0xde, 0x0, 0x17, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
// string "PoolIndex"
o = append(o, 0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
@@ -509,6 +525,9 @@ func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
for za0002 := range z.HealedBuckets {
o = msgp.AppendString(o, z.HealedBuckets[za0002])
}
// string "HealID"
o = append(o, 0xa6, 0x48, 0x65, 0x61, 0x6c, 0x49, 0x44)
o = msgp.AppendString(o, z.HealID)
return
}
@@ -688,6 +707,12 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
}
case "HealID":
z.HealID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealID")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
@@ -710,5 +735,6 @@ func (z *healingTracker) Msgsize() (s int) {
for za0002 := range z.HealedBuckets {
s += msgp.StringPrefixSize + len(z.HealedBuckets[za0002])
}
s += 7 + msgp.StringPrefixSize + len(z.HealID)
return
}

cmd/batch-expire.go Normal file

@@ -0,0 +1,677 @@
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"runtime"
"strconv"
"time"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/bucket/versioning"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/wildcard"
"github.com/minio/pkg/v2/workers"
)
// expire: # Expire objects that match a condition
// apiVersion: v1
// bucket: mybucket # Bucket where this batch job will expire matching objects from
// prefix: myprefix # (Optional) Prefix under which this job will expire objects matching the rules below.
// rules:
// - type: object # regular objects with zero or more older versions
// name: NAME # match object names that satisfy the wildcard expression.
// olderThan: 70h # match objects older than this value
// createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
// tags:
// - key: name
// value: pick* # match objects with tag 'name', all values starting with 'pick'
// metadata:
// - key: content-type
// value: image/* # match objects with 'content-type', all values starting with 'image/'
// size:
// lessThan: "10MiB" # match objects with size less than this value (e.g. 10MiB)
// greaterThan: 1MiB # match objects with size greater than this value (e.g. 1MiB)
// purge:
// # retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
// # retainVersions: 5 # keep the latest 5 versions of the object.
//
// - type: deleted # objects with delete marker as their latest version
// name: NAME # match object names that satisfy the wildcard expression.
// olderThan: 10h # match objects older than this value (e.g. 7d10h31s)
// createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
// purge:
// # retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
// # retainVersions: 5 # keep the latest 5 versions of the object including delete markers.
//
// notify:
// endpoint: https://notify.endpoint # notification endpoint to receive job completion status
// token: Bearer xxxxx # optional authentication token for the notification endpoint
//
// retry:
// attempts: 10 # number of retries for the job before giving up
// delay: 500ms # least amount of delay between each retry
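//
// A job described by the YAML above is submitted and tracked from the client
// side; a minimal sketch, assuming the standard `mc batch` subcommands and an
// alias named `myminio` (adjust names to your deployment):
//
//	mc batch generate myminio expire > expire.yaml   # emit a template to edit
//	mc batch start myminio ./expire.yaml             # prints the new job ID
//	mc batch status myminio <job-id>                 # follow job progress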
//go:generate msgp -file $GOFILE
// BatchJobExpirePurge holds the non-negative number of object versions to retain
type BatchJobExpirePurge struct {
RetainVersions int `yaml:"retainVersions" json:"retainVersions"`
}
// Validate returns nil if the value is valid, i.e. >= 0.
func (p BatchJobExpirePurge) Validate() error {
if p.RetainVersions < 0 {
return errors.New("retainVersions must be >= 0")
}
return nil
}
// BatchJobExpireFilter holds all the filters currently supported for batch expiration
type BatchJobExpireFilter struct {
OlderThan time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
CreatedBefore *time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"`
Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"`
Size BatchJobSizeFilter `yaml:"size" json:"size"`
Type string `yaml:"type" json:"type"`
Name string `yaml:"name" json:"name"`
Purge BatchJobExpirePurge `yaml:"purge" json:"purge"`
}
// Matches returns true if obj matches the filter conditions specified in ef.
func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool {
switch ef.Type {
case BatchJobExpireObject:
if obj.DeleteMarker {
return false
}
case BatchJobExpireDeleted:
if !obj.DeleteMarker {
return false
}
default:
// we should never come here, Validate should have caught this.
logger.LogOnceIf(context.Background(), fmt.Errorf("invalid filter type: %s", ef.Type), ef.Type)
return false
}
if len(ef.Name) > 0 && !wildcard.Match(ef.Name, obj.Name) {
return false
}
if ef.OlderThan > 0 && now.Sub(obj.ModTime) <= ef.OlderThan {
return false
}
if ef.CreatedBefore != nil && !obj.ModTime.Before(*ef.CreatedBefore) {
return false
}
if len(ef.Tags) > 0 && !obj.DeleteMarker {
// Only parse object tags if tags filter is specified.
var tagMap map[string]string
if len(obj.UserTags) != 0 {
t, err := tags.ParseObjectTags(obj.UserTags)
if err != nil {
return false
}
tagMap = t.ToMap()
}
for _, kv := range ef.Tags {
// Object (version) must match all tags specified in
// the filter
var match bool
for t, v := range tagMap {
if kv.Match(BatchJobKV{Key: t, Value: v}) {
match = true
}
}
if !match {
return false
}
}
}
if len(ef.Metadata) > 0 && !obj.DeleteMarker {
for _, kv := range ef.Metadata {
// Object (version) must match all x-amz-meta and
// standard metadata headers
// specified in the filter
var match bool
for k, v := range obj.UserDefined {
if !stringsHasPrefixFold(k, "x-amz-meta-") && !isStandardHeader(k) {
continue
}
// We only need to match x-amz-meta or standardHeaders
if kv.Match(BatchJobKV{Key: k, Value: v}) {
match = true
}
}
if !match {
return false
}
}
}
return ef.Size.InRange(obj.Size)
}
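// Illustrative sketch (not part of this change) of how a rule is evaluated
// against an object; it assumes the zero-value Size filter admits any size and
// uses a made-up object name:
//
//	rule := BatchJobExpireFilter{
//		Type:      BatchJobExpireObject,
//		Name:      "logs/*",
//		OlderThan: 70 * time.Hour,
//	}
//	obj := ObjectInfo{Name: "logs/app.log", ModTime: time.Now().Add(-72 * time.Hour)}
//	_ = rule.Matches(obj, time.Now()) // true only if the name, age and size checks all pass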
const (
// BatchJobExpireObject - object type
BatchJobExpireObject string = "object"
// BatchJobExpireDeleted - delete marker type
BatchJobExpireDeleted string = "deleted"
)
// Validate returns nil if ef has valid fields, validation error otherwise.
func (ef BatchJobExpireFilter) Validate() error {
switch ef.Type {
case BatchJobExpireObject:
case BatchJobExpireDeleted:
if len(ef.Tags) > 0 || len(ef.Metadata) > 0 {
return errors.New("invalid batch-expire rule filter")
}
default:
return errors.New("invalid batch-expire type")
}
for _, tag := range ef.Tags {
if err := tag.Validate(); err != nil {
return err
}
}
for _, meta := range ef.Metadata {
if err := meta.Validate(); err != nil {
return err
}
}
if err := ef.Purge.Validate(); err != nil {
return err
}
if err := ef.Size.Validate(); err != nil {
return err
}
if ef.CreatedBefore != nil && !ef.CreatedBefore.Before(time.Now()) {
return errors.New("CreatedBefore is in the future")
}
return nil
}
// BatchJobExpire represents configuration parameters for a batch expiration
// job typically supplied in yaml form
type BatchJobExpire struct {
APIVersion string `yaml:"apiVersion" json:"apiVersion"`
Bucket string `yaml:"bucket" json:"bucket"`
Prefix string `yaml:"prefix" json:"prefix"`
NotificationCfg BatchJobNotification `yaml:"notify" json:"notify"`
Retry BatchJobRetry `yaml:"retry" json:"retry"`
Rules []BatchJobExpireFilter `yaml:"rules" json:"rules"`
}
// Notify sends the job's completion status (success or failure) to the configured notification endpoint, if any.
func (r BatchJobExpire) Notify(ctx context.Context, body io.Reader) error {
if r.NotificationCfg.Endpoint == "" {
return nil
}
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
req, err := http.NewRequestWithContext(ctx, http.MethodPost, r.NotificationCfg.Endpoint, body)
if err != nil {
return err
}
if r.NotificationCfg.Token != "" {
req.Header.Set("Authorization", r.NotificationCfg.Token)
}
clnt := http.Client{Transport: getRemoteInstanceTransport}
resp, err := clnt.Do(req)
if err != nil {
return err
}
xhttp.DrainBody(resp.Body)
if resp.StatusCode != http.StatusOK {
return errors.New(resp.Status)
}
return nil
}
// Expire expires object versions that have already matched the supplied filter conditions.
func (r *BatchJobExpire) Expire(ctx context.Context, api ObjectLayer, vc *versioning.Versioning, objsToDel []ObjectToDelete) []error {
opts := ObjectOptions{
PrefixEnabledFn: vc.PrefixEnabled,
VersionSuspended: vc.Suspended(),
}
_, errs := api.DeleteObjects(ctx, r.Bucket, objsToDel, opts)
return errs
}
const (
batchExpireName = "batch-expire.bin"
batchExpireFormat = 1
batchExpireVersionV1 = 1
batchExpireVersion = batchExpireVersionV1
batchExpireAPIVersion = "v1"
batchExpireJobDefaultRetries = 3
batchExpireJobDefaultRetryDelay = 250 * time.Millisecond
)
type objInfoCache map[string]*ObjectInfo
func newObjInfoCache() objInfoCache {
return objInfoCache(make(map[string]*ObjectInfo))
}
func (oiCache objInfoCache) Add(toDel ObjectToDelete, oi *ObjectInfo) {
oiCache[fmt.Sprintf("%s-%s", toDel.ObjectName, toDel.VersionID)] = oi
}
func (oiCache objInfoCache) Get(toDel ObjectToDelete) (*ObjectInfo, bool) {
oi, ok := oiCache[fmt.Sprintf("%s-%s", toDel.ObjectName, toDel.VersionID)]
return oi, ok
}
func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo, job BatchJobRequest, api ObjectLayer, wk *workers.Workers, expireCh <-chan []expireObjInfo) {
vc, _ := globalBucketVersioningSys.Get(r.Bucket)
retryAttempts := r.Retry.Attempts
delay := job.Expire.Retry.Delay
if delay == 0 {
delay = batchExpireJobDefaultRetryDelay
}
var i int
for toExpire := range expireCh {
select {
case <-ctx.Done():
return
default:
}
if i > 0 {
if wait := globalBatchConfig.ExpirationWait(); wait > 0 {
time.Sleep(wait)
}
}
i++
wk.Take()
go func(toExpire []expireObjInfo) {
defer wk.Give()
toExpireAll := make([]ObjectInfo, 0, len(toExpire))
toDel := make([]ObjectToDelete, 0, len(toExpire))
oiCache := newObjInfoCache()
for _, exp := range toExpire {
if exp.ExpireAll {
toExpireAll = append(toExpireAll, exp.ObjectInfo)
continue
}
// Cache ObjectInfo value via pointers for
// subsequent use to track objects which
// couldn't be deleted.
od := ObjectToDelete{
ObjectV: ObjectV{
ObjectName: exp.Name,
VersionID: exp.VersionID,
},
}
toDel = append(toDel, od)
oiCache.Add(od, &exp.ObjectInfo)
}
var done bool
// DeleteObject(deletePrefix: true) to expire all versions of an object
for _, exp := range toExpireAll {
var success bool
for attempts := 1; attempts <= retryAttempts; attempts++ {
select {
case <-ctx.Done():
done = true
default:
}
stopFn := globalBatchJobsMetrics.trace(batchJobMetricExpire, ri.JobID, attempts)
_, err := api.DeleteObject(ctx, exp.Bucket, exp.Name, ObjectOptions{
DeletePrefix: true,
})
if err != nil {
stopFn(exp, err)
logger.LogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", toExpire[i].Bucket, toExpire[i].Name, toExpire[i].VersionID, err, attempts))
} else {
stopFn(exp, err)
success = true
break
}
}
ri.trackMultipleObjectVersions(r.Bucket, exp, success)
if done {
break
}
}
if done {
return
}
// DeleteMultiple objects
toDelCopy := make([]ObjectToDelete, len(toDel))
for attempts := 1; attempts <= retryAttempts; attempts++ {
select {
case <-ctx.Done():
return
default:
}
stopFn := globalBatchJobsMetrics.trace(batchJobMetricExpire, ri.JobID, attempts)
// Copying toDel to select from objects whose
// deletion failed
copy(toDelCopy, toDel)
var failed int
errs := r.Expire(ctx, api, vc, toDel)
// reslice toDel in preparation for next retry
// attempt
toDel = toDel[:0]
for i, err := range errs {
if err != nil {
stopFn(toDelCopy[i], err)
logger.LogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID, err, attempts))
failed++
if attempts == retryAttempts { // all retry attempts failed, record failure
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
ri.trackCurrentBucketObject(r.Bucket, *oi, false)
}
} else {
toDel = append(toDel, toDelCopy[i])
}
} else {
stopFn(toDelCopy[i], nil)
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
ri.trackCurrentBucketObject(r.Bucket, *oi, true)
}
}
}
globalBatchJobsMetrics.save(ri.JobID, ri)
if failed == 0 {
break
}
// Add a delay between retry attempts
if attempts < retryAttempts {
time.Sleep(delay)
}
}
}(toExpire)
}
}
type expireObjInfo struct {
ObjectInfo
ExpireAll bool
}
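// ExpireAll is set by the job driver (see Start below) when the matched rule's
// purge.retainVersions is 0; such entries are removed wholesale via
// DeleteObject with DeletePrefix set, while the remaining entries are expired
// per version through DeleteObjects in batchObjsForDelete above.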
// Start starts the batch expiration job; it resumes a pending job, if any, identified by "job.ID"
func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
ri := &batchJobInfo{
JobID: job.ID,
JobType: string(job.Type()),
StartTime: job.Started,
}
if err := ri.load(ctx, api, job); err != nil {
return err
}
globalBatchJobsMetrics.save(job.ID, ri)
lastObject := ri.Object
now := time.Now().UTC()
workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_EXPIRATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
if err != nil {
return err
}
wk, err := workers.New(workerSize)
if err != nil {
// invalid worker size.
return err
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
results := make(chan ObjectInfo, workerSize)
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
Marker: lastObject,
LatestOnly: false, // we need to visit all versions of the object to implement purge: retainVersions
VersionsSort: WalkVersionsSortDesc,
}); err != nil {
// No need to retry if we can't list objects on source.
return err
}
// Goroutine to periodically save batch-expire job's in-memory state
saverQuitCh := make(chan struct{})
go func() {
saveTicker := time.NewTicker(10 * time.Second)
defer saveTicker.Stop()
for {
select {
case <-saveTicker.C:
// persist in-memory state to disk every 10 secs.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
case <-ctx.Done():
// persist in-memory state immediately before exiting due to context cancelation.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
return
case <-saverQuitCh:
// persist in-memory state immediately to disk.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
return
}
}
}()
expireCh := make(chan []expireObjInfo, workerSize)
go batchObjsForDelete(ctx, r, ri, job, api, wk, expireCh)
var (
prevObj ObjectInfo
matchedFilter BatchJobExpireFilter
versionsCount int
toDel []expireObjInfo
)
for result := range results {
// Apply filter to find the matching rule to apply expiry
// actions accordingly.
// nolint:gocritic
if result.IsLatest {
// send down filtered entries to be deleted using
// DeleteObjects method
if len(toDel) > 10 { // batch up to 10 objects/versions to be expired simultaneously.
xfer := make([]expireObjInfo, len(toDel))
copy(xfer, toDel)
var done bool
select {
case <-ctx.Done():
done = true
case expireCh <- xfer:
toDel = toDel[:0] // resetting toDel
}
if done {
break
}
}
var match BatchJobExpireFilter
var found bool
for _, rule := range r.Rules {
if rule.Matches(result, now) {
match = rule
found = true
break
}
}
if !found {
continue
}
prevObj = result
matchedFilter = match
versionsCount = 1
// Include the latest version
if matchedFilter.Purge.RetainVersions == 0 {
toDel = append(toDel, expireObjInfo{
ObjectInfo: result,
ExpireAll: true,
})
continue
}
} else if prevObj.Name == result.Name {
if matchedFilter.Purge.RetainVersions == 0 {
continue // including latest version in toDel suffices, skipping other versions
}
versionsCount++
} else {
continue
}
if versionsCount <= matchedFilter.Purge.RetainVersions {
continue // retain versions
}
toDel = append(toDel, expireObjInfo{
ObjectInfo: result,
})
}
// Send any remaining objects downstream
if len(toDel) > 0 {
select {
case <-ctx.Done():
case expireCh <- toDel:
}
}
close(expireCh)
wk.Wait() // waits for all expire goroutines to complete
ri.Complete = ri.ObjectsFailed == 0
ri.Failed = ri.ObjectsFailed > 0
globalBatchJobsMetrics.save(job.ID, ri)
// Close the saverQuitCh - this also triggers saving in-memory state
// immediately one last time before we exit this method.
close(saverQuitCh)
// Notify expire jobs final status to the configured endpoint
buf, _ := json.Marshal(ri)
if err := r.Notify(context.Background(), bytes.NewReader(buf)); err != nil {
logger.LogIf(context.Background(), fmt.Errorf("unable to notify %v", err))
}
return nil
}
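// As read in Start above, the size of the expire worker pool defaults to
// GOMAXPROCS/2 and can be overridden through the environment, e.g.
//
//	export _MINIO_BATCH_EXPIRATION_WORKERS=8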
//msgp:ignore batchExpireJobError
type batchExpireJobError struct {
Code string
Description string
HTTPStatusCode int
}
func (e batchExpireJobError) Error() string {
return e.Description
}
// maxBatchRules is the maximum number of rules a batch-expiry job supports
const maxBatchRules = 50
// Validate validates the job definition input
func (r *BatchJobExpire) Validate(ctx context.Context, job BatchJobRequest, o ObjectLayer) error {
if r == nil {
return nil
}
if r.APIVersion != batchExpireAPIVersion {
return batchExpireJobError{
Code: "InvalidArgument",
Description: "Unsupported batch expire API version",
HTTPStatusCode: http.StatusBadRequest,
}
}
if r.Bucket == "" {
return batchExpireJobError{
Code: "InvalidArgument",
Description: "Bucket argument missing",
HTTPStatusCode: http.StatusBadRequest,
}
}
if _, err := o.GetBucketInfo(ctx, r.Bucket, BucketOptions{}); err != nil {
if isErrBucketNotFound(err) {
return batchExpireJobError{
Code: "NoSuchSourceBucket",
Description: "The specified source bucket does not exist",
HTTPStatusCode: http.StatusNotFound,
}
}
return err
}
if len(r.Rules) > maxBatchRules {
return batchExpireJobError{
Code: "InvalidArgument",
Description: "Too many rules. Batch expire job can't have more than 100 rules",
HTTPStatusCode: http.StatusBadRequest,
}
}
for _, rule := range r.Rules {
if err := rule.Validate(); err != nil {
return batchExpireJobError{
Code: "InvalidArgument",
Description: fmt.Sprintf("Invalid batch expire rule: %s", err),
HTTPStatusCode: http.StatusBadRequest,
}
}
}
if err := r.Retry.Validate(); err != nil {
return batchExpireJobError{
Code: "InvalidArgument",
Description: fmt.Sprintf("Invalid batch expire retry configuration: %s", err),
HTTPStatusCode: http.StatusBadRequest,
}
}
return nil
}

cmd/batch-expire_gen.go Normal file

@@ -0,0 +1,856 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"time"
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *BatchJobExpire) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "APIVersion":
z.APIVersion, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "APIVersion")
return
}
case "Bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Prefix":
z.Prefix, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "NotificationCfg":
err = z.NotificationCfg.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "NotificationCfg")
return
}
case "Retry":
err = z.Retry.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Retry")
return
}
case "Rules":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Rules")
return
}
if cap(z.Rules) >= int(zb0002) {
z.Rules = (z.Rules)[:zb0002]
} else {
z.Rules = make([]BatchJobExpireFilter, zb0002)
}
for za0001 := range z.Rules {
err = z.Rules[za0001].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Rules", za0001)
return
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BatchJobExpire) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 6
// write "APIVersion"
err = en.Append(0x86, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.APIVersion)
if err != nil {
err = msgp.WrapError(err, "APIVersion")
return
}
// write "Bucket"
err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "Prefix"
err = en.Append(0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
if err != nil {
return
}
err = en.WriteString(z.Prefix)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
// write "NotificationCfg"
err = en.Append(0xaf, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x66, 0x67)
if err != nil {
return
}
err = z.NotificationCfg.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "NotificationCfg")
return
}
// write "Retry"
err = en.Append(0xa5, 0x52, 0x65, 0x74, 0x72, 0x79)
if err != nil {
return
}
err = z.Retry.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Retry")
return
}
// write "Rules"
err = en.Append(0xa5, 0x52, 0x75, 0x6c, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Rules)))
if err != nil {
err = msgp.WrapError(err, "Rules")
return
}
for za0001 := range z.Rules {
err = z.Rules[za0001].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Rules", za0001)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BatchJobExpire) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 6
// string "APIVersion"
o = append(o, 0x86, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
o = msgp.AppendString(o, z.APIVersion)
// string "Bucket"
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "Prefix"
o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
o = msgp.AppendString(o, z.Prefix)
// string "NotificationCfg"
o = append(o, 0xaf, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x66, 0x67)
o, err = z.NotificationCfg.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "NotificationCfg")
return
}
// string "Retry"
o = append(o, 0xa5, 0x52, 0x65, 0x74, 0x72, 0x79)
o, err = z.Retry.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Retry")
return
}
// string "Rules"
o = append(o, 0xa5, 0x52, 0x75, 0x6c, 0x65, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Rules)))
for za0001 := range z.Rules {
o, err = z.Rules[za0001].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Rules", za0001)
return
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BatchJobExpire) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "APIVersion":
z.APIVersion, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "APIVersion")
return
}
case "Bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Prefix":
z.Prefix, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "NotificationCfg":
bts, err = z.NotificationCfg.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "NotificationCfg")
return
}
case "Retry":
bts, err = z.Retry.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Retry")
return
}
case "Rules":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Rules")
return
}
if cap(z.Rules) >= int(zb0002) {
z.Rules = (z.Rules)[:zb0002]
} else {
z.Rules = make([]BatchJobExpireFilter, zb0002)
}
for za0001 := range z.Rules {
bts, err = z.Rules[za0001].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Rules", za0001)
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobExpire) Msgsize() (s int) {
s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 16 + z.NotificationCfg.Msgsize() + 6 + z.Retry.Msgsize() + 6 + msgp.ArrayHeaderSize
for za0001 := range z.Rules {
s += z.Rules[za0001].Msgsize()
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *BatchJobExpireFilter) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "OlderThan":
z.OlderThan, err = dc.ReadDuration()
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
}
case "CreatedBefore":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "CreatedBefore")
return
}
z.CreatedBefore = nil
} else {
if z.CreatedBefore == nil {
z.CreatedBefore = new(time.Time)
}
*z.CreatedBefore, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "CreatedBefore")
return
}
}
case "Tags":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Tags")
return
}
if cap(z.Tags) >= int(zb0002) {
z.Tags = (z.Tags)[:zb0002]
} else {
z.Tags = make([]BatchJobKV, zb0002)
}
for za0001 := range z.Tags {
err = z.Tags[za0001].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Tags", za0001)
return
}
}
case "Metadata":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Metadata")
return
}
if cap(z.Metadata) >= int(zb0003) {
z.Metadata = (z.Metadata)[:zb0003]
} else {
z.Metadata = make([]BatchJobKV, zb0003)
}
for za0002 := range z.Metadata {
err = z.Metadata[za0002].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Metadata", za0002)
return
}
}
case "Size":
err = z.Size.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
case "Type":
z.Type, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
case "Name":
z.Name, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "Purge":
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Purge")
return
}
for zb0004 > 0 {
zb0004--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "Purge")
return
}
switch msgp.UnsafeString(field) {
case "RetainVersions":
z.Purge.RetainVersions, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Purge", "RetainVersions")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "Purge")
return
}
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BatchJobExpireFilter) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 8
// write "OlderThan"
err = en.Append(0x88, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
if err != nil {
return
}
err = en.WriteDuration(z.OlderThan)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
}
// write "CreatedBefore"
err = en.Append(0xad, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65)
if err != nil {
return
}
if z.CreatedBefore == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = en.WriteTime(*z.CreatedBefore)
if err != nil {
err = msgp.WrapError(err, "CreatedBefore")
return
}
}
// write "Tags"
err = en.Append(0xa4, 0x54, 0x61, 0x67, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Tags)))
if err != nil {
err = msgp.WrapError(err, "Tags")
return
}
for za0001 := range z.Tags {
err = z.Tags[za0001].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Tags", za0001)
return
}
}
// write "Metadata"
err = en.Append(0xa8, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Metadata)))
if err != nil {
err = msgp.WrapError(err, "Metadata")
return
}
for za0002 := range z.Metadata {
err = z.Metadata[za0002].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Metadata", za0002)
return
}
}
// write "Size"
err = en.Append(0xa4, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = z.Size.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
// write "Type"
err = en.Append(0xa4, 0x54, 0x79, 0x70, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Type)
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
// write "Name"
err = en.Append(0xa4, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Name)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
// write "Purge"
err = en.Append(0xa5, 0x50, 0x75, 0x72, 0x67, 0x65)
if err != nil {
return
}
// map header, size 1
// write "RetainVersions"
err = en.Append(0x81, 0xae, 0x52, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.Purge.RetainVersions)
if err != nil {
err = msgp.WrapError(err, "Purge", "RetainVersions")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BatchJobExpireFilter) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 8
// string "OlderThan"
o = append(o, 0x88, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
o = msgp.AppendDuration(o, z.OlderThan)
// string "CreatedBefore"
o = append(o, 0xad, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65)
if z.CreatedBefore == nil {
o = msgp.AppendNil(o)
} else {
o = msgp.AppendTime(o, *z.CreatedBefore)
}
// string "Tags"
o = append(o, 0xa4, 0x54, 0x61, 0x67, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Tags)))
for za0001 := range z.Tags {
o, err = z.Tags[za0001].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Tags", za0001)
return
}
}
// string "Metadata"
o = append(o, 0xa8, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61)
o = msgp.AppendArrayHeader(o, uint32(len(z.Metadata)))
for za0002 := range z.Metadata {
o, err = z.Metadata[za0002].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Metadata", za0002)
return
}
}
// string "Size"
o = append(o, 0xa4, 0x53, 0x69, 0x7a, 0x65)
o, err = z.Size.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
// string "Type"
o = append(o, 0xa4, 0x54, 0x79, 0x70, 0x65)
o = msgp.AppendString(o, z.Type)
// string "Name"
o = append(o, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name)
// string "Purge"
o = append(o, 0xa5, 0x50, 0x75, 0x72, 0x67, 0x65)
// map header, size 1
// string "RetainVersions"
o = append(o, 0x81, 0xae, 0x52, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
o = msgp.AppendInt(o, z.Purge.RetainVersions)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BatchJobExpireFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "OlderThan":
z.OlderThan, bts, err = msgp.ReadDurationBytes(bts)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
}
case "CreatedBefore":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.CreatedBefore = nil
} else {
if z.CreatedBefore == nil {
z.CreatedBefore = new(time.Time)
}
*z.CreatedBefore, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CreatedBefore")
return
}
}
case "Tags":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Tags")
return
}
if cap(z.Tags) >= int(zb0002) {
z.Tags = (z.Tags)[:zb0002]
} else {
z.Tags = make([]BatchJobKV, zb0002)
}
for za0001 := range z.Tags {
bts, err = z.Tags[za0001].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Tags", za0001)
return
}
}
case "Metadata":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Metadata")
return
}
if cap(z.Metadata) >= int(zb0003) {
z.Metadata = (z.Metadata)[:zb0003]
} else {
z.Metadata = make([]BatchJobKV, zb0003)
}
for za0002 := range z.Metadata {
bts, err = z.Metadata[za0002].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Metadata", za0002)
return
}
}
case "Size":
bts, err = z.Size.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
case "Type":
z.Type, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
case "Name":
z.Name, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "Purge":
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Purge")
return
}
for zb0004 > 0 {
zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Purge")
return
}
switch msgp.UnsafeString(field) {
case "RetainVersions":
z.Purge.RetainVersions, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Purge", "RetainVersions")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "Purge")
return
}
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobExpireFilter) Msgsize() (s int) {
s = 1 + 10 + msgp.DurationSize + 14
if z.CreatedBefore == nil {
s += msgp.NilSize
} else {
s += msgp.TimeSize
}
s += 5 + msgp.ArrayHeaderSize
for za0001 := range z.Tags {
s += z.Tags[za0001].Msgsize()
}
s += 9 + msgp.ArrayHeaderSize
for za0002 := range z.Metadata {
s += z.Metadata[za0002].Msgsize()
}
s += 5 + z.Size.Msgsize() + 5 + msgp.StringPrefixSize + len(z.Type) + 5 + msgp.StringPrefixSize + len(z.Name) + 6 + 1 + 15 + msgp.IntSize
return
}
// DecodeMsg implements msgp.Decodable
func (z *BatchJobExpirePurge) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "RetainVersions":
z.RetainVersions, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "RetainVersions")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z BatchJobExpirePurge) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
// write "RetainVersions"
err = en.Append(0x81, 0xae, 0x52, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.RetainVersions)
if err != nil {
err = msgp.WrapError(err, "RetainVersions")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z BatchJobExpirePurge) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// string "RetainVersions"
o = append(o, 0x81, 0xae, 0x52, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
o = msgp.AppendInt(o, z.RetainVersions)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BatchJobExpirePurge) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "RetainVersions":
z.RetainVersions, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "RetainVersions")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z BatchJobExpirePurge) Msgsize() (s int) {
s = 1 + 15 + msgp.IntSize
return
}


@@ -0,0 +1,349 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalBatchJobExpire(t *testing.T) {
v := BatchJobExpire{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobExpire(b *testing.B) {
v := BatchJobExpire{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobExpire(b *testing.B) {
v := BatchJobExpire{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobExpire(b *testing.B) {
v := BatchJobExpire{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobExpire(t *testing.T) {
v := BatchJobExpire{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobExpire Msgsize() is inaccurate")
}
vn := BatchJobExpire{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobExpire(b *testing.B) {
v := BatchJobExpire{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobExpire(b *testing.B) {
v := BatchJobExpire{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBatchJobExpireFilter(t *testing.T) {
v := BatchJobExpireFilter{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobExpireFilter(b *testing.B) {
v := BatchJobExpireFilter{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobExpireFilter(b *testing.B) {
v := BatchJobExpireFilter{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobExpireFilter(b *testing.B) {
v := BatchJobExpireFilter{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobExpireFilter(t *testing.T) {
v := BatchJobExpireFilter{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobExpireFilter Msgsize() is inaccurate")
}
vn := BatchJobExpireFilter{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobExpireFilter(b *testing.B) {
v := BatchJobExpireFilter{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobExpireFilter(b *testing.B) {
v := BatchJobExpireFilter{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBatchJobExpirePurge(t *testing.T) {
v := BatchJobExpirePurge{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobExpirePurge(b *testing.B) {
v := BatchJobExpirePurge{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobExpirePurge(b *testing.B) {
v := BatchJobExpirePurge{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobExpirePurge(b *testing.B) {
v := BatchJobExpirePurge{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobExpirePurge(t *testing.T) {
v := BatchJobExpirePurge{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobExpirePurge Msgsize() is inaccurate")
}
vn := BatchJobExpirePurge{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobExpirePurge(b *testing.B) {
v := BatchJobExpirePurge{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobExpirePurge(b *testing.B) {
v := BatchJobExpirePurge{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}

cmd/batch-expire_test.go Normal file

@@ -0,0 +1,71 @@
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"testing"
"gopkg.in/yaml.v2"
)
func TestParseBatchJobExpire(t *testing.T) {
expireYaml := `
expire: # Expire objects that match a condition
apiVersion: v1
bucket: mybucket # Bucket where this batch job will expire matching objects from
prefix: myprefix # (Optional) Prefix under which this job will expire objects matching the rules below.
rules:
- type: object # regular objects with zero or more older versions
name: NAME # match object names that satisfy the wildcard expression.
olderThan: 70h # match objects older than this value
createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
tags:
- key: name
value: pick* # match objects with tag 'name', all values starting with 'pick'
metadata:
- key: content-type
value: image/* # match objects with 'content-type', all values starting with 'image/'
size:
lessThan: "10MiB" # match objects with size less than this value (e.g. 10MiB)
greaterThan: 1MiB # match objects with size greater than this value (e.g. 1MiB)
purge:
# retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
# retainVersions: 5 # keep the latest 5 versions of the object.
- type: deleted # objects with delete marker as their latest version
name: NAME # match object names that satisfy the wildcard expression.
olderThan: 10h # match objects older than this value (e.g. 7d10h31s)
createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
purge:
# retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
# retainVersions: 5 # keep the latest 5 versions of the object including delete markers.
notify:
endpoint: https://notify.endpoint # notification endpoint to receive job completion status
token: Bearer xxxxx # optional authentication token for the notification endpoint
retry:
attempts: 10 # number of retries for the job before giving up
delay: 500ms # least amount of delay between each retry
`
var job BatchJobRequest
err := yaml.UnmarshalStrict([]byte(expireYaml), &job)
if err != nil {
t.Fatal("Failed to parse batch-job-expire yaml", err)
}
}
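// Note (explanatory): yaml.UnmarshalStrict from gopkg.in/yaml.v2 returns an
// error on fields that do not exist in the destination struct, so this test
// also catches typos in the documented job keys, not just structural breakage.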

cmd/batch-handlers.go Normal file

File diff suppressed because it is too large

cmd/batch-handlers_gen.go Normal file

@@ -0,0 +1,869 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *BatchJobRequest) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "User":
z.User, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "User")
return
}
case "Started":
z.Started, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
case "Location":
z.Location, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Location")
return
}
case "Replicate":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Replicate")
return
}
z.Replicate = nil
} else {
if z.Replicate == nil {
z.Replicate = new(BatchJobReplicateV1)
}
err = z.Replicate.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Replicate")
return
}
}
case "KeyRotate":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "KeyRotate")
return
}
z.KeyRotate = nil
} else {
if z.KeyRotate == nil {
z.KeyRotate = new(BatchJobKeyRotateV1)
}
err = z.KeyRotate.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "KeyRotate")
return
}
}
case "Expire":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Expire")
return
}
z.Expire = nil
} else {
if z.Expire == nil {
z.Expire = new(BatchJobExpire)
}
err = z.Expire.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Expire")
return
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BatchJobRequest) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 7
// write "ID"
err = en.Append(0x87, 0xa2, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.ID)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
// write "User"
err = en.Append(0xa4, 0x55, 0x73, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteString(z.User)
if err != nil {
err = msgp.WrapError(err, "User")
return
}
// write "Started"
err = en.Append(0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.Started)
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
// write "Location"
err = en.Append(0xa8, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.Location)
if err != nil {
err = msgp.WrapError(err, "Location")
return
}
// write "Replicate"
err = en.Append(0xa9, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65)
if err != nil {
return
}
if z.Replicate == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.Replicate.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Replicate")
return
}
}
// write "KeyRotate"
err = en.Append(0xa9, 0x4b, 0x65, 0x79, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x65)
if err != nil {
return
}
if z.KeyRotate == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.KeyRotate.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "KeyRotate")
return
}
}
// write "Expire"
err = en.Append(0xa6, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65)
if err != nil {
return
}
if z.Expire == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.Expire.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Expire")
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BatchJobRequest) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 7
// string "ID"
o = append(o, 0x87, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
// string "User"
o = append(o, 0xa4, 0x55, 0x73, 0x65, 0x72)
o = msgp.AppendString(o, z.User)
// string "Started"
o = append(o, 0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.Started)
// string "Location"
o = append(o, 0xa8, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e)
o = msgp.AppendString(o, z.Location)
// string "Replicate"
o = append(o, 0xa9, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65)
if z.Replicate == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.Replicate.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Replicate")
return
}
}
// string "KeyRotate"
o = append(o, 0xa9, 0x4b, 0x65, 0x79, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x65)
if z.KeyRotate == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.KeyRotate.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "KeyRotate")
return
}
}
// string "Expire"
o = append(o, 0xa6, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65)
if z.Expire == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.Expire.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Expire")
return
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BatchJobRequest) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "User":
z.User, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "User")
return
}
case "Started":
z.Started, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
case "Location":
z.Location, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Location")
return
}
case "Replicate":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Replicate = nil
} else {
if z.Replicate == nil {
z.Replicate = new(BatchJobReplicateV1)
}
bts, err = z.Replicate.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Replicate")
return
}
}
case "KeyRotate":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.KeyRotate = nil
} else {
if z.KeyRotate == nil {
z.KeyRotate = new(BatchJobKeyRotateV1)
}
bts, err = z.KeyRotate.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "KeyRotate")
return
}
}
case "Expire":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Expire = nil
} else {
if z.Expire == nil {
z.Expire = new(BatchJobExpire)
}
bts, err = z.Expire.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Expire")
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobRequest) Msgsize() (s int) {
s = 1 + 3 + msgp.StringPrefixSize + len(z.ID) + 5 + msgp.StringPrefixSize + len(z.User) + 8 + msgp.TimeSize + 9 + msgp.StringPrefixSize + len(z.Location) + 10
if z.Replicate == nil {
s += msgp.NilSize
} else {
s += z.Replicate.Msgsize()
}
s += 10
if z.KeyRotate == nil {
s += msgp.NilSize
} else {
s += z.KeyRotate.Msgsize()
}
s += 7
if z.Expire == nil {
s += msgp.NilSize
} else {
s += z.Expire.Msgsize()
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *batchJobInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "v":
z.Version, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Version")
return
}
case "jid":
z.JobID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "JobID")
return
}
case "jt":
z.JobType, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "JobType")
return
}
case "st":
z.StartTime, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
case "lu":
z.LastUpdate, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "ra":
z.RetryAttempts, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "cmp":
z.Complete, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Complete")
return
}
case "fld":
z.Failed, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Failed")
return
}
case "lbkt":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "lobj":
z.Object, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "ob":
z.Objects, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
case "dm":
z.DeleteMarkers, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers")
return
}
case "obf":
z.ObjectsFailed, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
case "dmf":
z.DeleteMarkersFailed, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "DeleteMarkersFailed")
return
}
case "bt":
z.BytesTransferred, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "BytesTransferred")
return
}
case "bf":
z.BytesFailed, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 16
// write "v"
err = en.Append(0xde, 0x0, 0x10, 0xa1, 0x76)
if err != nil {
return
}
err = en.WriteInt(z.Version)
if err != nil {
err = msgp.WrapError(err, "Version")
return
}
// write "jid"
err = en.Append(0xa3, 0x6a, 0x69, 0x64)
if err != nil {
return
}
err = en.WriteString(z.JobID)
if err != nil {
err = msgp.WrapError(err, "JobID")
return
}
// write "jt"
err = en.Append(0xa2, 0x6a, 0x74)
if err != nil {
return
}
err = en.WriteString(z.JobType)
if err != nil {
err = msgp.WrapError(err, "JobType")
return
}
// write "st"
err = en.Append(0xa2, 0x73, 0x74)
if err != nil {
return
}
err = en.WriteTime(z.StartTime)
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
// write "lu"
err = en.Append(0xa2, 0x6c, 0x75)
if err != nil {
return
}
err = en.WriteTime(z.LastUpdate)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
// write "ra"
err = en.Append(0xa2, 0x72, 0x61)
if err != nil {
return
}
err = en.WriteInt(z.RetryAttempts)
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
// write "cmp"
err = en.Append(0xa3, 0x63, 0x6d, 0x70)
if err != nil {
return
}
err = en.WriteBool(z.Complete)
if err != nil {
err = msgp.WrapError(err, "Complete")
return
}
// write "fld"
err = en.Append(0xa3, 0x66, 0x6c, 0x64)
if err != nil {
return
}
err = en.WriteBool(z.Failed)
if err != nil {
err = msgp.WrapError(err, "Failed")
return
}
// write "lbkt"
err = en.Append(0xa4, 0x6c, 0x62, 0x6b, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "lobj"
err = en.Append(0xa4, 0x6c, 0x6f, 0x62, 0x6a)
if err != nil {
return
}
err = en.WriteString(z.Object)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
// write "ob"
err = en.Append(0xa2, 0x6f, 0x62)
if err != nil {
return
}
err = en.WriteInt64(z.Objects)
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
// write "dm"
err = en.Append(0xa2, 0x64, 0x6d)
if err != nil {
return
}
err = en.WriteInt64(z.DeleteMarkers)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers")
return
}
// write "obf"
err = en.Append(0xa3, 0x6f, 0x62, 0x66)
if err != nil {
return
}
err = en.WriteInt64(z.ObjectsFailed)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
// write "dmf"
err = en.Append(0xa3, 0x64, 0x6d, 0x66)
if err != nil {
return
}
err = en.WriteInt64(z.DeleteMarkersFailed)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkersFailed")
return
}
// write "bt"
err = en.Append(0xa2, 0x62, 0x74)
if err != nil {
return
}
err = en.WriteInt64(z.BytesTransferred)
if err != nil {
err = msgp.WrapError(err, "BytesTransferred")
return
}
// write "bf"
err = en.Append(0xa2, 0x62, 0x66)
if err != nil {
return
}
err = en.WriteInt64(z.BytesFailed)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *batchJobInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 16
// string "v"
o = append(o, 0xde, 0x0, 0x10, 0xa1, 0x76)
o = msgp.AppendInt(o, z.Version)
// string "jid"
o = append(o, 0xa3, 0x6a, 0x69, 0x64)
o = msgp.AppendString(o, z.JobID)
// string "jt"
o = append(o, 0xa2, 0x6a, 0x74)
o = msgp.AppendString(o, z.JobType)
// string "st"
o = append(o, 0xa2, 0x73, 0x74)
o = msgp.AppendTime(o, z.StartTime)
// string "lu"
o = append(o, 0xa2, 0x6c, 0x75)
o = msgp.AppendTime(o, z.LastUpdate)
// string "ra"
o = append(o, 0xa2, 0x72, 0x61)
o = msgp.AppendInt(o, z.RetryAttempts)
// string "cmp"
o = append(o, 0xa3, 0x63, 0x6d, 0x70)
o = msgp.AppendBool(o, z.Complete)
// string "fld"
o = append(o, 0xa3, 0x66, 0x6c, 0x64)
o = msgp.AppendBool(o, z.Failed)
// string "lbkt"
o = append(o, 0xa4, 0x6c, 0x62, 0x6b, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "lobj"
o = append(o, 0xa4, 0x6c, 0x6f, 0x62, 0x6a)
o = msgp.AppendString(o, z.Object)
// string "ob"
o = append(o, 0xa2, 0x6f, 0x62)
o = msgp.AppendInt64(o, z.Objects)
// string "dm"
o = append(o, 0xa2, 0x64, 0x6d)
o = msgp.AppendInt64(o, z.DeleteMarkers)
// string "obf"
o = append(o, 0xa3, 0x6f, 0x62, 0x66)
o = msgp.AppendInt64(o, z.ObjectsFailed)
// string "dmf"
o = append(o, 0xa3, 0x64, 0x6d, 0x66)
o = msgp.AppendInt64(o, z.DeleteMarkersFailed)
// string "bt"
o = append(o, 0xa2, 0x62, 0x74)
o = msgp.AppendInt64(o, z.BytesTransferred)
// string "bf"
o = append(o, 0xa2, 0x62, 0x66)
o = msgp.AppendInt64(o, z.BytesFailed)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *batchJobInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "v":
z.Version, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Version")
return
}
case "jid":
z.JobID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "JobID")
return
}
case "jt":
z.JobType, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "JobType")
return
}
case "st":
z.StartTime, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
case "lu":
z.LastUpdate, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "ra":
z.RetryAttempts, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "cmp":
z.Complete, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Complete")
return
}
case "fld":
z.Failed, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Failed")
return
}
case "lbkt":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "lobj":
z.Object, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "ob":
z.Objects, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
case "dm":
z.DeleteMarkers, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers")
return
}
case "obf":
z.ObjectsFailed, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
case "dmf":
z.DeleteMarkersFailed, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkersFailed")
return
}
case "bt":
z.BytesTransferred, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesTransferred")
return
}
case "bf":
z.BytesFailed, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *batchJobInfo) Msgsize() (s int) {
s = 3 + 2 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.JobID) + 3 + msgp.StringPrefixSize + len(z.JobType) + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.IntSize + 4 + msgp.BoolSize + 4 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size
return
}


@@ -0,0 +1,236 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalBatchJobRequest(t *testing.T) {
v := BatchJobRequest{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobRequest(b *testing.B) {
v := BatchJobRequest{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobRequest(b *testing.B) {
v := BatchJobRequest{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobRequest(b *testing.B) {
v := BatchJobRequest{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobRequest(t *testing.T) {
v := BatchJobRequest{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobRequest Msgsize() is inaccurate")
}
vn := BatchJobRequest{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobRequest(b *testing.B) {
v := BatchJobRequest{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobRequest(b *testing.B) {
v := BatchJobRequest{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalbatchJobInfo(t *testing.T) {
v := batchJobInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgbatchJobInfo(b *testing.B) {
v := batchJobInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgbatchJobInfo(b *testing.B) {
v := batchJobInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalbatchJobInfo(b *testing.B) {
v := batchJobInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodebatchJobInfo(t *testing.T) {
v := batchJobInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodebatchJobInfo Msgsize() is inaccurate")
}
vn := batchJobInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodebatchJobInfo(b *testing.B) {
v := batchJobInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodebatchJobInfo(b *testing.B) {
v := batchJobInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}


@@ -0,0 +1,163 @@
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"errors"
"strings"
"time"
"github.com/dustin/go-humanize"
"github.com/minio/pkg/v2/wildcard"
)
//go:generate msgp -file $GOFILE
// BatchJobKV is a key-value data type which supports wildcard matching
type BatchJobKV struct {
Key string `yaml:"key" json:"key"`
Value string `yaml:"value" json:"value"`
}
// Validate returns an error if key is empty
func (kv BatchJobKV) Validate() error {
if kv.Key == "" {
return errInvalidArgument
}
return nil
}
// Empty indicates if kv is not set
func (kv BatchJobKV) Empty() bool {
return kv.Key == "" && kv.Value == ""
}
// Match matches input kv with kv, value will be wildcard matched depending on the user input
func (kv BatchJobKV) Match(ikv BatchJobKV) bool {
if kv.Empty() {
return true
}
if strings.EqualFold(kv.Key, ikv.Key) {
return wildcard.Match(kv.Value, ikv.Value)
}
return false
}
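// Editor's sketch (not part of the original file): keys compare case-insensitively
// and values are wildcard-matched, so a filter kv matches an incoming kv like this:
//
//	filter := BatchJobKV{Key: "content-type", Value: "image/*"}
//	filter.Match(BatchJobKV{Key: "Content-Type", Value: "image/png"})  // true, wildcard match on value
//	filter.Match(BatchJobKV{Key: "content-type", Value: "text/plain"}) // false
//	BatchJobKV{}.Match(BatchJobKV{Key: "anything", Value: "at all"})   // true, an empty filter matches everything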
// BatchJobNotification stores notification endpoint and token information.
// Used by batch jobs to notify of their status.
type BatchJobNotification struct {
Endpoint string `yaml:"endpoint" json:"endpoint"`
Token string `yaml:"token" json:"token"`
}
// BatchJobRetry stores retry configuration used in the event of failures.
type BatchJobRetry struct {
Attempts int `yaml:"attempts" json:"attempts"` // number of retry attempts
Delay time.Duration `yaml:"delay" json:"delay"` // delay between retries
}
// Validate validates input replicate retries.
func (r BatchJobRetry) Validate() error {
if r.Attempts < 0 {
return errInvalidArgument
}
if r.Delay < 0 {
return errInvalidArgument
}
return nil
}
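// Editor's sketch (not part of the original file): Validate only rejects negative
// values; zero attempts and zero delay are accepted.
//
//	r := BatchJobRetry{Attempts: 3, Delay: 250 * time.Millisecond}
//	_ = r.Validate()                           // nil
//	_ = BatchJobRetry{Attempts: -1}.Validate() // errInvalidArgument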
// # snowball based archive transfer is by default enabled when source
// # is local and target is remote which is also minio.
// snowball:
// disable: false # optionally turn off snowball archive transfer
// batch: 100 # up to this many objects per archive
// inmemory: true # indicates if the archive must be staged locally or in-memory
// compress: true # S2/Snappy compressed archive
// smallerThan: 5MiB # create archive for all objects smaller than 5MiB
// skipErrs: false # skips any source side read() errors
// BatchJobSnowball describes the snowball feature when replicating objects from a local source to a remote target
type BatchJobSnowball struct {
Disable *bool `yaml:"disable" json:"disable"`
Batch *int `yaml:"batch" json:"batch"`
InMemory *bool `yaml:"inmemory" json:"inmemory"`
Compress *bool `yaml:"compress" json:"compress"`
SmallerThan *string `yaml:"smallerThan" json:"smallerThan"`
SkipErrs *bool `yaml:"skipErrs" json:"skipErrs"`
}
// Validate the snowball parameters in the job description
func (b BatchJobSnowball) Validate() error {
if *b.Batch <= 0 {
return errors.New("batch number should be a positive non-zero value")
}
_, err := humanize.ParseBytes(*b.SmallerThan)
return err
}
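// Editor's sketch (not part of the original file): Validate dereferences Batch and
// SmallerThan unconditionally, so it assumes both pointers were populated (for
// example from the YAML above, or from defaults) before it is called.
//
//	batch, smaller := 100, "5MiB"
//	sb := BatchJobSnowball{Batch: &batch, SmallerThan: &smaller}
//	_ = sb.Validate() // nil; "5MiB" parses via humanize.ParseBytes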
// BatchJobSizeFilter supports size based filters - LesserThan and GreaterThan
type BatchJobSizeFilter struct {
UpperBound BatchJobSize `yaml:"lessThan" json:"lessThan"`
LowerBound BatchJobSize `yaml:"greaterThan" json:"greaterThan"`
}
// InRange returns true in the following cases and false otherwise,
// - sf.LowerBound <= sz, when only sf.LowerBound is specified
// - sz <= sf.UpperBound, when only sf.UpperBound is specified
// - sf.LowerBound <= sz <= sf.UpperBound, when both are specified
func (sf BatchJobSizeFilter) InRange(sz int64) bool {
if sf.UpperBound > 0 && sz > int64(sf.UpperBound) {
return false
}
if sf.LowerBound > 0 && sz < int64(sf.LowerBound) {
return false
}
return true
}
var errInvalidBatchJobSizeFilter = errors.New("invalid batch-job size filter")
// Validate checks if sf is a valid batch-job size filter
func (sf BatchJobSizeFilter) Validate() error {
if sf.LowerBound > 0 && sf.UpperBound > 0 && sf.LowerBound >= sf.UpperBound {
return errInvalidBatchJobSizeFilter
}
return nil
}
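// Editor's worked example (not part of the original file): with a 1MiB lower bound
// and a 10MiB upper bound, only objects between the two (inclusive) are in range,
// and a filter whose lower bound meets or exceeds its upper bound fails validation.
//
//	sf := BatchJobSizeFilter{LowerBound: 1 << 20, UpperBound: 10 << 20}
//	sf.InRange(2 << 20)  // true
//	sf.InRange(2 << 10)  // false, below the lower bound
//	sf.InRange(11 << 20) // false, above the upper bound
//	_ = BatchJobSizeFilter{LowerBound: 10 << 20, UpperBound: 1 << 20}.Validate() // errInvalidBatchJobSizeFilter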
// BatchJobSize supports humanized byte values in yaml files
type BatchJobSize int64
// UnmarshalYAML to parse humanized byte values
func (s *BatchJobSize) UnmarshalYAML(unmarshal func(interface{}) error) error {
var batchExpireSz string
err := unmarshal(&batchExpireSz)
if err != nil {
return err
}
sz, err := humanize.ParseBytes(batchExpireSz)
if err != nil {
return err
}
*s = BatchJobSize(sz)
return nil
}
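// Editor's sketch (not part of the original file; assumes gopkg.in/yaml.v2-style
// decoding, which matches this UnmarshalYAML signature): the value is read as a
// string and parsed with humanize.ParseBytes, so job YAML can state sizes in
// human-readable units.
//
//	var sz BatchJobSize
//	_ = yaml.Unmarshal([]byte(`5MiB`), &sz) // sz == 5 << 20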

File diff suppressed because it is too large


@@ -0,0 +1,575 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalBatchJobKV(t *testing.T) {
v := BatchJobKV{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobKV(b *testing.B) {
v := BatchJobKV{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobKV(b *testing.B) {
v := BatchJobKV{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobKV(b *testing.B) {
v := BatchJobKV{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobKV(t *testing.T) {
v := BatchJobKV{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobKV Msgsize() is inaccurate")
}
vn := BatchJobKV{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobKV(b *testing.B) {
v := BatchJobKV{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobKV(b *testing.B) {
v := BatchJobKV{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBatchJobNotification(t *testing.T) {
v := BatchJobNotification{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobNotification(b *testing.B) {
v := BatchJobNotification{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobNotification(b *testing.B) {
v := BatchJobNotification{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobNotification(b *testing.B) {
v := BatchJobNotification{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobNotification(t *testing.T) {
v := BatchJobNotification{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobNotification Msgsize() is inaccurate")
}
vn := BatchJobNotification{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobNotification(b *testing.B) {
v := BatchJobNotification{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobNotification(b *testing.B) {
v := BatchJobNotification{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBatchJobRetry(t *testing.T) {
v := BatchJobRetry{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobRetry(b *testing.B) {
v := BatchJobRetry{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobRetry(b *testing.B) {
v := BatchJobRetry{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobRetry(b *testing.B) {
v := BatchJobRetry{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobRetry(t *testing.T) {
v := BatchJobRetry{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobRetry Msgsize() is inaccurate")
}
vn := BatchJobRetry{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobRetry(b *testing.B) {
v := BatchJobRetry{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobRetry(b *testing.B) {
v := BatchJobRetry{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBatchJobSizeFilter(t *testing.T) {
v := BatchJobSizeFilter{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobSizeFilter(b *testing.B) {
v := BatchJobSizeFilter{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobSizeFilter(b *testing.B) {
v := BatchJobSizeFilter{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobSizeFilter(b *testing.B) {
v := BatchJobSizeFilter{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobSizeFilter(t *testing.T) {
v := BatchJobSizeFilter{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobSizeFilter Msgsize() is inaccurate")
}
vn := BatchJobSizeFilter{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobSizeFilter(b *testing.B) {
v := BatchJobSizeFilter{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobSizeFilter(b *testing.B) {
v := BatchJobSizeFilter{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBatchJobSnowball(t *testing.T) {
v := BatchJobSnowball{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobSnowball(b *testing.B) {
v := BatchJobSnowball{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobSnowball(b *testing.B) {
v := BatchJobSnowball{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobSnowball(b *testing.B) {
v := BatchJobSnowball{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobSnowball(t *testing.T) {
v := BatchJobSnowball{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobSnowball Msgsize() is inaccurate")
}
vn := BatchJobSnowball{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobSnowball(b *testing.B) {
v := BatchJobSnowball{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobSnowball(b *testing.B) {
v := BatchJobSnowball{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}


@@ -0,0 +1,136 @@
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"fmt"
"testing"
)
func TestBatchJobSizeInRange(t *testing.T) {
tests := []struct {
objSize int64
sizeFilter BatchJobSizeFilter
want bool
}{
{
// 1Mib < 2Mib < 10MiB -> in range
objSize: 2 << 20,
sizeFilter: BatchJobSizeFilter{
UpperBound: 10 << 20,
LowerBound: 1 << 20,
},
want: true,
},
{
// 2KiB < 1 MiB -> out of range from left
objSize: 2 << 10,
sizeFilter: BatchJobSizeFilter{
UpperBound: 10 << 20,
LowerBound: 1 << 20,
},
want: false,
},
{
// 11MiB > 10 MiB -> out of range from right
objSize: 11 << 20,
sizeFilter: BatchJobSizeFilter{
UpperBound: 10 << 20,
LowerBound: 1 << 20,
},
want: false,
},
{
// 2MiB < 10MiB -> in range
objSize: 2 << 20,
sizeFilter: BatchJobSizeFilter{
UpperBound: 10 << 20,
},
want: true,
},
{
// 2MiB > 1MiB -> in range
objSize: 2 << 20,
sizeFilter: BatchJobSizeFilter{
LowerBound: 1 << 20,
},
want: true,
},
}
for i, test := range tests {
t.Run(fmt.Sprintf("test-%d", i+1), func(t *testing.T) {
if got := test.sizeFilter.InRange(test.objSize); got != test.want {
t.Fatalf("Expected %v but got %v", test.want, got)
}
})
}
}
func TestBatchJobSizeValidate(t *testing.T) {
tests := []struct {
sizeFilter BatchJobSizeFilter
err error
}{
{
// Unspecified size filter is a valid filter
sizeFilter: BatchJobSizeFilter{
UpperBound: 0,
LowerBound: 0,
},
err: nil,
},
{
sizeFilter: BatchJobSizeFilter{
UpperBound: 0,
LowerBound: 1 << 20,
},
err: nil,
},
{
sizeFilter: BatchJobSizeFilter{
UpperBound: 10 << 20,
LowerBound: 0,
},
err: nil,
},
{
// LowerBound > UpperBound -> empty range
sizeFilter: BatchJobSizeFilter{
UpperBound: 1 << 20,
LowerBound: 10 << 20,
},
err: errInvalidBatchJobSizeFilter,
},
{
// LowerBound == UpperBound -> empty range
sizeFilter: BatchJobSizeFilter{
UpperBound: 1 << 20,
LowerBound: 1 << 20,
},
err: errInvalidBatchJobSizeFilter,
},
}
for i, test := range tests {
t.Run(fmt.Sprintf("test-%d", i+1), func(t *testing.T) {
if err := test.sizeFilter.Validate(); err != test.err {
t.Fatalf("Expected %v but got %v", test.err, err)
}
})
}
}

cmd/batch-replicate.go (new file, 184 lines)

@@ -0,0 +1,184 @@
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"time"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio/internal/auth"
)
//go:generate msgp -file $GOFILE
// replicate:
// # source of the objects to be replicated
// source:
// type: "minio"
// bucket: "testbucket"
// prefix: "spark/"
//
// # optional flags based filtering criteria
// # for source objects
// flags:
// filter:
// newerThan: "7d"
// olderThan: "7d"
// createdAfter: "date"
// createdBefore: "date"
// tags:
// - key: "name"
// value: "value*"
// metadata:
// - key: "content-type"
// value: "image/*"
// notify:
// endpoint: "https://splunk-hec.dev.com"
// token: "Splunk ..." # e.g. "Bearer token"
//
// # target where the objects must be replicated
// target:
// type: "minio"
// bucket: "testbucket1"
// endpoint: "https://play.min.io"
// path: "on"
// credentials:
// accessKey: "minioadmin"
// secretKey: "minioadmin"
// sessionToken: ""
// BatchReplicateFilter holds all the filters currently supported for batch replication
type BatchReplicateFilter struct {
NewerThan time.Duration `yaml:"newerThan,omitempty" json:"newerThan"`
OlderThan time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
CreatedAfter time.Time `yaml:"createdAfter,omitempty" json:"createdAfter"`
CreatedBefore time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"`
Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"`
}
// BatchJobReplicateFlags various configurations for replication job definition currently includes
// - filter
// - notify
// - retry
type BatchJobReplicateFlags struct {
Filter BatchReplicateFilter `yaml:"filter" json:"filter"`
Notify BatchJobNotification `yaml:"notify" json:"notify"`
Retry BatchJobRetry `yaml:"retry" json:"retry"`
}
// BatchJobReplicateResourceType defines the type of batch jobs
type BatchJobReplicateResourceType string
// Validate validates if the replicate resource type is recognized and supported
func (t BatchJobReplicateResourceType) Validate() error {
switch t {
case BatchJobReplicateResourceMinIO:
case BatchJobReplicateResourceS3:
default:
return errInvalidArgument
}
return nil
}
func (t BatchJobReplicateResourceType) isMinio() bool {
return t == BatchJobReplicateResourceMinIO
}
// Different types of batch jobs.
const (
BatchJobReplicateResourceMinIO BatchJobReplicateResourceType = "minio"
BatchJobReplicateResourceS3 BatchJobReplicateResourceType = "s3"
// add future targets
)
// BatchJobReplicateCredentials access credentials for batch replication it may
// be either for target or source.
type BatchJobReplicateCredentials struct {
AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty" yaml:"accessKey"`
SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty" yaml:"secretKey"`
SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty" yaml:"sessionToken"`
}
// Empty indicates if credentials are not set
func (c BatchJobReplicateCredentials) Empty() bool {
return c.AccessKey == "" && c.SecretKey == "" && c.SessionToken == ""
}
// Validate validates if credentials are valid
func (c BatchJobReplicateCredentials) Validate() error {
if !auth.IsAccessKeyValid(c.AccessKey) || !auth.IsSecretKeyValid(c.SecretKey) {
return errInvalidArgument
}
return nil
}
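// Editor's sketch (not part of the original file): Validate only checks key format
// via the auth package, so callers typically test Empty() first; an entirely empty
// credentials block fails validation because empty keys are not considered valid.
//
//	creds := BatchJobReplicateCredentials{AccessKey: "minioadmin", SecretKey: "minioadmin"}
//	_ = creds.Validate()                            // nil for well-formed keys
//	_ = BatchJobReplicateCredentials{}.Validate()   // errInvalidArgument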
// BatchJobReplicateTarget describes target element of the replication job that receives
// the filtered data from source
type BatchJobReplicateTarget struct {
Type BatchJobReplicateResourceType `yaml:"type" json:"type"`
Bucket string `yaml:"bucket" json:"bucket"`
Prefix string `yaml:"prefix" json:"prefix"`
Endpoint string `yaml:"endpoint" json:"endpoint"`
Path string `yaml:"path" json:"path"`
Creds BatchJobReplicateCredentials `yaml:"credentials" json:"credentials"`
}
// ValidPath returns true if path is valid
func (t BatchJobReplicateTarget) ValidPath() bool {
return t.Path == "on" || t.Path == "off" || t.Path == "auto" || t.Path == ""
}
// BatchJobReplicateSource describes source element of the replication job that is
// the source of the data for the target
type BatchJobReplicateSource struct {
Type BatchJobReplicateResourceType `yaml:"type" json:"type"`
Bucket string `yaml:"bucket" json:"bucket"`
Prefix string `yaml:"prefix" json:"prefix"`
Endpoint string `yaml:"endpoint" json:"endpoint"`
Path string `yaml:"path" json:"path"`
Creds BatchJobReplicateCredentials `yaml:"credentials" json:"credentials"`
Snowball BatchJobSnowball `yaml:"snowball" json:"snowball"`
}
// ValidPath returns true if path is valid
func (s BatchJobReplicateSource) ValidPath() bool {
switch s.Path {
case "on", "off", "auto", "":
return true
default:
return false
}
}
// BatchJobReplicateV1 v1 of batch job replication
type BatchJobReplicateV1 struct {
APIVersion string `yaml:"apiVersion" json:"apiVersion"`
Flags BatchJobReplicateFlags `yaml:"flags" json:"flags"`
Target BatchJobReplicateTarget `yaml:"target" json:"target"`
Source BatchJobReplicateSource `yaml:"source" json:"source"`
clnt *miniogo.Core `msg:"-"`
}
// RemoteToLocal returns true if source is remote and target is local
func (r BatchJobReplicateV1) RemoteToLocal() bool {
return !r.Source.Creds.Empty()
}
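// Editor's sketch (not part of the original file): only the remote end of a batch
// replication carries credentials, so RemoteToLocal reduces to "does the source
// have credentials". A minimal pull-style job value, reusing the endpoint and
// credentials from the YAML example above (the "v1" apiVersion string is purely
// illustrative), would look like:
//
//	job := BatchJobReplicateV1{
//		APIVersion: "v1",
//		Source: BatchJobReplicateSource{
//			Type:     BatchJobReplicateResourceMinIO,
//			Bucket:   "testbucket",
//			Endpoint: "https://play.min.io",
//			Creds:    BatchJobReplicateCredentials{AccessKey: "minioadmin", SecretKey: "minioadmin"},
//		},
//		Target: BatchJobReplicateTarget{Type: BatchJobReplicateResourceMinIO, Bucket: "testbucket1"},
//	}
//	job.RemoteToLocal() // true, because the source has credentials set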

cmd/batch-replicate_gen.go (new file, 1706 lines)

File diff suppressed because it is too large


@@ -0,0 +1,688 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalBatchJobReplicateCredentials(t *testing.T) {
v := BatchJobReplicateCredentials{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobReplicateCredentials(b *testing.B) {
v := BatchJobReplicateCredentials{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobReplicateCredentials(b *testing.B) {
v := BatchJobReplicateCredentials{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobReplicateCredentials(b *testing.B) {
v := BatchJobReplicateCredentials{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobReplicateCredentials(t *testing.T) {
v := BatchJobReplicateCredentials{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobReplicateCredentials Msgsize() is inaccurate")
}
vn := BatchJobReplicateCredentials{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobReplicateCredentials(b *testing.B) {
v := BatchJobReplicateCredentials{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobReplicateCredentials(b *testing.B) {
v := BatchJobReplicateCredentials{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBatchJobReplicateFlags(t *testing.T) {
v := BatchJobReplicateFlags{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobReplicateFlags(b *testing.B) {
v := BatchJobReplicateFlags{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobReplicateFlags(b *testing.B) {
v := BatchJobReplicateFlags{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobReplicateFlags(b *testing.B) {
v := BatchJobReplicateFlags{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobReplicateFlags(t *testing.T) {
v := BatchJobReplicateFlags{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobReplicateFlags Msgsize() is inaccurate")
}
vn := BatchJobReplicateFlags{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobReplicateFlags(b *testing.B) {
v := BatchJobReplicateFlags{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobReplicateFlags(b *testing.B) {
v := BatchJobReplicateFlags{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBatchJobReplicateSource(t *testing.T) {
v := BatchJobReplicateSource{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobReplicateSource(b *testing.B) {
v := BatchJobReplicateSource{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobReplicateSource(b *testing.B) {
v := BatchJobReplicateSource{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobReplicateSource(b *testing.B) {
v := BatchJobReplicateSource{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobReplicateSource(t *testing.T) {
v := BatchJobReplicateSource{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobReplicateSource Msgsize() is inaccurate")
}
vn := BatchJobReplicateSource{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobReplicateSource(b *testing.B) {
v := BatchJobReplicateSource{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobReplicateSource(b *testing.B) {
v := BatchJobReplicateSource{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBatchJobReplicateTarget(t *testing.T) {
v := BatchJobReplicateTarget{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobReplicateTarget(b *testing.B) {
v := BatchJobReplicateTarget{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobReplicateTarget(b *testing.B) {
v := BatchJobReplicateTarget{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobReplicateTarget(b *testing.B) {
v := BatchJobReplicateTarget{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobReplicateTarget(t *testing.T) {
v := BatchJobReplicateTarget{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobReplicateTarget Msgsize() is inaccurate")
}
vn := BatchJobReplicateTarget{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobReplicateTarget(b *testing.B) {
v := BatchJobReplicateTarget{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobReplicateTarget(b *testing.B) {
v := BatchJobReplicateTarget{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBatchJobReplicateV1(t *testing.T) {
v := BatchJobReplicateV1{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobReplicateV1(b *testing.B) {
v := BatchJobReplicateV1{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobReplicateV1(b *testing.B) {
v := BatchJobReplicateV1{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobReplicateV1(b *testing.B) {
v := BatchJobReplicateV1{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobReplicateV1(t *testing.T) {
v := BatchJobReplicateV1{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobReplicateV1 Msgsize() is inaccurate")
}
vn := BatchJobReplicateV1{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobReplicateV1(b *testing.B) {
v := BatchJobReplicateV1{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobReplicateV1(b *testing.B) {
v := BatchJobReplicateV1{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBatchReplicateFilter(t *testing.T) {
v := BatchReplicateFilter{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchReplicateFilter(b *testing.B) {
v := BatchReplicateFilter{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchReplicateFilter(b *testing.B) {
v := BatchReplicateFilter{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchReplicateFilter(b *testing.B) {
v := BatchReplicateFilter{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchReplicateFilter(t *testing.T) {
v := BatchReplicateFilter{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchReplicateFilter Msgsize() is inaccurate")
}
vn := BatchReplicateFilter{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchReplicateFilter(b *testing.B) {
v := BatchReplicateFilter{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchReplicateFilter(b *testing.B) {
v := BatchReplicateFilter{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}

Some files were not shown because too many files have changed in this diff.