diff --git a/cmd/config-current.go b/cmd/config-current.go
index 45cc041ab..a3f5aee2e 100644
--- a/cmd/config-current.go
+++ b/cmd/config-current.go
@@ -575,7 +575,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
 			configLogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
 		}
 
-		globalAPIConfig.init(apiConfig, setDriveCounts)
+		globalAPIConfig.init(apiConfig, setDriveCounts, objAPI.Legacy())
 		autoGenerateRootCredentials() // Generate the KMS root credentials here since we don't know whether API root access is disabled until now.
 		setRemoteInstanceTransport(NewHTTPTransportWithTimeout(apiConfig.RemoteTransportDeadline))
 	case config.CompressionSubSys:
diff --git a/cmd/erasure-server-pool.go b/cmd/erasure-server-pool.go
index 8033ae28d..4a1277e1e 100644
--- a/cmd/erasure-server-pool.go
+++ b/cmd/erasure-server-pool.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
+// Copyright (c) 2015-2024 MinIO, Inc.
 //
 // This file is part of MinIO Object Storage stack
 //
@@ -644,6 +644,15 @@ func (z *erasureServerPools) Shutdown(ctx context.Context) error {
 	return nil
 }
 
+// Legacy returns 'true' if distribution algo is CRCMOD
+func (z *erasureServerPools) Legacy() (ok bool) {
+	ok = true
+	for _, set := range z.serverPools {
+		ok = ok && set.Legacy()
+	}
+	return ok
+}
+
 func (z *erasureServerPools) BackendInfo() (b madmin.BackendInfo) {
 	b.Type = madmin.Erasure
 
diff --git a/cmd/erasure-sets.go b/cmd/erasure-sets.go
index 9a0dd7847..073c0bd30 100644
--- a/cmd/erasure-sets.go
+++ b/cmd/erasure-sets.go
@@ -194,6 +194,11 @@ func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) {
 	return -1, -1, fmt.Errorf("DriveID: %s not found", format.Erasure.This)
 }
 
+// Legacy returns 'true' if distribution algo is CRCMOD
+func (s *erasureSets) Legacy() (ok bool) {
+	return s.distributionAlgo == formatErasureVersionV2DistributionAlgoV1
+}
+
 // connectDisks - attempt to connect all the endpoints, loads format
 // and re-arranges the disks in proper position.
 func (s *erasureSets) connectDisks() {
diff --git a/cmd/handler-api.go b/cmd/handler-api.go
index e5ce25ac7..c787150ac 100644
--- a/cmd/handler-api.go
+++ b/cmd/handler-api.go
@@ -22,6 +22,7 @@ import (
 	"net/http"
 	"os"
 	"runtime"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
@@ -38,13 +39,11 @@ import (
 type apiConfig struct {
 	mu sync.RWMutex
 
-	requestsDeadline time.Duration
-	requestsPool     chan struct{}
-	clusterDeadline  time.Duration
-	listQuorum       string
-	corsAllowOrigins []string
-	// total drives per erasure set across pools.
-	totalDriveCount       int
+	requestsDeadline      time.Duration
+	requestsPool          chan struct{}
+	clusterDeadline       time.Duration
+	listQuorum            string
+	corsAllowOrigins      []string
 	replicationPriority   string
 	replicationMaxWorkers int
 	transitionWorkers     int
@@ -110,7 +109,7 @@ func availableMemory() (available uint64) {
 	return
 }
 
-func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
+func (t *apiConfig) init(cfg api.Config, setDriveCounts []int, legacy bool) {
 	t.mu.Lock()
 	defer t.mu.Unlock()
 
@@ -125,27 +124,24 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
 	}
 	t.corsAllowOrigins = corsAllowOrigin
 
-	maxSetDrives := 0
-	for _, setDriveCount := range setDriveCounts {
-		t.totalDriveCount += setDriveCount
-		if setDriveCount > maxSetDrives {
-			maxSetDrives = setDriveCount
-		}
-	}
-
 	var apiRequestsMaxPerNode int
 	if cfg.RequestsMax <= 0 {
+		maxSetDrives := slices.Max(setDriveCounts)
+
 		// Returns 75% of max memory allowed
 		maxMem := availableMemory()
 
 		// max requests per node is calculated as
 		// total_ram / ram_per_request
-		// ram_per_request is (2MiB+128KiB) * driveCount \
-		//    + 2 * 10MiB (default erasure block size v1) + 2 * 1MiB (default erasure block size v2)
 		blockSize := xioutil.LargeBlock + xioutil.SmallBlock
-		apiRequestsMaxPerNode = int(maxMem / uint64(maxSetDrives*blockSize+int(blockSizeV1*2+blockSizeV2*2)))
-		if globalIsDistErasure {
-			logger.Info("Automatically configured API requests per node based on available memory on the system: %d", apiRequestsMaxPerNode)
+		if legacy {
+			// ram_per_request is (1MiB+32KiB) * driveCount \
+			//    + 2 * 10MiB (default erasure block size v1) + 2 * 1MiB (default erasure block size v2)
+			apiRequestsMaxPerNode = int(maxMem / uint64(maxSetDrives*blockSize+int(blockSizeV1*2+blockSizeV2*2)))
+		} else {
+			// ram_per_request is (1MiB+32KiB) * driveCount \
+			//    + 2 * 1MiB (default erasure block size v2)
+			apiRequestsMaxPerNode = int(maxMem / uint64(maxSetDrives*blockSize+int(blockSizeV2*2)))
 		}
 	} else {
 		apiRequestsMaxPerNode = cfg.RequestsMax
@@ -154,6 +150,10 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
 		}
 	}
 
+	if globalIsDistErasure {
+		logger.Info("Configured max API requests per node based on available memory: %d", apiRequestsMaxPerNode)
+	}
+
 	if cap(t.requestsPool) != apiRequestsMaxPerNode {
 		// Only replace if needed.
 		// Existing requests will use the previous limit,
diff --git a/cmd/object-api-interface.go b/cmd/object-api-interface.go
index 0f24c9833..5e0945a6d 100644
--- a/cmd/object-api-interface.go
+++ b/cmd/object-api-interface.go
@@ -244,6 +244,7 @@ type ObjectLayer interface {
 	Shutdown(context.Context) error
 	NSScanner(ctx context.Context, updates chan<- DataUsageInfo, wantCycle uint32, scanMode madmin.HealScanMode) error
 	BackendInfo() madmin.BackendInfo
+	Legacy() bool // Only returns true for deployments which use CRCMOD as their object distribution algorithm.
 	StorageInfo(ctx context.Context, metrics bool) StorageInfo
 	LocalStorageInfo(ctx context.Context, metrics bool) StorageInfo
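
Reviewer note: below is a minimal, self-contained sketch of the sizing math this patch changes, useful for sanity-checking the new limits outside the server. The constant values mirror the comments in the diff (xioutil.LargeBlock = 1 MiB, xioutil.SmallBlock = 32 KiB, blockSizeV1 = 10 MiB, blockSizeV2 = 1 MiB); the function and variable names are hypothetical illustrations, not identifiers from the MinIO tree.

```go
package main

import "fmt"

// Buffer sizes as documented in the diff's comments. These are assumptions
// restated for illustration, not imports from the MinIO code.
const (
	largeBlock  = 1 << 20  // xioutil.LargeBlock: 1 MiB per-drive buffer
	smallBlock  = 32 << 10 // xioutil.SmallBlock: 32 KiB per-drive buffer
	blockSizeV1 = 10 << 20 // default erasure block size v1: 10 MiB
	blockSizeV2 = 1 << 20  // default erasure block size v2: 1 MiB
)

// maxAPIRequestsPerNode restates apiConfig.init's auto-tuning formula:
// total_ram / ram_per_request. After this patch, only legacy (CRCMOD)
// deployments budget the two 10 MiB v1 erasure blocks per request; all
// other deployments budget only the two 1 MiB v2 blocks.
func maxAPIRequestsPerNode(availableMem uint64, maxSetDrives int, legacy bool) int {
	perRequest := maxSetDrives*(largeBlock+smallBlock) + 2*blockSizeV2
	if legacy {
		perRequest += 2 * blockSizeV1
	}
	return int(availableMem / uint64(perRequest))
}

func main() {
	mem := uint64(8 << 30) // assume 8 GiB available (already the 75% figure)

	// With a 16-drive erasure set, ram_per_request is ~38.5 MiB on a legacy
	// deployment but only ~18.5 MiB otherwise.
	fmt.Println(maxAPIRequestsPerNode(mem, 16, true))  // 212
	fmt.Println(maxAPIRequestsPerNode(mem, 16, false)) // 442
}
```

The arithmetic shows the effect of the change: with the same memory budget, a non-legacy cluster's auto-computed request limit roughly doubles, because the two 10 MiB v1 blocks previously dominated the per-request cost even on deployments that never use v1-sized erasure blocks.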