diff --git a/cmd/handler-api.go b/cmd/handler-api.go
index 93108af96..e5ce25ac7 100644
--- a/cmd/handler-api.go
+++ b/cmd/handler-api.go
@@ -142,7 +142,7 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
 	// total_ram / ram_per_request
 	// ram_per_request is (2MiB+128KiB) * driveCount \
 	//            + 2 * 10MiB (default erasure block size v1) + 2 * 1MiB (default erasure block size v2)
-	blockSize := xioutil.BlockSizeLarge + xioutil.BlockSizeSmall
+	blockSize := xioutil.LargeBlock + xioutil.SmallBlock
 	apiRequestsMaxPerNode = int(maxMem / uint64(maxSetDrives*blockSize+int(blockSizeV1*2+blockSizeV2*2)))
 	if globalIsDistErasure {
 		logger.Info("Automatically configured API requests per node based on available memory on the system: %d", apiRequestsMaxPerNode)
diff --git a/cmd/storage-datatypes.go b/cmd/storage-datatypes.go
index 01d29a44a..cfe20f710 100644
--- a/cmd/storage-datatypes.go
+++ b/cmd/storage-datatypes.go
@@ -450,7 +450,7 @@ func (r *RenameDataInlineHandlerParams) Recycle() {
 	if r == nil {
 		return
 	}
-	if cap(r.FI.Data) >= xioutil.BlockSizeSmall {
+	if cap(r.FI.Data) >= xioutil.SmallBlock {
 		grid.PutByteBuffer(r.FI.Data)
 		r.FI.Data = nil
 	}
diff --git a/cmd/storage-rest-server.go b/cmd/storage-rest-server.go
index b926e6759..bf4ea2824 100644
--- a/cmd/storage-rest-server.go
+++ b/cmd/storage-rest-server.go
@@ -595,6 +595,8 @@ func (s *storageRESTServer) ReadFileStreamHandler(w http.ResponseWriter, r *http
 		// Windows can lock up with this optimization, so we fall back to regular copy.
 		sr, ok := rc.(*sendFileReader)
 		if ok {
+			// Sendfile sends in 4MiB chunks per sendfile syscall, which is more than enough
+			// for most setups.
 			_, err = rf.ReadFrom(sr.Reader)
 			if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
 				storageLogIf(r.Context(), err)
diff --git a/cmd/warm-backend-minio.go b/cmd/warm-backend-minio.go
index ee21632bd..60f272fed 100644
--- a/cmd/warm-backend-minio.go
+++ b/cmd/warm-backend-minio.go
@@ -42,7 +42,7 @@ const (
 	maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
 	maxPartsCount             = 10000
 	maxPartSize               = 1024 * 1024 * 1024 * 5
-	minPartSize               = 1024 * 1024 * 64 // chosen by us to be optimal for HDDs
+	minPartSize               = 1024 * 1024 * 128 // chosen by us to be optimal for HDDs
 )
 
 // optimalPartInfo - calculate the optimal part info for a given
diff --git a/cmd/xl-storage.go b/cmd/xl-storage.go
index f026e1af8..f46f2aebc 100644
--- a/cmd/xl-storage.go
+++ b/cmd/xl-storage.go
@@ -2131,11 +2131,15 @@ func (s *xlStorage) writeAllDirect(ctx context.Context, filePath string, fileSiz
 	var bufp *[]byte
 	switch {
-	case fileSize > 0 && fileSize >= xioutil.BlockSizeReallyLarge:
+	case fileSize > 0 && fileSize >= xioutil.XXLargeBlock*2:
+		// use a larger 8MiB buffer for really, really large streams.
+		bufp = xioutil.ODirectPoolXXLarge.Get().(*[]byte)
+		defer xioutil.ODirectPoolXXLarge.Put(bufp)
+	case fileSize > 0 && fileSize >= xioutil.XLargeBlock:
 		// use a larger 4MiB buffer for a really large streams.
 		bufp = xioutil.ODirectPoolXLarge.Get().(*[]byte)
 		defer xioutil.ODirectPoolXLarge.Put(bufp)
-	case fileSize <= xioutil.BlockSizeSmall:
+	case fileSize <= xioutil.SmallBlock:
 		bufp = xioutil.ODirectPoolSmall.Get().(*[]byte)
 		defer xioutil.ODirectPoolSmall.Put(bufp)
 	default:
diff --git a/internal/ioutil/ioutil.go b/internal/ioutil/ioutil.go
index 20a496afd..99fcf1d21 100644
--- a/internal/ioutil/ioutil.go
+++ b/internal/ioutil/ioutil.go
@@ -34,28 +34,35 @@ import (
 
 // Block sizes constant.
 const (
-	BlockSizeSmall       = 32 * humanize.KiByte // Default r/w block size for smaller objects.
-	BlockSizeLarge       = 1 * humanize.MiByte  // Default r/w block size for normal objects.
-	BlockSizeReallyLarge = 4 * humanize.MiByte  // Default r/w block size for very large objects.
+	SmallBlock   = 32 * humanize.KiByte // Default r/w block size for smaller objects.
+	LargeBlock   = 1 * humanize.MiByte  // Default r/w block size for normal objects.
+	XLargeBlock  = 4 * humanize.MiByte  // Default r/w block size for very large objects.
+	XXLargeBlock = 8 * humanize.MiByte  // Default r/w block size for very very large objects.
 )
 
 // aligned sync.Pool's
 var (
+	ODirectPoolXXLarge = sync.Pool{
+		New: func() interface{} {
+			b := disk.AlignedBlock(XXLargeBlock)
+			return &b
+		},
+	}
 	ODirectPoolXLarge = sync.Pool{
 		New: func() interface{} {
-			b := disk.AlignedBlock(BlockSizeReallyLarge)
+			b := disk.AlignedBlock(XLargeBlock)
 			return &b
 		},
 	}
 	ODirectPoolLarge = sync.Pool{
 		New: func() interface{} {
-			b := disk.AlignedBlock(BlockSizeLarge)
+			b := disk.AlignedBlock(LargeBlock)
 			return &b
 		},
 	}
 	ODirectPoolSmall = sync.Pool{
 		New: func() interface{} {
-			b := disk.AlignedBlock(BlockSizeSmall)
+			b := disk.AlignedBlock(SmallBlock)
 			return &b
 		},
 	}
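
Note: as a sanity check on the cmd/handler-api.go hunk above, the following self-contained Go sketch works through the per-node request estimate using the renamed SmallBlock/LargeBlock constants. The 32 GiB memory figure, the 16-drive set size, and the 10 MiB / 1 MiB erasure block sizes are illustrative assumptions, not values read from a real deployment or from the PR.

// Illustrative only: mirrors the shape of the formula in cmd/handler-api.go
// with made-up inputs.
package main

import "fmt"

const (
	kib = 1 << 10
	mib = 1 << 20

	smallBlock = 32 * kib // xioutil.SmallBlock
	largeBlock = 1 * mib  // xioutil.LargeBlock

	blockSizeV1 = 10 * mib // default erasure block size v1
	blockSizeV2 = 1 * mib  // default erasure block size v2
)

func main() {
	maxMem := uint64(32 * 1024 * mib) // assume 32 GiB of usable memory
	maxSetDrives := 16                // assume 16 drives per erasure set

	// Same shape as: maxMem / (maxSetDrives*blockSize + 2*blockSizeV1 + 2*blockSizeV2)
	blockSize := largeBlock + smallBlock
	perRequest := uint64(maxSetDrives*blockSize + blockSizeV1*2 + blockSizeV2*2)

	fmt.Println("requests per node:", maxMem/perRequest) // ~851 for these inputs
}

With these example inputs, ram_per_request comes to roughly 38.5 MiB, so 32 GiB of memory yields about 851 concurrent requests per node.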
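
Note: the new ODirectPoolXXLarge follows the same pattern as the existing pools, a sync.Pool of pointers to O_DIRECT-aligned byte slices. The sketch below reproduces that pattern outside the MinIO tree; alignedBlock is a hypothetical stand-in for the internal disk.AlignedBlock helper, and the 4096-byte alignment is an assumption about what direct I/O typically requires, not the library's actual value.

// Illustrative only: a stand-alone sketch of the aligned-buffer pool pattern
// added in internal/ioutil/ioutil.go.
package main

import (
	"fmt"
	"sync"
	"unsafe"
)

const xxLargeBlock = 8 << 20 // mirrors xioutil.XXLargeBlock (8 MiB)

// alignedBlock over-allocates and re-slices so the returned buffer starts on
// a 4096-byte boundary (assumed alignment; hypothetical helper).
func alignedBlock(size int) []byte {
	const align = 4096
	buf := make([]byte, size+align)
	off := int(uintptr(unsafe.Pointer(&buf[0])) & (align - 1))
	if off != 0 {
		off = align - off
	}
	return buf[off : off+size]
}

// The pool stores *[]byte, as in the diff, so Get/Put do not allocate a new
// slice header on every round trip.
var odirectPoolXXLarge = sync.Pool{
	New: func() interface{} {
		b := alignedBlock(xxLargeBlock)
		return &b
	},
}

func main() {
	// Usage pattern matching writeAllDirect: borrow a buffer for the copy,
	// return it when done.
	bufp := odirectPoolXXLarge.Get().(*[]byte)
	defer odirectPoolXXLarge.Put(bufp)

	fmt.Println("buffer length:", len(*bufp)) // 8388608
}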