diff --git a/cmd/background-newdisks-heal-ops.go b/cmd/background-newdisks-heal-ops.go index 7162b01b8..1250cb603 100644 --- a/cmd/background-newdisks-heal-ops.go +++ b/cmd/background-newdisks-heal-ops.go @@ -464,7 +464,10 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint } // Remove .healing.bin from all disks with similar heal-id - disks := z.serverPools[poolIdx].sets[setIdx].getDisks() + disks, err := z.GetDisks(poolIdx, setIdx) + if err != nil { + return err + } for _, disk := range disks { if disk == nil { diff --git a/cmd/data-scanner.go b/cmd/data-scanner.go index 4639e4f4a..3c1152f2d 100644 --- a/cmd/data-scanner.go +++ b/cmd/data-scanner.go @@ -31,6 +31,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" "github.com/minio/madmin-go/v3" @@ -249,7 +250,8 @@ type folderScanner struct { healObjectSelect uint32 // Do a heal check on an object once every n cycles. Must divide into healFolderInclude scanMode madmin.HealScanMode - weSleep func() bool + weSleep func() bool + shouldHeal func() bool disks []StorageAPI disksQuorum int @@ -304,11 +306,12 @@ type folderScanner struct { // The returned cache will always be valid, but may not be updated from the existing. // Before each operation sleepDuration is called which can be used to temporarily halt the scanner. // If the supplied context is canceled the function will return at the first chance. 
-func scanDataFolder(ctx context.Context, disks []StorageAPI, basePath string, healing bool, cache dataUsageCache, getSize getSizeFn, scanMode madmin.HealScanMode, weSleep func() bool) (dataUsageCache, error) { +func scanDataFolder(ctx context.Context, disks []StorageAPI, drive *xlStorage, cache dataUsageCache, getSize getSizeFn, scanMode madmin.HealScanMode, weSleep func() bool) (dataUsageCache, error) { switch cache.Info.Name { case "", dataUsageRoot: return cache, errors.New("internal error: root scan attempted") } + basePath := drive.drivePath updatePath, closeDisk := globalScannerMetrics.currentPathUpdater(basePath, cache.Info.Name) defer closeDisk() @@ -319,7 +322,7 @@ func scanDataFolder(ctx context.Context, disks []StorageAPI, basePath string, he newCache: dataUsageCache{Info: cache.Info}, updateCache: dataUsageCache{Info: cache.Info}, dataUsageScannerDebug: false, - healObjectSelect: healObjectSelectProb, + healObjectSelect: 0, scanMode: scanMode, weSleep: weSleep, updates: cache.Info.updates, @@ -328,6 +331,32 @@ func scanDataFolder(ctx context.Context, disks []StorageAPI, basePath string, he disksQuorum: len(disks) / 2, } + var skipHeal atomic.Bool + if !globalIsErasure || cache.Info.SkipHealing { + skipHeal.Store(true) + } + + // Check if we should do healing at all. + s.shouldHeal = func() bool { + if skipHeal.Load() { + return false + } + if s.healObjectSelect == 0 { + return false + } + if di, _ := drive.DiskInfo(ctx, DiskInfoOptions{}); di.Healing { + skipHeal.Store(true) + return false + } + return true + } + + // Enable healing in XL mode. + if globalIsErasure && !cache.Info.SkipHealing { + // Do a heal check on an object once every n cycles. Must divide into healFolderInclude + s.healObjectSelect = healObjectSelectProb + } + done := ctx.Done() // Read top level in bucket. 
@@ -338,7 +367,7 @@ func scanDataFolder(ctx context.Context, disks []StorageAPI, basePath string, he } root := dataUsageEntry{} folder := cachedFolder{name: cache.Info.Name, objectHealProbDiv: 1} - err := s.scanFolder(ctx, folder, healing, &root) + err := s.scanFolder(ctx, folder, &root) if err != nil { // No useful information... return cache, err @@ -369,7 +398,7 @@ func (f *folderScanner) sendUpdate() { // Files found in the folders will be added to f.newCache. // If final is provided folders will be put into f.newFolders or f.existingFolders. // If final is not provided the folders found are returned from the function. -func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, healing bool, into *dataUsageEntry) error { +func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, into *dataUsageEntry) error { done := ctx.Done() scannerLogPrefix := color.Green("folder-scanner:") @@ -476,14 +505,9 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, hea replication: replicationCfg, } - item.heal.enabled = thisHash.modAlt(f.oldCache.Info.NextCycle/folder.objectHealProbDiv, f.healObjectSelect/folder.objectHealProbDiv) && globalIsErasure + item.heal.enabled = thisHash.modAlt(f.oldCache.Info.NextCycle/folder.objectHealProbDiv, f.healObjectSelect/folder.objectHealProbDiv) && f.shouldHeal() item.heal.bitrot = f.scanMode == madmin.HealDeepScan - // if the drive belongs to an erasure set - // that is already being healed, skip the - // healing attempt on this drive. - item.heal.enabled = item.heal.enabled && !healing - sz, err := f.getSize(item) if err != nil && err != errIgnoreFileContrib { wait() // wait to proceed to next entry. 
@@ -565,7 +589,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, hea if !into.Compacted { dst = &dataUsageEntry{Compacted: false} } - if err := f.scanFolder(ctx, folder, healing, dst); err != nil { + if err := f.scanFolder(ctx, folder, dst); err != nil { return } if !into.Compacted { @@ -646,8 +670,8 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, hea } // Scan for healing - if healing || len(abandonedChildren) == 0 { - // if disks are already healing or we have no abandoned childrens do not need to heal + if len(abandonedChildren) == 0 || !f.shouldHeal() { + // If we are not heal scanning, return now. break } @@ -681,6 +705,9 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, hea healObjectsPrefix := color.Green("healObjects:") for k := range abandonedChildren { + if !f.shouldHeal() { + break + } bucket, prefix := path2BucketObject(k) stopFn := globalScannerMetrics.time(scannerMetricCheckMissing) f.updateCurrentPath(k) @@ -714,6 +741,10 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, hea }, // Some disks have data for this. 
partial: func(entries metaCacheEntries, errs []error) { + if !f.shouldHeal() { + cancel() + return + } entry, ok := entries.resolve(&resolver) if !ok { // check if we can get one entry at least diff --git a/cmd/data-usage-cache.go b/cmd/data-usage-cache.go index a419f690c..22ee0522f 100644 --- a/cmd/data-usage-cache.go +++ b/cmd/data-usage-cache.go @@ -352,6 +352,9 @@ type dataUsageCacheInfo struct { Name string NextCycle uint32 LastUpdate time.Time + // indicates if the disk is being healed and scanner + // should skip healing the disk + SkipHealing bool // Active lifecycle, if any on the bucket lifeCycle *lifecycle.Lifecycle `msg:"-"` diff --git a/cmd/data-usage-cache_gen.go b/cmd/data-usage-cache_gen.go index b639be9aa..69e0cc6c8 100644 --- a/cmd/data-usage-cache_gen.go +++ b/cmd/data-usage-cache_gen.go @@ -400,62 +400,27 @@ func (z *dataUsageCache) DecodeMsg(dc *msgp.Reader) (err error) { } switch msgp.UnsafeString(field) { case "Info": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() + err = z.Info.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Info") return } - for zb0002 > 0 { - zb0002-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - switch msgp.UnsafeString(field) { - case "Name": - z.Info.Name, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Info", "Name") - return - } - case "NextCycle": - z.Info.NextCycle, err = dc.ReadUint32() - if err != nil { - err = msgp.WrapError(err, "Info", "NextCycle") - return - } - case "LastUpdate": - z.Info.LastUpdate, err = dc.ReadTime() - if err != nil { - err = msgp.WrapError(err, "Info", "LastUpdate") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - } - } case "Cache": - var zb0003 uint32 - zb0003, err = dc.ReadMapHeader() + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() if err != nil { err = msgp.WrapError(err, "Cache") return } if z.Cache == nil { - z.Cache = 
make(map[string]dataUsageEntry, zb0003) + z.Cache = make(map[string]dataUsageEntry, zb0002) } else if len(z.Cache) > 0 { for key := range z.Cache { delete(z.Cache, key) } } - for zb0003 > 0 { - zb0003-- + for zb0002 > 0 { + zb0002-- var za0001 string var za0002 dataUsageEntry za0001, err = dc.ReadString() @@ -489,35 +454,9 @@ func (z *dataUsageCache) EncodeMsg(en *msgp.Writer) (err error) { if err != nil { return } - // map header, size 3 - // write "Name" - err = en.Append(0x83, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + err = z.Info.EncodeMsg(en) if err != nil { - return - } - err = en.WriteString(z.Info.Name) - if err != nil { - err = msgp.WrapError(err, "Info", "Name") - return - } - // write "NextCycle" - err = en.Append(0xa9, 0x4e, 0x65, 0x78, 0x74, 0x43, 0x79, 0x63, 0x6c, 0x65) - if err != nil { - return - } - err = en.WriteUint32(z.Info.NextCycle) - if err != nil { - err = msgp.WrapError(err, "Info", "NextCycle") - return - } - // write "LastUpdate" - err = en.Append(0xaa, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65) - if err != nil { - return - } - err = en.WriteTime(z.Info.LastUpdate) - if err != nil { - err = msgp.WrapError(err, "Info", "LastUpdate") + err = msgp.WrapError(err, "Info") return } // write "Cache" @@ -551,16 +490,11 @@ func (z *dataUsageCache) MarshalMsg(b []byte) (o []byte, err error) { // map header, size 2 // string "Info" o = append(o, 0x82, 0xa4, 0x49, 0x6e, 0x66, 0x6f) - // map header, size 3 - // string "Name" - o = append(o, 0x83, 0xa4, 0x4e, 0x61, 0x6d, 0x65) - o = msgp.AppendString(o, z.Info.Name) - // string "NextCycle" - o = append(o, 0xa9, 0x4e, 0x65, 0x78, 0x74, 0x43, 0x79, 0x63, 0x6c, 0x65) - o = msgp.AppendUint32(o, z.Info.NextCycle) - // string "LastUpdate" - o = append(o, 0xaa, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65) - o = msgp.AppendTime(o, z.Info.LastUpdate) + o, err = z.Info.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Info") + return + } // string "Cache" o = append(o, 0xa5, 
0x43, 0x61, 0x63, 0x68, 0x65) o = msgp.AppendMapHeader(o, uint32(len(z.Cache))) @@ -594,64 +528,29 @@ func (z *dataUsageCache) UnmarshalMsg(bts []byte) (o []byte, err error) { } switch msgp.UnsafeString(field) { case "Info": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + bts, err = z.Info.UnmarshalMsg(bts) if err != nil { err = msgp.WrapError(err, "Info") return } - for zb0002 > 0 { - zb0002-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - switch msgp.UnsafeString(field) { - case "Name": - z.Info.Name, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "Name") - return - } - case "NextCycle": - z.Info.NextCycle, bts, err = msgp.ReadUint32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "NextCycle") - return - } - case "LastUpdate": - z.Info.LastUpdate, bts, err = msgp.ReadTimeBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "LastUpdate") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - } - } case "Cache": - var zb0003 uint32 - zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") return } if z.Cache == nil { - z.Cache = make(map[string]dataUsageEntry, zb0003) + z.Cache = make(map[string]dataUsageEntry, zb0002) } else if len(z.Cache) > 0 { for key := range z.Cache { delete(z.Cache, key) } } - for zb0003 > 0 { + for zb0002 > 0 { var za0001 string var za0002 dataUsageEntry - zb0003-- + zb0002-- za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") @@ -678,7 +577,7 @@ func (z *dataUsageCache) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *dataUsageCache) Msgsize() (s int) { - 
s = 1 + 5 + 1 + 5 + msgp.StringPrefixSize + len(z.Info.Name) + 10 + msgp.Uint32Size + 11 + msgp.TimeSize + 6 + msgp.MapHeaderSize + s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.MapHeaderSize if z.Cache != nil { for za0001, za0002 := range z.Cache { _ = za0002 @@ -724,6 +623,12 @@ func (z *dataUsageCacheInfo) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "LastUpdate") return } + case "SkipHealing": + z.SkipHealing, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "SkipHealing") + return + } default: err = dc.Skip() if err != nil { @@ -736,10 +641,10 @@ func (z *dataUsageCacheInfo) DecodeMsg(dc *msgp.Reader) (err error) { } // EncodeMsg implements msgp.Encodable -func (z dataUsageCacheInfo) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 3 +func (z *dataUsageCacheInfo) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 4 // write "Name" - err = en.Append(0x83, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + err = en.Append(0x84, 0xa4, 0x4e, 0x61, 0x6d, 0x65) if err != nil { return } @@ -768,15 +673,25 @@ func (z dataUsageCacheInfo) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "LastUpdate") return } + // write "SkipHealing" + err = en.Append(0xab, 0x53, 0x6b, 0x69, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x69, 0x6e, 0x67) + if err != nil { + return + } + err = en.WriteBool(z.SkipHealing) + if err != nil { + err = msgp.WrapError(err, "SkipHealing") + return + } return } // MarshalMsg implements msgp.Marshaler -func (z dataUsageCacheInfo) MarshalMsg(b []byte) (o []byte, err error) { +func (z *dataUsageCacheInfo) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 3 + // map header, size 4 // string "Name" - o = append(o, 0x83, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + o = append(o, 0x84, 0xa4, 0x4e, 0x61, 0x6d, 0x65) o = msgp.AppendString(o, z.Name) // string "NextCycle" o = append(o, 0xa9, 0x4e, 0x65, 0x78, 0x74, 0x43, 0x79, 0x63, 0x6c, 0x65) @@ -784,6 +699,9 @@ func (z 
dataUsageCacheInfo) MarshalMsg(b []byte) (o []byte, err error) { // string "LastUpdate" o = append(o, 0xaa, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65) o = msgp.AppendTime(o, z.LastUpdate) + // string "SkipHealing" + o = append(o, 0xab, 0x53, 0x6b, 0x69, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x69, 0x6e, 0x67) + o = msgp.AppendBool(o, z.SkipHealing) return } @@ -823,6 +741,12 @@ func (z *dataUsageCacheInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "LastUpdate") return } + case "SkipHealing": + z.SkipHealing, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SkipHealing") + return + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -836,8 +760,8 @@ func (z *dataUsageCacheInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z dataUsageCacheInfo) Msgsize() (s int) { - s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 10 + msgp.Uint32Size + 11 + msgp.TimeSize +func (z *dataUsageCacheInfo) Msgsize() (s int) { + s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 10 + msgp.Uint32Size + 11 + msgp.TimeSize + 12 + msgp.BoolSize return } @@ -860,62 +784,27 @@ func (z *dataUsageCacheV2) DecodeMsg(dc *msgp.Reader) (err error) { } switch msgp.UnsafeString(field) { case "Info": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() + err = z.Info.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Info") return } - for zb0002 > 0 { - zb0002-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - switch msgp.UnsafeString(field) { - case "Name": - z.Info.Name, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Info", "Name") - return - } - case "NextCycle": - z.Info.NextCycle, err = dc.ReadUint32() - if err != nil { - err = msgp.WrapError(err, "Info", "NextCycle") - return - } - case "LastUpdate": - z.Info.LastUpdate, err = 
dc.ReadTime() - if err != nil { - err = msgp.WrapError(err, "Info", "LastUpdate") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - } - } case "Cache": - var zb0003 uint32 - zb0003, err = dc.ReadMapHeader() + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() if err != nil { err = msgp.WrapError(err, "Cache") return } if z.Cache == nil { - z.Cache = make(map[string]dataUsageEntryV2, zb0003) + z.Cache = make(map[string]dataUsageEntryV2, zb0002) } else if len(z.Cache) > 0 { for key := range z.Cache { delete(z.Cache, key) } } - for zb0003 > 0 { - zb0003-- + for zb0002 > 0 { + zb0002-- var za0001 string var za0002 dataUsageEntryV2 za0001, err = dc.ReadString() @@ -960,64 +849,29 @@ func (z *dataUsageCacheV2) UnmarshalMsg(bts []byte) (o []byte, err error) { } switch msgp.UnsafeString(field) { case "Info": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + bts, err = z.Info.UnmarshalMsg(bts) if err != nil { err = msgp.WrapError(err, "Info") return } - for zb0002 > 0 { - zb0002-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - switch msgp.UnsafeString(field) { - case "Name": - z.Info.Name, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "Name") - return - } - case "NextCycle": - z.Info.NextCycle, bts, err = msgp.ReadUint32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "NextCycle") - return - } - case "LastUpdate": - z.Info.LastUpdate, bts, err = msgp.ReadTimeBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "LastUpdate") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - } - } case "Cache": - var zb0003 uint32 - zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") 
return } if z.Cache == nil { - z.Cache = make(map[string]dataUsageEntryV2, zb0003) + z.Cache = make(map[string]dataUsageEntryV2, zb0002) } else if len(z.Cache) > 0 { for key := range z.Cache { delete(z.Cache, key) } } - for zb0003 > 0 { + for zb0002 > 0 { var za0001 string var za0002 dataUsageEntryV2 - zb0003-- + zb0002-- za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") @@ -1044,7 +898,7 @@ func (z *dataUsageCacheV2) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *dataUsageCacheV2) Msgsize() (s int) { - s = 1 + 5 + 1 + 5 + msgp.StringPrefixSize + len(z.Info.Name) + 10 + msgp.Uint32Size + 11 + msgp.TimeSize + 6 + msgp.MapHeaderSize + s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.MapHeaderSize if z.Cache != nil { for za0001, za0002 := range z.Cache { _ = za0002 @@ -1073,62 +927,27 @@ func (z *dataUsageCacheV3) DecodeMsg(dc *msgp.Reader) (err error) { } switch msgp.UnsafeString(field) { case "Info": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() + err = z.Info.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Info") return } - for zb0002 > 0 { - zb0002-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - switch msgp.UnsafeString(field) { - case "Name": - z.Info.Name, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Info", "Name") - return - } - case "NextCycle": - z.Info.NextCycle, err = dc.ReadUint32() - if err != nil { - err = msgp.WrapError(err, "Info", "NextCycle") - return - } - case "LastUpdate": - z.Info.LastUpdate, err = dc.ReadTime() - if err != nil { - err = msgp.WrapError(err, "Info", "LastUpdate") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - } - } case "Cache": - var zb0003 uint32 - zb0003, err = dc.ReadMapHeader() + var zb0002 uint32 + zb0002, err = 
dc.ReadMapHeader() if err != nil { err = msgp.WrapError(err, "Cache") return } if z.Cache == nil { - z.Cache = make(map[string]dataUsageEntryV3, zb0003) + z.Cache = make(map[string]dataUsageEntryV3, zb0002) } else if len(z.Cache) > 0 { for key := range z.Cache { delete(z.Cache, key) } } - for zb0003 > 0 { - zb0003-- + for zb0002 > 0 { + zb0002-- var za0001 string var za0002 dataUsageEntryV3 za0001, err = dc.ReadString() @@ -1173,64 +992,29 @@ func (z *dataUsageCacheV3) UnmarshalMsg(bts []byte) (o []byte, err error) { } switch msgp.UnsafeString(field) { case "Info": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + bts, err = z.Info.UnmarshalMsg(bts) if err != nil { err = msgp.WrapError(err, "Info") return } - for zb0002 > 0 { - zb0002-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - switch msgp.UnsafeString(field) { - case "Name": - z.Info.Name, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "Name") - return - } - case "NextCycle": - z.Info.NextCycle, bts, err = msgp.ReadUint32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "NextCycle") - return - } - case "LastUpdate": - z.Info.LastUpdate, bts, err = msgp.ReadTimeBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "LastUpdate") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - } - } case "Cache": - var zb0003 uint32 - zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") return } if z.Cache == nil { - z.Cache = make(map[string]dataUsageEntryV3, zb0003) + z.Cache = make(map[string]dataUsageEntryV3, zb0002) } else if len(z.Cache) > 0 { for key := range z.Cache { delete(z.Cache, key) } } - for zb0003 > 0 { + for zb0002 > 0 { var za0001 string var za0002 dataUsageEntryV3 - 
zb0003-- + zb0002-- za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") @@ -1257,7 +1041,7 @@ func (z *dataUsageCacheV3) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *dataUsageCacheV3) Msgsize() (s int) { - s = 1 + 5 + 1 + 5 + msgp.StringPrefixSize + len(z.Info.Name) + 10 + msgp.Uint32Size + 11 + msgp.TimeSize + 6 + msgp.MapHeaderSize + s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.MapHeaderSize if z.Cache != nil { for za0001, za0002 := range z.Cache { _ = za0002 @@ -1286,62 +1070,27 @@ func (z *dataUsageCacheV4) DecodeMsg(dc *msgp.Reader) (err error) { } switch msgp.UnsafeString(field) { case "Info": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() + err = z.Info.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Info") return } - for zb0002 > 0 { - zb0002-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - switch msgp.UnsafeString(field) { - case "Name": - z.Info.Name, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Info", "Name") - return - } - case "NextCycle": - z.Info.NextCycle, err = dc.ReadUint32() - if err != nil { - err = msgp.WrapError(err, "Info", "NextCycle") - return - } - case "LastUpdate": - z.Info.LastUpdate, err = dc.ReadTime() - if err != nil { - err = msgp.WrapError(err, "Info", "LastUpdate") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - } - } case "Cache": - var zb0003 uint32 - zb0003, err = dc.ReadMapHeader() + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() if err != nil { err = msgp.WrapError(err, "Cache") return } if z.Cache == nil { - z.Cache = make(map[string]dataUsageEntryV4, zb0003) + z.Cache = make(map[string]dataUsageEntryV4, zb0002) } else if len(z.Cache) > 0 { for key := range z.Cache { delete(z.Cache, key) } } - for zb0003 > 0 { - 
zb0003-- + for zb0002 > 0 { + zb0002-- var za0001 string var za0002 dataUsageEntryV4 za0001, err = dc.ReadString() @@ -1386,64 +1135,29 @@ func (z *dataUsageCacheV4) UnmarshalMsg(bts []byte) (o []byte, err error) { } switch msgp.UnsafeString(field) { case "Info": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + bts, err = z.Info.UnmarshalMsg(bts) if err != nil { err = msgp.WrapError(err, "Info") return } - for zb0002 > 0 { - zb0002-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - switch msgp.UnsafeString(field) { - case "Name": - z.Info.Name, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "Name") - return - } - case "NextCycle": - z.Info.NextCycle, bts, err = msgp.ReadUint32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "NextCycle") - return - } - case "LastUpdate": - z.Info.LastUpdate, bts, err = msgp.ReadTimeBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "LastUpdate") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - } - } case "Cache": - var zb0003 uint32 - zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") return } if z.Cache == nil { - z.Cache = make(map[string]dataUsageEntryV4, zb0003) + z.Cache = make(map[string]dataUsageEntryV4, zb0002) } else if len(z.Cache) > 0 { for key := range z.Cache { delete(z.Cache, key) } } - for zb0003 > 0 { + for zb0002 > 0 { var za0001 string var za0002 dataUsageEntryV4 - zb0003-- + zb0002-- za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") @@ -1470,7 +1184,7 @@ func (z *dataUsageCacheV4) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized 
message func (z *dataUsageCacheV4) Msgsize() (s int) { - s = 1 + 5 + 1 + 5 + msgp.StringPrefixSize + len(z.Info.Name) + 10 + msgp.Uint32Size + 11 + msgp.TimeSize + 6 + msgp.MapHeaderSize + s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.MapHeaderSize if z.Cache != nil { for za0001, za0002 := range z.Cache { _ = za0002 @@ -1499,62 +1213,27 @@ func (z *dataUsageCacheV5) DecodeMsg(dc *msgp.Reader) (err error) { } switch msgp.UnsafeString(field) { case "Info": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() + err = z.Info.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Info") return } - for zb0002 > 0 { - zb0002-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - switch msgp.UnsafeString(field) { - case "Name": - z.Info.Name, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Info", "Name") - return - } - case "NextCycle": - z.Info.NextCycle, err = dc.ReadUint32() - if err != nil { - err = msgp.WrapError(err, "Info", "NextCycle") - return - } - case "LastUpdate": - z.Info.LastUpdate, err = dc.ReadTime() - if err != nil { - err = msgp.WrapError(err, "Info", "LastUpdate") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - } - } case "Cache": - var zb0003 uint32 - zb0003, err = dc.ReadMapHeader() + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() if err != nil { err = msgp.WrapError(err, "Cache") return } if z.Cache == nil { - z.Cache = make(map[string]dataUsageEntryV5, zb0003) + z.Cache = make(map[string]dataUsageEntryV5, zb0002) } else if len(z.Cache) > 0 { for key := range z.Cache { delete(z.Cache, key) } } - for zb0003 > 0 { - zb0003-- + for zb0002 > 0 { + zb0002-- var za0001 string var za0002 dataUsageEntryV5 za0001, err = dc.ReadString() @@ -1599,64 +1278,29 @@ func (z *dataUsageCacheV5) UnmarshalMsg(bts []byte) (o []byte, err error) { } switch msgp.UnsafeString(field) { case "Info": - var zb0002 uint32 - zb0002, bts, err = 
msgp.ReadMapHeaderBytes(bts) + bts, err = z.Info.UnmarshalMsg(bts) if err != nil { err = msgp.WrapError(err, "Info") return } - for zb0002 > 0 { - zb0002-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - switch msgp.UnsafeString(field) { - case "Name": - z.Info.Name, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "Name") - return - } - case "NextCycle": - z.Info.NextCycle, bts, err = msgp.ReadUint32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "NextCycle") - return - } - case "LastUpdate": - z.Info.LastUpdate, bts, err = msgp.ReadTimeBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "LastUpdate") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - } - } case "Cache": - var zb0003 uint32 - zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") return } if z.Cache == nil { - z.Cache = make(map[string]dataUsageEntryV5, zb0003) + z.Cache = make(map[string]dataUsageEntryV5, zb0002) } else if len(z.Cache) > 0 { for key := range z.Cache { delete(z.Cache, key) } } - for zb0003 > 0 { + for zb0002 > 0 { var za0001 string var za0002 dataUsageEntryV5 - zb0003-- + zb0002-- za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") @@ -1683,7 +1327,7 @@ func (z *dataUsageCacheV5) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *dataUsageCacheV5) Msgsize() (s int) { - s = 1 + 5 + 1 + 5 + msgp.StringPrefixSize + len(z.Info.Name) + 10 + msgp.Uint32Size + 11 + msgp.TimeSize + 6 + msgp.MapHeaderSize + s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.MapHeaderSize if z.Cache != nil { for za0001, za0002 := range z.Cache { _ = 
za0002 @@ -1712,62 +1356,27 @@ func (z *dataUsageCacheV6) DecodeMsg(dc *msgp.Reader) (err error) { } switch msgp.UnsafeString(field) { case "Info": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() + err = z.Info.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Info") return } - for zb0002 > 0 { - zb0002-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - switch msgp.UnsafeString(field) { - case "Name": - z.Info.Name, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Info", "Name") - return - } - case "NextCycle": - z.Info.NextCycle, err = dc.ReadUint32() - if err != nil { - err = msgp.WrapError(err, "Info", "NextCycle") - return - } - case "LastUpdate": - z.Info.LastUpdate, err = dc.ReadTime() - if err != nil { - err = msgp.WrapError(err, "Info", "LastUpdate") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - } - } case "Cache": - var zb0003 uint32 - zb0003, err = dc.ReadMapHeader() + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() if err != nil { err = msgp.WrapError(err, "Cache") return } if z.Cache == nil { - z.Cache = make(map[string]dataUsageEntryV6, zb0003) + z.Cache = make(map[string]dataUsageEntryV6, zb0002) } else if len(z.Cache) > 0 { for key := range z.Cache { delete(z.Cache, key) } } - for zb0003 > 0 { - zb0003-- + for zb0002 > 0 { + zb0002-- var za0001 string var za0002 dataUsageEntryV6 za0001, err = dc.ReadString() @@ -1812,64 +1421,29 @@ func (z *dataUsageCacheV6) UnmarshalMsg(bts []byte) (o []byte, err error) { } switch msgp.UnsafeString(field) { case "Info": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + bts, err = z.Info.UnmarshalMsg(bts) if err != nil { err = msgp.WrapError(err, "Info") return } - for zb0002 > 0 { - zb0002-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - switch msgp.UnsafeString(field) { - 
case "Name": - z.Info.Name, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "Name") - return - } - case "NextCycle": - z.Info.NextCycle, bts, err = msgp.ReadUint32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "NextCycle") - return - } - case "LastUpdate": - z.Info.LastUpdate, bts, err = msgp.ReadTimeBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "LastUpdate") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - } - } case "Cache": - var zb0003 uint32 - zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") return } if z.Cache == nil { - z.Cache = make(map[string]dataUsageEntryV6, zb0003) + z.Cache = make(map[string]dataUsageEntryV6, zb0002) } else if len(z.Cache) > 0 { for key := range z.Cache { delete(z.Cache, key) } } - for zb0003 > 0 { + for zb0002 > 0 { var za0001 string var za0002 dataUsageEntryV6 - zb0003-- + zb0002-- za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") @@ -1896,7 +1470,7 @@ func (z *dataUsageCacheV6) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *dataUsageCacheV6) Msgsize() (s int) { - s = 1 + 5 + 1 + 5 + msgp.StringPrefixSize + len(z.Info.Name) + 10 + msgp.Uint32Size + 11 + msgp.TimeSize + 6 + msgp.MapHeaderSize + s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.MapHeaderSize if z.Cache != nil { for za0001, za0002 := range z.Cache { _ = za0002 @@ -1925,62 +1499,27 @@ func (z *dataUsageCacheV7) DecodeMsg(dc *msgp.Reader) (err error) { } switch msgp.UnsafeString(field) { case "Info": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() + err = z.Info.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Info") return } - for zb0002 > 0 { 
- zb0002-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - switch msgp.UnsafeString(field) { - case "Name": - z.Info.Name, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Info", "Name") - return - } - case "NextCycle": - z.Info.NextCycle, err = dc.ReadUint32() - if err != nil { - err = msgp.WrapError(err, "Info", "NextCycle") - return - } - case "LastUpdate": - z.Info.LastUpdate, err = dc.ReadTime() - if err != nil { - err = msgp.WrapError(err, "Info", "LastUpdate") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - } - } case "Cache": - var zb0003 uint32 - zb0003, err = dc.ReadMapHeader() + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() if err != nil { err = msgp.WrapError(err, "Cache") return } if z.Cache == nil { - z.Cache = make(map[string]dataUsageEntryV7, zb0003) + z.Cache = make(map[string]dataUsageEntryV7, zb0002) } else if len(z.Cache) > 0 { for key := range z.Cache { delete(z.Cache, key) } } - for zb0003 > 0 { - zb0003-- + for zb0002 > 0 { + zb0002-- var za0001 string var za0002 dataUsageEntryV7 za0001, err = dc.ReadString() @@ -2025,64 +1564,29 @@ func (z *dataUsageCacheV7) UnmarshalMsg(bts []byte) (o []byte, err error) { } switch msgp.UnsafeString(field) { case "Info": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + bts, err = z.Info.UnmarshalMsg(bts) if err != nil { err = msgp.WrapError(err, "Info") return } - for zb0002 > 0 { - zb0002-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - switch msgp.UnsafeString(field) { - case "Name": - z.Info.Name, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "Name") - return - } - case "NextCycle": - z.Info.NextCycle, bts, err = msgp.ReadUint32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "NextCycle") - return - } - case 
"LastUpdate": - z.Info.LastUpdate, bts, err = msgp.ReadTimeBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Info", "LastUpdate") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "Info") - return - } - } - } case "Cache": - var zb0003 uint32 - zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") return } if z.Cache == nil { - z.Cache = make(map[string]dataUsageEntryV7, zb0003) + z.Cache = make(map[string]dataUsageEntryV7, zb0002) } else if len(z.Cache) > 0 { for key := range z.Cache { delete(z.Cache, key) } } - for zb0003 > 0 { + for zb0002 > 0 { var za0001 string var za0002 dataUsageEntryV7 - zb0003-- + zb0002-- za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") @@ -2109,7 +1613,7 @@ func (z *dataUsageCacheV7) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *dataUsageCacheV7) Msgsize() (s int) { - s = 1 + 5 + 1 + 5 + msgp.StringPrefixSize + len(z.Info.Name) + 10 + msgp.Uint32Size + 11 + msgp.TimeSize + 6 + msgp.MapHeaderSize + s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.MapHeaderSize if z.Cache != nil { for za0001, za0002 := range z.Cache { _ = za0002 diff --git a/cmd/data-usage_test.go b/cmd/data-usage_test.go index ba016e1de..792c4293f 100644 --- a/cmd/data-usage_test.go +++ b/cmd/data-usage_test.go @@ -26,6 +26,9 @@ import ( "path" "path/filepath" "testing" + "time" + + "github.com/minio/minio/internal/cachevalue" ) type usageTestFile struct { @@ -61,10 +64,13 @@ func TestDataUsageUpdate(t *testing.T) { } return } - + xls := xlStorage{drivePath: base, diskInfoCache: cachevalue.New[DiskInfo]()} + xls.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{}, func(ctx context.Context) (DiskInfo, error) { + return DiskInfo{Total: 1 << 40, Free: 1 << 
40}, nil + }) weSleep := func() bool { return false } - got, err := scanDataFolder(context.Background(), nil, base, false, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize, 0, weSleep) + got, err := scanDataFolder(context.Background(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize, 0, weSleep) if err != nil { t.Fatal(err) } @@ -174,7 +180,7 @@ func TestDataUsageUpdate(t *testing.T) { } // Changed dir must be picked up in this many cycles. for i := 0; i < dataUsageUpdateDirCycles; i++ { - got, err = scanDataFolder(context.Background(), nil, base, false, got, getSize, 0, weSleep) + got, err = scanDataFolder(context.Background(), nil, &xls, got, getSize, 0, weSleep) got.Info.NextCycle++ if err != nil { t.Fatal(err) @@ -283,8 +289,12 @@ func TestDataUsageUpdatePrefix(t *testing.T) { } weSleep := func() bool { return false } + xls := xlStorage{drivePath: base, diskInfoCache: cachevalue.New[DiskInfo]()} + xls.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{}, func(ctx context.Context) (DiskInfo, error) { + return DiskInfo{Total: 1 << 40, Free: 1 << 40}, nil + }) - got, err := scanDataFolder(context.Background(), nil, base, false, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize, 0, weSleep) + got, err := scanDataFolder(context.Background(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize, 0, weSleep) if err != nil { t.Fatal(err) } @@ -419,7 +429,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) { } // Changed dir must be picked up in this many cycles. 
for i := 0; i < dataUsageUpdateDirCycles; i++ { - got, err = scanDataFolder(context.Background(), nil, base, false, got, getSize, 0, weSleep) + got, err = scanDataFolder(context.Background(), nil, &xls, got, getSize, 0, weSleep) got.Info.NextCycle++ if err != nil { t.Fatal(err) @@ -567,8 +577,12 @@ func TestDataUsageCacheSerialize(t *testing.T) { } return } + xls := xlStorage{drivePath: base, diskInfoCache: cachevalue.New[DiskInfo]()} + xls.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{}, func(ctx context.Context) (DiskInfo, error) { + return DiskInfo{Total: 1 << 40, Free: 1 << 40}, nil + }) weSleep := func() bool { return false } - want, err := scanDataFolder(context.Background(), nil, base, false, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize, 0, weSleep) + want, err := scanDataFolder(context.Background(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize, 0, weSleep) if err != nil { t.Fatal(err) } diff --git a/cmd/erasure-healing-common_test.go b/cmd/erasure-healing-common_test.go index 7782cd105..99b1ca99e 100644 --- a/cmd/erasure-healing-common_test.go +++ b/cmd/erasure-healing-common_test.go @@ -233,7 +233,7 @@ func TestListOnlineDisks(t *testing.T) { data := bytes.Repeat([]byte("a"), smallFileThreshold*16) z := obj.(*erasureServerPools) - erasureDisks, _, err := z.GetDisks(0, 0) + erasureDisks, err := z.GetDisks(0, 0) if err != nil { t.Fatal(err) } @@ -409,7 +409,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) { data := bytes.Repeat([]byte("a"), smallFileThreshold/2) z := obj.(*erasureServerPools) - erasureDisks, _, err := z.GetDisks(0, 0) + erasureDisks, err := z.GetDisks(0, 0) if err != nil { t.Fatal(err) } diff --git a/cmd/erasure-server-pool.go b/cmd/erasure-server-pool.go index 477fff3e9..386d03256 100644 --- a/cmd/erasure-server-pool.go +++ b/cmd/erasure-server-pool.go @@ -302,12 +302,11 @@ func (z *erasureServerPools) GetRawData(ctx context.Context, volume, file string } // Return the disks 
belonging to the poolIdx, and setIdx. -func (z *erasureServerPools) GetDisks(poolIdx, setIdx int) ([]StorageAPI, bool, error) { +func (z *erasureServerPools) GetDisks(poolIdx, setIdx int) ([]StorageAPI, error) { if poolIdx < len(z.serverPools) && setIdx < len(z.serverPools[poolIdx].sets) { - disks, healing := z.serverPools[poolIdx].sets[setIdx].getOnlineDisksWithHealing(true) - return disks, healing, nil + return z.serverPools[poolIdx].sets[setIdx].getDisks(), nil } - return nil, false, fmt.Errorf("Matching pool %s, set %s not found", humanize.Ordinal(poolIdx+1), humanize.Ordinal(setIdx+1)) + return nil, fmt.Errorf("Matching pool %s, set %s not found", humanize.Ordinal(poolIdx+1), humanize.Ordinal(setIdx+1)) } // Return the count of disks in each pool diff --git a/cmd/erasure.go b/cmd/erasure.go index 18ff512ce..08e26bd27 100644 --- a/cmd/erasure.go +++ b/cmd/erasure.go @@ -381,7 +381,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa } // Collect disks we can use. 
- disks, _ := er.getOnlineDisksWithHealing(false) + disks, healing := er.getOnlineDisksWithHealing(false) if len(disks) == 0 { scannerLogIf(ctx, errors.New("data-scanner: all drives are offline or being healed, skipping scanner cycle")) return nil @@ -497,6 +497,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa if cache.Info.Name == "" { cache.Info.Name = bucket.Name } + cache.Info.SkipHealing = healing cache.Info.NextCycle = wantCycle if cache.Info.Name != bucket.Name { cache.Info = dataUsageCacheInfo{ diff --git a/cmd/object-api-interface.go b/cmd/object-api-interface.go index 0ba517312..8960b6ca6 100644 --- a/cmd/object-api-interface.go +++ b/cmd/object-api-interface.go @@ -287,8 +287,8 @@ type ObjectLayer interface { AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) - GetDisks(poolIdx, setIdx int) ([]StorageAPI, bool, error) // return the disks belonging to pool and set. - SetDriveCounts() []int // list of erasure stripe size for each pool in order. + GetDisks(poolIdx, setIdx int) ([]StorageAPI, error) // return the disks belonging to pool and set. + SetDriveCounts() []int // list of erasure stripe size for each pool in order. // Healing operations. 
HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) diff --git a/cmd/xl-storage.go b/cmd/xl-storage.go index ffce0ad92..dab2bb622 100644 --- a/cmd/xl-storage.go +++ b/cmd/xl-storage.go @@ -554,14 +554,14 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates poolIdx, setIdx, _ := s.GetDiskLoc() - disks, healing, err := objAPI.GetDisks(poolIdx, setIdx) + disks, err := objAPI.GetDisks(poolIdx, setIdx) if err != nil { return cache, err } cache.Info.updates = updates - dataUsageInfo, err := scanDataFolder(ctx, disks, s.drivePath, healing, cache, func(item scannerItem) (sizeSummary, error) { + dataUsageInfo, err := scanDataFolder(ctx, disks, s, cache, func(item scannerItem) (sizeSummary, error) { // Look for `xl.meta/xl.json' at the leaf. if !strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFile) && !strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFileV1) {