From 26a23c51f9b3e22ef3e68d0d174d118a78dd4172 Mon Sep 17 00:00:00 2001
From: Luke Shumaker
Date: Sun, 25 Dec 2022 23:46:53 -0700
Subject: tree-wide, except rebuildnodes: Fuss with logging to take advantage
 of textui

---
 .../btrfsinspect/rebuildmappings/fuzzymatchsums.go | 16 ++++---
 .../btrfsinspect/rebuildmappings/matchsums.go      |  6 +--
 .../rebuildmappings/rebuildmappings.go             | 44 ++++++++++-------
 lib/btrfsprogs/btrfsinspect/scandevices.go         | 56 +++++++++++-----------
 lib/textui/log.go                                  | 17 ++++++-
 5 files changed, 82 insertions(+), 57 deletions(-)

(limited to 'lib')

diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go
index 0b5fc6f..ae83513 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go
@@ -43,7 +43,10 @@ func fuzzyMatchBlockGroupSums(ctx context.Context,
 	physicalSums map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr],
 	logicalSums btrfssum.SumRunWithGaps[btrfsvol.LogicalAddr],
 ) error {
-	dlog.Info(ctx, "... Indexing physical regions...") // O(m)
+	_ctx := ctx
+
+	ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.substep", "indexing")
+	dlog.Info(ctx, "Indexing physical regions...") // O(m)
 	regions := ListUnmappedPhysicalRegions(fs)
 	physicalIndex := make(map[btrfssum.ShortSum][]btrfsvol.QualifiedPhysicalAddr)
 	if err := WalkUnmappedPhysicalRegions(ctx, physicalSums, regions, func(devID btrfsvol.DeviceID, region btrfssum.SumRun[btrfsvol.PhysicalAddr]) error {
@@ -57,9 +60,10 @@ func fuzzyMatchBlockGroupSums(ctx context.Context,
 	}); err != nil {
 		return err
 	}
-	dlog.Info(ctx, "... ... done indexing")
+	dlog.Info(ctx, "... done indexing")
 
-	dlog.Info(ctx, "... Searching...")
+	ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.substep", "searching")
+	dlog.Info(ctx, "Searching...")
 	numBlockgroups := len(blockgroups)
 	for i, bgLAddr := range maps.SortedKeys(blockgroups) {
 		blockgroup := blockgroups[bgLAddr]
@@ -108,7 +112,7 @@ func fuzzyMatchBlockGroupSums(ctx context.Context,
 		if apply {
 			lvl = dlog.LogLevelInfo
 		}
-		dlog.Logf(ctx, lvl, "... (%v/%v) blockgroup[laddr=%v] matches=[%s]; bestpossible=%v%% (based on %v runs)",
+		dlog.Logf(ctx, lvl, "(%v/%v) blockgroup[laddr=%v] matches=[%s]; bestpossible=%v%% (based on %v runs)",
 			i+1, numBlockgroups, bgLAddr, matchesStr, int(100*bgRun.PctFull()), len(bgRun.Runs))
 		if !apply {
 			continue
@@ -125,12 +129,12 @@ func fuzzyMatchBlockGroupSums(ctx context.Context,
 			},
 		}
 		if err := fs.LV.AddMapping(mapping); err != nil {
-			dlog.Errorf(ctx, "... error: %v", err)
+			dlog.Errorf(ctx, "error: %v", err)
 			continue
 		}
 		delete(blockgroups, bgLAddr)
 	}
-	dlog.Info(ctx, "... ... done searching")
+	dlog.Info(ctx, "done searching")
 
 	return nil
 }
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go
index 6a8333a..be82f87 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go
@@ -29,7 +29,7 @@ func matchBlockGroupSums(ctx context.Context,
 		blockgroup := blockgroups[bgLAddr]
 		bgRun := SumsForLogicalRegion(logicalSums, blockgroup.LAddr, blockgroup.Size)
 		if len(bgRun.Runs) == 0 {
-			dlog.Errorf(ctx, "... (%v/%v) blockgroup[laddr=%v] can't be matched because it has 0 runs",
+			dlog.Errorf(ctx, "(%v/%v) blockgroup[laddr=%v] can't be matched because it has 0 runs",
 				i+1, numBlockgroups, bgLAddr)
 			continue
 		}
@@ -55,7 +55,7 @@ func matchBlockGroupSums(ctx context.Context,
 		if len(matches) == 1 {
 			lvl = dlog.LogLevelInfo
 		}
-		dlog.Logf(ctx, lvl, "... (%v/%v) blockgroup[laddr=%v] has %v matches based on %v%% coverage from %v runs",
+		dlog.Logf(ctx, lvl, "(%v/%v) blockgroup[laddr=%v] has %v matches based on %v%% coverage from %v runs",
 			i+1, numBlockgroups, bgLAddr, len(matches), int(100*bgRun.PctFull()), len(bgRun.Runs))
 		if len(matches) != 1 {
 			continue
@@ -72,7 +72,7 @@ func matchBlockGroupSums(ctx context.Context,
 			},
 		}
 		if err := fs.LV.AddMapping(mapping); err != nil {
-			dlog.Errorf(ctx, "... error: %v", err)
+			dlog.Errorf(ctx, "error: %v", err)
 			continue
 		}
 		delete(blockgroups, bgLAddr)
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go
index 5d44b4e..9807764 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go
@@ -53,35 +53,39 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect
 	dlog.Infof(ctx, "plan: 5/6 search for block groups in checksum map (exact)")
 	dlog.Infof(ctx, "plan: 6/6 search for block groups in checksum map (fuzzy)")
 
-	dlog.Infof(ctx, "1/6: Processing %d chunks...", numChunks)
+	_ctx := ctx
+	ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "1/6")
+	dlog.Infof(_ctx, "1/6: Processing %d chunks...", numChunks)
 	for _, devID := range devIDs {
 		devResults := scanResults[devID]
 		for _, chunk := range devResults.FoundChunks {
 			for _, mapping := range chunk.Chunk.Mappings(chunk.Key) {
 				if err := fs.LV.AddMapping(mapping); err != nil {
-					dlog.Errorf(ctx, "... error: adding chunk: %v", err)
+					dlog.Errorf(ctx, "error: adding chunk: %v", err)
 				}
 			}
 		}
 	}
-	dlog.Info(ctx, "... done processing chunks")
+	dlog.Info(_ctx, "... done processing chunks")
 
-	dlog.Infof(ctx, "2/6: Processing %d device extents...", numDevExts)
+	ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "2/6")
+	dlog.Infof(_ctx, "2/6: Processing %d device extents...", numDevExts)
 	for _, devID := range devIDs {
 		devResults := scanResults[devID]
 		for _, ext := range devResults.FoundDevExtents {
 			if err := fs.LV.AddMapping(ext.DevExt.Mapping(ext.Key)); err != nil {
-				dlog.Errorf(ctx, "... error: adding devext: %v", err)
+				dlog.Errorf(ctx, "error: adding devext: %v", err)
 			}
 		}
 	}
-	dlog.Info(ctx, "... done processing device extents")
+	dlog.Info(_ctx, "... done processing device extents")
 
 	// Do the nodes "last" to avoid bloating the mappings table
 	// too much. (Because nodes are numerous and small, while the
 	// others are few and large; so it is likely that many of the
 	// nodes will be subsumed by other things.)
-	dlog.Infof(ctx, "3/6: Processing %d nodes...", numNodes)
+	ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "3/6")
+	dlog.Infof(_ctx, "3/6: Processing %d nodes...", numNodes)
 	for _, devID := range devIDs {
 		devResults := scanResults[devID]
 		// Sort them so that progress numbers are predictable.
@@ -96,16 +100,17 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect
 					Size:       nodeSize,
 					SizeLocked: false,
 				}); err != nil {
-					dlog.Errorf(ctx, "... error: adding node ident: %v", err)
+					dlog.Errorf(ctx, "error: adding node ident: %v", err)
 				}
 			}
 		}
 	}
-	dlog.Info(ctx, "... done processing nodes")
+	dlog.Info(_ctx, "... done processing nodes")
 
 	// Use block groups to add missing flags (and as a hint to
 	// combine node entries).
-	dlog.Infof(ctx, "4/6: Processing %d block groups...", numBlockGroups)
+	ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "4/6")
+	dlog.Infof(_ctx, "4/6: Processing %d block groups...", numBlockGroups)
 	// First dedup them, because they change for allocations and
 	// CoW means that they'll bounce around a lot, so you likely
 	// have oodles of duplicates?
@@ -118,7 +123,7 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect
 		bg := bgs[bgLAddr]
 		otherLAddr, otherPAddr := fs.LV.ResolveAny(bg.LAddr, bg.Size)
 		if otherLAddr < 0 || otherPAddr.Addr < 0 {
-			dlog.Errorf(ctx, "... error: could not pair blockgroup laddr=%v (size=%v flags=%v) with a mapping",
+			dlog.Errorf(ctx, "error: could not pair blockgroup laddr=%v (size=%v flags=%v) with a mapping",
 				bg.LAddr, bg.Size, bg.Flags)
 			continue
 		}
@@ -135,14 +140,15 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect
 			},
 		}
 		if err := fs.LV.AddMapping(mapping); err != nil {
-			dlog.Errorf(ctx, "... error: adding flags from blockgroup: %v", err)
+			dlog.Errorf(ctx, "error: adding flags from blockgroup: %v", err)
 			continue
 		}
 		delete(bgs, bgLAddr)
 	}
-	dlog.Info(ctx, "... done processing block groups")
+	dlog.Info(_ctx, "... done processing block groups")
 
-	dlog.Infof(ctx, "5/6: Searching for %d block groups in checksum map (exact)...", len(bgs))
+	ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "5/6")
+	dlog.Infof(_ctx, "5/6: Searching for %d block groups in checksum map (exact)...", len(bgs))
 	physicalSums := ExtractPhysicalSums(scanResults)
 	logicalSums := ExtractLogicalSums(ctx, scanResults)
 	if err := matchBlockGroupSums(ctx, fs, bgs, physicalSums, logicalSums); err != nil {
@@ -150,13 +156,15 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect
 	}
 	dlog.Info(ctx, "... done searching for exact block groups")
 
-	dlog.Infof(ctx, "6/6: Searching for %d block groups in checksum map (fuzzy)...", len(bgs))
+	ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "6/6")
+	dlog.Infof(_ctx, "6/6: Searching for %d block groups in checksum map (fuzzy)...", len(bgs))
 	if err := fuzzyMatchBlockGroupSums(ctx, fs, bgs, physicalSums, logicalSums); err != nil {
 		return err
 	}
-	dlog.Info(ctx, "... done searching for fuzzy block groups")
+	dlog.Info(_ctx, "... done searching for fuzzy block groups")
 
-	dlog.Info(ctx, "report:")
+	ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "report")
+	dlog.Info(_ctx, "report:")
 
 	unmappedPhysicalRegions := ListUnmappedPhysicalRegions(fs)
 	var unmappedPhysical btrfsvol.AddrDelta
@@ -182,7 +190,7 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect
 	}
 	dlog.Infof(ctx, "... %d KiB of unmapped block groups (across %d groups)", int(unmappedBlockGroups/1024), len(bgs))
 
-	dlog.Info(ctx, "detailed report:")
+	dlog.Info(_ctx, "detailed report:")
 	for _, devID := range maps.SortedKeys(unmappedPhysicalRegions) {
 		for _, region := range unmappedPhysicalRegions[devID] {
 			dlog.Infof(ctx, "... unmapped physical region: dev=%v beg=%v end=%v (size=%v)",
diff --git a/lib/btrfsprogs/btrfsinspect/scandevices.go b/lib/btrfsprogs/btrfsinspect/scandevices.go
index b6c929b..628995a 100644
--- a/lib/btrfsprogs/btrfsinspect/scandevices.go
+++ b/lib/btrfsprogs/btrfsinspect/scandevices.go
@@ -34,7 +34,7 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS) (ScanDevicesResult, error) {
 	for id, dev := range fs.LV.PhysicalVolumes() {
 		id := id
 		dev := dev
-		grp.Go(dev.Name(), func(ctx context.Context) error {
+		grp.Go(fmt.Sprintf("dev-%d", id), func(ctx context.Context) error {
 			sb, err := dev.Superblock()
 			if err != nil {
 				return err
@@ -80,10 +80,7 @@ type SysExtentCSum struct {
 }
 
 type scanStats struct {
-	DevName string
-	DevSize btrfsvol.PhysicalAddr
-
-	Pos btrfsvol.PhysicalAddr
+	textui.Portion[btrfsvol.PhysicalAddr]
 
 	NumFoundNodes       int
 	NumFoundChunks      int
@@ -93,10 +90,8 @@ type scanStats struct {
 }
 
 func (s scanStats) String() string {
-	return fmt.Sprintf("... dev[%q] scanned %v%% (%d/%d) (found: %v nodes, %v chunks, %v block groups, %v dev extents, %v sum items)",
-		s.DevName,
-		int(100*float64(s.Pos)/float64(s.DevSize)),
-		s.Pos, s.DevSize,
+	return textui.Sprintf("scanned %v (found: %v nodes, %v chunks, %v block groups, %v dev extents, %v sum items)",
+		s.Portion,
 		s.NumFoundNodes,
 		s.NumFoundChunks,
 		s.NumFoundBlockGroups,
@@ -107,6 +102,8 @@ func (s scanStats) String() string {
 // ScanOneDevice mostly mimics btrfs-progs
 // cmds/rescue-chunk-recover.c:scan_one_device().
 func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblock) (ScanOneDeviceResult, error) {
+	ctx = dlog.WithField(ctx, "btrfsinspect.scandevices.dev", dev.Name())
+
 	result := ScanOneDeviceResult{
 		FoundNodes: make(map[btrfsvol.LogicalAddr][]btrfsvol.PhysicalAddr),
 	}
@@ -132,9 +129,10 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
 	progressWriter := textui.NewProgress[scanStats](ctx, dlog.LogLevelInfo, 1*time.Second)
 	progress := func(pos btrfsvol.PhysicalAddr) {
 		progressWriter.Set(scanStats{
-			DevName: dev.Name(),
-			DevSize: devSize,
-			Pos:     pos,
+			Portion: textui.Portion[btrfsvol.PhysicalAddr]{
+				N: pos,
+				D: devSize,
+			},
 			NumFoundNodes:       len(result.FoundNodes),
 			NumFoundChunks:      len(result.FoundChunks),
 			NumFoundBlockGroups: len(result.FoundBlockGroups),
@@ -171,7 +169,7 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
 			nodeRef, err := btrfstree.ReadNode[btrfsvol.PhysicalAddr](dev, sb, pos, btrfstree.NodeExpectations{})
 			if err != nil {
 				if !errors.Is(err, btrfstree.ErrNotANode) {
-					dlog.Errorf(ctx, "... dev[%q] error: %v", dev.Name(), err)
+					dlog.Errorf(ctx, "error: %v", err)
 				}
 			} else {
 				result.FoundNodes[nodeRef.Data.Head.Addr] = append(result.FoundNodes[nodeRef.Data.Head.Addr], nodeRef.Addr)
@@ -180,12 +178,12 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
 				case btrfsitem.CHUNK_ITEM_KEY:
 					chunk, ok := item.Body.(btrfsitem.Chunk)
 					if !ok {
-						dlog.Errorf(ctx, "... dev[%q] node@%v: item %v: error: type is CHUNK_ITEM_KEY, but struct is %T",
-							dev.Name(), nodeRef.Addr, i, item.Body)
+						dlog.Errorf(ctx, "node@%v: item %v: error: type is CHUNK_ITEM_KEY, but struct is %T",
+							nodeRef.Addr, i, item.Body)
 						continue
 					}
-					//dlog.Tracef(ctx, "... dev[%q] node@%v: item %v: found chunk",
-					//	dev.Name(), nodeRef.Addr, i)
+					dlog.Tracef(ctx, "node@%v: item %v: found chunk",
+						nodeRef.Addr, i)
 					result.FoundChunks = append(result.FoundChunks, btrfstree.SysChunk{
 						Key:   item.Key,
 						Chunk: chunk,
@@ -193,12 +191,12 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
 				case btrfsitem.BLOCK_GROUP_ITEM_KEY:
 					bg, ok := item.Body.(btrfsitem.BlockGroup)
 					if !ok {
-						dlog.Errorf(ctx, "... dev[%q] node@%v: item %v: error: type is BLOCK_GROUP_ITEM_KEY, but struct is %T",
-							dev.Name(), nodeRef.Addr, i, item.Body)
+						dlog.Errorf(ctx, "node@%v: item %v: error: type is BLOCK_GROUP_ITEM_KEY, but struct is %T",
+							nodeRef.Addr, i, item.Body)
 						continue
 					}
-					//dlog.Tracef(ctx, "... dev[%q] node@%v: item %v: found block group",
-					//	dev.Name(), nodeRef.Addr, i)
+					dlog.Tracef(ctx, "node@%v: item %v: found block group",
+						nodeRef.Addr, i)
 					result.FoundBlockGroups = append(result.FoundBlockGroups, SysBlockGroup{
 						Key: item.Key,
 						BG:  bg,
@@ -206,12 +204,12 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
 				case btrfsitem.DEV_EXTENT_KEY:
 					devext, ok := item.Body.(btrfsitem.DevExtent)
 					if !ok {
-						dlog.Errorf(ctx, "... dev[%q] node@%v: item %v: error: type is DEV_EXTENT_KEY, but struct is %T",
-							dev.Name(), nodeRef.Addr, i, item.Body)
+						dlog.Errorf(ctx, "node@%v: item %v: error: type is DEV_EXTENT_KEY, but struct is %T",
+							nodeRef.Addr, i, item.Body)
 						continue
 					}
-					//dlog.Tracef(ctx, "... dev[%q] node@%v: item %v: found dev extent",
-					//	dev.Name(), nodeRef.Addr, i)
+					dlog.Tracef(ctx, "node@%v: item %v: found dev extent",
+						nodeRef.Addr, i)
 					result.FoundDevExtents = append(result.FoundDevExtents, SysDevExtent{
 						Key:    item.Key,
 						DevExt: devext,
@@ -219,12 +217,12 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
 				case btrfsitem.EXTENT_CSUM_KEY:
 					sums, ok := item.Body.(btrfsitem.ExtentCSum)
 					if !ok {
-						dlog.Errorf(ctx, "... dev[%q] node@%v: item %v: error: type is EXTENT_CSUM_OBJECTID, but struct is %T",
-							dev.Name(), nodeRef.Addr, i, item.Body)
+						dlog.Errorf(ctx, "node@%v: item %v: error: type is EXTENT_CSUM_OBJECTID, but struct is %T",
+							nodeRef.Addr, i, item.Body)
 						continue
 					}
-					//dlog.Tracef(ctx, "... dev[%q] node@%v: item %v: found csums",
-					//	dev.Name(), nodeRef.Addr, i)
+					dlog.Tracef(ctx, "node@%v: item %v: found csums",
+						nodeRef.Addr, i)
 					result.FoundExtentCSums = append(result.FoundExtentCSums, SysExtentCSum{
 						Generation: nodeRef.Data.Head.Generation,
 						Sums:       sums,
diff --git a/lib/textui/log.go b/lib/textui/log.go
index ca8c201..4421074 100644
--- a/lib/textui/log.go
+++ b/lib/textui/log.go
@@ -280,6 +280,14 @@ func fieldOrd(key string) int {
 		return -4
 	case "dexec.err":
 		return -3
+
+	case "btrfsinspect.scandevices.dev":
+		return -1
+	case "btrfsinspect.rebuild-mappings.step":
+		return -2
+	case "btrfsinspect.rebuild-mappings.substep":
+		return -1
+
 	default:
 		return 1
 	}
@@ -290,6 +298,13 @@ func fieldName(key string) string {
 	case "THREAD":
 		return "thread"
 	default:
-		return key
+		switch {
+		case strings.HasPrefix(key, "btrfsinspect.scandevices."):
+			return strings.TrimPrefix(key, "btrfsinspect.scandevices.")
+		case strings.HasPrefix(key, "btrfsinspect.rebuild-mappings."):
+			return strings.TrimPrefix(key, "btrfsinspect.rebuild-mappings.")
+		default:
+			return key
+		}
 	}
 }
--
cgit v1.2.3-2-g168b
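
A note on the pattern the diff adopts (an illustrative sketch, not part of the commit): each step keeps the original context in _ctx and derives a per-step context with dlog.WithField, so every line logged through the derived context carries a step/substep/dev field, which lib/textui then shortens (the fieldName change) and orders near the front of the line (the fieldOrd change). The standalone program below mimics only the context threading, using dlog from github.com/datawire/dlib, an existing dependency; it logs through dlog's fallback logger rather than lib/textui's logger, so the field names match the diff but the on-screen formatting will differ from the real tool.

// logfields_sketch.go -- illustrative sketch only; not part of the commit.
// It mimics the "_ctx + dlog.WithField" pattern that rebuildmappings.go
// adopts in the diff above.
package main

import (
	"context"

	"github.com/datawire/dlib/dlog"
)

func main() {
	_ctx := context.Background() // keep the un-annotated context around

	// Step 1: every line logged through ctx carries the step field.
	ctx := dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "1/6")
	dlog.Info(ctx, "Processing chunks...")
	dlog.Error(ctx, "error: adding chunk: (example)")

	// Step 2: derive from _ctx again so the step fields never stack up.
	ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "2/6")
	dlog.Info(ctx, "Processing device extents...")

	// "done" lines go through _ctx, without the step field, as in the diff.
	dlog.Info(_ctx, "... done processing device extents")
}

Re-deriving from _ctx at each step, rather than chaining WithField onto the already-annotated ctx, is what keeps the "1/6" field from still being attached once step 2 starts logging; the "... done" messages deliberately use the bare _ctx so they print without a step prefix.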