From b15e874d00e113813a928ef4769e8a73fd6090a5 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sat, 24 Dec 2022 23:05:34 -0700 Subject: textui: Add some utilities for human-friendly text --- lib/textui/text.go | 85 +++++++++++++++++++++++++++++++++++++++++++++++++ lib/textui/text_test.go | 39 +++++++++++++++++++++++ 2 files changed, 124 insertions(+) create mode 100644 lib/textui/text.go create mode 100644 lib/textui/text_test.go (limited to 'lib') diff --git a/lib/textui/text.go b/lib/textui/text.go new file mode 100644 index 0000000..f628eab --- /dev/null +++ b/lib/textui/text.go @@ -0,0 +1,85 @@ +// Copyright (C) 2022 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package textui + +import ( + "fmt" + "io" + + "golang.org/x/exp/constraints" + "golang.org/x/text/language" + "golang.org/x/text/message" + "golang.org/x/text/number" + + "git.lukeshu.com/btrfs-progs-ng/lib/fmtutil" +) + +var printer = message.NewPrinter(language.English) + +// Fprintf is like `fmt.Fprintf`, but (1) includes the extensions of +// `golang.org/x/text/message.Printer`, and (2) is useful for marking +// when a print call is part of the UI, rather than something +// internal. +func Fprintf(w io.Writer, key string, a ...any) (n int, err error) { + return printer.Fprintf(w, key, a...) +} + +// Sprintf is like `fmt.Sprintf`, but (1) includes the extensions of +// `golang.org/x/text/message.Printer`, and (2) is useful for marking +// when a sprint call is part of the UI, rather than something +// internal. +func Sprintf(key string, a ...any) string { + return printer.Sprintf(key, a...) +} + +// Humanized wraps a value such that formatting of it can make use of +// the `golang.org/x/text/message.Printer` extensions even when used +// with plain-old `fmt`. +func Humanized(x any) any { + return humanized{val: x} +} + +type humanized struct { + val any +} + +var ( + _ fmt.Formatter = humanized{} + _ fmt.Stringer = humanized{} +) + +// String implements fmt.Formatter. 
+func (h humanized) Format(f fmt.State, verb rune) { + printer.Fprintf(f, fmtutil.FmtStateString(f, verb), h.val) +} + +// String implements fmt.Stringer. +func (h humanized) String() string { + return fmt.Sprint(h) +} + +// Portion renders a fraction N/D as both a percentage and +// parenthetically as the exact fractional value, rendered with +// human-friendly commas. +// +// For example: +// +// fmt.Sprint(Portion[int]{N: 1, D: 12345}) ⇒ "0% (1/12,345)" +type Portion[T constraints.Integer] struct { + N, D T +} + +var ( + _ fmt.Stringer = Portion[int]{} +) + +// String implements fmt.Stringer. +func (p Portion[T]) String() string { + pct := float64(1) + if p.D > 0 { + pct = float64(p.N) / float64(p.D) + } + return printer.Sprintf("%v (%v/%v)", number.Percent(pct), uint64(p.N), uint64(p.D)) +} diff --git a/lib/textui/text_test.go b/lib/textui/text_test.go new file mode 100644 index 0000000..c4b42f6 --- /dev/null +++ b/lib/textui/text_test.go @@ -0,0 +1,39 @@ +// Copyright (C) 2022 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package textui_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" + "git.lukeshu.com/btrfs-progs-ng/lib/textui" +) + +func TestFprintf(t *testing.T) { + var out strings.Builder + textui.Fprintf(&out, "%d", 12345) + assert.Equal(t, "12,345", out.String()) +} + +func TestHumanized(t *testing.T) { + assert.Equal(t, "12,345", fmt.Sprint(textui.Humanized(12345))) + assert.Equal(t, "12,345 ", fmt.Sprintf("%-8d", textui.Humanized(12345))) + + laddr := btrfsvol.LogicalAddr(345243543) + assert.Equal(t, "0x000000001493ff97", fmt.Sprintf("%v", textui.Humanized(laddr))) + assert.Equal(t, "345243543", fmt.Sprintf("%d", textui.Humanized(laddr))) + assert.Equal(t, "345,243,543", fmt.Sprintf("%d", textui.Humanized(uint64(laddr)))) +} + +func TestPortion(t *testing.T) { + assert.Equal(t, "100% (0/0)", fmt.Sprint(textui.Portion[int]{})) + 
assert.Equal(t, "0% (1/12,345)", fmt.Sprint(textui.Portion[int]{N: 1, D: 12345})) + assert.Equal(t, "100% (0/0)", fmt.Sprint(textui.Portion[btrfsvol.PhysicalAddr]{})) + assert.Equal(t, "0% (1/12,345)", fmt.Sprint(textui.Portion[btrfsvol.PhysicalAddr]{N: 1, D: 12345})) +} -- cgit v1.2.3-2-g168b From 00f80af7ff67dd5723ddc53f676536ef926f2791 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sun, 25 Dec 2022 00:48:54 -0700 Subject: rebuildnodes: Integrate loop-checking in to Graph.FinalCheck --- .../rebuildnodes/btrees/rebuilt_btrees.go | 2 + .../btrfsinspect/rebuildnodes/graph/graph.go | 75 ++++++++++++++++++++-- .../btrfsinspect/rebuildnodes/graph/loops.go | 54 ++++------------ lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go | 7 +- 4 files changed, 82 insertions(+), 56 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go index 50c75a4..ab4b55f 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go @@ -345,6 +345,8 @@ func (tree *rebuiltTree) indexNode(graph pkggraph.Graph, node btrfsvol.LogicalAd return } if slices.Contains(node, stack) { + // This is a panic because graph.FinalCheck() should + // have already checked for loops. 
panic("loop") } if !tree.isOwnerOK(graph.Nodes[node].Owner, graph.Nodes[node].Generation) { diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go index 1180b0d..d3dd19a 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go @@ -5,8 +5,12 @@ package graph import ( + "context" "fmt" "reflect" + "time" + + "github.com/datawire/dlib/dlog" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" @@ -14,6 +18,9 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" "git.lukeshu.com/btrfs-progs-ng/lib/containers" "git.lukeshu.com/btrfs-progs-ng/lib/diskio" + "git.lukeshu.com/btrfs-progs-ng/lib/maps" + "git.lukeshu.com/btrfs-progs-ng/lib/slices" + "git.lukeshu.com/btrfs-progs-ng/lib/textui" ) type Node struct { @@ -131,7 +138,7 @@ func New(sb btrfstree.Superblock) *Graph { return g } -func (g *Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) { +func (g Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) { nodeData := Node{ Level: nodeRef.Data.Head.Level, Generation: nodeRef.Data.Head.Generation, @@ -184,23 +191,77 @@ func (g *Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.N } } -func (g *Graph) FinalCheck(fs diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock, progress func(n, d int)) error { - total := len(g.EdgesTo) - done := 0 - progress(done, total) +type progStats struct { + textui.Portion[int] +} + +func (s progStats) String() string { + return "... 
" + s.Portion.String() +} + +func (g Graph) FinalCheck(ctx context.Context, fs diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) error { + var stats progStats + + dlog.Info(ctx, "Checking keypointers for dead-ends...") + progressWriter := textui.NewProgress[progStats](ctx, dlog.LogLevelInfo, 1*time.Second) + stats.D = len(g.EdgesTo) + progressWriter.Set(stats) for laddr := range g.EdgesTo { if _, ok := g.Nodes[laddr]; !ok { _, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, sb, laddr, btrfstree.NodeExpectations{ LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr}, }) if err == nil { + progressWriter.Done() return fmt.Errorf("node@%v exists but was not in node scan results", laddr) } g.BadNodes[laddr] = err } - done++ - progress(done, total) + stats.N++ + progressWriter.Set(stats) } + progressWriter.Done() + dlog.Info(ctx, "... done checking keypointers") + + dlog.Info(ctx, "Checking for btree loops...") + stats.D = len(g.Nodes) + stats.N = 0 + progressWriter = textui.NewProgress[progStats](ctx, dlog.LogLevelInfo, 1*time.Second) + progressWriter.Set(stats) + visited := make(containers.Set[btrfsvol.LogicalAddr], len(g.Nodes)) + numLoops := 0 + var checkNode func(node btrfsvol.LogicalAddr, stack []btrfsvol.LogicalAddr) + checkNode = func(node btrfsvol.LogicalAddr, stack []btrfsvol.LogicalAddr) { + defer func() { + stats.N = len(visited) + progressWriter.Set(stats) + }() + if visited.Has(node) { + return + } + if slices.Contains(node, stack) { + numLoops++ + dlog.Error(ctx, "loop:") + for _, line := range g.renderLoop(append(stack, node)) { + dlog.Errorf(ctx, " %s", line) + } + return + } + stack = append(stack, node) + for _, kp := range g.EdgesTo[node] { + checkNode(kp.FromNode, stack) + } + visited.Insert(node) + } + for _, node := range maps.SortedKeys(g.Nodes) { + checkNode(node, nil) + } + progressWriter.Done() + if numLoops > 0 { + return fmt.Errorf("%d btree loops", numLoops) + } + dlog.Info(ctx, "... 
done checking for loops") + return nil } diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go index e76985c..0e51805 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go @@ -6,47 +6,18 @@ package graph import ( "fmt" - "io" "strings" "github.com/datawire/dlib/derror" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/containers" - "git.lukeshu.com/btrfs-progs-ng/lib/maps" - "git.lukeshu.com/btrfs-progs-ng/lib/slices" ) -func (g Graph) ShowLoops(out io.Writer) { - orphans := make(containers.Set[btrfsvol.LogicalAddr]) - for node := range g.Nodes { - if len(g.EdgesTo[node]) == 0 { - orphans.Insert(node) - } - } - - loopWalk(out, g, 0) - for _, orphan := range maps.SortedKeys(orphans) { - loopWalk(out, g, orphan) - } -} - -func loopWalk(out io.Writer, scanData Graph, stack ...btrfsvol.LogicalAddr) { - for _, kp := range scanData.EdgesFrom[stack[len(stack)-1]] { - childStack := append(stack, kp.ToNode) - if slices.Contains(kp.ToNode, stack) { - loopRender(out, scanData, childStack...) - } else { - loopWalk(out, scanData, childStack...) 
- } - } -} - -func nodeRender(scanData Graph, node btrfsvol.LogicalAddr) []string { +func (g Graph) renderNode(node btrfsvol.LogicalAddr) []string { if node == 0 { return []string{"root"} - } else if nodeData, ok := scanData.Nodes[node]; ok { + } else if nodeData, ok := g.Nodes[node]; ok { return []string{ fmt.Sprintf("{addr: %v,", node), fmt.Sprintf(" level: %v,", nodeData.Level), @@ -61,7 +32,7 @@ func nodeRender(scanData Graph, node btrfsvol.LogicalAddr) []string { nodeData.MaxItem.ItemType, nodeData.MaxItem.Offset), } - } else if nodeErr, ok := scanData.BadNodes[node]; ok { + } else if nodeErr, ok := g.BadNodes[node]; ok { return []string{ fmt.Sprintf("{addr:%v,", node), fmt.Sprintf(" err:%q}", nodeErr.Error()), @@ -71,7 +42,7 @@ func nodeRender(scanData Graph, node btrfsvol.LogicalAddr) []string { } } -func edgeRender(scanData Graph, kp Edge) []string { +func (g Graph) renderEdge(kp Edge) []string { a := fmt.Sprintf("[%d]={", kp.FromItem) b := strings.Repeat(" ", len(a)) ret := []string{ @@ -85,8 +56,8 @@ func edgeRender(scanData Graph, kp Edge) []string { } var err error - if toNode, ok := scanData.Nodes[kp.ToNode]; !ok { - err = scanData.BadNodes[kp.ToNode] + if toNode, ok := g.Nodes[kp.ToNode]; !ok { + err = g.BadNodes[kp.ToNode] } else { err = checkNodeExpectations(kp, toNode) } @@ -100,7 +71,7 @@ func edgeRender(scanData Graph, kp Edge) []string { return ret } -func loopRender(out io.Writer, scanData Graph, stack ...btrfsvol.LogicalAddr) { +func (g Graph) renderLoop(stack []btrfsvol.LogicalAddr) []string { var lines []string add := func(suffixes []string) { curLen := 0 @@ -126,20 +97,17 @@ func loopRender(out io.Writer, scanData Graph, stack ...btrfsvol.LogicalAddr) { for i, node := range stack { if i > 0 { - for _, kp := range scanData.EdgesTo[node] { + for _, kp := range g.EdgesTo[node] { if kp.FromNode == stack[i-1] { - add(edgeRender(scanData, *kp)) + add(g.renderEdge(*kp)) break } } } - add(nodeRender(scanData, node)) + add(g.renderNode(node)) } - 
fmt.Fprintln(out, "loop:") - for _, line := range lines { - fmt.Fprintln(out, " "+line) - } + return lines } func checkNodeExpectations(kp Edge, toNode Node) error { diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go index 5c2d0fd..cdd5cba 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go @@ -72,14 +72,9 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.Sca progressWriter.Done() dlog.Info(ctx, "... done reading node data") - progressWriter = textui.NewProgress[scanStats](ctx, dlog.LogLevelInfo, 1*time.Second) - dlog.Infof(ctx, "Checking keypointers for dead-ends...") - if err := nodeGraph.FinalCheck(fs, *sb, progress); err != nil { + if err := nodeGraph.FinalCheck(ctx, fs, *sb); err != nil { return graph.Graph{}, nil, err } - progressWriter.Done() - dlog.Info(ctx, "... done checking keypointers") - keyIO.SetGraph(*nodeGraph) return *nodeGraph, keyIO, nil -- cgit v1.2.3-2-g168b From 1668269788b00738e3db390a9a12caa883b32f89 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sun, 25 Dec 2022 00:49:07 -0700 Subject: rebuildnodes: Read the superblock before building the graph --- lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go index 66eaf1a..0a3f2bb 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go @@ -51,13 +51,13 @@ type rebuilder struct { } func RebuildNodes(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) (map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], error) { - nodeGraph, keyIO, err := ScanDevices(ctx, fs, nodeScanResults) // ScanDevices does its own logging + dlog.Info(ctx, "Reading 
superblock...") + sb, err := fs.Superblock() if err != nil { return nil, err } - dlog.Info(ctx, "Reading superblock...") - sb, err := fs.Superblock() + nodeGraph, keyIO, err := ScanDevices(ctx, fs, nodeScanResults) // ScanDevices does its own logging if err != nil { return nil, err } -- cgit v1.2.3-2-g168b From 92b20f73e93a6a08030ad6100ca70d4c9668537e Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sat, 24 Dec 2022 23:40:02 -0700 Subject: tree-wide: Migrate from using x/text/message directly to using textui --- lib/btrfsprogs/btrfsinspect/print_addrspace.go | 17 +++++++---------- .../btrfsinspect/rebuildmappings/rebuildmappings.go | 9 ++++----- 2 files changed, 11 insertions(+), 15 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/print_addrspace.go b/lib/btrfsprogs/btrfsinspect/print_addrspace.go index 7436d5b..cd90288 100644 --- a/lib/btrfsprogs/btrfsinspect/print_addrspace.go +++ b/lib/btrfsprogs/btrfsinspect/print_addrspace.go @@ -9,10 +9,9 @@ import ( "io" "sort" - "golang.org/x/text/message" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" + "git.lukeshu.com/btrfs-progs-ng/lib/textui" ) func PrintLogicalSpace(out io.Writer, fs *btrfs.FS) { @@ -40,10 +39,9 @@ func PrintLogicalSpace(out io.Writer, fs *btrfs.FS) { prevBeg = mapping.LAddr prevEnd = mapping.LAddr.Add(mapping.Size) } - p := message.NewPrinter(message.MatchLanguage("en")) - p.Fprintf(out, "total logical holes = %v (%d)\n", sumHole, int64(sumHole)) - p.Fprintf(out, "total logical chunks = %v (%d)\n", sumChunk, int64(sumChunk)) - p.Fprintf(out, "total logical addr space = %v (%d)\n", prevEnd, int64(prevEnd)) + textui.Fprintf(out, "total logical holes = %v (%d)\n", sumHole, int64(sumHole)) + textui.Fprintf(out, "total logical chunks = %v (%d)\n", sumChunk, int64(sumChunk)) + textui.Fprintf(out, "total logical addr space = %v (%d)\n", prevEnd, int64(prevEnd)) } func PrintPhysicalSpace(out io.Writer, fs *btrfs.FS) { @@ -70,8 +68,7 
@@ func PrintPhysicalSpace(out io.Writer, fs *btrfs.FS) { sumExt += mapping.Size prevEnd = mapping.PAddr.Addr.Add(mapping.Size) } - p := message.NewPrinter(message.MatchLanguage("en")) - p.Fprintf(out, "total physical holes = %v (%d)\n", sumHole, int64(sumHole)) - p.Fprintf(out, "total physical extents = %v (%d)\n", sumExt, int64(sumExt)) - p.Fprintf(out, "total physical addr space = %v (%d)\n", prevEnd, int64(prevEnd)) + textui.Fprintf(out, "total physical holes = %v (%d)\n", sumHole, int64(sumHole)) + textui.Fprintf(out, "total physical extents = %v (%d)\n", sumExt, int64(sumExt)) + textui.Fprintf(out, "total physical addr space = %v (%d)\n", prevEnd, int64(prevEnd)) } diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go index f1959b4..5bf1347 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go @@ -9,13 +9,13 @@ import ( "fmt" "github.com/datawire/dlib/dlog" - "golang.org/x/text/message" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" "git.lukeshu.com/btrfs-progs-ng/lib/containers" "git.lukeshu.com/btrfs-progs-ng/lib/maps" + "git.lukeshu.com/btrfs-progs-ng/lib/textui" ) func getNodeSize(fs *btrfs.FS) (btrfsvol.AddrDelta, error) { @@ -158,7 +158,6 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect dlog.Info(ctx, "... done searching for fuzzy block groups") dlog.Info(ctx, "report:") - p := message.NewPrinter(message.MatchLanguage("en")) unmappedPhysicalRegions := ListUnmappedPhysicalRegions(fs) var unmappedPhysical btrfsvol.AddrDelta @@ -169,20 +168,20 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect unmappedPhysical += region.End.Sub(region.Beg) } } - dlog.Info(ctx, p.Sprintf("... 
%d KiB of unmapped physical space (across %d regions)", int(unmappedPhysical/1024), numUnmappedPhysical)) + dlog.Info(ctx, fmt.Sprintf("... %d KiB of unmapped physical space (across %d regions)", textui.Humanized(int(unmappedPhysical/1024)), textui.Humanized(numUnmappedPhysical))) unmappedLogicalRegions := ListUnmappedLogicalRegions(fs, logicalSums) var unmappedLogical btrfsvol.AddrDelta for _, region := range unmappedLogicalRegions { unmappedLogical += region.Size() } - dlog.Info(ctx, p.Sprintf("... %d KiB of unmapped summed logical space (across %d regions)", int(unmappedLogical/1024), len(unmappedLogicalRegions))) + dlog.Info(ctx, fmt.Sprintf("... %d KiB of unmapped summed logical space (across %d regions)", textui.Humanized(int(unmappedLogical/1024)), textui.Humanized(len(unmappedLogicalRegions)))) var unmappedBlockGroups btrfsvol.AddrDelta for _, bg := range bgs { unmappedBlockGroups += bg.Size } - dlog.Info(ctx, p.Sprintf("... %d KiB of unmapped block groups (across %d groups)", int(unmappedBlockGroups/1024), len(bgs))) + dlog.Info(ctx, fmt.Sprintf("... 
%d KiB of unmapped block groups (across %d groups)", textui.Humanized(int(unmappedBlockGroups/1024)), textui.Humanized(len(bgs)))) dlog.Info(ctx, "detailed report:") for _, devID := range maps.SortedKeys(unmappedPhysicalRegions) { -- cgit v1.2.3-2-g168b From 74bb10779338bbb06be44a523d1e166b760018f7 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sat, 24 Dec 2022 23:55:03 -0700 Subject: tree-wide: Audit for fmt.Fprint usage, in favor of textui --- lib/btrfsprogs/btrfsinspect/print_addrspace.go | 13 +- lib/btrfsprogs/btrfsinspect/print_tree.go | 193 +++++++++++++------------ 2 files changed, 103 insertions(+), 103 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/print_addrspace.go b/lib/btrfsprogs/btrfsinspect/print_addrspace.go index cd90288..a8b992e 100644 --- a/lib/btrfsprogs/btrfsinspect/print_addrspace.go +++ b/lib/btrfsprogs/btrfsinspect/print_addrspace.go @@ -5,7 +5,6 @@ package btrfsinspect import ( - "fmt" "io" "sort" @@ -21,19 +20,19 @@ func PrintLogicalSpace(out io.Writer, fs *btrfs.FS) { for _, mapping := range mappings { if mapping.LAddr > prevEnd { size := mapping.LAddr.Sub(prevEnd) - fmt.Fprintf(out, "logical_hole laddr=%v size=%v\n", prevEnd, size) + textui.Fprintf(out, "logical_hole laddr=%v size=%v\n", prevEnd, size) sumHole += size } if mapping.LAddr != prevBeg { if !mapping.Flags.OK { - fmt.Fprintf(out, "chunk laddr=%v size=%v flags=(missing)\n", + textui.Fprintf(out, "chunk laddr=%v size=%v flags=(missing)\n", mapping.LAddr, mapping.Size) } else { - fmt.Fprintf(out, "chunk laddr=%v size=%v flags=%v\n", + textui.Fprintf(out, "chunk laddr=%v size=%v flags=%v\n", mapping.LAddr, mapping.Size, mapping.Flags.Val) } } - fmt.Fprintf(out, "\tstripe dev_id=%v paddr=%v\n", + textui.Fprintf(out, "\tstripe dev_id=%v paddr=%v\n", mapping.PAddr.Dev, mapping.PAddr.Addr) sumChunk += mapping.Size prevBeg = mapping.LAddr @@ -60,10 +59,10 @@ func PrintPhysicalSpace(out io.Writer, fs *btrfs.FS) { } if mapping.PAddr.Addr > prevEnd { size := 
mapping.PAddr.Addr.Sub(prevEnd) - fmt.Fprintf(out, "physical_hole paddr=%v size=%v\n", prevEnd, size) + textui.Fprintf(out, "physical_hole paddr=%v size=%v\n", prevEnd, size) sumHole += size } - fmt.Fprintf(out, "devext dev=%v paddr=%v size=%v laddr=%v\n", + textui.Fprintf(out, "devext dev=%v paddr=%v size=%v laddr=%v\n", mapping.PAddr.Dev, mapping.PAddr.Addr, mapping.Size, mapping.LAddr) sumExt += mapping.Size prevEnd = mapping.PAddr.Addr.Add(mapping.Size) diff --git a/lib/btrfsprogs/btrfsinspect/print_tree.go b/lib/btrfsprogs/btrfsinspect/print_tree.go index 9e5a5d1..b882c6b 100644 --- a/lib/btrfsprogs/btrfsinspect/print_tree.go +++ b/lib/btrfsprogs/btrfsinspect/print_tree.go @@ -22,6 +22,7 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" "git.lukeshu.com/btrfs-progs-ng/lib/diskio" "git.lukeshu.com/btrfs-progs-ng/lib/slices" + "git.lukeshu.com/btrfs-progs-ng/lib/textui" ) func DumpTrees(ctx context.Context, out io.Writer, fs *btrfs.FS) { @@ -32,19 +33,19 @@ func DumpTrees(ctx context.Context, out io.Writer, fs *btrfs.FS) { } if superblock.RootTree != 0 { - fmt.Fprintf(out, "root tree\n") + textui.Fprintf(out, "root tree\n") printTree(ctx, out, fs, btrfsprim.ROOT_TREE_OBJECTID) } if superblock.ChunkTree != 0 { - fmt.Fprintf(out, "chunk tree\n") + textui.Fprintf(out, "chunk tree\n") printTree(ctx, out, fs, btrfsprim.CHUNK_TREE_OBJECTID) } if superblock.LogTree != 0 { - fmt.Fprintf(out, "log root tree\n") + textui.Fprintf(out, "log root tree\n") printTree(ctx, out, fs, btrfsprim.TREE_LOG_OBJECTID) } if superblock.BlockGroupRoot != 0 { - fmt.Fprintf(out, "block group tree\n") + textui.Fprintf(out, "block group tree\n") printTree(ctx, out, fs, btrfsprim.BLOCK_GROUP_TREE_OBJECTID) } fs.TreeWalk( @@ -81,15 +82,15 @@ func DumpTrees(ctx context.Context, out io.Writer, fs *btrfs.FS) { if !ok { treeName = "file" } - fmt.Fprintf(out, "%v tree %v \n", treeName, fmtKey(item.Key)) + textui.Fprintf(out, "%v tree %v \n", treeName, fmtKey(item.Key)) printTree(ctx, 
out, fs, item.Key.ObjectID) return nil }, }, ) - fmt.Fprintf(out, "total bytes %v\n", superblock.TotalBytes) - fmt.Fprintf(out, "bytes used %v\n", superblock.BytesUsed) - fmt.Fprintf(out, "uuid %v\n", superblock.FSUUID) + textui.Fprintf(out, "total bytes %v\n", superblock.TotalBytes) + textui.Fprintf(out, "bytes used %v\n", superblock.BytesUsed) + textui.Fprintf(out, "uuid %v\n", superblock.FSUUID) } // printTree mimics btrfs-progs @@ -104,7 +105,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri return nil }, PreKeyPointer: func(_ btrfstree.TreePath, item btrfstree.KeyPointer) error { - fmt.Fprintf(out, "\t%v block %v gen %v\n", + textui.Fprintf(out, "\t%v block %v gen %v\n", fmtKey(item.Key), item.BlockPtr, item.Generation) @@ -115,65 +116,65 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri bs, _ := binstruct.Marshal(item.Body) itemSize := uint32(len(bs)) itemOffset -= itemSize - fmt.Fprintf(out, "\titem %v %v itemoff %v itemsize %v\n", + textui.Fprintf(out, "\titem %v %v itemoff %v itemsize %v\n", i, fmtKey(item.Key), itemOffset, itemSize) switch body := item.Body.(type) { case btrfsitem.FreeSpaceHeader: - fmt.Fprintf(out, "\t\tlocation %v\n", fmtKey(body.Location)) - fmt.Fprintf(out, "\t\tcache generation %v entries %v bitmaps %v\n", + textui.Fprintf(out, "\t\tlocation %v\n", fmtKey(body.Location)) + textui.Fprintf(out, "\t\tcache generation %v entries %v bitmaps %v\n", body.Generation, body.NumEntries, body.NumBitmaps) case btrfsitem.Inode: - fmt.Fprintf(out, ""+ + textui.Fprintf(out, ""+ "\t\tgeneration %v transid %v size %v nbytes %v\n"+ "\t\tblock group %v mode %o links %v uid %v gid %v rdev %v\n"+ "\t\tsequence %v flags %v\n", body.Generation, body.TransID, body.Size, body.NumBytes, body.BlockGroup, body.Mode, body.NLink, body.UID, body.GID, body.RDev, body.Sequence, body.Flags) - fmt.Fprintf(out, "\t\tatime %v\n", fmtTime(body.ATime)) - fmt.Fprintf(out, "\t\tctime %v\n", 
fmtTime(body.CTime)) - fmt.Fprintf(out, "\t\tmtime %v\n", fmtTime(body.MTime)) - fmt.Fprintf(out, "\t\totime %v\n", fmtTime(body.OTime)) + textui.Fprintf(out, "\t\tatime %v\n", fmtTime(body.ATime)) + textui.Fprintf(out, "\t\tctime %v\n", fmtTime(body.CTime)) + textui.Fprintf(out, "\t\tmtime %v\n", fmtTime(body.MTime)) + textui.Fprintf(out, "\t\totime %v\n", fmtTime(body.OTime)) case btrfsitem.InodeRefs: for _, ref := range body { - fmt.Fprintf(out, "\t\tindex %v namelen %v name: %s\n", + textui.Fprintf(out, "\t\tindex %v namelen %v name: %s\n", ref.Index, ref.NameLen, ref.Name) } //case btrfsitem.INODE_EXTREF_KEY: // // TODO case btrfsitem.DirEntry: - fmt.Fprintf(out, "\t\tlocation %v type %v\n", + textui.Fprintf(out, "\t\tlocation %v type %v\n", fmtKey(body.Location), body.Type) - fmt.Fprintf(out, "\t\ttransid %v data_len %v name_len %v\n", + textui.Fprintf(out, "\t\ttransid %v data_len %v name_len %v\n", body.TransID, body.DataLen, body.NameLen) - fmt.Fprintf(out, "\t\tname: %s\n", body.Name) + textui.Fprintf(out, "\t\tname: %s\n", body.Name) if len(body.Data) > 0 { - fmt.Fprintf(out, "\t\tdata %v\n", body.Data) + textui.Fprintf(out, "\t\tdata %v\n", body.Data) } //case btrfsitem.DIR_LOG_INDEX_KEY, btrfsitem.DIR_LOG_ITEM_KEY: // // TODO case btrfsitem.Root: - fmt.Fprintf(out, "\t\tgeneration %v root_dirid %v bytenr %d byte_limit %v bytes_used %v\n", + textui.Fprintf(out, "\t\tgeneration %v root_dirid %v bytenr %d byte_limit %v bytes_used %v\n", body.Generation, body.RootDirID, body.ByteNr, body.ByteLimit, body.BytesUsed) - fmt.Fprintf(out, "\t\tlast_snapshot %v flags %v refs %v\n", + textui.Fprintf(out, "\t\tlast_snapshot %v flags %v refs %v\n", body.LastSnapshot, body.Flags, body.Refs) - fmt.Fprintf(out, "\t\tdrop_progress %v drop_level %v\n", + textui.Fprintf(out, "\t\tdrop_progress %v drop_level %v\n", fmtKey(body.DropProgress), body.DropLevel) - fmt.Fprintf(out, "\t\tlevel %v generation_v2 %v\n", + textui.Fprintf(out, "\t\tlevel %v generation_v2 %v\n", 
body.Level, body.GenerationV2) if body.Generation == body.GenerationV2 { - fmt.Fprintf(out, "\t\tuuid %v\n", body.UUID) - fmt.Fprintf(out, "\t\tparent_uuid %v\n", body.ParentUUID) - fmt.Fprintf(out, "\t\treceived_uuid %v\n", body.ReceivedUUID) - fmt.Fprintf(out, "\t\tctransid %v otransid %v stransid %v rtransid %v\n", + textui.Fprintf(out, "\t\tuuid %v\n", body.UUID) + textui.Fprintf(out, "\t\tparent_uuid %v\n", body.ParentUUID) + textui.Fprintf(out, "\t\treceived_uuid %v\n", body.ReceivedUUID) + textui.Fprintf(out, "\t\tctransid %v otransid %v stransid %v rtransid %v\n", body.CTransID, body.OTransID, body.STransID, body.RTransID) - fmt.Fprintf(out, "\t\tctime %v\n", fmtTime(body.CTime)) - fmt.Fprintf(out, "\t\totime %v\n", fmtTime(body.OTime)) - fmt.Fprintf(out, "\t\tstime %v\n", fmtTime(body.STime)) - fmt.Fprintf(out, "\t\trtime %v\n", fmtTime(body.RTime)) + textui.Fprintf(out, "\t\tctime %v\n", fmtTime(body.CTime)) + textui.Fprintf(out, "\t\totime %v\n", fmtTime(body.OTime)) + textui.Fprintf(out, "\t\tstime %v\n", fmtTime(body.STime)) + textui.Fprintf(out, "\t\trtime %v\n", fmtTime(body.RTime)) } case btrfsitem.RootRef: var tag string @@ -185,20 +186,20 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri default: tag = fmt.Sprintf("(error: unhandled RootRef item type: %v)", item.Key.ItemType) } - fmt.Fprintf(out, "\t\troot %v key dirid %v sequence %v name %s\n", + textui.Fprintf(out, "\t\troot %v key dirid %v sequence %v name %s\n", tag, body.DirID, body.Sequence, body.Name) case btrfsitem.Extent: - fmt.Fprintf(out, "\t\trefs %v gen %v flags %v\n", + textui.Fprintf(out, "\t\trefs %v gen %v flags %v\n", body.Head.Refs, body.Head.Generation, body.Head.Flags) if body.Head.Flags.Has(btrfsitem.EXTENT_FLAG_TREE_BLOCK) { - fmt.Fprintf(out, "\t\ttree block %v level %v\n", + textui.Fprintf(out, "\t\ttree block %v level %v\n", fmtKey(body.Info.Key), body.Info.Level) } printExtentInlineRefs(out, body.Refs) case btrfsitem.Metadata: - 
fmt.Fprintf(out, "\t\trefs %v gen %v flags %v\n", + textui.Fprintf(out, "\t\trefs %v gen %v flags %v\n", body.Head.Refs, body.Head.Generation, body.Head.Flags) - fmt.Fprintf(out, "\t\ttree block skinny level %v\n", item.Key.Offset) + textui.Fprintf(out, "\t\ttree block skinny level %v\n", item.Key.Offset) printExtentInlineRefs(out, body.Refs) //case btrfsitem.EXTENT_DATA_REF_KEY: // // TODO @@ -206,72 +207,72 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri // // TODO case btrfsitem.ExtentCSum: start := btrfsvol.LogicalAddr(item.Key.Offset) - fmt.Fprintf(out, "\t\trange start %d end %d length %d", + textui.Fprintf(out, "\t\trange start %d end %d length %d", start, start.Add(body.Size()), body.Size()) sumsPerLine := slices.Max(1, len(btrfssum.CSum{})/body.ChecksumSize/2) i := 0 _ = body.Walk(ctx, func(pos btrfsvol.LogicalAddr, sum btrfssum.ShortSum) error { if i%sumsPerLine == 0 { - fmt.Fprintf(out, "\n\t\t") + textui.Fprintf(out, "\n\t\t") } else { - fmt.Fprintf(out, " ") + textui.Fprintf(out, " ") } - fmt.Fprintf(out, "[%d] 0x%x", pos, sum) + textui.Fprintf(out, "[%d] 0x%x", pos, sum) i++ return nil }) - fmt.Fprintf(out, "\n") + textui.Fprintf(out, "\n") case btrfsitem.FileExtent: - fmt.Fprintf(out, "\t\tgeneration %v type %v\n", + textui.Fprintf(out, "\t\tgeneration %v type %v\n", body.Generation, body.Type) switch body.Type { case btrfsitem.FILE_EXTENT_INLINE: - fmt.Fprintf(out, "\t\tinline extent data size %v ram_bytes %v compression %v\n", + textui.Fprintf(out, "\t\tinline extent data size %v ram_bytes %v compression %v\n", len(body.BodyInline), body.RAMBytes, body.Compression) case btrfsitem.FILE_EXTENT_PREALLOC: - fmt.Fprintf(out, "\t\tprealloc data disk byte %v nr %v\n", + textui.Fprintf(out, "\t\tprealloc data disk byte %v nr %v\n", body.BodyExtent.DiskByteNr, body.BodyExtent.DiskNumBytes) - fmt.Fprintf(out, "\t\tprealloc data offset %v nr %v\n", + textui.Fprintf(out, "\t\tprealloc data offset %v nr %v\n", 
body.BodyExtent.Offset, body.BodyExtent.NumBytes) case btrfsitem.FILE_EXTENT_REG: - fmt.Fprintf(out, "\t\textent data disk byte %d nr %d\n", + textui.Fprintf(out, "\t\textent data disk byte %d nr %d\n", body.BodyExtent.DiskByteNr, body.BodyExtent.DiskNumBytes) - fmt.Fprintf(out, "\t\textent data offset %d nr %d ram %v\n", + textui.Fprintf(out, "\t\textent data offset %d nr %d ram %v\n", body.BodyExtent.Offset, body.BodyExtent.NumBytes, body.RAMBytes) - fmt.Fprintf(out, "\t\textent compression %v\n", + textui.Fprintf(out, "\t\textent compression %v\n", body.Compression) default: - fmt.Fprintf(out, "\t\t(error) unknown file extent type %v", body.Type) + textui.Fprintf(out, "\t\t(error) unknown file extent type %v", body.Type) } case btrfsitem.BlockGroup: - fmt.Fprintf(out, "\t\tblock group used %v chunk_objectid %v flags %v\n", + textui.Fprintf(out, "\t\tblock group used %v chunk_objectid %v flags %v\n", body.Used, body.ChunkObjectID, body.Flags) case btrfsitem.FreeSpaceInfo: - fmt.Fprintf(out, "\t\tfree space info extent count %v flags %v\n", + textui.Fprintf(out, "\t\tfree space info extent count %v flags %v\n", body.ExtentCount, body.Flags) case btrfsitem.FreeSpaceBitmap: - fmt.Fprintf(out, "\t\tfree space bitmap\n") + textui.Fprintf(out, "\t\tfree space bitmap\n") case btrfsitem.Chunk: - fmt.Fprintf(out, "\t\tlength %d owner %d stripe_len %v type %v\n", + textui.Fprintf(out, "\t\tlength %d owner %d stripe_len %v type %v\n", body.Head.Size, body.Head.Owner, body.Head.StripeLen, body.Head.Type) - fmt.Fprintf(out, "\t\tio_align %v io_width %v sector_size %v\n", + textui.Fprintf(out, "\t\tio_align %v io_width %v sector_size %v\n", body.Head.IOOptimalAlign, body.Head.IOOptimalWidth, body.Head.IOMinSize) - fmt.Fprintf(out, "\t\tnum_stripes %v sub_stripes %v\n", + textui.Fprintf(out, "\t\tnum_stripes %v sub_stripes %v\n", body.Head.NumStripes, body.Head.SubStripes) for i, stripe := range body.Stripes { - fmt.Fprintf(out, "\t\t\tstripe %v devid %d offset %d\n", + 
textui.Fprintf(out, "\t\t\tstripe %v devid %d offset %d\n", i, stripe.DeviceID, stripe.Offset) - fmt.Fprintf(out, "\t\t\tdev_uuid %v\n", + textui.Fprintf(out, "\t\t\tdev_uuid %v\n", stripe.DeviceUUID) } case btrfsitem.Dev: - fmt.Fprintf(out, ""+ + textui.Fprintf(out, ""+ "\t\tdevid %d total_bytes %v bytes_used %v\n"+ "\t\tio_align %v io_width %v sector_size %v type %v\n"+ "\t\tgeneration %v start_offset %v dev_group %v\n"+ @@ -285,7 +286,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri body.DevUUID, body.FSUUID) case btrfsitem.DevExtent: - fmt.Fprintf(out, ""+ + textui.Fprintf(out, ""+ "\t\tdev extent chunk_tree %v\n"+ "\t\tchunk_objectid %v chunk_offset %d length %d\n"+ "\t\tchunk_tree_uuid %v\n", @@ -298,49 +299,49 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri //case btrfsitem.QGROUP_LIMIT_KEY: // // TODO case btrfsitem.UUIDMap: - fmt.Fprintf(out, "\t\tsubvol_id %d\n", body.ObjID) + textui.Fprintf(out, "\t\tsubvol_id %d\n", body.ObjID) //case btrfsitem.STRING_ITEM_KEY: // // TODO case btrfsitem.DevStats: - fmt.Fprintf(out, "\t\tpersistent item objectid %v offset %v\n", + textui.Fprintf(out, "\t\tpersistent item objectid %v offset %v\n", item.Key.ObjectID.Format(item.Key.ItemType), item.Key.Offset) switch item.Key.ObjectID { case btrfsprim.DEV_STATS_OBJECTID: - fmt.Fprintf(out, "\t\tdevice stats\n") - fmt.Fprintf(out, "\t\twrite_errs %v read_errs %v flush_errs %v corruption_errs %v generation %v\n", + textui.Fprintf(out, "\t\tdevice stats\n") + textui.Fprintf(out, "\t\twrite_errs %v read_errs %v flush_errs %v corruption_errs %v generation %v\n", body.Values[btrfsitem.DEV_STAT_WRITE_ERRS], body.Values[btrfsitem.DEV_STAT_READ_ERRS], body.Values[btrfsitem.DEV_STAT_FLUSH_ERRS], body.Values[btrfsitem.DEV_STAT_CORRUPTION_ERRS], body.Values[btrfsitem.DEV_STAT_GENERATION_ERRS]) default: - fmt.Fprintf(out, "\t\tunknown persistent item objectid %v\n", item.Key.ObjectID) + textui.Fprintf(out, 
"\t\tunknown persistent item objectid %v\n", item.Key.ObjectID) } //case btrfsitem.TEMPORARY_ITEM_KEY: // // TODO case btrfsitem.Empty: switch item.Key.ItemType { case btrfsitem.ORPHAN_ITEM_KEY: // 48 - fmt.Fprintf(out, "\t\torphan item\n") + textui.Fprintf(out, "\t\torphan item\n") case btrfsitem.TREE_BLOCK_REF_KEY: // 176 - fmt.Fprintf(out, "\t\ttree block backref\n") + textui.Fprintf(out, "\t\ttree block backref\n") case btrfsitem.SHARED_BLOCK_REF_KEY: // 182 - fmt.Fprintf(out, "\t\tshared block backref\n") + textui.Fprintf(out, "\t\tshared block backref\n") case btrfsitem.FREE_SPACE_EXTENT_KEY: // 199 - fmt.Fprintf(out, "\t\tfree space extent\n") + textui.Fprintf(out, "\t\tfree space extent\n") case btrfsitem.QGROUP_RELATION_KEY: // 246 // do nothing //case btrfsitem.EXTENT_REF_V0_KEY: - // fmt.Fprintf(out, "\t\textent ref v0 (deprecated)\n") + // textui.Fprintf(out, "\t\textent ref v0 (deprecated)\n") //case btrfsitem.CSUM_ITEM_KEY: - // fmt.Fprintf(out, "\t\tcsum item\n") + // textui.Fprintf(out, "\t\tcsum item\n") default: - fmt.Fprintf(out, "\t\t(error) unhandled empty item type: %v\n", item.Key.ItemType) + textui.Fprintf(out, "\t\t(error) unhandled empty item type: %v\n", item.Key.ItemType) } case btrfsitem.Error: - fmt.Fprintf(out, "\t\t(error) error item: %v\n", body.Err) + textui.Fprintf(out, "\t\t(error) error item: %v\n", body.Err) default: - fmt.Fprintf(out, "\t\t(error) unhandled item type: %T\n", body) + textui.Fprintf(out, "\t\t(error) unhandled item type: %T\n", body) } return nil }, @@ -361,37 +362,37 @@ func printHeaderInfo(out io.Writer, node btrfstree.Node) { var typename string if node.Head.Level > 0 { // internal node typename = "node" - fmt.Fprintf(out, "node %v level %v items %v free space %v", + textui.Fprintf(out, "node %v level %v items %v free space %v", node.Head.Addr, node.Head.Level, node.Head.NumItems, node.MaxItems()-node.Head.NumItems) } else { // leaf node typename = "leaf" - fmt.Fprintf(out, "leaf %d items %v free space %v", + 
textui.Fprintf(out, "leaf %d items %v free space %v", node.Head.Addr, node.Head.NumItems, node.LeafFreeSpace()) } - fmt.Fprintf(out, " generation %v owner %v\n", + textui.Fprintf(out, " generation %v owner %v\n", node.Head.Generation, node.Head.Owner) - fmt.Fprintf(out, "%v %d flags %v backref revision %v\n", + textui.Fprintf(out, "%v %d flags %v backref revision %v\n", typename, node.Head.Addr, node.Head.Flags, node.Head.BackrefRev) - fmt.Fprintf(out, "checksum stored %v\n", node.Head.Checksum.Fmt(node.ChecksumType)) + textui.Fprintf(out, "checksum stored %v\n", node.Head.Checksum.Fmt(node.ChecksumType)) if calcSum, err := node.CalculateChecksum(); err != nil { - fmt.Fprintf(out, "checksum calced %v\n", err) + textui.Fprintf(out, "checksum calced %v\n", err) } else { - fmt.Fprintf(out, "checksum calced %v\n", calcSum.Fmt(node.ChecksumType)) + textui.Fprintf(out, "checksum calced %v\n", calcSum.Fmt(node.ChecksumType)) } - fmt.Fprintf(out, "fs uuid %v\n", node.Head.MetadataUUID) - fmt.Fprintf(out, "chunk uuid %v\n", node.Head.ChunkTreeUUID) + textui.Fprintf(out, "fs uuid %v\n", node.Head.MetadataUUID) + textui.Fprintf(out, "chunk uuid %v\n", node.Head.ChunkTreeUUID) } // printExtentInlineRefs mimics part of btrfs-progs kernel-shared/print-tree.c:print_extent_item() @@ -401,22 +402,22 @@ func printExtentInlineRefs(out io.Writer, refs []btrfsitem.ExtentInlineRef) { case nil: switch ref.Type { case btrfsitem.TREE_BLOCK_REF_KEY: - fmt.Fprintf(out, "\t\ttree block backref root %v\n", + textui.Fprintf(out, "\t\ttree block backref root %v\n", btrfsprim.ObjID(ref.Offset)) case btrfsitem.SHARED_BLOCK_REF_KEY: - fmt.Fprintf(out, "\t\tshared block backref parent %v\n", + textui.Fprintf(out, "\t\tshared block backref parent %v\n", ref.Offset) default: - fmt.Fprintf(out, "\t\t(error) unexpected empty sub-item type: %v\n", ref.Type) + textui.Fprintf(out, "\t\t(error) unexpected empty sub-item type: %v\n", ref.Type) } case btrfsitem.ExtentDataRef: - fmt.Fprintf(out, "\t\textent 
data backref root %v objectid %v offset %v count %v\n", + textui.Fprintf(out, "\t\textent data backref root %v objectid %v offset %v count %v\n", subitem.Root, subitem.ObjectID, subitem.Offset, subitem.Count) case btrfsitem.SharedDataRef: - fmt.Fprintf(out, "\t\tshared data backref parent %v count %v\n", + textui.Fprintf(out, "\t\tshared data backref parent %v count %v\n", ref.Offset, subitem.Count) default: - fmt.Fprintf(out, "\t\t(error) unexpected sub-item type: %T\n", subitem) + textui.Fprintf(out, "\t\t(error) unexpected sub-item type: %T\n", subitem) } } } @@ -424,19 +425,19 @@ func printExtentInlineRefs(out io.Writer, refs []btrfsitem.ExtentInlineRef) { // mimics print-tree.c:btrfs_print_key() func fmtKey(key btrfsprim.Key) string { var out strings.Builder - fmt.Fprintf(&out, "key (%v %v", key.ObjectID.Format(key.ItemType), key.ItemType) + textui.Fprintf(&out, "key (%v %v", key.ObjectID.Format(key.ItemType), key.ItemType) switch key.ItemType { case btrfsitem.QGROUP_RELATION_KEY: //TODO, btrfsitem.QGROUP_INFO_KEY, btrfsitem.QGROUP_LIMIT_KEY: panic("not implemented") case btrfsitem.UUID_SUBVOL_KEY, btrfsitem.UUID_RECEIVED_SUBVOL_KEY: - fmt.Fprintf(&out, " %#08x)", key.Offset) + textui.Fprintf(&out, " %#08x)", key.Offset) case btrfsitem.ROOT_ITEM_KEY: - fmt.Fprintf(&out, " %v)", btrfsprim.ObjID(key.Offset)) + textui.Fprintf(&out, " %v)", btrfsprim.ObjID(key.Offset)) default: if key.Offset == math.MaxUint64 { - fmt.Fprintf(&out, " -1)") + textui.Fprintf(&out, " -1)") } else { - fmt.Fprintf(&out, " %v)", key.Offset) + textui.Fprintf(&out, " %v)", key.Offset) } } return out.String() -- cgit v1.2.3-2-g168b From 9fe871dcf2a15afa66df52e927e9c402b24231fe Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sun, 25 Dec 2022 01:05:31 -0700 Subject: tree-wide: Audit for fmt.Sprint usage, in favor of textui --- lib/btrfsprogs/btrfsinspect/print_tree.go | 5 ++--- lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go | 7 ++++--- 
lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go | 6 +++--- lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go | 4 ++-- 4 files changed, 11 insertions(+), 11 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/print_tree.go b/lib/btrfsprogs/btrfsinspect/print_tree.go index b882c6b..2ede80f 100644 --- a/lib/btrfsprogs/btrfsinspect/print_tree.go +++ b/lib/btrfsprogs/btrfsinspect/print_tree.go @@ -6,7 +6,6 @@ package btrfsinspect import ( "context" - "fmt" "io" "math" "strings" @@ -184,7 +183,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri case btrfsitem.ROOT_BACKREF_KEY: tag = "backref" default: - tag = fmt.Sprintf("(error: unhandled RootRef item type: %v)", item.Key.ItemType) + tag = textui.Sprintf("(error: unhandled RootRef item type: %v)", item.Key.ItemType) } textui.Fprintf(out, "\t\troot %v key dirid %v sequence %v name %s\n", tag, body.DirID, body.Sequence, body.Name) @@ -444,6 +443,6 @@ func fmtKey(key btrfsprim.Key) string { } func fmtTime(t btrfsprim.Time) string { - return fmt.Sprintf("%v.%v (%v)", + return textui.Sprintf("%v.%v (%v)", t.Sec, t.NSec, t.ToStd().Format("2006-01-02 15:04:05")) } diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go index c6e23bc..0b5fc6f 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go @@ -6,16 +6,17 @@ package rebuildmappings import ( "context" - "fmt" "sort" "github.com/datawire/dlib/dlog" + "golang.org/x/text/number" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" "git.lukeshu.com/btrfs-progs-ng/lib/containers" "git.lukeshu.com/btrfs-progs-ng/lib/maps" + "git.lukeshu.com/btrfs-progs-ng/lib/textui" ) const minFuzzyPct = 0.5 @@ -95,12 +96,12 @@ func 
fuzzyMatchBlockGroupSums(ctx context.Context, matchesStr = "" case 1: // not sure how this can happen, but whatev pct := float64(d-best.Dat[0].N) / float64(d) - matchesStr = fmt.Sprintf("%v%%", int(100*pct)) + matchesStr = textui.Sprintf("%v", number.Percent(pct)) apply = pct > minFuzzyPct case 2: pct := float64(d-best.Dat[0].N) / float64(d) pct2 := float64(d-best.Dat[1].N) / float64(d) - matchesStr = fmt.Sprintf("best=%v%% secondbest=%v%%", int(100*pct), int(100*pct2)) + matchesStr = textui.Sprintf("best=%v secondbest=%v", number.Percent(pct), number.Percent(pct2)) apply = pct > minFuzzyPct && pct2 < minFuzzyPct } lvl := dlog.LogLevelError diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go index 5bf1347..6eb119c 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go @@ -168,20 +168,20 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect unmappedPhysical += region.End.Sub(region.Beg) } } - dlog.Info(ctx, fmt.Sprintf("... %d KiB of unmapped physical space (across %d regions)", textui.Humanized(int(unmappedPhysical/1024)), textui.Humanized(numUnmappedPhysical))) + dlog.Infof(ctx, "... %d KiB of unmapped physical space (across %d regions)", textui.Humanized(int(unmappedPhysical/1024)), textui.Humanized(numUnmappedPhysical)) unmappedLogicalRegions := ListUnmappedLogicalRegions(fs, logicalSums) var unmappedLogical btrfsvol.AddrDelta for _, region := range unmappedLogicalRegions { unmappedLogical += region.Size() } - dlog.Info(ctx, fmt.Sprintf("... %d KiB of unmapped summed logical space (across %d regions)", textui.Humanized(int(unmappedLogical/1024)), textui.Humanized(len(unmappedLogicalRegions)))) + dlog.Infof(ctx, "... 
%d KiB of unmapped summed logical space (across %d regions)", textui.Humanized(int(unmappedLogical/1024)), textui.Humanized(len(unmappedLogicalRegions))) var unmappedBlockGroups btrfsvol.AddrDelta for _, bg := range bgs { unmappedBlockGroups += bg.Size } - dlog.Info(ctx, fmt.Sprintf("... %d KiB of unmapped block groups (across %d groups)", textui.Humanized(int(unmappedBlockGroups/1024)), textui.Humanized(len(bgs)))) + dlog.Infof(ctx, "... %d KiB of unmapped block groups (across %d groups)", textui.Humanized(int(unmappedBlockGroups/1024)), textui.Humanized(len(bgs))) dlog.Info(ctx, "detailed report:") for _, devID := range maps.SortedKeys(unmappedPhysicalRegions) { diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go index ab4b55f..4f0f5d4 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go @@ -178,7 +178,7 @@ type rootStats struct { } func (s rootStats) String() string { - return fmt.Sprintf("tree %v: adding root node@%v: %v%% (%v/%v) (added %v items, replaced %v items)", + return textui.Sprintf("tree %v: adding root node@%v: %v%% (%v/%v) (added %v items, replaced %v items)", s.TreeID, s.RootNode, int(100*float64(s.DoneLeafs)/float64(s.TotalLeafs)), s.DoneLeafs, s.TotalLeafs, @@ -309,7 +309,7 @@ type indexStats struct { } func (s indexStats) String() string { - return fmt.Sprintf("tree %v: indexing leaf nodes: %v%% (%v/%v)", + return textui.Sprintf("tree %v: indexing leaf nodes: %v%% (%v/%v)", s.TreeID, int(100*float64(s.DoneNodes)/float64(s.TotalNodes)), s.DoneNodes, s.TotalNodes) -- cgit v1.2.3-2-g168b From 31341857abc73334852d3b888f4fb558d8ed3c61 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sun, 25 Dec 2022 14:03:02 -0700 Subject: Factor logger code from main.go in to textui --- lib/textui/log.go | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) 
create mode 100644 lib/textui/log.go (limited to 'lib') diff --git a/lib/textui/log.go b/lib/textui/log.go new file mode 100644 index 0000000..cee51db --- /dev/null +++ b/lib/textui/log.go @@ -0,0 +1,33 @@ +// Copyright (C) 2022 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package textui + +import ( + "github.com/datawire/dlib/dlog" + "github.com/sirupsen/logrus" + "github.com/spf13/pflag" +) + +type LogLevelFlag struct { + logrus.Level +} + +var _ pflag.Value = (*LogLevelFlag)(nil) + +// Type implements pflag.Value. +func (lvl *LogLevelFlag) Type() string { return "loglevel" } + +// Type implements pflag.Value. +func (lvl *LogLevelFlag) Set(str string) error { + var err error + lvl.Level, err = logrus.ParseLevel(str) + return err +} + +func NewLogger(lvl logrus.Level) dlog.Logger { + logger := logrus.New() + logger.SetLevel(lvl) + return dlog.WrapLogrus(logger) +} -- cgit v1.2.3-2-g168b From 29ad8ec24e07888953c14fb5435d2e8e2023931b Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sun, 25 Dec 2022 22:47:51 -0700 Subject: textui: Implement a custom dlog backend that uses x/text/message --- lib/textui/log.go | 282 +++++++++++++++++++++++++++++++++++++++++++++++-- lib/textui/log_test.go | 64 +++++++++++ 2 files changed, 336 insertions(+), 10 deletions(-) create mode 100644 lib/textui/log_test.go (limited to 'lib') diff --git a/lib/textui/log.go b/lib/textui/log.go index cee51db..ca8c201 100644 --- a/lib/textui/log.go +++ b/lib/textui/log.go @@ -1,17 +1,33 @@ +// Copyright (C) 2019-2022 Ambassador Labs // Copyright (C) 2022 Luke Shumaker // -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-License-Identifier: Apache-2.0 +// +// Contains code based on: +// https://github.com/datawire/dlib/blob/b09ab2e017e16d261f05fff5b3b860d645e774d4/dlog/logger_logrus.go +// https://github.com/datawire/dlib/blob/b09ab2e017e16d261f05fff5b3b860d645e774d4/dlog/logger_testing.go +// 
https://github.com/telepresenceio/telepresence/blob/ece94a40b00a90722af36b12e40f91cbecc0550c/pkg/log/formatter.go package textui import ( + "bytes" + "fmt" + "io" + "log" + "path/filepath" + "runtime" + "sort" + "strings" + "sync" + "time" + "github.com/datawire/dlib/dlog" - "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) type LogLevelFlag struct { - logrus.Level + Level dlog.LogLevel } var _ pflag.Value = (*LogLevelFlag)(nil) @@ -21,13 +37,259 @@ func (lvl *LogLevelFlag) Type() string { return "loglevel" } // Type implements pflag.Value. func (lvl *LogLevelFlag) Set(str string) error { - var err error - lvl.Level, err = logrus.ParseLevel(str) - return err + switch strings.ToLower(str) { + case "error": + lvl.Level = dlog.LogLevelError + case "warn", "warning": + lvl.Level = dlog.LogLevelWarn + case "info": + lvl.Level = dlog.LogLevelInfo + case "debug": + lvl.Level = dlog.LogLevelDebug + case "trace": + lvl.Level = dlog.LogLevelTrace + default: + return fmt.Errorf("invalid log level: %q", str) + } + return nil +} + +// Type implements pflag.Value. +func (lvl *LogLevelFlag) String() string { + switch lvl.Level { + case dlog.LogLevelError: + return "error" + case dlog.LogLevelWarn: + return "warn" + case dlog.LogLevelInfo: + return "info" + case dlog.LogLevelDebug: + return "debug" + case dlog.LogLevelTrace: + return "trace" + default: + panic(fmt.Errorf("invalid log level: %#v", lvl.Level)) + } +} + +type logger struct { + parent *logger + out io.Writer + lvl dlog.LogLevel + + // only valid if parent is non-nil + fieldKey string + fieldVal any +} + +var _ dlog.OptimizedLogger = (*logger)(nil) + +func NewLogger(out io.Writer, lvl dlog.LogLevel) dlog.Logger { + return &logger{ + out: out, + lvl: lvl, + } +} + +// Helper implements dlog.Logger. +func (l *logger) Helper() {} + +// WithField implements dlog.Logger. 
+func (l *logger) WithField(key string, value any) dlog.Logger { + return &logger{ + parent: l, + out: l.out, + lvl: l.lvl, + + fieldKey: key, + fieldVal: value, + } +} + +type logWriter struct { + log *logger + lvl dlog.LogLevel +} + +// Write implements io.Writer. +func (lw logWriter) Write(data []byte) (int, error) { + lw.log.log(lw.lvl, func(w io.Writer) { + _, _ = w.Write(data) + }) + return len(data), nil +} + +// StdLogger implements dlog.Logger. +func (l *logger) StdLogger(lvl dlog.LogLevel) *log.Logger { + return log.New(logWriter{log: l, lvl: lvl}, "", 0) +} + +// Log implements dlog.Logger. +func (l *logger) Log(lvl dlog.LogLevel, msg string) { + panic("should not happen: optimized log methods should be used instead") +} + +// UnformattedLog implements dlog.OptimizedLogger. +func (l *logger) UnformattedLog(lvl dlog.LogLevel, args ...any) { + l.log(lvl, func(w io.Writer) { + printer.Fprint(w, args...) + }) +} + +// UnformattedLogln implements dlog.OptimizedLogger. +func (l *logger) UnformattedLogln(lvl dlog.LogLevel, args ...any) { + l.log(lvl, func(w io.Writer) { + printer.Fprintln(w, args...) + }) +} + +// UnformattedLogf implements dlog.OptimizedLogger. +func (l *logger) UnformattedLogf(lvl dlog.LogLevel, format string, args ...any) { + l.log(lvl, func(w io.Writer) { + printer.Fprintf(w, format, args...) + }) +} + +var ( + logBuf bytes.Buffer + logMu sync.Mutex + thisModDir string +) + +func init() { + _, file, _, _ := runtime.Caller(0) + thisModDir = filepath.Dir(filepath.Dir(filepath.Dir(file))) +} + +func (l *logger) log(lvl dlog.LogLevel, writeMsg func(io.Writer)) { + // boilerplate ///////////////////////////////////////////////////////// + if lvl > l.lvl { + return + } + // This is optimized for mostly-single-threaded usage. 
If I cared more + // about multi-threaded performance, I'd trade in some + // memory-use/allocations and (1) instead of using a static `logBuf`, + // I'd have a `logBufPool` `sync.Pool`, and (2) have the the final call + // to `l.out.Write()` be the only thing protected by `logMu`. + logMu.Lock() + defer logMu.Unlock() + defer logBuf.Reset() + + // time //////////////////////////////////////////////////////////////// + now := time.Now() + const timeFmt = "2006-01-02 15:04:05.0000" + logBuf.WriteString(timeFmt) + now.AppendFormat(logBuf.Bytes()[:0], timeFmt) + + // level /////////////////////////////////////////////////////////////// + switch lvl { + case dlog.LogLevelError: + logBuf.WriteString(" ERR") + case dlog.LogLevelWarn: + logBuf.WriteString(" WRN") + case dlog.LogLevelInfo: + logBuf.WriteString(" INF") + case dlog.LogLevelDebug: + logBuf.WriteString(" DBG") + case dlog.LogLevelTrace: + logBuf.WriteString(" TRC") + } + + // fields (early) ////////////////////////////////////////////////////// + fields := make(map[string]any) + var fieldKeys []string + for f := l; f.parent != nil; f = f.parent { + if _, exists := fields[f.fieldKey]; exists { + continue + } + fields[f.fieldKey] = f.fieldVal + fieldKeys = append(fieldKeys, f.fieldKey) + } + sort.Slice(fieldKeys, func(i, j int) bool { + iOrd := fieldOrd(fieldKeys[i]) + jOrd := fieldOrd(fieldKeys[j]) + if iOrd != jOrd { + return iOrd < jOrd + } + return fieldKeys[i] < fieldKeys[j] + }) + nextField := len(fieldKeys) + for i, fieldKey := range fieldKeys { + if fieldOrd(fieldKey) >= 0 { + nextField = i + break + } + printer.Fprintf(&logBuf, " %s=%v", fieldName(fieldKey), fields[fieldKey]) + } + + // message ///////////////////////////////////////////////////////////// + logBuf.WriteString(" : ") + writeMsg(&logBuf) + + // fields (late) /////////////////////////////////////////////////////// + if nextField < len(fieldKeys) { + logBuf.WriteString(" :") + } + for _, fieldKey := range fieldKeys[nextField:] { + 
printer.Fprintf(&logBuf, " %s=%v", fieldName(fieldKey), fields[fieldKey]) + } + + // caller ////////////////////////////////////////////////////////////// + const ( + thisModule = "git.lukeshu.com/btrfs-progs-ng" + thisPackage = "git.lukeshu.com/btrfs-progs-ng/lib/textui" + maximumCallerDepth int = 25 + minimumCallerDepth int = 3 // runtime.Callers + .log + .Log + ) + var pcs [maximumCallerDepth]uintptr + depth := runtime.Callers(minimumCallerDepth, pcs[:]) + frames := runtime.CallersFrames(pcs[:depth]) + for f, again := frames.Next(); again; f, again = frames.Next() { + if !strings.HasPrefix(f.Function, thisModule+"/") { + continue + } + if strings.HasPrefix(f.Function, thisPackage+".") { + continue + } + if nextField == len(fieldKeys) { + logBuf.WriteString(" :") + } + file := f.File[strings.LastIndex(f.File, thisModDir+"/")+len(thisModDir+"/"):] + fmt.Fprintf(&logBuf, " (from %s:%d)", file, f.Line) + break + } + + // boilerplate ///////////////////////////////////////////////////////// + logBuf.WriteByte('\n') + _, _ = l.out.Write(logBuf.Bytes()) +} + +// fieldOrd returns the sort-position for a given log-field-key. Lower return +// values should be positioned on the left when logging, and higher values +// should be positioned on the right; values <0 should be on the left of the log +// message, while values ≥0 should be on the right of the log message. 
+func fieldOrd(key string) int { + switch key { + case "THREAD": // dgroup + return -7 + case "dexec.pid": + return -6 + case "dexec.stream": + return -5 + case "dexec.data": + return -4 + case "dexec.err": + return -3 + default: + return 1 + } } -func NewLogger(lvl logrus.Level) dlog.Logger { - logger := logrus.New() - logger.SetLevel(lvl) - return dlog.WrapLogrus(logger) +func fieldName(key string) string { + switch key { + case "THREAD": + return "thread" + default: + return key + } } diff --git a/lib/textui/log_test.go b/lib/textui/log_test.go new file mode 100644 index 0000000..08eb38c --- /dev/null +++ b/lib/textui/log_test.go @@ -0,0 +1,64 @@ +// Copyright (C) 2022 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package textui_test + +import ( + "context" + "strings" + "testing" + + "github.com/datawire/dlib/dlog" + "github.com/stretchr/testify/assert" + + "git.lukeshu.com/btrfs-progs-ng/lib/textui" +) + +func logLineRegexp(inner string) string { + return `[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]{4} ` + inner + ` \(from lib/textui/log_test\.go:[0-9]+\)\n` +} + +func TestLogFormat(t *testing.T) { + var out strings.Builder + ctx := dlog.WithLogger(context.Background(), textui.NewLogger(&out, dlog.LogLevelTrace)) + dlog.Debugf(ctx, "foo %d", 12345) + assert.Regexp(t, + `^`+logLineRegexp(`DBG : foo 12,345 :`)+`$`, + out.String()) +} + +func TestLogLevel(t *testing.T) { + var out strings.Builder + ctx := dlog.WithLogger(context.Background(), textui.NewLogger(&out, dlog.LogLevelInfo)) + dlog.Error(ctx, "Error") + dlog.Warn(ctx, "Warn") + dlog.Info(ctx, "Info") + dlog.Debug(ctx, "Debug") + dlog.Trace(ctx, "Trace") + dlog.Trace(ctx, "Trace") + dlog.Debug(ctx, "Debug") + dlog.Info(ctx, "Info") + dlog.Warn(ctx, "Warn") + dlog.Error(ctx, "Error") + assert.Regexp(t, + `^`+ + logLineRegexp(`ERR : Error :`)+ + logLineRegexp(`WRN : Warn :`)+ + logLineRegexp(`INF : Info :`)+ + logLineRegexp(`INF : Info :`)+ + logLineRegexp(`WRN : Warn 
:`)+ + logLineRegexp(`ERR : Error :`)+ + `$`, + out.String()) +} + +func TestLogField(t *testing.T) { + var out strings.Builder + ctx := dlog.WithLogger(context.Background(), textui.NewLogger(&out, dlog.LogLevelInfo)) + ctx = dlog.WithField(ctx, "foo", 12345) + dlog.Info(ctx, "msg") + assert.Regexp(t, + `^`+logLineRegexp(`INF : msg : foo=12,345`)+`$`, + out.String()) +} -- cgit v1.2.3-2-g168b From b687c4d71bd891e5c0b40c1bfe3b9d352fca2114 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sun, 25 Dec 2022 23:30:35 -0700 Subject: tree-wide: Remove some now-superfluous calls to textui.Humanized --- lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go index 6eb119c..5d44b4e 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go @@ -15,7 +15,6 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" "git.lukeshu.com/btrfs-progs-ng/lib/containers" "git.lukeshu.com/btrfs-progs-ng/lib/maps" - "git.lukeshu.com/btrfs-progs-ng/lib/textui" ) func getNodeSize(fs *btrfs.FS) (btrfsvol.AddrDelta, error) { @@ -168,20 +167,20 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect unmappedPhysical += region.End.Sub(region.Beg) } } - dlog.Infof(ctx, "... %d KiB of unmapped physical space (across %d regions)", textui.Humanized(int(unmappedPhysical/1024)), textui.Humanized(numUnmappedPhysical)) + dlog.Infof(ctx, "... 
%d KiB of unmapped physical space (across %d regions)", int(unmappedPhysical/1024), numUnmappedPhysical) unmappedLogicalRegions := ListUnmappedLogicalRegions(fs, logicalSums) var unmappedLogical btrfsvol.AddrDelta for _, region := range unmappedLogicalRegions { unmappedLogical += region.Size() } - dlog.Infof(ctx, "... %d KiB of unmapped summed logical space (across %d regions)", textui.Humanized(int(unmappedLogical/1024)), textui.Humanized(len(unmappedLogicalRegions))) + dlog.Infof(ctx, "... %d KiB of unmapped summed logical space (across %d regions)", int(unmappedLogical/1024), len(unmappedLogicalRegions)) var unmappedBlockGroups btrfsvol.AddrDelta for _, bg := range bgs { unmappedBlockGroups += bg.Size } - dlog.Infof(ctx, "... %d KiB of unmapped block groups (across %d groups)", textui.Humanized(int(unmappedBlockGroups/1024)), textui.Humanized(len(bgs))) + dlog.Infof(ctx, "... %d KiB of unmapped block groups (across %d groups)", int(unmappedBlockGroups/1024), len(bgs)) dlog.Info(ctx, "detailed report:") for _, devID := range maps.SortedKeys(unmappedPhysicalRegions) { -- cgit v1.2.3-2-g168b From 26a23c51f9b3e22ef3e68d0d174d118a78dd4172 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sun, 25 Dec 2022 23:46:53 -0700 Subject: tree-wide, except rebuildnodes: Fuss with logging to take advantage of textui --- .../btrfsinspect/rebuildmappings/fuzzymatchsums.go | 16 ++++--- .../btrfsinspect/rebuildmappings/matchsums.go | 6 +-- .../rebuildmappings/rebuildmappings.go | 44 ++++++++++------- lib/btrfsprogs/btrfsinspect/scandevices.go | 56 +++++++++++----------- lib/textui/log.go | 17 ++++++- 5 files changed, 82 insertions(+), 57 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go index 0b5fc6f..ae83513 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go @@ -43,7 
+43,10 @@ func fuzzyMatchBlockGroupSums(ctx context.Context, physicalSums map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr], logicalSums btrfssum.SumRunWithGaps[btrfsvol.LogicalAddr], ) error { - dlog.Info(ctx, "... Indexing physical regions...") // O(m) + _ctx := ctx + + ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.substep", "indexing") + dlog.Info(ctx, "Indexing physical regions...") // O(m) regions := ListUnmappedPhysicalRegions(fs) physicalIndex := make(map[btrfssum.ShortSum][]btrfsvol.QualifiedPhysicalAddr) if err := WalkUnmappedPhysicalRegions(ctx, physicalSums, regions, func(devID btrfsvol.DeviceID, region btrfssum.SumRun[btrfsvol.PhysicalAddr]) error { @@ -57,9 +60,10 @@ func fuzzyMatchBlockGroupSums(ctx context.Context, }); err != nil { return err } - dlog.Info(ctx, "... ... done indexing") + dlog.Info(ctx, "... done indexing") - dlog.Info(ctx, "... Searching...") + ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.substep", "searching") + dlog.Info(ctx, "Searching...") numBlockgroups := len(blockgroups) for i, bgLAddr := range maps.SortedKeys(blockgroups) { blockgroup := blockgroups[bgLAddr] @@ -108,7 +112,7 @@ func fuzzyMatchBlockGroupSums(ctx context.Context, if apply { lvl = dlog.LogLevelInfo } - dlog.Logf(ctx, lvl, "... (%v/%v) blockgroup[laddr=%v] matches=[%s]; bestpossible=%v%% (based on %v runs)", + dlog.Logf(ctx, lvl, "(%v/%v) blockgroup[laddr=%v] matches=[%s]; bestpossible=%v%% (based on %v runs)", i+1, numBlockgroups, bgLAddr, matchesStr, int(100*bgRun.PctFull()), len(bgRun.Runs)) if !apply { continue @@ -125,12 +129,12 @@ func fuzzyMatchBlockGroupSums(ctx context.Context, }, } if err := fs.LV.AddMapping(mapping); err != nil { - dlog.Errorf(ctx, "... error: %v", err) + dlog.Errorf(ctx, "error: %v", err) continue } delete(blockgroups, bgLAddr) } - dlog.Info(ctx, "... ... 
done searching") + dlog.Info(ctx, "done searching") return nil } diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go index 6a8333a..be82f87 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go @@ -29,7 +29,7 @@ func matchBlockGroupSums(ctx context.Context, blockgroup := blockgroups[bgLAddr] bgRun := SumsForLogicalRegion(logicalSums, blockgroup.LAddr, blockgroup.Size) if len(bgRun.Runs) == 0 { - dlog.Errorf(ctx, "... (%v/%v) blockgroup[laddr=%v] can't be matched because it has 0 runs", + dlog.Errorf(ctx, "(%v/%v) blockgroup[laddr=%v] can't be matched because it has 0 runs", i+1, numBlockgroups, bgLAddr) continue } @@ -55,7 +55,7 @@ func matchBlockGroupSums(ctx context.Context, if len(matches) == 1 { lvl = dlog.LogLevelInfo } - dlog.Logf(ctx, lvl, "... (%v/%v) blockgroup[laddr=%v] has %v matches based on %v%% coverage from %v runs", + dlog.Logf(ctx, lvl, "(%v/%v) blockgroup[laddr=%v] has %v matches based on %v%% coverage from %v runs", i+1, numBlockgroups, bgLAddr, len(matches), int(100*bgRun.PctFull()), len(bgRun.Runs)) if len(matches) != 1 { continue @@ -72,7 +72,7 @@ func matchBlockGroupSums(ctx context.Context, }, } if err := fs.LV.AddMapping(mapping); err != nil { - dlog.Errorf(ctx, "... 
error: %v", err) + dlog.Errorf(ctx, "error: %v", err) continue } delete(blockgroups, bgLAddr) diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go index 5d44b4e..9807764 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go @@ -53,35 +53,39 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect dlog.Infof(ctx, "plan: 5/6 search for block groups in checksum map (exact)") dlog.Infof(ctx, "plan: 6/6 search for block groups in checksum map (fuzzy)") - dlog.Infof(ctx, "1/6: Processing %d chunks...", numChunks) + _ctx := ctx + ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "1/6") + dlog.Infof(_ctx, "1/6: Processing %d chunks...", numChunks) for _, devID := range devIDs { devResults := scanResults[devID] for _, chunk := range devResults.FoundChunks { for _, mapping := range chunk.Chunk.Mappings(chunk.Key) { if err := fs.LV.AddMapping(mapping); err != nil { - dlog.Errorf(ctx, "... error: adding chunk: %v", err) + dlog.Errorf(ctx, "error: adding chunk: %v", err) } } } } - dlog.Info(ctx, "... done processing chunks") + dlog.Info(_ctx, "... done processing chunks") - dlog.Infof(ctx, "2/6: Processing %d device extents...", numDevExts) + ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "2/6") + dlog.Infof(_ctx, "2/6: Processing %d device extents...", numDevExts) for _, devID := range devIDs { devResults := scanResults[devID] for _, ext := range devResults.FoundDevExtents { if err := fs.LV.AddMapping(ext.DevExt.Mapping(ext.Key)); err != nil { - dlog.Errorf(ctx, "... error: adding devext: %v", err) + dlog.Errorf(ctx, "error: adding devext: %v", err) } } } - dlog.Info(ctx, "... done processing device extents") + dlog.Info(_ctx, "... done processing device extents") // Do the nodes "last" to avoid bloating the mappings table // too much. 
(Because nodes are numerous and small, while the // others are few and large; so it is likely that many of the // nodes will be subsumed by other things.) - dlog.Infof(ctx, "3/6: Processing %d nodes...", numNodes) + ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "3/6") + dlog.Infof(_ctx, "3/6: Processing %d nodes...", numNodes) for _, devID := range devIDs { devResults := scanResults[devID] // Sort them so that progress numbers are predictable. @@ -96,16 +100,17 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect Size: nodeSize, SizeLocked: false, }); err != nil { - dlog.Errorf(ctx, "... error: adding node ident: %v", err) + dlog.Errorf(ctx, "error: adding node ident: %v", err) } } } } - dlog.Info(ctx, "... done processing nodes") + dlog.Info(_ctx, "... done processing nodes") // Use block groups to add missing flags (and as a hint to // combine node entries). - dlog.Infof(ctx, "4/6: Processing %d block groups...", numBlockGroups) + ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "4/6") + dlog.Infof(_ctx, "4/6: Processing %d block groups...", numBlockGroups) // First dedup them, because they change for allocations and // CoW means that they'll bounce around a lot, so you likely // have oodles of duplicates? @@ -118,7 +123,7 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect bg := bgs[bgLAddr] otherLAddr, otherPAddr := fs.LV.ResolveAny(bg.LAddr, bg.Size) if otherLAddr < 0 || otherPAddr.Addr < 0 { - dlog.Errorf(ctx, "... error: could not pair blockgroup laddr=%v (size=%v flags=%v) with a mapping", + dlog.Errorf(ctx, "error: could not pair blockgroup laddr=%v (size=%v flags=%v) with a mapping", bg.LAddr, bg.Size, bg.Flags) continue } @@ -135,14 +140,15 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect }, } if err := fs.LV.AddMapping(mapping); err != nil { - dlog.Errorf(ctx, "... 
error: adding flags from blockgroup: %v", err) + dlog.Errorf(ctx, "error: adding flags from blockgroup: %v", err) continue } delete(bgs, bgLAddr) } - dlog.Info(ctx, "... done processing block groups") + dlog.Info(_ctx, "... done processing block groups") - dlog.Infof(ctx, "5/6: Searching for %d block groups in checksum map (exact)...", len(bgs)) + ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "5/6") + dlog.Infof(_ctx, "5/6: Searching for %d block groups in checksum map (exact)...", len(bgs)) physicalSums := ExtractPhysicalSums(scanResults) logicalSums := ExtractLogicalSums(ctx, scanResults) if err := matchBlockGroupSums(ctx, fs, bgs, physicalSums, logicalSums); err != nil { @@ -150,13 +156,15 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect } dlog.Info(ctx, "... done searching for exact block groups") - dlog.Infof(ctx, "6/6: Searching for %d block groups in checksum map (fuzzy)...", len(bgs)) + ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "6/6") + dlog.Infof(_ctx, "6/6: Searching for %d block groups in checksum map (fuzzy)...", len(bgs)) if err := fuzzyMatchBlockGroupSums(ctx, fs, bgs, physicalSums, logicalSums); err != nil { return err } - dlog.Info(ctx, "... done searching for fuzzy block groups") + dlog.Info(_ctx, "... done searching for fuzzy block groups") - dlog.Info(ctx, "report:") + ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "report") + dlog.Info(_ctx, "report:") unmappedPhysicalRegions := ListUnmappedPhysicalRegions(fs) var unmappedPhysical btrfsvol.AddrDelta @@ -182,7 +190,7 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect } dlog.Infof(ctx, "... 
%d KiB of unmapped block groups (across %d groups)", int(unmappedBlockGroups/1024), len(bgs)) - dlog.Info(ctx, "detailed report:") + dlog.Info(_ctx, "detailed report:") for _, devID := range maps.SortedKeys(unmappedPhysicalRegions) { for _, region := range unmappedPhysicalRegions[devID] { dlog.Infof(ctx, "... unmapped physical region: dev=%v beg=%v end=%v (size=%v)", diff --git a/lib/btrfsprogs/btrfsinspect/scandevices.go b/lib/btrfsprogs/btrfsinspect/scandevices.go index b6c929b..628995a 100644 --- a/lib/btrfsprogs/btrfsinspect/scandevices.go +++ b/lib/btrfsprogs/btrfsinspect/scandevices.go @@ -34,7 +34,7 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS) (ScanDevicesResult, error) { for id, dev := range fs.LV.PhysicalVolumes() { id := id dev := dev - grp.Go(dev.Name(), func(ctx context.Context) error { + grp.Go(fmt.Sprintf("dev-%d", id), func(ctx context.Context) error { sb, err := dev.Superblock() if err != nil { return err @@ -80,10 +80,7 @@ type SysExtentCSum struct { } type scanStats struct { - DevName string - DevSize btrfsvol.PhysicalAddr - - Pos btrfsvol.PhysicalAddr + textui.Portion[btrfsvol.PhysicalAddr] NumFoundNodes int NumFoundChunks int @@ -93,10 +90,8 @@ type scanStats struct { } func (s scanStats) String() string { - return fmt.Sprintf("... dev[%q] scanned %v%% (%d/%d) (found: %v nodes, %v chunks, %v block groups, %v dev extents, %v sum items)", - s.DevName, - int(100*float64(s.Pos)/float64(s.DevSize)), - s.Pos, s.DevSize, + return textui.Sprintf("scanned %v (found: %v nodes, %v chunks, %v block groups, %v dev extents, %v sum items)", + s.Portion, s.NumFoundNodes, s.NumFoundChunks, s.NumFoundBlockGroups, @@ -107,6 +102,8 @@ func (s scanStats) String() string { // ScanOneDevice mostly mimics btrfs-progs // cmds/rescue-chunk-recover.c:scan_one_device(). 
func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblock) (ScanOneDeviceResult, error) { + ctx = dlog.WithField(ctx, "btrfsinspect.scandevices.dev", dev.Name()) + result := ScanOneDeviceResult{ FoundNodes: make(map[btrfsvol.LogicalAddr][]btrfsvol.PhysicalAddr), } @@ -132,9 +129,10 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo progressWriter := textui.NewProgress[scanStats](ctx, dlog.LogLevelInfo, 1*time.Second) progress := func(pos btrfsvol.PhysicalAddr) { progressWriter.Set(scanStats{ - DevName: dev.Name(), - DevSize: devSize, - Pos: pos, + Portion: textui.Portion[btrfsvol.PhysicalAddr]{ + N: pos, + D: devSize, + }, NumFoundNodes: len(result.FoundNodes), NumFoundChunks: len(result.FoundChunks), NumFoundBlockGroups: len(result.FoundBlockGroups), @@ -171,7 +169,7 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo nodeRef, err := btrfstree.ReadNode[btrfsvol.PhysicalAddr](dev, sb, pos, btrfstree.NodeExpectations{}) if err != nil { if !errors.Is(err, btrfstree.ErrNotANode) { - dlog.Errorf(ctx, "... dev[%q] error: %v", dev.Name(), err) + dlog.Errorf(ctx, "error: %v", err) } } else { result.FoundNodes[nodeRef.Data.Head.Addr] = append(result.FoundNodes[nodeRef.Data.Head.Addr], nodeRef.Addr) @@ -180,12 +178,12 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo case btrfsitem.CHUNK_ITEM_KEY: chunk, ok := item.Body.(btrfsitem.Chunk) if !ok { - dlog.Errorf(ctx, "... dev[%q] node@%v: item %v: error: type is CHUNK_ITEM_KEY, but struct is %T", - dev.Name(), nodeRef.Addr, i, item.Body) + dlog.Errorf(ctx, "node@%v: item %v: error: type is CHUNK_ITEM_KEY, but struct is %T", + nodeRef.Addr, i, item.Body) continue } - //dlog.Tracef(ctx, "... 
dev[%q] node@%v: item %v: found chunk", - // dev.Name(), nodeRef.Addr, i) + dlog.Tracef(ctx, "node@%v: item %v: found chunk", + nodeRef.Addr, i) result.FoundChunks = append(result.FoundChunks, btrfstree.SysChunk{ Key: item.Key, Chunk: chunk, @@ -193,12 +191,12 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo case btrfsitem.BLOCK_GROUP_ITEM_KEY: bg, ok := item.Body.(btrfsitem.BlockGroup) if !ok { - dlog.Errorf(ctx, "... dev[%q] node@%v: item %v: error: type is BLOCK_GROUP_ITEM_KEY, but struct is %T", - dev.Name(), nodeRef.Addr, i, item.Body) + dlog.Errorf(ctx, "node@%v: item %v: error: type is BLOCK_GROUP_ITEM_KEY, but struct is %T", + nodeRef.Addr, i, item.Body) continue } - //dlog.Tracef(ctx, "... dev[%q] node@%v: item %v: found block group", - // dev.Name(), nodeRef.Addr, i) + dlog.Tracef(ctx, "node@%v: item %v: found block group", + nodeRef.Addr, i) result.FoundBlockGroups = append(result.FoundBlockGroups, SysBlockGroup{ Key: item.Key, BG: bg, @@ -206,12 +204,12 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo case btrfsitem.DEV_EXTENT_KEY: devext, ok := item.Body.(btrfsitem.DevExtent) if !ok { - dlog.Errorf(ctx, "... dev[%q] node@%v: item %v: error: type is DEV_EXTENT_KEY, but struct is %T", - dev.Name(), nodeRef.Addr, i, item.Body) + dlog.Errorf(ctx, "node@%v: item %v: error: type is DEV_EXTENT_KEY, but struct is %T", + nodeRef.Addr, i, item.Body) continue } - //dlog.Tracef(ctx, "... dev[%q] node@%v: item %v: found dev extent", - // dev.Name(), nodeRef.Addr, i) + dlog.Tracef(ctx, "node@%v: item %v: found dev extent", + nodeRef.Addr, i) result.FoundDevExtents = append(result.FoundDevExtents, SysDevExtent{ Key: item.Key, DevExt: devext, @@ -219,12 +217,12 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo case btrfsitem.EXTENT_CSUM_KEY: sums, ok := item.Body.(btrfsitem.ExtentCSum) if !ok { - dlog.Errorf(ctx, "... 
dev[%q] node@%v: item %v: error: type is EXTENT_CSUM_OBJECTID, but struct is %T", - dev.Name(), nodeRef.Addr, i, item.Body) + dlog.Errorf(ctx, "node@%v: item %v: error: type is EXTENT_CSUM_OBJECTID, but struct is %T", + nodeRef.Addr, i, item.Body) continue } - //dlog.Tracef(ctx, "... dev[%q] node@%v: item %v: found csums", - // dev.Name(), nodeRef.Addr, i) + dlog.Tracef(ctx, "node@%v: item %v: found csums", + nodeRef.Addr, i) result.FoundExtentCSums = append(result.FoundExtentCSums, SysExtentCSum{ Generation: nodeRef.Data.Head.Generation, Sums: sums, diff --git a/lib/textui/log.go b/lib/textui/log.go index ca8c201..4421074 100644 --- a/lib/textui/log.go +++ b/lib/textui/log.go @@ -280,6 +280,14 @@ func fieldOrd(key string) int { return -4 case "dexec.err": return -3 + + case "btrfsinspect.scandevices.dev": + return -1 + case "btrfsinspect.rebuild-mappings.step": + return -2 + case "btrfsinspect.rebuild-mappings.substep": + return -1 + default: return 1 } @@ -290,6 +298,13 @@ func fieldName(key string) string { case "THREAD": return "thread" default: - return key + switch { + case strings.HasPrefix(key, "btrfsinspect.scandevices."): + return strings.TrimPrefix(key, "btrfsinspect.scandevices.") + case strings.HasPrefix(key, "btrfsinspect.rebuild-mappings."): + return strings.TrimPrefix(key, "btrfsinspect.rebuild-mappings.") + default: + return key + } } } -- cgit v1.2.3-2-g168b From 0bbcd2cadf8a6e1a277a68653ac2f7f95c63ba01 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Mon, 26 Dec 2022 14:30:45 -0700 Subject: rebuildmappings: Add a comment about the importance of exact-search --- .../btrfsinspect/rebuildmappings/rebuildmappings.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go index 9807764..7311aca 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go +++ 
b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go @@ -147,6 +147,20 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect } dlog.Info(_ctx, "... done processing block groups") + // More than once, I've been tempted to get rid of this exact-search step and just have the fuzzy-search step. + // After all, looking at the timestamps in the log, it's faster per blockgroup! For some background, the big-O + // for each (per blockgroup) looks like: + // + // - exact-search: O(bgSize+physicalBlocks) + // - fuzzy-search: O(bgSize*physicalBlocks) worst-case; O(bgSize*log(physicalBlocks)) expected + // + // The fuzzy-search is only fast because the exact-search is so good at getting `physicalBlocks` down. + // Empirically: if I remove the exact-search step, then the fuzzy-match step is more than an order of magnitude + // slower. + // + // The exact-search probably could be optimized to be substantially faster (by a constant factor; not affecting + // the big-O) by figuring out how to inline function calls and reduce allocations, but IMO it's "fast + // enough" for now. 
ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "5/6") dlog.Infof(_ctx, "5/6: Searching for %d block groups in checksum map (exact)...", len(bgs)) physicalSums := ExtractPhysicalSums(scanResults) -- cgit v1.2.3-2-g168b From d5737a0e71b17a97b82ae68e49acc41a08fcc0ad Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Mon, 26 Dec 2022 22:02:13 -0700 Subject: cmd/btrfs-rec: Add a utility function for reading JSON files with progress --- lib/textui/log.go | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'lib') diff --git a/lib/textui/log.go b/lib/textui/log.go index 4421074..e94a24f 100644 --- a/lib/textui/log.go +++ b/lib/textui/log.go @@ -288,6 +288,8 @@ func fieldOrd(key string) int { case "btrfsinspect.rebuild-mappings.substep": return -1 + case "btrfs.read-json-file": + return -1 default: return 1 } @@ -303,6 +305,8 @@ func fieldName(key string) string { return strings.TrimPrefix(key, "btrfsinspect.scandevices.") case strings.HasPrefix(key, "btrfsinspect.rebuild-mappings."): return strings.TrimPrefix(key, "btrfsinspect.rebuild-mappings.") + case strings.HasPrefix(key, "btrfs."): + return strings.TrimPrefix(key, "btrfs.") default: return key } -- cgit v1.2.3-2-g168b From a54e21d9538abece01ecc2e5327eed01fa56a949 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Mon, 26 Dec 2022 16:32:15 -0700 Subject: rebuildnodes: Fuss with the main loop to make the logs more ordered --- .../btrfsinspect/rebuildnodes/rebuild.go | 85 ++++++++++++---------- 1 file changed, 46 insertions(+), 39 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go index 0a3f2bb..4df3a9a 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go @@ -45,9 +45,10 @@ type rebuilder struct { graph graph.Graph keyIO *keyio.Handle - curKey keyAndTree - queue []keyAndTree - pendingAugments 
map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int + curKey keyAndTree + treeQueue []btrfsprim.ObjID + itemQueue []keyAndTree + augmentQueue map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int } func RebuildNodes(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) (map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], error) { @@ -100,28 +101,42 @@ func (s rebuildStats) String() string { } func (o *rebuilder) rebuild(ctx context.Context) error { - passNum := 0 - dlog.Infof(ctx, "... pass %d: scanning for implied items", passNum) + // Initialize + o.augmentQueue = make(map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int) + // Seed the queue - o.pendingAugments = make(map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int) - o.rebuilt.AddTree(ctx, btrfsprim.ROOT_TREE_OBJECTID) - o.rebuilt.AddTree(ctx, btrfsprim.CHUNK_TREE_OBJECTID) - //o.rebuilt.AddTree(ctx, btrfsprim.TREE_LOG_OBJECTID) // TODO(lukeshu): Special LOG_TREE handling - o.rebuilt.AddTree(ctx, btrfsprim.BLOCK_GROUP_TREE_OBJECTID) - for { - // Handle items in the queue - queue := o.queue - o.queue = nil + o.treeQueue = []btrfsprim.ObjID{ + btrfsprim.ROOT_TREE_OBJECTID, + btrfsprim.CHUNK_TREE_OBJECTID, + // btrfsprim.TREE_LOG_OBJECTID, // TODO(lukeshu): Special LOG_TREE handling + btrfsprim.BLOCK_GROUP_TREE_OBJECTID, + } + + for passNum := 0; len(o.treeQueue) > 0 || len(o.itemQueue) > 0 || len(o.augmentQueue) > 0; passNum++ { + // Add items to the queue (drain o.treeQueue, fill o.itemQueue) + dlog.Infof(ctx, "... 
pass %d: scanning for implied items", passNum) + treeQueue := o.treeQueue + o.treeQueue = nil + sort.Slice(treeQueue, func(i, j int) bool { + return treeQueue[i] < treeQueue[j] + }) + for _, treeID := range treeQueue { + o.rebuilt.AddTree(ctx, treeID) + } + + // Handle items in the queue (drain o.itemQueue, fill o.augmentQueue and o.treeQueue) + itemQueue := o.itemQueue + o.itemQueue = nil progressWriter := textui.NewProgress[rebuildStats](ctx, dlog.LogLevelInfo, 1*time.Second) queueProgress := func(done int) { progressWriter.Set(rebuildStats{ PassNum: passNum, Task: "processing item queue", N: done, - D: len(queue), + D: len(itemQueue), }) } - for i, key := range queue { + for i, key := range itemQueue { queueProgress(i) o.curKey = key itemBody, ok := o.rebuilt.Load(ctx, key.TreeID, key.Key) @@ -133,26 +148,21 @@ func (o *rebuilder) rebuild(ctx context.Context) error { Body: itemBody, }) if key.ItemType == btrfsitem.ROOT_ITEM_KEY { - o.rebuilt.AddTree(ctx, key.ObjectID) + o.treeQueue = append(o.treeQueue, key.ObjectID) } } - queueProgress(len(queue)) + queueProgress(len(itemQueue)) progressWriter.Done() - // Check if we can bail - if len(o.queue) == 0 && len(o.pendingAugments) == 0 { - break - } - - // Apply augments that were requested while handling items from the queue - resolvedAugments := make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], len(o.pendingAugments)) + // Apply augments (drain o.augmentQueue, fill o.itemQueue) + resolvedAugments := make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], len(o.augmentQueue)) numAugments := 0 - for _, treeID := range maps.SortedKeys(o.pendingAugments) { + for _, treeID := range maps.SortedKeys(o.augmentQueue) { dlog.Infof(ctx, "... ... 
augments for tree %v:", treeID) - resolvedAugments[treeID] = o.resolveTreeAugments(ctx, o.pendingAugments[treeID]) + resolvedAugments[treeID] = o.resolveTreeAugments(ctx, o.augmentQueue[treeID]) numAugments += len(resolvedAugments[treeID]) } - o.pendingAugments = make(map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int) + o.augmentQueue = make(map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int) progressWriter = textui.NewProgress[rebuildStats](ctx, dlog.LogLevelInfo, 1*time.Second) numAugmented := 0 augmentProgress := func() { @@ -172,15 +182,12 @@ func (o *rebuilder) rebuild(ctx context.Context) error { } augmentProgress() progressWriter.Done() - - passNum++ - dlog.Infof(ctx, "... pass %d: scanning for implied items", passNum) } return nil } func (o *rebuilder) cbAddedItem(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) { - o.queue = append(o.queue, keyAndTree{ + o.itemQueue = append(o.itemQueue, keyAndTree{ TreeID: tree, Key: key, }) @@ -189,7 +196,7 @@ func (o *rebuilder) cbAddedItem(ctx context.Context, tree btrfsprim.ObjID, key b func (o *rebuilder) cbLookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) { key, ok := o._want(ctx, btrfsprim.ROOT_TREE_OBJECTID, tree, btrfsitem.ROOT_ITEM_KEY) if !ok { - o.queue = append(o.queue, o.curKey) + o.itemQueue = append(o.itemQueue, o.curKey) return 0, btrfsitem.Root{}, false } itemBody, ok := o.rebuilt.Load(ctx, btrfsprim.ROOT_TREE_OBJECTID, key) @@ -212,7 +219,7 @@ func (o *rebuilder) cbLookupRoot(ctx context.Context, tree btrfsprim.ObjID) (off func (o *rebuilder) cbLookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) { key := btrfsitem.UUIDToKey(uuid) if ok := o._wantOff(ctx, btrfsprim.UUID_TREE_OBJECTID, key.ObjectID, key.ItemType, key.Offset); !ok { - o.queue = append(o.queue, o.curKey) + o.itemQueue = append(o.itemQueue, o.curKey) return 0, false } itemBody, ok := o.rebuilt.Load(ctx, btrfsprim.UUID_TREE_OBJECTID, 
key) @@ -367,7 +374,7 @@ func (o *rebuilder) wantAugment(ctx context.Context, treeID btrfsprim.ObjID, cho choicesWithDist[choice] = dist } dlog.Infof(ctx, "augment(tree=%v): %v", treeID, maps.SortedKeys(choicesWithDist)) - o.pendingAugments[treeID] = append(o.pendingAugments[treeID], choicesWithDist) + o.augmentQueue[treeID] = append(o.augmentQueue[treeID], choicesWithDist) } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -383,7 +390,7 @@ func (o *rebuilder) want(ctx context.Context, treeID btrfsprim.ObjID, objID btrf } func (o *rebuilder) _want(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) (key btrfsprim.Key, ok bool) { if !o.rebuilt.AddTree(ctx, treeID) { - o.queue = append(o.queue, o.curKey) + o.itemQueue = append(o.itemQueue, o.curKey) return btrfsprim.Key{}, false } @@ -420,7 +427,7 @@ func (o *rebuilder) wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID b } func (o *rebuilder) _wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) (ok bool) { if !o.rebuilt.AddTree(ctx, treeID) { - o.queue = append(o.queue, o.curKey) + o.itemQueue = append(o.itemQueue, o.curKey) return false } @@ -452,7 +459,7 @@ func (o *rebuilder) _wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID // wantFunc implements rebuildCallbacks. 
func (o *rebuilder) wantFunc(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(btrfsitem.Item) bool) { if !o.rebuilt.AddTree(ctx, treeID) { - o.queue = append(o.queue, o.curKey) + o.itemQueue = append(o.itemQueue, o.curKey) return } @@ -501,7 +508,7 @@ func (o *rebuilder) _wantRange( beg, end uint64, ) { if !o.rebuilt.AddTree(ctx, treeID) { - o.queue = append(o.queue, o.curKey) + o.itemQueue = append(o.itemQueue, o.curKey) return } -- cgit v1.2.3-2-g168b From 6a314a2de6c2b3299b2220e90be9338d1365c076 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Tue, 27 Dec 2022 01:00:30 -0700 Subject: Improve comments and related messages --- lib/btrfs/btrfsitem/item_dir.go | 2 +- lib/btrfs/btrfssum/csum.go | 6 +++--- lib/btrfsprogs/btrfsinspect/print_tree.go | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'lib') diff --git a/lib/btrfs/btrfsitem/item_dir.go b/lib/btrfs/btrfsitem/item_dir.go index 836c477..b3e33f4 100644 --- a/lib/btrfs/btrfsitem/item_dir.go +++ b/lib/btrfs/btrfsitem/item_dir.go @@ -30,7 +30,7 @@ type DirEntry struct { // DIR_ITEM=84 DIR_INDEX=96 XATTR_ITEM=24 NameLen uint16 `bin:"off=0x1b, siz=2"` // [ignored-when-writing] Type FileType `bin:"off=0x1d, siz=1"` binstruct.End `bin:"off=0x1e"` - Data []byte `bin:"-"` + Data []byte `bin:"-"` // xattr value (only for XATTR_ITEM) Name []byte `bin:"-"` } diff --git a/lib/btrfs/btrfssum/csum.go b/lib/btrfs/btrfssum/csum.go index c7c1f37..770f6ea 100644 --- a/lib/btrfs/btrfssum/csum.go +++ b/lib/btrfs/btrfssum/csum.go @@ -91,11 +91,11 @@ func (typ CSumType) Sum(data []byte) (CSum, error) { binary.LittleEndian.PutUint32(ret[:], crc) return ret, nil case TYPE_XXHASH: - panic("not implemented") + panic("TODO: XXHASH not yet implemented") case TYPE_SHA256: - panic("not implemented") + panic("TODO: SHA256 not yet implemented") case TYPE_BLAKE2: - panic("not implemented") + panic("TODO: BLAKE2 not yet implemented") default: return CSum{}, 
fmt.Errorf("unknown checksum type: %v", typ) } diff --git a/lib/btrfsprogs/btrfsinspect/print_tree.go b/lib/btrfsprogs/btrfsinspect/print_tree.go index 2ede80f..6c31350 100644 --- a/lib/btrfsprogs/btrfsinspect/print_tree.go +++ b/lib/btrfsprogs/btrfsinspect/print_tree.go @@ -427,7 +427,7 @@ func fmtKey(key btrfsprim.Key) string { textui.Fprintf(&out, "key (%v %v", key.ObjectID.Format(key.ItemType), key.ItemType) switch key.ItemType { case btrfsitem.QGROUP_RELATION_KEY: //TODO, btrfsitem.QGROUP_INFO_KEY, btrfsitem.QGROUP_LIMIT_KEY: - panic("not implemented") + panic("TODO: printing qgroup items not yet implemented") case btrfsitem.UUID_SUBVOL_KEY, btrfsitem.UUID_RECEIVED_SUBVOL_KEY: textui.Fprintf(&out, " %#08x)", key.Offset) case btrfsitem.ROOT_ITEM_KEY: -- cgit v1.2.3-2-g168b From 6e8e2960c5412685c1ac87c20b4d34d2caf90640 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Mon, 26 Dec 2022 16:32:15 -0700 Subject: rebuildnodes: Fuss with logging to take advantage of textui --- .../rebuildnodes/btrees/rebuilt_btrees.go | 78 ++++------ .../btrfsinspect/rebuildnodes/graph/graph.go | 23 ++- .../btrfsinspect/rebuildnodes/keyio/keyio.go | 11 +- .../btrfsinspect/rebuildnodes/rebuild.go | 169 +++++++++++---------- .../btrfsinspect/rebuildnodes/rebuild_graph.go | 95 ++++++------ lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go | 30 ++-- lib/textui/log.go | 124 ++++++++++++--- 7 files changed, 291 insertions(+), 239 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go index 4f0f5d4..33eb352 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go @@ -168,21 +168,14 @@ func NewRebuiltTrees( } type rootStats struct { - TreeID btrfsprim.ObjID - RootNode btrfsvol.LogicalAddr - - DoneLeafs int - TotalLeafs int + Leafs textui.Portion[int] AddedItems int ReplacedItems 
int } func (s rootStats) String() string { - return textui.Sprintf("tree %v: adding root node@%v: %v%% (%v/%v) (added %v items, replaced %v items)", - s.TreeID, s.RootNode, - int(100*float64(s.DoneLeafs)/float64(s.TotalLeafs)), - s.DoneLeafs, s.TotalLeafs, - s.AddedItems, s.ReplacedItems) + return textui.Sprintf("%v (added %v items, replaced %v items)", + s.Leafs, s.AddedItems, s.ReplacedItems) } // AddRoot adds an additional root node to an existing tree. It is @@ -192,24 +185,16 @@ func (s rootStats) String() string { // It is invalid (panic) to call AddRoot for a tree without having // called AddTree first. func (ts *RebuiltTrees) AddRoot(ctx context.Context, treeID btrfsprim.ObjID, rootNode btrfsvol.LogicalAddr) { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-root", fmt.Sprintf("tree=%v rootNode=%v", treeID, rootNode)) tree := ts.trees[treeID] tree.Roots.Insert(rootNode) + var stats rootStats + stats.Leafs.D = len(tree.leafToRoots) progressWriter := textui.NewProgress[rootStats](ctx, dlog.LogLevelInfo, 1*time.Second) - numAdded := 0 - numReplaced := 0 - progress := func(done int) { - progressWriter.Set(rootStats{ - TreeID: treeID, - RootNode: rootNode, - DoneLeafs: done, - TotalLeafs: len(tree.leafToRoots), - AddedItems: numAdded, - ReplacedItems: numReplaced, - }) - } for i, leaf := range maps.SortedKeys(tree.leafToRoots) { - progress(i) + stats.Leafs.N = i + progressWriter.Set(stats) if tree.Leafs.Has(leaf) || !tree.leafToRoots[leaf].Has(rootNode) { continue } @@ -221,16 +206,17 @@ func (ts *RebuiltTrees) AddRoot(ctx context.Context, treeID btrfsprim.ObjID, roo } if oldPtr, exists := tree.Items.Load(itemKey); !exists { tree.Items.Store(itemKey, newPtr) - numAdded++ + stats.AddedItems++ } else if tree.shouldReplace(ts.graph, oldPtr.Node, newPtr.Node) { tree.Items.Store(itemKey, newPtr) - numReplaced++ + stats.ReplacedItems++ } ts.cbAddedItem(ctx, treeID, itemKey) - progress(i) + progressWriter.Set(stats) } } - 
progress(len(tree.leafToRoots)) + stats.Leafs.N = len(tree.leafToRoots) + progressWriter.Set(stats) progressWriter.Done() } @@ -250,6 +236,9 @@ func (ts *RebuiltTrees) addTree(ctx context.Context, treeID btrfsprim.ObjID, sta if slices.Contains(treeID, stack) { return false } + stack = append(stack, treeID) + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree", stack) + dlog.Info(ctx, "adding tree...") tree := &rebuiltTree{ ID: treeID, @@ -267,7 +256,6 @@ func (ts *RebuiltTrees) addTree(ctx context.Context, treeID btrfsprim.ObjID, sta case btrfsprim.BLOCK_GROUP_TREE_OBJECTID: root = ts.sb.BlockGroupRoot default: - stack := append(stack, treeID) if !ts.addTree(ctx, btrfsprim.ROOT_TREE_OBJECTID, stack) { return false } @@ -279,7 +267,7 @@ func (ts *RebuiltTrees) addTree(ctx context.Context, treeID btrfsprim.ObjID, sta tree.UUID = rootItem.UUID if rootItem.ParentUUID != (btrfsprim.UUID{}) { tree.ParentGen = rootOff - if !ts.addTree(ctx, btrfsprim.ROOT_TREE_OBJECTID, stack) { + if !ts.addTree(ctx, btrfsprim.UUID_TREE_OBJECTID, stack) { return false } parentID, ok := ts.cbLookupUUID(ctx, rootItem.ParentUUID) @@ -299,32 +287,22 @@ func (ts *RebuiltTrees) addTree(ctx context.Context, treeID btrfsprim.ObjID, sta ts.AddRoot(ctx, treeID, root) } - return true -} - -type indexStats struct { - TreeID btrfsprim.ObjID - DoneNodes int - TotalNodes int -} - -func (s indexStats) String() string { - return textui.Sprintf("tree %v: indexing leaf nodes: %v%% (%v/%v)", - s.TreeID, - int(100*float64(s.DoneNodes)/float64(s.TotalNodes)), - s.DoneNodes, s.TotalNodes) + return } func (tree *rebuiltTree) indexLeafs(ctx context.Context, graph pkggraph.Graph) { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.substep", "index-nodes") + nodeToRoots := make(map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]) - progressWriter := textui.NewProgress[indexStats](ctx, dlog.LogLevelInfo, 1*time.Second) + + var stats textui.Portion[int] + stats.D = 
len(graph.Nodes) + progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, 1*time.Second) progress := func() { - progressWriter.Set(indexStats{ - TreeID: tree.ID, - DoneNodes: len(nodeToRoots), - TotalNodes: len(graph.Nodes), - }) + stats.N = len(nodeToRoots) + progressWriter.Set(stats) } + progress() for _, node := range maps.SortedKeys(graph.Nodes) { tree.indexNode(graph, node, nodeToRoots, progress, nil) @@ -397,7 +375,7 @@ func (ts *RebuiltTrees) Load(ctx context.Context, treeID btrfsprim.ObjID, key bt if !ok { return nil, false } - return ts.keyIO.ReadItem(ptr) + return ts.keyIO.ReadItem(ctx, ptr) } // Search searches for an item from a tree. diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go index d3dd19a..c4ed675 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go @@ -191,19 +191,13 @@ func (g Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.No } } -type progStats struct { - textui.Portion[int] -} - -func (s progStats) String() string { - return "... 
" + s.Portion.String() -} - func (g Graph) FinalCheck(ctx context.Context, fs diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) error { - var stats progStats + var stats textui.Portion[int] + _ctx := ctx - dlog.Info(ctx, "Checking keypointers for dead-ends...") - progressWriter := textui.NewProgress[progStats](ctx, dlog.LogLevelInfo, 1*time.Second) + ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.read.substep", "check-keypointers") + dlog.Info(_ctx, "Checking keypointers for dead-ends...") + progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, 1*time.Second) stats.D = len(g.EdgesTo) progressWriter.Set(stats) for laddr := range g.EdgesTo { @@ -223,10 +217,11 @@ func (g Graph) FinalCheck(ctx context.Context, fs diskio.File[btrfsvol.LogicalAd progressWriter.Done() dlog.Info(ctx, "... done checking keypointers") - dlog.Info(ctx, "Checking for btree loops...") + ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.read.substep", "check-for-loops") + dlog.Info(_ctx, "Checking for btree loops...") stats.D = len(g.Nodes) stats.N = 0 - progressWriter = textui.NewProgress[progStats](ctx, dlog.LogLevelInfo, 1*time.Second) + progressWriter = textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, 1*time.Second) progressWriter.Set(stats) visited := make(containers.Set[btrfsvol.LogicalAddr], len(g.Nodes)) numLoops := 0 @@ -260,7 +255,7 @@ func (g Graph) FinalCheck(ctx context.Context, fs diskio.File[btrfsvol.LogicalAd if numLoops > 0 { return fmt.Errorf("%d btree loops", numLoops) } - dlog.Info(ctx, "... done checking for loops") + dlog.Info(_ctx, "... 
done checking for loops") return nil } diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go index d92495e..b1e68f9 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go @@ -5,8 +5,11 @@ package keyio import ( + "context" "fmt" + "github.com/datawire/dlib/dlog" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" @@ -85,8 +88,9 @@ func (o *Handle) SetGraph(graph graph.Graph) { o.graph = graph } -func (o *Handle) readNode(laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node] { +func (o *Handle) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node] { if cached, ok := o.cache.Get(laddr); ok { + dlog.Tracef(ctx, "cache-hit node@%v", laddr) return cached } @@ -95,6 +99,7 @@ func (o *Handle) readNode(laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.Logic panic(fmt.Errorf("should not happen: node@%v is not mentioned in the in-memory graph", laddr)) } + dlog.Infof(ctx, "cache-miss node@%v, reading...", laddr) ref, err := btrfstree.ReadNode(o.rawFile, o.sb, laddr, btrfstree.NodeExpectations{ LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr}, Level: containers.Optional[uint8]{OK: true, Val: graphInfo.Level}, @@ -118,11 +123,11 @@ func (o *Handle) readNode(laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.Logic return ref } -func (o *Handle) ReadItem(ptr ItemPtr) (item btrfsitem.Item, ok bool) { +func (o *Handle) ReadItem(ctx context.Context, ptr ItemPtr) (item btrfsitem.Item, ok bool) { if o.graph.Nodes[ptr.Node].Level != 0 || ptr.Idx < 0 { return nil, false } - items := o.readNode(ptr.Node).Data.BodyLeaf + items := o.readNode(ctx, ptr.Node).Data.BodyLeaf if ptr.Idx >= len(items) { return nil, false } diff --git 
a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go index 4df3a9a..a7fe5c7 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go @@ -38,6 +38,10 @@ func (a keyAndTree) Cmp(b keyAndTree) int { return containers.NativeCmp(a.TreeID, b.TreeID) } +func (o keyAndTree) String() string { + return fmt.Sprintf("tree=%v key=%v", o.TreeID, o.Key) +} + type rebuilder struct { sb btrfstree.Superblock rebuilt *btrees.RebuiltTrees @@ -52,17 +56,20 @@ type rebuilder struct { } func RebuildNodes(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) (map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], error) { + _ctx := ctx + + ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.step", "read-fs-data") dlog.Info(ctx, "Reading superblock...") sb, err := fs.Superblock() if err != nil { return nil, err } - nodeGraph, keyIO, err := ScanDevices(ctx, fs, nodeScanResults) // ScanDevices does its own logging if err != nil { return nil, err } + ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.step", "rebuild") dlog.Info(ctx, "Rebuilding node tree...") o := &rebuilder{ sb: *sb, @@ -85,22 +92,7 @@ func (o *rebuilder) ioErr(ctx context.Context, err error) { panic(err) } -type rebuildStats struct { - PassNum int - Task string - N, D int -} - -func (s rebuildStats) String() string { - pct := 100 - if s.D > 0 { - pct = int(100 * float64(s.N) / float64(s.D)) - } - return fmt.Sprintf("... 
pass %d: %s %v%% (%v/%v)", - s.PassNum, s.Task, pct, s.N, s.D) -} - -func (o *rebuilder) rebuild(ctx context.Context) error { +func (o *rebuilder) rebuild(_ctx context.Context) error { // Initialize o.augmentQueue = make(map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int) @@ -113,37 +105,39 @@ func (o *rebuilder) rebuild(ctx context.Context) error { } for passNum := 0; len(o.treeQueue) > 0 || len(o.itemQueue) > 0 || len(o.augmentQueue) > 0; passNum++ { + passCtx := dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.rebuild.pass", passNum) + // Add items to the queue (drain o.treeQueue, fill o.itemQueue) - dlog.Infof(ctx, "... pass %d: scanning for implied items", passNum) + stepCtx := dlog.WithField(passCtx, "btrfsinspect.rebuild-nodes.rebuild.substep", "collect-items") treeQueue := o.treeQueue o.treeQueue = nil sort.Slice(treeQueue, func(i, j int) bool { return treeQueue[i] < treeQueue[j] }) + // Because trees can be wildly different sizes, it's impossible to have a meaningful + // progress percentage here. 
for _, treeID := range treeQueue { - o.rebuilt.AddTree(ctx, treeID) + o.rebuilt.AddTree(stepCtx, treeID) } // Handle items in the queue (drain o.itemQueue, fill o.augmentQueue and o.treeQueue) + stepCtx = dlog.WithField(passCtx, "btrfsinspect.rebuild-nodes.rebuild.substep", "process-items") itemQueue := o.itemQueue o.itemQueue = nil - progressWriter := textui.NewProgress[rebuildStats](ctx, dlog.LogLevelInfo, 1*time.Second) - queueProgress := func(done int) { - progressWriter.Set(rebuildStats{ - PassNum: passNum, - Task: "processing item queue", - N: done, - D: len(itemQueue), - }) - } + var progress textui.Portion[int] + progress.D = len(itemQueue) + progressWriter := textui.NewProgress[textui.Portion[int]](stepCtx, dlog.LogLevelInfo, 1*time.Second) + stepCtx = dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress) for i, key := range itemQueue { - queueProgress(i) + itemCtx := dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.process.item", key) + progress.N = i + progressWriter.Set(progress) o.curKey = key - itemBody, ok := o.rebuilt.Load(ctx, key.TreeID, key.Key) + itemBody, ok := o.rebuilt.Load(itemCtx, key.TreeID, key.Key) if !ok { - o.ioErr(ctx, fmt.Errorf("could not read previously read item: %v", key)) + o.ioErr(itemCtx, fmt.Errorf("could not read previously read item: %v", key)) } - handleItem(o, ctx, key.TreeID, btrfstree.Item{ + handleItem(o, itemCtx, key.TreeID, btrfstree.Item{ Key: key.Key, Body: itemBody, }) @@ -151,36 +145,32 @@ func (o *rebuilder) rebuild(ctx context.Context) error { o.treeQueue = append(o.treeQueue, key.ObjectID) } } - queueProgress(len(itemQueue)) + progress.N = len(itemQueue) + progressWriter.Set(progress) progressWriter.Done() // Apply augments (drain o.augmentQueue, fill o.itemQueue) + stepCtx = dlog.WithField(passCtx, "btrfsinspect.rebuild-nodes.rebuild.substep", "apply-augments") resolvedAugments := make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], len(o.augmentQueue)) 
- numAugments := 0 + progress.N = 0 + progress.D = 0 for _, treeID := range maps.SortedKeys(o.augmentQueue) { - dlog.Infof(ctx, "... ... augments for tree %v:", treeID) - resolvedAugments[treeID] = o.resolveTreeAugments(ctx, o.augmentQueue[treeID]) - numAugments += len(resolvedAugments[treeID]) + treeCtx := dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID) + resolvedAugments[treeID] = o.resolveTreeAugments(treeCtx, o.augmentQueue[treeID]) + progress.D += len(resolvedAugments[treeID]) } o.augmentQueue = make(map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int) - progressWriter = textui.NewProgress[rebuildStats](ctx, dlog.LogLevelInfo, 1*time.Second) - numAugmented := 0 - augmentProgress := func() { - progressWriter.Set(rebuildStats{ - PassNum: passNum, - Task: "applying augments", - N: numAugmented, - D: numAugments, - }) - } + progressWriter = textui.NewProgress[textui.Portion[int]](stepCtx, dlog.LogLevelInfo, 1*time.Second) + stepCtx = dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress) for _, treeID := range maps.SortedKeys(resolvedAugments) { + treeCtx := dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID) for _, nodeAddr := range maps.SortedKeys(resolvedAugments[treeID]) { - augmentProgress() - o.rebuilt.AddRoot(ctx, treeID, nodeAddr) - numAugmented++ + progressWriter.Set(progress) + o.rebuilt.AddRoot(treeCtx, treeID, nodeAddr) + progress.N++ } } - augmentProgress() + progressWriter.Set(progress) progressWriter.Done() } return nil @@ -194,6 +184,9 @@ func (o *rebuilder) cbAddedItem(ctx context.Context, tree btrfsprim.ObjID, key b } func (o *rebuilder) cbLookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.reason", "tree Root") + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.key", + 
fmt.Sprintf("tree=%v key={%v %v ?}", btrfsprim.ROOT_TREE_OBJECTID, tree, btrfsitem.ROOT_ITEM_KEY)) key, ok := o._want(ctx, btrfsprim.ROOT_TREE_OBJECTID, tree, btrfsitem.ROOT_ITEM_KEY) if !ok { o.itemQueue = append(o.itemQueue, o.curKey) @@ -218,7 +211,9 @@ func (o *rebuilder) cbLookupRoot(ctx context.Context, tree btrfsprim.ObjID) (off func (o *rebuilder) cbLookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) { key := btrfsitem.UUIDToKey(uuid) - if ok := o._wantOff(ctx, btrfsprim.UUID_TREE_OBJECTID, key.ObjectID, key.ItemType, key.Offset); !ok { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.reason", "resolve parent UUID") + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.key", keyAndTree{TreeID: btrfsprim.UUID_TREE_OBJECTID, Key: key}) + if ok := o._wantOff(ctx, btrfsprim.UUID_TREE_OBJECTID, key); !ok { o.itemQueue = append(o.itemQueue, o.curKey) return 0, false } @@ -349,9 +344,9 @@ func (o *rebuilder) resolveTreeAugments(ctx context.Context, listsWithDistances for i, list := range lists { chose := list.Intersection(ret) if len(chose) == 0 { - dlog.Infof(ctx, "... ... ... lists[%d]: chose (none) from %v", i, maps.SortedKeys(list)) + dlog.Infof(ctx, "lists[%d]: chose (none) from %v", i, maps.SortedKeys(list)) } else { - dlog.Infof(ctx, "... ... ... 
lists[%d]: chose %v from %v", i, chose.TakeOne(), maps.SortedKeys(list)) + dlog.Infof(ctx, "lists[%d]: chose %v from %v", i, chose.TakeOne(), maps.SortedKeys(list)) } } @@ -362,7 +357,7 @@ func (o *rebuilder) resolveTreeAugments(ctx context.Context, listsWithDistances func (o *rebuilder) wantAugment(ctx context.Context, treeID btrfsprim.ObjID, choices containers.Set[btrfsvol.LogicalAddr]) { if len(choices) == 0 { - dlog.Errorf(ctx, "augment(tree=%v): could not find wanted item", treeID) + dlog.Error(ctx, "could not find wanted item") return } choicesWithDist := make(map[btrfsvol.LogicalAddr]int, len(choices)) @@ -373,7 +368,7 @@ func (o *rebuilder) wantAugment(ctx context.Context, treeID btrfsprim.ObjID, cho } choicesWithDist[choice] = dist } - dlog.Infof(ctx, "augment(tree=%v): %v", treeID, maps.SortedKeys(choicesWithDist)) + dlog.Infof(ctx, "choices=%v", maps.SortedKeys(choicesWithDist)) o.augmentQueue[treeID] = append(o.augmentQueue[treeID], choicesWithDist) } @@ -385,7 +380,10 @@ func (o *rebuilder) fsErr(ctx context.Context, e error) { } // want implements rebuildCallbacks. 
-func (o *rebuilder) want(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) { +func (o *rebuilder) want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", + fmt.Sprintf("tree=%v key={%v %v ?}", treeID, objID, typ)) o._want(ctx, treeID, objID, typ) } func (o *rebuilder) _want(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) (key btrfsprim.Key, ok bool) { @@ -409,7 +407,6 @@ func (o *rebuilder) _want(ctx context.Context, treeID btrfsprim.ObjID, objID btr // OK, we need to insert it - ctx = dlog.WithField(ctx, "want_key", fmt.Sprintf("tree=%v key={%v %v ?}", treeID, objID, typ)) wants := make(containers.Set[btrfsvol.LogicalAddr]) o.rebuilt.Keys(treeID).Subrange( func(k btrfsprim.Key, _ keyio.ItemPtr) int { k.Offset = 0; return tgt.Cmp(k) }, @@ -422,10 +419,17 @@ func (o *rebuilder) _want(ctx context.Context, treeID btrfsprim.ObjID, objID btr } // wantOff implements rebuildCallbacks. 
-func (o *rebuilder) wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) { - o._wantOff(ctx, treeID, objID, typ, off) +func (o *rebuilder) wantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) { + key := btrfsprim.Key{ + ObjectID: objID, + ItemType: typ, + Offset: off, + } + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", keyAndTree{TreeID: treeID, Key: key}) + o._wantOff(ctx, treeID, key) } -func (o *rebuilder) _wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) (ok bool) { +func (o *rebuilder) _wantOff(ctx context.Context, treeID btrfsprim.ObjID, tgt btrfsprim.Key) (ok bool) { if !o.rebuilt.AddTree(ctx, treeID) { o.itemQueue = append(o.itemQueue, o.curKey) return false @@ -433,18 +437,12 @@ func (o *rebuilder) _wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID // check if we already have it - tgt := btrfsprim.Key{ - ObjectID: objID, - ItemType: typ, - Offset: off, - } if _, ok := o.rebuilt.Load(ctx, treeID, tgt); ok { return true } // OK, we need to insert it - ctx = dlog.WithField(ctx, "want_key", fmt.Sprintf("tree=%v key=%v", treeID, tgt)) wants := make(containers.Set[btrfsvol.LogicalAddr]) o.rebuilt.Keys(treeID).Subrange( func(k btrfsprim.Key, _ keyio.ItemPtr) int { return tgt.Cmp(k) }, @@ -457,7 +455,11 @@ func (o *rebuilder) _wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID } // wantFunc implements rebuildCallbacks. 
-func (o *rebuilder) wantFunc(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(btrfsitem.Item) bool) { +func (o *rebuilder) wantFunc(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(btrfsitem.Item) bool) { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", + fmt.Sprintf("tree=%v key={%v %v ?} +func", treeID, objID, typ)) + if !o.rebuilt.AddTree(ctx, treeID) { o.itemQueue = append(o.itemQueue, o.curKey) return @@ -485,12 +487,11 @@ func (o *rebuilder) wantFunc(ctx context.Context, treeID btrfsprim.ObjID, objID // OK, we need to insert it - ctx = dlog.WithField(ctx, "want_key", fmt.Sprintf("tree=%v key=%v +func", treeID, tgt)) wants := make(containers.Set[btrfsvol.LogicalAddr]) o.rebuilt.Keys(treeID).Subrange( func(k btrfsprim.Key, _ keyio.ItemPtr) int { k.Offset = 0; return tgt.Cmp(k) }, func(k btrfsprim.Key, v keyio.ItemPtr) bool { - itemBody, ok := o.keyIO.ReadItem(v) + itemBody, ok := o.keyIO.ReadItem(ctx, v) if !ok { o.ioErr(ctx, fmt.Errorf("could not read previously read item: %v at %v", k, v)) } @@ -507,6 +508,9 @@ func (o *rebuilder) _wantRange( treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, beg, end uint64, ) { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", + fmt.Sprintf("tree=%v key={%v %v ?}", treeID, objID, typ)) + if !o.rebuilt.AddTree(ctx, treeID) { o.itemQueue = append(o.itemQueue, o.curKey) return @@ -651,22 +655,23 @@ func (o *rebuilder) _wantRange( // TODO: This is dumb and greedy. 
if last < runBeg { // log an error - o.wantAugment(dlog.WithField(ctx, "want_key", fmt.Sprintf("tree=%v key={%v, %v, %v-%v}", - treeID, objID, typ, last, runBeg)), - treeID, nil) + wantCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", + fmt.Sprintf("tree=%v key={%v %v %v-%v}", treeID, objID, typ, last, runBeg)) + o.wantAugment(wantCtx, treeID, nil) } - o.wantAugment(dlog.WithField(ctx, "want_key", fmt.Sprintf("tree=%v key={%v, %v, %v-%v}", - treeID, objID, typ, gap.Beg, gap.End)), - treeID, o.rebuilt.LeafToRoots(ctx, treeID, v.Node)) + wantCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", + fmt.Sprintf("tree=%v key={%v %v %v-%v}", treeID, objID, typ, gap.Beg, gap.End)) + o.wantAugment(wantCtx, treeID, o.rebuilt.LeafToRoots(ctx, treeID, v.Node)) last = runEnd return true }) if last < gap.End { // log an error - o.wantAugment(dlog.WithField(ctx, "want_key", fmt.Sprintf("tree=%v key={%v, %v, %v-%v}", - treeID, objID, typ, last, gap.End)), - treeID, nil) + wantCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", + fmt.Sprintf("tree=%v key={%v %v %v-%v}", + treeID, objID, typ, last, gap.End)) + o.wantAugment(wantCtx, treeID, nil) } return nil }) @@ -675,14 +680,16 @@ func (o *rebuilder) _wantRange( // func implements rebuildCallbacks. // // interval is [beg, end) -func (o *rebuilder) wantCSum(ctx context.Context, beg, end btrfsvol.LogicalAddr) { +func (o *rebuilder) wantCSum(ctx context.Context, reason string, beg, end btrfsvol.LogicalAddr) { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) const treeID = btrfsprim.CSUM_TREE_OBJECTID o._wantRange(ctx, treeID, btrfsprim.EXTENT_CSUM_OBJECTID, btrfsprim.EXTENT_CSUM_KEY, uint64(beg), uint64(end)) } // wantFileExt implements rebuildCallbacks. 
-func (o *rebuilder) wantFileExt(ctx context.Context, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) { +func (o *rebuilder) wantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) o._wantRange(ctx, treeID, ino, btrfsprim.EXTENT_DATA_KEY, 0, uint64(size)) } diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go index db7a7a5..45c9c97 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go @@ -9,8 +9,6 @@ import ( "fmt" "reflect" - "github.com/datawire/dlib/dlog" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum" @@ -20,40 +18,37 @@ import ( type rebuildCallbacks interface { fsErr(ctx context.Context, e error) - want(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) - wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) - wantFunc(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(btrfsitem.Item) bool) - wantCSum(ctx context.Context, beg, end btrfsvol.LogicalAddr) // interval is [beg, end) - wantFileExt(ctx context.Context, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) + want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) + wantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) + wantFunc(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(btrfsitem.Item) bool) + wantCSum(ctx context.Context, reason string, beg, 
end btrfsvol.LogicalAddr) // interval is [beg, end) + wantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) } func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, item btrfstree.Item) { - ctx = dlog.WithField(ctx, "tree", treeID) - ctx = dlog.WithField(ctx, "key", item.Key) - // Notionally, just express the relationships shown in // https://btrfs.wiki.kernel.org/index.php/File:References.png (from the page // https://btrfs.wiki.kernel.org/index.php/Data_Structures ) switch body := item.Body.(type) { case btrfsitem.BlockGroup: - o.want(dlog.WithField(ctx, "wants", "Chunk"), + o.want(ctx, "Chunk", btrfsprim.CHUNK_TREE_OBJECTID, body.ChunkObjectID, btrfsitem.CHUNK_ITEM_KEY) - o.wantOff(dlog.WithField(ctx, "wants", "FreeSpaceInfo"), + o.wantOff(ctx, "FreeSpaceInfo", btrfsprim.FREE_SPACE_TREE_OBJECTID, item.Key.ObjectID, btrfsitem.FREE_SPACE_INFO_KEY, item.Key.Offset) case btrfsitem.Chunk: - o.want(dlog.WithField(ctx, "wants", "owning Root"), + o.want(ctx, "owning Root", btrfsprim.ROOT_TREE_OBJECTID, body.Head.Owner, btrfsitem.ROOT_ITEM_KEY) case btrfsitem.Dev: // nothing case btrfsitem.DevExtent: - o.wantOff(dlog.WithField(ctx, "wants", "Chunk"), + o.wantOff(ctx, "Chunk", body.ChunkTree, body.ChunkObjectID, btrfsitem.CHUNK_ITEM_KEY, @@ -62,7 +57,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, // nothing case btrfsitem.DirEntry: // containing-directory - o.wantOff(dlog.WithField(ctx, "wants", "containing dir inode"), + o.wantOff(ctx, "containing dir inode", treeID, item.Key.ObjectID, btrfsitem.INODE_ITEM_KEY, @@ -70,7 +65,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, // siblings switch item.Key.ItemType { case btrfsitem.DIR_ITEM_KEY: - o.wantFunc(dlog.WithField(ctx, "wants", "corresponding DIR_INDEX"), + o.wantFunc(ctx, "corresponding DIR_INDEX", treeID, item.Key.ObjectID, btrfsitem.DIR_INDEX_KEY, @@ -78,7 +73,7 @@ 
func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, return reflect.DeepEqual(item, peerItem) }) case btrfsitem.DIR_INDEX_KEY: - o.wantOff(dlog.WithField(ctx, "wants", "corresponding DIR_ITEM"), + o.wantOff(ctx, "corresponding DIR_ITEM", treeID, item.Key.ObjectID, btrfsitem.DIR_ITEM_KEY, @@ -95,18 +90,18 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, if body.Location != (btrfsprim.Key{}) { switch body.Location.ItemType { case btrfsitem.INODE_ITEM_KEY: - o.wantOff(dlog.WithField(ctx, "wants", "item being pointed to"), + o.wantOff(ctx, "item being pointed to", treeID, body.Location.ObjectID, body.Location.ItemType, body.Location.Offset) - o.wantOff(dlog.WithField(ctx, "wants", "backref from item being pointed to"), + o.wantOff(ctx, "backref from item being pointed to", treeID, body.Location.ObjectID, btrfsitem.INODE_REF_KEY, uint64(item.Key.ObjectID)) case btrfsitem.ROOT_ITEM_KEY: - o.want(dlog.WithField(ctx, "wants", "Root of subvolume being pointed to"), + o.want(ctx, "Root of subvolume being pointed to", btrfsprim.ROOT_TREE_OBJECTID, body.Location.ObjectID, body.Location.ItemType) @@ -129,12 +124,12 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, case nil: // nothing case btrfsitem.ExtentDataRef: - o.wantOff(dlog.WithField(ctx, "wants", "referencing Inode"), + o.wantOff(ctx, "referencing Inode", refBody.Root, refBody.ObjectID, btrfsitem.INODE_ITEM_KEY, 0) - o.wantOff(dlog.WithField(ctx, "wants", "referencing FileExtent"), + o.wantOff(ctx, "referencing FileExtent", refBody.Root, refBody.ObjectID, btrfsitem.EXTENT_DATA_KEY, @@ -150,22 +145,22 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, case btrfsitem.ExtentCSum: // nothing case btrfsitem.ExtentDataRef: - o.want(dlog.WithField(ctx, "wants", "Extent being referenced"), + o.want(ctx, "Extent being referenced", btrfsprim.EXTENT_TREE_OBJECTID, item.Key.ObjectID, 
btrfsitem.EXTENT_ITEM_KEY) - o.wantOff(dlog.WithField(ctx, "wants", "referencing Inode"), + o.wantOff(ctx, "referencing Inode", body.Root, body.ObjectID, btrfsitem.INODE_ITEM_KEY, 0) - o.wantOff(dlog.WithField(ctx, "wants", "referencing FileExtent"), + o.wantOff(ctx, "referencing FileExtent", body.Root, body.ObjectID, btrfsitem.EXTENT_DATA_KEY, uint64(body.Offset)) case btrfsitem.FileExtent: - o.wantOff(dlog.WithField(ctx, "wants", "containing Inode"), + o.wantOff(ctx, "containing Inode", treeID, item.Key.ObjectID, btrfsitem.INODE_ITEM_KEY, @@ -175,63 +170,63 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, // nothing case btrfsitem.FILE_EXTENT_REG, btrfsitem.FILE_EXTENT_PREALLOC: // TODO: Check if inodeBody.Flags.Has(btrfsitem.INODE_NODATASUM) - o.wantCSum(dlog.WithField(ctx, "wants", "data sum"), + o.wantCSum(ctx, "data sum", roundDown(body.BodyExtent.DiskByteNr, btrfssum.BlockSize), roundUp(body.BodyExtent.DiskByteNr.Add(body.BodyExtent.DiskNumBytes), btrfssum.BlockSize)) default: o.fsErr(ctx, fmt.Errorf("FileExtent: unexpected body.Type=%v", body.Type)) } case btrfsitem.FreeSpaceBitmap: - o.wantOff(dlog.WithField(ctx, "wants", "FreeSpaceInfo"), + o.wantOff(ctx, "FreeSpaceInfo", treeID, item.Key.ObjectID, btrfsitem.FREE_SPACE_INFO_KEY, item.Key.Offset) case btrfsitem.FreeSpaceHeader: - o.wantOff(dlog.WithField(ctx, "wants", ".Location"), + o.wantOff(ctx, ".Location", treeID, body.Location.ObjectID, body.Location.ItemType, body.Location.Offset) case btrfsitem.FreeSpaceInfo: if body.Flags.Has(btrfsitem.FREE_SPACE_USING_BITMAPS) { - o.wantOff(dlog.WithField(ctx, "wants", "FreeSpaceBitmap"), + o.wantOff(ctx, "FreeSpaceBitmap", treeID, item.Key.ObjectID, btrfsitem.FREE_SPACE_BITMAP_KEY, item.Key.Offset) } case btrfsitem.Inode: - o.want(dlog.WithField(ctx, "wants", "backrefs"), + o.want(ctx, "backrefs", treeID, // TODO: validate the number of these against body.NLink item.Key.ObjectID, btrfsitem.INODE_REF_KEY) - 
o.wantFileExt(dlog.WithField(ctx, "wants", "FileExtents"), + o.wantFileExt(ctx, "FileExtents", treeID, item.Key.ObjectID, body.Size) if body.BlockGroup != 0 { - o.want(dlog.WithField(ctx, "wants", "BlockGroup"), + o.want(ctx, "BlockGroup", btrfsprim.EXTENT_TREE_OBJECTID, body.BlockGroup, btrfsitem.BLOCK_GROUP_ITEM_KEY) } case btrfsitem.InodeRefs: - o.wantOff(dlog.WithField(ctx, "wants", "child Inode"), + o.wantOff(ctx, "child Inode", treeID, item.Key.ObjectID, btrfsitem.INODE_ITEM_KEY, 0) - o.wantOff(dlog.WithField(ctx, "wants", "parent Inode"), + o.wantOff(ctx, "parent Inode", treeID, btrfsprim.ObjID(item.Key.Offset), btrfsitem.INODE_ITEM_KEY, 0) for _, ref := range body { - o.wantOff(dlog.WithField(ctx, "wants", "DIR_ITEM"), + o.wantOff(ctx, "DIR_ITEM", treeID, btrfsprim.ObjID(item.Key.Offset), btrfsitem.DIR_ITEM_KEY, btrfsitem.NameHash(ref.Name)) - o.wantOff(dlog.WithField(ctx, "wants", "DIR_INDEX"), + o.wantOff(ctx, "DIR_INDEX", treeID, btrfsprim.ObjID(item.Key.Offset), btrfsitem.DIR_INDEX_KEY, @@ -243,12 +238,12 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, case nil: // nothing case btrfsitem.ExtentDataRef: - o.wantOff(dlog.WithField(ctx, "wants", "referencing INode"), + o.wantOff(ctx, "referencing INode", refBody.Root, refBody.ObjectID, btrfsitem.INODE_ITEM_KEY, 0) - o.wantOff(dlog.WithField(ctx, "wants", "referencing FileExtent"), + o.wantOff(ctx, "referencing FileExtent", refBody.Root, refBody.ObjectID, btrfsitem.EXTENT_DATA_KEY, @@ -263,7 +258,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, } case btrfsitem.Root: if body.RootDirID != 0 { - o.wantOff(dlog.WithField(ctx, "wants", "root directory"), + o.wantOff(ctx, "root directory", item.Key.ObjectID, body.RootDirID, btrfsitem.INODE_ITEM_KEY, @@ -271,7 +266,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, } if body.UUID != (btrfsprim.UUID{}) { key := btrfsitem.UUIDToKey(body.UUID) - 
o.wantOff(dlog.WithField(ctx, "wants", "uuid"), + o.wantOff(ctx, "uuid", btrfsprim.UUID_TREE_OBJECTID, key.ObjectID, key.ItemType, @@ -279,7 +274,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, } if body.ParentUUID != (btrfsprim.UUID{}) { key := btrfsitem.UUIDToKey(body.ParentUUID) - o.wantOff(dlog.WithField(ctx, "wants", "parent uuid"), + o.wantOff(ctx, "parent uuid", btrfsprim.UUID_TREE_OBJECTID, key.ObjectID, key.ItemType, @@ -304,43 +299,43 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, panic(fmt.Errorf("should not happen: RootRef: unexpected ItemType=%v", item.Key.ItemType)) } // sibling - o.wantOff(dlog.WithField(ctx, "wants", fmt.Sprintf("corresponding %v", otherType)), + o.wantOff(ctx, fmt.Sprintf("corresponding %v", otherType), treeID, btrfsprim.ObjID(item.Key.Offset), otherType, uint64(item.Key.ObjectID)) // parent - o.want(dlog.WithField(ctx, "wants", "parent subvolume: Root"), + o.want(ctx, "parent subvolume: Root", treeID, parent, btrfsitem.ROOT_ITEM_KEY) - o.wantOff(dlog.WithField(ctx, "wants", "parent subvolume: Inode of parent dir"), + o.wantOff(ctx, "parent subvolume: Inode of parent dir", parent, body.DirID, btrfsitem.INODE_ITEM_KEY, 0) - o.wantOff(dlog.WithField(ctx, "wants", "parent subvolume: DIR_ITEM in parent dir"), + o.wantOff(ctx, "parent subvolume: DIR_ITEM in parent dir", parent, body.DirID, btrfsitem.DIR_ITEM_KEY, btrfsitem.NameHash(body.Name)) - o.wantOff(dlog.WithField(ctx, "wants", "parent subvolume: DIR_INDEX in parent dir"), + o.wantOff(ctx, "parent subvolume: DIR_INDEX in parent dir", parent, body.DirID, btrfsitem.DIR_INDEX_KEY, uint64(body.Sequence)) // child - o.want(dlog.WithField(ctx, "wants", "child subvolume: Root"), + o.want(ctx, "child subvolume: Root", treeID, child, btrfsitem.ROOT_ITEM_KEY) case btrfsitem.SharedDataRef: - o.want(dlog.WithField(ctx, "wants", "Extent"), + o.want(ctx, "Extent", btrfsprim.EXTENT_TREE_OBJECTID, item.Key.ObjectID, 
btrfsitem.EXTENT_ITEM_KEY) case btrfsitem.UUIDMap: - o.want(dlog.WithField(ctx, "wants", "subvolume Root"), + o.want(ctx, "subvolume Root", btrfsprim.ROOT_TREE_OBJECTID, body.ObjID, btrfsitem.ROOT_ITEM_KEY) diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go index cdd5cba..7a112b4 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go @@ -6,7 +6,6 @@ package rebuildnodes import ( "context" - "fmt" "time" "github.com/datawire/dlib/dlog" @@ -21,16 +20,6 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/textui" ) -type scanStats struct { - N, D int -} - -func (s scanStats) String() string { - return fmt.Sprintf("... %v%% (%v/%v)", - int(100*float64(s.N)/float64(s.D)), - s.N, s.D) -} - func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.ScanDevicesResult) (graph.Graph, *keyio.Handle, error) { dlog.Infof(ctx, "Reading node data from FS...") @@ -39,17 +28,16 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.Sca return graph.Graph{}, nil, err } - total := countNodes(scanResults) - done := 0 - progressWriter := textui.NewProgress[scanStats](ctx, dlog.LogLevelInfo, 1*time.Second) - progress := func(done, total int) { - progressWriter.Set(scanStats{N: done, D: total}) - } + var stats textui.Portion[int] + stats.D = countNodes(scanResults) + progressWriter := textui.NewProgress[textui.Portion[int]]( + dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.read.substep", "read-nodes"), + dlog.LogLevelInfo, 1*time.Second) nodeGraph := graph.New(*sb) keyIO := keyio.NewHandle(fs, *sb) - progress(done, total) + progressWriter.Set(stats) for _, devResults := range scanResults { for laddr := range devResults.FoundNodes { nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, *sb, laddr, btrfstree.NodeExpectations{ @@ -62,11 +50,11 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults 
btrfsinspect.Sca nodeGraph.InsertNode(nodeRef) keyIO.InsertNode(nodeRef) - done++ - progress(done, total) + stats.N++ + progressWriter.Set(stats) } } - if done != total { + if stats.N != stats.D { panic("should not happen") } progressWriter.Done() diff --git a/lib/textui/log.go b/lib/textui/log.go index e94a24f..5ddc7a8 100644 --- a/lib/textui/log.go +++ b/lib/textui/log.go @@ -18,9 +18,11 @@ import ( "path/filepath" "runtime" "sort" + "strconv" "strings" "sync" "time" + "unicode" "github.com/datawire/dlib/dlog" "github.com/spf13/pflag" @@ -219,7 +221,7 @@ func (l *logger) log(lvl dlog.LogLevel, writeMsg func(io.Writer)) { nextField = i break } - printer.Fprintf(&logBuf, " %s=%v", fieldName(fieldKey), fields[fieldKey]) + writeField(&logBuf, fieldKey, fields[fieldKey]) } // message ///////////////////////////////////////////////////////////// @@ -231,7 +233,7 @@ func (l *logger) log(lvl dlog.LogLevel, writeMsg func(io.Writer)) { logBuf.WriteString(" :") } for _, fieldKey := range fieldKeys[nextField:] { - printer.Fprintf(&logBuf, " %s=%v", fieldName(fieldKey), fields[fieldKey]) + writeField(&logBuf, fieldKey, fields[fieldKey]) } // caller ////////////////////////////////////////////////////////////// @@ -270,24 +272,65 @@ func (l *logger) log(lvl dlog.LogLevel, writeMsg func(io.Writer)) { // message, while values ≥0 should be on the right of the log message. 
func fieldOrd(key string) int { switch key { + // dlib //////////////////////////////////////////////////////////////// case "THREAD": // dgroup - return -7 + return -99 case "dexec.pid": - return -6 + return -98 case "dexec.stream": - return -5 + return -97 case "dexec.data": - return -4 + return -96 case "dexec.err": - return -3 + return -95 + // btrfsinspect scandevices //////////////////////////////////////////// case "btrfsinspect.scandevices.dev": return -1 + + // btrfsinspect rebuild-mappings /////////////////////////////////////// case "btrfsinspect.rebuild-mappings.step": return -2 case "btrfsinspect.rebuild-mappings.substep": return -1 + // btrfsinspect rebuild-nodes ////////////////////////////////////////// + case "btrfsinspect.rebuild-nodes.step": + return -50 + // step=read-fs-data + case "btrfsinspect.rebuild-nodes.read.substep": + return -1 + // step=rebuild + case "btrfsinspect.rebuild-nodes.rebuild.pass": + return -49 + case "btrfsinspect.rebuild-nodes.rebuild.substep": + return -48 + case "btrfsinspect.rebuild-nodes.rebuild.substep.progress": + return -47 + // step=rebuild, substep=collect-items (1/3) + // step=rebuild, substep=process-items (2/3) + case "btrfsinspect.rebuild-nodes.rebuild.process.item": + return -25 + // step=rebuild, substep=apply-augments (3/3) + case "btrfsinspect.rebuild-nodes.rebuild.augment.tree": + return -25 + // step=rebuild (any substep) + case "btrfsinspect.rebuild-nodes.rebuild.want.key": + return -7 + case "btrfsinspect.rebuild-nodes.rebuild.want.reason": + return -6 + case "btrfsinspect.rebuild-nodes.rebuild.add-tree": + return -5 + case "btrfsinspect.rebuild-nodes.rebuild.add-tree.substep": + return -4 + case "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.key": + return -3 + case "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.reason": + return -2 + case "btrfsinspect.rebuild-nodes.rebuild.add-root": + return -1 + + // other /////////////////////////////////////////////////////////////// case 
"btrfs.read-json-file": return -1 default: @@ -295,20 +338,61 @@ func fieldOrd(key string) int { } } -func fieldName(key string) string { - switch key { - case "THREAD": - return "thread" - default: - switch { - case strings.HasPrefix(key, "btrfsinspect.scandevices."): - return strings.TrimPrefix(key, "btrfsinspect.scandevices.") - case strings.HasPrefix(key, "btrfsinspect.rebuild-mappings."): - return strings.TrimPrefix(key, "btrfsinspect.rebuild-mappings.") - case strings.HasPrefix(key, "btrfs."): - return strings.TrimPrefix(key, "btrfs.") +func writeField(w io.Writer, key string, val any) { + valStr := printer.Sprint(val) + needsQuote := false + if strings.HasPrefix(valStr, `"`) { + needsQuote = true + } else { + for _, r := range valStr { + if !(unicode.IsPrint(r) && r != ' ') { + needsQuote = true + break + } + } + } + if needsQuote { + valStr = strconv.Quote(valStr) + } + + name := key + + switch { + case name == "THREAD": + name = "thread" + switch valStr { + case "", "/main": + return default: - return key + if strings.HasPrefix(valStr, "/main/") { + valStr = strings.TrimPrefix(valStr, "/main") + } } + case strings.HasSuffix(name, ".pass"): + fmt.Fprintf(w, "/pass-%s", valStr) + return + case strings.HasSuffix(name, ".substep") && name != "btrfsinspect.rebuild-nodes.rebuild.add-tree.substep": + fmt.Fprintf(w, "/%s", valStr) + return + case strings.HasPrefix(name, "btrfsinspect."): + name = strings.TrimPrefix(name, "btrfsinspect.") + switch { + case strings.HasPrefix(name, "scandevices."): + name = strings.TrimPrefix(name, "scandevices.") + case strings.HasPrefix(name, "rebuild-mappings."): + name = strings.TrimPrefix(name, "rebuild-mappings.") + case strings.HasPrefix(name, "rebuild-nodes."): + name = strings.TrimPrefix(name, "rebuild-nodes.") + switch { + case strings.HasPrefix(name, "read."): + name = strings.TrimPrefix(name, "read.") + case strings.HasPrefix(name, "rebuild."): + name = strings.TrimPrefix(name, "rebuild.") + } + } + case 
strings.HasPrefix(name, "btrfs."): + name = strings.TrimPrefix(name, "btrfs.") } + + fmt.Fprintf(w, " %s=%s", name, valStr) } -- cgit v1.2.3-2-g168b From 3d0937e9ab148c074922b0d46ed33bdbcbef85b5 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Wed, 28 Dec 2022 18:14:14 -0700 Subject: cmd/btrfs-rec: Have all logging include live memory statistics --- lib/textui/log_memstats.go | 132 +++++++++++++++++++++++++++++++++++++++++++++ lib/textui/text.go | 119 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 251 insertions(+) create mode 100644 lib/textui/log_memstats.go (limited to 'lib') diff --git a/lib/textui/log_memstats.go b/lib/textui/log_memstats.go new file mode 100644 index 0000000..39733c6 --- /dev/null +++ b/lib/textui/log_memstats.go @@ -0,0 +1,132 @@ +// Copyright (C) 2022 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package textui + +import ( + "fmt" + "runtime" + "sync" + "time" +) + +type LiveMemUse struct { + mu sync.Mutex + stats runtime.MemStats + last time.Time +} + +var _ fmt.Stringer = (*LiveMemUse)(nil) + +const liveMemUseUpdateInterval = 1 * time.Second + +func (o *LiveMemUse) String() string { + o.mu.Lock() + + // runtime.ReadMemStats() calls stopTheWorld(), so we want to + // rate-limit how often we call it. + if now := time.Now(); now.Sub(o.last) > liveMemUseUpdateInterval { + runtime.ReadMemStats(&o.stats) + o.last = now + } + + // runtime.MemStats only knows about memory managed by the Go runtime; + // even for a pure Go program, there's also + // + // - memory mapped to the executable itself + // - vDSO and friends + // + // But those are pretty small, just a few MiB. + // + // OK, so: memory managed by the Go runtime. runtime.MemStats is pretty + // obtuse, I think it was designed more for "debugging the Go runtime" + // than "monitoring the behavior of a Go program". 
From the Go + // runtime's perspective, regions of the virtual address space are in + // one of 4 states (see `runtime/mem.go`): + // + // - None : not mapped + // + // - Reserved : mapped, but without r/w permissions (PROT_NONE); so + // this region isn't actually backed by anything + // + // - Prepared : mapped, but allowed to be collected by the OS + // (MADV_FREE, or MADV_DONTNEED on systems without MADV_FREE); so + // this region may or may not actually be backed by anything. + // + // - Ready : mapped, ready to be used + // + // Normal tools count Reserved+Prepared+Ready toward the VSS (which is a + // little silly, when inspecting /proc/{pid}/maps to calculate the VSS, + // IMO they should exclude maps without r/w permissions, which would + // exclude Reserved), but we all know that VSS numbers are over + // inflated. And RSS only useful if we fit in RAM and don't spill to + // swap (this is being written for btrfs-rec, which is quite likely to + // consume all RAM on a laptop). Useful numbers are Ready and Prepared; + // as I said above, outside tools reporting Ready+Prepared would be easy + // and useful, but none do; but I don't think outside tools have a way + // to distinguish between Ready and Prepared (unless you can detect + // MADV_FREE/MADV_DONTNEED in /proc/{pid}/smaps?). + // + // Of the 3 mapped states, here's how we get them from runtime: + // + // - Reserved : AFAICT, you can't :( + // + // - Prepared : `runtime.MemStats.HeapReleased` + // + // - Ready : `runtime.MemStats.Sys - runtime.MemStats.HeapReleased` + // (that is, runtime.MemStats.Sys is Prepared+Ready) + // + // It's a bummer that we can't get Reserved from runtime, but as I've + // said, it's not super useful; it's only use would really be + // cross-referencing runtime's numbers against the VSS. 
+ // + // The godocs for runtime.MemStats.Sys say "It's likely that not all of + // the virtual address space is backed by physical memory at any given + // moment, though in general it all was at some point." That's both + // confusing and a lie. It's confusing because it doesn't give you + // hooks to find out more; it could have said that this is + // Ready+Prepared and that Prepared is the portion of the space that + // might not be backed by physical memory, but instead it wants you to + // throw your hands up and say "this is too weird for me to understand". + // It's a lie because "in general it all was at some point" implies that + // all Prepared memory was previously Ready, which is false; it can go + // None->Reserved->Prepared (but it only does that Reserved->Prepared + // transition if it thinks it will need to transition it to Ready very + // soon, so maybe the doc author though that was negligible?). + // + // Now, those are still pretty opaque numbers; most of runtime.MemStats + // goes toward accounting for what's going on inside of Ready space. + // + // For our purposes, we don't care too much about specifics of how Ready + // space is being used; just how much is "actually storing data", vs + // "overhead from heap-fragmentation", vs "idle". + + var ( + // We're going to add up all of the `o.stats.{thing}Sys` + // variables and check that against `o.stats.Sys`, in order to + // make sure that we're not missing any {thing} when adding up + // `inuse`. 
+ calcSys = o.stats.HeapSys + o.stats.StackSys + o.stats.MSpanSys + o.stats.MCacheSys + o.stats.BuckHashSys + o.stats.GCSys + o.stats.OtherSys + inuse = o.stats.HeapInuse + o.stats.StackInuse + o.stats.MSpanInuse + o.stats.MCacheInuse + o.stats.BuckHashSys + o.stats.GCSys + o.stats.OtherSys + ) + if calcSys != o.stats.Sys { + panic("should not happen") + } + prepared := o.stats.HeapReleased + ready := o.stats.Sys - prepared + + readyFragOverhead := o.stats.HeapInuse - o.stats.HeapAlloc + readyData := inuse - readyFragOverhead + readyIdle := ready - inuse + + o.mu.Unlock() + + return Sprintf("Ready+Prepared=%.1f (Ready=%.1f (data:%.1f + fragOverhead:%.1f + idle:%.1f) ; Prepared=%.1f)", + IEC(ready+prepared, "B"), + IEC(ready, "B"), + IEC(readyData, "B"), + IEC(readyFragOverhead, "B"), + IEC(readyIdle, "B"), + IEC(prepared, "B")) +} diff --git a/lib/textui/text.go b/lib/textui/text.go index f628eab..d6a80b3 100644 --- a/lib/textui/text.go +++ b/lib/textui/text.go @@ -7,6 +7,7 @@ package textui import ( "fmt" "io" + "math" "golang.org/x/exp/constraints" "golang.org/x/text/language" @@ -83,3 +84,121 @@ func (p Portion[T]) String() string { } return printer.Sprintf("%v (%v/%v)", number.Percent(pct), uint64(p.N), uint64(p.D)) } + +type metric[T constraints.Integer | constraints.Float] struct { + Val T + Unit string +} + +var ( + _ fmt.Formatter = metric[int]{} + _ fmt.Stringer = metric[int]{} +) + +func Metric[T constraints.Integer | constraints.Float](x T, unit string) metric[T] { + return metric[T]{ + Val: x, + Unit: unit, + } +} + +var metricSmallPrefixes = []string{ + "m", + "μ", + "n", + "p", + "f", + "a", + "z", + "y", + "r", + "q", +} + +var metricBigPrefixes = []string{ + "k", + "M", + "G", + "T", + "P", + "E", + "Z", + "Y", + "R", + "Q", +} + +// String implements fmt.Formatter. 
+func (v metric[T]) Format(f fmt.State, verb rune) { + var prefix string + y := math.Abs(float64(v.Val)) + if y < 1 { + for i := 0; y < 1 && i <= len(metricSmallPrefixes); i++ { + y *= 1000 + prefix = metricSmallPrefixes[i] + } + } else { + for i := 0; y > 1000 && i <= len(metricBigPrefixes); i++ { + y /= 1000 + prefix = metricBigPrefixes[i] + } + } + if v.Val < 0 { + y = -y + } + printer.Fprintf(f, fmtutil.FmtStateString(f, verb)+"%s%s", + y, prefix, v.Unit) +} + +// String implements fmt.Stringer. +func (v metric[T]) String() string { + return fmt.Sprint(v) +} + +type iec[T constraints.Integer | constraints.Float] struct { + Val T + Unit string +} + +var ( + _ fmt.Formatter = iec[int]{} + _ fmt.Stringer = iec[int]{} +) + +func IEC[T constraints.Integer | constraints.Float](x T, unit string) iec[T] { + return iec[T]{ + Val: x, + Unit: unit, + } +} + +var iecPrefixes = []string{ + "Ki", + "Mi", + "Gi", + "Ti", + "Pi", + "Ei", + "Zi", + "Yi", +} + +// String implements fmt.Formatter. +func (v iec[T]) Format(f fmt.State, verb rune) { + var prefix string + y := math.Abs(float64(v.Val)) + for i := 0; y > 1024 && i <= len(iecPrefixes); i++ { + y /= 1024 + prefix = iecPrefixes[i] + } + if v.Val < 0 { + y = -y + } + printer.Fprintf(f, fmtutil.FmtStateString(f, verb)+"%s%s", + number.Decimal(y), prefix, v.Unit) +} + +// String implements fmt.Stringer. +func (v iec[T]) String() string { + return fmt.Sprint(v) +} -- cgit v1.2.3-2-g168b