summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.golangci.yml1
-rw-r--r--cmd/btrfs-rec/inspect_dumptrees.go7
-rw-r--r--cmd/btrfs-rec/inspect_lsfiles.go29
-rw-r--r--cmd/btrfs-rec/inspect_lstrees.go24
-rw-r--r--cmd/btrfs-rec/inspect_rebuildmappings.go3
-rw-r--r--cmd/btrfs-rec/inspect_rebuildnodes.go3
-rw-r--r--cmd/btrfs-rec/inspect_scandevices.go14
-rw-r--r--cmd/btrfs-rec/inspect_spewitems.go6
-rw-r--r--cmd/btrfs-rec/main.go37
-rw-r--r--cmd/btrfs-rec/util.go83
-rw-r--r--lib/btrfs/btrfsitem/item_dir.go2
-rw-r--r--lib/btrfs/btrfssum/csum.go6
-rw-r--r--lib/btrfsprogs/btrfsinspect/print_addrspace.go30
-rw-r--r--lib/btrfsprogs/btrfsinspect/print_tree.go200
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go23
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go6
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go68
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go80
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go70
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go54
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go11
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go246
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go95
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go37
-rw-r--r--lib/btrfsprogs/btrfsinspect/scandevices.go56
-rw-r--r--lib/textui/log.go398
-rw-r--r--lib/textui/log_memstats.go132
-rw-r--r--lib/textui/log_test.go64
-rw-r--r--lib/textui/text.go204
-rw-r--r--lib/textui/text_test.go39
-rwxr-xr-xscripts/main.sh2
31 files changed, 1470 insertions, 560 deletions
diff --git a/.golangci.yml b/.golangci.yml
index efd6d2c..a6b3ed0 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -6,6 +6,7 @@ linters:
disable:
- structcheck # doesn't support Go 1.18 yet
enable:
+ - forbidigo
- gofmt
linters-settings:
gofmt:
diff --git a/cmd/btrfs-rec/inspect_dumptrees.go b/cmd/btrfs-rec/inspect_dumptrees.go
index 4abeaba..610f502 100644
--- a/cmd/btrfs-rec/inspect_dumptrees.go
+++ b/cmd/btrfs-rec/inspect_dumptrees.go
@@ -5,7 +5,6 @@
package main
import (
- "fmt"
"os"
"github.com/datawire/ocibuild/pkg/cliutil"
@@ -13,6 +12,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
func init() {
@@ -24,8 +24,9 @@ func init() {
},
RunE: func(fs *btrfs.FS, cmd *cobra.Command, _ []string) error {
const version = "5.18.1"
- fmt.Printf("btrfs-progs v%v\n", version)
- btrfsinspect.DumpTrees(cmd.Context(), os.Stdout, fs)
+ out := os.Stdout
+ textui.Fprintf(out, "btrfs-progs v%v\n", version)
+ btrfsinspect.DumpTrees(cmd.Context(), out, fs)
return nil
},
})
diff --git a/cmd/btrfs-rec/inspect_lsfiles.go b/cmd/btrfs-rec/inspect_lsfiles.go
index 9267f77..502d91d 100644
--- a/cmd/btrfs-rec/inspect_lsfiles.go
+++ b/cmd/btrfs-rec/inspect_lsfiles.go
@@ -22,6 +22,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsutil"
"git.lukeshu.com/btrfs-progs-ng/lib/maps"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
func init() {
@@ -32,20 +33,20 @@ func init() {
Args: cliutil.WrapPositionalArgs(cobra.NoArgs),
},
RunE: func(fs *btrfs.FS, cmd *cobra.Command, _ []string) (err error) {
+ out := bufio.NewWriter(os.Stdout)
+ defer out.Flush()
defer func() {
if r := derror.PanicToError(recover()); r != nil {
- fmt.Printf("\n\n%+v\n", r)
+ textui.Fprintf(out, "\n\n%+v\n", r)
err = fmt.Errorf("panicked")
}
}()
ctx := cmd.Context()
- out := bufio.NewWriter(os.Stdout)
printSubvol(out, "", true, "/", &btrfs.Subvolume{
FS: btrfsutil.NewBrokenTrees(ctx, fs),
TreeID: btrfsprim.FS_TREE_OBJECTID,
})
- out.Flush()
return nil
},
@@ -64,7 +65,7 @@ func printText(out io.Writer, prefix string, isLast bool, name, text string) {
if isLast {
first, rest = tL, tS
}
- for i, line := range strings.Split(fmt.Sprintf("%q %s", name, text), "\n") {
+ for i, line := range strings.Split(textui.Sprintf("%q %s", name, text), "\n") {
_, _ = io.WriteString(out, prefix)
if i == 0 {
_, _ = io.WriteString(out, first)
@@ -79,13 +80,13 @@ func printText(out io.Writer, prefix string, isLast bool, name, text string) {
func printSubvol(out io.Writer, prefix string, isLast bool, name string, subvol *btrfs.Subvolume) {
rootInode, err := subvol.GetRootInode()
if err != nil {
- printText(out, prefix, isLast, name+"/", fmt.Sprintf("subvol_id=%v err=%v",
+ printText(out, prefix, isLast, name+"/", textui.Sprintf("subvol_id=%v err=%v",
subvol.TreeID, fmtErr(err)))
return
}
dir, err := subvol.LoadDir(rootInode)
if err != nil {
- printText(out, prefix, isLast, name+"/", fmt.Sprintf("subvol_id=%v err=%v",
+ printText(out, prefix, isLast, name+"/", textui.Sprintf("subvol_id=%v err=%v",
subvol.TreeID, fmtErr(err)))
return
}
@@ -93,7 +94,7 @@ func printSubvol(out io.Writer, prefix string, isLast bool, name string, subvol
printDir(out, prefix, isLast, name, dir)
return
}
- printText(out, prefix, isLast, name+"/", fmt.Sprintf("subvol_id=%v", subvol.TreeID))
+ printText(out, prefix, isLast, name+"/", textui.Sprintf("subvol_id=%v", subvol.TreeID))
if isLast {
prefix += tS
} else {
@@ -117,7 +118,7 @@ func fmtInode(inode btrfs.BareInode) string {
} else {
mode = inode.InodeItem.Mode
}
- ret := fmt.Sprintf("ino=%v mode=%v", inode.Inode, mode)
+ ret := textui.Sprintf("ino=%v mode=%v", inode.Inode, mode)
if len(inode.Errs) > 0 {
ret += " err=" + fmtErr(inode.Errs)
}
@@ -152,7 +153,7 @@ func printDirEntry(out io.Writer, prefix string, isLast bool, subvol *btrfs.Subv
case btrfsitem.INODE_ITEM_KEY:
dir, err := subvol.LoadDir(entry.Location.ObjectID)
if err != nil {
- printText(out, prefix, isLast, name, fmt.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
+ printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
return
}
printDir(out, prefix, isLast, name, dir)
@@ -172,7 +173,7 @@ func printDirEntry(out io.Writer, prefix string, isLast bool, subvol *btrfs.Subv
}
file, err := subvol.LoadFile(entry.Location.ObjectID)
if err != nil {
- printText(out, prefix, isLast, name, fmt.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
+ printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
return
}
printSymlink(out, prefix, isLast, name, file)
@@ -183,7 +184,7 @@ func printDirEntry(out io.Writer, prefix string, isLast bool, subvol *btrfs.Subv
}
file, err := subvol.LoadFile(entry.Location.ObjectID)
if err != nil {
- printText(out, prefix, isLast, name, fmt.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
+ printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
return
}
printFile(out, prefix, isLast, name, file)
@@ -194,7 +195,7 @@ func printDirEntry(out io.Writer, prefix string, isLast bool, subvol *btrfs.Subv
}
file, err := subvol.LoadFile(entry.Location.ObjectID)
if err != nil {
- printText(out, prefix, isLast, name, fmt.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
+ printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
return
}
printSocket(out, prefix, isLast, name, file)
@@ -205,7 +206,7 @@ func printDirEntry(out io.Writer, prefix string, isLast bool, subvol *btrfs.Subv
}
file, err := subvol.LoadFile(entry.Location.ObjectID)
if err != nil {
- printText(out, prefix, isLast, name, fmt.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
+ printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
return
}
printPipe(out, prefix, isLast, name, file)
@@ -224,7 +225,7 @@ func printSymlink(out io.Writer, prefix string, isLast bool, name string, file *
file.Errs = append(file.Errs, err)
}
}
- printText(out, prefix, isLast, name, fmt.Sprintf(
+ printText(out, prefix, isLast, name, textui.Sprintf(
"-> %q : %s",
tgt,
fmtInode(file.BareInode)))
diff --git a/cmd/btrfs-rec/inspect_lstrees.go b/cmd/btrfs-rec/inspect_lstrees.go
index 54c2074..e92c544 100644
--- a/cmd/btrfs-rec/inspect_lstrees.go
+++ b/cmd/btrfs-rec/inspect_lstrees.go
@@ -5,8 +5,6 @@
package main
import (
- "encoding/json"
- "fmt"
"os"
"strconv"
"text/tabwriter"
@@ -25,6 +23,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/diskio"
"git.lukeshu.com/btrfs-progs-ng/lib/maps"
"git.lukeshu.com/btrfs-progs-ng/lib/slices"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
func init() {
@@ -36,15 +35,14 @@ func init() {
Args: cliutil.WrapPositionalArgs(cobra.NoArgs),
},
RunE: func(fs *btrfs.FS, cmd *cobra.Command, _ []string) error {
- var scanResults map[btrfsvol.DeviceID]btrfsinspect.ScanOneDeviceResult
+ ctx := cmd.Context()
+ var scanResults btrfsinspect.ScanDevicesResult
if scandevicesFilename != "" {
- scanResultsBytes, err := os.ReadFile(scandevicesFilename)
+ var err error
+ scanResults, err = readJSONFile[btrfsinspect.ScanDevicesResult](ctx, scandevicesFilename)
if err != nil {
return err
}
- if err := json.Unmarshal(scanResultsBytes, &scanResults); err != nil {
- return err
- }
}
var treeErrCnt int
@@ -57,19 +55,19 @@ func init() {
numWidth := len(strconv.Itoa(slices.Max(treeErrCnt, totalItems)))
table := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
- fmt.Fprintf(table, " errors\t% *s\n", numWidth, strconv.Itoa(treeErrCnt))
+ textui.Fprintf(table, " errors\t% *s\n", numWidth, strconv.Itoa(treeErrCnt))
for _, typ := range maps.SortedKeys(treeItemCnt) {
- fmt.Fprintf(table, " %v items\t% *s\n", typ, numWidth, strconv.Itoa(treeItemCnt[typ]))
+ textui.Fprintf(table, " %v items\t% *s\n", typ, numWidth, strconv.Itoa(treeItemCnt[typ]))
}
- fmt.Fprintf(table, " total items\t% *s\n", numWidth, strconv.Itoa(totalItems))
+ textui.Fprintf(table, " total items\t% *s\n", numWidth, strconv.Itoa(totalItems))
table.Flush()
}
visitedNodes := make(containers.Set[btrfsvol.LogicalAddr])
- btrfsutil.WalkAllTrees(cmd.Context(), fs, btrfsutil.WalkAllTreesHandler{
+ btrfsutil.WalkAllTrees(ctx, fs, btrfsutil.WalkAllTreesHandler{
PreTree: func(name string, treeID btrfsprim.ObjID) {
treeErrCnt = 0
treeItemCnt = make(map[btrfsitem.Type]int)
- fmt.Printf("tree id=%v name=%q\n", treeID, name)
+ textui.Fprintf(os.Stdout, "tree id=%v name=%q\n", treeID, name)
},
Err: func(_ *btrfsutil.WalkError) {
treeErrCnt++
@@ -98,7 +96,7 @@ func init() {
if scandevicesFilename != "" {
treeErrCnt = 0
treeItemCnt = make(map[btrfsitem.Type]int)
- fmt.Printf("lost+found\n")
+ textui.Fprintf(os.Stdout, "lost+found\n")
sb, _ := fs.Superblock()
for _, devResults := range scanResults {
for laddr := range devResults.FoundNodes {
diff --git a/cmd/btrfs-rec/inspect_rebuildmappings.go b/cmd/btrfs-rec/inspect_rebuildmappings.go
index 54535ec..da7d12e 100644
--- a/cmd/btrfs-rec/inspect_rebuildmappings.go
+++ b/cmd/btrfs-rec/inspect_rebuildmappings.go
@@ -15,6 +15,7 @@ import (
"github.com/spf13/cobra"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildmappings"
)
@@ -37,7 +38,7 @@ func init() {
ctx := cmd.Context()
dlog.Infof(ctx, "Reading %q...", args[0])
- scanResults, err := readScanResults(args[0])
+ scanResults, err := readJSONFile[btrfsinspect.ScanDevicesResult](ctx, args[0])
if err != nil {
return err
}
diff --git a/cmd/btrfs-rec/inspect_rebuildnodes.go b/cmd/btrfs-rec/inspect_rebuildnodes.go
index 5f6d9b5..0f3d60e 100644
--- a/cmd/btrfs-rec/inspect_rebuildnodes.go
+++ b/cmd/btrfs-rec/inspect_rebuildnodes.go
@@ -17,6 +17,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
)
@@ -31,7 +32,7 @@ func init() {
ctx := cmd.Context()
dlog.Infof(ctx, "Reading %q...", args[0])
- nodeScanResults, err := readScanResults(args[0])
+ nodeScanResults, err := readJSONFile[btrfsinspect.ScanDevicesResult](ctx, args[0])
if err != nil {
return err
}
diff --git a/cmd/btrfs-rec/inspect_scandevices.go b/cmd/btrfs-rec/inspect_scandevices.go
index 5c8b2b0..7235e45 100644
--- a/cmd/btrfs-rec/inspect_scandevices.go
+++ b/cmd/btrfs-rec/inspect_scandevices.go
@@ -58,17 +58,3 @@ func writeScanResults(w io.Writer, results btrfsinspect.ScanDevicesResult) (err
CompactIfUnder: 16,
}, results)
}
-
-func readScanResults(filename string) (btrfsinspect.ScanDevicesResult, error) {
- fh, err := os.Open(filename)
- if err != nil {
- return nil, err
- }
- var scanResults btrfsinspect.ScanDevicesResult
- buf := bufio.NewReader(fh)
- if err := lowmemjson.DecodeThenEOF(buf, &scanResults); err != nil {
- return nil, err
- }
- _ = fh.Close()
- return scanResults, nil
-}
diff --git a/cmd/btrfs-rec/inspect_spewitems.go b/cmd/btrfs-rec/inspect_spewitems.go
index a4e8d69..8d71797 100644
--- a/cmd/btrfs-rec/inspect_spewitems.go
+++ b/cmd/btrfs-rec/inspect_spewitems.go
@@ -5,7 +5,6 @@
package main
import (
- "fmt"
"os"
"github.com/datawire/dlib/dlog"
@@ -16,6 +15,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsutil"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
func init() {
@@ -37,13 +37,13 @@ func init() {
},
TreeWalkHandler: btrfstree.TreeWalkHandler{
Item: func(path btrfstree.TreePath, item btrfstree.Item) error {
- fmt.Printf("%s = ", path)
+ textui.Fprintf(os.Stdout, "%s = ", path)
spew.Dump(item)
os.Stdout.WriteString("\n")
return nil
},
BadItem: func(path btrfstree.TreePath, item btrfstree.Item) error {
- fmt.Printf("%s = ", path)
+ textui.Fprintf(os.Stdout, "%s = ", path)
spew.Dump(item)
os.Stdout.WriteString("\n")
return nil
diff --git a/cmd/btrfs-rec/main.go b/cmd/btrfs-rec/main.go
index c808237..13ae886 100644
--- a/cmd/btrfs-rec/main.go
+++ b/cmd/btrfs-rec/main.go
@@ -6,35 +6,19 @@ package main
import (
"context"
- "encoding/json"
- "fmt"
"os"
"github.com/datawire/dlib/dgroup"
"github.com/datawire/dlib/dlog"
"github.com/datawire/ocibuild/pkg/cliutil"
- "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
- "github.com/spf13/pflag"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsutil"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
-type logLevelFlag struct {
- logrus.Level
-}
-
-func (lvl *logLevelFlag) Type() string { return "loglevel" }
-func (lvl *logLevelFlag) Set(str string) error {
- var err error
- lvl.Level, err = logrus.ParseLevel(str)
- return err
-}
-
-var _ pflag.Value = (*logLevelFlag)(nil)
-
type subcommand struct {
cobra.Command
RunE func(*btrfs.FS, *cobra.Command, []string) error
@@ -43,8 +27,8 @@ type subcommand struct {
var inspectors, repairers []subcommand
func main() {
- logLevelFlag := logLevelFlag{
- Level: logrus.InfoLevel,
+ logLevelFlag := textui.LogLevelFlag{
+ Level: dlog.LogLevelInfo,
}
var pvsFlag []string
var mappingsFlag string
@@ -115,9 +99,10 @@ func main() {
runE := child.RunE
cmd.RunE = func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
- logger := logrus.New()
- logger.SetLevel(logLevelFlag.Level)
- ctx = dlog.WithLogger(ctx, dlog.WrapLogrus(logger))
+ logger := textui.NewLogger(os.Stderr, logLevelFlag.Level)
+ ctx = dlog.WithLogger(ctx, logger)
+ ctx = dlog.WithField(ctx, "mem", new(textui.LiveMemUse))
+ dlog.SetFallbackLogger(logger.WithField("btrfs-progs.THIS_IS_A_BUG", true))
grp := dgroup.NewGroup(ctx, dgroup.GroupConfig{
EnableSignalHandling: true,
@@ -137,14 +122,10 @@ func main() {
}()
if mappingsFlag != "" {
- bs, err := os.ReadFile(mappingsFlag)
+ mappingsJSON, err := readJSONFile[[]btrfsvol.Mapping](ctx, mappingsFlag)
if err != nil {
return err
}
- var mappingsJSON []btrfsvol.Mapping
- if err := json.Unmarshal(bs, &mappingsJSON); err != nil {
- return err
- }
for _, mapping := range mappingsJSON {
if err := fs.LV.AddMapping(mapping); err != nil {
return err
@@ -162,7 +143,7 @@ func main() {
}
if err := argparser.ExecuteContext(context.Background()); err != nil {
- fmt.Fprintf(os.Stderr, "%v: error: %v\n", argparser.CommandPath(), err)
+ textui.Fprintf(os.Stderr, "%v: error: %v\n", argparser.CommandPath(), err)
os.Exit(1)
}
}
diff --git a/cmd/btrfs-rec/util.go b/cmd/btrfs-rec/util.go
new file mode 100644
index 0000000..adfe97e
--- /dev/null
+++ b/cmd/btrfs-rec/util.go
@@ -0,0 +1,83 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package main
+
+import (
+ "bufio"
+ "context"
+ "io"
+ "os"
+ "time"
+
+ "git.lukeshu.com/go/lowmemjson"
+ "github.com/datawire/dlib/dlog"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
+)
+
+type runeScanner struct {
+ progress textui.Portion[int64]
+ progressWriter *textui.Progress[textui.Portion[int64]]
+ unreadCnt uint64
+ reader *bufio.Reader
+ closer io.Closer
+}
+
+func newRuneScanner(ctx context.Context, fh *os.File) (*runeScanner, error) {
+ fi, err := fh.Stat()
+ if err != nil {
+ return nil, err
+ }
+ ret := &runeScanner{
+ progress: textui.Portion[int64]{
+ D: fi.Size(),
+ },
+ progressWriter: textui.NewProgress[textui.Portion[int64]](ctx, dlog.LogLevelInfo, 1*time.Second),
+ reader: bufio.NewReader(fh),
+ closer: fh,
+ }
+ return ret, nil
+}
+
+func (rs *runeScanner) ReadRune() (r rune, size int, err error) {
+ r, size, err = rs.reader.ReadRune()
+ if rs.unreadCnt > 0 {
+ rs.unreadCnt--
+ } else {
+ rs.progress.N += int64(size)
+ rs.progressWriter.Set(rs.progress)
+ }
+ return
+}
+
+func (rs *runeScanner) UnreadRune() error {
+ rs.unreadCnt++
+ return rs.reader.UnreadRune()
+}
+
+func (rs *runeScanner) Close() error {
+ rs.progressWriter.Done()
+ return rs.closer.Close()
+}
+
+func readJSONFile[T any](ctx context.Context, filename string) (T, error) {
+ fh, err := os.Open(filename)
+ if err != nil {
+ var zero T
+ return zero, err
+ }
+ buf, err := newRuneScanner(dlog.WithField(ctx, "btrfs.read-json-file", filename), fh)
+ if err != nil {
+ var zero T
+ return zero, err
+ }
+ var ret T
+ if err := lowmemjson.DecodeThenEOF(buf, &ret); err != nil {
+ var zero T
+ return zero, err
+ }
+ _ = buf.Close()
+ return ret, nil
+}
diff --git a/lib/btrfs/btrfsitem/item_dir.go b/lib/btrfs/btrfsitem/item_dir.go
index 836c477..b3e33f4 100644
--- a/lib/btrfs/btrfsitem/item_dir.go
+++ b/lib/btrfs/btrfsitem/item_dir.go
@@ -30,7 +30,7 @@ type DirEntry struct { // DIR_ITEM=84 DIR_INDEX=96 XATTR_ITEM=24
NameLen uint16 `bin:"off=0x1b, siz=2"` // [ignored-when-writing]
Type FileType `bin:"off=0x1d, siz=1"`
binstruct.End `bin:"off=0x1e"`
- Data []byte `bin:"-"`
+ Data []byte `bin:"-"` // xattr value (only for XATTR_ITEM)
Name []byte `bin:"-"`
}
diff --git a/lib/btrfs/btrfssum/csum.go b/lib/btrfs/btrfssum/csum.go
index c7c1f37..770f6ea 100644
--- a/lib/btrfs/btrfssum/csum.go
+++ b/lib/btrfs/btrfssum/csum.go
@@ -91,11 +91,11 @@ func (typ CSumType) Sum(data []byte) (CSum, error) {
binary.LittleEndian.PutUint32(ret[:], crc)
return ret, nil
case TYPE_XXHASH:
- panic("not implemented")
+ panic("TODO: XXHASH not yet implemented")
case TYPE_SHA256:
- panic("not implemented")
+ panic("TODO: SHA256 not yet implemented")
case TYPE_BLAKE2:
- panic("not implemented")
+ panic("TODO: BLAKE2 not yet implemented")
default:
return CSum{}, fmt.Errorf("unknown checksum type: %v", typ)
}
diff --git a/lib/btrfsprogs/btrfsinspect/print_addrspace.go b/lib/btrfsprogs/btrfsinspect/print_addrspace.go
index 7436d5b..a8b992e 100644
--- a/lib/btrfsprogs/btrfsinspect/print_addrspace.go
+++ b/lib/btrfsprogs/btrfsinspect/print_addrspace.go
@@ -5,14 +5,12 @@
package btrfsinspect
import (
- "fmt"
"io"
"sort"
- "golang.org/x/text/message"
-
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
func PrintLogicalSpace(out io.Writer, fs *btrfs.FS) {
@@ -22,28 +20,27 @@ func PrintLogicalSpace(out io.Writer, fs *btrfs.FS) {
for _, mapping := range mappings {
if mapping.LAddr > prevEnd {
size := mapping.LAddr.Sub(prevEnd)
- fmt.Fprintf(out, "logical_hole laddr=%v size=%v\n", prevEnd, size)
+ textui.Fprintf(out, "logical_hole laddr=%v size=%v\n", prevEnd, size)
sumHole += size
}
if mapping.LAddr != prevBeg {
if !mapping.Flags.OK {
- fmt.Fprintf(out, "chunk laddr=%v size=%v flags=(missing)\n",
+ textui.Fprintf(out, "chunk laddr=%v size=%v flags=(missing)\n",
mapping.LAddr, mapping.Size)
} else {
- fmt.Fprintf(out, "chunk laddr=%v size=%v flags=%v\n",
+ textui.Fprintf(out, "chunk laddr=%v size=%v flags=%v\n",
mapping.LAddr, mapping.Size, mapping.Flags.Val)
}
}
- fmt.Fprintf(out, "\tstripe dev_id=%v paddr=%v\n",
+ textui.Fprintf(out, "\tstripe dev_id=%v paddr=%v\n",
mapping.PAddr.Dev, mapping.PAddr.Addr)
sumChunk += mapping.Size
prevBeg = mapping.LAddr
prevEnd = mapping.LAddr.Add(mapping.Size)
}
- p := message.NewPrinter(message.MatchLanguage("en"))
- p.Fprintf(out, "total logical holes = %v (%d)\n", sumHole, int64(sumHole))
- p.Fprintf(out, "total logical chunks = %v (%d)\n", sumChunk, int64(sumChunk))
- p.Fprintf(out, "total logical addr space = %v (%d)\n", prevEnd, int64(prevEnd))
+ textui.Fprintf(out, "total logical holes = %v (%d)\n", sumHole, int64(sumHole))
+ textui.Fprintf(out, "total logical chunks = %v (%d)\n", sumChunk, int64(sumChunk))
+ textui.Fprintf(out, "total logical addr space = %v (%d)\n", prevEnd, int64(prevEnd))
}
func PrintPhysicalSpace(out io.Writer, fs *btrfs.FS) {
@@ -62,16 +59,15 @@ func PrintPhysicalSpace(out io.Writer, fs *btrfs.FS) {
}
if mapping.PAddr.Addr > prevEnd {
size := mapping.PAddr.Addr.Sub(prevEnd)
- fmt.Fprintf(out, "physical_hole paddr=%v size=%v\n", prevEnd, size)
+ textui.Fprintf(out, "physical_hole paddr=%v size=%v\n", prevEnd, size)
sumHole += size
}
- fmt.Fprintf(out, "devext dev=%v paddr=%v size=%v laddr=%v\n",
+ textui.Fprintf(out, "devext dev=%v paddr=%v size=%v laddr=%v\n",
mapping.PAddr.Dev, mapping.PAddr.Addr, mapping.Size, mapping.LAddr)
sumExt += mapping.Size
prevEnd = mapping.PAddr.Addr.Add(mapping.Size)
}
- p := message.NewPrinter(message.MatchLanguage("en"))
- p.Fprintf(out, "total physical holes = %v (%d)\n", sumHole, int64(sumHole))
- p.Fprintf(out, "total physical extents = %v (%d)\n", sumExt, int64(sumExt))
- p.Fprintf(out, "total physical addr space = %v (%d)\n", prevEnd, int64(prevEnd))
+ textui.Fprintf(out, "total physical holes = %v (%d)\n", sumHole, int64(sumHole))
+ textui.Fprintf(out, "total physical extents = %v (%d)\n", sumExt, int64(sumExt))
+ textui.Fprintf(out, "total physical addr space = %v (%d)\n", prevEnd, int64(prevEnd))
}
diff --git a/lib/btrfsprogs/btrfsinspect/print_tree.go b/lib/btrfsprogs/btrfsinspect/print_tree.go
index 9e5a5d1..6c31350 100644
--- a/lib/btrfsprogs/btrfsinspect/print_tree.go
+++ b/lib/btrfsprogs/btrfsinspect/print_tree.go
@@ -6,7 +6,6 @@ package btrfsinspect
import (
"context"
- "fmt"
"io"
"math"
"strings"
@@ -22,6 +21,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
"git.lukeshu.com/btrfs-progs-ng/lib/diskio"
"git.lukeshu.com/btrfs-progs-ng/lib/slices"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
func DumpTrees(ctx context.Context, out io.Writer, fs *btrfs.FS) {
@@ -32,19 +32,19 @@ func DumpTrees(ctx context.Context, out io.Writer, fs *btrfs.FS) {
}
if superblock.RootTree != 0 {
- fmt.Fprintf(out, "root tree\n")
+ textui.Fprintf(out, "root tree\n")
printTree(ctx, out, fs, btrfsprim.ROOT_TREE_OBJECTID)
}
if superblock.ChunkTree != 0 {
- fmt.Fprintf(out, "chunk tree\n")
+ textui.Fprintf(out, "chunk tree\n")
printTree(ctx, out, fs, btrfsprim.CHUNK_TREE_OBJECTID)
}
if superblock.LogTree != 0 {
- fmt.Fprintf(out, "log root tree\n")
+ textui.Fprintf(out, "log root tree\n")
printTree(ctx, out, fs, btrfsprim.TREE_LOG_OBJECTID)
}
if superblock.BlockGroupRoot != 0 {
- fmt.Fprintf(out, "block group tree\n")
+ textui.Fprintf(out, "block group tree\n")
printTree(ctx, out, fs, btrfsprim.BLOCK_GROUP_TREE_OBJECTID)
}
fs.TreeWalk(
@@ -81,15 +81,15 @@ func DumpTrees(ctx context.Context, out io.Writer, fs *btrfs.FS) {
if !ok {
treeName = "file"
}
- fmt.Fprintf(out, "%v tree %v \n", treeName, fmtKey(item.Key))
+ textui.Fprintf(out, "%v tree %v \n", treeName, fmtKey(item.Key))
printTree(ctx, out, fs, item.Key.ObjectID)
return nil
},
},
)
- fmt.Fprintf(out, "total bytes %v\n", superblock.TotalBytes)
- fmt.Fprintf(out, "bytes used %v\n", superblock.BytesUsed)
- fmt.Fprintf(out, "uuid %v\n", superblock.FSUUID)
+ textui.Fprintf(out, "total bytes %v\n", superblock.TotalBytes)
+ textui.Fprintf(out, "bytes used %v\n", superblock.BytesUsed)
+ textui.Fprintf(out, "uuid %v\n", superblock.FSUUID)
}
// printTree mimics btrfs-progs
@@ -104,7 +104,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
return nil
},
PreKeyPointer: func(_ btrfstree.TreePath, item btrfstree.KeyPointer) error {
- fmt.Fprintf(out, "\t%v block %v gen %v\n",
+ textui.Fprintf(out, "\t%v block %v gen %v\n",
fmtKey(item.Key),
item.BlockPtr,
item.Generation)
@@ -115,65 +115,65 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
bs, _ := binstruct.Marshal(item.Body)
itemSize := uint32(len(bs))
itemOffset -= itemSize
- fmt.Fprintf(out, "\titem %v %v itemoff %v itemsize %v\n",
+ textui.Fprintf(out, "\titem %v %v itemoff %v itemsize %v\n",
i,
fmtKey(item.Key),
itemOffset,
itemSize)
switch body := item.Body.(type) {
case btrfsitem.FreeSpaceHeader:
- fmt.Fprintf(out, "\t\tlocation %v\n", fmtKey(body.Location))
- fmt.Fprintf(out, "\t\tcache generation %v entries %v bitmaps %v\n",
+ textui.Fprintf(out, "\t\tlocation %v\n", fmtKey(body.Location))
+ textui.Fprintf(out, "\t\tcache generation %v entries %v bitmaps %v\n",
body.Generation, body.NumEntries, body.NumBitmaps)
case btrfsitem.Inode:
- fmt.Fprintf(out, ""+
+ textui.Fprintf(out, ""+
"\t\tgeneration %v transid %v size %v nbytes %v\n"+
"\t\tblock group %v mode %o links %v uid %v gid %v rdev %v\n"+
"\t\tsequence %v flags %v\n",
body.Generation, body.TransID, body.Size, body.NumBytes,
body.BlockGroup, body.Mode, body.NLink, body.UID, body.GID, body.RDev,
body.Sequence, body.Flags)
- fmt.Fprintf(out, "\t\tatime %v\n", fmtTime(body.ATime))
- fmt.Fprintf(out, "\t\tctime %v\n", fmtTime(body.CTime))
- fmt.Fprintf(out, "\t\tmtime %v\n", fmtTime(body.MTime))
- fmt.Fprintf(out, "\t\totime %v\n", fmtTime(body.OTime))
+ textui.Fprintf(out, "\t\tatime %v\n", fmtTime(body.ATime))
+ textui.Fprintf(out, "\t\tctime %v\n", fmtTime(body.CTime))
+ textui.Fprintf(out, "\t\tmtime %v\n", fmtTime(body.MTime))
+ textui.Fprintf(out, "\t\totime %v\n", fmtTime(body.OTime))
case btrfsitem.InodeRefs:
for _, ref := range body {
- fmt.Fprintf(out, "\t\tindex %v namelen %v name: %s\n",
+ textui.Fprintf(out, "\t\tindex %v namelen %v name: %s\n",
ref.Index, ref.NameLen, ref.Name)
}
//case btrfsitem.INODE_EXTREF_KEY:
// // TODO
case btrfsitem.DirEntry:
- fmt.Fprintf(out, "\t\tlocation %v type %v\n",
+ textui.Fprintf(out, "\t\tlocation %v type %v\n",
fmtKey(body.Location), body.Type)
- fmt.Fprintf(out, "\t\ttransid %v data_len %v name_len %v\n",
+ textui.Fprintf(out, "\t\ttransid %v data_len %v name_len %v\n",
body.TransID, body.DataLen, body.NameLen)
- fmt.Fprintf(out, "\t\tname: %s\n", body.Name)
+ textui.Fprintf(out, "\t\tname: %s\n", body.Name)
if len(body.Data) > 0 {
- fmt.Fprintf(out, "\t\tdata %v\n", body.Data)
+ textui.Fprintf(out, "\t\tdata %v\n", body.Data)
}
//case btrfsitem.DIR_LOG_INDEX_KEY, btrfsitem.DIR_LOG_ITEM_KEY:
// // TODO
case btrfsitem.Root:
- fmt.Fprintf(out, "\t\tgeneration %v root_dirid %v bytenr %d byte_limit %v bytes_used %v\n",
+ textui.Fprintf(out, "\t\tgeneration %v root_dirid %v bytenr %d byte_limit %v bytes_used %v\n",
body.Generation, body.RootDirID, body.ByteNr, body.ByteLimit, body.BytesUsed)
- fmt.Fprintf(out, "\t\tlast_snapshot %v flags %v refs %v\n",
+ textui.Fprintf(out, "\t\tlast_snapshot %v flags %v refs %v\n",
body.LastSnapshot, body.Flags, body.Refs)
- fmt.Fprintf(out, "\t\tdrop_progress %v drop_level %v\n",
+ textui.Fprintf(out, "\t\tdrop_progress %v drop_level %v\n",
fmtKey(body.DropProgress), body.DropLevel)
- fmt.Fprintf(out, "\t\tlevel %v generation_v2 %v\n",
+ textui.Fprintf(out, "\t\tlevel %v generation_v2 %v\n",
body.Level, body.GenerationV2)
if body.Generation == body.GenerationV2 {
- fmt.Fprintf(out, "\t\tuuid %v\n", body.UUID)
- fmt.Fprintf(out, "\t\tparent_uuid %v\n", body.ParentUUID)
- fmt.Fprintf(out, "\t\treceived_uuid %v\n", body.ReceivedUUID)
- fmt.Fprintf(out, "\t\tctransid %v otransid %v stransid %v rtransid %v\n",
+ textui.Fprintf(out, "\t\tuuid %v\n", body.UUID)
+ textui.Fprintf(out, "\t\tparent_uuid %v\n", body.ParentUUID)
+ textui.Fprintf(out, "\t\treceived_uuid %v\n", body.ReceivedUUID)
+ textui.Fprintf(out, "\t\tctransid %v otransid %v stransid %v rtransid %v\n",
body.CTransID, body.OTransID, body.STransID, body.RTransID)
- fmt.Fprintf(out, "\t\tctime %v\n", fmtTime(body.CTime))
- fmt.Fprintf(out, "\t\totime %v\n", fmtTime(body.OTime))
- fmt.Fprintf(out, "\t\tstime %v\n", fmtTime(body.STime))
- fmt.Fprintf(out, "\t\trtime %v\n", fmtTime(body.RTime))
+ textui.Fprintf(out, "\t\tctime %v\n", fmtTime(body.CTime))
+ textui.Fprintf(out, "\t\totime %v\n", fmtTime(body.OTime))
+ textui.Fprintf(out, "\t\tstime %v\n", fmtTime(body.STime))
+ textui.Fprintf(out, "\t\trtime %v\n", fmtTime(body.RTime))
}
case btrfsitem.RootRef:
var tag string
@@ -183,22 +183,22 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
case btrfsitem.ROOT_BACKREF_KEY:
tag = "backref"
default:
- tag = fmt.Sprintf("(error: unhandled RootRef item type: %v)", item.Key.ItemType)
+ tag = textui.Sprintf("(error: unhandled RootRef item type: %v)", item.Key.ItemType)
}
- fmt.Fprintf(out, "\t\troot %v key dirid %v sequence %v name %s\n",
+ textui.Fprintf(out, "\t\troot %v key dirid %v sequence %v name %s\n",
tag, body.DirID, body.Sequence, body.Name)
case btrfsitem.Extent:
- fmt.Fprintf(out, "\t\trefs %v gen %v flags %v\n",
+ textui.Fprintf(out, "\t\trefs %v gen %v flags %v\n",
body.Head.Refs, body.Head.Generation, body.Head.Flags)
if body.Head.Flags.Has(btrfsitem.EXTENT_FLAG_TREE_BLOCK) {
- fmt.Fprintf(out, "\t\ttree block %v level %v\n",
+ textui.Fprintf(out, "\t\ttree block %v level %v\n",
fmtKey(body.Info.Key), body.Info.Level)
}
printExtentInlineRefs(out, body.Refs)
case btrfsitem.Metadata:
- fmt.Fprintf(out, "\t\trefs %v gen %v flags %v\n",
+ textui.Fprintf(out, "\t\trefs %v gen %v flags %v\n",
body.Head.Refs, body.Head.Generation, body.Head.Flags)
- fmt.Fprintf(out, "\t\ttree block skinny level %v\n", item.Key.Offset)
+ textui.Fprintf(out, "\t\ttree block skinny level %v\n", item.Key.Offset)
printExtentInlineRefs(out, body.Refs)
//case btrfsitem.EXTENT_DATA_REF_KEY:
// // TODO
@@ -206,72 +206,72 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
// // TODO
case btrfsitem.ExtentCSum:
start := btrfsvol.LogicalAddr(item.Key.Offset)
- fmt.Fprintf(out, "\t\trange start %d end %d length %d",
+ textui.Fprintf(out, "\t\trange start %d end %d length %d",
start, start.Add(body.Size()), body.Size())
sumsPerLine := slices.Max(1, len(btrfssum.CSum{})/body.ChecksumSize/2)
i := 0
_ = body.Walk(ctx, func(pos btrfsvol.LogicalAddr, sum btrfssum.ShortSum) error {
if i%sumsPerLine == 0 {
- fmt.Fprintf(out, "\n\t\t")
+ textui.Fprintf(out, "\n\t\t")
} else {
- fmt.Fprintf(out, " ")
+ textui.Fprintf(out, " ")
}
- fmt.Fprintf(out, "[%d] 0x%x", pos, sum)
+ textui.Fprintf(out, "[%d] 0x%x", pos, sum)
i++
return nil
})
- fmt.Fprintf(out, "\n")
+ textui.Fprintf(out, "\n")
case btrfsitem.FileExtent:
- fmt.Fprintf(out, "\t\tgeneration %v type %v\n",
+ textui.Fprintf(out, "\t\tgeneration %v type %v\n",
body.Generation, body.Type)
switch body.Type {
case btrfsitem.FILE_EXTENT_INLINE:
- fmt.Fprintf(out, "\t\tinline extent data size %v ram_bytes %v compression %v\n",
+ textui.Fprintf(out, "\t\tinline extent data size %v ram_bytes %v compression %v\n",
len(body.BodyInline), body.RAMBytes, body.Compression)
case btrfsitem.FILE_EXTENT_PREALLOC:
- fmt.Fprintf(out, "\t\tprealloc data disk byte %v nr %v\n",
+ textui.Fprintf(out, "\t\tprealloc data disk byte %v nr %v\n",
body.BodyExtent.DiskByteNr,
body.BodyExtent.DiskNumBytes)
- fmt.Fprintf(out, "\t\tprealloc data offset %v nr %v\n",
+ textui.Fprintf(out, "\t\tprealloc data offset %v nr %v\n",
body.BodyExtent.Offset,
body.BodyExtent.NumBytes)
case btrfsitem.FILE_EXTENT_REG:
- fmt.Fprintf(out, "\t\textent data disk byte %d nr %d\n",
+ textui.Fprintf(out, "\t\textent data disk byte %d nr %d\n",
body.BodyExtent.DiskByteNr,
body.BodyExtent.DiskNumBytes)
- fmt.Fprintf(out, "\t\textent data offset %d nr %d ram %v\n",
+ textui.Fprintf(out, "\t\textent data offset %d nr %d ram %v\n",
body.BodyExtent.Offset,
body.BodyExtent.NumBytes,
body.RAMBytes)
- fmt.Fprintf(out, "\t\textent compression %v\n",
+ textui.Fprintf(out, "\t\textent compression %v\n",
body.Compression)
default:
- fmt.Fprintf(out, "\t\t(error) unknown file extent type %v", body.Type)
+ textui.Fprintf(out, "\t\t(error) unknown file extent type %v", body.Type)
}
case btrfsitem.BlockGroup:
- fmt.Fprintf(out, "\t\tblock group used %v chunk_objectid %v flags %v\n",
+ textui.Fprintf(out, "\t\tblock group used %v chunk_objectid %v flags %v\n",
body.Used, body.ChunkObjectID, body.Flags)
case btrfsitem.FreeSpaceInfo:
- fmt.Fprintf(out, "\t\tfree space info extent count %v flags %v\n",
+ textui.Fprintf(out, "\t\tfree space info extent count %v flags %v\n",
body.ExtentCount, body.Flags)
case btrfsitem.FreeSpaceBitmap:
- fmt.Fprintf(out, "\t\tfree space bitmap\n")
+ textui.Fprintf(out, "\t\tfree space bitmap\n")
case btrfsitem.Chunk:
- fmt.Fprintf(out, "\t\tlength %d owner %d stripe_len %v type %v\n",
+ textui.Fprintf(out, "\t\tlength %d owner %d stripe_len %v type %v\n",
body.Head.Size, body.Head.Owner, body.Head.StripeLen, body.Head.Type)
- fmt.Fprintf(out, "\t\tio_align %v io_width %v sector_size %v\n",
+ textui.Fprintf(out, "\t\tio_align %v io_width %v sector_size %v\n",
body.Head.IOOptimalAlign, body.Head.IOOptimalWidth, body.Head.IOMinSize)
- fmt.Fprintf(out, "\t\tnum_stripes %v sub_stripes %v\n",
+ textui.Fprintf(out, "\t\tnum_stripes %v sub_stripes %v\n",
body.Head.NumStripes, body.Head.SubStripes)
for i, stripe := range body.Stripes {
- fmt.Fprintf(out, "\t\t\tstripe %v devid %d offset %d\n",
+ textui.Fprintf(out, "\t\t\tstripe %v devid %d offset %d\n",
i, stripe.DeviceID, stripe.Offset)
- fmt.Fprintf(out, "\t\t\tdev_uuid %v\n",
+ textui.Fprintf(out, "\t\t\tdev_uuid %v\n",
stripe.DeviceUUID)
}
case btrfsitem.Dev:
- fmt.Fprintf(out, ""+
+ textui.Fprintf(out, ""+
"\t\tdevid %d total_bytes %v bytes_used %v\n"+
"\t\tio_align %v io_width %v sector_size %v type %v\n"+
"\t\tgeneration %v start_offset %v dev_group %v\n"+
@@ -285,7 +285,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
body.DevUUID,
body.FSUUID)
case btrfsitem.DevExtent:
- fmt.Fprintf(out, ""+
+ textui.Fprintf(out, ""+
"\t\tdev extent chunk_tree %v\n"+
"\t\tchunk_objectid %v chunk_offset %d length %d\n"+
"\t\tchunk_tree_uuid %v\n",
@@ -298,49 +298,49 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
//case btrfsitem.QGROUP_LIMIT_KEY:
// // TODO
case btrfsitem.UUIDMap:
- fmt.Fprintf(out, "\t\tsubvol_id %d\n", body.ObjID)
+ textui.Fprintf(out, "\t\tsubvol_id %d\n", body.ObjID)
//case btrfsitem.STRING_ITEM_KEY:
// // TODO
case btrfsitem.DevStats:
- fmt.Fprintf(out, "\t\tpersistent item objectid %v offset %v\n",
+ textui.Fprintf(out, "\t\tpersistent item objectid %v offset %v\n",
item.Key.ObjectID.Format(item.Key.ItemType), item.Key.Offset)
switch item.Key.ObjectID {
case btrfsprim.DEV_STATS_OBJECTID:
- fmt.Fprintf(out, "\t\tdevice stats\n")
- fmt.Fprintf(out, "\t\twrite_errs %v read_errs %v flush_errs %v corruption_errs %v generation %v\n",
+ textui.Fprintf(out, "\t\tdevice stats\n")
+ textui.Fprintf(out, "\t\twrite_errs %v read_errs %v flush_errs %v corruption_errs %v generation %v\n",
body.Values[btrfsitem.DEV_STAT_WRITE_ERRS],
body.Values[btrfsitem.DEV_STAT_READ_ERRS],
body.Values[btrfsitem.DEV_STAT_FLUSH_ERRS],
body.Values[btrfsitem.DEV_STAT_CORRUPTION_ERRS],
body.Values[btrfsitem.DEV_STAT_GENERATION_ERRS])
default:
- fmt.Fprintf(out, "\t\tunknown persistent item objectid %v\n", item.Key.ObjectID)
+ textui.Fprintf(out, "\t\tunknown persistent item objectid %v\n", item.Key.ObjectID)
}
//case btrfsitem.TEMPORARY_ITEM_KEY:
// // TODO
case btrfsitem.Empty:
switch item.Key.ItemType {
case btrfsitem.ORPHAN_ITEM_KEY: // 48
- fmt.Fprintf(out, "\t\torphan item\n")
+ textui.Fprintf(out, "\t\torphan item\n")
case btrfsitem.TREE_BLOCK_REF_KEY: // 176
- fmt.Fprintf(out, "\t\ttree block backref\n")
+ textui.Fprintf(out, "\t\ttree block backref\n")
case btrfsitem.SHARED_BLOCK_REF_KEY: // 182
- fmt.Fprintf(out, "\t\tshared block backref\n")
+ textui.Fprintf(out, "\t\tshared block backref\n")
case btrfsitem.FREE_SPACE_EXTENT_KEY: // 199
- fmt.Fprintf(out, "\t\tfree space extent\n")
+ textui.Fprintf(out, "\t\tfree space extent\n")
case btrfsitem.QGROUP_RELATION_KEY: // 246
// do nothing
//case btrfsitem.EXTENT_REF_V0_KEY:
- // fmt.Fprintf(out, "\t\textent ref v0 (deprecated)\n")
+ // textui.Fprintf(out, "\t\textent ref v0 (deprecated)\n")
//case btrfsitem.CSUM_ITEM_KEY:
- // fmt.Fprintf(out, "\t\tcsum item\n")
+ // textui.Fprintf(out, "\t\tcsum item\n")
default:
- fmt.Fprintf(out, "\t\t(error) unhandled empty item type: %v\n", item.Key.ItemType)
+ textui.Fprintf(out, "\t\t(error) unhandled empty item type: %v\n", item.Key.ItemType)
}
case btrfsitem.Error:
- fmt.Fprintf(out, "\t\t(error) error item: %v\n", body.Err)
+ textui.Fprintf(out, "\t\t(error) error item: %v\n", body.Err)
default:
- fmt.Fprintf(out, "\t\t(error) unhandled item type: %T\n", body)
+ textui.Fprintf(out, "\t\t(error) unhandled item type: %T\n", body)
}
return nil
},
@@ -361,37 +361,37 @@ func printHeaderInfo(out io.Writer, node btrfstree.Node) {
var typename string
if node.Head.Level > 0 { // internal node
typename = "node"
- fmt.Fprintf(out, "node %v level %v items %v free space %v",
+ textui.Fprintf(out, "node %v level %v items %v free space %v",
node.Head.Addr,
node.Head.Level,
node.Head.NumItems,
node.MaxItems()-node.Head.NumItems)
} else { // leaf node
typename = "leaf"
- fmt.Fprintf(out, "leaf %d items %v free space %v",
+ textui.Fprintf(out, "leaf %d items %v free space %v",
node.Head.Addr,
node.Head.NumItems,
node.LeafFreeSpace())
}
- fmt.Fprintf(out, " generation %v owner %v\n",
+ textui.Fprintf(out, " generation %v owner %v\n",
node.Head.Generation,
node.Head.Owner)
- fmt.Fprintf(out, "%v %d flags %v backref revision %v\n",
+ textui.Fprintf(out, "%v %d flags %v backref revision %v\n",
typename,
node.Head.Addr,
node.Head.Flags,
node.Head.BackrefRev)
- fmt.Fprintf(out, "checksum stored %v\n", node.Head.Checksum.Fmt(node.ChecksumType))
+ textui.Fprintf(out, "checksum stored %v\n", node.Head.Checksum.Fmt(node.ChecksumType))
if calcSum, err := node.CalculateChecksum(); err != nil {
- fmt.Fprintf(out, "checksum calced %v\n", err)
+ textui.Fprintf(out, "checksum calced %v\n", err)
} else {
- fmt.Fprintf(out, "checksum calced %v\n", calcSum.Fmt(node.ChecksumType))
+ textui.Fprintf(out, "checksum calced %v\n", calcSum.Fmt(node.ChecksumType))
}
- fmt.Fprintf(out, "fs uuid %v\n", node.Head.MetadataUUID)
- fmt.Fprintf(out, "chunk uuid %v\n", node.Head.ChunkTreeUUID)
+ textui.Fprintf(out, "fs uuid %v\n", node.Head.MetadataUUID)
+ textui.Fprintf(out, "chunk uuid %v\n", node.Head.ChunkTreeUUID)
}
// printExtentInlineRefs mimics part of btrfs-progs kernel-shared/print-tree.c:print_extent_item()
@@ -401,22 +401,22 @@ func printExtentInlineRefs(out io.Writer, refs []btrfsitem.ExtentInlineRef) {
case nil:
switch ref.Type {
case btrfsitem.TREE_BLOCK_REF_KEY:
- fmt.Fprintf(out, "\t\ttree block backref root %v\n",
+ textui.Fprintf(out, "\t\ttree block backref root %v\n",
btrfsprim.ObjID(ref.Offset))
case btrfsitem.SHARED_BLOCK_REF_KEY:
- fmt.Fprintf(out, "\t\tshared block backref parent %v\n",
+ textui.Fprintf(out, "\t\tshared block backref parent %v\n",
ref.Offset)
default:
- fmt.Fprintf(out, "\t\t(error) unexpected empty sub-item type: %v\n", ref.Type)
+ textui.Fprintf(out, "\t\t(error) unexpected empty sub-item type: %v\n", ref.Type)
}
case btrfsitem.ExtentDataRef:
- fmt.Fprintf(out, "\t\textent data backref root %v objectid %v offset %v count %v\n",
+ textui.Fprintf(out, "\t\textent data backref root %v objectid %v offset %v count %v\n",
subitem.Root, subitem.ObjectID, subitem.Offset, subitem.Count)
case btrfsitem.SharedDataRef:
- fmt.Fprintf(out, "\t\tshared data backref parent %v count %v\n",
+ textui.Fprintf(out, "\t\tshared data backref parent %v count %v\n",
ref.Offset, subitem.Count)
default:
- fmt.Fprintf(out, "\t\t(error) unexpected sub-item type: %T\n", subitem)
+ textui.Fprintf(out, "\t\t(error) unexpected sub-item type: %T\n", subitem)
}
}
}
@@ -424,25 +424,25 @@ func printExtentInlineRefs(out io.Writer, refs []btrfsitem.ExtentInlineRef) {
// mimics print-tree.c:btrfs_print_key()
func fmtKey(key btrfsprim.Key) string {
var out strings.Builder
- fmt.Fprintf(&out, "key (%v %v", key.ObjectID.Format(key.ItemType), key.ItemType)
+ textui.Fprintf(&out, "key (%v %v", key.ObjectID.Format(key.ItemType), key.ItemType)
switch key.ItemType {
case btrfsitem.QGROUP_RELATION_KEY: //TODO, btrfsitem.QGROUP_INFO_KEY, btrfsitem.QGROUP_LIMIT_KEY:
- panic("not implemented")
+ panic("TODO: printing qgroup items not yet implemented")
case btrfsitem.UUID_SUBVOL_KEY, btrfsitem.UUID_RECEIVED_SUBVOL_KEY:
- fmt.Fprintf(&out, " %#08x)", key.Offset)
+ textui.Fprintf(&out, " %#08x)", key.Offset)
case btrfsitem.ROOT_ITEM_KEY:
- fmt.Fprintf(&out, " %v)", btrfsprim.ObjID(key.Offset))
+ textui.Fprintf(&out, " %v)", btrfsprim.ObjID(key.Offset))
default:
if key.Offset == math.MaxUint64 {
- fmt.Fprintf(&out, " -1)")
+ textui.Fprintf(&out, " -1)")
} else {
- fmt.Fprintf(&out, " %v)", key.Offset)
+ textui.Fprintf(&out, " %v)", key.Offset)
}
}
return out.String()
}
func fmtTime(t btrfsprim.Time) string {
- return fmt.Sprintf("%v.%v (%v)",
+ return textui.Sprintf("%v.%v (%v)",
t.Sec, t.NSec, t.ToStd().Format("2006-01-02 15:04:05"))
}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go
index c6e23bc..ae83513 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go
@@ -6,16 +6,17 @@ package rebuildmappings
import (
"context"
- "fmt"
"sort"
"github.com/datawire/dlib/dlog"
+ "golang.org/x/text/number"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
"git.lukeshu.com/btrfs-progs-ng/lib/maps"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
const minFuzzyPct = 0.5
@@ -42,7 +43,10 @@ func fuzzyMatchBlockGroupSums(ctx context.Context,
physicalSums map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr],
logicalSums btrfssum.SumRunWithGaps[btrfsvol.LogicalAddr],
) error {
- dlog.Info(ctx, "... Indexing physical regions...") // O(m)
+ _ctx := ctx
+
+ ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.substep", "indexing")
+ dlog.Info(ctx, "Indexing physical regions...") // O(m)
regions := ListUnmappedPhysicalRegions(fs)
physicalIndex := make(map[btrfssum.ShortSum][]btrfsvol.QualifiedPhysicalAddr)
if err := WalkUnmappedPhysicalRegions(ctx, physicalSums, regions, func(devID btrfsvol.DeviceID, region btrfssum.SumRun[btrfsvol.PhysicalAddr]) error {
@@ -56,9 +60,10 @@ func fuzzyMatchBlockGroupSums(ctx context.Context,
}); err != nil {
return err
}
- dlog.Info(ctx, "... ... done indexing")
+ dlog.Info(ctx, "... done indexing")
- dlog.Info(ctx, "... Searching...")
+ ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.substep", "searching")
+ dlog.Info(ctx, "Searching...")
numBlockgroups := len(blockgroups)
for i, bgLAddr := range maps.SortedKeys(blockgroups) {
blockgroup := blockgroups[bgLAddr]
@@ -95,19 +100,19 @@ func fuzzyMatchBlockGroupSums(ctx context.Context,
matchesStr = ""
case 1: // not sure how this can happen, but whatev
pct := float64(d-best.Dat[0].N) / float64(d)
- matchesStr = fmt.Sprintf("%v%%", int(100*pct))
+ matchesStr = textui.Sprintf("%v", number.Percent(pct))
apply = pct > minFuzzyPct
case 2:
pct := float64(d-best.Dat[0].N) / float64(d)
pct2 := float64(d-best.Dat[1].N) / float64(d)
- matchesStr = fmt.Sprintf("best=%v%% secondbest=%v%%", int(100*pct), int(100*pct2))
+ matchesStr = textui.Sprintf("best=%v secondbest=%v", number.Percent(pct), number.Percent(pct2))
apply = pct > minFuzzyPct && pct2 < minFuzzyPct
}
lvl := dlog.LogLevelError
if apply {
lvl = dlog.LogLevelInfo
}
- dlog.Logf(ctx, lvl, "... (%v/%v) blockgroup[laddr=%v] matches=[%s]; bestpossible=%v%% (based on %v runs)",
+ dlog.Logf(ctx, lvl, "(%v/%v) blockgroup[laddr=%v] matches=[%s]; bestpossible=%v%% (based on %v runs)",
i+1, numBlockgroups, bgLAddr, matchesStr, int(100*bgRun.PctFull()), len(bgRun.Runs))
if !apply {
continue
@@ -124,12 +129,12 @@ func fuzzyMatchBlockGroupSums(ctx context.Context,
},
}
if err := fs.LV.AddMapping(mapping); err != nil {
- dlog.Errorf(ctx, "... error: %v", err)
+ dlog.Errorf(ctx, "error: %v", err)
continue
}
delete(blockgroups, bgLAddr)
}
- dlog.Info(ctx, "... ... done searching")
+ dlog.Info(ctx, "... done searching")
return nil
}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go
index 6a8333a..be82f87 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go
@@ -29,7 +29,7 @@ func matchBlockGroupSums(ctx context.Context,
blockgroup := blockgroups[bgLAddr]
bgRun := SumsForLogicalRegion(logicalSums, blockgroup.LAddr, blockgroup.Size)
if len(bgRun.Runs) == 0 {
- dlog.Errorf(ctx, "... (%v/%v) blockgroup[laddr=%v] can't be matched because it has 0 runs",
+ dlog.Errorf(ctx, "(%v/%v) blockgroup[laddr=%v] can't be matched because it has 0 runs",
i+1, numBlockgroups, bgLAddr)
continue
}
@@ -55,7 +55,7 @@ func matchBlockGroupSums(ctx context.Context,
if len(matches) == 1 {
lvl = dlog.LogLevelInfo
}
- dlog.Logf(ctx, lvl, "... (%v/%v) blockgroup[laddr=%v] has %v matches based on %v%% coverage from %v runs",
+ dlog.Logf(ctx, lvl, "(%v/%v) blockgroup[laddr=%v] has %v matches based on %v%% coverage from %v runs",
i+1, numBlockgroups, bgLAddr, len(matches), int(100*bgRun.PctFull()), len(bgRun.Runs))
if len(matches) != 1 {
continue
@@ -72,7 +72,7 @@ func matchBlockGroupSums(ctx context.Context,
},
}
if err := fs.LV.AddMapping(mapping); err != nil {
- dlog.Errorf(ctx, "... error: %v", err)
+ dlog.Errorf(ctx, "error: %v", err)
continue
}
delete(blockgroups, bgLAddr)
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go
index f1959b4..7311aca 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go
@@ -9,7 +9,6 @@ import (
"fmt"
"github.com/datawire/dlib/dlog"
- "golang.org/x/text/message"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
@@ -54,35 +53,39 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect
dlog.Infof(ctx, "plan: 5/6 search for block groups in checksum map (exact)")
dlog.Infof(ctx, "plan: 6/6 search for block groups in checksum map (fuzzy)")
- dlog.Infof(ctx, "1/6: Processing %d chunks...", numChunks)
+ _ctx := ctx
+ ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "1/6")
+ dlog.Infof(_ctx, "1/6: Processing %d chunks...", numChunks)
for _, devID := range devIDs {
devResults := scanResults[devID]
for _, chunk := range devResults.FoundChunks {
for _, mapping := range chunk.Chunk.Mappings(chunk.Key) {
if err := fs.LV.AddMapping(mapping); err != nil {
- dlog.Errorf(ctx, "... error: adding chunk: %v", err)
+ dlog.Errorf(ctx, "error: adding chunk: %v", err)
}
}
}
}
- dlog.Info(ctx, "... done processing chunks")
+ dlog.Info(_ctx, "... done processing chunks")
- dlog.Infof(ctx, "2/6: Processing %d device extents...", numDevExts)
+ ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "2/6")
+ dlog.Infof(_ctx, "2/6: Processing %d device extents...", numDevExts)
for _, devID := range devIDs {
devResults := scanResults[devID]
for _, ext := range devResults.FoundDevExtents {
if err := fs.LV.AddMapping(ext.DevExt.Mapping(ext.Key)); err != nil {
- dlog.Errorf(ctx, "... error: adding devext: %v", err)
+ dlog.Errorf(ctx, "error: adding devext: %v", err)
}
}
}
- dlog.Info(ctx, "... done processing device extents")
+ dlog.Info(_ctx, "... done processing device extents")
// Do the nodes "last" to avoid bloating the mappings table
// too much. (Because nodes are numerous and small, while the
// others are few and large; so it is likely that many of the
// nodes will be subsumed by other things.)
- dlog.Infof(ctx, "3/6: Processing %d nodes...", numNodes)
+ ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "3/6")
+ dlog.Infof(_ctx, "3/6: Processing %d nodes...", numNodes)
for _, devID := range devIDs {
devResults := scanResults[devID]
// Sort them so that progress numbers are predictable.
@@ -97,16 +100,17 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect
Size: nodeSize,
SizeLocked: false,
}); err != nil {
- dlog.Errorf(ctx, "... error: adding node ident: %v", err)
+ dlog.Errorf(ctx, "error: adding node ident: %v", err)
}
}
}
}
- dlog.Info(ctx, "... done processing nodes")
+ dlog.Info(_ctx, "... done processing nodes")
// Use block groups to add missing flags (and as a hint to
// combine node entries).
- dlog.Infof(ctx, "4/6: Processing %d block groups...", numBlockGroups)
+ ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "4/6")
+ dlog.Infof(_ctx, "4/6: Processing %d block groups...", numBlockGroups)
// First dedup them, because they change for allocations and
// CoW means that they'll bounce around a lot, so you likely
// have oodles of duplicates?
@@ -119,7 +123,7 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect
bg := bgs[bgLAddr]
otherLAddr, otherPAddr := fs.LV.ResolveAny(bg.LAddr, bg.Size)
if otherLAddr < 0 || otherPAddr.Addr < 0 {
- dlog.Errorf(ctx, "... error: could not pair blockgroup laddr=%v (size=%v flags=%v) with a mapping",
+ dlog.Errorf(ctx, "error: could not pair blockgroup laddr=%v (size=%v flags=%v) with a mapping",
bg.LAddr, bg.Size, bg.Flags)
continue
}
@@ -136,14 +140,29 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect
},
}
if err := fs.LV.AddMapping(mapping); err != nil {
- dlog.Errorf(ctx, "... error: adding flags from blockgroup: %v", err)
+ dlog.Errorf(ctx, "error: adding flags from blockgroup: %v", err)
continue
}
delete(bgs, bgLAddr)
}
- dlog.Info(ctx, "... done processing block groups")
-
- dlog.Infof(ctx, "5/6: Searching for %d block groups in checksum map (exact)...", len(bgs))
+ dlog.Info(_ctx, "... done processing block groups")
+
+ // More than once, I've been tempted to get rid of this exact-search step and just have the fuzzy-search step.
+ // After all, looking at the timestamps in the log, it's faster per blockgroup! For some background, the big-O
+ // for each (per blockgroup) looks like:
+ //
+ // - exact-search: O(bgSize+physicalBlocks)
+ // - fuzzy-search: O(bgSize*physicalBlocks) worst-case; O(bgSize*log(physicalBlocks)) expected
+ //
+ // The fuzzy-search is only fast because the exact-search is so good at getting `physicalBlocks` down.
+ // Empirically: if I remove the exact-search step, then the fuzzy-match step is more than an order of magnitude
+ // slower.
+ //
+ // The exact-search probably could be optimized to be substantially faster (by a constant factor; not affecting
+ // the big-O) by figuring out how to inline function calls and reduce allocations, but IMO it's "fast
+ // enough" for now.
+ ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "5/6")
+ dlog.Infof(_ctx, "5/6: Searching for %d block groups in checksum map (exact)...", len(bgs))
physicalSums := ExtractPhysicalSums(scanResults)
logicalSums := ExtractLogicalSums(ctx, scanResults)
if err := matchBlockGroupSums(ctx, fs, bgs, physicalSums, logicalSums); err != nil {
@@ -151,14 +170,15 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect
}
dlog.Info(ctx, "... done searching for exact block groups")
- dlog.Infof(ctx, "6/6: Searching for %d block groups in checksum map (fuzzy)...", len(bgs))
+ ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "6/6")
+ dlog.Infof(_ctx, "6/6: Searching for %d block groups in checksum map (fuzzy)...", len(bgs))
if err := fuzzyMatchBlockGroupSums(ctx, fs, bgs, physicalSums, logicalSums); err != nil {
return err
}
- dlog.Info(ctx, "... done searching for fuzzy block groups")
+ dlog.Info(_ctx, "... done searching for fuzzy block groups")
- dlog.Info(ctx, "report:")
- p := message.NewPrinter(message.MatchLanguage("en"))
+ ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "report")
+ dlog.Info(_ctx, "report:")
unmappedPhysicalRegions := ListUnmappedPhysicalRegions(fs)
var unmappedPhysical btrfsvol.AddrDelta
@@ -169,22 +189,22 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect
unmappedPhysical += region.End.Sub(region.Beg)
}
}
- dlog.Info(ctx, p.Sprintf("... %d KiB of unmapped physical space (across %d regions)", int(unmappedPhysical/1024), numUnmappedPhysical))
+ dlog.Infof(ctx, "... %d KiB of unmapped physical space (across %d regions)", int(unmappedPhysical/1024), numUnmappedPhysical)
unmappedLogicalRegions := ListUnmappedLogicalRegions(fs, logicalSums)
var unmappedLogical btrfsvol.AddrDelta
for _, region := range unmappedLogicalRegions {
unmappedLogical += region.Size()
}
- dlog.Info(ctx, p.Sprintf("... %d KiB of unmapped summed logical space (across %d regions)", int(unmappedLogical/1024), len(unmappedLogicalRegions)))
+ dlog.Infof(ctx, "... %d KiB of unmapped summed logical space (across %d regions)", int(unmappedLogical/1024), len(unmappedLogicalRegions))
var unmappedBlockGroups btrfsvol.AddrDelta
for _, bg := range bgs {
unmappedBlockGroups += bg.Size
}
- dlog.Info(ctx, p.Sprintf("... %d KiB of unmapped block groups (across %d groups)", int(unmappedBlockGroups/1024), len(bgs)))
+ dlog.Infof(ctx, "... %d KiB of unmapped block groups (across %d groups)", int(unmappedBlockGroups/1024), len(bgs))
- dlog.Info(ctx, "detailed report:")
+ dlog.Info(_ctx, "detailed report:")
for _, devID := range maps.SortedKeys(unmappedPhysicalRegions) {
for _, region := range unmappedPhysicalRegions[devID] {
dlog.Infof(ctx, "... unmapped physical region: dev=%v beg=%v end=%v (size=%v)",
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go
index 50c75a4..33eb352 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go
@@ -168,21 +168,14 @@ func NewRebuiltTrees(
}
type rootStats struct {
- TreeID btrfsprim.ObjID
- RootNode btrfsvol.LogicalAddr
-
- DoneLeafs int
- TotalLeafs int
+ Leafs textui.Portion[int]
AddedItems int
ReplacedItems int
}
func (s rootStats) String() string {
- return fmt.Sprintf("tree %v: adding root node@%v: %v%% (%v/%v) (added %v items, replaced %v items)",
- s.TreeID, s.RootNode,
- int(100*float64(s.DoneLeafs)/float64(s.TotalLeafs)),
- s.DoneLeafs, s.TotalLeafs,
- s.AddedItems, s.ReplacedItems)
+ return textui.Sprintf("%v (added %v items, replaced %v items)",
+ s.Leafs, s.AddedItems, s.ReplacedItems)
}
// AddRoot adds an additional root node to an existing tree. It is
@@ -192,24 +185,16 @@ func (s rootStats) String() string {
// It is invalid (panic) to call AddRoot for a tree without having
// called AddTree first.
func (ts *RebuiltTrees) AddRoot(ctx context.Context, treeID btrfsprim.ObjID, rootNode btrfsvol.LogicalAddr) {
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-root", fmt.Sprintf("tree=%v rootNode=%v", treeID, rootNode))
tree := ts.trees[treeID]
tree.Roots.Insert(rootNode)
+ var stats rootStats
+ stats.Leafs.D = len(tree.leafToRoots)
progressWriter := textui.NewProgress[rootStats](ctx, dlog.LogLevelInfo, 1*time.Second)
- numAdded := 0
- numReplaced := 0
- progress := func(done int) {
- progressWriter.Set(rootStats{
- TreeID: treeID,
- RootNode: rootNode,
- DoneLeafs: done,
- TotalLeafs: len(tree.leafToRoots),
- AddedItems: numAdded,
- ReplacedItems: numReplaced,
- })
- }
for i, leaf := range maps.SortedKeys(tree.leafToRoots) {
- progress(i)
+ stats.Leafs.N = i
+ progressWriter.Set(stats)
if tree.Leafs.Has(leaf) || !tree.leafToRoots[leaf].Has(rootNode) {
continue
}
@@ -221,16 +206,17 @@ func (ts *RebuiltTrees) AddRoot(ctx context.Context, treeID btrfsprim.ObjID, roo
}
if oldPtr, exists := tree.Items.Load(itemKey); !exists {
tree.Items.Store(itemKey, newPtr)
- numAdded++
+ stats.AddedItems++
} else if tree.shouldReplace(ts.graph, oldPtr.Node, newPtr.Node) {
tree.Items.Store(itemKey, newPtr)
- numReplaced++
+ stats.ReplacedItems++
}
ts.cbAddedItem(ctx, treeID, itemKey)
- progress(i)
+ progressWriter.Set(stats)
}
}
- progress(len(tree.leafToRoots))
+ stats.Leafs.N = len(tree.leafToRoots)
+ progressWriter.Set(stats)
progressWriter.Done()
}
@@ -250,6 +236,9 @@ func (ts *RebuiltTrees) addTree(ctx context.Context, treeID btrfsprim.ObjID, sta
if slices.Contains(treeID, stack) {
return false
}
+ stack = append(stack, treeID)
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree", stack)
+ dlog.Info(ctx, "adding tree...")
tree := &rebuiltTree{
ID: treeID,
@@ -267,7 +256,6 @@ func (ts *RebuiltTrees) addTree(ctx context.Context, treeID btrfsprim.ObjID, sta
case btrfsprim.BLOCK_GROUP_TREE_OBJECTID:
root = ts.sb.BlockGroupRoot
default:
- stack := append(stack, treeID)
if !ts.addTree(ctx, btrfsprim.ROOT_TREE_OBJECTID, stack) {
return false
}
@@ -279,7 +267,7 @@ func (ts *RebuiltTrees) addTree(ctx context.Context, treeID btrfsprim.ObjID, sta
tree.UUID = rootItem.UUID
if rootItem.ParentUUID != (btrfsprim.UUID{}) {
tree.ParentGen = rootOff
- if !ts.addTree(ctx, btrfsprim.ROOT_TREE_OBJECTID, stack) {
+ if !ts.addTree(ctx, btrfsprim.UUID_TREE_OBJECTID, stack) {
return false
}
parentID, ok := ts.cbLookupUUID(ctx, rootItem.ParentUUID)
@@ -299,32 +287,22 @@ func (ts *RebuiltTrees) addTree(ctx context.Context, treeID btrfsprim.ObjID, sta
ts.AddRoot(ctx, treeID, root)
}
- return true
-}
-
-type indexStats struct {
- TreeID btrfsprim.ObjID
- DoneNodes int
- TotalNodes int
-}
-
-func (s indexStats) String() string {
- return fmt.Sprintf("tree %v: indexing leaf nodes: %v%% (%v/%v)",
- s.TreeID,
- int(100*float64(s.DoneNodes)/float64(s.TotalNodes)),
- s.DoneNodes, s.TotalNodes)
+ return
}
func (tree *rebuiltTree) indexLeafs(ctx context.Context, graph pkggraph.Graph) {
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.substep", "index-nodes")
+
nodeToRoots := make(map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr])
- progressWriter := textui.NewProgress[indexStats](ctx, dlog.LogLevelInfo, 1*time.Second)
+
+ var stats textui.Portion[int]
+ stats.D = len(graph.Nodes)
+ progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, 1*time.Second)
progress := func() {
- progressWriter.Set(indexStats{
- TreeID: tree.ID,
- DoneNodes: len(nodeToRoots),
- TotalNodes: len(graph.Nodes),
- })
+ stats.N = len(nodeToRoots)
+ progressWriter.Set(stats)
}
+
progress()
for _, node := range maps.SortedKeys(graph.Nodes) {
tree.indexNode(graph, node, nodeToRoots, progress, nil)
@@ -345,6 +323,8 @@ func (tree *rebuiltTree) indexNode(graph pkggraph.Graph, node btrfsvol.LogicalAd
return
}
if slices.Contains(node, stack) {
+ // This is a panic because graph.FinalCheck() should
+ // have already checked for loops.
panic("loop")
}
if !tree.isOwnerOK(graph.Nodes[node].Owner, graph.Nodes[node].Generation) {
@@ -395,7 +375,7 @@ func (ts *RebuiltTrees) Load(ctx context.Context, treeID btrfsprim.ObjID, key bt
if !ok {
return nil, false
}
- return ts.keyIO.ReadItem(ptr)
+ return ts.keyIO.ReadItem(ctx, ptr)
}
// Search searches for an item from a tree.
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go
index 1180b0d..c4ed675 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go
@@ -5,8 +5,12 @@
package graph
import (
+ "context"
"fmt"
"reflect"
+ "time"
+
+ "github.com/datawire/dlib/dlog"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
@@ -14,6 +18,9 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
"git.lukeshu.com/btrfs-progs-ng/lib/diskio"
+ "git.lukeshu.com/btrfs-progs-ng/lib/maps"
+ "git.lukeshu.com/btrfs-progs-ng/lib/slices"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
type Node struct {
@@ -131,7 +138,7 @@ func New(sb btrfstree.Superblock) *Graph {
return g
}
-func (g *Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) {
+func (g Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) {
nodeData := Node{
Level: nodeRef.Data.Head.Level,
Generation: nodeRef.Data.Head.Generation,
@@ -184,23 +191,72 @@ func (g *Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.N
}
}
-func (g *Graph) FinalCheck(fs diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock, progress func(n, d int)) error {
- total := len(g.EdgesTo)
- done := 0
- progress(done, total)
+func (g Graph) FinalCheck(ctx context.Context, fs diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) error {
+ var stats textui.Portion[int]
+ _ctx := ctx
+
+ ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.read.substep", "check-keypointers")
+ dlog.Info(_ctx, "Checking keypointers for dead-ends...")
+ progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, 1*time.Second)
+ stats.D = len(g.EdgesTo)
+ progressWriter.Set(stats)
for laddr := range g.EdgesTo {
if _, ok := g.Nodes[laddr]; !ok {
_, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, sb, laddr, btrfstree.NodeExpectations{
LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr},
})
if err == nil {
+ progressWriter.Done()
return fmt.Errorf("node@%v exists but was not in node scan results", laddr)
}
g.BadNodes[laddr] = err
}
- done++
- progress(done, total)
+ stats.N++
+ progressWriter.Set(stats)
}
+ progressWriter.Done()
+ dlog.Info(ctx, "... done checking keypointers")
+
+ ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.read.substep", "check-for-loops")
+ dlog.Info(_ctx, "Checking for btree loops...")
+ stats.D = len(g.Nodes)
+ stats.N = 0
+ progressWriter = textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, 1*time.Second)
+ progressWriter.Set(stats)
+ visited := make(containers.Set[btrfsvol.LogicalAddr], len(g.Nodes))
+ numLoops := 0
+ var checkNode func(node btrfsvol.LogicalAddr, stack []btrfsvol.LogicalAddr)
+ checkNode = func(node btrfsvol.LogicalAddr, stack []btrfsvol.LogicalAddr) {
+ defer func() {
+ stats.N = len(visited)
+ progressWriter.Set(stats)
+ }()
+ if visited.Has(node) {
+ return
+ }
+ if slices.Contains(node, stack) {
+ numLoops++
+ dlog.Error(ctx, "loop:")
+ for _, line := range g.renderLoop(append(stack, node)) {
+ dlog.Errorf(ctx, " %s", line)
+ }
+ return
+ }
+ stack = append(stack, node)
+ for _, kp := range g.EdgesTo[node] {
+ checkNode(kp.FromNode, stack)
+ }
+ visited.Insert(node)
+ }
+ for _, node := range maps.SortedKeys(g.Nodes) {
+ checkNode(node, nil)
+ }
+ progressWriter.Done()
+ if numLoops > 0 {
+ return fmt.Errorf("%d btree loops", numLoops)
+ }
+	dlog.Info(ctx, "... done checking for loops")
+
return nil
}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go
index e76985c..0e51805 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go
@@ -6,47 +6,18 @@ package graph
import (
"fmt"
- "io"
"strings"
"github.com/datawire/dlib/derror"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/maps"
- "git.lukeshu.com/btrfs-progs-ng/lib/slices"
)
-func (g Graph) ShowLoops(out io.Writer) {
- orphans := make(containers.Set[btrfsvol.LogicalAddr])
- for node := range g.Nodes {
- if len(g.EdgesTo[node]) == 0 {
- orphans.Insert(node)
- }
- }
-
- loopWalk(out, g, 0)
- for _, orphan := range maps.SortedKeys(orphans) {
- loopWalk(out, g, orphan)
- }
-}
-
-func loopWalk(out io.Writer, scanData Graph, stack ...btrfsvol.LogicalAddr) {
- for _, kp := range scanData.EdgesFrom[stack[len(stack)-1]] {
- childStack := append(stack, kp.ToNode)
- if slices.Contains(kp.ToNode, stack) {
- loopRender(out, scanData, childStack...)
- } else {
- loopWalk(out, scanData, childStack...)
- }
- }
-}
-
-func nodeRender(scanData Graph, node btrfsvol.LogicalAddr) []string {
+func (g Graph) renderNode(node btrfsvol.LogicalAddr) []string {
if node == 0 {
return []string{"root"}
- } else if nodeData, ok := scanData.Nodes[node]; ok {
+ } else if nodeData, ok := g.Nodes[node]; ok {
return []string{
fmt.Sprintf("{addr: %v,", node),
fmt.Sprintf(" level: %v,", nodeData.Level),
@@ -61,7 +32,7 @@ func nodeRender(scanData Graph, node btrfsvol.LogicalAddr) []string {
nodeData.MaxItem.ItemType,
nodeData.MaxItem.Offset),
}
- } else if nodeErr, ok := scanData.BadNodes[node]; ok {
+ } else if nodeErr, ok := g.BadNodes[node]; ok {
return []string{
fmt.Sprintf("{addr:%v,", node),
fmt.Sprintf(" err:%q}", nodeErr.Error()),
@@ -71,7 +42,7 @@ func nodeRender(scanData Graph, node btrfsvol.LogicalAddr) []string {
}
}
-func edgeRender(scanData Graph, kp Edge) []string {
+func (g Graph) renderEdge(kp Edge) []string {
a := fmt.Sprintf("[%d]={", kp.FromItem)
b := strings.Repeat(" ", len(a))
ret := []string{
@@ -85,8 +56,8 @@ func edgeRender(scanData Graph, kp Edge) []string {
}
var err error
- if toNode, ok := scanData.Nodes[kp.ToNode]; !ok {
- err = scanData.BadNodes[kp.ToNode]
+ if toNode, ok := g.Nodes[kp.ToNode]; !ok {
+ err = g.BadNodes[kp.ToNode]
} else {
err = checkNodeExpectations(kp, toNode)
}
@@ -100,7 +71,7 @@ func edgeRender(scanData Graph, kp Edge) []string {
return ret
}
-func loopRender(out io.Writer, scanData Graph, stack ...btrfsvol.LogicalAddr) {
+func (g Graph) renderLoop(stack []btrfsvol.LogicalAddr) []string {
var lines []string
add := func(suffixes []string) {
curLen := 0
@@ -126,20 +97,17 @@ func loopRender(out io.Writer, scanData Graph, stack ...btrfsvol.LogicalAddr) {
for i, node := range stack {
if i > 0 {
- for _, kp := range scanData.EdgesTo[node] {
+ for _, kp := range g.EdgesTo[node] {
if kp.FromNode == stack[i-1] {
- add(edgeRender(scanData, *kp))
+ add(g.renderEdge(*kp))
break
}
}
}
- add(nodeRender(scanData, node))
+ add(g.renderNode(node))
}
- fmt.Fprintln(out, "loop:")
- for _, line := range lines {
- fmt.Fprintln(out, " "+line)
- }
+ return lines
}
func checkNodeExpectations(kp Edge, toNode Node) error {
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go
index d92495e..b1e68f9 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go
@@ -5,8 +5,11 @@
package keyio
import (
+ "context"
"fmt"
+ "github.com/datawire/dlib/dlog"
+
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
@@ -85,8 +88,9 @@ func (o *Handle) SetGraph(graph graph.Graph) {
o.graph = graph
}
-func (o *Handle) readNode(laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node] {
+func (o *Handle) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node] {
if cached, ok := o.cache.Get(laddr); ok {
+ dlog.Tracef(ctx, "cache-hit node@%v", laddr)
return cached
}
@@ -95,6 +99,7 @@ func (o *Handle) readNode(laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.Logic
panic(fmt.Errorf("should not happen: node@%v is not mentioned in the in-memory graph", laddr))
}
+ dlog.Infof(ctx, "cache-miss node@%v, reading...", laddr)
ref, err := btrfstree.ReadNode(o.rawFile, o.sb, laddr, btrfstree.NodeExpectations{
LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr},
Level: containers.Optional[uint8]{OK: true, Val: graphInfo.Level},
@@ -118,11 +123,11 @@ func (o *Handle) readNode(laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.Logic
return ref
}
-func (o *Handle) ReadItem(ptr ItemPtr) (item btrfsitem.Item, ok bool) {
+func (o *Handle) ReadItem(ctx context.Context, ptr ItemPtr) (item btrfsitem.Item, ok bool) {
if o.graph.Nodes[ptr.Node].Level != 0 || ptr.Idx < 0 {
return nil, false
}
- items := o.readNode(ptr.Node).Data.BodyLeaf
+ items := o.readNode(ctx, ptr.Node).Data.BodyLeaf
if ptr.Idx >= len(items) {
return nil, false
}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
index 66eaf1a..a7fe5c7 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
@@ -38,6 +38,10 @@ func (a keyAndTree) Cmp(b keyAndTree) int {
return containers.NativeCmp(a.TreeID, b.TreeID)
}
+func (o keyAndTree) String() string {
+ return fmt.Sprintf("tree=%v key=%v", o.TreeID, o.Key)
+}
+
type rebuilder struct {
sb btrfstree.Superblock
rebuilt *btrees.RebuiltTrees
@@ -45,23 +49,27 @@ type rebuilder struct {
graph graph.Graph
keyIO *keyio.Handle
- curKey keyAndTree
- queue []keyAndTree
- pendingAugments map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int
+ curKey keyAndTree
+ treeQueue []btrfsprim.ObjID
+ itemQueue []keyAndTree
+ augmentQueue map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int
}
func RebuildNodes(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) (map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], error) {
- nodeGraph, keyIO, err := ScanDevices(ctx, fs, nodeScanResults) // ScanDevices does its own logging
- if err != nil {
- return nil, err
- }
+ _ctx := ctx
+ ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.step", "read-fs-data")
dlog.Info(ctx, "Reading superblock...")
sb, err := fs.Superblock()
if err != nil {
return nil, err
}
+ nodeGraph, keyIO, err := ScanDevices(ctx, fs, nodeScanResults) // ScanDevices does its own logging
+ if err != nil {
+ return nil, err
+ }
+ ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.step", "rebuild")
dlog.Info(ctx, "Rebuilding node tree...")
o := &rebuilder{
sb: *sb,
@@ -84,112 +92,104 @@ func (o *rebuilder) ioErr(ctx context.Context, err error) {
panic(err)
}
-type rebuildStats struct {
- PassNum int
- Task string
- N, D int
-}
-
-func (s rebuildStats) String() string {
- pct := 100
- if s.D > 0 {
- pct = int(100 * float64(s.N) / float64(s.D))
- }
- return fmt.Sprintf("... pass %d: %s %v%% (%v/%v)",
- s.PassNum, s.Task, pct, s.N, s.D)
-}
+func (o *rebuilder) rebuild(_ctx context.Context) error {
+ // Initialize
+ o.augmentQueue = make(map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int)
-func (o *rebuilder) rebuild(ctx context.Context) error {
- passNum := 0
- dlog.Infof(ctx, "... pass %d: scanning for implied items", passNum)
// Seed the queue
- o.pendingAugments = make(map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int)
- o.rebuilt.AddTree(ctx, btrfsprim.ROOT_TREE_OBJECTID)
- o.rebuilt.AddTree(ctx, btrfsprim.CHUNK_TREE_OBJECTID)
- //o.rebuilt.AddTree(ctx, btrfsprim.TREE_LOG_OBJECTID) // TODO(lukeshu): Special LOG_TREE handling
- o.rebuilt.AddTree(ctx, btrfsprim.BLOCK_GROUP_TREE_OBJECTID)
- for {
- // Handle items in the queue
- queue := o.queue
- o.queue = nil
- progressWriter := textui.NewProgress[rebuildStats](ctx, dlog.LogLevelInfo, 1*time.Second)
- queueProgress := func(done int) {
- progressWriter.Set(rebuildStats{
- PassNum: passNum,
- Task: "processing item queue",
- N: done,
- D: len(queue),
- })
+ o.treeQueue = []btrfsprim.ObjID{
+ btrfsprim.ROOT_TREE_OBJECTID,
+ btrfsprim.CHUNK_TREE_OBJECTID,
+ // btrfsprim.TREE_LOG_OBJECTID, // TODO(lukeshu): Special LOG_TREE handling
+ btrfsprim.BLOCK_GROUP_TREE_OBJECTID,
+ }
+
+ for passNum := 0; len(o.treeQueue) > 0 || len(o.itemQueue) > 0 || len(o.augmentQueue) > 0; passNum++ {
+ passCtx := dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.rebuild.pass", passNum)
+
+ // Add items to the queue (drain o.treeQueue, fill o.itemQueue)
+ stepCtx := dlog.WithField(passCtx, "btrfsinspect.rebuild-nodes.rebuild.substep", "collect-items")
+ treeQueue := o.treeQueue
+ o.treeQueue = nil
+ sort.Slice(treeQueue, func(i, j int) bool {
+ return treeQueue[i] < treeQueue[j]
+ })
+ // Because trees can be wildly different sizes, it's impossible to have a meaningful
+ // progress percentage here.
+ for _, treeID := range treeQueue {
+ o.rebuilt.AddTree(stepCtx, treeID)
}
- for i, key := range queue {
- queueProgress(i)
+
+ // Handle items in the queue (drain o.itemQueue, fill o.augmentQueue and o.treeQueue)
+ stepCtx = dlog.WithField(passCtx, "btrfsinspect.rebuild-nodes.rebuild.substep", "process-items")
+ itemQueue := o.itemQueue
+ o.itemQueue = nil
+ var progress textui.Portion[int]
+ progress.D = len(itemQueue)
+ progressWriter := textui.NewProgress[textui.Portion[int]](stepCtx, dlog.LogLevelInfo, 1*time.Second)
+ stepCtx = dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress)
+ for i, key := range itemQueue {
+ itemCtx := dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.process.item", key)
+ progress.N = i
+ progressWriter.Set(progress)
o.curKey = key
- itemBody, ok := o.rebuilt.Load(ctx, key.TreeID, key.Key)
+ itemBody, ok := o.rebuilt.Load(itemCtx, key.TreeID, key.Key)
if !ok {
- o.ioErr(ctx, fmt.Errorf("could not read previously read item: %v", key))
+ o.ioErr(itemCtx, fmt.Errorf("could not read previously read item: %v", key))
}
- handleItem(o, ctx, key.TreeID, btrfstree.Item{
+ handleItem(o, itemCtx, key.TreeID, btrfstree.Item{
Key: key.Key,
Body: itemBody,
})
if key.ItemType == btrfsitem.ROOT_ITEM_KEY {
- o.rebuilt.AddTree(ctx, key.ObjectID)
+ o.treeQueue = append(o.treeQueue, key.ObjectID)
}
}
- queueProgress(len(queue))
+ progress.N = len(itemQueue)
+ progressWriter.Set(progress)
progressWriter.Done()
- // Check if we can bail
- if len(o.queue) == 0 && len(o.pendingAugments) == 0 {
- break
- }
-
- // Apply augments that were requested while handling items from the queue
- resolvedAugments := make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], len(o.pendingAugments))
- numAugments := 0
- for _, treeID := range maps.SortedKeys(o.pendingAugments) {
- dlog.Infof(ctx, "... ... augments for tree %v:", treeID)
- resolvedAugments[treeID] = o.resolveTreeAugments(ctx, o.pendingAugments[treeID])
- numAugments += len(resolvedAugments[treeID])
- }
- o.pendingAugments = make(map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int)
- progressWriter = textui.NewProgress[rebuildStats](ctx, dlog.LogLevelInfo, 1*time.Second)
- numAugmented := 0
- augmentProgress := func() {
- progressWriter.Set(rebuildStats{
- PassNum: passNum,
- Task: "applying augments",
- N: numAugmented,
- D: numAugments,
- })
+ // Apply augments (drain o.augmentQueue, fill o.itemQueue)
+ stepCtx = dlog.WithField(passCtx, "btrfsinspect.rebuild-nodes.rebuild.substep", "apply-augments")
+ resolvedAugments := make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], len(o.augmentQueue))
+ progress.N = 0
+ progress.D = 0
+ for _, treeID := range maps.SortedKeys(o.augmentQueue) {
+ treeCtx := dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID)
+ resolvedAugments[treeID] = o.resolveTreeAugments(treeCtx, o.augmentQueue[treeID])
+ progress.D += len(resolvedAugments[treeID])
}
+ o.augmentQueue = make(map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int)
+ progressWriter = textui.NewProgress[textui.Portion[int]](stepCtx, dlog.LogLevelInfo, 1*time.Second)
+ stepCtx = dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress)
for _, treeID := range maps.SortedKeys(resolvedAugments) {
+ treeCtx := dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID)
for _, nodeAddr := range maps.SortedKeys(resolvedAugments[treeID]) {
- augmentProgress()
- o.rebuilt.AddRoot(ctx, treeID, nodeAddr)
- numAugmented++
+ progressWriter.Set(progress)
+ o.rebuilt.AddRoot(treeCtx, treeID, nodeAddr)
+ progress.N++
}
}
- augmentProgress()
+ progressWriter.Set(progress)
progressWriter.Done()
-
- passNum++
- dlog.Infof(ctx, "... pass %d: scanning for implied items", passNum)
}
return nil
}
func (o *rebuilder) cbAddedItem(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) {
- o.queue = append(o.queue, keyAndTree{
+ o.itemQueue = append(o.itemQueue, keyAndTree{
TreeID: tree,
Key: key,
})
}
func (o *rebuilder) cbLookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) {
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.reason", "tree Root")
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.key",
+ fmt.Sprintf("tree=%v key={%v %v ?}", btrfsprim.ROOT_TREE_OBJECTID, tree, btrfsitem.ROOT_ITEM_KEY))
key, ok := o._want(ctx, btrfsprim.ROOT_TREE_OBJECTID, tree, btrfsitem.ROOT_ITEM_KEY)
if !ok {
- o.queue = append(o.queue, o.curKey)
+ o.itemQueue = append(o.itemQueue, o.curKey)
return 0, btrfsitem.Root{}, false
}
itemBody, ok := o.rebuilt.Load(ctx, btrfsprim.ROOT_TREE_OBJECTID, key)
@@ -211,8 +211,10 @@ func (o *rebuilder) cbLookupRoot(ctx context.Context, tree btrfsprim.ObjID) (off
func (o *rebuilder) cbLookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) {
key := btrfsitem.UUIDToKey(uuid)
- if ok := o._wantOff(ctx, btrfsprim.UUID_TREE_OBJECTID, key.ObjectID, key.ItemType, key.Offset); !ok {
- o.queue = append(o.queue, o.curKey)
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.reason", "resolve parent UUID")
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.key", keyAndTree{TreeID: btrfsprim.UUID_TREE_OBJECTID, Key: key})
+ if ok := o._wantOff(ctx, btrfsprim.UUID_TREE_OBJECTID, key); !ok {
+ o.itemQueue = append(o.itemQueue, o.curKey)
return 0, false
}
itemBody, ok := o.rebuilt.Load(ctx, btrfsprim.UUID_TREE_OBJECTID, key)
@@ -342,9 +344,9 @@ func (o *rebuilder) resolveTreeAugments(ctx context.Context, listsWithDistances
for i, list := range lists {
chose := list.Intersection(ret)
if len(chose) == 0 {
- dlog.Infof(ctx, "... ... ... lists[%d]: chose (none) from %v", i, maps.SortedKeys(list))
+ dlog.Infof(ctx, "lists[%d]: chose (none) from %v", i, maps.SortedKeys(list))
} else {
- dlog.Infof(ctx, "... ... ... lists[%d]: chose %v from %v", i, chose.TakeOne(), maps.SortedKeys(list))
+ dlog.Infof(ctx, "lists[%d]: chose %v from %v", i, chose.TakeOne(), maps.SortedKeys(list))
}
}
@@ -355,7 +357,7 @@ func (o *rebuilder) resolveTreeAugments(ctx context.Context, listsWithDistances
func (o *rebuilder) wantAugment(ctx context.Context, treeID btrfsprim.ObjID, choices containers.Set[btrfsvol.LogicalAddr]) {
if len(choices) == 0 {
- dlog.Errorf(ctx, "augment(tree=%v): could not find wanted item", treeID)
+ dlog.Error(ctx, "could not find wanted item")
return
}
choicesWithDist := make(map[btrfsvol.LogicalAddr]int, len(choices))
@@ -366,8 +368,8 @@ func (o *rebuilder) wantAugment(ctx context.Context, treeID btrfsprim.ObjID, cho
}
choicesWithDist[choice] = dist
}
- dlog.Infof(ctx, "augment(tree=%v): %v", treeID, maps.SortedKeys(choicesWithDist))
- o.pendingAugments[treeID] = append(o.pendingAugments[treeID], choicesWithDist)
+ dlog.Infof(ctx, "choices=%v", maps.SortedKeys(choicesWithDist))
+ o.augmentQueue[treeID] = append(o.augmentQueue[treeID], choicesWithDist)
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -378,12 +380,15 @@ func (o *rebuilder) fsErr(ctx context.Context, e error) {
}
// want implements rebuildCallbacks.
-func (o *rebuilder) want(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) {
+func (o *rebuilder) want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) {
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason)
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key",
+ fmt.Sprintf("tree=%v key={%v %v ?}", treeID, objID, typ))
o._want(ctx, treeID, objID, typ)
}
func (o *rebuilder) _want(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) (key btrfsprim.Key, ok bool) {
if !o.rebuilt.AddTree(ctx, treeID) {
- o.queue = append(o.queue, o.curKey)
+ o.itemQueue = append(o.itemQueue, o.curKey)
return btrfsprim.Key{}, false
}
@@ -402,7 +407,6 @@ func (o *rebuilder) _want(ctx context.Context, treeID btrfsprim.ObjID, objID btr
// OK, we need to insert it
- ctx = dlog.WithField(ctx, "want_key", fmt.Sprintf("tree=%v key={%v %v ?}", treeID, objID, typ))
wants := make(containers.Set[btrfsvol.LogicalAddr])
o.rebuilt.Keys(treeID).Subrange(
func(k btrfsprim.Key, _ keyio.ItemPtr) int { k.Offset = 0; return tgt.Cmp(k) },
@@ -415,29 +419,30 @@ func (o *rebuilder) _want(ctx context.Context, treeID btrfsprim.ObjID, objID btr
}
// wantOff implements rebuildCallbacks.
-func (o *rebuilder) wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) {
- o._wantOff(ctx, treeID, objID, typ, off)
+func (o *rebuilder) wantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) {
+ key := btrfsprim.Key{
+ ObjectID: objID,
+ ItemType: typ,
+ Offset: off,
+ }
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason)
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", keyAndTree{TreeID: treeID, Key: key})
+ o._wantOff(ctx, treeID, key)
}
-func (o *rebuilder) _wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) (ok bool) {
+func (o *rebuilder) _wantOff(ctx context.Context, treeID btrfsprim.ObjID, tgt btrfsprim.Key) (ok bool) {
if !o.rebuilt.AddTree(ctx, treeID) {
- o.queue = append(o.queue, o.curKey)
+ o.itemQueue = append(o.itemQueue, o.curKey)
return false
}
// check if we already have it
- tgt := btrfsprim.Key{
- ObjectID: objID,
- ItemType: typ,
- Offset: off,
- }
if _, ok := o.rebuilt.Load(ctx, treeID, tgt); ok {
return true
}
// OK, we need to insert it
- ctx = dlog.WithField(ctx, "want_key", fmt.Sprintf("tree=%v key=%v", treeID, tgt))
wants := make(containers.Set[btrfsvol.LogicalAddr])
o.rebuilt.Keys(treeID).Subrange(
func(k btrfsprim.Key, _ keyio.ItemPtr) int { return tgt.Cmp(k) },
@@ -450,9 +455,13 @@ func (o *rebuilder) _wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID
}
// wantFunc implements rebuildCallbacks.
-func (o *rebuilder) wantFunc(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(btrfsitem.Item) bool) {
+func (o *rebuilder) wantFunc(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(btrfsitem.Item) bool) {
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason)
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key",
+ fmt.Sprintf("tree=%v key={%v %v ?} +func", treeID, objID, typ))
+
if !o.rebuilt.AddTree(ctx, treeID) {
- o.queue = append(o.queue, o.curKey)
+ o.itemQueue = append(o.itemQueue, o.curKey)
return
}
@@ -478,12 +487,11 @@ func (o *rebuilder) wantFunc(ctx context.Context, treeID btrfsprim.ObjID, objID
// OK, we need to insert it
- ctx = dlog.WithField(ctx, "want_key", fmt.Sprintf("tree=%v key=%v +func", treeID, tgt))
wants := make(containers.Set[btrfsvol.LogicalAddr])
o.rebuilt.Keys(treeID).Subrange(
func(k btrfsprim.Key, _ keyio.ItemPtr) int { k.Offset = 0; return tgt.Cmp(k) },
func(k btrfsprim.Key, v keyio.ItemPtr) bool {
- itemBody, ok := o.keyIO.ReadItem(v)
+ itemBody, ok := o.keyIO.ReadItem(ctx, v)
if !ok {
o.ioErr(ctx, fmt.Errorf("could not read previously read item: %v at %v", k, v))
}
@@ -500,8 +508,11 @@ func (o *rebuilder) _wantRange(
treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType,
beg, end uint64,
) {
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key",
+ fmt.Sprintf("tree=%v key={%v %v ?}", treeID, objID, typ))
+
if !o.rebuilt.AddTree(ctx, treeID) {
- o.queue = append(o.queue, o.curKey)
+ o.itemQueue = append(o.itemQueue, o.curKey)
return
}
@@ -644,22 +655,23 @@ func (o *rebuilder) _wantRange(
// TODO: This is dumb and greedy.
if last < runBeg {
// log an error
- o.wantAugment(dlog.WithField(ctx, "want_key", fmt.Sprintf("tree=%v key={%v, %v, %v-%v}",
- treeID, objID, typ, last, runBeg)),
- treeID, nil)
+ wantCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key",
+ fmt.Sprintf("tree=%v key={%v %v %v-%v}", treeID, objID, typ, last, runBeg))
+ o.wantAugment(wantCtx, treeID, nil)
}
- o.wantAugment(dlog.WithField(ctx, "want_key", fmt.Sprintf("tree=%v key={%v, %v, %v-%v}",
- treeID, objID, typ, gap.Beg, gap.End)),
- treeID, o.rebuilt.LeafToRoots(ctx, treeID, v.Node))
+ wantCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key",
+ fmt.Sprintf("tree=%v key={%v %v %v-%v}", treeID, objID, typ, gap.Beg, gap.End))
+ o.wantAugment(wantCtx, treeID, o.rebuilt.LeafToRoots(ctx, treeID, v.Node))
last = runEnd
return true
})
if last < gap.End {
// log an error
- o.wantAugment(dlog.WithField(ctx, "want_key", fmt.Sprintf("tree=%v key={%v, %v, %v-%v}",
- treeID, objID, typ, last, gap.End)),
- treeID, nil)
+ wantCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key",
+				fmt.Sprintf("tree=%v key={%v %v %v-%v}",
+ treeID, objID, typ, last, gap.End))
+ o.wantAugment(wantCtx, treeID, nil)
}
return nil
})
@@ -668,14 +680,16 @@ func (o *rebuilder) _wantRange(
// func implements rebuildCallbacks.
//
// interval is [beg, end)
-func (o *rebuilder) wantCSum(ctx context.Context, beg, end btrfsvol.LogicalAddr) {
+func (o *rebuilder) wantCSum(ctx context.Context, reason string, beg, end btrfsvol.LogicalAddr) {
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason)
const treeID = btrfsprim.CSUM_TREE_OBJECTID
o._wantRange(ctx, treeID, btrfsprim.EXTENT_CSUM_OBJECTID, btrfsprim.EXTENT_CSUM_KEY,
uint64(beg), uint64(end))
}
// wantFileExt implements rebuildCallbacks.
-func (o *rebuilder) wantFileExt(ctx context.Context, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) {
+func (o *rebuilder) wantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) {
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason)
o._wantRange(ctx, treeID, ino, btrfsprim.EXTENT_DATA_KEY,
0, uint64(size))
}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go
index db7a7a5..45c9c97 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go
@@ -9,8 +9,6 @@ import (
"fmt"
"reflect"
- "github.com/datawire/dlib/dlog"
-
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
@@ -20,40 +18,37 @@ import (
type rebuildCallbacks interface {
fsErr(ctx context.Context, e error)
- want(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType)
- wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64)
- wantFunc(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(btrfsitem.Item) bool)
- wantCSum(ctx context.Context, beg, end btrfsvol.LogicalAddr) // interval is [beg, end)
- wantFileExt(ctx context.Context, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64)
+ want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType)
+ wantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64)
+ wantFunc(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(btrfsitem.Item) bool)
+ wantCSum(ctx context.Context, reason string, beg, end btrfsvol.LogicalAddr) // interval is [beg, end)
+ wantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64)
}
func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, item btrfstree.Item) {
- ctx = dlog.WithField(ctx, "tree", treeID)
- ctx = dlog.WithField(ctx, "key", item.Key)
-
// Notionally, just express the relationships shown in
// https://btrfs.wiki.kernel.org/index.php/File:References.png (from the page
// https://btrfs.wiki.kernel.org/index.php/Data_Structures )
switch body := item.Body.(type) {
case btrfsitem.BlockGroup:
- o.want(dlog.WithField(ctx, "wants", "Chunk"),
+ o.want(ctx, "Chunk",
btrfsprim.CHUNK_TREE_OBJECTID,
body.ChunkObjectID,
btrfsitem.CHUNK_ITEM_KEY)
- o.wantOff(dlog.WithField(ctx, "wants", "FreeSpaceInfo"),
+ o.wantOff(ctx, "FreeSpaceInfo",
btrfsprim.FREE_SPACE_TREE_OBJECTID,
item.Key.ObjectID,
btrfsitem.FREE_SPACE_INFO_KEY,
item.Key.Offset)
case btrfsitem.Chunk:
- o.want(dlog.WithField(ctx, "wants", "owning Root"),
+ o.want(ctx, "owning Root",
btrfsprim.ROOT_TREE_OBJECTID,
body.Head.Owner,
btrfsitem.ROOT_ITEM_KEY)
case btrfsitem.Dev:
// nothing
case btrfsitem.DevExtent:
- o.wantOff(dlog.WithField(ctx, "wants", "Chunk"),
+ o.wantOff(ctx, "Chunk",
body.ChunkTree,
body.ChunkObjectID,
btrfsitem.CHUNK_ITEM_KEY,
@@ -62,7 +57,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
// nothing
case btrfsitem.DirEntry:
// containing-directory
- o.wantOff(dlog.WithField(ctx, "wants", "containing dir inode"),
+ o.wantOff(ctx, "containing dir inode",
treeID,
item.Key.ObjectID,
btrfsitem.INODE_ITEM_KEY,
@@ -70,7 +65,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
// siblings
switch item.Key.ItemType {
case btrfsitem.DIR_ITEM_KEY:
- o.wantFunc(dlog.WithField(ctx, "wants", "corresponding DIR_INDEX"),
+ o.wantFunc(ctx, "corresponding DIR_INDEX",
treeID,
item.Key.ObjectID,
btrfsitem.DIR_INDEX_KEY,
@@ -78,7 +73,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
return reflect.DeepEqual(item, peerItem)
})
case btrfsitem.DIR_INDEX_KEY:
- o.wantOff(dlog.WithField(ctx, "wants", "corresponding DIR_ITEM"),
+ o.wantOff(ctx, "corresponding DIR_ITEM",
treeID,
item.Key.ObjectID,
btrfsitem.DIR_ITEM_KEY,
@@ -95,18 +90,18 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
if body.Location != (btrfsprim.Key{}) {
switch body.Location.ItemType {
case btrfsitem.INODE_ITEM_KEY:
- o.wantOff(dlog.WithField(ctx, "wants", "item being pointed to"),
+ o.wantOff(ctx, "item being pointed to",
treeID,
body.Location.ObjectID,
body.Location.ItemType,
body.Location.Offset)
- o.wantOff(dlog.WithField(ctx, "wants", "backref from item being pointed to"),
+ o.wantOff(ctx, "backref from item being pointed to",
treeID,
body.Location.ObjectID,
btrfsitem.INODE_REF_KEY,
uint64(item.Key.ObjectID))
case btrfsitem.ROOT_ITEM_KEY:
- o.want(dlog.WithField(ctx, "wants", "Root of subvolume being pointed to"),
+ o.want(ctx, "Root of subvolume being pointed to",
btrfsprim.ROOT_TREE_OBJECTID,
body.Location.ObjectID,
body.Location.ItemType)
@@ -129,12 +124,12 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
case nil:
// nothing
case btrfsitem.ExtentDataRef:
- o.wantOff(dlog.WithField(ctx, "wants", "referencing Inode"),
+ o.wantOff(ctx, "referencing Inode",
refBody.Root,
refBody.ObjectID,
btrfsitem.INODE_ITEM_KEY,
0)
- o.wantOff(dlog.WithField(ctx, "wants", "referencing FileExtent"),
+ o.wantOff(ctx, "referencing FileExtent",
refBody.Root,
refBody.ObjectID,
btrfsitem.EXTENT_DATA_KEY,
@@ -150,22 +145,22 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
case btrfsitem.ExtentCSum:
// nothing
case btrfsitem.ExtentDataRef:
- o.want(dlog.WithField(ctx, "wants", "Extent being referenced"),
+ o.want(ctx, "Extent being referenced",
btrfsprim.EXTENT_TREE_OBJECTID,
item.Key.ObjectID,
btrfsitem.EXTENT_ITEM_KEY)
- o.wantOff(dlog.WithField(ctx, "wants", "referencing Inode"),
+ o.wantOff(ctx, "referencing Inode",
body.Root,
body.ObjectID,
btrfsitem.INODE_ITEM_KEY,
0)
- o.wantOff(dlog.WithField(ctx, "wants", "referencing FileExtent"),
+ o.wantOff(ctx, "referencing FileExtent",
body.Root,
body.ObjectID,
btrfsitem.EXTENT_DATA_KEY,
uint64(body.Offset))
case btrfsitem.FileExtent:
- o.wantOff(dlog.WithField(ctx, "wants", "containing Inode"),
+ o.wantOff(ctx, "containing Inode",
treeID,
item.Key.ObjectID,
btrfsitem.INODE_ITEM_KEY,
@@ -175,63 +170,63 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
// nothing
case btrfsitem.FILE_EXTENT_REG, btrfsitem.FILE_EXTENT_PREALLOC:
// TODO: Check if inodeBody.Flags.Has(btrfsitem.INODE_NODATASUM)
- o.wantCSum(dlog.WithField(ctx, "wants", "data sum"),
+ o.wantCSum(ctx, "data sum",
roundDown(body.BodyExtent.DiskByteNr, btrfssum.BlockSize),
roundUp(body.BodyExtent.DiskByteNr.Add(body.BodyExtent.DiskNumBytes), btrfssum.BlockSize))
default:
o.fsErr(ctx, fmt.Errorf("FileExtent: unexpected body.Type=%v", body.Type))
}
case btrfsitem.FreeSpaceBitmap:
- o.wantOff(dlog.WithField(ctx, "wants", "FreeSpaceInfo"),
+ o.wantOff(ctx, "FreeSpaceInfo",
treeID,
item.Key.ObjectID,
btrfsitem.FREE_SPACE_INFO_KEY,
item.Key.Offset)
case btrfsitem.FreeSpaceHeader:
- o.wantOff(dlog.WithField(ctx, "wants", ".Location"),
+ o.wantOff(ctx, ".Location",
treeID,
body.Location.ObjectID,
body.Location.ItemType,
body.Location.Offset)
case btrfsitem.FreeSpaceInfo:
if body.Flags.Has(btrfsitem.FREE_SPACE_USING_BITMAPS) {
- o.wantOff(dlog.WithField(ctx, "wants", "FreeSpaceBitmap"),
+ o.wantOff(ctx, "FreeSpaceBitmap",
treeID,
item.Key.ObjectID,
btrfsitem.FREE_SPACE_BITMAP_KEY,
item.Key.Offset)
}
case btrfsitem.Inode:
- o.want(dlog.WithField(ctx, "wants", "backrefs"),
+ o.want(ctx, "backrefs",
treeID, // TODO: validate the number of these against body.NLink
item.Key.ObjectID,
btrfsitem.INODE_REF_KEY)
- o.wantFileExt(dlog.WithField(ctx, "wants", "FileExtents"),
+ o.wantFileExt(ctx, "FileExtents",
treeID, item.Key.ObjectID, body.Size)
if body.BlockGroup != 0 {
- o.want(dlog.WithField(ctx, "wants", "BlockGroup"),
+ o.want(ctx, "BlockGroup",
btrfsprim.EXTENT_TREE_OBJECTID,
body.BlockGroup,
btrfsitem.BLOCK_GROUP_ITEM_KEY)
}
case btrfsitem.InodeRefs:
- o.wantOff(dlog.WithField(ctx, "wants", "child Inode"),
+ o.wantOff(ctx, "child Inode",
treeID,
item.Key.ObjectID,
btrfsitem.INODE_ITEM_KEY,
0)
- o.wantOff(dlog.WithField(ctx, "wants", "parent Inode"),
+ o.wantOff(ctx, "parent Inode",
treeID,
btrfsprim.ObjID(item.Key.Offset),
btrfsitem.INODE_ITEM_KEY,
0)
for _, ref := range body {
- o.wantOff(dlog.WithField(ctx, "wants", "DIR_ITEM"),
+ o.wantOff(ctx, "DIR_ITEM",
treeID,
btrfsprim.ObjID(item.Key.Offset),
btrfsitem.DIR_ITEM_KEY,
btrfsitem.NameHash(ref.Name))
- o.wantOff(dlog.WithField(ctx, "wants", "DIR_INDEX"),
+ o.wantOff(ctx, "DIR_INDEX",
treeID,
btrfsprim.ObjID(item.Key.Offset),
btrfsitem.DIR_INDEX_KEY,
@@ -243,12 +238,12 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
case nil:
// nothing
case btrfsitem.ExtentDataRef:
- o.wantOff(dlog.WithField(ctx, "wants", "referencing INode"),
+ o.wantOff(ctx, "referencing INode",
refBody.Root,
refBody.ObjectID,
btrfsitem.INODE_ITEM_KEY,
0)
- o.wantOff(dlog.WithField(ctx, "wants", "referencing FileExtent"),
+ o.wantOff(ctx, "referencing FileExtent",
refBody.Root,
refBody.ObjectID,
btrfsitem.EXTENT_DATA_KEY,
@@ -263,7 +258,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
}
case btrfsitem.Root:
if body.RootDirID != 0 {
- o.wantOff(dlog.WithField(ctx, "wants", "root directory"),
+ o.wantOff(ctx, "root directory",
item.Key.ObjectID,
body.RootDirID,
btrfsitem.INODE_ITEM_KEY,
@@ -271,7 +266,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
}
if body.UUID != (btrfsprim.UUID{}) {
key := btrfsitem.UUIDToKey(body.UUID)
- o.wantOff(dlog.WithField(ctx, "wants", "uuid"),
+ o.wantOff(ctx, "uuid",
btrfsprim.UUID_TREE_OBJECTID,
key.ObjectID,
key.ItemType,
@@ -279,7 +274,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
}
if body.ParentUUID != (btrfsprim.UUID{}) {
key := btrfsitem.UUIDToKey(body.ParentUUID)
- o.wantOff(dlog.WithField(ctx, "wants", "parent uuid"),
+ o.wantOff(ctx, "parent uuid",
btrfsprim.UUID_TREE_OBJECTID,
key.ObjectID,
key.ItemType,
@@ -304,43 +299,43 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
panic(fmt.Errorf("should not happen: RootRef: unexpected ItemType=%v", item.Key.ItemType))
}
// sibling
- o.wantOff(dlog.WithField(ctx, "wants", fmt.Sprintf("corresponding %v", otherType)),
+ o.wantOff(ctx, fmt.Sprintf("corresponding %v", otherType),
treeID,
btrfsprim.ObjID(item.Key.Offset),
otherType,
uint64(item.Key.ObjectID))
// parent
- o.want(dlog.WithField(ctx, "wants", "parent subvolume: Root"),
+ o.want(ctx, "parent subvolume: Root",
treeID,
parent,
btrfsitem.ROOT_ITEM_KEY)
- o.wantOff(dlog.WithField(ctx, "wants", "parent subvolume: Inode of parent dir"),
+ o.wantOff(ctx, "parent subvolume: Inode of parent dir",
parent,
body.DirID,
btrfsitem.INODE_ITEM_KEY,
0)
- o.wantOff(dlog.WithField(ctx, "wants", "parent subvolume: DIR_ITEM in parent dir"),
+ o.wantOff(ctx, "parent subvolume: DIR_ITEM in parent dir",
parent,
body.DirID,
btrfsitem.DIR_ITEM_KEY,
btrfsitem.NameHash(body.Name))
- o.wantOff(dlog.WithField(ctx, "wants", "parent subvolume: DIR_INDEX in parent dir"),
+ o.wantOff(ctx, "parent subvolume: DIR_INDEX in parent dir",
parent,
body.DirID,
btrfsitem.DIR_INDEX_KEY,
uint64(body.Sequence))
// child
- o.want(dlog.WithField(ctx, "wants", "child subvolume: Root"),
+ o.want(ctx, "child subvolume: Root",
treeID,
child,
btrfsitem.ROOT_ITEM_KEY)
case btrfsitem.SharedDataRef:
- o.want(dlog.WithField(ctx, "wants", "Extent"),
+ o.want(ctx, "Extent",
btrfsprim.EXTENT_TREE_OBJECTID,
item.Key.ObjectID,
btrfsitem.EXTENT_ITEM_KEY)
case btrfsitem.UUIDMap:
- o.want(dlog.WithField(ctx, "wants", "subvolume Root"),
+ o.want(ctx, "subvolume Root",
btrfsprim.ROOT_TREE_OBJECTID,
body.ObjID,
btrfsitem.ROOT_ITEM_KEY)
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go
index 5c2d0fd..7a112b4 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go
@@ -6,7 +6,6 @@ package rebuildnodes
import (
"context"
- "fmt"
"time"
"github.com/datawire/dlib/dlog"
@@ -21,16 +20,6 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
-type scanStats struct {
- N, D int
-}
-
-func (s scanStats) String() string {
- return fmt.Sprintf("... %v%% (%v/%v)",
- int(100*float64(s.N)/float64(s.D)),
- s.N, s.D)
-}
-
func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.ScanDevicesResult) (graph.Graph, *keyio.Handle, error) {
dlog.Infof(ctx, "Reading node data from FS...")
@@ -39,17 +28,16 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.Sca
return graph.Graph{}, nil, err
}
- total := countNodes(scanResults)
- done := 0
- progressWriter := textui.NewProgress[scanStats](ctx, dlog.LogLevelInfo, 1*time.Second)
- progress := func(done, total int) {
- progressWriter.Set(scanStats{N: done, D: total})
- }
+ var stats textui.Portion[int]
+ stats.D = countNodes(scanResults)
+ progressWriter := textui.NewProgress[textui.Portion[int]](
+ dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.read.substep", "read-nodes"),
+ dlog.LogLevelInfo, 1*time.Second)
nodeGraph := graph.New(*sb)
keyIO := keyio.NewHandle(fs, *sb)
- progress(done, total)
+ progressWriter.Set(stats)
for _, devResults := range scanResults {
for laddr := range devResults.FoundNodes {
nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, *sb, laddr, btrfstree.NodeExpectations{
@@ -62,24 +50,19 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.Sca
nodeGraph.InsertNode(nodeRef)
keyIO.InsertNode(nodeRef)
- done++
- progress(done, total)
+ stats.N++
+ progressWriter.Set(stats)
}
}
- if done != total {
+ if stats.N != stats.D {
panic("should not happen")
}
progressWriter.Done()
dlog.Info(ctx, "... done reading node data")
- progressWriter = textui.NewProgress[scanStats](ctx, dlog.LogLevelInfo, 1*time.Second)
- dlog.Infof(ctx, "Checking keypointers for dead-ends...")
- if err := nodeGraph.FinalCheck(fs, *sb, progress); err != nil {
+ if err := nodeGraph.FinalCheck(ctx, fs, *sb); err != nil {
return graph.Graph{}, nil, err
}
- progressWriter.Done()
- dlog.Info(ctx, "... done checking keypointers")
-
keyIO.SetGraph(*nodeGraph)
return *nodeGraph, keyIO, nil
diff --git a/lib/btrfsprogs/btrfsinspect/scandevices.go b/lib/btrfsprogs/btrfsinspect/scandevices.go
index b6c929b..628995a 100644
--- a/lib/btrfsprogs/btrfsinspect/scandevices.go
+++ b/lib/btrfsprogs/btrfsinspect/scandevices.go
@@ -34,7 +34,7 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS) (ScanDevicesResult, error) {
for id, dev := range fs.LV.PhysicalVolumes() {
id := id
dev := dev
- grp.Go(dev.Name(), func(ctx context.Context) error {
+ grp.Go(fmt.Sprintf("dev-%d", id), func(ctx context.Context) error {
sb, err := dev.Superblock()
if err != nil {
return err
@@ -80,10 +80,7 @@ type SysExtentCSum struct {
}
type scanStats struct {
- DevName string
- DevSize btrfsvol.PhysicalAddr
-
- Pos btrfsvol.PhysicalAddr
+ textui.Portion[btrfsvol.PhysicalAddr]
NumFoundNodes int
NumFoundChunks int
@@ -93,10 +90,8 @@ type scanStats struct {
}
func (s scanStats) String() string {
- return fmt.Sprintf("... dev[%q] scanned %v%% (%d/%d) (found: %v nodes, %v chunks, %v block groups, %v dev extents, %v sum items)",
- s.DevName,
- int(100*float64(s.Pos)/float64(s.DevSize)),
- s.Pos, s.DevSize,
+ return textui.Sprintf("scanned %v (found: %v nodes, %v chunks, %v block groups, %v dev extents, %v sum items)",
+ s.Portion,
s.NumFoundNodes,
s.NumFoundChunks,
s.NumFoundBlockGroups,
@@ -107,6 +102,8 @@ func (s scanStats) String() string {
// ScanOneDevice mostly mimics btrfs-progs
// cmds/rescue-chunk-recover.c:scan_one_device().
func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblock) (ScanOneDeviceResult, error) {
+ ctx = dlog.WithField(ctx, "btrfsinspect.scandevices.dev", dev.Name())
+
result := ScanOneDeviceResult{
FoundNodes: make(map[btrfsvol.LogicalAddr][]btrfsvol.PhysicalAddr),
}
@@ -132,9 +129,10 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
progressWriter := textui.NewProgress[scanStats](ctx, dlog.LogLevelInfo, 1*time.Second)
progress := func(pos btrfsvol.PhysicalAddr) {
progressWriter.Set(scanStats{
- DevName: dev.Name(),
- DevSize: devSize,
- Pos: pos,
+ Portion: textui.Portion[btrfsvol.PhysicalAddr]{
+ N: pos,
+ D: devSize,
+ },
NumFoundNodes: len(result.FoundNodes),
NumFoundChunks: len(result.FoundChunks),
NumFoundBlockGroups: len(result.FoundBlockGroups),
@@ -171,7 +169,7 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
nodeRef, err := btrfstree.ReadNode[btrfsvol.PhysicalAddr](dev, sb, pos, btrfstree.NodeExpectations{})
if err != nil {
if !errors.Is(err, btrfstree.ErrNotANode) {
- dlog.Errorf(ctx, "... dev[%q] error: %v", dev.Name(), err)
+ dlog.Errorf(ctx, "error: %v", err)
}
} else {
result.FoundNodes[nodeRef.Data.Head.Addr] = append(result.FoundNodes[nodeRef.Data.Head.Addr], nodeRef.Addr)
@@ -180,12 +178,12 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
case btrfsitem.CHUNK_ITEM_KEY:
chunk, ok := item.Body.(btrfsitem.Chunk)
if !ok {
- dlog.Errorf(ctx, "... dev[%q] node@%v: item %v: error: type is CHUNK_ITEM_KEY, but struct is %T",
- dev.Name(), nodeRef.Addr, i, item.Body)
+ dlog.Errorf(ctx, "node@%v: item %v: error: type is CHUNK_ITEM_KEY, but struct is %T",
+ nodeRef.Addr, i, item.Body)
continue
}
- //dlog.Tracef(ctx, "... dev[%q] node@%v: item %v: found chunk",
- // dev.Name(), nodeRef.Addr, i)
+ dlog.Tracef(ctx, "node@%v: item %v: found chunk",
+ nodeRef.Addr, i)
result.FoundChunks = append(result.FoundChunks, btrfstree.SysChunk{
Key: item.Key,
Chunk: chunk,
@@ -193,12 +191,12 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
case btrfsitem.BLOCK_GROUP_ITEM_KEY:
bg, ok := item.Body.(btrfsitem.BlockGroup)
if !ok {
- dlog.Errorf(ctx, "... dev[%q] node@%v: item %v: error: type is BLOCK_GROUP_ITEM_KEY, but struct is %T",
- dev.Name(), nodeRef.Addr, i, item.Body)
+ dlog.Errorf(ctx, "node@%v: item %v: error: type is BLOCK_GROUP_ITEM_KEY, but struct is %T",
+ nodeRef.Addr, i, item.Body)
continue
}
- //dlog.Tracef(ctx, "... dev[%q] node@%v: item %v: found block group",
- // dev.Name(), nodeRef.Addr, i)
+ dlog.Tracef(ctx, "node@%v: item %v: found block group",
+ nodeRef.Addr, i)
result.FoundBlockGroups = append(result.FoundBlockGroups, SysBlockGroup{
Key: item.Key,
BG: bg,
@@ -206,12 +204,12 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
case btrfsitem.DEV_EXTENT_KEY:
devext, ok := item.Body.(btrfsitem.DevExtent)
if !ok {
- dlog.Errorf(ctx, "... dev[%q] node@%v: item %v: error: type is DEV_EXTENT_KEY, but struct is %T",
- dev.Name(), nodeRef.Addr, i, item.Body)
+ dlog.Errorf(ctx, "node@%v: item %v: error: type is DEV_EXTENT_KEY, but struct is %T",
+ nodeRef.Addr, i, item.Body)
continue
}
- //dlog.Tracef(ctx, "... dev[%q] node@%v: item %v: found dev extent",
- // dev.Name(), nodeRef.Addr, i)
+ dlog.Tracef(ctx, "node@%v: item %v: found dev extent",
+ nodeRef.Addr, i)
result.FoundDevExtents = append(result.FoundDevExtents, SysDevExtent{
Key: item.Key,
DevExt: devext,
@@ -219,12 +217,12 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
case btrfsitem.EXTENT_CSUM_KEY:
sums, ok := item.Body.(btrfsitem.ExtentCSum)
if !ok {
- dlog.Errorf(ctx, "... dev[%q] node@%v: item %v: error: type is EXTENT_CSUM_OBJECTID, but struct is %T",
- dev.Name(), nodeRef.Addr, i, item.Body)
+ dlog.Errorf(ctx, "node@%v: item %v: error: type is EXTENT_CSUM_OBJECTID, but struct is %T",
+ nodeRef.Addr, i, item.Body)
continue
}
- //dlog.Tracef(ctx, "... dev[%q] node@%v: item %v: found csums",
- // dev.Name(), nodeRef.Addr, i)
+ dlog.Tracef(ctx, "node@%v: item %v: found csums",
+ nodeRef.Addr, i)
result.FoundExtentCSums = append(result.FoundExtentCSums, SysExtentCSum{
Generation: nodeRef.Data.Head.Generation,
Sums: sums,
diff --git a/lib/textui/log.go b/lib/textui/log.go
new file mode 100644
index 0000000..5ddc7a8
--- /dev/null
+++ b/lib/textui/log.go
@@ -0,0 +1,398 @@
+// Copyright (C) 2019-2022 Ambassador Labs
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Contains code based on:
+// https://github.com/datawire/dlib/blob/b09ab2e017e16d261f05fff5b3b860d645e774d4/dlog/logger_logrus.go
+// https://github.com/datawire/dlib/blob/b09ab2e017e16d261f05fff5b3b860d645e774d4/dlog/logger_testing.go
+// https://github.com/telepresenceio/telepresence/blob/ece94a40b00a90722af36b12e40f91cbecc0550c/pkg/log/formatter.go
+
+package textui
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unicode"
+
+ "github.com/datawire/dlib/dlog"
+ "github.com/spf13/pflag"
+)
+
+type LogLevelFlag struct {
+ Level dlog.LogLevel
+}
+
+var _ pflag.Value = (*LogLevelFlag)(nil)
+
+// Type implements pflag.Value.
+func (lvl *LogLevelFlag) Type() string { return "loglevel" }
+
+// Type implements pflag.Value.
+func (lvl *LogLevelFlag) Set(str string) error {
+ switch strings.ToLower(str) {
+ case "error":
+ lvl.Level = dlog.LogLevelError
+ case "warn", "warning":
+ lvl.Level = dlog.LogLevelWarn
+ case "info":
+ lvl.Level = dlog.LogLevelInfo
+ case "debug":
+ lvl.Level = dlog.LogLevelDebug
+ case "trace":
+ lvl.Level = dlog.LogLevelTrace
+ default:
+ return fmt.Errorf("invalid log level: %q", str)
+ }
+ return nil
+}
+
+// Type implements pflag.Value.
+func (lvl *LogLevelFlag) String() string {
+ switch lvl.Level {
+ case dlog.LogLevelError:
+ return "error"
+ case dlog.LogLevelWarn:
+ return "warn"
+ case dlog.LogLevelInfo:
+ return "info"
+ case dlog.LogLevelDebug:
+ return "debug"
+ case dlog.LogLevelTrace:
+ return "trace"
+ default:
+ panic(fmt.Errorf("invalid log level: %#v", lvl.Level))
+ }
+}
+
+type logger struct {
+ parent *logger
+ out io.Writer
+ lvl dlog.LogLevel
+
+ // only valid if parent is non-nil
+ fieldKey string
+ fieldVal any
+}
+
+var _ dlog.OptimizedLogger = (*logger)(nil)
+
+func NewLogger(out io.Writer, lvl dlog.LogLevel) dlog.Logger {
+ return &logger{
+ out: out,
+ lvl: lvl,
+ }
+}
+
+// Helper implements dlog.Logger.
+func (l *logger) Helper() {}
+
+// WithField implements dlog.Logger.
+func (l *logger) WithField(key string, value any) dlog.Logger {
+ return &logger{
+ parent: l,
+ out: l.out,
+ lvl: l.lvl,
+
+ fieldKey: key,
+ fieldVal: value,
+ }
+}
+
+type logWriter struct {
+ log *logger
+ lvl dlog.LogLevel
+}
+
+// Write implements io.Writer.
+func (lw logWriter) Write(data []byte) (int, error) {
+ lw.log.log(lw.lvl, func(w io.Writer) {
+ _, _ = w.Write(data)
+ })
+ return len(data), nil
+}
+
+// StdLogger implements dlog.Logger.
+func (l *logger) StdLogger(lvl dlog.LogLevel) *log.Logger {
+ return log.New(logWriter{log: l, lvl: lvl}, "", 0)
+}
+
+// Log implements dlog.Logger.
+func (l *logger) Log(lvl dlog.LogLevel, msg string) {
+ panic("should not happen: optimized log methods should be used instead")
+}
+
+// UnformattedLog implements dlog.OptimizedLogger.
+func (l *logger) UnformattedLog(lvl dlog.LogLevel, args ...any) {
+ l.log(lvl, func(w io.Writer) {
+ printer.Fprint(w, args...)
+ })
+}
+
+// UnformattedLogln implements dlog.OptimizedLogger.
+func (l *logger) UnformattedLogln(lvl dlog.LogLevel, args ...any) {
+ l.log(lvl, func(w io.Writer) {
+ printer.Fprintln(w, args...)
+ })
+}
+
+// UnformattedLogf implements dlog.OptimizedLogger.
+func (l *logger) UnformattedLogf(lvl dlog.LogLevel, format string, args ...any) {
+ l.log(lvl, func(w io.Writer) {
+ printer.Fprintf(w, format, args...)
+ })
+}
+
+var (
+ logBuf bytes.Buffer
+ logMu sync.Mutex
+ thisModDir string
+)
+
+func init() {
+ _, file, _, _ := runtime.Caller(0)
+ thisModDir = filepath.Dir(filepath.Dir(filepath.Dir(file)))
+}
+
+func (l *logger) log(lvl dlog.LogLevel, writeMsg func(io.Writer)) {
+ // boilerplate /////////////////////////////////////////////////////////
+ if lvl > l.lvl {
+ return
+ }
+ // This is optimized for mostly-single-threaded usage. If I cared more
+ // about multi-threaded performance, I'd trade in some
+ // memory-use/allocations and (1) instead of using a static `logBuf`,
+ // I'd have a `logBufPool` `sync.Pool`, and (2) have the the final call
+ // to `l.out.Write()` be the only thing protected by `logMu`.
+ logMu.Lock()
+ defer logMu.Unlock()
+ defer logBuf.Reset()
+
+ // time ////////////////////////////////////////////////////////////////
+ now := time.Now()
+ const timeFmt = "2006-01-02 15:04:05.0000"
+ logBuf.WriteString(timeFmt)
+ now.AppendFormat(logBuf.Bytes()[:0], timeFmt)
+
+ // level ///////////////////////////////////////////////////////////////
+ switch lvl {
+ case dlog.LogLevelError:
+ logBuf.WriteString(" ERR")
+ case dlog.LogLevelWarn:
+ logBuf.WriteString(" WRN")
+ case dlog.LogLevelInfo:
+ logBuf.WriteString(" INF")
+ case dlog.LogLevelDebug:
+ logBuf.WriteString(" DBG")
+ case dlog.LogLevelTrace:
+ logBuf.WriteString(" TRC")
+ }
+
+ // fields (early) //////////////////////////////////////////////////////
+ fields := make(map[string]any)
+ var fieldKeys []string
+ for f := l; f.parent != nil; f = f.parent {
+ if _, exists := fields[f.fieldKey]; exists {
+ continue
+ }
+ fields[f.fieldKey] = f.fieldVal
+ fieldKeys = append(fieldKeys, f.fieldKey)
+ }
+ sort.Slice(fieldKeys, func(i, j int) bool {
+ iOrd := fieldOrd(fieldKeys[i])
+ jOrd := fieldOrd(fieldKeys[j])
+ if iOrd != jOrd {
+ return iOrd < jOrd
+ }
+ return fieldKeys[i] < fieldKeys[j]
+ })
+ nextField := len(fieldKeys)
+ for i, fieldKey := range fieldKeys {
+ if fieldOrd(fieldKey) >= 0 {
+ nextField = i
+ break
+ }
+ writeField(&logBuf, fieldKey, fields[fieldKey])
+ }
+
+ // message /////////////////////////////////////////////////////////////
+ logBuf.WriteString(" : ")
+ writeMsg(&logBuf)
+
+ // fields (late) ///////////////////////////////////////////////////////
+ if nextField < len(fieldKeys) {
+ logBuf.WriteString(" :")
+ }
+ for _, fieldKey := range fieldKeys[nextField:] {
+ writeField(&logBuf, fieldKey, fields[fieldKey])
+ }
+
+ // caller //////////////////////////////////////////////////////////////
+ const (
+ thisModule = "git.lukeshu.com/btrfs-progs-ng"
+ thisPackage = "git.lukeshu.com/btrfs-progs-ng/lib/textui"
+ maximumCallerDepth int = 25
+ minimumCallerDepth int = 3 // runtime.Callers + .log + .Log
+ )
+ var pcs [maximumCallerDepth]uintptr
+ depth := runtime.Callers(minimumCallerDepth, pcs[:])
+ frames := runtime.CallersFrames(pcs[:depth])
+ for f, again := frames.Next(); again; f, again = frames.Next() {
+ if !strings.HasPrefix(f.Function, thisModule+"/") {
+ continue
+ }
+ if strings.HasPrefix(f.Function, thisPackage+".") {
+ continue
+ }
+ if nextField == len(fieldKeys) {
+ logBuf.WriteString(" :")
+ }
+ file := f.File[strings.LastIndex(f.File, thisModDir+"/")+len(thisModDir+"/"):]
+ fmt.Fprintf(&logBuf, " (from %s:%d)", file, f.Line)
+ break
+ }
+
+ // boilerplate /////////////////////////////////////////////////////////
+ logBuf.WriteByte('\n')
+ _, _ = l.out.Write(logBuf.Bytes())
+}
+
+// fieldOrd returns the sort-position for a given log-field-key. Lower return
+// values should be positioned on the left when logging, and higher values
+// should be positioned on the right; values <0 should be on the left of the log
+// message, while values ≥0 should be on the right of the log message.
+func fieldOrd(key string) int {
+ switch key {
+ // dlib ////////////////////////////////////////////////////////////////
+ case "THREAD": // dgroup
+ return -99
+ case "dexec.pid":
+ return -98
+ case "dexec.stream":
+ return -97
+ case "dexec.data":
+ return -96
+ case "dexec.err":
+ return -95
+
+ // btrfsinspect scandevices ////////////////////////////////////////////
+ case "btrfsinspect.scandevices.dev":
+ return -1
+
+ // btrfsinspect rebuild-mappings ///////////////////////////////////////
+ case "btrfsinspect.rebuild-mappings.step":
+ return -2
+ case "btrfsinspect.rebuild-mappings.substep":
+ return -1
+
+ // btrfsinspect rebuild-nodes //////////////////////////////////////////
+ case "btrfsinspect.rebuild-nodes.step":
+ return -50
+ // step=read-fs-data
+ case "btrfsinspect.rebuild-nodes.read.substep":
+ return -1
+ // step=rebuild
+ case "btrfsinspect.rebuild-nodes.rebuild.pass":
+ return -49
+ case "btrfsinspect.rebuild-nodes.rebuild.substep":
+ return -48
+ case "btrfsinspect.rebuild-nodes.rebuild.substep.progress":
+ return -47
+ // step=rebuild, substep=collect-items (1/3)
+ // step=rebuild, substep=process-items (2/3)
+ case "btrfsinspect.rebuild-nodes.rebuild.process.item":
+ return -25
+ // step=rebuild, substep=apply-augments (3/3)
+ case "btrfsinspect.rebuild-nodes.rebuild.augment.tree":
+ return -25
+ // step=rebuild (any substep)
+ case "btrfsinspect.rebuild-nodes.rebuild.want.key":
+ return -7
+ case "btrfsinspect.rebuild-nodes.rebuild.want.reason":
+ return -6
+ case "btrfsinspect.rebuild-nodes.rebuild.add-tree":
+ return -5
+ case "btrfsinspect.rebuild-nodes.rebuild.add-tree.substep":
+ return -4
+ case "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.key":
+ return -3
+ case "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.reason":
+ return -2
+ case "btrfsinspect.rebuild-nodes.rebuild.add-root":
+ return -1
+
+ // other ///////////////////////////////////////////////////////////////
+ case "btrfs.read-json-file":
+ return -1
+ default:
+ return 1
+ }
+}
+
+func writeField(w io.Writer, key string, val any) {
+ valStr := printer.Sprint(val)
+ needsQuote := false
+ if strings.HasPrefix(valStr, `"`) {
+ needsQuote = true
+ } else {
+ for _, r := range valStr {
+ if !(unicode.IsPrint(r) && r != ' ') {
+ needsQuote = true
+ break
+ }
+ }
+ }
+ if needsQuote {
+ valStr = strconv.Quote(valStr)
+ }
+
+ name := key
+
+ switch {
+ case name == "THREAD":
+ name = "thread"
+ switch valStr {
+ case "", "/main":
+ return
+ default:
+ if strings.HasPrefix(valStr, "/main/") {
+ valStr = strings.TrimPrefix(valStr, "/main")
+ }
+ }
+ case strings.HasSuffix(name, ".pass"):
+ fmt.Fprintf(w, "/pass-%s", valStr)
+ return
+ case strings.HasSuffix(name, ".substep") && name != "btrfsinspect.rebuild-nodes.rebuild.add-tree.substep":
+ fmt.Fprintf(w, "/%s", valStr)
+ return
+ case strings.HasPrefix(name, "btrfsinspect."):
+ name = strings.TrimPrefix(name, "btrfsinspect.")
+ switch {
+ case strings.HasPrefix(name, "scandevices."):
+ name = strings.TrimPrefix(name, "scandevices.")
+ case strings.HasPrefix(name, "rebuild-mappings."):
+ name = strings.TrimPrefix(name, "rebuild-mappings.")
+ case strings.HasPrefix(name, "rebuild-nodes."):
+ name = strings.TrimPrefix(name, "rebuild-nodes.")
+ switch {
+ case strings.HasPrefix(name, "read."):
+ name = strings.TrimPrefix(name, "read.")
+ case strings.HasPrefix(name, "rebuild."):
+ name = strings.TrimPrefix(name, "rebuild.")
+ }
+ }
+ case strings.HasPrefix(name, "btrfs."):
+ name = strings.TrimPrefix(name, "btrfs.")
+ }
+
+ fmt.Fprintf(w, " %s=%s", name, valStr)
+}
diff --git a/lib/textui/log_memstats.go b/lib/textui/log_memstats.go
new file mode 100644
index 0000000..39733c6
--- /dev/null
+++ b/lib/textui/log_memstats.go
@@ -0,0 +1,132 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package textui
+
+import (
+ "fmt"
+ "runtime"
+ "sync"
+ "time"
+)
+
+type LiveMemUse struct {
+ mu sync.Mutex
+ stats runtime.MemStats
+ last time.Time
+}
+
+var _ fmt.Stringer = (*LiveMemUse)(nil)
+
+const liveMemUseUpdateInterval = 1 * time.Second
+
+func (o *LiveMemUse) String() string {
+ o.mu.Lock()
+
+ // runtime.ReadMemStats() calls stopTheWorld(), so we want to
+ // rate-limit how often we call it.
+ if now := time.Now(); now.Sub(o.last) > liveMemUseUpdateInterval {
+ runtime.ReadMemStats(&o.stats)
+ o.last = now
+ }
+
+ // runtime.MemStats only knows about memory managed by the Go runtime;
+ // even for a pure Go program, there's also
+ //
+ // - memory mapped to the executable itself
+ // - vDSO and friends
+ //
+ // But those are pretty small, just a few MiB.
+ //
+ // OK, so: memory managed by the Go runtime. runtime.MemStats is pretty
+ // obtuse, I think it was designed more for "debugging the Go runtime"
+ // than "monitoring the behavior of a Go program". From the Go
+ // runtime's perspective, regions of the virtual address space are in
+ // one of 4 states (see `runtime/mem.go`):
+ //
+ // - None : not mapped
+ //
+ // - Reserved : mapped, but without r/w permissions (PROT_NONE); so
+ // this region isn't actually backed by anything
+ //
+ // - Prepared : mapped, but allowed to be collected by the OS
+ // (MADV_FREE, or MADV_DONTNEED on systems without MADV_FREE); so
+ // this region may or may not actually be backed by anything.
+ //
+ // - Ready : mapped, ready to be used
+ //
+ // Normal tools count Reserved+Prepared+Ready toward the VSS (which is a
+ // little silly, when inspecting /proc/{pid}/maps to calculate the VSS,
+ // IMO they should exclude maps without r/w permissions, which would
+ // exclude Reserved), but we all know that VSS numbers are over
+ // inflated. And RSS only useful if we fit in RAM and don't spill to
+ // swap (this is being written for btrfs-rec, which is quite likely to
+ // consume all RAM on a laptop). Useful numbers are Ready and Prepared;
+ // as I said above, outside tools reporting Ready+Prepared would be easy
+ // and useful, but none do; but I don't think outside tools have a way
+ // to distinguish between Ready and Prepared (unless you can detect
+ // MADV_FREE/MADV_DONTNEED in /proc/{pid}/smaps?).
+ //
+ // Of the 3 mapped states, here's how we get them from runtime:
+ //
+ // - Reserved : AFAICT, you can't :(
+ //
+ // - Prepared : `runtime.MemStats.HeapReleased`
+ //
+ // - Ready : `runtime.MemStats.Sys - runtime.MemStats.HeapReleased`
+ // (that is, runtime.MemStats.Sys is Prepared+Ready)
+ //
+ // It's a bummer that we can't get Reserved from runtime, but as I've
+ // said, it's not super useful; it's only use would really be
+ // cross-referencing runtime's numbers against the VSS.
+ //
+ // The godocs for runtime.MemStats.Sys say "It's likely that not all of
+ // the virtual address space is backed by physical memory at any given
+ // moment, though in general it all was at some point." That's both
+ // confusing and a lie. It's confusing because it doesn't give you
+ // hooks to find out more; it could have said that this is
+ // Ready+Prepared and that Prepared is the portion of the space that
+ // might not be backed by physical memory, but instead it wants you to
+ // throw your hands up and say "this is too weird for me to understand".
+ // It's a lie because "in general it all was at some point" implies that
+ // all Prepared memory was previously Ready, which is false; it can go
+ // None->Reserved->Prepared (but it only does that Reserved->Prepared
+ // transition if it thinks it will need to transition it to Ready very
+ // soon, so maybe the doc author though that was negligible?).
+ //
+ // Now, those are still pretty opaque numbers; most of runtime.MemStats
+ // goes toward accounting for what's going on inside of Ready space.
+ //
+ // For our purposes, we don't care too much about specifics of how Ready
+ // space is being used; just how much is "actually storing data", vs
+ // "overhead from heap-fragmentation", vs "idle".
+
+ var (
+ // We're going to add up all of the `o.stats.{thing}Sys`
+ // variables and check that against `o.stats.Sys`, in order to
+ // make sure that we're not missing any {thing} when adding up
+ // `inuse`.
+ calcSys = o.stats.HeapSys + o.stats.StackSys + o.stats.MSpanSys + o.stats.MCacheSys + o.stats.BuckHashSys + o.stats.GCSys + o.stats.OtherSys
+ inuse = o.stats.HeapInuse + o.stats.StackInuse + o.stats.MSpanInuse + o.stats.MCacheInuse + o.stats.BuckHashSys + o.stats.GCSys + o.stats.OtherSys
+ )
+ if calcSys != o.stats.Sys {
+ panic("should not happen")
+ }
+ prepared := o.stats.HeapReleased
+ ready := o.stats.Sys - prepared
+
+ readyFragOverhead := o.stats.HeapInuse - o.stats.HeapAlloc
+ readyData := inuse - readyFragOverhead
+ readyIdle := ready - inuse
+
+ o.mu.Unlock()
+
+ return Sprintf("Ready+Prepared=%.1f (Ready=%.1f (data:%.1f + fragOverhead:%.1f + idle:%.1f) ; Prepared=%.1f)",
+ IEC(ready+prepared, "B"),
+ IEC(ready, "B"),
+ IEC(readyData, "B"),
+ IEC(readyFragOverhead, "B"),
+ IEC(readyIdle, "B"),
+ IEC(prepared, "B"))
+}
diff --git a/lib/textui/log_test.go b/lib/textui/log_test.go
new file mode 100644
index 0000000..08eb38c
--- /dev/null
+++ b/lib/textui/log_test.go
@@ -0,0 +1,64 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package textui_test
+
+import (
+ "context"
+ "strings"
+ "testing"
+
+ "github.com/datawire/dlib/dlog"
+ "github.com/stretchr/testify/assert"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
+)
+
+func logLineRegexp(inner string) string {
+ return `[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]{4} ` + inner + ` \(from lib/textui/log_test\.go:[0-9]+\)\n`
+}
+
+func TestLogFormat(t *testing.T) {
+ var out strings.Builder
+ ctx := dlog.WithLogger(context.Background(), textui.NewLogger(&out, dlog.LogLevelTrace))
+ dlog.Debugf(ctx, "foo %d", 12345)
+ assert.Regexp(t,
+ `^`+logLineRegexp(`DBG : foo 12,345 :`)+`$`,
+ out.String())
+}
+
+func TestLogLevel(t *testing.T) {
+ var out strings.Builder
+ ctx := dlog.WithLogger(context.Background(), textui.NewLogger(&out, dlog.LogLevelInfo))
+ dlog.Error(ctx, "Error")
+ dlog.Warn(ctx, "Warn")
+ dlog.Info(ctx, "Info")
+ dlog.Debug(ctx, "Debug")
+ dlog.Trace(ctx, "Trace")
+ dlog.Trace(ctx, "Trace")
+ dlog.Debug(ctx, "Debug")
+ dlog.Info(ctx, "Info")
+ dlog.Warn(ctx, "Warn")
+ dlog.Error(ctx, "Error")
+ assert.Regexp(t,
+ `^`+
+ logLineRegexp(`ERR : Error :`)+
+ logLineRegexp(`WRN : Warn :`)+
+ logLineRegexp(`INF : Info :`)+
+ logLineRegexp(`INF : Info :`)+
+ logLineRegexp(`WRN : Warn :`)+
+ logLineRegexp(`ERR : Error :`)+
+ `$`,
+ out.String())
+}
+
+func TestLogField(t *testing.T) {
+ var out strings.Builder
+ ctx := dlog.WithLogger(context.Background(), textui.NewLogger(&out, dlog.LogLevelInfo))
+ ctx = dlog.WithField(ctx, "foo", 12345)
+ dlog.Info(ctx, "msg")
+ assert.Regexp(t,
+ `^`+logLineRegexp(`INF : msg : foo=12,345`)+`$`,
+ out.String())
+}
diff --git a/lib/textui/text.go b/lib/textui/text.go
new file mode 100644
index 0000000..d6a80b3
--- /dev/null
+++ b/lib/textui/text.go
@@ -0,0 +1,204 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package textui
+
+import (
+ "fmt"
+ "io"
+ "math"
+
+ "golang.org/x/exp/constraints"
+ "golang.org/x/text/language"
+ "golang.org/x/text/message"
+ "golang.org/x/text/number"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/fmtutil"
+)
+
+var printer = message.NewPrinter(language.English)
+
+// Fprintf is like `fmt.Fprintf`, but (1) includes the extensions of
+// `golang.org/x/text/message.Printer`, and (2) is useful for marking
+// when a print call is part of the UI, rather than something
+// internal.
+func Fprintf(w io.Writer, key string, a ...any) (n int, err error) {
+ return printer.Fprintf(w, key, a...)
+}
+
+// Sprintf is like `fmt.Sprintf`, but (1) includes the extensions of
+// `golang.org/x/text/message.Printer`, and (2) is useful for marking
+// when a sprint call is part of the UI, rather than something
+// internal.
+func Sprintf(key string, a ...any) string {
+ return printer.Sprintf(key, a...)
+}
+
+// Humanized wraps a value such that formatting of it can make use of
+// the `golang.org/x/text/message.Printer` extensions even when used
+// with plain-old `fmt`.
+func Humanized(x any) any {
+ return humanized{val: x}
+}
+
+type humanized struct {
+ val any
+}
+
+var (
+ _ fmt.Formatter = humanized{}
+ _ fmt.Stringer = humanized{}
+)
+
+// String implements fmt.Formatter.
+func (h humanized) Format(f fmt.State, verb rune) {
+ printer.Fprintf(f, fmtutil.FmtStateString(f, verb), h.val)
+}
+
+// String implements fmt.Stringer.
+func (h humanized) String() string {
+ return fmt.Sprint(h)
+}
+
+// Portion renders a fraction N/D as both a percentage and
+// parenthetically as the exact fractional value, rendered with
+// human-friendly commas.
+//
+// For example:
+//
+// fmt.Sprint(Portion[int]{N: 1, D: 12345}) ⇒ "0% (1/12,345)"
+type Portion[T constraints.Integer] struct {
+ N, D T
+}
+
+var (
+ _ fmt.Stringer = Portion[int]{}
+)
+
+// String implements fmt.Stringer.
+func (p Portion[T]) String() string {
+ pct := float64(1)
+ if p.D > 0 {
+ pct = float64(p.N) / float64(p.D)
+ }
+ return printer.Sprintf("%v (%v/%v)", number.Percent(pct), uint64(p.N), uint64(p.D))
+}
+
+type metric[T constraints.Integer | constraints.Float] struct {
+ Val T
+ Unit string
+}
+
+var (
+ _ fmt.Formatter = metric[int]{}
+ _ fmt.Stringer = metric[int]{}
+)
+
+func Metric[T constraints.Integer | constraints.Float](x T, unit string) metric[T] {
+ return metric[T]{
+ Val: x,
+ Unit: unit,
+ }
+}
+
// metricSmallPrefixes are the SI prefixes for successive division by
// 1000: milli down through quecto (including the 2022 additions
// ronto "r" and quecto "q").
var metricSmallPrefixes = []string{
	"m",
	"μ",
	"n",
	"p",
	"f",
	"a",
	"z",
	"y",
	"r",
	"q",
}

// metricBigPrefixes are the SI prefixes for successive multiplication
// by 1000: kilo up through quetta.
var metricBigPrefixes = []string{
	"k",
	"M",
	"G",
	"T",
	"P",
	"E",
	"Z",
	"Y",
	"R",
	"Q",
}
+
+// String implements fmt.Formatter.
+func (v metric[T]) Format(f fmt.State, verb rune) {
+ var prefix string
+ y := math.Abs(float64(v.Val))
+ if y < 1 {
+ for i := 0; y < 1 && i <= len(metricSmallPrefixes); i++ {
+ y *= 1000
+ prefix = metricSmallPrefixes[i]
+ }
+ } else {
+ for i := 0; y > 1000 && i <= len(metricBigPrefixes); i++ {
+ y /= 1000
+ prefix = metricBigPrefixes[i]
+ }
+ }
+ if v.Val < 0 {
+ y = -y
+ }
+ printer.Fprintf(f, fmtutil.FmtStateString(f, verb)+"%s%s",
+ y, prefix, v.Unit)
+}
+
+// String implements fmt.Stringer.
+func (v metric[T]) String() string {
+ return fmt.Sprint(v)
+}
+
+type iec[T constraints.Integer | constraints.Float] struct {
+ Val T
+ Unit string
+}
+
+var (
+ _ fmt.Formatter = iec[int]{}
+ _ fmt.Stringer = iec[int]{}
+)
+
+func IEC[T constraints.Integer | constraints.Float](x T, unit string) iec[T] {
+ return iec[T]{
+ Val: x,
+ Unit: unit,
+ }
+}
+
// iecPrefixes are the IEC binary prefixes for successive division by
// 1024: kibi (Ki) up through yobi (Yi).
var iecPrefixes = []string{
	"Ki",
	"Mi",
	"Gi",
	"Ti",
	"Pi",
	"Ei",
	"Zi",
	"Yi",
}
+
+// String implements fmt.Formatter.
+func (v iec[T]) Format(f fmt.State, verb rune) {
+ var prefix string
+ y := math.Abs(float64(v.Val))
+ for i := 0; y > 1024 && i <= len(iecPrefixes); i++ {
+ y /= 1024
+ prefix = iecPrefixes[i]
+ }
+ if v.Val < 0 {
+ y = -y
+ }
+ printer.Fprintf(f, fmtutil.FmtStateString(f, verb)+"%s%s",
+ number.Decimal(y), prefix, v.Unit)
+}
+
+// String implements fmt.Stringer.
+func (v iec[T]) String() string {
+ return fmt.Sprint(v)
+}
diff --git a/lib/textui/text_test.go b/lib/textui/text_test.go
new file mode 100644
index 0000000..c4b42f6
--- /dev/null
+++ b/lib/textui/text_test.go
@@ -0,0 +1,39 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package textui_test
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
+)
+
+func TestFprintf(t *testing.T) {
+ var out strings.Builder
+ textui.Fprintf(&out, "%d", 12345)
+ assert.Equal(t, "12,345", out.String())
+}
+
+func TestHumanized(t *testing.T) {
+ assert.Equal(t, "12,345", fmt.Sprint(textui.Humanized(12345)))
+ assert.Equal(t, "12,345 ", fmt.Sprintf("%-8d", textui.Humanized(12345)))
+
+ laddr := btrfsvol.LogicalAddr(345243543)
+ assert.Equal(t, "0x000000001493ff97", fmt.Sprintf("%v", textui.Humanized(laddr)))
+ assert.Equal(t, "345243543", fmt.Sprintf("%d", textui.Humanized(laddr)))
+ assert.Equal(t, "345,243,543", fmt.Sprintf("%d", textui.Humanized(uint64(laddr))))
+}
+
// TestPortion spot-checks Portion.String: a zero denominator renders
// as "100% (0/0)" rather than dividing by zero, and the denominator
// is rendered with thousands separators.
func TestPortion(t *testing.T) {
	assert.Equal(t, "100% (0/0)", fmt.Sprint(textui.Portion[int]{}))
	assert.Equal(t, "0% (1/12,345)", fmt.Sprint(textui.Portion[int]{N: 1, D: 12345}))
	assert.Equal(t, "100% (0/0)", fmt.Sprint(textui.Portion[btrfsvol.PhysicalAddr]{}))
	assert.Equal(t, "0% (1/12,345)", fmt.Sprint(textui.Portion[btrfsvol.PhysicalAddr]{N: 1, D: 12345}))
}
diff --git a/scripts/main.sh b/scripts/main.sh
index e44ae7f..160aa42 100755
--- a/scripts/main.sh
+++ b/scripts/main.sh
@@ -13,7 +13,7 @@ gen() (
)
set -x
-go build ./cmd/btrfs-rec
+CGO_ENABLED=0 go build -trimpath ./cmd/btrfs-rec
mkdir -p "$b.gen"
{ set +x; } &>/dev/null