Diffstat (limited to 'lib/btrfsprogs/btrfsinspect')
 lib/btrfsprogs/btrfsinspect/mount.go                               |  18
 lib/btrfsprogs/btrfsinspect/print_tree.go                          |  48
 lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go      |  17
 lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go           |   7
 lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go     |   9
 lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go  |   8
 lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go            |   4
 lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go            |   3
 lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go                |  16
 lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go          |  16
 lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go                   |   2
 lib/btrfsprogs/btrfsinspect/scandevices.go                         | 100
 12 files changed, 133 insertions(+), 115 deletions(-)
diff --git a/lib/btrfsprogs/btrfsinspect/mount.go b/lib/btrfsprogs/btrfsinspect/mount.go
index 2a0b232..ee3c0ec 100644
--- a/lib/btrfsprogs/btrfsinspect/mount.go
+++ b/lib/btrfsprogs/btrfsinspect/mount.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -144,11 +144,11 @@ func inodeItemToFUSE(itemBody btrfsitem.Inode) fuseops.InodeAttributes {
Size: uint64(itemBody.Size),
Nlink: uint32(itemBody.NLink),
Mode: uint32(itemBody.Mode),
- //RDev: itemBody.Rdev, // jacobsa/fuse doesn't expose rdev
+ // RDev: itemBody.Rdev, // jacobsa/fuse doesn't expose rdev
Atime: itemBody.ATime.ToStd(),
Mtime: itemBody.MTime.ToStd(),
Ctime: itemBody.CTime.ToStd(),
- //Crtime: itemBody.OTime,
+ // Crtime: itemBody.OTime,
Uid: uint32(itemBody.UID),
Gid: uint32(itemBody.GID),
}
@@ -168,7 +168,7 @@ func (sv *subvolume) LoadDir(inode btrfsprim.ObjID) (val *btrfs.Dir, err error)
if haveSubvolumes {
abspath, _err := val.AbsPath()
if _err != nil {
- return
+ return val, err
}
sv.subvolMu.Lock()
for _, index := range maps.SortedKeys(val.ChildrenByIndex) {
@@ -200,7 +200,7 @@ func (sv *subvolume) LoadDir(inode btrfsprim.ObjID) (val *btrfs.Dir, err error)
sv.subvolMu.Unlock()
}
}
- return
+ return val, err
}
func (sv *subvolume) StatFS(_ context.Context, op *fuseops.StatFSOp) error {
@@ -213,7 +213,7 @@ func (sv *subvolume) StatFS(_ context.Context, op *fuseops.StatFSOp) error {
op.IoSize = sb.SectorSize
op.BlockSize = sb.SectorSize
op.Blocks = sb.TotalBytes / uint64(sb.SectorSize) // TODO: adjust for RAID type
- //op.BlocksFree = TODO
+ // op.BlocksFree = TODO
// btrfs doesn't have a fixed number of inodes
op.Inodes = 0
@@ -260,7 +260,7 @@ func (sv *subvolume) LookUpInode(_ context.Context, op *fuseops.LookUpInodeOp) e
Child: 2, // an inode number that a real file will never have
Attributes: fuseops.InodeAttributes{
Nlink: 1,
- Mode: uint32(btrfsitem.ModeFmtDir | 0700),
+ Mode: uint32(btrfsitem.ModeFmtDir | 0o700), //nolint:gomnd // TODO
},
}
return nil
@@ -315,6 +315,7 @@ func (sv *subvolume) OpenDir(_ context.Context, op *fuseops.OpenDirOp) error {
op.Handle = handle
return nil
}
+
func (sv *subvolume) ReadDir(_ context.Context, op *fuseops.ReadDirOp) error {
state, ok := sv.dirHandles.Load(op.Handle)
if !ok {
@@ -348,6 +349,7 @@ func (sv *subvolume) ReadDir(_ context.Context, op *fuseops.ReadDirOp) error {
}
return nil
}
+
func (sv *subvolume) ReleaseDirHandle(_ context.Context, op *fuseops.ReleaseDirHandleOp) error {
_, ok := sv.dirHandles.LoadAndDelete(op.Handle)
if !ok {
@@ -369,6 +371,7 @@ func (sv *subvolume) OpenFile(_ context.Context, op *fuseops.OpenFileOp) error {
op.KeepPageCache = true
return nil
}
+
func (sv *subvolume) ReadFile(_ context.Context, op *fuseops.ReadFileOp) error {
state, ok := sv.fileHandles.Load(op.Handle)
if !ok {
@@ -392,6 +395,7 @@ func (sv *subvolume) ReadFile(_ context.Context, op *fuseops.ReadFileOp) error {
return err
}
+
func (sv *subvolume) ReleaseFileHandle(_ context.Context, op *fuseops.ReleaseFileHandleOp) error {
_, ok := sv.fileHandles.LoadAndDelete(op.Handle)
if !ok {
diff --git a/lib/btrfsprogs/btrfsinspect/print_tree.go b/lib/btrfsprogs/btrfsinspect/print_tree.go
index 6c31350..62d1d7b 100644
--- a/lib/btrfsprogs/btrfsinspect/print_tree.go
+++ b/lib/btrfsprogs/btrfsinspect/print_tree.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -142,8 +142,8 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
textui.Fprintf(out, "\t\tindex %v namelen %v name: %s\n",
ref.Index, ref.NameLen, ref.Name)
}
- //case btrfsitem.INODE_EXTREF_KEY:
- // // TODO
+ // case btrfsitem.INODE_EXTREF_KEY:
+ // // TODO
case btrfsitem.DirEntry:
textui.Fprintf(out, "\t\tlocation %v type %v\n",
fmtKey(body.Location), body.Type)
@@ -153,8 +153,8 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
if len(body.Data) > 0 {
textui.Fprintf(out, "\t\tdata %v\n", body.Data)
}
- //case btrfsitem.DIR_LOG_INDEX_KEY, btrfsitem.DIR_LOG_ITEM_KEY:
- // // TODO
+ // case btrfsitem.DIR_LOG_INDEX_KEY, btrfsitem.DIR_LOG_ITEM_KEY:
+ // // TODO
case btrfsitem.Root:
textui.Fprintf(out, "\t\tgeneration %v root_dirid %v bytenr %d byte_limit %v bytes_used %v\n",
body.Generation, body.RootDirID, body.ByteNr, body.ByteLimit, body.BytesUsed)
@@ -200,10 +200,10 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
body.Head.Refs, body.Head.Generation, body.Head.Flags)
textui.Fprintf(out, "\t\ttree block skinny level %v\n", item.Key.Offset)
printExtentInlineRefs(out, body.Refs)
- //case btrfsitem.EXTENT_DATA_REF_KEY:
- // // TODO
- //case btrfsitem.SHARED_DATA_REF_KEY:
- // // TODO
+ // case btrfsitem.EXTENT_DATA_REF_KEY:
+ // // TODO
+ // case btrfsitem.SHARED_DATA_REF_KEY:
+ // // TODO
case btrfsitem.ExtentCSum:
start := btrfsvol.LogicalAddr(item.Key.Offset)
textui.Fprintf(out, "\t\trange start %d end %d length %d",
@@ -291,16 +291,16 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
"\t\tchunk_tree_uuid %v\n",
body.ChunkTree, body.ChunkObjectID, body.ChunkOffset, body.Length,
body.ChunkTreeUUID)
- //case btrfsitem.QGROUP_STATUS_KEY:
- // // TODO
- //case btrfsitem.QGROUP_INFO_KEY:
- // // TODO
- //case btrfsitem.QGROUP_LIMIT_KEY:
- // // TODO
+ // case btrfsitem.QGROUP_STATUS_KEY:
+ // // TODO
+ // case btrfsitem.QGROUP_INFO_KEY:
+ // // TODO
+ // case btrfsitem.QGROUP_LIMIT_KEY:
+ // // TODO
case btrfsitem.UUIDMap:
textui.Fprintf(out, "\t\tsubvol_id %d\n", body.ObjID)
- //case btrfsitem.STRING_ITEM_KEY:
- // // TODO
+ // case btrfsitem.STRING_ITEM_KEY:
+ // // TODO
case btrfsitem.DevStats:
textui.Fprintf(out, "\t\tpersistent item objectid %v offset %v\n",
item.Key.ObjectID.Format(item.Key.ItemType), item.Key.Offset)
@@ -316,8 +316,8 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
default:
textui.Fprintf(out, "\t\tunknown persistent item objectid %v\n", item.Key.ObjectID)
}
- //case btrfsitem.TEMPORARY_ITEM_KEY:
- // // TODO
+ // case btrfsitem.TEMPORARY_ITEM_KEY:
+ // // TODO
case btrfsitem.Empty:
switch item.Key.ItemType {
case btrfsitem.ORPHAN_ITEM_KEY: // 48
@@ -330,10 +330,10 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
textui.Fprintf(out, "\t\tfree space extent\n")
case btrfsitem.QGROUP_RELATION_KEY: // 246
// do nothing
- //case btrfsitem.EXTENT_REF_V0_KEY:
- // textui.Fprintf(out, "\t\textent ref v0 (deprecated)\n")
- //case btrfsitem.CSUM_ITEM_KEY:
- // textui.Fprintf(out, "\t\tcsum item\n")
+ // case btrfsitem.EXTENT_REF_V0_KEY:
+ // textui.Fprintf(out, "\t\textent ref v0 (deprecated)\n")
+ // case btrfsitem.CSUM_ITEM_KEY:
+ // textui.Fprintf(out, "\t\tcsum item\n")
default:
textui.Fprintf(out, "\t\t(error) unhandled empty item type: %v\n", item.Key.ItemType)
}
@@ -426,7 +426,7 @@ func fmtKey(key btrfsprim.Key) string {
var out strings.Builder
textui.Fprintf(&out, "key (%v %v", key.ObjectID.Format(key.ItemType), key.ItemType)
switch key.ItemType {
- case btrfsitem.QGROUP_RELATION_KEY: //TODO, btrfsitem.QGROUP_INFO_KEY, btrfsitem.QGROUP_LIMIT_KEY:
+ case btrfsitem.QGROUP_RELATION_KEY: // TODO, btrfsitem.QGROUP_INFO_KEY, btrfsitem.QGROUP_LIMIT_KEY:
panic("TODO: printing qgroup items not yet implemented")
case btrfsitem.UUID_SUBVOL_KEY, btrfsitem.UUID_RECEIVED_SUBVOL_KEY:
textui.Fprintf(&out, " %#08x)", key.Offset)
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go
index ae83513..9e6b864 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -19,7 +19,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
-const minFuzzyPct = 0.5
+var minFuzzyPct = textui.Tunable(0.5)
type fuzzyRecord struct {
PAddr btrfsvol.QualifiedPhysicalAddr
@@ -78,7 +78,7 @@ func fuzzyMatchBlockGroupSums(ctx context.Context,
Dev: paddr.Dev,
Addr: paddr.Addr.Add(-off),
}
- matches[key] = matches[key] + 1
+ matches[key]++
}
return nil
}); err != nil {
@@ -112,8 +112,8 @@ func fuzzyMatchBlockGroupSums(ctx context.Context,
if apply {
lvl = dlog.LogLevelInfo
}
- dlog.Logf(ctx, lvl, "(%v/%v) blockgroup[laddr=%v] matches=[%s]; bestpossible=%v%% (based on %v runs)",
- i+1, numBlockgroups, bgLAddr, matchesStr, int(100*bgRun.PctFull()), len(bgRun.Runs))
+ dlog.Logf(ctx, lvl, "(%v/%v) blockgroup[laddr=%v] matches=[%s]; bestpossible=%v (based on %v runs)",
+ i+1, numBlockgroups, bgLAddr, matchesStr, number.Percent(bgRun.PctFull()), len(bgRun.Runs))
if !apply {
continue
}
@@ -145,11 +145,12 @@ type lowestN[T containers.Ordered[T]] struct {
}
func (l *lowestN[T]) Insert(v T) {
- if len(l.Dat) < l.N {
+ switch {
+ case len(l.Dat) < l.N:
l.Dat = append(l.Dat, v)
- } else if v.Cmp(l.Dat[0]) < 0 {
+ case v.Cmp(l.Dat[0]) < 0:
l.Dat[0] = v
- } else {
+ default:
return
}
sort.Slice(l.Dat, func(i, j int) bool {
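
The lowestN.Insert hunk above replaces an if/else-if/else chain with a bare switch, which reads more like a decision table. A self-contained sketch of the same bounded "keep the N smallest values" structure follows; the Ordered constraint and the example element type here are assumptions for illustration, not the package's actual containers.Ordered or fuzzyRecord definitions:

    package main

    import (
    	"fmt"
    	"sort"
    )

    // Ordered is a stand-in for the containers.Ordered constraint used upstream.
    type Ordered[T any] interface {
    	Cmp(T) int
    }

    // lowestN keeps the N smallest values inserted so far, worst (largest) first.
    type lowestN[T Ordered[T]] struct {
    	N   int
    	Dat []T
    }

    func (l *lowestN[T]) Insert(v T) {
    	switch {
    	case len(l.Dat) < l.N:
    		l.Dat = append(l.Dat, v) // still filling up: always keep v
    	case v.Cmp(l.Dat[0]) < 0:
    		l.Dat[0] = v // v beats the current worst kept value: replace it
    	default:
    		return // v is not among the N lowest: drop it
    	}
    	// Keep the largest kept value at index 0 so the next comparison
    	// only has to look at Dat[0].
    	sort.Slice(l.Dat, func(i, j int) bool {
    		return l.Dat[i].Cmp(l.Dat[j]) > 0
    	})
    }

    type myInt int

    func (a myInt) Cmp(b myInt) int { return int(a) - int(b) }

    func main() {
    	l := lowestN[myInt]{N: 3}
    	for _, v := range []myInt{9, 1, 7, 3, 5} {
    		l.Insert(v)
    	}
    	fmt.Println(l.Dat) // [5 3 1]
    }
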
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go
index be82f87..02c657f 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -8,6 +8,7 @@ import (
"context"
"github.com/datawire/dlib/dlog"
+ "golang.org/x/text/number"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
@@ -55,8 +56,8 @@ func matchBlockGroupSums(ctx context.Context,
if len(matches) == 1 {
lvl = dlog.LogLevelInfo
}
- dlog.Logf(ctx, lvl, "(%v/%v) blockgroup[laddr=%v] has %v matches based on %v%% coverage from %v runs",
- i+1, numBlockgroups, bgLAddr, len(matches), int(100*bgRun.PctFull()), len(bgRun.Runs))
+ dlog.Logf(ctx, lvl, "(%v/%v) blockgroup[laddr=%v] has %v matches based on %v coverage from %v runs",
+ i+1, numBlockgroups, bgLAddr, len(matches), number.Percent(bgRun.PctFull()), len(bgRun.Runs))
if len(matches) != 1 {
continue
}
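
matchsums.go (and fuzzymatchsums.go above) now log coverage with number.Percent from golang.org/x/text instead of int(100*pct). A minimal sketch of how such a formatter is normally rendered through x/text's locale-aware printer; the dlog/textui plumbing that actually consumes it in this repository is not shown:

    package main

    import (
    	"golang.org/x/text/language"
    	"golang.org/x/text/message"
    	"golang.org/x/text/number"
    )

    func main() {
    	p := message.NewPrinter(language.AmericanEnglish)
    	pctFull := 0.5
    	// number.Percent scales the value and lets the printer add the "%" sign,
    	// so callers pass the raw 0..1 ratio instead of pre-multiplying by 100.
    	p.Printf("coverage=%v\n", number.Percent(pctFull)) // prints something like "coverage=50%"
    }
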
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go
index 7311aca..665bc96 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -15,6 +15,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
"git.lukeshu.com/btrfs-progs-ng/lib/maps"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
func getNodeSize(fs *btrfs.FS) (btrfsvol.AddrDelta, error) {
@@ -189,20 +190,20 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect
unmappedPhysical += region.End.Sub(region.Beg)
}
}
- dlog.Infof(ctx, "... %d KiB of unmapped physical space (across %d regions)", int(unmappedPhysical/1024), numUnmappedPhysical)
+ dlog.Infof(ctx, "... %d of unmapped physical space (across %d regions)", textui.IEC(unmappedPhysical, "B"), numUnmappedPhysical)
unmappedLogicalRegions := ListUnmappedLogicalRegions(fs, logicalSums)
var unmappedLogical btrfsvol.AddrDelta
for _, region := range unmappedLogicalRegions {
unmappedLogical += region.Size()
}
- dlog.Infof(ctx, "... %d KiB of unmapped summed logical space (across %d regions)", int(unmappedLogical/1024), len(unmappedLogicalRegions))
+ dlog.Infof(ctx, "... %d of unmapped summed logical space (across %d regions)", textui.IEC(unmappedLogical, "B"), len(unmappedLogicalRegions))
var unmappedBlockGroups btrfsvol.AddrDelta
for _, bg := range bgs {
unmappedBlockGroups += bg.Size
}
- dlog.Infof(ctx, "... %d KiB of unmapped block groups (across %d groups)", int(unmappedBlockGroups/1024), len(bgs))
+ dlog.Infof(ctx, "... %d of unmapped block groups (across %d groups)", textui.IEC(unmappedBlockGroups, "B"), len(bgs))
dlog.Info(_ctx, "detailed report:")
for _, devID := range maps.SortedKeys(unmappedPhysicalRegions) {
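
The unmapped-space log lines above stop dividing by 1024 by hand and instead pass the raw byte count through textui.IEC so the unit scales with the value. The helper below is a hypothetical stand-in showing what a binary-prefix formatter of that shape might look like; it is not the project's actual implementation:

    package main

    import "fmt"

    // iec formats n with binary (1024-based) prefixes, e.g. 1536 -> "1.5KiB".
    // Hypothetical stand-in for the project's textui.IEC helper.
    func iec(n int64, unit string) string {
    	const k = 1024
    	if n < k {
    		return fmt.Sprintf("%d%s", n, unit)
    	}
    	div, exp := int64(k), 0
    	for m := n / k; m >= k; m /= k {
    		div *= k
    		exp++
    	}
    	return fmt.Sprintf("%.1f%ci%s", float64(n)/float64(div), "KMGTPE"[exp], unit)
    }

    func main() {
    	fmt.Println(iec(800, "B"))         // 800B
    	fmt.Println(iec(1536, "B"))        // 1.5KiB
    	fmt.Println(iec(3*1024*1024, "B")) // 3.0MiB
    }
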
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go
index 33eb352..b53a28e 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/rebuilt_btrees.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -191,7 +191,7 @@ func (ts *RebuiltTrees) AddRoot(ctx context.Context, treeID btrfsprim.ObjID, roo
var stats rootStats
stats.Leafs.D = len(tree.leafToRoots)
- progressWriter := textui.NewProgress[rootStats](ctx, dlog.LogLevelInfo, 1*time.Second)
+ progressWriter := textui.NewProgress[rootStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
for i, leaf := range maps.SortedKeys(tree.leafToRoots) {
stats.Leafs.N = i
progressWriter.Set(stats)
@@ -287,7 +287,7 @@ func (ts *RebuiltTrees) addTree(ctx context.Context, treeID btrfsprim.ObjID, sta
ts.AddRoot(ctx, treeID, root)
}
- return
+ return true
}
func (tree *rebuiltTree) indexLeafs(ctx context.Context, graph pkggraph.Graph) {
@@ -297,7 +297,7 @@ func (tree *rebuiltTree) indexLeafs(ctx context.Context, graph pkggraph.Graph) {
var stats textui.Portion[int]
stats.D = len(graph.Nodes)
- progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, 1*time.Second)
+ progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
progress := func() {
stats.N = len(nodeToRoots)
progressWriter.Set(stats)
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go
index c4ed675..cf86d74 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go
@@ -197,7 +197,7 @@ func (g Graph) FinalCheck(ctx context.Context, fs diskio.File[btrfsvol.LogicalAd
ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.read.substep", "check-keypointers")
dlog.Info(_ctx, "Checking keypointers for dead-ends...")
- progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, 1*time.Second)
+ progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
stats.D = len(g.EdgesTo)
progressWriter.Set(stats)
for laddr := range g.EdgesTo {
@@ -221,7 +221,7 @@ func (g Graph) FinalCheck(ctx context.Context, fs diskio.File[btrfsvol.LogicalAd
dlog.Info(_ctx, "Checking for btree loops...")
stats.D = len(g.Nodes)
stats.N = 0
- progressWriter = textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, 1*time.Second)
+ progressWriter = textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
progressWriter.Set(stats)
visited := make(containers.Set[btrfsvol.LogicalAddr], len(g.Nodes))
numLoops := 0
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go
index b1e68f9..24c3dcf 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go
@@ -17,6 +17,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
"git.lukeshu.com/btrfs-progs-ng/lib/diskio"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
type ItemPtr struct {
@@ -50,7 +51,7 @@ func NewHandle(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock)
Sizes: make(map[ItemPtr]SizeAndErr),
- cache: containers.NewLRUCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]](8),
+ cache: containers.NewLRUCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]](textui.Tunable(8)),
}
}
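
This is one of several hunks that wrap bare literals (the 8-entry LRU here, the 1-second progress intervals, the 0.5 fuzzy threshold) in textui.Tunable. A plausible reading, assumed rather than confirmed by this diff, is that Tunable is a generic identity helper whose only job is to make adjustable constants greppable at the call site:

    package main

    import (
    	"fmt"
    	"time"
    )

    // Tunable is assumed to be a no-op wrapper that marks a value as a knob
    // someone may want to adjust later; it simply returns its argument.
    func Tunable[T any](x T) T { return x }

    var (
    	minFuzzyPct      = Tunable(0.5)             // match threshold
    	progressInterval = Tunable(1 * time.Second) // how often to emit progress lines
    	nodeCacheSize    = Tunable(8)               // LRU entries
    )

    func main() {
    	fmt.Println(minFuzzyPct, progressInterval, nodeCacheSize)
    }
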
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
index a7fe5c7..7e55732 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -126,7 +126,7 @@ func (o *rebuilder) rebuild(_ctx context.Context) error {
o.itemQueue = nil
var progress textui.Portion[int]
progress.D = len(itemQueue)
- progressWriter := textui.NewProgress[textui.Portion[int]](stepCtx, dlog.LogLevelInfo, 1*time.Second)
+ progressWriter := textui.NewProgress[textui.Portion[int]](stepCtx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
stepCtx = dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress)
for i, key := range itemQueue {
itemCtx := dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.process.item", key)
@@ -160,7 +160,7 @@ func (o *rebuilder) rebuild(_ctx context.Context) error {
progress.D += len(resolvedAugments[treeID])
}
o.augmentQueue = make(map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int)
- progressWriter = textui.NewProgress[textui.Portion[int]](stepCtx, dlog.LogLevelInfo, 1*time.Second)
+ progressWriter = textui.NewProgress[textui.Portion[int]](stepCtx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
stepCtx = dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress)
for _, treeID := range maps.SortedKeys(resolvedAugments) {
treeCtx := dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID)
@@ -266,14 +266,14 @@ func (o *rebuilder) resolveTreeAugments(ctx context.Context, listsWithDistances
// > 2: [A]
// > 3: [B]
// >
- // > legal solution woudl be `[]`, `[A]` or `[B]`. It would not be legal
+ // > legal solution would be `[]`, `[A]` or `[B]`. It would not be legal
// > to return `[A, B]`.
//
// The algorithm should optimize for the following goals:
//
// - We prefer that each input list have an item in the return set.
//
- // > In Example 1, while `[]`, `[B]`, and `[C]` are permissable
+ // > In Example 1, while `[]`, `[B]`, and `[C]` are permissible
// > solutions, they are not optimal, because one or both of the input
// > lists are not represented.
// >
@@ -299,7 +299,7 @@ func (o *rebuilder) resolveTreeAugments(ctx context.Context, listsWithDistances
// - We prefer items that appear in more lists over items that appear in
// fewer lists.
//
- // The relative priority of these 4 goals is undefined; preferrably the
+ // The relative priority of these 4 goals is undefined; preferably the
// algorithm should be defined in a way that makes it easy to adjust the
// relative priorities.
@@ -317,7 +317,7 @@ func (o *rebuilder) resolveTreeAugments(ctx context.Context, listsWithDistances
counts := make(map[btrfsvol.LogicalAddr]int)
for _, list := range lists {
for item := range list {
- counts[item] = counts[item] + 1
+ counts[item]++
}
}
@@ -386,6 +386,7 @@ func (o *rebuilder) want(ctx context.Context, reason string, treeID btrfsprim.Ob
fmt.Sprintf("tree=%v key={%v %v ?}", treeID, objID, typ))
o._want(ctx, treeID, objID, typ)
}
+
func (o *rebuilder) _want(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) (key btrfsprim.Key, ok bool) {
if !o.rebuilt.AddTree(ctx, treeID) {
o.itemQueue = append(o.itemQueue, o.curKey)
@@ -429,6 +430,7 @@ func (o *rebuilder) wantOff(ctx context.Context, reason string, treeID btrfsprim
ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", keyAndTree{TreeID: treeID, Key: key})
o._wantOff(ctx, treeID, key)
}
+
func (o *rebuilder) _wantOff(ctx context.Context, treeID btrfsprim.ObjID, tgt btrfsprim.Key) (ok bool) {
if !o.rebuilt.AddTree(ctx, treeID) {
o.itemQueue = append(o.itemQueue, o.curKey)
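
The resolveTreeAugments comments in this file spell out the selection goals (represent every input list, avoid items that only some lists contain, prefer items that appear in more lists), and the last hunk above shows the occurrence-counting step rewritten as counts[item]++. A toy sketch of just that counting step, with a made-up addr type in place of btrfsvol.LogicalAddr:

    package main

    import "fmt"

    type addr uint64

    // countOccurrences tallies how many of the candidate lists contain each item,
    // so later goals can prefer items that appear in more lists.
    func countOccurrences(lists []map[addr]struct{}) map[addr]int {
    	counts := make(map[addr]int)
    	for _, list := range lists {
    		for item := range list {
    			counts[item]++
    		}
    	}
    	return counts
    }

    func main() {
    	lists := []map[addr]struct{}{
    		{1: {}, 2: {}},
    		{2: {}, 3: {}},
    	}
    	fmt.Println(countOccurrences(lists)) // map[1:1 2:2 3:1]
    }
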
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go
index 45c9c97..bf4c95d 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -112,13 +112,13 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
case btrfsitem.Empty:
// nothing
case btrfsitem.Extent:
- //if body.Head.Flags.Has(btrfsitem.EXTENT_FLAG_TREE_BLOCK) {
- // // Supposedly this flag indicates that that
- // // body.Info.Key identifies a node by the
- // // first key in the node. But nothing in the
- // // kernel ever reads this, so who knows if it
- // // always gets updated correctly?
- //}
+ // if body.Head.Flags.Has(btrfsitem.EXTENT_FLAG_TREE_BLOCK) {
+ // // Supposedly this flag indicates that
+ // // body.Info.Key identifies a node by the
+ // // first key in the node. But nothing in the
+ // // kernel ever reads this, so who knows if it
+ // // always gets updated correctly?
+ // }
for i, ref := range body.Refs {
switch refBody := ref.Body.(type) {
case nil:
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go
index 7a112b4..7e96e29 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go
@@ -32,7 +32,7 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.Sca
stats.D = countNodes(scanResults)
progressWriter := textui.NewProgress[textui.Portion[int]](
dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.read.substep", "read-nodes"),
- dlog.LogLevelInfo, 1*time.Second)
+ dlog.LogLevelInfo, textui.Tunable(1*time.Second))
nodeGraph := graph.New(*sb)
keyIO := keyio.NewHandle(fs, *sb)
diff --git a/lib/btrfsprogs/btrfsinspect/scandevices.go b/lib/btrfsprogs/btrfsinspect/scandevices.go
index 628995a..7668a83 100644
--- a/lib/btrfsprogs/btrfsinspect/scandevices.go
+++ b/lib/btrfsprogs/btrfsinspect/scandevices.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -126,7 +126,7 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
var sums strings.Builder
sums.Grow(numSums * csumSize)
- progressWriter := textui.NewProgress[scanStats](ctx, dlog.LogLevelInfo, 1*time.Second)
+ progressWriter := textui.NewProgress[scanStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
progress := func(pos btrfsvol.PhysicalAddr) {
progressWriter.Set(scanStats{
Portion: textui.Portion[btrfsvol.PhysicalAddr]{
@@ -176,57 +176,65 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
for i, item := range nodeRef.Data.BodyLeaf {
switch item.Key.ItemType {
case btrfsitem.CHUNK_ITEM_KEY:
- chunk, ok := item.Body.(btrfsitem.Chunk)
- if !ok {
- dlog.Errorf(ctx, "node@%v: item %v: error: type is CHUNK_ITEM_KEY, but struct is %T",
- nodeRef.Addr, i, item.Body)
- continue
+ switch itemBody := item.Body.(type) {
+ case btrfsitem.Chunk:
+ dlog.Tracef(ctx, "node@%v: item %v: found chunk",
+ nodeRef.Addr, i)
+ result.FoundChunks = append(result.FoundChunks, btrfstree.SysChunk{
+ Key: item.Key,
+ Chunk: itemBody,
+ })
+ case btrfsitem.Error:
+ dlog.Errorf(ctx, "node@%v: item %v: error: malformed CHUNK_ITEM: %v",
+ nodeRef.Addr, i, itemBody.Err)
+ default:
+ panic(fmt.Errorf("should not happen: CHUNK_ITEM has unexpected item type: %T", itemBody))
}
- dlog.Tracef(ctx, "node@%v: item %v: found chunk",
- nodeRef.Addr, i)
- result.FoundChunks = append(result.FoundChunks, btrfstree.SysChunk{
- Key: item.Key,
- Chunk: chunk,
- })
case btrfsitem.BLOCK_GROUP_ITEM_KEY:
- bg, ok := item.Body.(btrfsitem.BlockGroup)
- if !ok {
- dlog.Errorf(ctx, "node@%v: item %v: error: type is BLOCK_GROUP_ITEM_KEY, but struct is %T",
- nodeRef.Addr, i, item.Body)
- continue
+ switch itemBody := item.Body.(type) {
+ case btrfsitem.BlockGroup:
+ dlog.Tracef(ctx, "node@%v: item %v: found block group",
+ nodeRef.Addr, i)
+ result.FoundBlockGroups = append(result.FoundBlockGroups, SysBlockGroup{
+ Key: item.Key,
+ BG: itemBody,
+ })
+ case btrfsitem.Error:
+ dlog.Errorf(ctx, "node@%v: item %v: error: malformed BLOCK_GROUP_ITEM: %v",
+ nodeRef.Addr, i, itemBody.Err)
+ default:
+ panic(fmt.Errorf("should not happen: BLOCK_GROUP_ITEM has unexpected item type: %T", itemBody))
}
- dlog.Tracef(ctx, "node@%v: item %v: found block group",
- nodeRef.Addr, i)
- result.FoundBlockGroups = append(result.FoundBlockGroups, SysBlockGroup{
- Key: item.Key,
- BG: bg,
- })
case btrfsitem.DEV_EXTENT_KEY:
- devext, ok := item.Body.(btrfsitem.DevExtent)
- if !ok {
- dlog.Errorf(ctx, "node@%v: item %v: error: type is DEV_EXTENT_KEY, but struct is %T",
- nodeRef.Addr, i, item.Body)
- continue
+ switch itemBody := item.Body.(type) {
+ case btrfsitem.DevExtent:
+ dlog.Tracef(ctx, "node@%v: item %v: found dev extent",
+ nodeRef.Addr, i)
+ result.FoundDevExtents = append(result.FoundDevExtents, SysDevExtent{
+ Key: item.Key,
+ DevExt: itemBody,
+ })
+ case btrfsitem.Error:
+ dlog.Errorf(ctx, "node@%v: item %v: error: malformed DEV_EXTENT: %v",
+ nodeRef.Addr, i, itemBody.Err)
+ default:
+ panic(fmt.Errorf("should not happen: DEV_EXTENT has unexpected item type: %T", itemBody))
}
- dlog.Tracef(ctx, "node@%v: item %v: found dev extent",
- nodeRef.Addr, i)
- result.FoundDevExtents = append(result.FoundDevExtents, SysDevExtent{
- Key: item.Key,
- DevExt: devext,
- })
case btrfsitem.EXTENT_CSUM_KEY:
- sums, ok := item.Body.(btrfsitem.ExtentCSum)
- if !ok {
- dlog.Errorf(ctx, "node@%v: item %v: error: type is EXTENT_CSUM_OBJECTID, but struct is %T",
- nodeRef.Addr, i, item.Body)
- continue
+ switch itemBody := item.Body.(type) {
+ case btrfsitem.ExtentCSum:
+ dlog.Tracef(ctx, "node@%v: item %v: found csums",
+ nodeRef.Addr, i)
+ result.FoundExtentCSums = append(result.FoundExtentCSums, SysExtentCSum{
+ Generation: nodeRef.Data.Head.Generation,
+ Sums: itemBody,
+ })
+ case btrfsitem.Error:
+ dlog.Errorf(ctx, "node@%v: item %v: error: malformed EXTENT_CSUM: %v",
+ nodeRef.Addr, i, itemBody.Err)
+ default:
+ panic(fmt.Errorf("should not happen: EXTENT_CSUM has unexpected item type: %T", itemBody))
}
- dlog.Tracef(ctx, "node@%v: item %v: found csums",
- nodeRef.Addr, i)
- result.FoundExtentCSums = append(result.FoundExtentCSums, SysExtentCSum{
- Generation: nodeRef.Data.Head.Generation,
- Sums: sums,
- })
}
}
minNextNode = pos + btrfsvol.PhysicalAddr(sb.NodeSize)
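
The scandevices.go rewrite drops the body, ok := item.Body.(T) / if !ok { log; continue } pattern in favor of a type switch with three arms: the expected body, a btrfsitem.Error body for malformed items, and a should-not-happen default. The same shape in isolation, with illustrative Chunk/ErrItem types rather than the real btrfsitem definitions:

    package main

    import "fmt"

    type Item interface{ isItem() }

    type Chunk struct{ Size int }
    type ErrItem struct{ Err error }

    func (Chunk) isItem()   {}
    func (ErrItem) isItem() {}

    // handle shows the switch-on-type shape used in the rewritten scanner:
    // the expected body is processed, a parse-error body is logged, and any
    // other type is treated as a programming error.
    func handle(i int, body Item) {
    	switch body := body.(type) {
    	case Chunk:
    		fmt.Printf("item %v: found chunk of size %v\n", i, body.Size)
    	case ErrItem:
    		fmt.Printf("item %v: error: malformed CHUNK_ITEM: %v\n", i, body.Err)
    	default:
    		panic(fmt.Errorf("should not happen: unexpected item type: %T", body))
    	}
    }

    func main() {
    	handle(0, Chunk{Size: 4096})
    	handle(1, ErrItem{Err: fmt.Errorf("truncated item")})
    }
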