Diffstat (limited to 'lib')
-rw-r--r--  lib/btrfs/btrfstree/btree_forrest.go (renamed from lib/btrfs/btrfstree/root.go) | 0
-rw-r--r--  lib/btrfs/btrfstree/btree_tree.go (renamed from lib/btrfs/btrfstree/ops.go) | 0
-rw-r--r--  lib/btrfscheck/graph.go (renamed from lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go) | 112
-rw-r--r--  lib/btrfsprogs/btrfsinspect/mount.go | 482
-rw-r--r--  lib/btrfsprogs/btrfsinspect/print_tree.go | 443
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildmappings/blockgroups.go | 58
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go | 159
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildmappings/kmp.go | 113
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildmappings/kmp_test.go | 126
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildmappings/logicalsums.go | 249
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go | 78
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildmappings/physicalsums.go | 89
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go | 222
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildmappings/sumrunwithgaps.go | 167
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/593a5dd328ee86bac6820e3f98383aebadd2af3a22aa248546a228b08451c30d | 3
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/6bd9babbebf7eb78814e1f3425bfa6ccd6bfd42824a26e8cbd63b5117934e600 | 3
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/84ed65595ad05a58e293dbf423c1a816b697e2763a29d7c37aa476d6eef6fd60 | 3
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/9be40f71bc49b1b5c3b8e08d58d0f69cc22aefb12fd9b5931e49ff0e41953738 | 3
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/e43317ec61b0da9627d7b2fc0237d369082a6bf5dfe39e05c113b42ff6218d5d | 3
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go | 583
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go | 86
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wantcb.go | 400
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wanttyp.go | 107
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go | 77
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go | 31
-rw-r--r--  lib/btrfsprogs/btrfsinspect/scandevices.go | 260
-rw-r--r--  lib/btrfsutil/graph.go (renamed from lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go) | 49
-rw-r--r--  lib/btrfsutil/graph_loops.go (renamed from lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go) | 8
-rw-r--r--  lib/btrfsutil/nestedlock.go (renamed from lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/nestedlock.go) | 2
-rw-r--r--  lib/btrfsutil/old_rebuilt_forrest.go (renamed from lib/btrfsprogs/btrfsutil/broken_btree.go) | 153
-rw-r--r--  lib/btrfsutil/open.go (renamed from lib/btrfsprogs/btrfsutil/open.go) | 0
-rw-r--r--  lib/btrfsutil/print_addrspace.go (renamed from lib/btrfsprogs/btrfsinspect/print_addrspace.go) | 2
-rw-r--r--  lib/btrfsutil/rebuilt_forrest.go (renamed from lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go) | 32
-rw-r--r--  lib/btrfsutil/rebuilt_readitem.go (renamed from lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go) | 25
-rw-r--r--  lib/btrfsutil/rebuilt_tree.go (renamed from lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go) | 25
-rw-r--r--  lib/btrfsutil/skinny_paths.go (renamed from lib/btrfsprogs/btrfsutil/skinny_paths.go) | 0
-rw-r--r--  lib/btrfsutil/walk.go (renamed from lib/btrfsprogs/btrfsutil/walk.go) | 0
-rw-r--r--  lib/textui/log.go | 88
38 files changed, 248 insertions, 3993 deletions
diff --git a/lib/btrfs/btrfstree/root.go b/lib/btrfs/btrfstree/btree_forrest.go
index ace2b49..ace2b49 100644
--- a/lib/btrfs/btrfstree/root.go
+++ b/lib/btrfs/btrfstree/btree_forrest.go
diff --git a/lib/btrfs/btrfstree/ops.go b/lib/btrfs/btrfstree/btree_tree.go
index b01312f..b01312f 100644
--- a/lib/btrfs/btrfstree/ops.go
+++ b/lib/btrfs/btrfstree/btree_tree.go
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go b/lib/btrfscheck/graph.go
index 710030c..ea51818 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go
+++ b/lib/btrfscheck/graph.go
@@ -2,7 +2,7 @@
//
// SPDX-License-Identifier: GPL-2.0-or-later
-package rebuildnodes
+package btrfscheck
import (
"context"
@@ -14,18 +14,18 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
)
-type rebuildCallbacks interface {
- fsErr(ctx context.Context, e error)
- want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType)
- wantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64)
- wantDirIndex(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, name []byte)
- wantCSum(ctx context.Context, reason string, inodeTree, inodeItem btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) // interval is [beg, end)
- wantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64)
+type GraphCallbacks interface {
+ FSErr(ctx context.Context, e error)
+ Want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType)
+ WantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64)
+ WantDirIndex(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, name []byte)
+ WantCSum(ctx context.Context, reason string, inodeTree, inodeItem btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) // interval is [beg, end)
+ WantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64)
}
-// handleWouldBeNoOp returns whether or not a call to handleItem for a
-// given item type would be a no-op.
-func handleWouldBeNoOp(typ btrfsprim.ItemType) bool {
+// HandleItemWouldBeNoOp returns whether or not a call to HandleItem
+// for a given item type would be a no-op.
+func HandleItemWouldBeNoOp(typ btrfsprim.ItemType) bool {
switch typ {
case // btrfsitem.Dev
btrfsprim.DEV_ITEM_KEY,
@@ -45,30 +45,30 @@ func handleWouldBeNoOp(typ btrfsprim.ItemType) bool {
}
}
-func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, item btrfstree.Item) {
+func HandleItem(o GraphCallbacks, ctx context.Context, treeID btrfsprim.ObjID, item btrfstree.Item) {
// Notionally, just express the relationships shown in
// https://btrfs.wiki.kernel.org/index.php/File:References.png (from the page
// https://btrfs.wiki.kernel.org/index.php/Data_Structures )
switch body := item.Body.(type) {
case *btrfsitem.BlockGroup:
- o.want(ctx, "Chunk",
+ o.Want(ctx, "Chunk",
btrfsprim.CHUNK_TREE_OBJECTID,
body.ChunkObjectID,
btrfsitem.CHUNK_ITEM_KEY)
- o.wantOff(ctx, "FreeSpaceInfo",
+ o.WantOff(ctx, "FreeSpaceInfo",
btrfsprim.FREE_SPACE_TREE_OBJECTID,
item.Key.ObjectID,
btrfsitem.FREE_SPACE_INFO_KEY,
item.Key.Offset)
case *btrfsitem.Chunk:
- o.want(ctx, "owning Root",
+ o.Want(ctx, "owning Root",
btrfsprim.ROOT_TREE_OBJECTID,
body.Head.Owner,
btrfsitem.ROOT_ITEM_KEY)
case *btrfsitem.Dev:
// nothing
case *btrfsitem.DevExtent:
- o.wantOff(ctx, "Chunk",
+ o.WantOff(ctx, "Chunk",
body.ChunkTree,
body.ChunkObjectID,
btrfsitem.CHUNK_ITEM_KEY,
@@ -77,7 +77,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
// nothing
case *btrfsitem.DirEntry:
// containing-directory
- o.wantOff(ctx, "containing dir inode",
+ o.WantOff(ctx, "containing dir inode",
treeID,
item.Key.ObjectID,
btrfsitem.INODE_ITEM_KEY,
@@ -85,12 +85,12 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
// siblings
switch item.Key.ItemType {
case btrfsitem.DIR_ITEM_KEY:
- o.wantDirIndex(ctx, "corresponding DIR_INDEX",
+ o.WantDirIndex(ctx, "corresponding DIR_INDEX",
treeID,
item.Key.ObjectID,
body.Name)
case btrfsitem.DIR_INDEX_KEY:
- o.wantOff(ctx, "corresponding DIR_ITEM",
+ o.WantOff(ctx, "corresponding DIR_ITEM",
treeID,
item.Key.ObjectID,
btrfsitem.DIR_ITEM_KEY,
@@ -107,23 +107,23 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
if body.Location != (btrfsprim.Key{}) {
switch body.Location.ItemType {
case btrfsitem.INODE_ITEM_KEY:
- o.wantOff(ctx, "item being pointed to",
+ o.WantOff(ctx, "item being pointed to",
treeID,
body.Location.ObjectID,
body.Location.ItemType,
body.Location.Offset)
- o.wantOff(ctx, "backref from item being pointed to",
+ o.WantOff(ctx, "backref from item being pointed to",
treeID,
body.Location.ObjectID,
btrfsitem.INODE_REF_KEY,
uint64(item.Key.ObjectID))
case btrfsitem.ROOT_ITEM_KEY:
- o.want(ctx, "Root of subvolume being pointed to",
+ o.Want(ctx, "Root of subvolume being pointed to",
btrfsprim.ROOT_TREE_OBJECTID,
body.Location.ObjectID,
body.Location.ItemType)
default:
- o.fsErr(ctx, fmt.Errorf("DirEntry: unexpected .Location.ItemType=%v", body.Location.ItemType))
+ o.FSErr(ctx, fmt.Errorf("DirEntry: unexpected .Location.ItemType=%v", body.Location.ItemType))
}
}
case *btrfsitem.Empty:
@@ -141,12 +141,12 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
case nil:
// nothing
case *btrfsitem.ExtentDataRef:
- o.wantOff(ctx, "referencing Inode",
+ o.WantOff(ctx, "referencing Inode",
refBody.Root,
refBody.ObjectID,
btrfsitem.INODE_ITEM_KEY,
0)
- o.wantOff(ctx, "referencing FileExtent",
+ o.WantOff(ctx, "referencing FileExtent",
refBody.Root,
refBody.ObjectID,
btrfsitem.EXTENT_DATA_KEY,
@@ -162,22 +162,22 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
case *btrfsitem.ExtentCSum:
// nothing
case *btrfsitem.ExtentDataRef:
- o.want(ctx, "Extent being referenced",
+ o.Want(ctx, "Extent being referenced",
btrfsprim.EXTENT_TREE_OBJECTID,
item.Key.ObjectID,
btrfsitem.EXTENT_ITEM_KEY)
- o.wantOff(ctx, "referencing Inode",
+ o.WantOff(ctx, "referencing Inode",
body.Root,
body.ObjectID,
btrfsitem.INODE_ITEM_KEY,
0)
- o.wantOff(ctx, "referencing FileExtent",
+ o.WantOff(ctx, "referencing FileExtent",
body.Root,
body.ObjectID,
btrfsitem.EXTENT_DATA_KEY,
uint64(body.Offset))
case *btrfsitem.FileExtent:
- o.wantOff(ctx, "containing Inode",
+ o.WantOff(ctx, "containing Inode",
treeID,
item.Key.ObjectID,
btrfsitem.INODE_ITEM_KEY,
@@ -186,65 +186,65 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
case btrfsitem.FILE_EXTENT_INLINE:
// nothing
case btrfsitem.FILE_EXTENT_REG, btrfsitem.FILE_EXTENT_PREALLOC:
- // NB: o.wantCSum checks inodeBody.Flags.Has(btrfsitem.INODE_NODATASUM) for us.
- o.wantCSum(ctx, "data sum",
+ // NB: o.WantCSum checks inodeBody.Flags.Has(btrfsitem.INODE_NODATASUM) for us.
+ o.WantCSum(ctx, "data sum",
treeID, item.Key.ObjectID,
body.BodyExtent.DiskByteNr,
body.BodyExtent.DiskByteNr.Add(body.BodyExtent.DiskNumBytes))
default:
- o.fsErr(ctx, fmt.Errorf("FileExtent: unexpected body.Type=%v", body.Type))
+ o.FSErr(ctx, fmt.Errorf("FileExtent: unexpected body.Type=%v", body.Type))
}
case *btrfsitem.FreeSpaceBitmap:
- o.wantOff(ctx, "FreeSpaceInfo",
+ o.WantOff(ctx, "FreeSpaceInfo",
treeID,
item.Key.ObjectID,
btrfsitem.FREE_SPACE_INFO_KEY,
item.Key.Offset)
case *btrfsitem.FreeSpaceHeader:
- o.wantOff(ctx, ".Location",
+ o.WantOff(ctx, ".Location",
treeID,
body.Location.ObjectID,
body.Location.ItemType,
body.Location.Offset)
case *btrfsitem.FreeSpaceInfo:
if body.Flags.Has(btrfsitem.FREE_SPACE_USING_BITMAPS) {
- o.wantOff(ctx, "FreeSpaceBitmap",
+ o.WantOff(ctx, "FreeSpaceBitmap",
treeID,
item.Key.ObjectID,
btrfsitem.FREE_SPACE_BITMAP_KEY,
item.Key.Offset)
}
case *btrfsitem.Inode:
- o.want(ctx, "backrefs",
+ o.Want(ctx, "backrefs",
treeID, // TODO: validate the number of these against body.NLink
item.Key.ObjectID,
btrfsitem.INODE_REF_KEY)
- o.wantFileExt(ctx, "FileExtents",
+ o.WantFileExt(ctx, "FileExtents",
treeID, item.Key.ObjectID, body.Size)
if body.BlockGroup != 0 {
- o.want(ctx, "BlockGroup",
+ o.Want(ctx, "BlockGroup",
btrfsprim.EXTENT_TREE_OBJECTID,
body.BlockGroup,
btrfsitem.BLOCK_GROUP_ITEM_KEY)
}
case *btrfsitem.InodeRefs:
- o.wantOff(ctx, "child Inode",
+ o.WantOff(ctx, "child Inode",
treeID,
item.Key.ObjectID,
btrfsitem.INODE_ITEM_KEY,
0)
- o.wantOff(ctx, "parent Inode",
+ o.WantOff(ctx, "parent Inode",
treeID,
btrfsprim.ObjID(item.Key.Offset),
btrfsitem.INODE_ITEM_KEY,
0)
for _, ref := range body.Refs {
- o.wantOff(ctx, "DIR_ITEM",
+ o.WantOff(ctx, "DIR_ITEM",
treeID,
btrfsprim.ObjID(item.Key.Offset),
btrfsitem.DIR_ITEM_KEY,
btrfsitem.NameHash(ref.Name))
- o.wantOff(ctx, "DIR_INDEX",
+ o.WantOff(ctx, "DIR_INDEX",
treeID,
btrfsprim.ObjID(item.Key.Offset),
btrfsitem.DIR_INDEX_KEY,
@@ -256,12 +256,12 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
case nil:
// nothing
case *btrfsitem.ExtentDataRef:
- o.wantOff(ctx, "referencing INode",
+ o.WantOff(ctx, "referencing INode",
refBody.Root,
refBody.ObjectID,
btrfsitem.INODE_ITEM_KEY,
0)
- o.wantOff(ctx, "referencing FileExtent",
+ o.WantOff(ctx, "referencing FileExtent",
refBody.Root,
refBody.ObjectID,
btrfsitem.EXTENT_DATA_KEY,
@@ -276,7 +276,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
}
case *btrfsitem.Root:
if body.RootDirID != 0 {
- o.wantOff(ctx, "root directory",
+ o.WantOff(ctx, "root directory",
item.Key.ObjectID,
body.RootDirID,
btrfsitem.INODE_ITEM_KEY,
@@ -284,7 +284,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
}
if body.UUID != (btrfsprim.UUID{}) {
key := btrfsitem.UUIDToKey(body.UUID)
- o.wantOff(ctx, "uuid",
+ o.WantOff(ctx, "uuid",
btrfsprim.UUID_TREE_OBJECTID,
key.ObjectID,
key.ItemType,
@@ -292,7 +292,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
}
if body.ParentUUID != (btrfsprim.UUID{}) {
key := btrfsitem.UUIDToKey(body.ParentUUID)
- o.wantOff(ctx, "parent uuid",
+ o.WantOff(ctx, "parent uuid",
btrfsprim.UUID_TREE_OBJECTID,
key.ObjectID,
key.ItemType,
@@ -317,48 +317,48 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
panic(fmt.Errorf("should not happen: RootRef: unexpected ItemType=%v", item.Key.ItemType))
}
// sibling
- o.wantOff(ctx, fmt.Sprintf("corresponding %v", otherType),
+ o.WantOff(ctx, fmt.Sprintf("corresponding %v", otherType),
treeID,
btrfsprim.ObjID(item.Key.Offset),
otherType,
uint64(item.Key.ObjectID))
// parent
- o.want(ctx, "parent subvolume: Root",
+ o.Want(ctx, "parent subvolume: Root",
treeID,
parent,
btrfsitem.ROOT_ITEM_KEY)
- o.wantOff(ctx, "parent subvolume: Inode of parent dir",
+ o.WantOff(ctx, "parent subvolume: Inode of parent dir",
parent,
body.DirID,
btrfsitem.INODE_ITEM_KEY,
0)
- o.wantOff(ctx, "parent subvolume: DIR_ITEM in parent dir",
+ o.WantOff(ctx, "parent subvolume: DIR_ITEM in parent dir",
parent,
body.DirID,
btrfsitem.DIR_ITEM_KEY,
btrfsitem.NameHash(body.Name))
- o.wantOff(ctx, "parent subvolume: DIR_INDEX in parent dir",
+ o.WantOff(ctx, "parent subvolume: DIR_INDEX in parent dir",
parent,
body.DirID,
btrfsitem.DIR_INDEX_KEY,
uint64(body.Sequence))
// child
- o.want(ctx, "child subvolume: Root",
+ o.Want(ctx, "child subvolume: Root",
treeID,
child,
btrfsitem.ROOT_ITEM_KEY)
case *btrfsitem.SharedDataRef:
- o.want(ctx, "Extent",
+ o.Want(ctx, "Extent",
btrfsprim.EXTENT_TREE_OBJECTID,
item.Key.ObjectID,
btrfsitem.EXTENT_ITEM_KEY)
case *btrfsitem.UUIDMap:
- o.want(ctx, "subvolume Root",
+ o.Want(ctx, "subvolume Root",
btrfsprim.ROOT_TREE_OBJECTID,
body.ObjID,
btrfsitem.ROOT_ITEM_KEY)
case *btrfsitem.Error:
- o.fsErr(ctx, fmt.Errorf("error decoding item: %w", body.Err))
+ o.FSErr(ctx, fmt.Errorf("error decoding item: %w", body.Err))
default:
// This is a panic because the item decoder should not emit new types without this
// code also being updated.
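
The hunks above turn the rebuild item-walker into a public API: the package-private rebuildCallbacks interface becomes btrfscheck.GraphCallbacks, and handleItem becomes btrfscheck.HandleItem, so other packages can drive the same item-relationship walk. A minimal sketch of a consumer, assuming only the exported signatures shown in this diff (the package name and logging bodies are illustrative, not part of the commit):

    package mycheck // hypothetical consumer package

    import (
        "context"
        "fmt"

        "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
        "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
        "git.lukeshu.com/btrfs-progs-ng/lib/btrfscheck"
    )

    // logCallbacks satisfies btrfscheck.GraphCallbacks by printing
    // each cross-reference that HandleItem reports wanting.
    type logCallbacks struct{}

    var _ btrfscheck.GraphCallbacks = logCallbacks{}

    func (logCallbacks) FSErr(_ context.Context, e error) {
        fmt.Println("filesystem error:", e)
    }

    func (logCallbacks) Want(_ context.Context, reason string, treeID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) {
        fmt.Printf("want %q: tree=%v obj=%v type=%v\n", reason, treeID, objID, typ)
    }

    func (logCallbacks) WantOff(_ context.Context, reason string, treeID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) {
        fmt.Printf("want %q: tree=%v obj=%v type=%v off=%v\n", reason, treeID, objID, typ, off)
    }

    func (logCallbacks) WantDirIndex(_ context.Context, reason string, treeID, objID btrfsprim.ObjID, name []byte) {
        fmt.Printf("want %q: tree=%v obj=%v name=%q\n", reason, treeID, objID, name)
    }

    func (logCallbacks) WantCSum(_ context.Context, reason string, inodeTree, inodeItem btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) {
        fmt.Printf("want %q: csums for [%v,%v) of inode %v\n", reason, beg, end, inodeItem)
    }

    func (logCallbacks) WantFileExt(_ context.Context, reason string, treeID, ino btrfsprim.ObjID, size int64) {
        fmt.Printf("want %q: tree=%v ino=%v size=%v\n", reason, treeID, ino, size)
    }

Calling btrfscheck.HandleItem(logCallbacks{}, ctx, treeID, item) would then print one line per relationship the item implies.
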
diff --git a/lib/btrfsprogs/btrfsinspect/mount.go b/lib/btrfsprogs/btrfsinspect/mount.go
deleted file mode 100644
index 0ac8497..0000000
--- a/lib/btrfsprogs/btrfsinspect/mount.go
+++ /dev/null
@@ -1,482 +0,0 @@
-// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package btrfsinspect
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "path/filepath"
- "sync"
- "sync/atomic"
- "syscall"
-
- "git.lukeshu.com/go/typedsync"
- "github.com/datawire/dlib/dcontext"
- "github.com/datawire/dlib/dgroup"
- "github.com/datawire/dlib/dlog"
- "github.com/jacobsa/fuse"
- "github.com/jacobsa/fuse/fuseops"
- "github.com/jacobsa/fuse/fuseutil"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsutil"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/maps"
- "git.lukeshu.com/btrfs-progs-ng/lib/slices"
-)
-
-func MountRO(ctx context.Context, fs *btrfs.FS, mountpoint string, noChecksums bool) error {
- pvs := fs.LV.PhysicalVolumes()
- if len(pvs) < 1 {
- return errors.New("no devices")
- }
-
- deviceName := pvs[maps.SortedKeys(pvs)[0]].Name()
- if abs, err := filepath.Abs(deviceName); err == nil {
- deviceName = abs
- }
-
- rootSubvol := &subvolume{
- Subvolume: btrfs.Subvolume{
- FS: btrfsutil.NewBrokenTrees(ctx, fs),
- TreeID: btrfsprim.FS_TREE_OBJECTID,
- NoChecksums: noChecksums,
- },
- DeviceName: deviceName,
- Mountpoint: mountpoint,
- }
- return rootSubvol.Run(ctx)
-}
-
-func fuseMount(ctx context.Context, mountpoint string, server fuse.Server, cfg *fuse.MountConfig) error {
- grp := dgroup.NewGroup(ctx, dgroup.GroupConfig{
- // Allow mountHandle.Join() returning to cause the
- // "unmount" goroutine to quit.
- ShutdownOnNonError: true,
- })
- mounted := uint32(1)
- grp.Go("unmount", func(ctx context.Context) error {
- <-ctx.Done()
- var err error
- var gotNil bool
- // Keep retrying, because the FS might be busy.
- for atomic.LoadUint32(&mounted) != 0 {
- if _err := fuse.Unmount(mountpoint); _err == nil {
- gotNil = true
- } else if !gotNil {
- err = _err
- }
- }
- if gotNil {
- return nil
- }
- return err
- })
- grp.Go("mount", func(ctx context.Context) error {
- defer atomic.StoreUint32(&mounted, 0)
-
- cfg.OpContext = ctx
- cfg.ErrorLogger = dlog.StdLogger(ctx, dlog.LogLevelError)
- cfg.DebugLogger = dlog.StdLogger(ctx, dlog.LogLevelDebug)
-
- mountHandle, err := fuse.Mount(mountpoint, server, cfg)
- if err != nil {
- return err
- }
- dlog.Infof(ctx, "mounted %q", mountpoint)
- return mountHandle.Join(dcontext.HardContext(ctx))
- })
- return grp.Wait()
-}
-
-type dirState struct {
- Dir *btrfs.Dir
-}
-
-type fileState struct {
- File *btrfs.File
-}
-
-type subvolume struct {
- btrfs.Subvolume
- DeviceName string
- Mountpoint string
-
- fuseutil.NotImplementedFileSystem
- lastHandle uint64
- dirHandles typedsync.Map[fuseops.HandleID, *dirState]
- fileHandles typedsync.Map[fuseops.HandleID, *fileState]
-
- subvolMu sync.Mutex
- subvols containers.Set[string]
- grp *dgroup.Group
-}
-
-func (sv *subvolume) Run(ctx context.Context) error {
- sv.grp = dgroup.NewGroup(ctx, dgroup.GroupConfig{})
- sv.grp.Go("self", func(ctx context.Context) error {
- cfg := &fuse.MountConfig{
- FSName: sv.DeviceName,
- Subtype: "btrfs",
-
- ReadOnly: true,
-
- Options: map[string]string{
- "allow_other": "",
- },
- }
- return fuseMount(ctx, sv.Mountpoint, fuseutil.NewFileSystemServer(sv), cfg)
- })
- return sv.grp.Wait()
-}
-
-func (sv *subvolume) newHandle() fuseops.HandleID {
- return fuseops.HandleID(atomic.AddUint64(&sv.lastHandle, 1))
-}
-
-func inodeItemToFUSE(itemBody btrfsitem.Inode) fuseops.InodeAttributes {
- return fuseops.InodeAttributes{
- Size: uint64(itemBody.Size),
- Nlink: uint32(itemBody.NLink),
- Mode: uint32(itemBody.Mode),
- // RDev: itemBody.Rdev, // jacobsa/fuse doesn't expose rdev
- Atime: itemBody.ATime.ToStd(),
- Mtime: itemBody.MTime.ToStd(),
- Ctime: itemBody.CTime.ToStd(),
- // Crtime: itemBody.OTime,
- Uid: uint32(itemBody.UID),
- Gid: uint32(itemBody.GID),
- }
-}
-
-func (sv *subvolume) LoadDir(inode btrfsprim.ObjID) (val *btrfs.Dir, err error) {
- val, err = sv.Subvolume.LoadDir(inode)
- if val != nil {
- haveSubvolumes := false
- for _, index := range maps.SortedKeys(val.ChildrenByIndex) {
- entry := val.ChildrenByIndex[index]
- if entry.Location.ItemType == btrfsitem.ROOT_ITEM_KEY {
- haveSubvolumes = true
- break
- }
- }
- if haveSubvolumes {
- abspath, _err := val.AbsPath()
- if _err != nil {
- return val, err
- }
- sv.subvolMu.Lock()
- for _, index := range maps.SortedKeys(val.ChildrenByIndex) {
- entry := val.ChildrenByIndex[index]
- if entry.Location.ItemType != btrfsitem.ROOT_ITEM_KEY {
- continue
- }
- if sv.subvols == nil {
- sv.subvols = make(containers.Set[string])
- }
- subMountpoint := filepath.Join(abspath, string(entry.Name))
- if !sv.subvols.Has(subMountpoint) {
- sv.subvols.Insert(subMountpoint)
- workerName := fmt.Sprintf("%d-%s", val.Inode, filepath.Base(subMountpoint))
- sv.grp.Go(workerName, func(ctx context.Context) error {
- subSv := &subvolume{
- Subvolume: btrfs.Subvolume{
- FS: sv.FS,
- TreeID: entry.Location.ObjectID,
- NoChecksums: sv.NoChecksums,
- },
- DeviceName: sv.DeviceName,
- Mountpoint: filepath.Join(sv.Mountpoint, subMountpoint[1:]),
- }
- return subSv.Run(ctx)
- })
- }
- }
- sv.subvolMu.Unlock()
- }
- }
- return val, err
-}
-
-func (sv *subvolume) StatFS(_ context.Context, op *fuseops.StatFSOp) error {
- // See linux.git/fs/btrfs/super.c:btrfs_statfs()
- sb, err := sv.FS.Superblock()
- if err != nil {
- return err
- }
-
- op.IoSize = sb.SectorSize
- op.BlockSize = sb.SectorSize
- op.Blocks = sb.TotalBytes / uint64(sb.SectorSize) // TODO: adjust for RAID type
- // op.BlocksFree = TODO
-
- // btrfs doesn't have a fixed number of inodes
- op.Inodes = 0
- op.InodesFree = 0
-
- // jacobsa/fuse doesn't expose namelen, instead hard-coding it
- // to 255. Which is fine by us, because that's what it is for
- // btrfs.
-
- return nil
-}
-
-func (sv *subvolume) LookUpInode(_ context.Context, op *fuseops.LookUpInodeOp) error {
- if op.Parent == fuseops.RootInodeID {
- parent, err := sv.GetRootInode()
- if err != nil {
- return err
- }
- op.Parent = fuseops.InodeID(parent)
- }
-
- dir, err := sv.LoadDir(btrfsprim.ObjID(op.Parent))
- if err != nil {
- return err
- }
- entry, ok := dir.ChildrenByName[op.Name]
- if !ok {
- return syscall.ENOENT
- }
- if entry.Location.ItemType != btrfsitem.INODE_ITEM_KEY {
- // Subvolume
- //
- // Each subvolume has its own pool of inodes (two
- // different subvolumes can have files with the same
- // inode number), so to represent that to FUSE we
- // need to have this be a fully separate mountpoint.
- //
- // I'd want to return EIO or EINTR or something here,
- // but both the FUSE userspace tools and the kernel
- // itself stat the mountpoint before mounting it, so
- // we've got to return something bogus here to let
- // that mount happen.
- op.Entry = fuseops.ChildInodeEntry{
- Child: 2, // an inode number that a real file will never have
- Attributes: fuseops.InodeAttributes{
- Nlink: 1,
- Mode: uint32(btrfsitem.ModeFmtDir | 0o700), //nolint:gomnd // TODO
- },
- }
- return nil
- }
- bareInode, err := sv.LoadBareInode(entry.Location.ObjectID)
- if err != nil {
- return err
- }
- op.Entry = fuseops.ChildInodeEntry{
- Child: fuseops.InodeID(entry.Location.ObjectID),
- Generation: fuseops.GenerationNumber(bareInode.InodeItem.Sequence),
- Attributes: inodeItemToFUSE(*bareInode.InodeItem),
- }
- return nil
-}
-
-func (sv *subvolume) GetInodeAttributes(_ context.Context, op *fuseops.GetInodeAttributesOp) error {
- if op.Inode == fuseops.RootInodeID {
- inode, err := sv.GetRootInode()
- if err != nil {
- return err
- }
- op.Inode = fuseops.InodeID(inode)
- }
-
- bareInode, err := sv.LoadBareInode(btrfsprim.ObjID(op.Inode))
- if err != nil {
- return err
- }
-
- op.Attributes = inodeItemToFUSE(*bareInode.InodeItem)
- return nil
-}
-
-func (sv *subvolume) OpenDir(_ context.Context, op *fuseops.OpenDirOp) error {
- if op.Inode == fuseops.RootInodeID {
- inode, err := sv.GetRootInode()
- if err != nil {
- return err
- }
- op.Inode = fuseops.InodeID(inode)
- }
-
- dir, err := sv.LoadDir(btrfsprim.ObjID(op.Inode))
- if err != nil {
- return err
- }
- handle := sv.newHandle()
- sv.dirHandles.Store(handle, &dirState{
- Dir: dir,
- })
- op.Handle = handle
- return nil
-}
-
-func (sv *subvolume) ReadDir(_ context.Context, op *fuseops.ReadDirOp) error {
- state, ok := sv.dirHandles.Load(op.Handle)
- if !ok {
- return syscall.EBADF
- }
- origOffset := op.Offset
- for _, index := range maps.SortedKeys(state.Dir.ChildrenByIndex) {
- if index < uint64(origOffset) {
- continue
- }
- entry := state.Dir.ChildrenByIndex[index]
- n := fuseutil.WriteDirent(op.Dst[op.BytesRead:], fuseutil.Dirent{
- Offset: fuseops.DirOffset(index + 1),
- Inode: fuseops.InodeID(entry.Location.ObjectID),
- Name: string(entry.Name),
- Type: map[btrfsitem.FileType]fuseutil.DirentType{
- btrfsitem.FT_UNKNOWN: fuseutil.DT_Unknown,
- btrfsitem.FT_REG_FILE: fuseutil.DT_File,
- btrfsitem.FT_DIR: fuseutil.DT_Directory,
- btrfsitem.FT_CHRDEV: fuseutil.DT_Char,
- btrfsitem.FT_BLKDEV: fuseutil.DT_Block,
- btrfsitem.FT_FIFO: fuseutil.DT_FIFO,
- btrfsitem.FT_SOCK: fuseutil.DT_Socket,
- btrfsitem.FT_SYMLINK: fuseutil.DT_Link,
- }[entry.Type],
- })
- if n == 0 {
- break
- }
- op.BytesRead += n
- }
- return nil
-}
-
-func (sv *subvolume) ReleaseDirHandle(_ context.Context, op *fuseops.ReleaseDirHandleOp) error {
- _, ok := sv.dirHandles.LoadAndDelete(op.Handle)
- if !ok {
- return syscall.EBADF
- }
- return nil
-}
-
-func (sv *subvolume) OpenFile(_ context.Context, op *fuseops.OpenFileOp) error {
- file, err := sv.LoadFile(btrfsprim.ObjID(op.Inode))
- if err != nil {
- return err
- }
- handle := sv.newHandle()
- sv.fileHandles.Store(handle, &fileState{
- File: file,
- })
- op.Handle = handle
- op.KeepPageCache = true
- return nil
-}
-
-func (sv *subvolume) ReadFile(_ context.Context, op *fuseops.ReadFileOp) error {
- state, ok := sv.fileHandles.Load(op.Handle)
- if !ok {
- return syscall.EBADF
- }
-
- var dat []byte
- if op.Dst != nil {
- size := slices.Min(int64(len(op.Dst)), op.Size)
- dat = op.Dst[:size]
- } else {
- dat = make([]byte, op.Size)
- op.Data = [][]byte{dat}
- }
-
- var err error
- op.BytesRead, err = state.File.ReadAt(dat, op.Offset)
- if errors.Is(err, io.EOF) {
- err = nil
- }
-
- return err
-}
-
-func (sv *subvolume) ReleaseFileHandle(_ context.Context, op *fuseops.ReleaseFileHandleOp) error {
- _, ok := sv.fileHandles.LoadAndDelete(op.Handle)
- if !ok {
- return syscall.EBADF
- }
- return nil
-}
-
-func (sv *subvolume) ReadSymlink(_ context.Context, op *fuseops.ReadSymlinkOp) error {
- file, err := sv.LoadFile(btrfsprim.ObjID(op.Inode))
- if err != nil {
- return err
- }
- reader := io.NewSectionReader(file, 0, file.InodeItem.Size)
- tgt, err := io.ReadAll(reader)
- if err != nil {
- return err
- }
- op.Target = string(tgt)
- return nil
-}
-
-func (sv *subvolume) ListXattr(_ context.Context, op *fuseops.ListXattrOp) error {
- if op.Inode == fuseops.RootInodeID {
- inode, err := sv.GetRootInode()
- if err != nil {
- return err
- }
- op.Inode = fuseops.InodeID(inode)
- }
-
- fullInode, err := sv.LoadFullInode(btrfsprim.ObjID(op.Inode))
- if err != nil {
- return err
- }
-
- size := 0
- for name := range fullInode.XAttrs {
- size += len(name) + 1
- }
- if len(op.Dst) < size {
- return syscall.ERANGE
- }
-
- op.BytesRead = size
- n := 0
- for _, name := range maps.SortedKeys(fullInode.XAttrs) {
- n += copy(op.Dst[n:], name)
- op.Dst[n] = 0
- n++
- }
- return nil
-}
-
-func (sv *subvolume) GetXattr(_ context.Context, op *fuseops.GetXattrOp) error {
- if op.Inode == fuseops.RootInodeID {
- inode, err := sv.GetRootInode()
- if err != nil {
- return err
- }
- op.Inode = fuseops.InodeID(inode)
- }
-
- fullInode, err := sv.LoadFullInode(btrfsprim.ObjID(op.Inode))
- if err != nil {
- return err
- }
-
- val, ok := fullInode.XAttrs[op.Name]
- if !ok {
- return syscall.ENODATA
- }
-
- if len(op.Dst) < len(val) {
- return syscall.ERANGE
- }
-
- op.BytesRead = len(val)
- copy(op.Dst, val)
- return nil
-}
-
-func (sv *subvolume) Destroy() {}
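
One pattern in the deleted fuseMount is worth noting: unmounting is retried in a loop because the filesystem may be busy, and an error is reported only if no attempt ever succeeded. A standalone sketch of that shutdown pattern (names are hypothetical; atomic.Bool stands in for the uint32 flag used above):

    package main

    import (
        "errors"
        "fmt"
        "sync/atomic"
        "time"
    )

    // unmountLoop keeps calling unmount until the serve goroutine
    // clears the mounted flag, mirroring the "unmount" goroutine in
    // the deleted fuseMount: failures are expected while the FS is
    // busy, and are only reported if no attempt ever succeeded.
    func unmountLoop(mounted *atomic.Bool, unmount func() error) error {
        var err error
        var gotNil bool
        for mounted.Load() {
            if _err := unmount(); _err == nil {
                gotNil = true
            } else if !gotNil {
                err = _err
            }
        }
        if gotNil {
            return nil
        }
        return err
    }

    func main() {
        var mounted atomic.Bool
        mounted.Store(true)
        go func() {
            time.Sleep(10 * time.Millisecond) // pretend the FS serves for a while
            mounted.Store(false)
        }()
        fmt.Println(unmountLoop(&mounted, func() error { return errors.New("busy") }))
    }
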
diff --git a/lib/btrfsprogs/btrfsinspect/print_tree.go b/lib/btrfsprogs/btrfsinspect/print_tree.go
deleted file mode 100644
index 240c72f..0000000
--- a/lib/btrfsprogs/btrfsinspect/print_tree.go
+++ /dev/null
@@ -1,443 +0,0 @@
-// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package btrfsinspect
-
-import (
- "context"
- "io"
-
- "github.com/datawire/dlib/dlog"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
- "git.lukeshu.com/btrfs-progs-ng/lib/slices"
- "git.lukeshu.com/btrfs-progs-ng/lib/textui"
-)
-
-func DumpTrees(ctx context.Context, out io.Writer, fs *btrfs.FS) {
- superblock, err := fs.Superblock()
- if err != nil {
- dlog.Error(ctx, err)
- return
- }
-
- if superblock.RootTree != 0 {
- textui.Fprintf(out, "root tree\n")
- printTree(ctx, out, fs, btrfsprim.ROOT_TREE_OBJECTID)
- }
- if superblock.ChunkTree != 0 {
- textui.Fprintf(out, "chunk tree\n")
- printTree(ctx, out, fs, btrfsprim.CHUNK_TREE_OBJECTID)
- }
- if superblock.LogTree != 0 {
- textui.Fprintf(out, "log root tree\n")
- printTree(ctx, out, fs, btrfsprim.TREE_LOG_OBJECTID)
- }
- if superblock.BlockGroupRoot != 0 {
- textui.Fprintf(out, "block group tree\n")
- printTree(ctx, out, fs, btrfsprim.BLOCK_GROUP_TREE_OBJECTID)
- }
- fs.TreeWalk(
- ctx,
- btrfsprim.ROOT_TREE_OBJECTID,
- func(err *btrfstree.TreeError) {
- dlog.Error(ctx, err)
- },
- btrfstree.TreeWalkHandler{
- Item: func(_ btrfstree.TreePath, item btrfstree.Item) error {
- if item.Key.ItemType != btrfsitem.ROOT_ITEM_KEY {
- return nil
- }
- treeName, ok := map[btrfsprim.ObjID]string{
- btrfsprim.ROOT_TREE_OBJECTID: "root",
- btrfsprim.EXTENT_TREE_OBJECTID: "extent",
- btrfsprim.CHUNK_TREE_OBJECTID: "chunk",
- btrfsprim.DEV_TREE_OBJECTID: "device",
- btrfsprim.FS_TREE_OBJECTID: "fs",
- btrfsprim.ROOT_TREE_DIR_OBJECTID: "directory",
- btrfsprim.CSUM_TREE_OBJECTID: "checksum",
- btrfsprim.ORPHAN_OBJECTID: "orphan",
- btrfsprim.TREE_LOG_OBJECTID: "log",
- btrfsprim.TREE_LOG_FIXUP_OBJECTID: "log fixup",
- btrfsprim.TREE_RELOC_OBJECTID: "reloc",
- btrfsprim.DATA_RELOC_TREE_OBJECTID: "data reloc",
- btrfsprim.EXTENT_CSUM_OBJECTID: "extent checksum",
- btrfsprim.QUOTA_TREE_OBJECTID: "quota",
- btrfsprim.UUID_TREE_OBJECTID: "uuid",
- btrfsprim.FREE_SPACE_TREE_OBJECTID: "free space",
- btrfsprim.MULTIPLE_OBJECTIDS: "multiple",
- btrfsprim.BLOCK_GROUP_TREE_OBJECTID: "block group",
- }[item.Key.ObjectID]
- if !ok {
- treeName = "file"
- }
- textui.Fprintf(out, "%v tree key %v \n", treeName, item.Key.Format(btrfsprim.ROOT_TREE_OBJECTID))
- printTree(ctx, out, fs, item.Key.ObjectID)
- return nil
- },
- },
- )
- textui.Fprintf(out, "total bytes %v\n", superblock.TotalBytes)
- textui.Fprintf(out, "bytes used %v\n", superblock.BytesUsed)
- textui.Fprintf(out, "uuid %v\n", superblock.FSUUID)
-}
-
-var nodeHeaderSize = binstruct.StaticSize(btrfstree.NodeHeader{})
-
-// printTree mimics btrfs-progs
-// kernel-shared/print-tree.c:btrfs_print_tree() and
-// kernel-shared/print-tree.c:btrfs_print_leaf()
-func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfsprim.ObjID) {
- var itemOffset uint32
- handlers := btrfstree.TreeWalkHandler{
- Node: func(path btrfstree.TreePath, nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) error {
- printHeaderInfo(out, nodeRef.Data)
- itemOffset = nodeRef.Data.Size - uint32(nodeHeaderSize)
- return nil
- },
- PreKeyPointer: func(path btrfstree.TreePath, item btrfstree.KeyPointer) error {
- treeID := path[0].FromTree
- textui.Fprintf(out, "\tkey %v block %v gen %v\n",
- item.Key.Format(treeID),
- item.BlockPtr,
- item.Generation)
- return nil
- },
- Item: func(path btrfstree.TreePath, item btrfstree.Item) error {
- treeID := path[0].FromTree
- i := path.Node(-1).FromItemIdx
- bs, _ := binstruct.Marshal(item.Body)
- itemSize := uint32(len(bs))
- itemOffset -= itemSize
- textui.Fprintf(out, "\titem %v key %v itemoff %v itemsize %v\n",
- i,
- item.Key.Format(treeID),
- itemOffset,
- itemSize)
- switch body := item.Body.(type) {
- case *btrfsitem.FreeSpaceHeader:
- textui.Fprintf(out, "\t\tlocation key %v\n", body.Location.Format(treeID))
- textui.Fprintf(out, "\t\tcache generation %v entries %v bitmaps %v\n",
- body.Generation, body.NumEntries, body.NumBitmaps)
- case *btrfsitem.Inode:
- textui.Fprintf(out, ""+
- "\t\tgeneration %v transid %v size %v nbytes %v\n"+
- "\t\tblock group %v mode %o links %v uid %v gid %v rdev %v\n"+
- "\t\tsequence %v flags %v\n",
- body.Generation, body.TransID, body.Size, body.NumBytes,
- body.BlockGroup, body.Mode, body.NLink, body.UID, body.GID, body.RDev,
- body.Sequence, body.Flags)
- textui.Fprintf(out, "\t\tatime %v\n", fmtTime(body.ATime))
- textui.Fprintf(out, "\t\tctime %v\n", fmtTime(body.CTime))
- textui.Fprintf(out, "\t\tmtime %v\n", fmtTime(body.MTime))
- textui.Fprintf(out, "\t\totime %v\n", fmtTime(body.OTime))
- case *btrfsitem.InodeRefs:
- for _, ref := range body.Refs {
- textui.Fprintf(out, "\t\tindex %v namelen %v name: %s\n",
- ref.Index, ref.NameLen, ref.Name)
- }
- // case btrfsitem.INODE_EXTREF_KEY:
- // // TODO
- case *btrfsitem.DirEntry:
- textui.Fprintf(out, "\t\tlocation key %v type %v\n",
- body.Location.Format(treeID), body.Type)
- textui.Fprintf(out, "\t\ttransid %v data_len %v name_len %v\n",
- body.TransID, body.DataLen, body.NameLen)
- textui.Fprintf(out, "\t\tname: %s\n", body.Name)
- if len(body.Data) > 0 {
- textui.Fprintf(out, "\t\tdata %s\n", body.Data)
- }
- // case btrfsitem.DIR_LOG_INDEX_KEY, btrfsitem.DIR_LOG_ITEM_KEY:
- // // TODO
- case *btrfsitem.Root:
- textui.Fprintf(out, "\t\tgeneration %v root_dirid %v bytenr %d byte_limit %v bytes_used %v\n",
- body.Generation, body.RootDirID, body.ByteNr, body.ByteLimit, body.BytesUsed)
- textui.Fprintf(out, "\t\tlast_snapshot %v flags %v refs %v\n",
- body.LastSnapshot, body.Flags, body.Refs)
- textui.Fprintf(out, "\t\tdrop_progress key %v drop_level %v\n",
- body.DropProgress.Format(treeID), body.DropLevel)
- textui.Fprintf(out, "\t\tlevel %v generation_v2 %v\n",
- body.Level, body.GenerationV2)
- if body.Generation == body.GenerationV2 {
- textui.Fprintf(out, "\t\tuuid %v\n", body.UUID)
- textui.Fprintf(out, "\t\tparent_uuid %v\n", body.ParentUUID)
- textui.Fprintf(out, "\t\treceived_uuid %v\n", body.ReceivedUUID)
- textui.Fprintf(out, "\t\tctransid %v otransid %v stransid %v rtransid %v\n",
- body.CTransID, body.OTransID, body.STransID, body.RTransID)
- textui.Fprintf(out, "\t\tctime %v\n", fmtTime(body.CTime))
- textui.Fprintf(out, "\t\totime %v\n", fmtTime(body.OTime))
- textui.Fprintf(out, "\t\tstime %v\n", fmtTime(body.STime))
- textui.Fprintf(out, "\t\trtime %v\n", fmtTime(body.RTime))
- }
- case *btrfsitem.RootRef:
- var tag string
- switch item.Key.ItemType {
- case btrfsitem.ROOT_REF_KEY:
- tag = "ref"
- case btrfsitem.ROOT_BACKREF_KEY:
- tag = "backref"
- default:
- tag = textui.Sprintf("(error: unhandled RootRef item type: %v)", item.Key.ItemType)
- }
- textui.Fprintf(out, "\t\troot %v key dirid %v sequence %v name %s\n",
- tag, body.DirID, body.Sequence, body.Name)
- case *btrfsitem.Extent:
- textui.Fprintf(out, "\t\trefs %v gen %v flags %v\n",
- body.Head.Refs, body.Head.Generation, body.Head.Flags)
- if body.Head.Flags.Has(btrfsitem.EXTENT_FLAG_TREE_BLOCK) {
- textui.Fprintf(out, "\t\ttree block key %v level %v\n",
- body.Info.Key.Format(treeID), body.Info.Level)
- }
- printExtentInlineRefs(out, body.Refs)
- case *btrfsitem.Metadata:
- textui.Fprintf(out, "\t\trefs %v gen %v flags %v\n",
- body.Head.Refs, body.Head.Generation, body.Head.Flags)
- textui.Fprintf(out, "\t\ttree block skinny level %v\n", item.Key.Offset)
- printExtentInlineRefs(out, body.Refs)
- // case btrfsitem.EXTENT_DATA_REF_KEY:
- // // TODO
- // case btrfsitem.SHARED_DATA_REF_KEY:
- // // TODO
- case *btrfsitem.ExtentCSum:
- start := btrfsvol.LogicalAddr(item.Key.Offset)
- textui.Fprintf(out, "\t\trange start %d end %d length %d",
- start, start.Add(body.Size()), body.Size())
- sumsPerLine := slices.Max(1, len(btrfssum.CSum{})/body.ChecksumSize/2)
-
- i := 0
- _ = body.Walk(ctx, func(pos btrfsvol.LogicalAddr, sum btrfssum.ShortSum) error {
- if i%sumsPerLine == 0 {
- textui.Fprintf(out, "\n\t\t")
- } else {
- textui.Fprintf(out, " ")
- }
- textui.Fprintf(out, "[%d] 0x%x", pos, sum)
- i++
- return nil
- })
- textui.Fprintf(out, "\n")
- case *btrfsitem.FileExtent:
- textui.Fprintf(out, "\t\tgeneration %v type %v\n",
- body.Generation, body.Type)
- switch body.Type {
- case btrfsitem.FILE_EXTENT_INLINE:
- textui.Fprintf(out, "\t\tinline extent data size %v ram_bytes %v compression %v\n",
- len(body.BodyInline), body.RAMBytes, body.Compression)
- case btrfsitem.FILE_EXTENT_PREALLOC:
- textui.Fprintf(out, "\t\tprealloc data disk byte %v nr %v\n",
- body.BodyExtent.DiskByteNr,
- body.BodyExtent.DiskNumBytes)
- textui.Fprintf(out, "\t\tprealloc data offset %v nr %v\n",
- body.BodyExtent.Offset,
- body.BodyExtent.NumBytes)
- case btrfsitem.FILE_EXTENT_REG:
- textui.Fprintf(out, "\t\textent data disk byte %d nr %d\n",
- body.BodyExtent.DiskByteNr,
- body.BodyExtent.DiskNumBytes)
- textui.Fprintf(out, "\t\textent data offset %d nr %d ram %v\n",
- body.BodyExtent.Offset,
- body.BodyExtent.NumBytes,
- body.RAMBytes)
- textui.Fprintf(out, "\t\textent compression %v\n",
- body.Compression)
- default:
- textui.Fprintf(out, "\t\t(error) unknown file extent type %v", body.Type)
- }
- case *btrfsitem.BlockGroup:
- textui.Fprintf(out, "\t\tblock group used %v chunk_objectid %v flags %v\n",
- body.Used, body.ChunkObjectID, body.Flags)
- case *btrfsitem.FreeSpaceInfo:
- textui.Fprintf(out, "\t\tfree space info extent count %v flags %d\n",
- body.ExtentCount, body.Flags)
- case *btrfsitem.FreeSpaceBitmap:
- textui.Fprintf(out, "\t\tfree space bitmap\n")
- case *btrfsitem.Chunk:
- textui.Fprintf(out, "\t\tlength %d owner %d stripe_len %v type %v\n",
- body.Head.Size, body.Head.Owner, body.Head.StripeLen, body.Head.Type)
- textui.Fprintf(out, "\t\tio_align %v io_width %v sector_size %v\n",
- body.Head.IOOptimalAlign, body.Head.IOOptimalWidth, body.Head.IOMinSize)
- textui.Fprintf(out, "\t\tnum_stripes %v sub_stripes %v\n",
- body.Head.NumStripes, body.Head.SubStripes)
- for i, stripe := range body.Stripes {
- textui.Fprintf(out, "\t\t\tstripe %v devid %d offset %d\n",
- i, stripe.DeviceID, stripe.Offset)
- textui.Fprintf(out, "\t\t\tdev_uuid %v\n",
- stripe.DeviceUUID)
- }
- case *btrfsitem.Dev:
- textui.Fprintf(out, ""+
- "\t\tdevid %d total_bytes %v bytes_used %v\n"+
- "\t\tio_align %v io_width %v sector_size %v type %v\n"+
- "\t\tgeneration %v start_offset %v dev_group %v\n"+
- "\t\tseek_speed %v bandwidth %v\n"+
- "\t\tuuid %v\n"+
- "\t\tfsid %v\n",
- body.DevID, body.NumBytes, body.NumBytesUsed,
- body.IOOptimalAlign, body.IOOptimalWidth, body.IOMinSize, body.Type,
- body.Generation, body.StartOffset, body.DevGroup,
- body.SeekSpeed, body.Bandwidth,
- body.DevUUID,
- body.FSUUID)
- case *btrfsitem.DevExtent:
- textui.Fprintf(out, ""+
- "\t\tdev extent chunk_tree %d\n"+
- "\t\tchunk_objectid %v chunk_offset %d length %d\n"+
- "\t\tchunk_tree_uuid %v\n",
- body.ChunkTree, body.ChunkObjectID, body.ChunkOffset, body.Length,
- body.ChunkTreeUUID)
- case *btrfsitem.QGroupStatus:
- textui.Fprintf(out, ""+
- "\t\tversion %v generation %v flags %v scan %d\n",
- body.Version, body.Generation, body.Flags, body.RescanProgress)
- case *btrfsitem.QGroupInfo:
- textui.Fprintf(out, ""+
- "\t\tgeneration %v\n"+
- "\t\treferenced %d referenced_compressed %d\n"+
- "\t\texclusive %d exclusive_compressed %d\n",
- body.Generation,
- body.ReferencedBytes, body.ReferencedBytesCompressed,
- body.ExclusiveBytes, body.ExclusiveBytesCompressed)
- case *btrfsitem.QGroupLimit:
- textui.Fprintf(out, ""+
- "\t\tflags %x\n"+
- "\t\tmax_referenced %d max_exclusive %d\n"+
- "\t\trsv_referenced %d rsv_exclusive %d\n",
- uint64(body.Flags),
- body.MaxReferenced, body.MaxExclusive,
- body.RsvReferenced, body.RsvExclusive)
- case *btrfsitem.UUIDMap:
- textui.Fprintf(out, "\t\tsubvol_id %d\n", body.ObjID)
- // case btrfsitem.STRING_ITEM_KEY:
- // // TODO
- case *btrfsitem.DevStats:
- textui.Fprintf(out, "\t\tpersistent item objectid %v offset %v\n",
- item.Key.ObjectID.Format(treeID), item.Key.Offset)
- switch item.Key.ObjectID {
- case btrfsprim.DEV_STATS_OBJECTID:
- textui.Fprintf(out, "\t\tdevice stats\n")
- textui.Fprintf(out, "\t\twrite_errs %v read_errs %v flush_errs %v corruption_errs %v generation %v\n",
- body.Values[btrfsitem.DEV_STAT_WRITE_ERRS],
- body.Values[btrfsitem.DEV_STAT_READ_ERRS],
- body.Values[btrfsitem.DEV_STAT_FLUSH_ERRS],
- body.Values[btrfsitem.DEV_STAT_CORRUPTION_ERRS],
- body.Values[btrfsitem.DEV_STAT_GENERATION_ERRS])
- default:
- textui.Fprintf(out, "\t\tunknown persistent item objectid %v\n", item.Key.ObjectID)
- }
- // case btrfsitem.TEMPORARY_ITEM_KEY:
- // // TODO
- case *btrfsitem.Empty:
- switch item.Key.ItemType {
- case btrfsitem.ORPHAN_ITEM_KEY: // 48
- textui.Fprintf(out, "\t\torphan item\n")
- case btrfsitem.TREE_BLOCK_REF_KEY: // 176
- textui.Fprintf(out, "\t\ttree block backref\n")
- case btrfsitem.SHARED_BLOCK_REF_KEY: // 182
- textui.Fprintf(out, "\t\tshared block backref\n")
- case btrfsitem.FREE_SPACE_EXTENT_KEY: // 199
- textui.Fprintf(out, "\t\tfree space extent\n")
- case btrfsitem.QGROUP_RELATION_KEY: // 246
- // do nothing
- // case btrfsitem.EXTENT_REF_V0_KEY:
- // textui.Fprintf(out, "\t\textent ref v0 (deprecated)\n")
- // case btrfsitem.CSUM_ITEM_KEY:
- // textui.Fprintf(out, "\t\tcsum item\n")
- default:
- textui.Fprintf(out, "\t\t(error) unhandled empty item type: %v\n", item.Key.ItemType)
- }
- case *btrfsitem.Error:
- textui.Fprintf(out, "\t\t(error) error item: %v\n", body.Err)
- default:
- textui.Fprintf(out, "\t\t(error) unhandled item type: %T\n", body)
- }
- return nil
- },
- }
- handlers.BadItem = handlers.Item
- fs.TreeWalk(
- ctx,
- treeID,
- func(err *btrfstree.TreeError) {
- dlog.Error(ctx, err)
- },
- handlers,
- )
-}
-
-// printHeaderInfo mimics btrfs-progs kernel-shared/print-tree.c:print_header_info()
-func printHeaderInfo(out io.Writer, node btrfstree.Node) {
- var typename string
- if node.Head.Level > 0 { // internal node
- typename = "node"
- textui.Fprintf(out, "node %v level %v items %v free space %v",
- node.Head.Addr,
- node.Head.Level,
- node.Head.NumItems,
- node.MaxItems()-node.Head.NumItems)
- } else { // leaf node
- typename = "leaf"
- textui.Fprintf(out, "leaf %d items %v free space %v",
- node.Head.Addr,
- node.Head.NumItems,
- node.LeafFreeSpace())
- }
- textui.Fprintf(out, " generation %v owner %v\n",
- node.Head.Generation,
- node.Head.Owner)
-
- textui.Fprintf(out, "%v %d flags %v backref revision %v\n",
- typename,
- node.Head.Addr,
- node.Head.Flags,
- node.Head.BackrefRev)
-
- textui.Fprintf(out, "checksum stored %v\n", node.Head.Checksum.Fmt(node.ChecksumType))
- if calcSum, err := node.CalculateChecksum(); err != nil {
- textui.Fprintf(out, "checksum calced %v\n", err)
- } else {
- textui.Fprintf(out, "checksum calced %v\n", calcSum.Fmt(node.ChecksumType))
- }
-
- textui.Fprintf(out, "fs uuid %v\n", node.Head.MetadataUUID)
- textui.Fprintf(out, "chunk uuid %v\n", node.Head.ChunkTreeUUID)
-}
-
-// printExtentInlineRefs mimics part of btrfs-progs kernel-shared/print-tree.c:print_extent_item()
-func printExtentInlineRefs(out io.Writer, refs []btrfsitem.ExtentInlineRef) {
- for _, ref := range refs {
- switch subitem := ref.Body.(type) {
- case nil:
- switch ref.Type {
- case btrfsitem.TREE_BLOCK_REF_KEY:
- textui.Fprintf(out, "\t\ttree block backref root %v\n",
- btrfsprim.ObjID(ref.Offset))
- case btrfsitem.SHARED_BLOCK_REF_KEY:
- textui.Fprintf(out, "\t\tshared block backref parent %v\n",
- ref.Offset)
- default:
- textui.Fprintf(out, "\t\t(error) unexpected empty sub-item type: %v\n", ref.Type)
- }
- case *btrfsitem.ExtentDataRef:
- textui.Fprintf(out, "\t\textent data backref root %v objectid %v offset %v count %v\n",
- subitem.Root, subitem.ObjectID, subitem.Offset, subitem.Count)
- case *btrfsitem.SharedDataRef:
- textui.Fprintf(out, "\t\tshared data backref parent %v count %v\n",
- ref.Offset, subitem.Count)
- default:
- textui.Fprintf(out, "\t\t(error) unexpected sub-item type: %T\n", subitem)
- }
- }
-}
-
-func fmtTime(t btrfsprim.Time) string {
- return textui.Sprintf("%v.%v (%v)",
- t.Sec, t.NSec, t.ToStd().Format("2006-01-02 15:04:05"))
-}
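
The itemOffset arithmetic in the deleted printTree mirrors the btrfs leaf layout: item bodies are packed downward from the end of the node, so the printed "itemoff" starts at the node size minus the header size and shrinks by each body's marshalled size. A toy illustration (all sizes here are made up for the example):

    package main

    import "fmt"

    func main() {
        const nodeSize = 16384     // illustrative node size
        const nodeHeaderSize = 101 // size of the btrfs node header
        itemOffset := uint32(nodeSize - nodeHeaderSize)
        for i, itemSize := range []uint32{160, 23, 53} { // pretend marshalled body sizes
            itemOffset -= itemSize
            fmt.Printf("item %d itemoff %d itemsize %d\n", i, itemOffset, itemSize)
        }
    }
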
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/blockgroups.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/blockgroups.go
deleted file mode 100644
index 0e2d5a0..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/blockgroups.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildmappings
-
-import (
- "fmt"
- "sort"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/maps"
-)
-
-type BlockGroup struct {
- LAddr btrfsvol.LogicalAddr
- Size btrfsvol.AddrDelta
- Flags btrfsvol.BlockGroupFlags
-}
-
-func DedupBlockGroups(scanResults btrfsinspect.ScanDevicesResult) (map[btrfsvol.LogicalAddr]BlockGroup, error) {
- // Dedup
- bgsSet := make(containers.Set[BlockGroup])
- for _, devResults := range scanResults {
- for _, bg := range devResults.FoundBlockGroups {
- bgsSet.Insert(BlockGroup{
- LAddr: btrfsvol.LogicalAddr(bg.Key.ObjectID),
- Size: btrfsvol.AddrDelta(bg.Key.Offset),
- Flags: bg.BG.Flags,
- })
- }
- }
-
- // Sort
- bgsOrdered := maps.Keys(bgsSet)
- sort.Slice(bgsOrdered, func(i, j int) bool {
- return bgsOrdered[i].LAddr < bgsOrdered[j].LAddr
- })
-
- // Sanity check
- var pos btrfsvol.LogicalAddr
- for _, bg := range bgsOrdered {
- if bg.LAddr < pos || bg.Size < 0 {
- return nil, fmt.Errorf("found block groups are inconsistent")
- }
- pos = bg.LAddr.Add(bg.Size)
- }
-
- // Return. We return a map instead of a slice in order to
- // facilitate easy deletes.
- bgsMap := make(map[btrfsvol.LogicalAddr]BlockGroup, len(bgsSet))
- for bg := range bgsSet {
- bgsMap[bg.LAddr] = bg
- }
- return bgsMap, nil
-}
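
The deleted DedupBlockGroups follows a dedupe-sort-verify shape: collapse duplicates through a set keyed on the whole value, order the survivors by start address, then reject any overlap or negative size. A self-contained sketch of the same pattern with a hypothetical span type:

    package main

    import (
        "fmt"
        "sort"
    )

    type span struct{ start, size int64 }

    // dedupSpans dedupes via a map keyed on the whole struct, sorts by
    // start, and sanity-checks that consecutive spans never overlap.
    func dedupSpans(in []span) ([]span, error) {
        set := make(map[span]struct{}, len(in))
        for _, s := range in {
            set[s] = struct{}{}
        }
        out := make([]span, 0, len(set))
        for s := range set {
            out = append(out, s)
        }
        sort.Slice(out, func(i, j int) bool { return out[i].start < out[j].start })
        var pos int64
        for _, s := range out {
            if s.start < pos || s.size < 0 {
                return nil, fmt.Errorf("spans are inconsistent")
            }
            pos = s.start + s.size
        }
        return out, nil
    }

    func main() {
        fmt.Println(dedupSpans([]span{{0, 10}, {10, 5}, {0, 10}})) // [{0 10} {10 5}] <nil>
    }
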
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go
deleted file mode 100644
index 4724c12..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/fuzzymatchsums.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildmappings
-
-import (
- "context"
- "sort"
-
- "github.com/datawire/dlib/dlog"
- "golang.org/x/text/number"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/maps"
- "git.lukeshu.com/btrfs-progs-ng/lib/textui"
-)
-
-var minFuzzyPct = textui.Tunable(0.5)
-
-type fuzzyRecord struct {
- PAddr btrfsvol.QualifiedPhysicalAddr
- N int
-}
-
-func (a fuzzyRecord) Compare(b fuzzyRecord) int {
- switch {
- case a.N < b.N:
- return -1
- case a.N > b.N:
- return 1
- default:
- return 0
- }
-}
-
-func fuzzyMatchBlockGroupSums(ctx context.Context,
- fs *btrfs.FS,
- blockgroups map[btrfsvol.LogicalAddr]BlockGroup,
- physicalSums map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr],
- logicalSums SumRunWithGaps[btrfsvol.LogicalAddr],
-) error {
- _ctx := ctx
-
- ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.substep", "indexing")
- dlog.Info(ctx, "Indexing physical regions...") // O(m)
- regions := ListUnmappedPhysicalRegions(fs)
- physicalIndex := make(map[btrfssum.ShortSum][]btrfsvol.QualifiedPhysicalAddr)
- if err := WalkUnmappedPhysicalRegions(ctx, physicalSums, regions, func(devID btrfsvol.DeviceID, region btrfssum.SumRun[btrfsvol.PhysicalAddr]) error {
- return region.Walk(ctx, func(paddr btrfsvol.PhysicalAddr, sum btrfssum.ShortSum) error {
- physicalIndex[sum] = append(physicalIndex[sum], btrfsvol.QualifiedPhysicalAddr{
- Dev: devID,
- Addr: paddr,
- })
- return nil
- })
- }); err != nil {
- return err
- }
- dlog.Info(ctx, "... done indexing")
-
- ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.substep", "searching")
- dlog.Info(ctx, "Searching...")
- numBlockgroups := len(blockgroups)
- for i, bgLAddr := range maps.SortedKeys(blockgroups) {
- blockgroup := blockgroups[bgLAddr]
- bgRun := SumsForLogicalRegion(logicalSums, blockgroup.LAddr, blockgroup.Size)
-
- d := bgRun.PatLen()
- matches := make(map[btrfsvol.QualifiedPhysicalAddr]int)
- if err := bgRun.Walk(ctx, func(laddr btrfsvol.LogicalAddr, sum btrfssum.ShortSum) error { // O(n*…
- off := laddr.Sub(bgRun.Addr)
- for _, paddr := range physicalIndex[sum] { // …log(m)) expected but …m) worst
- key := btrfsvol.QualifiedPhysicalAddr{
- Dev: paddr.Dev,
- Addr: paddr.Addr.Add(-off),
- }
- matches[key]++
- }
- return nil
- }); err != nil {
- return err
- }
-
- best := lowestN[fuzzyRecord]{N: 2}
- for paddr, n := range matches { // O(m)
- best.Insert(fuzzyRecord{
- PAddr: paddr,
- N: d - n,
- })
- }
-
- var apply bool
- var matchesStr string
- switch len(best.Dat) {
- case 0: // can happen if there are no sums in the run
- matchesStr = ""
- case 1: // not sure how this can happen, but whatev
- pct := float64(d-best.Dat[0].N) / float64(d)
- matchesStr = textui.Sprintf("%v", number.Percent(pct))
- apply = pct > minFuzzyPct
- case 2:
- pct := float64(d-best.Dat[0].N) / float64(d)
- pct2 := float64(d-best.Dat[1].N) / float64(d)
- matchesStr = textui.Sprintf("best=%v secondbest=%v", number.Percent(pct), number.Percent(pct2))
- apply = pct > minFuzzyPct && pct2 < minFuzzyPct
- }
- lvl := dlog.LogLevelError
- if apply {
- lvl = dlog.LogLevelInfo
- }
- dlog.Logf(ctx, lvl, "(%v/%v) blockgroup[laddr=%v] matches=[%s]; bestpossible=%v (based on %v runs)",
- i+1, numBlockgroups, bgLAddr, matchesStr, number.Percent(bgRun.PctFull()), len(bgRun.Runs))
- if !apply {
- continue
- }
-
- mapping := btrfsvol.Mapping{
- LAddr: blockgroup.LAddr,
- PAddr: best.Dat[0].PAddr,
- Size: blockgroup.Size,
- SizeLocked: true,
- Flags: containers.Optional[btrfsvol.BlockGroupFlags]{
- OK: true,
- Val: blockgroup.Flags,
- },
- }
- if err := fs.LV.AddMapping(mapping); err != nil {
- dlog.Errorf(ctx, "error: %v", err)
- continue
- }
- delete(blockgroups, bgLAddr)
- }
- dlog.Info(ctx, "done searching")
-
- return nil
-}
-
-type lowestN[T containers.Ordered[T]] struct {
- N int
- Dat []T
-}
-
-func (l *lowestN[T]) Insert(v T) {
- switch {
- case len(l.Dat) < l.N:
- l.Dat = append(l.Dat, v)
- case v.Compare(l.Dat[0]) < 0:
- l.Dat[0] = v
- default:
- return
- }
- sort.Slice(l.Dat, func(i, j int) bool {
- return l.Dat[i].Compare(l.Dat[j]) < 0
- })
-}
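
The heart of the deleted fuzzyMatchBlockGroupSums is offset voting: every position where one of a block group's checksums also occurs on disk casts a vote for the physical address the block group would have to start at for the two to line up, and the candidate with the most votes is the best match. A stripped-down sketch (hypothetical types; the real code indexes sums per checksum block rather than per byte):

    package main

    import "fmt"

    // bestStart tallies, for each match between a logical checksum and
    // an indexed physical occurrence, the implied physical start
    // address, and returns the address with the most votes.
    func bestStart(logical []string, physicalIndex map[string][]int64) (start int64, votes int) {
        tally := make(map[int64]int)
        for off, sum := range logical {
            for _, paddr := range physicalIndex[sum] {
                tally[paddr-int64(off)]++ // candidate start implied by this match
            }
        }
        for cand, n := range tally {
            if n > votes {
                start, votes = cand, n
            }
        }
        return start, votes
    }

    func main() {
        physicalIndex := map[string][]int64{"a": {7}, "b": {8}, "c": {9}}
        fmt.Println(bestStart([]string{"a", "b", "c"}, physicalIndex)) // 7 3
    }
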
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/kmp.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/kmp.go
deleted file mode 100644
index 20772ba..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/kmp.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildmappings
-
-import (
- "errors"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
-)
-
-type kmpPattern[K ~int64 | ~int, V comparable] interface {
- PatLen() K
- // Get the value at 'pos' in the sequence. Positions start at
- // 0 and increment naturally. It is invalid to call Get(pos)
- // with a pos that is >= Len(). If there is a gap/wildcard at
- // pos, ok is false.
- PatGet(pos K) (v V, ok bool)
-}
-
-func kmpSelfEq[K ~int64 | ~int, V comparable](s kmpPattern[K, V], aI K, bI K) bool {
- aV, aOK := s.PatGet(aI)
- bV, bOK := s.PatGet(bI)
- if !aOK || !bOK {
- return true
- }
- return aV == bV
-}
-
-// buildKMPTable takes the string 'substr', and returns a table such
-// that 'table[matchLen-1]' is the largest value 'val' for which 'val < matchLen' and
-// 'substr[:val] == substr[matchLen-val:matchLen]'.
-func buildKMPTable[K ~int64 | ~int, V comparable](substr kmpPattern[K, V]) []K {
- substrLen := substr.PatLen()
-
- table := make([]K, substrLen)
- for j := K(0); j < substrLen; j++ {
- if j == 0 {
- // First entry must always be 0 (in order to
- // satisfy 'val < matchLen').
- continue
- }
- val := table[j-1]
- // not a match; go back
- for val > 0 && !kmpSelfEq(substr, j, val) {
- val = table[val-1]
- }
- // is a match; go forward
- if kmpSelfEq(substr, val, j) {
- val++
- }
- table[j] = val
- }
- return table
-}
-
-func kmpEq[K ~int64 | ~int, V comparable](aV V, bS kmpPattern[K, V], bI K) bool {
- bV, ok := bS.PatGet(bI)
- if !ok {
- return true
- }
- return aV == bV
-}
-
-// indexAll returns the starting-position of all possibly-overlapping
-// occurrences of 'substr' in the 'str' sequence.
-//
-// Will hop around in 'substr', but will only get the natural sequence
-// [0...) in order from 'str'.
-//
-// Will panic if the length of 'substr' is 0.
-//
-// The 'substr' may include wildcard characters by having
-// PatGet return ok=false for a position.
-//
-// Uses the Knuth-Morris-Pratt algorithm.
-func indexAll[K ~int64 | ~int, V comparable](str diskio.Sequence[K, V], substr kmpPattern[K, V]) []K {
- table := buildKMPTable(substr)
- substrLen := K(len(table))
- if substrLen == 0 {
- panic(errors.New("rebuildmappings.IndexAll: empty substring"))
- }
-
- var matches []K
- var curMatchBeg K
- var curMatchLen K
-
- strLen := str.SeqLen()
- for pos := K(0); pos < strLen; pos++ {
- chr := str.SeqGet(pos)
-
- // Consider 'chr'
- for curMatchLen > 0 && !kmpEq(chr, substr, curMatchLen) { // shorten the match
- overlap := table[curMatchLen-1]
- curMatchBeg += curMatchLen - overlap
- curMatchLen = overlap
- }
- if kmpEq(chr, substr, curMatchLen) { // lengthen the match
- if curMatchLen == 0 {
- curMatchBeg = pos
- }
- curMatchLen++
- if curMatchLen == substrLen {
- matches = append(matches, curMatchBeg)
- overlap := table[curMatchLen-1]
- curMatchBeg += curMatchLen - overlap
- curMatchLen = overlap
- }
- }
- }
- return matches
-}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/kmp_test.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/kmp_test.go
deleted file mode 100644
index acec9b8..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/kmp_test.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildmappings
-
-import (
- "bytes"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
-)
-
-type bytePattern[K ~int64 | ~int] []byte
-
-var _ kmpPattern[int, byte] = bytePattern[int]{}
-
-// PatLen implements kmpPattern.
-func (s bytePattern[K]) PatLen() K {
- return K(len(s))
-}
-
-// PatGet implements kmpPattern.
-func (s bytePattern[K]) PatGet(i K) (byte, bool) {
- chr := s[int(i)]
- if chr == '.' {
- return 0, false
- }
- return chr, true
-}
-
-func TestBuildKMPTable(t *testing.T) {
- t.Parallel()
- substr := bytePattern[int64]([]byte("ababaa"))
- table := buildKMPTable[int64, byte](substr)
- require.Equal(t,
- []int64{0, 0, 1, 2, 3, 1},
- table)
- for j, val := range table {
- matchLen := j + 1
- assert.Equalf(t, substr[:val], substr[matchLen-int(val):matchLen],
- "for table[%d]=%d", j, val)
- }
-}
-
-func FuzzBuildKMPTable(f *testing.F) {
- f.Add([]byte("ababaa"))
- f.Fuzz(func(t *testing.T, substr []byte) {
- table := buildKMPTable[int64, byte](bytePattern[int64](substr))
- require.Equal(t, len(substr), len(table), "length")
- for j, val := range table {
- matchLen := j + 1
- assert.Equalf(t, substr[:val], substr[matchLen-int(val):matchLen],
- "for table[%d]=%d", j, val)
- }
- })
-}
-
-func NaiveIndexAll(str, substr []byte) []int64 {
- var matches []int64
- for i := range str {
- if bytes.HasPrefix(str[i:], substr) {
- matches = append(matches, int64(i))
- }
- }
- return matches
-}
-
-func FuzzIndexAll(f *testing.F) {
- f.Fuzz(func(t *testing.T, str, substr []byte) {
- if len(substr) == 0 {
- t.Skip()
- }
- t.Logf("str =%q", str)
- t.Logf("substr=%q", substr)
- exp := NaiveIndexAll(str, substr)
- act := indexAll[int64, byte](
- diskio.SliceSequence[int64, byte](str),
- bytePattern[int64](substr))
- assert.Equal(t, exp, act)
- })
-}
-
-func TestKMPWildcard(t *testing.T) {
- t.Parallel()
- type testcase struct {
- InStr string
- InSubstr string
- ExpMatches []int64
- }
- testcases := map[string]testcase{
- "trivial-bar": {
- InStr: "foo_bar",
- InSubstr: "foo.ba.",
- ExpMatches: []int64{0},
- },
-		"trivial-baz": {
- InStr: "foo-baz",
- InSubstr: "foo.ba.",
- ExpMatches: []int64{0},
- },
- "suffix": {
- InStr: "foobarbaz",
- InSubstr: "...baz",
- ExpMatches: []int64{3},
- },
- "overlap": {
- InStr: "foobarbar",
- InSubstr: "...bar",
- ExpMatches: []int64{0, 3},
- },
- }
- for tcName, tc := range testcases {
- tc := tc
- t.Run(tcName, func(t *testing.T) {
- t.Parallel()
- matches := indexAll[int64, byte](
- diskio.StringSequence[int64](tc.InStr),
- bytePattern[int64](tc.InSubstr))
- assert.Equal(t, tc.ExpMatches, matches)
- })
- }
-}
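
In the tests above, bytePattern marks '.' as a wildcard by returning ok=false from PatGet. The per-position comparison used by indexAll (kmpEq, defined in the deleted kmp.go) therefore presumably amounts to the following sketch, in which a wildcard position matches any value:

    // patEq is a hedged sketch of the per-position comparison: a
    // wildcard position (ok == false) matches any character.
    func patEq[K ~int64 | ~int, V comparable](chr V, substr kmpPattern[K, V], i K) bool {
    	exp, ok := substr.PatGet(i)
    	if !ok {
    		return true // wildcard
    	}
    	return chr == exp
    }

The "suffix" and "overlap" cases in TestKMPWildcard exercise exactly this behavior.
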
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/logicalsums.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/logicalsums.go
deleted file mode 100644
index 7c02d05..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/logicalsums.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildmappings
-
-import (
- "context"
- "sort"
- "strings"
-
- "github.com/datawire/dlib/dlog"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/slices"
-)
-
-func ExtractLogicalSums(ctx context.Context, scanResults btrfsinspect.ScanDevicesResult) SumRunWithGaps[btrfsvol.LogicalAddr] {
- var records []btrfsinspect.SysExtentCSum
- for _, devResults := range scanResults {
- records = append(records, devResults.FoundExtentCSums...)
- }
- // Sort lower-generation items earlier; then sort by addr.
- sort.Slice(records, func(i, j int) bool {
- a, b := records[i], records[j]
- switch {
- case a.Generation < b.Generation:
- return true
- case a.Generation > b.Generation:
- return false
- default:
- return a.Sums.Addr < b.Sums.Addr
- }
- })
- if len(records) == 0 {
- return SumRunWithGaps[btrfsvol.LogicalAddr]{}
- }
- sumSize := records[0].Sums.ChecksumSize
-
- // Now build them in to a coherent address space.
- //
- // We can't just reverse-sort by generation to avoid mutations, because given
- //
- // gen1 AAAAAAA
- // gen2 BBBBBBBB
- // gen3 CCCCCCC
- //
- // "AAAAAAA" shouldn't be present, and if we just discard "BBBBBBBB"
- // because it conflicts with "CCCCCCC", then we would erroneously
- // include "AAAAAAA".
- addrspace := new(containers.RBTree[btrfsinspect.SysExtentCSum])
- for _, newRecord := range records {
- for {
- conflict := addrspace.Search(func(oldRecord btrfsinspect.SysExtentCSum) int {
- switch {
- case newRecord.Sums.Addr.Add(newRecord.Sums.Size()) <= oldRecord.Sums.Addr:
- // 'newRecord' is wholly to the left of 'oldRecord'.
- return -1
- case oldRecord.Sums.Addr.Add(oldRecord.Sums.Size()) <= newRecord.Sums.Addr:
- // 'oldRecord' is wholly to the left of 'newRecord'.
- return 1
- default:
- // There is some overlap.
- return 0
- }
- })
- if conflict == nil {
- // We can insert it
- addrspace.Insert(newRecord)
- break
- }
- oldRecord := conflict.Value
- if oldRecord == newRecord {
- // Duplicates are to be expected.
- break
- }
- if oldRecord.Generation < newRecord.Generation {
- // Newer generation wins.
- addrspace.Delete(conflict)
- // loop around to check for more conflicts
- continue
- }
- if oldRecord.Generation > newRecord.Generation {
- // We sorted them! This shouldn't happen.
- panic("should not happen")
- }
-			// Sums are stored multiple times (because of RAID?) but may
-			// be split between items differently between copies, so we
-			// take the union (after verifying that the copies agree on
-			// the overlapping part).
- overlapBeg := slices.Max(
- oldRecord.Sums.Addr,
- newRecord.Sums.Addr)
- overlapEnd := slices.Min(
- oldRecord.Sums.Addr.Add(oldRecord.Sums.Size()),
- newRecord.Sums.Addr.Add(newRecord.Sums.Size()))
-
- oldOverlapBeg := int(overlapBeg.Sub(oldRecord.Sums.Addr)/btrfssum.BlockSize) * sumSize
- oldOverlapEnd := int(overlapEnd.Sub(oldRecord.Sums.Addr)/btrfssum.BlockSize) * sumSize
- oldOverlap := oldRecord.Sums.Sums[oldOverlapBeg:oldOverlapEnd]
-
- newOverlapBeg := int(overlapBeg.Sub(newRecord.Sums.Addr)/btrfssum.BlockSize) * sumSize
- newOverlapEnd := int(overlapEnd.Sub(newRecord.Sums.Addr)/btrfssum.BlockSize) * sumSize
- newOverlap := newRecord.Sums.Sums[newOverlapBeg:newOverlapEnd]
-
- if oldOverlap != newOverlap {
- dlog.Errorf(ctx, "mismatch: {gen:%v, addr:%v, size:%v} conflicts with {gen:%v, addr:%v, size:%v}",
- oldRecord.Generation, oldRecord.Sums.Addr, oldRecord.Sums.Size(),
- newRecord.Generation, newRecord.Sums.Addr, newRecord.Sums.Size())
- break
- }
- // OK, we match, so take the union.
- var prefix, suffix btrfssum.ShortSum
- switch {
- case oldRecord.Sums.Addr < overlapBeg:
- prefix = oldRecord.Sums.Sums[:oldOverlapBeg]
- case newRecord.Sums.Addr < overlapBeg:
- prefix = newRecord.Sums.Sums[:newOverlapBeg]
- }
- switch {
- case oldRecord.Sums.Addr.Add(oldRecord.Sums.Size()) > overlapEnd:
- suffix = oldRecord.Sums.Sums[oldOverlapEnd:]
- case newRecord.Sums.Addr.Add(newRecord.Sums.Size()) > overlapEnd:
- suffix = newRecord.Sums.Sums[newOverlapEnd:]
- }
- unionRecord := btrfsinspect.SysExtentCSum{
- Generation: oldRecord.Generation,
- Sums: btrfsitem.ExtentCSum{
- SumRun: btrfssum.SumRun[btrfsvol.LogicalAddr]{
- ChecksumSize: oldRecord.Sums.ChecksumSize,
- Addr: slices.Min(oldRecord.Sums.Addr, newRecord.Sums.Addr),
- Sums: prefix + oldOverlap + suffix,
- },
- },
- }
- addrspace.Delete(conflict)
- newRecord = unionRecord
- // loop around to check for more conflicts
- }
- }
-
- // Now flatten that RBTree in to a SumRunWithGaps.
- var flattened SumRunWithGaps[btrfsvol.LogicalAddr]
- var curAddr btrfsvol.LogicalAddr
- var curSums strings.Builder
- addrspace.Range(func(node *containers.RBNode[btrfsinspect.SysExtentCSum]) bool {
- curEnd := curAddr + (btrfsvol.LogicalAddr(curSums.Len()/sumSize) * btrfssum.BlockSize)
- if node.Value.Sums.Addr != curEnd {
- if curSums.Len() > 0 {
- flattened.Runs = append(flattened.Runs, btrfssum.SumRun[btrfsvol.LogicalAddr]{
- ChecksumSize: sumSize,
- Addr: curAddr,
- Sums: btrfssum.ShortSum(curSums.String()),
- })
- }
- curAddr = node.Value.Sums.Addr
- curSums.Reset()
- }
- curSums.WriteString(string(node.Value.Sums.Sums))
- return true
- })
- if curSums.Len() > 0 {
- flattened.Runs = append(flattened.Runs, btrfssum.SumRun[btrfsvol.LogicalAddr]{
- ChecksumSize: sumSize,
- Addr: curAddr,
- Sums: btrfssum.ShortSum(curSums.String()),
- })
- }
- flattened.Addr = flattened.Runs[0].Addr
- last := flattened.Runs[len(flattened.Runs)-1]
- end := last.Addr.Add(last.Size())
- flattened.Size = end.Sub(flattened.Addr)
-
- return flattened
-}
-
-func ListUnmappedLogicalRegions(fs *btrfs.FS, logicalSums SumRunWithGaps[btrfsvol.LogicalAddr]) []btrfssum.SumRun[btrfsvol.LogicalAddr] {
- // There are a lot of ways this algorithm could be made
- // faster.
- var ret []btrfssum.SumRun[btrfsvol.LogicalAddr]
- var cur struct {
- Addr btrfsvol.LogicalAddr
- Size btrfsvol.AddrDelta
- }
- for _, run := range logicalSums.Runs {
- for addr := run.Addr; addr < run.Addr.Add(run.Size()); addr += btrfssum.BlockSize {
- if _, maxlen := fs.LV.Resolve(addr); maxlen < btrfssum.BlockSize {
-				if cur.Size == 0 {
-					cur.Addr = addr
-				}
- cur.Size += btrfssum.BlockSize
- } else if cur.Size > 0 {
- begIdx := int(cur.Addr.Sub(run.Addr)/btrfssum.BlockSize) * run.ChecksumSize
- lenIdx := (int(cur.Size) / btrfssum.BlockSize) * run.ChecksumSize
- endIdx := begIdx + lenIdx
- ret = append(ret, btrfssum.SumRun[btrfsvol.LogicalAddr]{
- ChecksumSize: run.ChecksumSize,
- Addr: cur.Addr,
- Sums: run.Sums[begIdx:endIdx],
- })
- cur.Size = 0
- }
- }
- if cur.Size > 0 {
- begIdx := int(cur.Addr.Sub(run.Addr)/btrfssum.BlockSize) * run.ChecksumSize
- lenIdx := (int(cur.Size) / btrfssum.BlockSize) * run.ChecksumSize
- endIdx := begIdx + lenIdx
- ret = append(ret, btrfssum.SumRun[btrfsvol.LogicalAddr]{
- ChecksumSize: run.ChecksumSize,
- Addr: cur.Addr,
- Sums: run.Sums[begIdx:endIdx],
- })
- cur.Size = 0
- }
- }
- return ret
-}
-
-func SumsForLogicalRegion(sums SumRunWithGaps[btrfsvol.LogicalAddr], beg btrfsvol.LogicalAddr, size btrfsvol.AddrDelta) SumRunWithGaps[btrfsvol.LogicalAddr] {
- runs := SumRunWithGaps[btrfsvol.LogicalAddr]{
- Addr: beg,
- Size: size,
- }
- for laddr := beg; laddr < beg.Add(size); {
- run, next, ok := sums.RunForAddr(laddr)
- if !ok {
- laddr = next
- continue
- }
- off := int((laddr-run.Addr)/btrfssum.BlockSize) * run.ChecksumSize
- deltaAddr := slices.Min[btrfsvol.AddrDelta](
- size-laddr.Sub(beg),
- btrfsvol.AddrDelta((len(run.Sums)-off)/run.ChecksumSize)*btrfssum.BlockSize)
- deltaOff := int(deltaAddr/btrfssum.BlockSize) * run.ChecksumSize
- runs.Runs = append(runs.Runs, btrfssum.SumRun[btrfsvol.LogicalAddr]{
- ChecksumSize: run.ChecksumSize,
- Addr: laddr,
- Sums: run.Sums[off : off+deltaOff],
- })
- laddr = laddr.Add(deltaAddr)
- }
- return runs
-}
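
The conflict loop in the deleted ExtractLogicalSums is easier to see on a simplified model: records arrive sorted by ascending generation, and each insertion evicts any older overlapping record (the real code additionally unions equal-generation overlaps after checking that the copies agree). A toy sketch with made-up types:

    // insertRecord models the eviction rule only: 'space' holds
    // non-conflicting records, 'n' is the incoming record, and records
    // are inserted in ascending-generation order.
    type rec struct {
    	Gen      int
    	Beg, End int64 // half-open [Beg,End)
    }

    func insertRecord(space []rec, n rec) []rec {
    	kept := space[:0]
    	for _, o := range space {
    		overlaps := n.Beg < o.End && o.Beg < n.End
    		if overlaps && o.Gen < n.Gen {
    			continue // newer generation wins; drop the older record
    		}
    		kept = append(kept, o)
    	}
    	return append(kept, n)
    }

The ascending-generation insertion order is what makes eviction safe: it can only flow from newer to older, which is exactly the "AAAAAAA must not resurface" property the comment in ExtractLogicalSums describes.
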
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go
deleted file mode 100644
index a3e724e..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/matchsums.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildmappings
-
-import (
- "context"
-
- "github.com/datawire/dlib/dlog"
- "golang.org/x/text/number"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/maps"
-)
-
-func matchBlockGroupSums(ctx context.Context,
- fs *btrfs.FS,
- blockgroups map[btrfsvol.LogicalAddr]BlockGroup,
- physicalSums map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr],
- logicalSums SumRunWithGaps[btrfsvol.LogicalAddr],
-) error {
- regions := ListUnmappedPhysicalRegions(fs)
- numBlockgroups := len(blockgroups)
- for i, bgLAddr := range maps.SortedKeys(blockgroups) {
- blockgroup := blockgroups[bgLAddr]
- bgRun := SumsForLogicalRegion(logicalSums, blockgroup.LAddr, blockgroup.Size)
- if len(bgRun.Runs) == 0 {
- dlog.Errorf(ctx, "(%v/%v) blockgroup[laddr=%v] can't be matched because it has 0 runs",
- i+1, numBlockgroups, bgLAddr)
- continue
- }
-
- var matches []btrfsvol.QualifiedPhysicalAddr
- if err := WalkUnmappedPhysicalRegions(ctx, physicalSums, regions, func(devID btrfsvol.DeviceID, region btrfssum.SumRun[btrfsvol.PhysicalAddr]) error {
- rawMatches := indexAll[int, btrfssum.ShortSum](region, bgRun)
- for _, match := range rawMatches {
- matches = append(matches, btrfsvol.QualifiedPhysicalAddr{
- Dev: devID,
- Addr: region.Addr + (btrfsvol.PhysicalAddr(match) * btrfssum.BlockSize),
- })
- }
- return nil
- }); err != nil {
- return err
- }
-
- lvl := dlog.LogLevelError
- if len(matches) == 1 {
- lvl = dlog.LogLevelInfo
- }
- dlog.Logf(ctx, lvl, "(%v/%v) blockgroup[laddr=%v] has %v matches based on %v coverage from %v runs",
- i+1, numBlockgroups, bgLAddr, len(matches), number.Percent(bgRun.PctFull()), len(bgRun.Runs))
- if len(matches) != 1 {
- continue
- }
-
- mapping := btrfsvol.Mapping{
- LAddr: blockgroup.LAddr,
- PAddr: matches[0],
- Size: blockgroup.Size,
- SizeLocked: true,
- Flags: containers.Optional[btrfsvol.BlockGroupFlags]{
- OK: true,
- Val: blockgroup.Flags,
- },
- }
- if err := fs.LV.AddMapping(mapping); err != nil {
- dlog.Errorf(ctx, "error: %v", err)
- continue
- }
- delete(blockgroups, bgLAddr)
- }
- return nil
-}
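
matchBlockGroupSums above only trusts a result when the search over a device's unmapped checksums returns exactly one hit: zero means the data was never summed or is gone, and two or more is ambiguous, so the blockgroup is left for the fuzzy pass. Condensed into a hypothetical helper (types as in the deleted file):

    // uniqueMatch converts KMP hit indices (counted in checksum blocks
    // from the start of the region) to a physical address, accepting
    // only an unambiguous single hit.
    func uniqueMatch(regionAddr btrfsvol.PhysicalAddr, hits []int) (btrfsvol.PhysicalAddr, bool) {
    	if len(hits) != 1 {
    		return 0, false // 0 hits: not found; >1 hits: ambiguous
    	}
    	return regionAddr + btrfsvol.PhysicalAddr(hits[0])*btrfssum.BlockSize, true
    }
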
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/physicalsums.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/physicalsums.go
deleted file mode 100644
index da22fbf..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/physicalsums.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildmappings
-
-import (
- "context"
- "sort"
-
- "golang.org/x/exp/constraints"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
- "git.lukeshu.com/btrfs-progs-ng/lib/maps"
-)
-
-func ExtractPhysicalSums(scanResults btrfsinspect.ScanDevicesResult) map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr] {
- ret := make(map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr], len(scanResults))
- for devID, devResults := range scanResults {
- ret[devID] = devResults.Checksums
- }
- return ret
-}
-
-type PhysicalRegion struct {
- Beg, End btrfsvol.PhysicalAddr
-}
-
-func ListUnmappedPhysicalRegions(fs *btrfs.FS) map[btrfsvol.DeviceID][]PhysicalRegion {
- regions := make(map[btrfsvol.DeviceID][]PhysicalRegion)
- pos := make(map[btrfsvol.DeviceID]btrfsvol.PhysicalAddr)
- mappings := fs.LV.Mappings()
- sort.Slice(mappings, func(i, j int) bool {
- return mappings[i].PAddr.Compare(mappings[j].PAddr) < 0
- })
- for _, mapping := range mappings {
- if pos[mapping.PAddr.Dev] < mapping.PAddr.Addr {
- regions[mapping.PAddr.Dev] = append(regions[mapping.PAddr.Dev], PhysicalRegion{
- Beg: pos[mapping.PAddr.Dev],
- End: mapping.PAddr.Addr,
- })
- }
- if pos[mapping.PAddr.Dev] < mapping.PAddr.Addr.Add(mapping.Size) {
- pos[mapping.PAddr.Dev] = mapping.PAddr.Addr.Add(mapping.Size)
- }
- }
- for devID, dev := range fs.LV.PhysicalVolumes() {
- devSize := dev.Size()
- if pos[devID] < devSize {
- regions[devID] = append(regions[devID], PhysicalRegion{
- Beg: pos[devID],
- End: devSize,
- })
- }
- }
- return regions
-}
-
-func roundUp[T constraints.Integer](x, multiple T) T {
- return ((x + multiple - 1) / multiple) * multiple
-}
-
-func WalkUnmappedPhysicalRegions(ctx context.Context,
- physicalSums map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr],
- gaps map[btrfsvol.DeviceID][]PhysicalRegion,
- fn func(btrfsvol.DeviceID, btrfssum.SumRun[btrfsvol.PhysicalAddr]) error,
-) error {
- for _, devID := range maps.SortedKeys(gaps) {
- for _, gap := range gaps[devID] {
- if err := ctx.Err(); err != nil {
- return err
- }
- begAddr := roundUp(gap.Beg, btrfssum.BlockSize)
- begOff := int(begAddr/btrfssum.BlockSize) * physicalSums[devID].ChecksumSize
- endOff := int(gap.End/btrfssum.BlockSize) * physicalSums[devID].ChecksumSize
- if err := fn(devID, btrfssum.SumRun[btrfsvol.PhysicalAddr]{
- ChecksumSize: physicalSums[devID].ChecksumSize,
- Addr: begAddr,
- Sums: physicalSums[devID].Sums[begOff:endOff],
- }); err != nil {
- return err
- }
- }
- }
- return nil
-}
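
ListUnmappedPhysicalRegions above is a standard sweep over intervals sorted by start address: keep a per-device high-water mark, emit a hole whenever the next mapping begins beyond it, and close with the tail up to the device size. A single-device sketch with made-up types:

    // deviceGaps computes the unmapped holes in [0, devSize) given
    // 'mapped' sorted by Beg; overlapping intervals are fine, because
    // 'pos' tracks the high-water mark of mapped space.
    type interval struct{ Beg, End int64 }

    func deviceGaps(mapped []interval, devSize int64) []interval {
    	var holes []interval
    	var pos int64
    	for _, m := range mapped {
    		if pos < m.Beg {
    			holes = append(holes, interval{pos, m.Beg})
    		}
    		if pos < m.End {
    			pos = m.End
    		}
    	}
    	if pos < devSize {
    		holes = append(holes, interval{pos, devSize})
    	}
    	return holes
    }
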
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go
deleted file mode 100644
index cdf5e5a..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildmappings
-
-import (
- "context"
- "fmt"
-
- "github.com/datawire/dlib/dlog"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/maps"
- "git.lukeshu.com/btrfs-progs-ng/lib/textui"
-)
-
-func getNodeSize(fs *btrfs.FS) (btrfsvol.AddrDelta, error) {
- sb, err := fs.Superblock()
- if err != nil {
- return 0, err
- }
- return btrfsvol.AddrDelta(sb.NodeSize), nil
-}
-
-func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.ScanDevicesResult) error {
- nodeSize, err := getNodeSize(fs)
- if err != nil {
- return err
- }
-
- var numChunks, numDevExts, numBlockGroups, numNodes int
- devIDs := maps.SortedKeys(scanResults)
- devices := fs.LV.PhysicalVolumes()
- for _, devID := range devIDs {
- if _, ok := devices[devID]; !ok {
- return fmt.Errorf("device ID %v mentioned in scan results is not part of the filesystem", devID)
- }
- devResults := scanResults[devID]
- numChunks += len(devResults.FoundChunks)
- numDevExts += len(devResults.FoundDevExtents)
- numBlockGroups += len(devResults.FoundBlockGroups)
- for _, paddrs := range devResults.FoundNodes {
- numNodes += len(paddrs)
- }
- }
- dlog.Infof(ctx, "plan: 1/6 process %d chunks", numChunks)
- dlog.Infof(ctx, "plan: 2/6 process %d device extents", numDevExts)
- dlog.Infof(ctx, "plan: 3/6 process %d nodes", numNodes)
- dlog.Infof(ctx, "plan: 4/6 process %d block groups", numBlockGroups)
- dlog.Infof(ctx, "plan: 5/6 search for block groups in checksum map (exact)")
- dlog.Infof(ctx, "plan: 6/6 search for block groups in checksum map (fuzzy)")
-
- _ctx := ctx
- ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "1/6")
- dlog.Infof(_ctx, "1/6: Processing %d chunks...", numChunks)
- for _, devID := range devIDs {
- devResults := scanResults[devID]
- for _, chunk := range devResults.FoundChunks {
- for _, mapping := range chunk.Chunk.Mappings(chunk.Key) {
- if err := fs.LV.AddMapping(mapping); err != nil {
- dlog.Errorf(ctx, "error: adding chunk: %v", err)
- }
- }
- }
- }
- dlog.Info(_ctx, "... done processing chunks")
-
- ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "2/6")
- dlog.Infof(_ctx, "2/6: Processing %d device extents...", numDevExts)
- for _, devID := range devIDs {
- devResults := scanResults[devID]
- for _, ext := range devResults.FoundDevExtents {
- if err := fs.LV.AddMapping(ext.DevExt.Mapping(ext.Key)); err != nil {
- dlog.Errorf(ctx, "error: adding devext: %v", err)
- }
- }
- }
- dlog.Info(_ctx, "... done processing device extents")
-
- // Do the nodes "last" to avoid bloating the mappings table
- // too much. (Because nodes are numerous and small, while the
- // others are few and large; so it is likely that many of the
- // nodes will be subsumed by other things.)
- ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "3/6")
- dlog.Infof(_ctx, "3/6: Processing %d nodes...", numNodes)
- for _, devID := range devIDs {
- devResults := scanResults[devID]
- // Sort them so that progress numbers are predictable.
- for _, laddr := range maps.SortedKeys(devResults.FoundNodes) {
- for _, paddr := range devResults.FoundNodes[laddr] {
- if err := fs.LV.AddMapping(btrfsvol.Mapping{
- LAddr: laddr,
- PAddr: btrfsvol.QualifiedPhysicalAddr{
- Dev: devID,
- Addr: paddr,
- },
- Size: nodeSize,
- SizeLocked: false,
- }); err != nil {
- dlog.Errorf(ctx, "error: adding node ident: %v", err)
- }
- }
- }
- }
- dlog.Info(_ctx, "... done processing nodes")
-
- // Use block groups to add missing flags (and as a hint to
- // combine node entries).
- ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "4/6")
- dlog.Infof(_ctx, "4/6: Processing %d block groups...", numBlockGroups)
-	// First dedup them: block groups change on every allocation, and
-	// CoW means that they bounce around a lot, so there are likely
-	// oodles of duplicates.
- bgs, err := DedupBlockGroups(scanResults)
- if err != nil {
- return err
- }
- dlog.Infof(ctx, "... de-duplicated to %d block groups", len(bgs))
- for _, bgLAddr := range maps.SortedKeys(bgs) {
- bg := bgs[bgLAddr]
- otherLAddr, otherPAddr := fs.LV.ResolveAny(bg.LAddr, bg.Size)
- if otherLAddr < 0 || otherPAddr.Addr < 0 {
- dlog.Errorf(ctx, "error: could not pair blockgroup laddr=%v (size=%v flags=%v) with a mapping",
- bg.LAddr, bg.Size, bg.Flags)
- continue
- }
-
- offsetWithinChunk := otherLAddr.Sub(bg.LAddr)
- mapping := btrfsvol.Mapping{
- LAddr: bg.LAddr,
- PAddr: otherPAddr.Add(-offsetWithinChunk),
- Size: bg.Size,
- SizeLocked: true,
- Flags: containers.Optional[btrfsvol.BlockGroupFlags]{
- OK: true,
- Val: bg.Flags,
- },
- }
- if err := fs.LV.AddMapping(mapping); err != nil {
- dlog.Errorf(ctx, "error: adding flags from blockgroup: %v", err)
- continue
- }
- delete(bgs, bgLAddr)
- }
- dlog.Info(_ctx, "... done processing block groups")
-
- // More than once, I've been tempted to get rid of this exact-search step and just have the fuzzy-search step.
- // After all, looking at the timestamps in the log, it's faster per blockgroup! For some background, the big-O
- // for each (per blockgroup) looks like:
- //
- // - exact-search: O(bgSize+physicalBlocks)
- // - fuzzy-search: O(bgSize*physicalBlocks) worst-case; O(bgSize*log(physicalBlocks)) expected
- //
- // The fuzzy-search is only fast because the exact-search is so good at getting `physicalBlocks` down.
- // Empirically: if I remove the exact-search step, then the fuzzy-match step is more than an order of magnitude
- // slower.
- ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "5/6")
- dlog.Infof(_ctx, "5/6: Searching for %d block groups in checksum map (exact)...", len(bgs))
- physicalSums := ExtractPhysicalSums(scanResults)
- logicalSums := ExtractLogicalSums(ctx, scanResults)
- if err := matchBlockGroupSums(ctx, fs, bgs, physicalSums, logicalSums); err != nil {
- return err
- }
- dlog.Info(ctx, "... done searching for exact block groups")
-
- ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "6/6")
- dlog.Infof(_ctx, "6/6: Searching for %d block groups in checksum map (fuzzy)...", len(bgs))
- if err := fuzzyMatchBlockGroupSums(ctx, fs, bgs, physicalSums, logicalSums); err != nil {
- return err
- }
- dlog.Info(_ctx, "... done searching for fuzzy block groups")
-
- ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "report")
- dlog.Info(_ctx, "report:")
-
- unmappedPhysicalRegions := ListUnmappedPhysicalRegions(fs)
- var unmappedPhysical btrfsvol.AddrDelta
- var numUnmappedPhysical int
- for _, devRegions := range unmappedPhysicalRegions {
- numUnmappedPhysical += len(devRegions)
- for _, region := range devRegions {
- unmappedPhysical += region.End.Sub(region.Beg)
- }
- }
-	dlog.Infof(ctx, "... %v of unmapped physical space (across %d regions)", textui.IEC(unmappedPhysical, "B"), numUnmappedPhysical)
-
- unmappedLogicalRegions := ListUnmappedLogicalRegions(fs, logicalSums)
- var unmappedLogical btrfsvol.AddrDelta
- for _, region := range unmappedLogicalRegions {
- unmappedLogical += region.Size()
- }
-	dlog.Infof(ctx, "... %v of unmapped summed logical space (across %d regions)", textui.IEC(unmappedLogical, "B"), len(unmappedLogicalRegions))
-
- var unmappedBlockGroups btrfsvol.AddrDelta
- for _, bg := range bgs {
- unmappedBlockGroups += bg.Size
- }
-	dlog.Infof(ctx, "... %v of unmapped block groups (across %d groups)", textui.IEC(unmappedBlockGroups, "B"), len(bgs))
-
- dlog.Info(_ctx, "detailed report:")
- for _, devID := range maps.SortedKeys(unmappedPhysicalRegions) {
- for _, region := range unmappedPhysicalRegions[devID] {
- dlog.Infof(ctx, "... unmapped physical region: dev=%v beg=%v end=%v (size=%v)",
- devID, region.Beg, region.End, region.End.Sub(region.Beg))
- }
- }
- for _, region := range unmappedLogicalRegions {
-		dlog.Infof(ctx, "... unmapped summed logical region: beg=%v end=%v (size=%v)",
- region.Addr, region.Addr.Add(region.Size()), region.Size())
- }
- for _, laddr := range maps.SortedKeys(bgs) {
- bg := bgs[laddr]
-		dlog.Infof(ctx, "... unmapped block group: beg=%v end=%v (size=%v) flags=%v",
- bg.LAddr, bg.LAddr.Add(bg.Size), bg.Size, bg.Flags)
- }
-
- return nil
-}
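
To put rough, made-up numbers on the exact-vs-fuzzy comment in step 5/6: for a 1 GiB blockgroup (2^18 four-KiB checksum blocks) searched against 1 TiB of unmapped physical space (2^28 blocks), the exact search costs on the order of bgSize + physicalBlocks ≈ 2.7×10^8 comparisons, while the fuzzy search's worst case is bgSize × physicalBlocks = 2^46 ≈ 7.0×10^13. That ratio is why the exact pass runs first: every blockgroup it pins down shrinks the unmapped space the fuzzy pass has to scan.
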
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/sumrunwithgaps.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/sumrunwithgaps.go
deleted file mode 100644
index f79e2be..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/sumrunwithgaps.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildmappings
-
-import (
- "context"
- "fmt"
- "io"
- "math"
-
- "git.lukeshu.com/go/lowmemjson"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/slices"
-)
-
-type SumRunWithGaps[Addr btrfsvol.IntAddr[Addr]] struct {
- // Store the start address and size, in order to facilitate
- // leading and trailing gaps.
- Addr Addr
- Size btrfsvol.AddrDelta
-
- Runs []btrfssum.SumRun[Addr]
-}
-
-var (
- _ lowmemjson.Encodable = SumRunWithGaps[btrfsvol.LogicalAddr]{}
- _ lowmemjson.Decodable = (*SumRunWithGaps[btrfsvol.LogicalAddr])(nil)
-)
-
-// PatLen implements kmpPattern[int, ShortSum].
-func (sg SumRunWithGaps[Addr]) PatLen() int {
- return int(sg.Size / btrfssum.BlockSize)
-}
-
-func (sg SumRunWithGaps[Addr]) PctFull() float64 {
- total := sg.PatLen()
- var full int
- for _, run := range sg.Runs {
- full += run.SeqLen()
- }
- return float64(full) / float64(total)
-}
-
-func (sg SumRunWithGaps[Addr]) RunForAddr(addr Addr) (btrfssum.SumRun[Addr], Addr, bool) {
- for _, run := range sg.Runs {
- if run.Addr > addr {
- return btrfssum.SumRun[Addr]{}, run.Addr, false
- }
- if run.Addr.Add(run.Size()) <= addr {
- continue
- }
- return run, 0, true
- }
- return btrfssum.SumRun[Addr]{}, math.MaxInt64, false
-}
-
-func (sg SumRunWithGaps[Addr]) SumForAddr(addr Addr) (btrfssum.ShortSum, bool) {
- if addr < sg.Addr || addr >= sg.Addr.Add(sg.Size) {
- return "", false
- }
- runIdx, ok := slices.Search(sg.Runs, func(run btrfssum.SumRun[Addr]) int {
- switch {
- case addr < run.Addr:
- return -1
- case addr >= run.Addr.Add(run.Size()):
- return 1
- default:
- return 0
- }
- })
- if !ok {
- return "", false
- }
- run := sg.Runs[runIdx]
- off := int((addr-run.Addr)/btrfssum.BlockSize) * run.ChecksumSize
- return run.Sums[off : off+run.ChecksumSize], true
-}
-
-func (sg SumRunWithGaps[Addr]) Walk(ctx context.Context, fn func(Addr, btrfssum.ShortSum) error) error {
- for _, run := range sg.Runs {
- if err := run.Walk(ctx, fn); err != nil {
- return err
- }
- }
- return nil
-}
-
-// PatGet implements kmpPattern[int, ShortSum].
-func (sg SumRunWithGaps[Addr]) PatGet(sumIdx int) (btrfssum.ShortSum, bool) {
- addr := sg.Addr.Add(btrfsvol.AddrDelta(sumIdx) * btrfssum.BlockSize)
- return sg.SumForAddr(addr)
-}
-
-func (sg SumRunWithGaps[Addr]) EncodeJSON(w io.Writer) error {
- if _, err := fmt.Fprintf(w, `{"Addr":%d,"Size":%d,"Runs":[`, sg.Addr, sg.Size); err != nil {
- return err
- }
- cur := sg.Addr
- for i, run := range sg.Runs {
- if i > 0 {
- if _, err := w.Write([]byte{','}); err != nil {
- return err
- }
- }
- switch {
- case run.Addr < cur:
- return fmt.Errorf("invalid %T: addr went backwards: %v < %v", sg, run.Addr, cur)
- case run.Addr > cur:
- if _, err := fmt.Fprintf(w, `{"Gap":%d},`, run.Addr.Sub(cur)); err != nil {
- return err
- }
- fallthrough
- default:
- if err := lowmemjson.NewEncoder(w).Encode(run); err != nil {
- return err
- }
- cur = run.Addr.Add(run.Size())
- }
- }
- end := sg.Addr.Add(sg.Size)
- switch {
- case end < cur:
- return fmt.Errorf("invalid %T: addr went backwards: %v < %v", sg, end, cur)
- case end > cur:
- if _, err := fmt.Fprintf(w, `,{"Gap":%d}`, end.Sub(cur)); err != nil {
- return err
- }
- }
- if _, err := w.Write([]byte("]}")); err != nil {
- return err
- }
- return nil
-}
-
-func (sg *SumRunWithGaps[Addr]) DecodeJSON(r io.RuneScanner) error {
- *sg = SumRunWithGaps[Addr]{}
- var name string
- return lowmemjson.DecodeObject(r,
- func(r io.RuneScanner) error {
- return lowmemjson.NewDecoder(r).Decode(&name)
- },
- func(r io.RuneScanner) error {
- switch name {
- case "Addr":
- return lowmemjson.NewDecoder(r).Decode(&sg.Addr)
- case "Size":
- return lowmemjson.NewDecoder(r).Decode(&sg.Size)
- case "Runs":
- return lowmemjson.DecodeArray(r, func(r io.RuneScanner) error {
- var run btrfssum.SumRun[Addr]
- if err := lowmemjson.NewDecoder(r).Decode(&run); err != nil {
- return err
- }
- if run.ChecksumSize > 0 {
- sg.Runs = append(sg.Runs, run)
- }
- return nil
- })
- default:
- return fmt.Errorf("unknown key %q", name)
- }
- })
-}
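
The hand-rolled EncodeJSON above interleaves {"Gap":N} placeholder objects between runs (and at the leading and trailing edges), and DecodeJSON discards them for free: a gap object decodes into a zero-valued btrfssum.SumRun, which the ChecksumSize > 0 filter then drops. With made-up addresses (4 KiB blocks, 4-byte sums, checksum bytes elided), a run with a hole on either side would encode roughly as:

    {"Addr":4096,"Size":24576,"Runs":[{"Gap":8192},{"ChecksumSize":4,"Addr":12288,"Sums":"…"},{"Gap":4096}]}
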
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/593a5dd328ee86bac6820e3f98383aebadd2af3a22aa248546a228b08451c30d b/lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/593a5dd328ee86bac6820e3f98383aebadd2af3a22aa248546a228b08451c30d
deleted file mode 100644
index 9d14adf..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/593a5dd328ee86bac6820e3f98383aebadd2af3a22aa248546a228b08451c30d
+++ /dev/null
@@ -1,3 +0,0 @@
-go test fuzz v1
-[]byte("0")
-[]byte("1")
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/6bd9babbebf7eb78814e1f3425bfa6ccd6bfd42824a26e8cbd63b5117934e600 b/lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/6bd9babbebf7eb78814e1f3425bfa6ccd6bfd42824a26e8cbd63b5117934e600
deleted file mode 100644
index 269f061..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/6bd9babbebf7eb78814e1f3425bfa6ccd6bfd42824a26e8cbd63b5117934e600
+++ /dev/null
@@ -1,3 +0,0 @@
-go test fuzz v1
-[]byte("0")
-[]byte("0")
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/84ed65595ad05a58e293dbf423c1a816b697e2763a29d7c37aa476d6eef6fd60 b/lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/84ed65595ad05a58e293dbf423c1a816b697e2763a29d7c37aa476d6eef6fd60
deleted file mode 100644
index b8f1562..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/84ed65595ad05a58e293dbf423c1a816b697e2763a29d7c37aa476d6eef6fd60
+++ /dev/null
@@ -1,3 +0,0 @@
-go test fuzz v1
-[]byte("0")
-[]byte("")
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/9be40f71bc49b1b5c3b8e08d58d0f69cc22aefb12fd9b5931e49ff0e41953738 b/lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/9be40f71bc49b1b5c3b8e08d58d0f69cc22aefb12fd9b5931e49ff0e41953738
deleted file mode 100644
index be67506..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/9be40f71bc49b1b5c3b8e08d58d0f69cc22aefb12fd9b5931e49ff0e41953738
+++ /dev/null
@@ -1,3 +0,0 @@
-go test fuzz v1
-[]byte("\xde\xdb!")
-[]byte("\xde\xdb")
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/e43317ec61b0da9627d7b2fc0237d369082a6bf5dfe39e05c113b42ff6218d5d b/lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/e43317ec61b0da9627d7b2fc0237d369082a6bf5dfe39e05c113b42ff6218d5d
deleted file mode 100644
index c3bfa37..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings/testdata/fuzz/FuzzIndexAll/e43317ec61b0da9627d7b2fc0237d369082a6bf5dfe39e05c113b42ff6218d5d
+++ /dev/null
@@ -1,3 +0,0 @@
-go test fuzz v1
-[]byte("\x10\x10\x15")
-[]byte("\x10\x15")
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
deleted file mode 100644
index cf334a0..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
+++ /dev/null
@@ -1,583 +0,0 @@
-// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-import (
- "context"
- "fmt"
- "runtime"
- "sort"
- "time"
-
- "github.com/datawire/dlib/dgroup"
- "github.com/datawire/dlib/dlog"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/maps"
- "git.lukeshu.com/btrfs-progs-ng/lib/textui"
-)
-
-type keyAndTree struct {
- btrfsprim.Key
- TreeID btrfsprim.ObjID
-}
-
-func (a keyAndTree) Compare(b keyAndTree) int {
- if d := containers.NativeCompare(a.TreeID, b.TreeID); d != 0 {
- return d
- }
- return a.Key.Compare(b.Key)
-}
-
-func (o keyAndTree) String() string {
- return fmt.Sprintf("tree=%v key=%v", o.TreeID, o.Key)
-}
-
-type rebuilder struct {
- sb btrfstree.Superblock
- graph graph.Graph
- keyIO *keyio.Handle
-
- rebuilt *btrees.RebuiltForrest
-
- curKey struct {
- TreeID btrfsprim.ObjID
- Key containers.Optional[btrfsprim.Key]
- }
- treeQueue containers.Set[btrfsprim.ObjID]
- retryItemQueue map[btrfsprim.ObjID]containers.Set[keyAndTree]
- addedItemQueue containers.Set[keyAndTree]
- settledItemQueue containers.Set[keyAndTree]
- augmentQueue map[btrfsprim.ObjID]*treeAugmentQueue
- numAugments int
- numAugmentFailures int
-}
-
-type treeAugmentQueue struct {
- zero map[Want]struct{}
- single map[Want]btrfsvol.LogicalAddr
- multi map[Want]containers.Set[btrfsvol.LogicalAddr]
-}
-
-type Rebuilder interface {
- Rebuild(context.Context) error
- ListRoots(context.Context) map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr]
-}
-
-func NewRebuilder(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) (Rebuilder, error) {
- ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.step", "read-fs-data")
- sb, nodeGraph, keyIO, err := ScanDevices(ctx, fs, nodeScanResults) // ScanDevices does its own logging
- if err != nil {
- return nil, err
- }
-
- o := &rebuilder{
- sb: sb,
- graph: nodeGraph,
- keyIO: keyIO,
- }
- o.rebuilt = btrees.NewRebuiltForrest(sb, nodeGraph, keyIO, o)
- return o, nil
-}
-
-func (o *rebuilder) ListRoots(ctx context.Context) map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr] {
- return o.rebuilt.ListRoots(ctx)
-}
-
-func (o *rebuilder) Rebuild(ctx context.Context) error {
- ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.step", "rebuild")
-
- // Initialize
- o.retryItemQueue = make(map[btrfsprim.ObjID]containers.Set[keyAndTree])
- o.addedItemQueue = make(containers.Set[keyAndTree])
- o.settledItemQueue = make(containers.Set[keyAndTree])
- o.augmentQueue = make(map[btrfsprim.ObjID]*treeAugmentQueue)
-
- // Seed the queue
- o.treeQueue = containers.NewSet[btrfsprim.ObjID](
- btrfsprim.ROOT_TREE_OBJECTID,
- btrfsprim.CHUNK_TREE_OBJECTID,
- // btrfsprim.TREE_LOG_OBJECTID, // TODO(lukeshu): Special LOG_TREE handling
- btrfsprim.BLOCK_GROUP_TREE_OBJECTID,
- )
-
- // Run
- for passNum := 0; len(o.treeQueue) > 0 || len(o.addedItemQueue) > 0 || len(o.settledItemQueue) > 0 || len(o.augmentQueue) > 0; passNum++ {
- ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.pass", passNum)
-
- // Crawl trees (Drain o.treeQueue, fill o.addedItemQueue).
- if err := o.processTreeQueue(ctx); err != nil {
- return err
- }
- runtime.GC()
-
- if len(o.addedItemQueue) > 0 {
- // Settle items (drain o.addedItemQueue, fill o.augmentQueue and o.settledItemQueue).
- if err := o.processAddedItemQueue(ctx); err != nil {
- return err
- }
- } else {
- // Process items (drain o.settledItemQueue, fill o.augmentQueue and o.treeQueue).
- if err := o.processSettledItemQueue(ctx); err != nil {
- return err
- }
- }
- runtime.GC()
-
- // Apply augments (drain o.augmentQueue (and maybe o.retryItemQueue), fill o.addedItemQueue).
- if err := o.processAugmentQueue(ctx); err != nil {
- return err
- }
- runtime.GC()
- }
-
- return nil
-}
-
-// processTreeQueue drains o.treeQueue, filling o.addedItemQueue.
-func (o *rebuilder) processTreeQueue(ctx context.Context) error {
- ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "collect-items")
-
- queue := maps.SortedKeys(o.treeQueue)
- o.treeQueue = make(containers.Set[btrfsprim.ObjID])
-
- // Because trees can be wildly different sizes, it's impossible to have a meaningful
- // progress percentage here.
- o.curKey.Key.OK = false
- for _, o.curKey.TreeID = range queue {
- if err := ctx.Err(); err != nil {
- return err
- }
-		// This will call o.AddedItem as necessary, which
- // inserts to o.addedItemQueue.
- _ = o.rebuilt.Tree(ctx, o.curKey.TreeID)
- }
-
- return nil
-}
-
-type settleItemStats struct {
- textui.Portion[int]
- NumAugments int
- NumAugmentTrees int
-}
-
-func (s settleItemStats) String() string {
- // return textui.Sprintf("%v (queued %v augments across %v trees)",
- return textui.Sprintf("%v (aug:%v trees:%v)",
- s.Portion, s.NumAugments, s.NumAugmentTrees)
-}
-
-// processAddedItemQueue drains o.addedItemQueue, filling o.augmentQueue and o.settledItemQueue.
-func (o *rebuilder) processAddedItemQueue(ctx context.Context) error {
- ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "settle-items")
-
- queue := maps.Keys(o.addedItemQueue)
- o.addedItemQueue = make(containers.Set[keyAndTree])
- sort.Slice(queue, func(i, j int) bool {
- return queue[i].Compare(queue[j]) < 0
- })
-
- var progress settleItemStats
- progress.D = len(queue)
- progressWriter := textui.NewProgress[settleItemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
- ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress)
-
- for i, key := range queue {
- progress.N = i
- progressWriter.Set(progress)
-
- ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.settle.item", key)
- tree := o.rebuilt.Tree(ctx, key.TreeID)
- incPtr, ok := tree.Items(ctx).Load(key.Key)
- if !ok {
- panic(fmt.Errorf("should not happen: failed to load already-added item: %v", key))
- }
- excPtr, ok := tree.PotentialItems(ctx).Load(key.Key)
- if ok && tree.ShouldReplace(incPtr.Node, excPtr.Node) {
- wantKey := WantWithTree{
- TreeID: key.TreeID,
- Key: wantFromKey(key.Key),
- }
- o.wantAugment(ctx, wantKey, tree.LeafToRoots(ctx, excPtr.Node))
- progress.NumAugments = o.numAugments
- progress.NumAugmentTrees = len(o.augmentQueue)
- progressWriter.Set(progress)
- } else if !handleWouldBeNoOp(key.ItemType) {
- o.settledItemQueue.Insert(key)
- }
- }
-
- progress.N = len(queue)
- progressWriter.Set(progress)
- progressWriter.Done()
- return nil
-}
-
-type processItemStats struct {
- textui.Portion[int]
- NumAugments int
- NumFailures int
- NumAugmentTrees int
-}
-
-func (s processItemStats) String() string {
- // return textui.Sprintf("%v (queued %v augments and %v failures across %v trees)",
- return textui.Sprintf("%v (aug:%v fail:%v trees:%v)",
- s.Portion, s.NumAugments, s.NumFailures, s.NumAugmentTrees)
-}
-
-// processSettledItemQueue drains o.settledItemQueue, filling o.augmentQueue and o.treeQueue.
-func (o *rebuilder) processSettledItemQueue(ctx context.Context) error {
- ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "process-items")
-
- queue := maps.Keys(o.settledItemQueue)
- o.settledItemQueue = make(containers.Set[keyAndTree])
- sort.Slice(queue, func(i, j int) bool {
- return queue[i].Compare(queue[j]) < 0
- })
-
- var progress processItemStats
- progress.D = len(queue)
- progressWriter := textui.NewProgress[processItemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
- ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress)
-
- type keyAndBody struct {
- keyAndTree
- Body btrfsitem.Item
- }
- itemChan := make(chan keyAndBody, textui.Tunable(300)) // average items-per-node≈100; let's have a buffer of ~3 nodes
- grp := dgroup.NewGroup(ctx, dgroup.GroupConfig{})
- grp.Go("io", func(ctx context.Context) error {
- defer close(itemChan)
- for _, key := range queue {
- if err := ctx.Err(); err != nil {
- return err
- }
- ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.process.item", key)
- item := keyAndBody{
- keyAndTree: key,
- Body: o.rebuilt.Tree(ctx, key.TreeID).ReadItem(ctx, key.Key),
- }
- select {
- case itemChan <- item:
- case <-ctx.Done():
- }
- }
- return nil
- })
- grp.Go("cpu", func(ctx context.Context) error {
- defer progressWriter.Done()
- o.curKey.Key.OK = true
- for item := range itemChan {
- ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.process.item", item.keyAndTree)
- o.curKey.TreeID = item.TreeID
- o.curKey.Key.Val = item.Key
- handleItem(o, ctx, item.TreeID, btrfstree.Item{
- Key: item.Key,
- Body: item.Body,
- })
- item.Body.Free()
- if item.ItemType == btrfsitem.ROOT_ITEM_KEY {
- o.treeQueue.Insert(item.ObjectID)
- }
- progress.N++
- progress.NumAugments = o.numAugments
- progress.NumFailures = o.numAugmentFailures
- progress.NumAugmentTrees = len(o.augmentQueue)
- progressWriter.Set(progress)
- }
- return nil
- })
- return grp.Wait()
-}
-
-// processAugmentQueue drains o.augmentQueue (and maybe o.retryItemQueue), filling o.addedItemQueue.
-func (o *rebuilder) processAugmentQueue(ctx context.Context) error {
- ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "apply-augments")
-
- resolvedAugments := make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], len(o.augmentQueue))
- var progress textui.Portion[int]
- for _, treeID := range maps.SortedKeys(o.augmentQueue) {
- if err := ctx.Err(); err != nil {
- return err
- }
- ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID)
- resolvedAugments[treeID] = o.resolveTreeAugments(ctx, treeID)
- progress.D += len(resolvedAugments[treeID])
- }
- o.augmentQueue = make(map[btrfsprim.ObjID]*treeAugmentQueue)
- o.numAugments = 0
- o.numAugmentFailures = 0
- runtime.GC()
-
- progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
- ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress)
- for _, treeID := range maps.SortedKeys(resolvedAugments) {
- ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID)
- for _, nodeAddr := range maps.SortedKeys(resolvedAugments[treeID]) {
- if err := ctx.Err(); err != nil {
- progressWriter.Set(progress)
- progressWriter.Done()
- return err
- }
- progressWriter.Set(progress)
-			// This will call o.AddedItem as necessary, which
- // inserts to o.addedItemQueue.
- o.rebuilt.Tree(ctx, treeID).AddRoot(ctx, nodeAddr)
- progress.N++
- }
- }
- progressWriter.Set(progress)
- progressWriter.Done()
-
- return nil
-}
-
-func (o *rebuilder) enqueueRetry(ifTreeID btrfsprim.ObjID) {
- if o.curKey.Key.OK {
- if o.retryItemQueue[ifTreeID] == nil {
- o.retryItemQueue[ifTreeID] = make(containers.Set[keyAndTree])
- }
- o.retryItemQueue[ifTreeID].Insert(keyAndTree{
- TreeID: o.curKey.TreeID,
- Key: o.curKey.Key.Val,
- })
- } else {
- o.treeQueue.Insert(o.curKey.TreeID)
- }
-}
-
-func (o *rebuilder) resolveTreeAugments(ctx context.Context, treeID btrfsprim.ObjID) containers.Set[btrfsvol.LogicalAddr] {
- // Define an algorithm that takes several lists of items, and returns a
- // set of those items such that each input list contains zero or one of
-	// the items from the returned set. The same item may appear in multiple
- // of the input lists.
-
- type ChoiceInfo struct {
- Count int
- Distance int
- Generation btrfsprim.Generation
- }
- choices := make(map[btrfsvol.LogicalAddr]ChoiceInfo)
- // o.augmentQueue[treeID].zero is optimized storage for lists
- // with zero items. Go ahead and free that memory up.
- o.augmentQueue[treeID].zero = nil
- // o.augmentQueue[treeID].single is optimized storage for
- // lists with exactly 1 item.
- for _, choice := range o.augmentQueue[treeID].single {
- if old, ok := choices[choice]; ok {
- old.Count++
- choices[choice] = old
- } else {
- choices[choice] = ChoiceInfo{
- Count: 1,
- Distance: discardOK(o.rebuilt.Tree(ctx, treeID).COWDistance(o.graph.Nodes[choice].Owner)),
- Generation: o.graph.Nodes[choice].Generation,
- }
- }
- }
- // o.augmentQueue[treeID].multi is the main list storage.
- for _, list := range o.augmentQueue[treeID].multi {
- for choice := range list {
- if old, ok := choices[choice]; ok {
- old.Count++
- choices[choice] = old
- } else {
- choices[choice] = ChoiceInfo{
- Count: 1,
- Distance: discardOK(o.rebuilt.Tree(ctx, treeID).COWDistance(o.graph.Nodes[choice].Owner)),
- Generation: o.graph.Nodes[choice].Generation,
- }
- }
- }
- }
-
- // > Example 1: Given the input lists
- // >
- // > 0: [A, B]
- // > 2: [A, C]
- // >
- // > legal solutions would be `[]`, `[A]`, `[B]`, `[C]`, or `[B, C]`. It
- // > would not be legal to return `[A, B]` or `[A, C]`.
- //
- // > Example 2: Given the input lists
- // >
- // > 1: [A, B]
- // > 2: [A]
- // > 3: [B]
- // >
-	// > legal solutions would be `[]`, `[A]`, or `[B]`. It would not be legal
- // > to return `[A, B]`.
- //
- // The algorithm should optimize for the following goals:
- //
- // - We prefer that each input list have an item in the return set.
- //
- // > In Example 1, while `[]`, `[B]`, and `[C]` are permissible
- // > solutions, they are not optimal, because one or both of the input
- // > lists are not represented.
- // >
- // > It may be the case that it is not possible to represent all lists
- // > in the result; in Example 2, either list 2 or list 3 must be
- // > unrepresented.
- //
- // - Each item has a non-negative scalar "distance" score, we prefer
- // lower distances. Distance scores are comparable; 0 is preferred,
- // and a distance of 4 is twice as bad as a distance of 2.
- //
- // - Each item has a "generation" score, we prefer higher generations.
- // Generation scores should not be treated as a linear scale; the
- // magnitude of deltas is meaningless; only the sign of a delta is
- // meaningful.
- //
- // > So it would be wrong to say something like
- // >
- // > desirability = (-a*distance) + (b*generation) // for some constants `a` and `b`
- // >
- // > because `generation` can't be used that way
- //
- // - We prefer items that appear in more lists over items that appear in
- // fewer lists.
- //
- // The relative priority of these 4 goals is undefined; preferably the
- // algorithm should be defined in a way that makes it easy to adjust the
- // relative priorities.
-
- ret := make(containers.Set[btrfsvol.LogicalAddr])
- illegal := make(containers.Set[btrfsvol.LogicalAddr]) // cannot-be-accepted and already-accepted
- accept := func(item btrfsvol.LogicalAddr) {
- ret.Insert(item)
- for _, list := range o.augmentQueue[treeID].multi {
- if list.Has(item) {
- illegal.InsertFrom(list)
- }
- }
- }
-
- sortedItems := maps.Keys(choices)
- sort.Slice(sortedItems, func(i, j int) bool {
- iItem, jItem := sortedItems[i], sortedItems[j]
- if choices[iItem].Count != choices[jItem].Count {
- return choices[iItem].Count > choices[jItem].Count // reverse this check; higher counts should sort lower
- }
- if choices[iItem].Distance != choices[jItem].Distance {
- return choices[iItem].Distance < choices[jItem].Distance
- }
- if choices[iItem].Generation != choices[jItem].Generation {
- return choices[iItem].Generation > choices[jItem].Generation // reverse this check; higher generations should sort lower
- }
- return iItem < jItem // laddr is as good a tiebreaker as anything
- })
- for _, item := range sortedItems {
- if !illegal.Has(item) {
- accept(item)
- }
- }
-
- // Log our result
- wantKeys := append(
- maps.Keys(o.augmentQueue[treeID].single),
- maps.Keys(o.augmentQueue[treeID].multi)...)
- sort.Slice(wantKeys, func(i, j int) bool {
- return wantKeys[i].Compare(wantKeys[j]) < 0
- })
- for _, wantKey := range wantKeys {
- list, ok := o.augmentQueue[treeID].multi[wantKey]
- if !ok {
- list = containers.NewSet[btrfsvol.LogicalAddr](o.augmentQueue[treeID].single[wantKey])
- }
- chose := list.Intersection(ret)
- switch {
- case len(chose) == 0:
- dlog.Infof(ctx, "lists[%q]: chose (none) from %v", wantKey, maps.SortedKeys(list))
- case len(list) > 1:
- dlog.Infof(ctx, "lists[%q]: chose %v from %v", wantKey, chose.TakeOne(), maps.SortedKeys(list))
- default:
- dlog.Debugf(ctx, "lists[%q]: chose %v from %v", wantKey, chose.TakeOne(), maps.SortedKeys(list))
- }
- }
-
- // Free some memory
- o.augmentQueue[treeID].single = nil
- o.augmentQueue[treeID].multi = nil
-
- return ret
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-func (queue *treeAugmentQueue) has(wantKey Want) bool {
- if queue != nil {
- if queue.zero != nil {
- if _, ok := queue.zero[wantKey]; ok {
- return true
- }
- }
- if queue.single != nil {
- if _, ok := queue.single[wantKey]; ok {
- return true
- }
- }
- if queue.multi != nil {
- if _, ok := queue.multi[wantKey]; ok {
- return true
- }
- }
- }
- return false
-}
-
-func (queue *treeAugmentQueue) store(wantKey Want, choices containers.Set[btrfsvol.LogicalAddr]) {
- if len(choices) == 0 && wantKey.OffsetType > offsetExact {
- // This wantKey is unlikely to come up again, so it's
- // not worth the RAM of storing a negative result.
- return
- }
- switch len(choices) {
- case 0:
- if queue.zero == nil {
- queue.zero = make(map[Want]struct{})
- }
- queue.zero[wantKey] = struct{}{}
- case 1:
- if queue.single == nil {
- queue.single = make(map[Want]btrfsvol.LogicalAddr)
- }
- queue.single[wantKey] = choices.TakeOne()
- default:
- if queue.multi == nil {
- queue.multi = make(map[Want]containers.Set[btrfsvol.LogicalAddr])
- }
- queue.multi[wantKey] = choices
- }
-}
-
-func (o *rebuilder) hasAugment(wantKey WantWithTree) bool {
- return o.augmentQueue[wantKey.TreeID].has(wantKey.Key)
-}
-
-func (o *rebuilder) wantAugment(ctx context.Context, wantKey WantWithTree, choices containers.Set[btrfsvol.LogicalAddr]) {
- if o.augmentQueue[wantKey.TreeID] == nil {
- o.augmentQueue[wantKey.TreeID] = new(treeAugmentQueue)
- }
- o.augmentQueue[wantKey.TreeID].store(wantKey.Key, choices)
- if len(choices) == 0 {
- o.numAugmentFailures++
- dlog.Debug(ctx, "ERR: could not find wanted item")
- } else {
- o.numAugments++
- dlog.Debugf(ctx, "choices=%v", maps.SortedKeys(choices))
- }
-}
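
The greedy phase of resolveTreeAugments is easiest to see on the comment's Example 1. With lists 0:[A,B] and 2:[A,C] and equal distances and generations, A has Count=2 while B and C have Count=1, so A sorts first and is accepted; both lists contain A, so B and C become illegal, and the result [A] represents every list. A toy sketch of just that accept loop over made-up string items (counts stand in for the full ChoiceInfo; needs import "sort"):

    // toyResolve tries candidates in descending-count order; accepting
    // an item poisons every input list that contains it.
    func toyResolve(lists [][]string, count map[string]int) []string {
    	cands := make([]string, 0, len(count))
    	for c := range count {
    		cands = append(cands, c)
    	}
    	sort.Slice(cands, func(i, j int) bool {
    		if count[cands[i]] != count[cands[j]] {
    			return count[cands[i]] > count[cands[j]] // higher counts first
    		}
    		return cands[i] < cands[j] // deterministic tiebreaker
    	})
    	var ret []string
    	illegal := make(map[string]bool)
    	for _, c := range cands {
    		if illegal[c] {
    			continue
    		}
    		ret = append(ret, c)
    		for _, list := range lists {
    			for _, x := range list {
    				if x != c {
    					continue
    				}
    				for _, y := range list { // accepting c poisons this list
    					illegal[y] = true
    				}
    			}
    		}
    	}
    	return ret
    }

toyResolve([][]string{{"A", "B"}, {"A", "C"}}, map[string]int{"A": 2, "B": 1, "C": 1}) returns [A].
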
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go
deleted file mode 100644
index 492436b..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-import (
- "context"
- "fmt"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
-)
-
-// AddedItem implements btrees.Callbacks.
-func (o *rebuilder) AddedItem(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) {
- o.addedItemQueue.Insert(keyAndTree{
- TreeID: tree,
- Key: key,
- })
-}
-
-// AddedRoot implements btrees.Callbacks.
-func (o *rebuilder) AddedRoot(ctx context.Context, tree btrfsprim.ObjID, root btrfsvol.LogicalAddr) {
- if retries := o.retryItemQueue[tree]; retries != nil {
- o.addedItemQueue.InsertFrom(retries)
- }
-}
-
-// LookupRoot implements btrees.Callbacks.
-func (o *rebuilder) LookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) {
- wantKey := WantWithTree{
- TreeID: btrfsprim.ROOT_TREE_OBJECTID,
- Key: Want{
- ObjectID: tree,
- ItemType: btrfsitem.ROOT_ITEM_KEY,
- OffsetType: offsetAny,
- },
- }
- ctx = withWant(ctx, logFieldTreeWant, "tree Root", wantKey)
- foundKey, ok := o._want(ctx, wantKey)
- if !ok {
- o.enqueueRetry(btrfsprim.ROOT_TREE_OBJECTID)
- return 0, btrfsitem.Root{}, false
- }
- itemBody := o.rebuilt.Tree(ctx, wantKey.TreeID).ReadItem(ctx, foundKey)
- defer itemBody.Free()
- switch itemBody := itemBody.(type) {
- case *btrfsitem.Root:
- return btrfsprim.Generation(foundKey.Offset), *itemBody, true
- case *btrfsitem.Error:
- o.fsErr(ctx, fmt.Errorf("error decoding item: %v: %w", foundKey, itemBody.Err))
- return 0, btrfsitem.Root{}, false
- default:
- // This is a panic because the item decoder should not emit ROOT_ITEM items as anything but
- // btrfsitem.Root or btrfsitem.Error without this code also being updated.
- panic(fmt.Errorf("should not happen: ROOT_ITEM item has unexpected type: %T", itemBody))
- }
-}
-
-// LookupUUID implements btrees.Callbacks.
-func (o *rebuilder) LookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) {
- wantKey := WantWithTree{
- TreeID: btrfsprim.UUID_TREE_OBJECTID,
- Key: wantFromKey(btrfsitem.UUIDToKey(uuid)),
- }
- ctx = withWant(ctx, logFieldTreeWant, "resolve parent UUID", wantKey)
- if !o._wantOff(ctx, wantKey) {
- o.enqueueRetry(btrfsprim.UUID_TREE_OBJECTID)
- return 0, false
- }
- itemBody := o.rebuilt.Tree(ctx, wantKey.TreeID).ReadItem(ctx, wantKey.Key.Key())
- defer itemBody.Free()
- switch itemBody := itemBody.(type) {
- case *btrfsitem.UUIDMap:
- return itemBody.ObjID, true
- case *btrfsitem.Error:
- o.fsErr(ctx, fmt.Errorf("error decoding item: %v: %w", wantKey, itemBody.Err))
- return 0, false
- default:
- // This is a panic because the item decoder should not emit UUID_SUBVOL items as anything but
- // btrfsitem.UUIDMap or btrfsitem.Error without this code also being updated.
- panic(fmt.Errorf("should not happen: UUID_SUBVOL item has unexpected type: %T", itemBody))
- }
-}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wantcb.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wantcb.go
deleted file mode 100644
index adf3cff..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wantcb.go
+++ /dev/null
@@ -1,400 +0,0 @@
-// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-import (
- "bytes"
- "context"
- "fmt"
-
- "github.com/datawire/dlib/dlog"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
-)
-
-// fsErr implements rebuildCallbacks.
-func (o *rebuilder) fsErr(ctx context.Context, e error) {
- dlog.Errorf(ctx, "filesystem error: %v", e)
-}
-
-// want implements rebuildCallbacks.
-func (o *rebuilder) want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) {
- wantKey := WantWithTree{
- TreeID: treeID,
- Key: Want{
- ObjectID: objID,
- ItemType: typ,
- OffsetType: offsetAny,
- },
- }
- ctx = withWant(ctx, logFieldItemWant, reason, wantKey)
- o._want(ctx, wantKey)
-}
-
-func (o *rebuilder) _want(ctx context.Context, wantKey WantWithTree) (key btrfsprim.Key, ok bool) {
- if o.rebuilt.Tree(ctx, wantKey.TreeID) == nil {
- o.enqueueRetry(wantKey.TreeID)
- return btrfsprim.Key{}, false
- }
-
- // check if we already have it
-
- tgt := wantKey.Key.Key()
- if key, _, ok := o.rebuilt.Tree(ctx, wantKey.TreeID).Items(ctx).Search(func(key btrfsprim.Key, _ keyio.ItemPtr) int {
- key.Offset = 0
- return tgt.Compare(key)
- }); ok {
- return key, true
- }
-
- // OK, we need to insert it
-
- if o.hasAugment(wantKey) {
- return btrfsprim.Key{}, false
- }
- wants := make(containers.Set[btrfsvol.LogicalAddr])
- o.rebuilt.Tree(ctx, wantKey.TreeID).PotentialItems(ctx).Subrange(
- func(k btrfsprim.Key, _ keyio.ItemPtr) int {
- k.Offset = 0
- return tgt.Compare(k)
- },
- func(_ btrfsprim.Key, v keyio.ItemPtr) bool {
- wants.InsertFrom(o.rebuilt.Tree(ctx, wantKey.TreeID).LeafToRoots(ctx, v.Node))
- return true
- })
- o.wantAugment(ctx, wantKey, wants)
- return btrfsprim.Key{}, false
-}
-
-// wantOff implements rebuildCallbacks.
-func (o *rebuilder) wantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) {
- wantKey := WantWithTree{
- TreeID: treeID,
- Key: Want{
- ObjectID: objID,
- ItemType: typ,
- OffsetType: offsetExact,
- OffsetLow: off,
- },
- }
- ctx = withWant(ctx, logFieldItemWant, reason, wantKey)
- o._wantOff(ctx, wantKey)
-}
-
-func (o *rebuilder) _wantOff(ctx context.Context, wantKey WantWithTree) (ok bool) {
- if o.rebuilt.Tree(ctx, wantKey.TreeID) == nil {
- o.enqueueRetry(wantKey.TreeID)
- return false
- }
-
- // check if we already have it
-
- tgt := wantKey.Key.Key()
- if _, ok := o.rebuilt.Tree(ctx, wantKey.TreeID).Items(ctx).Load(tgt); ok {
- return true
- }
-
- // OK, we need to insert it
-
- if o.hasAugment(wantKey) {
- return false
- }
- wants := make(containers.Set[btrfsvol.LogicalAddr])
- o.rebuilt.Tree(ctx, wantKey.TreeID).PotentialItems(ctx).Subrange(
- func(k btrfsprim.Key, _ keyio.ItemPtr) int { return tgt.Compare(k) },
- func(_ btrfsprim.Key, v keyio.ItemPtr) bool {
- wants.InsertFrom(o.rebuilt.Tree(ctx, wantKey.TreeID).LeafToRoots(ctx, v.Node))
- return true
- })
- o.wantAugment(ctx, wantKey, wants)
- return false
-}
-
-// wantDirIndex implements rebuildCallbacks.
-func (o *rebuilder) wantDirIndex(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, name []byte) {
- wantKey := WantWithTree{
- TreeID: treeID,
- Key: Want{
- ObjectID: objID,
- ItemType: btrfsitem.DIR_INDEX_KEY,
- OffsetType: offsetName,
- OffsetName: string(name),
- },
- }
- ctx = withWant(ctx, logFieldItemWant, reason, wantKey)
-
- if o.rebuilt.Tree(ctx, treeID) == nil {
- o.enqueueRetry(treeID)
- return
- }
-
- // check if we already have it
-
- tgt := wantKey.Key.Key()
- found := false
- o.rebuilt.Tree(ctx, treeID).Items(ctx).Subrange(
- func(key btrfsprim.Key, _ keyio.ItemPtr) int {
- key.Offset = 0
- return tgt.Compare(key)
- },
- func(_ btrfsprim.Key, ptr keyio.ItemPtr) bool {
- if itemName, ok := o.keyIO.Names[ptr]; ok && bytes.Equal(itemName, name) {
- found = true
- }
- return !found
- })
- if found {
- return
- }
-
- // OK, we need to insert it
-
- if o.hasAugment(wantKey) {
- return
- }
- wants := make(containers.Set[btrfsvol.LogicalAddr])
- o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx).Subrange(
- func(key btrfsprim.Key, _ keyio.ItemPtr) int {
- key.Offset = 0
- return tgt.Compare(key)
- },
- func(_ btrfsprim.Key, ptr keyio.ItemPtr) bool {
- if itemName, ok := o.keyIO.Names[ptr]; ok && bytes.Equal(itemName, name) {
- wants.InsertFrom(o.rebuilt.Tree(ctx, treeID).LeafToRoots(ctx, ptr.Node))
- }
- return true
- })
- o.wantAugment(ctx, wantKey, wants)
-}
-
-func (o *rebuilder) _walkRange(
- ctx context.Context,
- items *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr],
- treeID, objID btrfsprim.ObjID, typ btrfsprim.ItemType,
- beg, end uint64,
- fn func(key btrfsprim.Key, ptr keyio.ItemPtr, beg, end uint64),
-) {
- min := btrfsprim.Key{
- ObjectID: objID,
- ItemType: typ,
- Offset: 0, // *NOT* `beg`
- }
- max := btrfsprim.Key{
- ObjectID: objID,
- ItemType: typ,
- Offset: end - 1,
- }
- items.Subrange(
- func(runKey btrfsprim.Key, _ keyio.ItemPtr) int {
- switch {
- case min.Compare(runKey) < 0:
- return 1
- case max.Compare(runKey) > 0:
- return -1
- default:
- return 0
- }
- },
- func(runKey btrfsprim.Key, runPtr keyio.ItemPtr) bool {
- runSizeAndErr, ok := o.keyIO.Sizes[runPtr]
- if !ok {
- panic(fmt.Errorf("should not happen: %v (%v) did not have a size recorded",
- runPtr, keyAndTree{TreeID: treeID, Key: runKey}))
- }
- if runSizeAndErr.Err != nil {
- o.fsErr(ctx, fmt.Errorf("get size: %v (%v): %w",
- runPtr, keyAndTree{TreeID: treeID, Key: runKey},
- runSizeAndErr.Err))
- return true
- }
- runSize := runSizeAndErr.Size
- if runSize == 0 {
- return true
- }
- runBeg := runKey.Offset
- runEnd := runBeg + runSize
- if runEnd <= beg {
- return true
- }
-
- fn(runKey, runPtr, runBeg, runEnd)
- return true
- })
-}
-
-type gap struct {
- // range is [Beg,End)
- Beg, End uint64
-}
-
-// Compare implements containers.Ordered.
-func (a gap) Compare(b gap) int {
- return containers.NativeCompare(a.Beg, b.Beg)
-}
-
-func (o *rebuilder) _wantRange(
- ctx context.Context, reason string,
- treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType,
- beg, end uint64,
-) {
- wantKey := WantWithTree{
- TreeID: treeID,
- Key: Want{
- ObjectID: objID,
- ItemType: typ,
- OffsetType: offsetAny,
- },
- }
- ctx = withWant(ctx, logFieldItemWant, reason, wantKey)
- wantKey.Key.OffsetType = offsetRange
-
- if o.rebuilt.Tree(ctx, treeID) == nil {
- o.enqueueRetry(treeID)
- return
- }
-
- // Step 1: Build a listing of the gaps.
- //
- // Start with a gap of the whole range, then subtract each run
- // from it.
- gaps := new(containers.RBTree[gap])
- gaps.Insert(gap{
- Beg: beg,
- End: end,
- })
- o._walkRange(
- ctx,
- o.rebuilt.Tree(ctx, treeID).Items(ctx),
- treeID, objID, typ, beg, end,
- func(runKey btrfsprim.Key, _ keyio.ItemPtr, runBeg, runEnd uint64) {
- var overlappingGaps []*containers.RBNode[gap]
- gaps.Subrange(
- func(gap gap) int {
- switch {
- case gap.End <= runBeg:
- return 1
- case runEnd <= gap.Beg:
- return -1
- default:
- return 0
- }
- },
- func(node *containers.RBNode[gap]) bool {
- overlappingGaps = append(overlappingGaps, node)
- return true
- })
- if len(overlappingGaps) == 0 {
- return
- }
- gapsBeg := overlappingGaps[0].Value.Beg
- gapsEnd := overlappingGaps[len(overlappingGaps)-1].Value.End
- for _, gap := range overlappingGaps {
- gaps.Delete(gap)
- }
- if gapsBeg < runBeg {
- gaps.Insert(gap{
- Beg: gapsBeg,
- End: runBeg,
- })
- }
- if gapsEnd > runEnd {
- gaps.Insert(gap{
- Beg: runEnd,
- End: gapsEnd,
- })
- }
- })
-
- // Step 2: Fill each gap.
- if gaps.Len() == 0 {
- return
- }
- potentialItems := o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx)
- gaps.Range(func(rbNode *containers.RBNode[gap]) bool {
- gap := rbNode.Value
- last := gap.Beg
- o._walkRange(
- ctx,
- potentialItems,
- treeID, objID, typ, gap.Beg, gap.End,
- func(k btrfsprim.Key, v keyio.ItemPtr, runBeg, runEnd uint64) {
- // TODO: This is dumb and greedy.
- if last < runBeg {
- // log an error
- wantKey.Key.OffsetLow = last
- wantKey.Key.OffsetHigh = runBeg
- wantCtx := withWant(ctx, logFieldItemWant, reason, wantKey)
- o.wantAugment(wantCtx, wantKey, nil)
- }
- wantKey.Key.OffsetLow = gap.Beg
- wantKey.Key.OffsetHigh = gap.End
- wantCtx := withWant(ctx, logFieldItemWant, reason, wantKey)
- o.wantAugment(wantCtx, wantKey, o.rebuilt.Tree(wantCtx, treeID).LeafToRoots(wantCtx, v.Node))
- last = runEnd
- })
- if last < gap.End {
- // log an error
- wantKey.Key.OffsetLow = last
- wantKey.Key.OffsetHigh = gap.End
- wantCtx := withWant(ctx, logFieldItemWant, reason, wantKey)
- o.wantAugment(wantCtx, wantKey, nil)
- }
- return true
- })
-}
-
-// wantCSum implements rebuildCallbacks.
-//
-// interval is [beg, end)
-func (o *rebuilder) wantCSum(ctx context.Context, reason string, inodeTree, inode btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) {
- inodeWant := WantWithTree{
- TreeID: inodeTree,
- Key: Want{
- ObjectID: inode,
- ItemType: btrfsitem.INODE_ITEM_KEY,
- OffsetType: offsetExact,
- OffsetLow: 0,
- },
- }
- inodeCtx := withWant(ctx, logFieldItemWant, reason, inodeWant)
- if !o._wantOff(inodeCtx, inodeWant) {
- o.enqueueRetry(inodeTree)
- return
- }
- inodePtr, ok := o.rebuilt.Tree(inodeCtx, inodeTree).Items(inodeCtx).Load(inodeWant.Key.Key())
- if !ok {
- panic(fmt.Errorf("should not happen: could not load key: %v", inodeWant))
- }
- inodeFlags, ok := o.keyIO.Flags[inodePtr]
- if !ok {
- panic(fmt.Errorf("should not happen: INODE_ITEM did not have flags recorded"))
- }
- if inodeFlags.Err != nil {
- o.fsErr(inodeCtx, inodeFlags.Err)
- return
- }
-
- if inodeFlags.NoDataSum {
- return
- }
-
- o._wantRange(
- ctx, reason,
- btrfsprim.CSUM_TREE_OBJECTID, btrfsprim.EXTENT_CSUM_OBJECTID, btrfsprim.EXTENT_CSUM_KEY,
- uint64(roundDown(beg, btrfssum.BlockSize)), uint64(roundUp(end, btrfssum.BlockSize)))
-}
-
-// wantFileExt implements rebuildCallbacks.
-func (o *rebuilder) wantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) {
- o._wantRange(
- ctx, reason,
- treeID, ino, btrfsprim.EXTENT_DATA_KEY,
- 0, uint64(size))
-}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wanttyp.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wanttyp.go
deleted file mode 100644
index 2b471fe..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wanttyp.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-import (
- "context"
- "fmt"
-
- "github.com/datawire/dlib/dlog"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
-)
-
-type wantOffsetType int8
-
-const (
- offsetAny = wantOffsetType(iota)
- offsetExact
- offsetRange
- offsetName
-)
-
-type Want struct {
- ObjectID btrfsprim.ObjID
- ItemType btrfsprim.ItemType
- OffsetType wantOffsetType
- OffsetLow uint64
- OffsetHigh uint64
- OffsetName string
-}
-
-func (a Want) Compare(b Want) int {
- if d := containers.NativeCompare(a.ObjectID, b.ObjectID); d != 0 {
- return d
- }
- if d := containers.NativeCompare(a.ItemType, b.ItemType); d != 0 {
- return d
- }
- if d := containers.NativeCompare(a.OffsetType, b.OffsetType); d != 0 {
- return d
- }
- if d := containers.NativeCompare(a.OffsetLow, b.OffsetLow); d != 0 {
- return d
- }
- if d := containers.NativeCompare(a.OffsetHigh, b.OffsetHigh); d != 0 {
- return d
- }
- if d := containers.NativeCompare(a.OffsetName, b.OffsetName); d != 0 {
- return d
- }
- return 0
-}
-
-func (o Want) Key() btrfsprim.Key {
- return btrfsprim.Key{
- ObjectID: o.ObjectID,
- ItemType: o.ItemType,
- Offset: o.OffsetLow,
- }
-}
-
-func wantFromKey(k btrfsprim.Key) Want {
- return Want{
- ObjectID: k.ObjectID,
- ItemType: k.ItemType,
- OffsetType: offsetExact,
- OffsetLow: k.Offset,
- }
-}
-
-func (o Want) String() string {
- switch o.OffsetType {
- case offsetAny:
- return fmt.Sprintf("{%v %v ?}", o.ObjectID, o.ItemType)
- case offsetExact:
- return fmt.Sprintf("{%v %v %v}", o.ObjectID, o.ItemType, o.OffsetLow)
- case offsetRange:
- return fmt.Sprintf("{%v %v %v-%v}", o.ObjectID, o.ItemType, o.OffsetLow, o.OffsetHigh)
- case offsetName:
- return fmt.Sprintf("{%v %v name=%q}", o.ObjectID, o.ItemType, o.OffsetName)
- default:
- panic(fmt.Errorf("should not happen: OffsetType=%#v", o.OffsetType))
- }
-}
-
-type WantWithTree struct {
- TreeID btrfsprim.ObjID
- Key Want
-}
-
-func (o WantWithTree) String() string {
- return fmt.Sprintf("tree=%v key=%v", o.TreeID, o.Key)
-}
-
-const (
- logFieldItemWant = "btrfsinspect.rebuild-nodes.rebuild.want"
- logFieldTreeWant = "btrfsinspect.rebuild-nodes.rebuild.add-tree.want"
-)
-
-func withWant(ctx context.Context, logField, reason string, wantKey WantWithTree) context.Context {
- ctx = dlog.WithField(ctx, logField+".reason", reason)
- ctx = dlog.WithField(ctx, logField+".key", wantKey)
- return ctx
-}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go
deleted file mode 100644
index 17949ab..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-import (
- "context"
- "time"
-
- "github.com/datawire/dlib/dlog"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/maps"
- "git.lukeshu.com/btrfs-progs-ng/lib/textui"
-)
-
-func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.ScanDevicesResult) (btrfstree.Superblock, graph.Graph, *keyio.Handle, error) {
- dlog.Info(ctx, "Reading superblock...")
- sb, err := fs.Superblock()
- if err != nil {
- return btrfstree.Superblock{}, graph.Graph{}, nil, err
- }
-
- dlog.Infof(ctx, "Reading node data from FS...")
-
- var stats textui.Portion[int]
- stats.D = countNodes(scanResults)
- progressWriter := textui.NewProgress[textui.Portion[int]](
- dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.read.substep", "read-nodes"),
- dlog.LogLevelInfo, textui.Tunable(1*time.Second))
-
- nodeGraph := graph.New(*sb)
- keyIO := keyio.NewHandle(fs, *sb)
-
- progressWriter.Set(stats)
- for _, devResults := range scanResults {
- for _, laddr := range maps.SortedKeys(devResults.FoundNodes) {
- if err := ctx.Err(); err != nil {
- return btrfstree.Superblock{}, graph.Graph{}, nil, err
- }
- nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, *sb, laddr, btrfstree.NodeExpectations{
- LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr},
- })
- if err != nil {
- btrfstree.FreeNodeRef(nodeRef)
- return btrfstree.Superblock{}, graph.Graph{}, nil, err
- }
-
- nodeGraph.InsertNode(nodeRef)
- keyIO.InsertNode(nodeRef)
-
- btrfstree.FreeNodeRef(nodeRef)
-
- stats.N++
- progressWriter.Set(stats)
- }
- }
- if stats.N != stats.D {
- panic("should not happen")
- }
- progressWriter.Done()
- dlog.Info(ctx, "... done reading node data")
-
- if err := nodeGraph.FinalCheck(ctx, fs, *sb); err != nil {
- return btrfstree.Superblock{}, graph.Graph{}, nil, err
- }
- keyIO.SetGraph(*nodeGraph)
-
- return *sb, *nodeGraph, keyIO, nil
-}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go
deleted file mode 100644
index 9d91f23..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-import (
- "golang.org/x/exp/constraints"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
-)
-
-func countNodes(nodeScanResults btrfsinspect.ScanDevicesResult) int {
- var cnt int
- for _, devResults := range nodeScanResults {
- cnt += len(devResults.FoundNodes)
- }
- return cnt
-}
-
-func roundDown[T constraints.Integer](n, d T) T {
- return (n / d) * d
-}
-
-func roundUp[T constraints.Integer](n, d T) T {
- return ((n + d - 1) / d) * d
-}
-
-func discardOK[T any](val T, _ bool) T {
- return val
-}
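
roundDown and roundUp above rely on truncating integer division, which is only correct for non-negative n and positive d; that holds for the block-size arithmetic they serve. A quick standalone illustration:

package main

import "fmt"

func roundDown(n, d uint64) uint64 { return (n / d) * d }
func roundUp(n, d uint64) uint64   { return ((n + d - 1) / d) * d }

func main() {
	const blockSize = 4096
	fmt.Println(roundDown(5000, blockSize)) // 4096
	fmt.Println(roundUp(5000, blockSize))   // 8192
	fmt.Println(roundUp(8192, blockSize))   // 8192 (already aligned)
}
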
diff --git a/lib/btrfsprogs/btrfsinspect/scandevices.go b/lib/btrfsprogs/btrfsinspect/scandevices.go
deleted file mode 100644
index d54be71..0000000
--- a/lib/btrfsprogs/btrfsinspect/scandevices.go
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package btrfsinspect
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
- "sync"
- "time"
-
- "github.com/datawire/dlib/dgroup"
- "github.com/datawire/dlib/dlog"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/textui"
-)
-
-type ScanDevicesResult map[btrfsvol.DeviceID]ScanOneDeviceResult
-
-func ScanDevices(ctx context.Context, fs *btrfs.FS) (ScanDevicesResult, error) {
- grp := dgroup.NewGroup(ctx, dgroup.GroupConfig{})
- var mu sync.Mutex
- result := make(map[btrfsvol.DeviceID]ScanOneDeviceResult)
- for id, dev := range fs.LV.PhysicalVolumes() {
- id := id
- dev := dev
- grp.Go(fmt.Sprintf("dev-%d", id), func(ctx context.Context) error {
- sb, err := dev.Superblock()
- if err != nil {
- return err
- }
- devResult, err := ScanOneDevice(ctx, dev, *sb)
- if err != nil {
- return err
- }
- mu.Lock()
- result[id] = devResult
- mu.Unlock()
- return nil
- })
- }
- if err := grp.Wait(); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-type ScanOneDeviceResult struct {
- Checksums btrfssum.SumRun[btrfsvol.PhysicalAddr]
- FoundNodes map[btrfsvol.LogicalAddr][]btrfsvol.PhysicalAddr
- FoundChunks []btrfstree.SysChunk
- FoundBlockGroups []SysBlockGroup
- FoundDevExtents []SysDevExtent
- FoundExtentCSums []SysExtentCSum
-}
-
-type SysBlockGroup struct {
- Key btrfsprim.Key
- BG btrfsitem.BlockGroup
-}
-
-type SysDevExtent struct {
- Key btrfsprim.Key
- DevExt btrfsitem.DevExtent
-}
-
-type SysExtentCSum struct {
- Generation btrfsprim.Generation
- Sums btrfsitem.ExtentCSum
-}
-
-// Compare implements containers.Ordered.
-func (a SysExtentCSum) Compare(b SysExtentCSum) int {
- return containers.NativeCompare(a.Sums.Addr, b.Sums.Addr)
-}
-
-type scanStats struct {
- textui.Portion[btrfsvol.PhysicalAddr]
-
- NumFoundNodes int
- NumFoundChunks int
- NumFoundBlockGroups int
- NumFoundDevExtents int
- NumFoundExtentCSums int
-}
-
-func (s scanStats) String() string {
- return textui.Sprintf("scanned %v (found: %v nodes, %v chunks, %v block groups, %v dev extents, %v sum items)",
- s.Portion,
- s.NumFoundNodes,
- s.NumFoundChunks,
- s.NumFoundBlockGroups,
- s.NumFoundDevExtents,
- s.NumFoundExtentCSums)
-}
-
-var sbSize = btrfsvol.PhysicalAddr(binstruct.StaticSize(btrfstree.Superblock{}))
-
-// ScanOneDevice mostly mimics btrfs-progs
-// cmds/rescue-chunk-recover.c:scan_one_device().
-func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblock) (ScanOneDeviceResult, error) {
- ctx = dlog.WithField(ctx, "btrfsinspect.scandevices.dev", dev.Name())
-
- result := ScanOneDeviceResult{
- FoundNodes: make(map[btrfsvol.LogicalAddr][]btrfsvol.PhysicalAddr),
- }
-
- devSize := dev.Size()
- if sb.NodeSize < sb.SectorSize {
- return result, fmt.Errorf("node_size(%v) < sector_size(%v)",
- sb.NodeSize, sb.SectorSize)
- }
- if sb.SectorSize != btrfssum.BlockSize {
- // TODO: probably handle this?
- return result, fmt.Errorf("sector_size(%v) != btrfssum.BlockSize",
- sb.SectorSize)
- }
- alg := sb.ChecksumType
- csumSize := alg.Size()
- numSums := int(devSize / btrfssum.BlockSize)
- var sums strings.Builder
- sums.Grow(numSums * csumSize)
-
- progressWriter := textui.NewProgress[scanStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
- progress := func(pos btrfsvol.PhysicalAddr) {
- progressWriter.Set(scanStats{
- Portion: textui.Portion[btrfsvol.PhysicalAddr]{
- N: pos,
- D: devSize,
- },
- NumFoundNodes: len(result.FoundNodes),
- NumFoundChunks: len(result.FoundChunks),
- NumFoundBlockGroups: len(result.FoundBlockGroups),
- NumFoundDevExtents: len(result.FoundDevExtents),
- NumFoundExtentCSums: len(result.FoundExtentCSums),
- })
- }
-
- var minNextNode btrfsvol.PhysicalAddr
- for i := 0; i < numSums; i++ {
- if ctx.Err() != nil {
- return result, ctx.Err()
- }
- pos := btrfsvol.PhysicalAddr(i * btrfssum.BlockSize)
- progress(pos)
-
- sum, err := btrfs.ChecksumPhysical(dev, alg, pos)
- if err != nil {
- return result, err
- }
- sums.Write(sum[:csumSize])
-
- checkForNode := pos >= minNextNode && pos+btrfsvol.PhysicalAddr(sb.NodeSize) <= devSize
- if checkForNode {
- for _, sbAddr := range btrfs.SuperblockAddrs {
- if sbAddr <= pos && pos < sbAddr+sbSize {
- checkForNode = false
- break
- }
- }
- }
-
- if checkForNode {
- nodeRef, err := btrfstree.ReadNode[btrfsvol.PhysicalAddr](dev, sb, pos, btrfstree.NodeExpectations{})
- if err != nil {
- if !errors.Is(err, btrfstree.ErrNotANode) {
- dlog.Errorf(ctx, "error: %v", err)
- }
- } else {
- result.FoundNodes[nodeRef.Data.Head.Addr] = append(result.FoundNodes[nodeRef.Data.Head.Addr], nodeRef.Addr)
- for i, item := range nodeRef.Data.BodyLeaf {
- switch item.Key.ItemType {
- case btrfsitem.CHUNK_ITEM_KEY:
- switch itemBody := item.Body.(type) {
- case *btrfsitem.Chunk:
- dlog.Tracef(ctx, "node@%v: item %v: found chunk",
- nodeRef.Addr, i)
- result.FoundChunks = append(result.FoundChunks, btrfstree.SysChunk{
- Key: item.Key,
- Chunk: *itemBody,
- })
- case *btrfsitem.Error:
- dlog.Errorf(ctx, "node@%v: item %v: error: malformed CHUNK_ITEM: %v",
- nodeRef.Addr, i, itemBody.Err)
- default:
- panic(fmt.Errorf("should not happen: CHUNK_ITEM has unexpected item type: %T", itemBody))
- }
- case btrfsitem.BLOCK_GROUP_ITEM_KEY:
- switch itemBody := item.Body.(type) {
- case *btrfsitem.BlockGroup:
- dlog.Tracef(ctx, "node@%v: item %v: found block group",
- nodeRef.Addr, i)
- result.FoundBlockGroups = append(result.FoundBlockGroups, SysBlockGroup{
- Key: item.Key,
- BG: *itemBody,
- })
- case *btrfsitem.Error:
- dlog.Errorf(ctx, "node@%v: item %v: error: malformed BLOCK_GROUP_ITEM: %v",
- nodeRef.Addr, i, itemBody.Err)
- default:
- panic(fmt.Errorf("should not happen: BLOCK_GROUP_ITEM has unexpected item type: %T", itemBody))
- }
- case btrfsitem.DEV_EXTENT_KEY:
- switch itemBody := item.Body.(type) {
- case *btrfsitem.DevExtent:
- dlog.Tracef(ctx, "node@%v: item %v: found dev extent",
- nodeRef.Addr, i)
- result.FoundDevExtents = append(result.FoundDevExtents, SysDevExtent{
- Key: item.Key,
- DevExt: *itemBody,
- })
- case *btrfsitem.Error:
- dlog.Errorf(ctx, "node@%v: item %v: error: malformed DEV_EXTENT: %v",
- nodeRef.Addr, i, itemBody.Err)
- default:
- panic(fmt.Errorf("should not happen: DEV_EXTENT has unexpected item type: %T", itemBody))
- }
- case btrfsitem.EXTENT_CSUM_KEY:
- switch itemBody := item.Body.(type) {
- case *btrfsitem.ExtentCSum:
- dlog.Tracef(ctx, "node@%v: item %v: found csums",
- nodeRef.Addr, i)
- result.FoundExtentCSums = append(result.FoundExtentCSums, SysExtentCSum{
- Generation: nodeRef.Data.Head.Generation,
- Sums: *itemBody,
- })
- case *btrfsitem.Error:
- dlog.Errorf(ctx, "node@%v: item %v: error: malformed EXTENT_CSUM: %v",
- nodeRef.Addr, i, itemBody.Err)
- default:
- panic(fmt.Errorf("should not happen: EXTENT_CSUM has unexpected item type: %T", itemBody))
- }
- }
- }
- minNextNode = pos + btrfsvol.PhysicalAddr(sb.NodeSize)
- }
- btrfstree.FreeNodeRef(nodeRef)
- }
- }
- progress(devSize)
- progressWriter.Done()
-
- result.Checksums = btrfssum.SumRun[btrfsvol.PhysicalAddr]{
- ChecksumSize: csumSize,
- Sums: btrfssum.ShortSum(sums.String()),
- }
-
- return result, nil
-}
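
ScanOneDevice above refuses to probe for a node at any position that lands inside a superblock mirror. A standalone sketch of that test, assuming the well-known btrfs mirror offsets (64 KiB, 64 MiB, 256 GiB) and a 4 KiB superblock; the names are illustrative, not the package's:

package main

import "fmt"

const sbSize = 4096 // assumed superblock struct size (4 KiB)

// The three well-known btrfs superblock mirror offsets.
var superblockAddrs = []int64{0x1_0000, 0x400_0000, 0x40_0000_0000}

// insideSuperblock reports whether pos falls within any mirror,
// mirroring the checkForNode test in the scan loop above.
func insideSuperblock(pos int64) bool {
	for _, sbAddr := range superblockAddrs {
		if sbAddr <= pos && pos < sbAddr+sbSize {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(insideSuperblock(0x1_0000)) // true
	fmt.Println(insideSuperblock(0x2_0000)) // false
}
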
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go b/lib/btrfsutil/graph.go
index 2a97ec8..8debe9d 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go
+++ b/lib/btrfsutil/graph.go
@@ -2,7 +2,7 @@
//
// SPDX-License-Identifier: GPL-2.0-or-later
-package graph
+package btrfsutil
import (
"context"
@@ -23,7 +23,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
-type Node struct {
+type GraphNode struct {
Level uint8
Generation btrfsprim.Generation
Owner btrfsprim.ObjID
@@ -33,7 +33,7 @@ type Node struct {
Items []btrfsprim.Key
}
-func (n Node) String() string {
+func (n GraphNode) String() string {
if reflect.ValueOf(n).IsZero() {
return "{}"
}
@@ -43,9 +43,9 @@ func (n Node) String() string {
n.MaxItem.ObjectID, n.MaxItem.ItemType, n.MaxItem.Offset)
}
-type Edge struct {
+type GraphEdge struct {
// It is invalid for both 'FromRoot' and 'FromNode' to be
- // non-zero. If both are zero, then the Edge is from the
+ // non-zero. If both are zero, then the GraphEdge is from the
// superblock.
FromRoot btrfsvol.LogicalAddr
FromNode btrfsvol.LogicalAddr
@@ -59,7 +59,7 @@ type Edge struct {
ToGeneration btrfsprim.Generation
}
-func (kp Edge) String() string {
+func (kp GraphEdge) String() string {
var from string
switch {
case kp.FromRoot != 0:
@@ -80,13 +80,13 @@ func (kp Edge) String() string {
}
type Graph struct {
- Nodes map[btrfsvol.LogicalAddr]Node
+ Nodes map[btrfsvol.LogicalAddr]GraphNode
BadNodes map[btrfsvol.LogicalAddr]error
- EdgesFrom map[btrfsvol.LogicalAddr][]*Edge
- EdgesTo map[btrfsvol.LogicalAddr][]*Edge
+ EdgesFrom map[btrfsvol.LogicalAddr][]*GraphEdge
+ EdgesTo map[btrfsvol.LogicalAddr][]*GraphEdge
}
-func (g Graph) insertEdge(ptr *Edge) {
+func (g Graph) insertEdge(ptr *GraphEdge) {
if ptr.ToNode == 0 {
panic("kp.ToNode should not be zero")
}
@@ -112,7 +112,7 @@ func (g Graph) insertTreeRoot(sb btrfstree.Superblock, treeID btrfsprim.ObjID) {
if treeInfo.RootNode == 0 {
return
}
- g.insertEdge(&Edge{
+ g.insertEdge(&GraphEdge{
FromTree: treeID,
ToNode: treeInfo.RootNode,
ToLevel: treeInfo.Level,
@@ -120,12 +120,12 @@ func (g Graph) insertTreeRoot(sb btrfstree.Superblock, treeID btrfsprim.ObjID) {
})
}
-func New(sb btrfstree.Superblock) *Graph {
+func NewGraph(sb btrfstree.Superblock) *Graph {
g := &Graph{
- Nodes: make(map[btrfsvol.LogicalAddr]Node),
+ Nodes: make(map[btrfsvol.LogicalAddr]GraphNode),
BadNodes: make(map[btrfsvol.LogicalAddr]error),
- EdgesFrom: make(map[btrfsvol.LogicalAddr][]*Edge),
- EdgesTo: make(map[btrfsvol.LogicalAddr][]*Edge),
+ EdgesFrom: make(map[btrfsvol.LogicalAddr][]*GraphEdge),
+ EdgesTo: make(map[btrfsvol.LogicalAddr][]*GraphEdge),
}
// These 4 trees are mentioned directly in the superblock, so
@@ -139,7 +139,7 @@ func New(sb btrfstree.Superblock) *Graph {
}
func (g Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) {
- nodeData := Node{
+ nodeData := GraphNode{
Level: nodeRef.Data.Head.Level,
Generation: nodeRef.Data.Head.Generation,
Owner: nodeRef.Data.Head.Owner,
@@ -155,14 +155,14 @@ func (g Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.No
cnt++
}
}
- kps := make([]Edge, 0, cnt)
+ kps := make([]GraphEdge, 0, cnt)
keys := make([]btrfsprim.Key, len(nodeRef.Data.BodyLeaf))
nodeData.Items = keys
g.Nodes[nodeRef.Addr] = nodeData
for i, item := range nodeRef.Data.BodyLeaf {
keys[i] = item.Key
if itemBody, ok := item.Body.(*btrfsitem.Root); ok {
- kps = append(kps, Edge{
+ kps = append(kps, GraphEdge{
FromRoot: nodeRef.Addr,
FromItem: i,
FromTree: item.Key.ObjectID,
@@ -175,9 +175,9 @@ func (g Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.No
}
} else {
g.Nodes[nodeRef.Addr] = nodeData
- kps := make([]Edge, len(nodeRef.Data.BodyInternal))
+ kps := make([]GraphEdge, len(nodeRef.Data.BodyInternal))
for i, kp := range nodeRef.Data.BodyInternal {
- kps[i] = Edge{
+ kps[i] = GraphEdge{
FromNode: nodeRef.Addr,
FromItem: i,
FromTree: nodeRef.Data.Head.Owner,
@@ -193,10 +193,8 @@ func (g Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.No
func (g Graph) FinalCheck(ctx context.Context, fs diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) error {
var stats textui.Portion[int]
- _ctx := ctx
- ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.read.substep", "check-keypointers")
- dlog.Info(_ctx, "Checking keypointers for dead-ends...")
+ dlog.Info(ctx, "Checking keypointers for dead-ends...")
progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
stats.D = len(g.EdgesTo)
progressWriter.Set(stats)
@@ -217,8 +215,7 @@ func (g Graph) FinalCheck(ctx context.Context, fs diskio.File[btrfsvol.LogicalAd
progressWriter.Done()
dlog.Info(ctx, "... done checking keypointers")
- ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.read.substep", "check-for-loops")
- dlog.Info(_ctx, "Checking for btree loops...")
+ dlog.Info(ctx, "Checking for btree loops...")
stats.D = len(g.Nodes)
stats.N = 0
progressWriter = textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
@@ -255,7 +252,7 @@ func (g Graph) FinalCheck(ctx context.Context, fs diskio.File[btrfsvol.LogicalAd
if numLoops > 0 {
return fmt.Errorf("%d btree loops", numLoops)
}
- dlog.Info(_ctx, "... done checking for loops")
+ dlog.Info(ctx, "... done checking for loops")
return nil
}
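
FinalCheck's first pass above looks for keypointers whose destination node was never successfully read ("dead-ends"). A simplified sketch of that idea, ignoring the level/generation checks and the re-read of missing nodes that the real code performs (all names here are illustrative):

package main

import "fmt"

type edge struct{ From, To uint64 }

// deadEnds returns every edge whose destination is not a known node.
func deadEnds(nodes map[uint64]bool, edges []edge) []edge {
	var dead []edge
	for _, e := range edges {
		if !nodes[e.To] {
			dead = append(dead, e)
		}
	}
	return dead
}

func main() {
	nodes := map[uint64]bool{100: true, 200: true}
	edges := []edge{{0, 100}, {100, 200}, {200, 300}}
	fmt.Println(deadEnds(nodes, edges)) // [{200 300}]
}
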
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go b/lib/btrfsutil/graph_loops.go
index 0e51805..3382705 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go
+++ b/lib/btrfsutil/graph_loops.go
@@ -1,8 +1,8 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
-package graph
+package btrfsutil
import (
"fmt"
@@ -42,7 +42,7 @@ func (g Graph) renderNode(node btrfsvol.LogicalAddr) []string {
}
}
-func (g Graph) renderEdge(kp Edge) []string {
+func (g Graph) renderEdge(kp GraphEdge) []string {
a := fmt.Sprintf("[%d]={", kp.FromItem)
b := strings.Repeat(" ", len(a))
ret := []string{
@@ -110,7 +110,7 @@ func (g Graph) renderLoop(stack []btrfsvol.LogicalAddr) []string {
return lines
}
-func checkNodeExpectations(kp Edge, toNode Node) error {
+func checkNodeExpectations(kp GraphEdge, toNode GraphNode) error {
var errs derror.MultiError
if toNode.Level != kp.ToLevel {
errs = append(errs, fmt.Errorf("kp.level=%v != node.level=%v",
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/nestedlock.go b/lib/btrfsutil/nestedlock.go
index c1ffa18..917b4cd 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/nestedlock.go
+++ b/lib/btrfsutil/nestedlock.go
@@ -2,7 +2,7 @@
//
// SPDX-License-Identifier: GPL-2.0-or-later
-package btrees
+package btrfsutil
import (
"context"
diff --git a/lib/btrfsprogs/btrfsutil/broken_btree.go b/lib/btrfsutil/old_rebuilt_forrest.go
index b7663fa..2386803 100644
--- a/lib/btrfsprogs/btrfsutil/broken_btree.go
+++ b/lib/btrfsutil/old_rebuilt_forrest.go
@@ -21,60 +21,60 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/diskio"
)
-type treeIndex struct {
- TreeRootErr error
- Items *containers.RBTree[treeIndexValue]
- Errors *containers.IntervalTree[btrfsprim.Key, treeIndexError]
+type oldRebuiltTree struct {
+ RootErr error
+ Items *containers.RBTree[oldRebuiltTreeValue]
+ Errors *containers.IntervalTree[btrfsprim.Key, oldRebuiltTreeError]
}
-type treeIndexError struct {
+type oldRebuiltTreeError struct {
Path SkinnyPath
Err error
}
-type treeIndexValue struct {
+type oldRebuiltTreeValue struct {
Path SkinnyPath
Key btrfsprim.Key
ItemSize uint32
}
// Compare implements containers.Ordered.
-func (a treeIndexValue) Compare(b treeIndexValue) int {
+func (a oldRebuiltTreeValue) Compare(b oldRebuiltTreeValue) int {
return a.Key.Compare(b.Key)
}
-func newTreeIndex(arena *SkinnyPathArena) treeIndex {
- return treeIndex{
- Items: new(containers.RBTree[treeIndexValue]),
- Errors: &containers.IntervalTree[btrfsprim.Key, treeIndexError]{
- MinFn: func(err treeIndexError) btrfsprim.Key {
+func newOldRebuiltTree(arena *SkinnyPathArena) oldRebuiltTree {
+ return oldRebuiltTree{
+ Items: new(containers.RBTree[oldRebuiltTreeValue]),
+ Errors: &containers.IntervalTree[btrfsprim.Key, oldRebuiltTreeError]{
+ MinFn: func(err oldRebuiltTreeError) btrfsprim.Key {
return arena.Inflate(err.Path).Node(-1).ToKey
},
- MaxFn: func(err treeIndexError) btrfsprim.Key {
+ MaxFn: func(err oldRebuiltTreeError) btrfsprim.Key {
return arena.Inflate(err.Path).Node(-1).ToMaxKey
},
},
}
}
-type brokenTrees struct {
+type OldRebuiltForrest struct {
ctx context.Context //nolint:containedctx // don't have an option while keeping the same API
inner *btrfs.FS
arena *SkinnyPathArena
// btrfsprim.ROOT_TREE_OBJECTID
- rootTreeMu sync.Mutex
- rootTreeIndex *treeIndex
+ rootTreeMu sync.Mutex
+ rootTree *oldRebuiltTree
// for all other trees
- treeMu sync.Mutex
- treeIndexes map[btrfsprim.ObjID]treeIndex
+ treesMu sync.Mutex
+ trees map[btrfsprim.ObjID]oldRebuiltTree
}
-var _ btrfstree.TreeOperator = (*brokenTrees)(nil)
+var _ btrfstree.TreeOperator = (*OldRebuiltForrest)(nil)
-// NewBrokenTrees wraps a *btrfs.FS to support looking up information
-// from broken trees.
+// NewOldRebuiltForrest wraps a *btrfs.FS to support looking up
+// information from broken trees.
//
// Of the btrfstree.TreeOperator methods:
//
@@ -87,43 +87,38 @@ var _ btrfstree.TreeOperator = (*brokenTrees)(nil)
// broken tree might not be), and a bad node may cause it to
// return a truncated list of results.
//
-// NewBrokenTrees attempts to remedy these deficiencies by using
+// NewOldRebuiltForrest attempts to remedy these deficiencies by using
// .TreeWalk to build an out-of-FS index of all of the items in the
// tree, and re-implements TreeLookup, TreeSearch, and TreeSearchAll
// using that index.
-func NewBrokenTrees(ctx context.Context, inner *btrfs.FS) interface {
- btrfstree.TreeOperator
- Superblock() (*btrfstree.Superblock, error)
- ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error)
- Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.LogicalAddr) ([]btrfsprim.Key, error)
-} {
- return &brokenTrees{
+func NewOldRebuiltForrest(ctx context.Context, inner *btrfs.FS) *OldRebuiltForrest {
+ return &OldRebuiltForrest{
ctx: ctx,
inner: inner,
}
}
-func (bt *brokenTrees) treeIndex(treeID btrfsprim.ObjID) treeIndex {
+func (bt *OldRebuiltForrest) RebuiltTree(treeID btrfsprim.ObjID) oldRebuiltTree {
var treeRoot *btrfstree.TreeRoot
var sb *btrfstree.Superblock
var err error
if treeID == btrfsprim.ROOT_TREE_OBJECTID {
bt.rootTreeMu.Lock()
defer bt.rootTreeMu.Unlock()
- if bt.rootTreeIndex != nil {
- return *bt.rootTreeIndex
+ if bt.rootTree != nil {
+ return *bt.rootTree
}
sb, err = bt.inner.Superblock()
if err == nil {
treeRoot, err = btrfstree.LookupTreeRoot(bt.inner, *sb, treeID)
}
} else {
- bt.treeMu.Lock()
- defer bt.treeMu.Unlock()
- if bt.treeIndexes == nil {
- bt.treeIndexes = make(map[btrfsprim.ObjID]treeIndex)
+ bt.treesMu.Lock()
+ defer bt.treesMu.Unlock()
+ if bt.trees == nil {
+ bt.trees = make(map[btrfsprim.ObjID]oldRebuiltTree)
}
- if cacheEntry, exists := bt.treeIndexes[treeID]; exists {
+ if cacheEntry, exists := bt.trees[treeID]; exists {
return cacheEntry
}
sb, err = bt.inner.Superblock()
@@ -141,23 +136,23 @@ func (bt *brokenTrees) treeIndex(treeID btrfsprim.ObjID) treeIndex {
SB: _sb,
}
}
- cacheEntry := newTreeIndex(bt.arena)
+ cacheEntry := newOldRebuiltTree(bt.arena)
if err != nil {
- cacheEntry.TreeRootErr = err
+ cacheEntry.RootErr = err
} else {
dlog.Infof(bt.ctx, "indexing tree %v...", treeID)
bt.rawTreeWalk(*treeRoot, cacheEntry, nil)
dlog.Infof(bt.ctx, "... done indexing tree %v", treeID)
}
if treeID == btrfsprim.ROOT_TREE_OBJECTID {
- bt.rootTreeIndex = &cacheEntry
+ bt.rootTree = &cacheEntry
} else {
- bt.treeIndexes[treeID] = cacheEntry
+ bt.trees[treeID] = cacheEntry
}
return cacheEntry
}
-func (bt *brokenTrees) rawTreeWalk(root btrfstree.TreeRoot, cacheEntry treeIndex, walked *[]btrfsprim.Key) {
+func (bt *OldRebuiltForrest) rawTreeWalk(root btrfstree.TreeRoot, cacheEntry oldRebuiltTree, walked *[]btrfsprim.Key) {
btrfstree.TreeOperatorImpl{NodeSource: bt.inner}.RawTreeWalk(
bt.ctx,
root,
@@ -167,20 +162,20 @@ func (bt *brokenTrees) rawTreeWalk(root btrfstree.TreeRoot, cacheEntry treeIndex
// indicates a bug in my item parser than a problem with the filesystem.
panic(fmt.Errorf("TODO: error parsing item: %w", err))
}
- cacheEntry.Errors.Insert(treeIndexError{
+ cacheEntry.Errors.Insert(oldRebuiltTreeError{
Path: bt.arena.Deflate(err.Path),
Err: err.Err,
})
},
btrfstree.TreeWalkHandler{
Item: func(path btrfstree.TreePath, item btrfstree.Item) error {
- if cacheEntry.Items.Search(func(v treeIndexValue) int { return item.Key.Compare(v.Key) }) != nil {
+ if cacheEntry.Items.Search(func(v oldRebuiltTreeValue) int { return item.Key.Compare(v.Key) }) != nil {
// This is a panic because I'm not really sure what the best way to
// handle this is, and so if this happens I want the program to crash
// and force me to figure out how to handle it.
panic(fmt.Errorf("dup key=%v in tree=%v", item.Key, root.TreeID))
}
- cacheEntry.Items.Insert(treeIndexValue{
+ cacheEntry.Items.Insert(oldRebuiltTreeValue{
Path: bt.arena.Deflate(path),
Key: item.Key,
ItemSize: item.BodySize,
@@ -194,7 +189,7 @@ func (bt *brokenTrees) rawTreeWalk(root btrfstree.TreeRoot, cacheEntry treeIndex
)
}
-func (bt *brokenTrees) TreeLookup(treeID btrfsprim.ObjID, key btrfsprim.Key) (btrfstree.Item, error) {
+func (bt *OldRebuiltForrest) TreeLookup(treeID btrfsprim.ObjID, key btrfsprim.Key) (btrfstree.Item, error) {
item, err := bt.TreeSearch(treeID, btrfstree.KeySearch(key.Compare))
if err != nil {
err = fmt.Errorf("item with key=%v: %w", key, err)
@@ -202,11 +197,11 @@ func (bt *brokenTrees) TreeLookup(treeID btrfsprim.ObjID, key btrfsprim.Key) (bt
return item, err
}
-func (bt *brokenTrees) addErrs(index treeIndex, fn func(btrfsprim.Key, uint32) int, err error) error {
+func (bt *OldRebuiltForrest) addErrs(tree oldRebuiltTree, fn func(btrfsprim.Key, uint32) int, err error) error {
var errs derror.MultiError
- index.Errors.Subrange(
+ tree.Errors.Subrange(
func(k btrfsprim.Key) int { return fn(k, 0) },
- func(v treeIndexError) bool {
+ func(v oldRebuiltTreeError) bool {
errs = append(errs, &btrfstree.TreeError{
Path: bt.arena.Inflate(v.Path),
Err: v.Err,
@@ -222,24 +217,24 @@ func (bt *brokenTrees) addErrs(index treeIndex, fn func(btrfsprim.Key, uint32) i
return errs
}
-func (bt *brokenTrees) TreeSearch(treeID btrfsprim.ObjID, fn func(btrfsprim.Key, uint32) int) (btrfstree.Item, error) {
- index := bt.treeIndex(treeID)
- if index.TreeRootErr != nil {
- return btrfstree.Item{}, index.TreeRootErr
+func (bt *OldRebuiltForrest) TreeSearch(treeID btrfsprim.ObjID, fn func(btrfsprim.Key, uint32) int) (btrfstree.Item, error) {
+ tree := bt.RebuiltTree(treeID)
+ if tree.RootErr != nil {
+ return btrfstree.Item{}, tree.RootErr
}
- indexItem := index.Items.Search(func(indexItem treeIndexValue) int {
+ indexItem := tree.Items.Search(func(indexItem oldRebuiltTreeValue) int {
return fn(indexItem.Key, indexItem.ItemSize)
})
if indexItem == nil {
- return btrfstree.Item{}, bt.addErrs(index, fn, iofs.ErrNotExist)
+ return btrfstree.Item{}, bt.addErrs(tree, fn, iofs.ErrNotExist)
}
itemPath := bt.arena.Inflate(indexItem.Value.Path)
node, err := bt.inner.ReadNode(itemPath.Parent())
defer btrfstree.FreeNodeRef(node)
if err != nil {
- return btrfstree.Item{}, bt.addErrs(index, fn, err)
+ return btrfstree.Item{}, bt.addErrs(tree, fn, err)
}
item := node.Data.BodyLeaf[itemPath.Node(-1).FromItemIdx]
@@ -250,21 +245,21 @@ func (bt *brokenTrees) TreeSearch(treeID btrfsprim.ObjID, fn func(btrfsprim.Key,
return item, nil
}
-func (bt *brokenTrees) TreeSearchAll(treeID btrfsprim.ObjID, fn func(btrfsprim.Key, uint32) int) ([]btrfstree.Item, error) {
- index := bt.treeIndex(treeID)
- if index.TreeRootErr != nil {
- return nil, index.TreeRootErr
+func (bt *OldRebuiltForrest) TreeSearchAll(treeID btrfsprim.ObjID, fn func(btrfsprim.Key, uint32) int) ([]btrfstree.Item, error) {
+ tree := bt.RebuiltTree(treeID)
+ if tree.RootErr != nil {
+ return nil, tree.RootErr
}
- var indexItems []treeIndexValue
- index.Items.Subrange(
- func(indexItem treeIndexValue) int { return fn(indexItem.Key, indexItem.ItemSize) },
- func(node *containers.RBNode[treeIndexValue]) bool {
+ var indexItems []oldRebuiltTreeValue
+ tree.Items.Subrange(
+ func(indexItem oldRebuiltTreeValue) int { return fn(indexItem.Key, indexItem.ItemSize) },
+ func(node *containers.RBNode[oldRebuiltTreeValue]) bool {
indexItems = append(indexItems, node.Value)
return true
})
if len(indexItems) == 0 {
- return nil, bt.addErrs(index, fn, iofs.ErrNotExist)
+ return nil, bt.addErrs(tree, fn, iofs.ErrNotExist)
}
ret := make([]btrfstree.Item, len(indexItems))
@@ -277,7 +272,7 @@ func (bt *brokenTrees) TreeSearchAll(treeID btrfsprim.ObjID, fn func(btrfsprim.K
node, err = bt.inner.ReadNode(itemPath.Parent())
if err != nil {
btrfstree.FreeNodeRef(node)
- return nil, bt.addErrs(index, fn, err)
+ return nil, bt.addErrs(tree, fn, err)
}
}
ret[i] = node.Data.BodyLeaf[itemPath.Node(-1).FromItemIdx]
@@ -285,23 +280,23 @@ func (bt *brokenTrees) TreeSearchAll(treeID btrfsprim.ObjID, fn func(btrfsprim.K
}
btrfstree.FreeNodeRef(node)
- return ret, bt.addErrs(index, fn, nil)
+ return ret, bt.addErrs(tree, fn, nil)
}
-func (bt *brokenTrees) TreeWalk(ctx context.Context, treeID btrfsprim.ObjID, errHandle func(*btrfstree.TreeError), cbs btrfstree.TreeWalkHandler) {
- index := bt.treeIndex(treeID)
- if index.TreeRootErr != nil {
+func (bt *OldRebuiltForrest) TreeWalk(ctx context.Context, treeID btrfsprim.ObjID, errHandle func(*btrfstree.TreeError), cbs btrfstree.TreeWalkHandler) {
+ tree := bt.RebuiltTree(treeID)
+ if tree.RootErr != nil {
errHandle(&btrfstree.TreeError{
Path: btrfstree.TreePath{{
FromTree: treeID,
ToMaxKey: btrfsprim.MaxKey,
}},
- Err: index.TreeRootErr,
+ Err: tree.RootErr,
})
return
}
var node *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]
- index.Items.Range(func(indexItem *containers.RBNode[treeIndexValue]) bool {
+ tree.Items.Range(func(indexItem *containers.RBNode[oldRebuiltTreeValue]) bool {
if ctx.Err() != nil {
return false
}
@@ -330,22 +325,22 @@ func (bt *brokenTrees) TreeWalk(ctx context.Context, treeID btrfsprim.ObjID, err
btrfstree.FreeNodeRef(node)
}
-func (bt *brokenTrees) Superblock() (*btrfstree.Superblock, error) {
+func (bt *OldRebuiltForrest) Superblock() (*btrfstree.Superblock, error) {
return bt.inner.Superblock()
}
-func (bt *brokenTrees) ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error) {
+func (bt *OldRebuiltForrest) ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error) {
return bt.inner.ReadAt(p, off)
}
-func (bt *brokenTrees) Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.LogicalAddr) ([]btrfsprim.Key, error) {
+func (bt *OldRebuiltForrest) Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.LogicalAddr) ([]btrfsprim.Key, error) {
sb, err := bt.Superblock()
if err != nil {
return nil, err
}
- index := bt.treeIndex(treeID)
- if index.TreeRootErr != nil {
- return nil, index.TreeRootErr
+ tree := bt.RebuiltTree(treeID)
+ if tree.RootErr != nil {
+ return nil, tree.RootErr
}
nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](bt.inner, *sb, nodeAddr, btrfstree.NodeExpectations{})
defer btrfstree.FreeNodeRef(nodeRef)
@@ -358,6 +353,6 @@ func (bt *brokenTrees) Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.Logical
RootNode: nodeAddr,
Level: nodeRef.Data.Head.Level,
Generation: nodeRef.Data.Head.Generation,
- }, index, &ret)
+ }, tree, &ret)
return ret, nil
}
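
OldRebuiltForrest indexes each tree lazily: the first operation on a tree walks it once, builds a sorted index, and caches it; every later operation is served from the cache. A minimal sketch of that memoization pattern, with stand-ins for TreeWalk and the RBTree index (none of these names are the package's):

package main

import (
	"fmt"
	"sort"
	"sync"
)

type index struct{ keys []int } // stand-in for the RBTree of items

type forrest struct {
	mu    sync.Mutex
	trees map[int]*index
	walk  func(treeID int) []int // stand-in for TreeWalk
}

// tree returns the cached index for treeID, building it on first use.
func (f *forrest) tree(treeID int) *index {
	f.mu.Lock()
	defer f.mu.Unlock()
	if idx, ok := f.trees[treeID]; ok {
		return idx
	}
	keys := f.walk(treeID) // expensive: happens once per tree
	sort.Ints(keys)
	idx := &index{keys: keys}
	f.trees[treeID] = idx
	return idx
}

func main() {
	walks := 0
	f := &forrest{
		trees: make(map[int]*index),
		walk:  func(int) []int { walks++; return []int{3, 1, 2} },
	}
	f.tree(5)
	f.tree(5)
	fmt.Println(walks, f.tree(5).keys) // 1 [1 2 3]
}
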
diff --git a/lib/btrfsprogs/btrfsutil/open.go b/lib/btrfsutil/open.go
index c5ee314..c5ee314 100644
--- a/lib/btrfsprogs/btrfsutil/open.go
+++ b/lib/btrfsutil/open.go
diff --git a/lib/btrfsprogs/btrfsinspect/print_addrspace.go b/lib/btrfsutil/print_addrspace.go
index e85e055..c9c51f0 100644
--- a/lib/btrfsprogs/btrfsinspect/print_addrspace.go
+++ b/lib/btrfsutil/print_addrspace.go
@@ -2,7 +2,7 @@
//
// SPDX-License-Identifier: GPL-2.0-or-later
-package btrfsinspect
+package btrfsutil
import (
"io"
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go b/lib/btrfsutil/rebuilt_forrest.go
index dbbc6eb..70ece13 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go
+++ b/lib/btrfsutil/rebuilt_forrest.go
@@ -2,7 +2,7 @@
//
// SPDX-License-Identifier: GPL-2.0-or-later
-package btrees
+package btrfsutil
import (
"context"
@@ -13,14 +13,12 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- pkggraph "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
"git.lukeshu.com/btrfs-progs-ng/lib/slices"
"git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
-type Callbacks interface {
+type RebuiltForrestCallbacks interface {
AddedItem(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key)
AddedRoot(ctx context.Context, tree btrfsprim.ObjID, root btrfsvol.LogicalAddr)
LookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool)
@@ -31,23 +29,23 @@ type Callbacks interface {
// potentially broken btrees.
//
// It is conceptually a btrfstree.TreeOperator, and adds similar
-// broken-tree handling to btrfsutil.BrokenForrest. However, the API
-// is different thant btrfstree.TreeOperator, and is much more
-// efficient than btrfsutil.BrokenForrest.
+// broken-tree handling to OldRebuiltForrest. However, the API is
+// different than btrfstree.TreeOperator, and is much more efficient
+// than OldRebuiltForrest.
//
// The efficiency improvements are possible because of the API
// differences, which are necessary for how it is used in
-// rebuildnodes:
+// rebuildtrees:
//
-// - it consumes an already-read graph.Graph instead of reading the
-// graph itself
+// - it consumes an already-read Graph instead of reading the graph
+// itself
//
// - it does not use `btrfstree.TreePath`
//
// - it does not keep track of errors encountered in a tree
//
-// Additionally, it provides some functionality that
-// btrfsutil.BrokenForrest does not:
+// Additionally, it provides some functionality that OldRebuiltForrest
+// does not:
//
// - it provides a .LeafToRoots() method to advise on what
// additional roots should be added
@@ -60,9 +58,9 @@ type Callbacks interface {
type RebuiltForrest struct {
// static
sb btrfstree.Superblock
- graph pkggraph.Graph
- keyIO *keyio.Handle
- cb Callbacks
+ graph Graph
+ keyIO *KeyIO
+ cb RebuiltForrestCallbacks
// mutable
treesMu nestedMutex
@@ -74,7 +72,7 @@ type RebuiltForrest struct {
// NewRebuiltForrest returns a new RebuiltForrest instance. All of
// the callbacks must be non-nil.
-func NewRebuiltForrest(sb btrfstree.Superblock, graph pkggraph.Graph, keyIO *keyio.Handle, cb Callbacks) *RebuiltForrest {
+func NewRebuiltForrest(sb btrfstree.Superblock, graph Graph, keyIO *KeyIO, cb RebuiltForrestCallbacks) *RebuiltForrest {
return &RebuiltForrest{
sb: sb,
graph: graph,
@@ -120,7 +118,7 @@ func (ts *RebuiltForrest) addTree(ctx context.Context, treeID btrfsprim.ObjID, s
}
}()
stack = append(stack, treeID)
- ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree", stack)
+ ctx = dlog.WithField(ctx, "btrfs.util.rebuilt-forrest.add-tree", stack)
dlog.Info(ctx, "adding tree...")
if slices.Contains(treeID, stack[:len(stack)-1]) {
dlog.Errorf(ctx, "failed to add tree: loop detected: %v", stack)
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go b/lib/btrfsutil/rebuilt_readitem.go
index 56da32d..57440cf 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go
+++ b/lib/btrfsutil/rebuilt_readitem.go
@@ -2,7 +2,7 @@
//
// SPDX-License-Identifier: GPL-2.0-or-later
-package keyio
+package btrfsutil
import (
"context"
@@ -15,7 +15,6 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
"git.lukeshu.com/btrfs-progs-ng/lib/diskio"
"git.lukeshu.com/btrfs-progs-ng/lib/textui"
@@ -40,10 +39,10 @@ type FlagsAndErr struct {
Err error
}
-type Handle struct {
+type KeyIO struct {
rawFile diskio.File[btrfsvol.LogicalAddr]
sb btrfstree.Superblock
- graph graph.Graph
+ graph Graph
Flags map[ItemPtr]FlagsAndErr // INODE_ITEM
Names map[ItemPtr][]byte // DIR_INDEX
@@ -53,8 +52,8 @@ type Handle struct {
cache containers.ARCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]]
}
-func NewHandle(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) *Handle {
- return &Handle{
+func NewKeyIO(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) *KeyIO {
+ return &KeyIO{
rawFile: file,
sb: sb,
@@ -71,7 +70,7 @@ func NewHandle(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock)
}
}
-func (o *Handle) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) {
+func (o *KeyIO) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) {
for i, item := range nodeRef.Data.BodyLeaf {
ptr := ItemPtr{
Node: nodeRef.Addr,
@@ -115,11 +114,11 @@ func (o *Handle) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.
}
}
-func (o *Handle) SetGraph(graph graph.Graph) {
+func (o *KeyIO) SetGraph(graph Graph) {
o.graph = graph
}
-func (o *Handle) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node] {
+func (o *KeyIO) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node] {
if cached, ok := o.cache.Load(laddr); ok {
dlog.Tracef(ctx, "cache-hit node@%v", laddr)
return cached
@@ -154,18 +153,18 @@ func (o *Handle) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr) *disk
return ref
}
-func (o *Handle) ReadItem(ctx context.Context, ptr ItemPtr) btrfsitem.Item {
+func (o *KeyIO) ReadItem(ctx context.Context, ptr ItemPtr) btrfsitem.Item {
o.mu.Lock()
defer o.mu.Unlock()
if o.graph.Nodes[ptr.Node].Level != 0 {
- panic(fmt.Errorf("should not happen: keyio.Handle.ReadItem called for non-leaf node@%v", ptr.Node))
+ panic(fmt.Errorf("should not happen: btrfsutil.KeyIO.ReadItem called for non-leaf node@%v", ptr.Node))
}
if ptr.Idx < 0 {
- panic(fmt.Errorf("should not happen: keyio.Handle.ReadItem called for negative item index: %v", ptr.Idx))
+ panic(fmt.Errorf("should not happen: btrfsutil.KeyIO.ReadItem called for negative item index: %v", ptr.Idx))
}
items := o.readNode(ctx, ptr.Node).Data.BodyLeaf
if ptr.Idx >= len(items) {
- panic(fmt.Errorf("should not happen: keyio.Handle.ReadItem called for out-of-bounds item index: index=%v len=%v",
+ panic(fmt.Errorf("should not happen: btrfsutil.KeyIO.ReadItem called for out-of-bounds item index: index=%v len=%v",
ptr.Idx, len(items)))
}
return items[ptr.Idx].Body.CloneItem()
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go b/lib/btrfsutil/rebuilt_tree.go
index 39d8871..1009204 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go
+++ b/lib/btrfsutil/rebuilt_tree.go
@@ -2,7 +2,7 @@
//
// SPDX-License-Identifier: GPL-2.0-or-later
-package btrees
+package btrfsutil
import (
"context"
@@ -15,7 +15,6 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
"git.lukeshu.com/btrfs-progs-ng/lib/maps"
"git.lukeshu.com/btrfs-progs-ng/lib/slices"
@@ -49,7 +48,7 @@ type RebuiltTree struct {
// .isOwnerOK, whether or not they're in the tree.
func (tree *RebuiltTree) leafToRoots(ctx context.Context) map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] {
return containers.LoadOrElse[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]](&tree.forrest.leafs, tree.ID, func(btrfsprim.ObjID) map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] {
- ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-nodes", fmt.Sprintf("tree=%v", tree.ID))
+ ctx = dlog.WithField(ctx, "btrfs.util.rebuilt-tree.index-nodes", fmt.Sprintf("tree=%v", tree.ID))
nodeToRoots := make(map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr])
@@ -136,8 +135,8 @@ func (tree *RebuiltTree) isOwnerOK(owner btrfsprim.ObjID, gen btrfsprim.Generati
//
// Do not mutate the returned map; it is a pointer to the
// RebuiltTree's internal map!
-func (tree *RebuiltTree) Items(ctx context.Context) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] {
- ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-inc-items", fmt.Sprintf("tree=%v", tree.ID))
+func (tree *RebuiltTree) Items(ctx context.Context) *containers.SortedMap[btrfsprim.Key, ItemPtr] {
+ ctx = dlog.WithField(ctx, "btrfs.util.rebuilt-tree.index-inc-items", fmt.Sprintf("tree=%v", tree.ID))
return tree.items(ctx, &tree.forrest.incItems, tree.Roots.HasAny)
}
@@ -146,15 +145,15 @@ func (tree *RebuiltTree) Items(ctx context.Context) *containers.SortedMap[btrfsp
//
// Do not mutate the returned map; it is a pointer to the
// RebuiltTree's internal map!
-func (tree *RebuiltTree) PotentialItems(ctx context.Context) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] {
- ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-exc-items", fmt.Sprintf("tree=%v", tree.ID))
+func (tree *RebuiltTree) PotentialItems(ctx context.Context) *containers.SortedMap[btrfsprim.Key, ItemPtr] {
+ ctx = dlog.WithField(ctx, "btrfs.util.rebuilt-tree.index-exc-items", fmt.Sprintf("tree=%v", tree.ID))
return tree.items(ctx, &tree.forrest.excItems,
func(roots containers.Set[btrfsvol.LogicalAddr]) bool {
return !tree.Roots.HasAny(roots)
})
}
-type itemIndex = containers.SortedMap[btrfsprim.Key, keyio.ItemPtr]
+type itemIndex = containers.SortedMap[btrfsprim.Key, ItemPtr]
type itemStats struct {
Leafs textui.Portion[int]
@@ -169,7 +168,7 @@ func (s itemStats) String() string {
func (tree *RebuiltTree) items(ctx context.Context, cache containers.Map[btrfsprim.ObjID, *itemIndex],
leafFn func(roots containers.Set[btrfsvol.LogicalAddr]) bool,
-) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] {
+) *containers.SortedMap[btrfsprim.Key, ItemPtr] {
tree.mu.RLock()
defer tree.mu.RUnlock()
@@ -186,12 +185,12 @@ func (tree *RebuiltTree) items(ctx context.Context, cache containers.Map[btrfspr
stats.Leafs.D = len(leafs)
progressWriter := textui.NewProgress[itemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
- index := new(containers.SortedMap[btrfsprim.Key, keyio.ItemPtr])
+ index := new(containers.SortedMap[btrfsprim.Key, ItemPtr])
for i, leaf := range leafs {
stats.Leafs.N = i
progressWriter.Set(stats)
for j, itemKey := range tree.forrest.graph.Nodes[leaf].Items {
- newPtr := keyio.ItemPtr{
+ newPtr := ItemPtr{
Node: leaf,
Idx: j,
}
@@ -268,7 +267,7 @@ func (s rootStats) String() string {
func (tree *RebuiltTree) AddRoot(ctx context.Context, rootNode btrfsvol.LogicalAddr) {
tree.mu.Lock()
defer tree.mu.Unlock()
- ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-root", fmt.Sprintf("tree=%v rootNode=%v", tree.ID, rootNode))
+ ctx = dlog.WithField(ctx, "btrfs.util.rebuilt-tree.add-root", fmt.Sprintf("tree=%v rootNode=%v", tree.ID, rootNode))
dlog.Info(ctx, "adding root...")
leafToRoots := tree.leafToRoots(ctx)
@@ -328,7 +327,7 @@ func (tree *RebuiltTree) COWDistance(parentID btrfsprim.ObjID) (dist int, ok boo
func (tree *RebuiltTree) ReadItem(ctx context.Context, key btrfsprim.Key) btrfsitem.Item {
ptr, ok := tree.Items(ctx).Load(key)
if !ok {
- panic(fmt.Errorf("should not happen: btrees.RebuiltTree.ReadItem called for not-included key: %v", key))
+ panic(fmt.Errorf("should not happen: btrfsutil.RebuiltTree.ReadItem called for not-included key: %v", key))
}
return tree.forrest.keyIO.ReadItem(ctx, ptr)
}
diff --git a/lib/btrfsprogs/btrfsutil/skinny_paths.go b/lib/btrfsutil/skinny_paths.go
index 1695990..1695990 100644
--- a/lib/btrfsprogs/btrfsutil/skinny_paths.go
+++ b/lib/btrfsutil/skinny_paths.go
diff --git a/lib/btrfsprogs/btrfsutil/walk.go b/lib/btrfsutil/walk.go
index 355976a..355976a 100644
--- a/lib/btrfsprogs/btrfsutil/walk.go
+++ b/lib/btrfsutil/walk.go
diff --git a/lib/textui/log.go b/lib/textui/log.go
index 2a6fdd4..0a10ef6 100644
--- a/lib/textui/log.go
+++ b/lib/textui/log.go
@@ -289,57 +289,59 @@ func fieldOrd(key string) int {
case "dexec.err":
return -95
- // btrfsinspect scandevices ////////////////////////////////////////////
- case "btrfsinspect.scandevices.dev":
+ // btrfs inspect rebuild-mappings scan /////////////////////////////////
+ case "btrfs.inspect.rebuild-mappings.scan.dev":
return -1
- // btrfsinspect rebuild-mappings ///////////////////////////////////////
- case "btrfsinspect.rebuild-mappings.step":
+ // btrfs inspect rebuild-mappings process //////////////////////////////
+ case "btrfs.inspect.rebuild-mappings.process.step":
return -2
- case "btrfsinspect.rebuild-mappings.substep":
+ case "btrfs.inspect.rebuild-mappings.process.substep":
return -1
- // btrfsinspect rebuild-nodes //////////////////////////////////////////
- case "btrfsinspect.rebuild-nodes.step":
+ // btrfs inspect rebuild-trees /////////////////////////////////////////
+ case "btrfs.inspect.rebuild-trees.step":
return -50
// step=read-fs-data
- case "btrfsinspect.rebuild-nodes.read.substep":
+ case "btrfs.inspect.rebuild-trees.read.substep":
return -1
// step=rebuild
- case "btrfsinspect.rebuild-nodes.rebuild.pass":
+ case "btrfs.inspect.rebuild-trees.rebuild.pass":
return -49
- case "btrfsinspect.rebuild-nodes.rebuild.substep":
+ case "btrfs.inspect.rebuild-trees.rebuild.substep":
return -48
- case "btrfsinspect.rebuild-nodes.rebuild.substep.progress":
+ case "btrfs.inspect.rebuild-trees.rebuild.substep.progress":
return -47
// step=rebuild, substep=collect-items (1/3)
// step=rebuild, substep=settle-items (2a/3)
- case "btrfsinspect.rebuild-nodes.rebuild.settle.item":
+ case "btrfs.inspect.rebuild-trees.rebuild.settle.item":
return -25
// step=rebuild, substep=process-items (2b/3)
- case "btrfsinspect.rebuild-nodes.rebuild.process.item":
+ case "btrfs.inspect.rebuild-trees.rebuild.process.item":
return -25
// step=rebuild, substep=apply-augments (3/3)
- case "btrfsinspect.rebuild-nodes.rebuild.augment.tree":
+ case "btrfs.inspect.rebuild-trees.rebuild.augment.tree":
return -25
// step=rebuild (any substep)
- case "btrfsinspect.rebuild-nodes.rebuild.want.key":
+ case "btrfs.inspect.rebuild-trees.rebuild.want.key":
return -9
- case "btrfsinspect.rebuild-nodes.rebuild.want.reason":
+ case "btrfs.inspect.rebuild-trees.rebuild.want.reason":
return -8
- case "btrfsinspect.rebuild-nodes.rebuild.add-tree":
+
+ // btrfsutil.RebuiltForrest ////////////////////////////////////////////
+ case "btrfs.util.rebuilt-forrest.add-tree":
return -7
- case "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.key":
+ case "btrfs.util.rebuilt-forrest.add-tree.want.key":
return -6
- case "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.reason":
+ case "btrfs.util.rebuilt-forrest.add-tree.want.reason":
return -5
- case "btrfsinspect.rebuild-nodes.rebuild.add-root":
+ case "btrfs.util.rebuilt-tree.add-root":
return -4
- case "btrfsinspect.rebuild-nodes.rebuild.index-inc-items":
+ case "btrfs.util.rebuilt-tree.index-inc-items":
return -3
- case "btrfsinspect.rebuild-nodes.rebuild.index-exc-items":
+ case "btrfs.util.rebuilt-tree.index-exc-items":
return -2
- case "btrfsinspect.rebuild-nodes.rebuild.index-nodes":
+ case "btrfs.util.rebuilt-tree.index-nodes":
return -1
// other ///////////////////////////////////////////////////////////////
@@ -398,27 +400,37 @@ func writeField(w io.Writer, key string, val any) {
case strings.HasSuffix(name, ".pass"):
fmt.Fprintf(w, "/pass-%s", valStr)
return
- case strings.HasSuffix(name, ".substep") && name != "btrfsinspect.rebuild-nodes.rebuild.add-tree.substep":
+ case strings.HasSuffix(name, ".substep") && name != "btrfs.util.rebuilt-forrest.add-tree.substep":
fmt.Fprintf(w, "/%s", valStr)
return
- case strings.HasPrefix(name, "btrfsinspect."):
- name = strings.TrimPrefix(name, "btrfsinspect.")
+ case strings.HasPrefix(name, "btrfs."):
+ name = strings.TrimPrefix(name, "btrfs.")
switch {
- case strings.HasPrefix(name, "scandevices."):
- name = strings.TrimPrefix(name, "scandevices.")
- case strings.HasPrefix(name, "rebuild-mappings."):
- name = strings.TrimPrefix(name, "rebuild-mappings.")
- case strings.HasPrefix(name, "rebuild-nodes."):
- name = strings.TrimPrefix(name, "rebuild-nodes.")
+ case strings.HasPrefix(name, "inspect."):
+ name = strings.TrimPrefix(name, "inspect.")
switch {
- case strings.HasPrefix(name, "read."):
- name = strings.TrimPrefix(name, "read.")
- case strings.HasPrefix(name, "rebuild."):
- name = strings.TrimPrefix(name, "rebuild.")
+ case strings.HasPrefix(name, "rebuild-mappings."):
+ name = strings.TrimPrefix(name, "rebuild-mappings.")
+ switch {
+ case strings.HasPrefix(name, "scan."):
+ name = strings.TrimPrefix(name, "scan.")
+ case strings.HasPrefix(name, "process."):
+ name = strings.TrimPrefix(name, "process.")
+ }
+ case strings.HasPrefix(name, "rebuild-trees."):
+ name = strings.TrimPrefix(name, "rebuild-trees.")
+ switch {
+ case strings.HasPrefix(name, "read."):
+ name = strings.TrimPrefix(name, "read.")
+ case strings.HasPrefix(name, "rebuild."):
+ name = strings.TrimPrefix(name, "rebuild.")
+ }
}
+ case strings.HasPrefix(name, "util.rebuilt-forrest."):
+ name = strings.TrimPrefix(name, "util.rebuilt-forrest.")
+ case strings.HasPrefix(name, "util.rebuilt-tree."):
+ name = strings.TrimPrefix(name, "util.rebuilt-tree.")
}
- case strings.HasPrefix(name, "btrfs."):
- name = strings.TrimPrefix(name, "btrfs.")
}
fmt.Fprintf(w, " %s=%s", name, valStr)
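
The nested switch in writeField peels known dotted prefixes off a field name so that log keys render compactly. A flattened approximation, using sequential TrimPrefix calls rather than the nested switches, which gives the same result for typical keys listed in fieldOrd above:

package main

import (
	"fmt"
	"strings"
)

// trimFieldName peels known prefixes in order, once each.
func trimFieldName(name string) string {
	for _, prefix := range []string{
		"btrfs.",
		"inspect.", "util.rebuilt-forrest.", "util.rebuilt-tree.",
		"rebuild-mappings.", "rebuild-trees.",
		"scan.", "process.", "read.", "rebuild.",
	} {
		name = strings.TrimPrefix(name, prefix)
	}
	return name
}

func main() {
	fmt.Println(trimFieldName("btrfs.inspect.rebuild-trees.rebuild.want.key")) // want.key
	fmt.Println(trimFieldName("btrfs.util.rebuilt-tree.add-root"))            // add-root
}
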