-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go   | 46
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go       | 59
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go          | 15
3 files changed, 67 insertions(+), 53 deletions(-)
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go
index 916dc53..d92495e 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go
@@ -25,24 +25,66 @@ func (ptr ItemPtr) String() string {
 	return fmt.Sprintf("node@%v[%v]", ptr.Node, ptr.Idx)
 }
 
+type SizeAndErr struct {
+	Size uint64
+	Err  error
+}
+
 type Handle struct {
 	rawFile diskio.File[btrfsvol.LogicalAddr]
 	sb      btrfstree.Superblock
 	graph   graph.Graph
 
+	Sizes map[ItemPtr]SizeAndErr
+
 	cache *containers.LRUCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]]
 }
 
-func NewHandle(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock, graph graph.Graph) *Handle {
+func NewHandle(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) *Handle {
 	return &Handle{
 		rawFile: file,
 		sb:      sb,
-		graph:   graph,
+
+		Sizes: make(map[ItemPtr]SizeAndErr),
 
 		cache: containers.NewLRUCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]](8),
 	}
 }
 
+func (o *Handle) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) {
+	for i, item := range nodeRef.Data.BodyLeaf {
+		ptr := ItemPtr{
+			Node: nodeRef.Addr,
+			Idx:  i,
+		}
+		switch itemBody := item.Body.(type) {
+		case btrfsitem.ExtentCSum:
+			o.Sizes[ptr] = SizeAndErr{
+				Size: uint64(itemBody.Size()),
+				Err:  nil,
+			}
+		case btrfsitem.FileExtent:
+			size, err := itemBody.Size()
+			o.Sizes[ptr] = SizeAndErr{
+				Size: uint64(size),
+				Err:  err,
+			}
+		case btrfsitem.Error:
+			switch item.Key.ItemType {
+			case btrfsprim.EXTENT_CSUM_KEY, btrfsprim.EXTENT_DATA_KEY:
+				o.Sizes[ptr] = SizeAndErr{
+					Err: fmt.Errorf("error decoding item: ptr=%v (tree=%v key=%v): %w",
+						ptr, nodeRef.Data.Head.Owner, item.Key, itemBody.Err),
+				}
+			}
+		}
+	}
+}
+
+func (o *Handle) SetGraph(graph graph.Graph) {
+	o.graph = graph
+}
+
 func (o *Handle) readNode(laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node] {
 	if cached, ok := o.cache.Get(laddr); ok {
 		return cached
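
The net effect of the keyio.go hunks is that a Handle is no longer built in one shot: NewHandle no longer takes the node graph, InsertNode records a SizeAndErr for each EXTENT_CSUM and EXTENT_DATA leaf item as nodes are read, and SetGraph attaches the graph once scanning is finished. A minimal sketch of the intended call order, assuming some loop over already-read node refs (the scannedNodes variable is illustrative, not part of this patch):

keyIO := keyio.NewHandle(fs, *sb) // graph is not known yet
for _, nodeRef := range scannedNodes { // hypothetical []*diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]
	keyIO.InsertNode(nodeRef) // fills keyIO.Sizes, keyed by ItemPtr{Node, Idx}
}
keyIO.SetGraph(*nodeGraph) // graph is only needed later, when ReadItem walks nodes
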
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
index 5badc33..66eaf1a 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
@@ -51,7 +51,7 @@ type rebuilder struct {
 }
 
 func RebuildNodes(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) (map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], error) {
-	nodeGraph, err := ScanDevices(ctx, fs, nodeScanResults) // ScanDevices does its own logging
+	nodeGraph, keyIO, err := ScanDevices(ctx, fs, nodeScanResults) // ScanDevices does its own logging
 	if err != nil {
 		return nil, err
 	}
@@ -63,7 +63,6 @@ func RebuildNodes(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspec
 	}
 
 	dlog.Info(ctx, "Rebuilding node tree...")
-	keyIO := keyio.NewHandle(fs, *sb, nodeGraph)
 	o := &rebuilder{
 		sb: *sb,
 
@@ -499,7 +498,6 @@ func (o *rebuilder) wantFunc(ctx context.Context, treeID btrfsprim.ObjID, objID
 func (o *rebuilder) _wantRange(
 	ctx context.Context,
 	treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType,
-	sizeFn func(btrfsprim.Key) (uint64, error),
 	beg, end uint64,
 ) {
 	if !o.rebuilt.AddTree(ctx, treeID) {
@@ -507,6 +505,18 @@ func (o *rebuilder) _wantRange(
 		return
 	}
 
+	sizeFn := func(key btrfsprim.Key) (uint64, error) {
+		ptr, ok := o.rebuilt.Keys(treeID).Load(key)
+		if !ok {
+			panic(fmt.Errorf("should not happen: could not load key: %v", key))
+		}
+		sizeAndErr, ok := o.keyIO.Sizes[ptr]
+		if !ok {
+			panic(fmt.Errorf("should not happen: %v item did not have a size recorded", typ))
+		}
+		return sizeAndErr.Size, sizeAndErr.Err
+	}
+
 	// Step 1: Build a listing of the runs that we do have.
 	runMin := btrfsprim.Key{
 		ObjectID: objID,
@@ -661,54 +671,11 @@ func (o *rebuilder) _wantRange(
 func (o *rebuilder) wantCSum(ctx context.Context, beg, end btrfsvol.LogicalAddr) {
 	const treeID = btrfsprim.CSUM_TREE_OBJECTID
 	o._wantRange(ctx, treeID, btrfsprim.EXTENT_CSUM_OBJECTID, btrfsprim.EXTENT_CSUM_KEY,
-		func(key btrfsprim.Key) (uint64, error) {
-			ptr, ok := o.rebuilt.Keys(treeID).Load(key)
-			if !ok {
-				panic(fmt.Errorf("should not happen: could not load key: %v", key))
-			}
-			body, ok := o.keyIO.ReadItem(ptr)
-			if !ok {
-				panic(fmt.Errorf("should not happen: could not read item: %v", key))
-			}
-			switch body := body.(type) {
-			case btrfsitem.ExtentCSum:
-				return uint64(body.Size()), nil
-			case btrfsitem.Error:
-				return 0, fmt.Errorf("error decoding item: tree=%v key=%v: %w", treeID, key, body.Err)
-			default:
-				// This is a panic because the item decoder should not emit EXTENT_CSUM
-				// items as anything but btrfsitem.ExtentCSum or btrfsitem.Error without
-				// this code also being updated.
-				panic(fmt.Errorf("should not happen: EXTENT_CSUM item has unexpected type: %T", body))
-			}
-		},
 		uint64(beg), uint64(end))
 }
 
 // wantFileExt implements rebuildCallbacks.
 func (o *rebuilder) wantFileExt(ctx context.Context, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) {
 	o._wantRange(ctx, treeID, ino, btrfsprim.EXTENT_DATA_KEY,
-		func(key btrfsprim.Key) (uint64, error) {
-			ptr, ok := o.rebuilt.Keys(treeID).Load(key)
-			if !ok {
-				panic(fmt.Errorf("should not happen: could not load key: %v", key))
-			}
-			body, ok := o.keyIO.ReadItem(ptr)
-			if !ok {
-				panic(fmt.Errorf("should not happen: could not read item: %v", key))
-			}
-			switch body := body.(type) {
-			case btrfsitem.FileExtent:
-				size, err := body.Size()
-				return uint64(size), err
-			case btrfsitem.Error:
-				return 0, fmt.Errorf("error decoding item: tree=%v key=%v: %w", treeID, key, body.Err)
-			default:
-				// This is a panic because the item decoder should not emit EXTENT_DATA
-				// items as anything but btrfsitem.FileExtent or btrfsitem.Error without
-				// this code also being updated.
-				panic(fmt.Errorf("should not happen: EXTENT_DATA item has unexpected type: %T", body))
-			}
-		},
 		0, uint64(size))
 }
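
Both deleted closures in rebuild.go did the same lookup and differed only in the item type they expected, which is why they collapse into the single sizeFn now built inside _wantRange. The practical change is where the size comes from; roughly, for the EXTENT_CSUM case, with error and type handling omitted and names as in the diff (a sketch, not code from the patch):

// before: decode the item on demand; ReadItem goes back to the node (through keyio's node cache)
body, _ := o.keyIO.ReadItem(ptr)
return uint64(body.(btrfsitem.ExtentCSum).Size()), nil

// after: constant-time lookup in the table that keyio.InsertNode filled during the scan
sizeAndErr := o.keyIO.Sizes[ptr]
return sizeAndErr.Size, sizeAndErr.Err
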
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go
index bcf2f5b..5c2d0fd 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go
@@ -16,6 +16,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
"git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
@@ -30,12 +31,12 @@ func (s scanStats) String() string {
s.N, s.D)
}
-func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.ScanDevicesResult) (graph.Graph, error) {
+func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.ScanDevicesResult) (graph.Graph, *keyio.Handle, error) {
dlog.Infof(ctx, "Reading node data from FS...")
sb, err := fs.Superblock()
if err != nil {
- return graph.Graph{}, err
+ return graph.Graph{}, nil, err
}
total := countNodes(scanResults)
@@ -46,6 +47,7 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.Sca
}
nodeGraph := graph.New(*sb)
+ keyIO := keyio.NewHandle(fs, *sb)
progress(done, total)
for _, devResults := range scanResults {
@@ -54,10 +56,11 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.Sca
LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr},
})
if err != nil {
- return graph.Graph{}, err
+ return graph.Graph{}, nil, err
}
nodeGraph.InsertNode(nodeRef)
+ keyIO.InsertNode(nodeRef)
done++
progress(done, total)
@@ -72,10 +75,12 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.Sca
progressWriter = textui.NewProgress[scanStats](ctx, dlog.LogLevelInfo, 1*time.Second)
dlog.Infof(ctx, "Checking keypointers for dead-ends...")
if err := nodeGraph.FinalCheck(fs, *sb, progress); err != nil {
- return graph.Graph{}, err
+ return graph.Graph{}, nil, err
}
progressWriter.Done()
dlog.Info(ctx, "... done checking keypointers")
- return *nodeGraph, nil
+ keyIO.SetGraph(*nodeGraph)
+
+ return *nodeGraph, keyIO, nil
}
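
Taken together, the three files move the size bookkeeping from rebuild time to scan time: ScanDevices now hands back the keyio.Handle it populated alongside the graph, and the sizes that _wantRange's sizeFn looks up are present for EXTENT_CSUM/EXTENT_DATA keys because keyIO.InsertNode is called for every node that nodeGraph.InsertNode sees. A condensed sketch of the resulting flow in RebuildNodes, with the rebuilder literal trimmed to the fields relevant here (the full struct is in rebuild.go, and sb comes from fs.Superblock() as in the surrounding code):

nodeGraph, keyIO, err := ScanDevices(ctx, fs, nodeScanResults) // keyIO.Sizes is already populated
if err != nil {
	return nil, err
}
o := &rebuilder{
	sb:    *sb,
	keyIO: keyIO, // _wantRange's sizeFn reads o.keyIO.Sizes instead of decoding items
	// ... other fields unchanged by this patch
}
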