Diffstat (limited to 'cmd/btrfs-rec/inspect/rebuildtrees')
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go          583
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildtrees/rebuild_treecb.go    86
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wantcb.go   400
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wanttyp.go  107
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildtrees/scan.go              77
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildtrees/util.go              31
6 files changed, 1284 insertions, 0 deletions
diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go
new file mode 100644
index 0000000..cf334a0
--- /dev/null
+++ b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go
@@ -0,0 +1,583 @@
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package rebuildnodes
+
+import (
+ "context"
+ "fmt"
+ "runtime"
+ "sort"
+ "time"
+
+ "github.com/datawire/dlib/dgroup"
+ "github.com/datawire/dlib/dlog"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
+ "git.lukeshu.com/btrfs-progs-ng/lib/maps"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
+)
+
+type keyAndTree struct {
+ btrfsprim.Key
+ TreeID btrfsprim.ObjID
+}
+
+func (a keyAndTree) Compare(b keyAndTree) int {
+ if d := containers.NativeCompare(a.TreeID, b.TreeID); d != 0 {
+ return d
+ }
+ return a.Key.Compare(b.Key)
+}
+
+func (o keyAndTree) String() string {
+ return fmt.Sprintf("tree=%v key=%v", o.TreeID, o.Key)
+}
+
+type rebuilder struct {
+ sb btrfstree.Superblock
+ graph graph.Graph
+ keyIO *keyio.Handle
+
+ rebuilt *btrees.RebuiltForrest
+
+ curKey struct {
+ TreeID btrfsprim.ObjID
+ Key containers.Optional[btrfsprim.Key]
+ }
+ treeQueue containers.Set[btrfsprim.ObjID]
+ retryItemQueue map[btrfsprim.ObjID]containers.Set[keyAndTree]
+ addedItemQueue containers.Set[keyAndTree]
+ settledItemQueue containers.Set[keyAndTree]
+ augmentQueue map[btrfsprim.ObjID]*treeAugmentQueue
+ numAugments int
+ numAugmentFailures int
+}
+
+type treeAugmentQueue struct {
+ zero map[Want]struct{}
+ single map[Want]btrfsvol.LogicalAddr
+ multi map[Want]containers.Set[btrfsvol.LogicalAddr]
+}
+
+type Rebuilder interface {
+ Rebuild(context.Context) error
+ ListRoots(context.Context) map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr]
+}
+
+func NewRebuilder(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) (Rebuilder, error) {
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.step", "read-fs-data")
+ sb, nodeGraph, keyIO, err := ScanDevices(ctx, fs, nodeScanResults) // ScanDevices does its own logging
+ if err != nil {
+ return nil, err
+ }
+
+ o := &rebuilder{
+ sb: sb,
+ graph: nodeGraph,
+ keyIO: keyIO,
+ }
+ o.rebuilt = btrees.NewRebuiltForrest(sb, nodeGraph, keyIO, o)
+ return o, nil
+}
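[Editor's note: a minimal sketch of the intended call sequence, not part of this commit. rebuildAndReport is a hypothetical caller; fs and scanResults are assumed to come from btrfs-rec's earlier device-scan step.]

    // Hypothetical caller; wires NewRebuilder, Rebuild, and ListRoots together.
    func rebuildAndReport(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.ScanDevicesResult) error {
    	rebuilder, err := rebuildnodes.NewRebuilder(ctx, fs, scanResults)
    	if err != nil {
    		return err
    	}
    	if err := rebuilder.Rebuild(ctx); err != nil {
    		return err
    	}
    	// For each tree, report the node addresses to re-attach as roots.
    	for treeID, roots := range rebuilder.ListRoots(ctx) {
    		dlog.Infof(ctx, "tree %v: roots %v", treeID, maps.SortedKeys(roots))
    	}
    	return nil
    }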
+
+func (o *rebuilder) ListRoots(ctx context.Context) map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr] {
+ return o.rebuilt.ListRoots(ctx)
+}
+
+func (o *rebuilder) Rebuild(ctx context.Context) error {
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.step", "rebuild")
+
+ // Initialize
+ o.retryItemQueue = make(map[btrfsprim.ObjID]containers.Set[keyAndTree])
+ o.addedItemQueue = make(containers.Set[keyAndTree])
+ o.settledItemQueue = make(containers.Set[keyAndTree])
+ o.augmentQueue = make(map[btrfsprim.ObjID]*treeAugmentQueue)
+
+ // Seed the queue
+ o.treeQueue = containers.NewSet[btrfsprim.ObjID](
+ btrfsprim.ROOT_TREE_OBJECTID,
+ btrfsprim.CHUNK_TREE_OBJECTID,
+ // btrfsprim.TREE_LOG_OBJECTID, // TODO(lukeshu): Special LOG_TREE handling
+ btrfsprim.BLOCK_GROUP_TREE_OBJECTID,
+ )
+
+ // Run
+ for passNum := 0; len(o.treeQueue) > 0 || len(o.addedItemQueue) > 0 || len(o.settledItemQueue) > 0 || len(o.augmentQueue) > 0; passNum++ {
+ ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.pass", passNum)
+
+ // Crawl trees (Drain o.treeQueue, fill o.addedItemQueue).
+ if err := o.processTreeQueue(ctx); err != nil {
+ return err
+ }
+ runtime.GC()
+
+ if len(o.addedItemQueue) > 0 {
+ // Settle items (drain o.addedItemQueue, fill o.augmentQueue and o.settledItemQueue).
+ if err := o.processAddedItemQueue(ctx); err != nil {
+ return err
+ }
+ } else {
+ // Process items (drain o.settledItemQueue, fill o.augmentQueue and o.treeQueue).
+ if err := o.processSettledItemQueue(ctx); err != nil {
+ return err
+ }
+ }
+ runtime.GC()
+
+ // Apply augments (drain o.augmentQueue (and maybe o.retryItemQueue), fill o.addedItemQueue).
+ if err := o.processAugmentQueue(ctx); err != nil {
+ return err
+ }
+ runtime.GC()
+ }
+
+ return nil
+}
+
+// processTreeQueue drains o.treeQueue, filling o.addedItemQueue.
+func (o *rebuilder) processTreeQueue(ctx context.Context) error {
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "collect-items")
+
+ queue := maps.SortedKeys(o.treeQueue)
+ o.treeQueue = make(containers.Set[btrfsprim.ObjID])
+
+ // Because trees can be wildly different sizes, it's impossible to have a meaningful
+ // progress percentage here.
+ o.curKey.Key.OK = false
+ for _, o.curKey.TreeID = range queue {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ // This will call o.AddedItem as necessary, which
+ // inserts into o.addedItemQueue.
+ _ = o.rebuilt.Tree(ctx, o.curKey.TreeID)
+ }
+
+ return nil
+}
+
+type settleItemStats struct {
+ textui.Portion[int]
+ NumAugments int
+ NumAugmentTrees int
+}
+
+func (s settleItemStats) String() string {
+ // return textui.Sprintf("%v (queued %v augments across %v trees)",
+ return textui.Sprintf("%v (aug:%v trees:%v)",
+ s.Portion, s.NumAugments, s.NumAugmentTrees)
+}
+
+// processAddedItemQueue drains o.addedItemQueue, filling o.augmentQueue and o.settledItemQueue.
+func (o *rebuilder) processAddedItemQueue(ctx context.Context) error {
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "settle-items")
+
+ queue := maps.Keys(o.addedItemQueue)
+ o.addedItemQueue = make(containers.Set[keyAndTree])
+ sort.Slice(queue, func(i, j int) bool {
+ return queue[i].Compare(queue[j]) < 0
+ })
+
+ var progress settleItemStats
+ progress.D = len(queue)
+ progressWriter := textui.NewProgress[settleItemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress)
+
+ for i, key := range queue {
+ progress.N = i
+ progressWriter.Set(progress)
+
+ ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.settle.item", key)
+ tree := o.rebuilt.Tree(ctx, key.TreeID)
+ incPtr, ok := tree.Items(ctx).Load(key.Key)
+ if !ok {
+ panic(fmt.Errorf("should not happen: failed to load already-added item: %v", key))
+ }
+ excPtr, ok := tree.PotentialItems(ctx).Load(key.Key)
+ if ok && tree.ShouldReplace(incPtr.Node, excPtr.Node) {
+ wantKey := WantWithTree{
+ TreeID: key.TreeID,
+ Key: wantFromKey(key.Key),
+ }
+ o.wantAugment(ctx, wantKey, tree.LeafToRoots(ctx, excPtr.Node))
+ progress.NumAugments = o.numAugments
+ progress.NumAugmentTrees = len(o.augmentQueue)
+ progressWriter.Set(progress)
+ } else if !handleWouldBeNoOp(key.ItemType) {
+ o.settledItemQueue.Insert(key)
+ }
+ }
+
+ progress.N = len(queue)
+ progressWriter.Set(progress)
+ progressWriter.Done()
+ return nil
+}
+
+type processItemStats struct {
+ textui.Portion[int]
+ NumAugments int
+ NumFailures int
+ NumAugmentTrees int
+}
+
+func (s processItemStats) String() string {
+ // return textui.Sprintf("%v (queued %v augments and %v failures across %v trees)",
+ return textui.Sprintf("%v (aug:%v fail:%v trees:%v)",
+ s.Portion, s.NumAugments, s.NumFailures, s.NumAugmentTrees)
+}
+
+// processSettledItemQueue drains o.settledItemQueue, filling o.augmentQueue and o.treeQueue.
+func (o *rebuilder) processSettledItemQueue(ctx context.Context) error {
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "process-items")
+
+ queue := maps.Keys(o.settledItemQueue)
+ o.settledItemQueue = make(containers.Set[keyAndTree])
+ sort.Slice(queue, func(i, j int) bool {
+ return queue[i].Compare(queue[j]) < 0
+ })
+
+ var progress processItemStats
+ progress.D = len(queue)
+ progressWriter := textui.NewProgress[processItemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress)
+
+ type keyAndBody struct {
+ keyAndTree
+ Body btrfsitem.Item
+ }
+ itemChan := make(chan keyAndBody, textui.Tunable(300)) // average items-per-node≈100; let's have a buffer of ~3 nodes
+ grp := dgroup.NewGroup(ctx, dgroup.GroupConfig{})
+ grp.Go("io", func(ctx context.Context) error {
+ defer close(itemChan)
+ for _, key := range queue {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.process.item", key)
+ item := keyAndBody{
+ keyAndTree: key,
+ Body: o.rebuilt.Tree(ctx, key.TreeID).ReadItem(ctx, key.Key),
+ }
+ select {
+ case itemChan <- item:
+ case <-ctx.Done():
+ }
+ }
+ return nil
+ })
+ grp.Go("cpu", func(ctx context.Context) error {
+ defer progressWriter.Done()
+ o.curKey.Key.OK = true
+ for item := range itemChan {
+ ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.process.item", item.keyAndTree)
+ o.curKey.TreeID = item.TreeID
+ o.curKey.Key.Val = item.Key
+ handleItem(o, ctx, item.TreeID, btrfstree.Item{
+ Key: item.Key,
+ Body: item.Body,
+ })
+ item.Body.Free()
+ if item.ItemType == btrfsitem.ROOT_ITEM_KEY {
+ o.treeQueue.Insert(item.ObjectID)
+ }
+ progress.N++
+ progress.NumAugments = o.numAugments
+ progress.NumFailures = o.numAugmentFailures
+ progress.NumAugmentTrees = len(o.augmentQueue)
+ progressWriter.Set(progress)
+ }
+ return nil
+ })
+ return grp.Wait()
+}
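[Editor's note: the io/cpu pair above is a bounded producer/consumer pipeline: the "io" goroutine prefetches item bodies into a buffered channel while the "cpu" goroutine processes them. A standalone sketch of the same shape, using the same dgroup API with a placeholder payload and placeholder work:]

    // Minimal sketch of the bounded pipeline shape used above.
    func pipelineSketch(ctx context.Context, queue []int) error {
    	itemChan := make(chan int, 300) // bounded buffer, as above
    	grp := dgroup.NewGroup(ctx, dgroup.GroupConfig{})
    	grp.Go("io", func(ctx context.Context) error {
    		defer close(itemChan) // unblocks the consumer when done
    		for _, item := range queue {
    			select {
    			case itemChan <- item: // slow producer side (disk reads above)
    			case <-ctx.Done():
    				return ctx.Err()
    			}
    		}
    		return nil
    	})
    	grp.Go("cpu", func(ctx context.Context) error {
    		for item := range itemChan { // drains until the producer closes
    			_ = item // handleItem-style processing goes here
    		}
    		return nil
    	})
    	return grp.Wait()
    }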
+
+// processAugmentQueue drains o.augmentQueue (and maybe o.retryItemQueue), filling o.addedItemQueue.
+func (o *rebuilder) processAugmentQueue(ctx context.Context) error {
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "apply-augments")
+
+ resolvedAugments := make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], len(o.augmentQueue))
+ var progress textui.Portion[int]
+ for _, treeID := range maps.SortedKeys(o.augmentQueue) {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID)
+ resolvedAugments[treeID] = o.resolveTreeAugments(ctx, treeID)
+ progress.D += len(resolvedAugments[treeID])
+ }
+ o.augmentQueue = make(map[btrfsprim.ObjID]*treeAugmentQueue)
+ o.numAugments = 0
+ o.numAugmentFailures = 0
+ runtime.GC()
+
+ progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
+ ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress)
+ for _, treeID := range maps.SortedKeys(resolvedAugments) {
+ ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID)
+ for _, nodeAddr := range maps.SortedKeys(resolvedAugments[treeID]) {
+ if err := ctx.Err(); err != nil {
+ progressWriter.Set(progress)
+ progressWriter.Done()
+ return err
+ }
+ progressWriter.Set(progress)
+ // This will call o.AddedItem as necessary, which
+ // inserts into o.addedItemQueue.
+ o.rebuilt.Tree(ctx, treeID).AddRoot(ctx, nodeAddr)
+ progress.N++
+ }
+ }
+ progressWriter.Set(progress)
+ progressWriter.Done()
+
+ return nil
+}
+
+func (o *rebuilder) enqueueRetry(ifTreeID btrfsprim.ObjID) {
+ if o.curKey.Key.OK {
+ if o.retryItemQueue[ifTreeID] == nil {
+ o.retryItemQueue[ifTreeID] = make(containers.Set[keyAndTree])
+ }
+ o.retryItemQueue[ifTreeID].Insert(keyAndTree{
+ TreeID: o.curKey.TreeID,
+ Key: o.curKey.Key.Val,
+ })
+ } else {
+ o.treeQueue.Insert(o.curKey.TreeID)
+ }
+}
+
+func (o *rebuilder) resolveTreeAugments(ctx context.Context, treeID btrfsprim.ObjID) containers.Set[btrfsvol.LogicalAddr] {
+ // Define an algorithm that takes several lists of items, and returns a
+ // set of those items such that each input list contains zero or one of
+ // the items from your return set. The same item may appear in multiple
+ // of the input lists.
+
+ type ChoiceInfo struct {
+ Count int
+ Distance int
+ Generation btrfsprim.Generation
+ }
+ choices := make(map[btrfsvol.LogicalAddr]ChoiceInfo)
+ // o.augmentQueue[treeID].zero is optimized storage for lists
+ // with zero items. Go ahead and free that memory up.
+ o.augmentQueue[treeID].zero = nil
+ // o.augmentQueue[treeID].single is optimized storage for
+ // lists with exactly 1 item.
+ for _, choice := range o.augmentQueue[treeID].single {
+ if old, ok := choices[choice]; ok {
+ old.Count++
+ choices[choice] = old
+ } else {
+ choices[choice] = ChoiceInfo{
+ Count: 1,
+ Distance: discardOK(o.rebuilt.Tree(ctx, treeID).COWDistance(o.graph.Nodes[choice].Owner)),
+ Generation: o.graph.Nodes[choice].Generation,
+ }
+ }
+ }
+ // o.augmentQueue[treeID].multi is the main list storage.
+ for _, list := range o.augmentQueue[treeID].multi {
+ for choice := range list {
+ if old, ok := choices[choice]; ok {
+ old.Count++
+ choices[choice] = old
+ } else {
+ choices[choice] = ChoiceInfo{
+ Count: 1,
+ Distance: discardOK(o.rebuilt.Tree(ctx, treeID).COWDistance(o.graph.Nodes[choice].Owner)),
+ Generation: o.graph.Nodes[choice].Generation,
+ }
+ }
+ }
+ }
+
+ // > Example 1: Given the input lists
+ // >
+ // > 0: [A, B]
+ // > 2: [A, C]
+ // >
+ // > legal solutions would be `[]`, `[A]`, `[B]`, `[C]`, or `[B, C]`. It
+ // > would not be legal to return `[A, B]` or `[A, C]`.
+ //
+ // > Example 2: Given the input lists
+ // >
+ // > 1: [A, B]
+ // > 2: [A]
+ // > 3: [B]
+ // >
+ // > legal solutions would be `[]`, `[A]`, or `[B]`. It would not be legal
+ // > to return `[A, B]`.
+ //
+ // The algorithm should optimize for the following goals:
+ //
+ // - We prefer that each input list have an item in the return set.
+ //
+ // > In Example 1, while `[]`, `[B]`, and `[C]` are permissible
+ // > solutions, they are not optimal, because one or both of the input
+ // > lists are not represented.
+ // >
+ // > It may be the case that it is not possible to represent all lists
+ // > in the result; in Example 2, either list 2 or list 3 must be
+ // > unrepresented.
+ //
+ // - Each item has a non-negative scalar "distance" score; we prefer
+ // lower distances. Distance scores are comparable; 0 is preferred,
+ // and a distance of 4 is twice as bad as a distance of 2.
+ //
+ // - Each item has a "generation" score, we prefer higher generations.
+ // Generation scores should not be treated as a linear scale; the
+ // magnitude of deltas is meaningless; only the sign of a delta is
+ // meaningful.
+ //
+ // > So it would be wrong to say something like
+ // >
+ // > desirability = (-a*distance) + (b*generation) // for some constants `a` and `b`
+ // >
+ // > because `generation` can't be used that way
+ //
+ // - We prefer items that appear in more lists over items that appear in
+ // fewer lists.
+ //
+ // The relative priority of these 4 goals is undefined; preferably the
+ // algorithm should be defined in a way that makes it easy to adjust the
+ // relative priorities.
+
+ ret := make(containers.Set[btrfsvol.LogicalAddr])
+ illegal := make(containers.Set[btrfsvol.LogicalAddr]) // cannot-be-accepted and already-accepted
+ accept := func(item btrfsvol.LogicalAddr) {
+ ret.Insert(item)
+ for _, list := range o.augmentQueue[treeID].multi {
+ if list.Has(item) {
+ illegal.InsertFrom(list)
+ }
+ }
+ }
+
+ sortedItems := maps.Keys(choices)
+ sort.Slice(sortedItems, func(i, j int) bool {
+ iItem, jItem := sortedItems[i], sortedItems[j]
+ if choices[iItem].Count != choices[jItem].Count {
+ return choices[iItem].Count > choices[jItem].Count // reverse this check; higher counts should sort lower
+ }
+ if choices[iItem].Distance != choices[jItem].Distance {
+ return choices[iItem].Distance < choices[jItem].Distance
+ }
+ if choices[iItem].Generation != choices[jItem].Generation {
+ return choices[iItem].Generation > choices[jItem].Generation // reverse this check; higher generations should sort lower
+ }
+ return iItem < jItem // laddr is as good a tiebreaker as anything
+ })
+ for _, item := range sortedItems {
+ if !illegal.Has(item) {
+ accept(item)
+ }
+ }
+
+ // Log our result
+ wantKeys := append(
+ maps.Keys(o.augmentQueue[treeID].single),
+ maps.Keys(o.augmentQueue[treeID].multi)...)
+ sort.Slice(wantKeys, func(i, j int) bool {
+ return wantKeys[i].Compare(wantKeys[j]) < 0
+ })
+ for _, wantKey := range wantKeys {
+ list, ok := o.augmentQueue[treeID].multi[wantKey]
+ if !ok {
+ list = containers.NewSet[btrfsvol.LogicalAddr](o.augmentQueue[treeID].single[wantKey])
+ }
+ chose := list.Intersection(ret)
+ switch {
+ case len(chose) == 0:
+ dlog.Infof(ctx, "lists[%q]: chose (none) from %v", wantKey, maps.SortedKeys(list))
+ case len(list) > 1:
+ dlog.Infof(ctx, "lists[%q]: chose %v from %v", wantKey, chose.TakeOne(), maps.SortedKeys(list))
+ default:
+ dlog.Debugf(ctx, "lists[%q]: chose %v from %v", wantKey, chose.TakeOne(), maps.SortedKeys(list))
+ }
+ }
+
+ // Free some memory
+ o.augmentQueue[treeID].single = nil
+ o.augmentQueue[treeID].multi = nil
+
+ return ret
+}
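[Editor's note: a stripped-down model of the greedy selection above, on plain ints, with "appears in more lists" as the only desirability criterion and the value itself as tiebreaker. chooseItems is a hypothetical helper, not part of the package; it assumes the maps.Keys helper imported above.]

    // Sort candidates by desirability, accept each unless already ruled
    // out, and rule out every member of every list the accepted item is in.
    func chooseItems(lists [][]int) map[int]bool {
    	count := make(map[int]int)
    	for _, list := range lists {
    		for _, item := range list {
    			count[item]++
    		}
    	}
    	sorted := maps.Keys(count)
    	sort.Slice(sorted, func(i, j int) bool {
    		if count[sorted[i]] != count[sorted[j]] {
    			return count[sorted[i]] > count[sorted[j]] // more lists first
    		}
    		return sorted[i] < sorted[j] // value as tiebreaker
    	})
    	ret := make(map[int]bool)
    	illegal := make(map[int]bool)
    	for _, item := range sorted {
    		if illegal[item] {
    			continue
    		}
    		ret[item] = true
    		// Everything sharing a list with an accepted item is now illegal.
    		for _, list := range lists {
    			hasItem := false
    			for _, other := range list {
    				if other == item {
    					hasItem = true
    					break
    				}
    			}
    			if hasItem {
    				for _, other := range list {
    					illegal[other] = true
    				}
    			}
    		}
    	}
    	return ret
    }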
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+func (queue *treeAugmentQueue) has(wantKey Want) bool {
+ if queue != nil {
+ if queue.zero != nil {
+ if _, ok := queue.zero[wantKey]; ok {
+ return true
+ }
+ }
+ if queue.single != nil {
+ if _, ok := queue.single[wantKey]; ok {
+ return true
+ }
+ }
+ if queue.multi != nil {
+ if _, ok := queue.multi[wantKey]; ok {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func (queue *treeAugmentQueue) store(wantKey Want, choices containers.Set[btrfsvol.LogicalAddr]) {
+ if len(choices) == 0 && wantKey.OffsetType > offsetExact {
+ // This wantKey is unlikely to come up again, so it's
+ // not worth the RAM of storing a negative result.
+ return
+ }
+ switch len(choices) {
+ case 0:
+ if queue.zero == nil {
+ queue.zero = make(map[Want]struct{})
+ }
+ queue.zero[wantKey] = struct{}{}
+ case 1:
+ if queue.single == nil {
+ queue.single = make(map[Want]btrfsvol.LogicalAddr)
+ }
+ queue.single[wantKey] = choices.TakeOne()
+ default:
+ if queue.multi == nil {
+ queue.multi = make(map[Want]containers.Set[btrfsvol.LogicalAddr])
+ }
+ queue.multi[wantKey] = choices
+ }
+}
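[Editor's note: which tier a result lands in depends only on len(choices); illustratively, where q is a *treeAugmentQueue and w is a Want with OffsetType <= offsetExact (both hypothetical):]

    q.store(w, nil)                                                     // recorded in q.zero
    q.store(w, containers.NewSet[btrfsvol.LogicalAddr](0x1000))         // recorded in q.single
    q.store(w, containers.NewSet[btrfsvol.LogicalAddr](0x1000, 0x2000)) // recorded in q.multi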
+
+func (o *rebuilder) hasAugment(wantKey WantWithTree) bool {
+ return o.augmentQueue[wantKey.TreeID].has(wantKey.Key)
+}
+
+func (o *rebuilder) wantAugment(ctx context.Context, wantKey WantWithTree, choices containers.Set[btrfsvol.LogicalAddr]) {
+ if o.augmentQueue[wantKey.TreeID] == nil {
+ o.augmentQueue[wantKey.TreeID] = new(treeAugmentQueue)
+ }
+ o.augmentQueue[wantKey.TreeID].store(wantKey.Key, choices)
+ if len(choices) == 0 {
+ o.numAugmentFailures++
+ dlog.Debug(ctx, "ERR: could not find wanted item")
+ } else {
+ o.numAugments++
+ dlog.Debugf(ctx, "choices=%v", maps.SortedKeys(choices))
+ }
+}
diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_treecb.go b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_treecb.go
new file mode 100644
index 0000000..492436b
--- /dev/null
+++ b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_treecb.go
@@ -0,0 +1,86 @@
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package rebuildnodes
+
+import (
+ "context"
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+)
+
+// AddedItem implements btrees.Callbacks.
+func (o *rebuilder) AddedItem(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) {
+ o.addedItemQueue.Insert(keyAndTree{
+ TreeID: tree,
+ Key: key,
+ })
+}
+
+// AddedRoot implements btrees.Callbacks.
+func (o *rebuilder) AddedRoot(ctx context.Context, tree btrfsprim.ObjID, root btrfsvol.LogicalAddr) {
+ if retries := o.retryItemQueue[tree]; retries != nil {
+ o.addedItemQueue.InsertFrom(retries)
+ }
+}
+
+// LookupRoot implements btrees.Callbacks.
+func (o *rebuilder) LookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) {
+ wantKey := WantWithTree{
+ TreeID: btrfsprim.ROOT_TREE_OBJECTID,
+ Key: Want{
+ ObjectID: tree,
+ ItemType: btrfsitem.ROOT_ITEM_KEY,
+ OffsetType: offsetAny,
+ },
+ }
+ ctx = withWant(ctx, logFieldTreeWant, "tree Root", wantKey)
+ foundKey, ok := o._want(ctx, wantKey)
+ if !ok {
+ o.enqueueRetry(btrfsprim.ROOT_TREE_OBJECTID)
+ return 0, btrfsitem.Root{}, false
+ }
+ itemBody := o.rebuilt.Tree(ctx, wantKey.TreeID).ReadItem(ctx, foundKey)
+ defer itemBody.Free()
+ switch itemBody := itemBody.(type) {
+ case *btrfsitem.Root:
+ return btrfsprim.Generation(foundKey.Offset), *itemBody, true
+ case *btrfsitem.Error:
+ o.fsErr(ctx, fmt.Errorf("error decoding item: %v: %w", foundKey, itemBody.Err))
+ return 0, btrfsitem.Root{}, false
+ default:
+ // This is a panic because the item decoder should not emit ROOT_ITEM items as anything but
+ // btrfsitem.Root or btrfsitem.Error without this code also being updated.
+ panic(fmt.Errorf("should not happen: ROOT_ITEM item has unexpected type: %T", itemBody))
+ }
+}
+
+// LookupUUID implements btrees.Callbacks.
+func (o *rebuilder) LookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) {
+ wantKey := WantWithTree{
+ TreeID: btrfsprim.UUID_TREE_OBJECTID,
+ Key: wantFromKey(btrfsitem.UUIDToKey(uuid)),
+ }
+ ctx = withWant(ctx, logFieldTreeWant, "resolve parent UUID", wantKey)
+ if !o._wantOff(ctx, wantKey) {
+ o.enqueueRetry(btrfsprim.UUID_TREE_OBJECTID)
+ return 0, false
+ }
+ itemBody := o.rebuilt.Tree(ctx, wantKey.TreeID).ReadItem(ctx, wantKey.Key.Key())
+ defer itemBody.Free()
+ switch itemBody := itemBody.(type) {
+ case *btrfsitem.UUIDMap:
+ return itemBody.ObjID, true
+ case *btrfsitem.Error:
+ o.fsErr(ctx, fmt.Errorf("error decoding item: %v: %w", wantKey, itemBody.Err))
+ return 0, false
+ default:
+ // This is a panic because the item decoder should not emit UUID_SUBVOL items as anything but
+ // btrfsitem.UUIDMap or btrfsitem.Error without this code also being updated.
+ panic(fmt.Errorf("should not happen: UUID_SUBVOL item has unexpected type: %T", itemBody))
+ }
+}
diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wantcb.go b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wantcb.go
new file mode 100644
index 0000000..adf3cff
--- /dev/null
+++ b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wantcb.go
@@ -0,0 +1,400 @@
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package rebuildnodes
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+
+ "github.com/datawire/dlib/dlog"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
+)
+
+// fsErr implements rebuildCallbacks.
+func (o *rebuilder) fsErr(ctx context.Context, e error) {
+ dlog.Errorf(ctx, "filesystem error: %v", e)
+}
+
+// want implements rebuildCallbacks.
+func (o *rebuilder) want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) {
+ wantKey := WantWithTree{
+ TreeID: treeID,
+ Key: Want{
+ ObjectID: objID,
+ ItemType: typ,
+ OffsetType: offsetAny,
+ },
+ }
+ ctx = withWant(ctx, logFieldItemWant, reason, wantKey)
+ o._want(ctx, wantKey)
+}
+
+func (o *rebuilder) _want(ctx context.Context, wantKey WantWithTree) (key btrfsprim.Key, ok bool) {
+ if o.rebuilt.Tree(ctx, wantKey.TreeID) == nil {
+ o.enqueueRetry(wantKey.TreeID)
+ return btrfsprim.Key{}, false
+ }
+
+ // check if we already have it
+
+ tgt := wantKey.Key.Key()
+ if key, _, ok := o.rebuilt.Tree(ctx, wantKey.TreeID).Items(ctx).Search(func(key btrfsprim.Key, _ keyio.ItemPtr) int {
+ key.Offset = 0
+ return tgt.Compare(key)
+ }); ok {
+ return key, true
+ }
+
+ // OK, we need to insert it
+
+ if o.hasAugment(wantKey) {
+ return btrfsprim.Key{}, false
+ }
+ wants := make(containers.Set[btrfsvol.LogicalAddr])
+ o.rebuilt.Tree(ctx, wantKey.TreeID).PotentialItems(ctx).Subrange(
+ func(k btrfsprim.Key, _ keyio.ItemPtr) int {
+ k.Offset = 0
+ return tgt.Compare(k)
+ },
+ func(_ btrfsprim.Key, v keyio.ItemPtr) bool {
+ wants.InsertFrom(o.rebuilt.Tree(ctx, wantKey.TreeID).LeafToRoots(ctx, v.Node))
+ return true
+ })
+ o.wantAugment(ctx, wantKey, wants)
+ return btrfsprim.Key{}, false
+}
+
+// wantOff implements rebuildCallbacks.
+func (o *rebuilder) wantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) {
+ wantKey := WantWithTree{
+ TreeID: treeID,
+ Key: Want{
+ ObjectID: objID,
+ ItemType: typ,
+ OffsetType: offsetExact,
+ OffsetLow: off,
+ },
+ }
+ ctx = withWant(ctx, logFieldItemWant, reason, wantKey)
+ o._wantOff(ctx, wantKey)
+}
+
+func (o *rebuilder) _wantOff(ctx context.Context, wantKey WantWithTree) (ok bool) {
+ if o.rebuilt.Tree(ctx, wantKey.TreeID) == nil {
+ o.enqueueRetry(wantKey.TreeID)
+ return false
+ }
+
+ // check if we already have it
+
+ tgt := wantKey.Key.Key()
+ if _, ok := o.rebuilt.Tree(ctx, wantKey.TreeID).Items(ctx).Load(tgt); ok {
+ return true
+ }
+
+ // OK, we need to insert it
+
+ if o.hasAugment(wantKey) {
+ return false
+ }
+ wants := make(containers.Set[btrfsvol.LogicalAddr])
+ o.rebuilt.Tree(ctx, wantKey.TreeID).PotentialItems(ctx).Subrange(
+ func(k btrfsprim.Key, _ keyio.ItemPtr) int { return tgt.Compare(k) },
+ func(_ btrfsprim.Key, v keyio.ItemPtr) bool {
+ wants.InsertFrom(o.rebuilt.Tree(ctx, wantKey.TreeID).LeafToRoots(ctx, v.Node))
+ return true
+ })
+ o.wantAugment(ctx, wantKey, wants)
+ return false
+}
+
+// wantDirIndex implements rebuildCallbacks.
+func (o *rebuilder) wantDirIndex(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, name []byte) {
+ wantKey := WantWithTree{
+ TreeID: treeID,
+ Key: Want{
+ ObjectID: objID,
+ ItemType: btrfsitem.DIR_INDEX_KEY,
+ OffsetType: offsetName,
+ OffsetName: string(name),
+ },
+ }
+ ctx = withWant(ctx, logFieldItemWant, reason, wantKey)
+
+ if o.rebuilt.Tree(ctx, treeID) == nil {
+ o.enqueueRetry(treeID)
+ return
+ }
+
+ // check if we already have it
+
+ tgt := wantKey.Key.Key()
+ found := false
+ o.rebuilt.Tree(ctx, treeID).Items(ctx).Subrange(
+ func(key btrfsprim.Key, _ keyio.ItemPtr) int {
+ key.Offset = 0
+ return tgt.Compare(key)
+ },
+ func(_ btrfsprim.Key, ptr keyio.ItemPtr) bool {
+ if itemName, ok := o.keyIO.Names[ptr]; ok && bytes.Equal(itemName, name) {
+ found = true
+ }
+ return !found
+ })
+ if found {
+ return
+ }
+
+ // OK, we need to insert it
+
+ if o.hasAugment(wantKey) {
+ return
+ }
+ wants := make(containers.Set[btrfsvol.LogicalAddr])
+ o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx).Subrange(
+ func(key btrfsprim.Key, _ keyio.ItemPtr) int {
+ key.Offset = 0
+ return tgt.Compare(key)
+ },
+ func(_ btrfsprim.Key, ptr keyio.ItemPtr) bool {
+ if itemName, ok := o.keyIO.Names[ptr]; ok && bytes.Equal(itemName, name) {
+ wants.InsertFrom(o.rebuilt.Tree(ctx, treeID).LeafToRoots(ctx, ptr.Node))
+ }
+ return true
+ })
+ o.wantAugment(ctx, wantKey, wants)
+}
+
+func (o *rebuilder) _walkRange(
+ ctx context.Context,
+ items *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr],
+ treeID, objID btrfsprim.ObjID, typ btrfsprim.ItemType,
+ beg, end uint64,
+ fn func(key btrfsprim.Key, ptr keyio.ItemPtr, beg, end uint64),
+) {
+ min := btrfsprim.Key{
+ ObjectID: objID,
+ ItemType: typ,
+ Offset: 0, // *NOT* `beg`
+ }
+ max := btrfsprim.Key{
+ ObjectID: objID,
+ ItemType: typ,
+ Offset: end - 1,
+ }
+ items.Subrange(
+ func(runKey btrfsprim.Key, _ keyio.ItemPtr) int {
+ switch {
+ case min.Compare(runKey) < 0:
+ return 1
+ case max.Compare(runKey) > 0:
+ return -1
+ default:
+ return 0
+ }
+ },
+ func(runKey btrfsprim.Key, runPtr keyio.ItemPtr) bool {
+ runSizeAndErr, ok := o.keyIO.Sizes[runPtr]
+ if !ok {
+ panic(fmt.Errorf("should not happen: %v (%v) did not have a size recorded",
+ runPtr, keyAndTree{TreeID: treeID, Key: runKey}))
+ }
+ if runSizeAndErr.Err != nil {
+ o.fsErr(ctx, fmt.Errorf("get size: %v (%v): %w",
+ runPtr, keyAndTree{TreeID: treeID, Key: runKey},
+ runSizeAndErr.Err))
+ return true
+ }
+ runSize := runSizeAndErr.Size
+ if runSize == 0 {
+ return true
+ }
+ runBeg := runKey.Offset
+ runEnd := runBeg + runSize
+ if runEnd <= beg {
+ return true
+ }
+
+ fn(runKey, runPtr, runBeg, runEnd)
+ return true
+ })
+}
+
+type gap struct {
+ // range is [Beg,End)
+ Beg, End uint64
+}
+
+// Compare implements containers.Ordered.
+func (a gap) Compare(b gap) int {
+ return containers.NativeCompare(a.Beg, b.Beg)
+}
+
+func (o *rebuilder) _wantRange(
+ ctx context.Context, reason string,
+ treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType,
+ beg, end uint64,
+) {
+ wantKey := WantWithTree{
+ TreeID: treeID,
+ Key: Want{
+ ObjectID: objID,
+ ItemType: typ,
+ OffsetType: offsetAny,
+ },
+ }
+ ctx = withWant(ctx, logFieldItemWant, reason, wantKey)
+ wantKey.Key.OffsetType = offsetRange
+
+ if o.rebuilt.Tree(ctx, treeID) == nil {
+ o.enqueueRetry(treeID)
+ return
+ }
+
+ // Step 1: Build a listing of the gaps.
+ //
+ // Start with a gap of the whole range, then subtract each run
+ // from it.
+ gaps := new(containers.RBTree[gap])
+ gaps.Insert(gap{
+ Beg: beg,
+ End: end,
+ })
+ o._walkRange(
+ ctx,
+ o.rebuilt.Tree(ctx, treeID).Items(ctx),
+ treeID, objID, typ, beg, end,
+ func(runKey btrfsprim.Key, _ keyio.ItemPtr, runBeg, runEnd uint64) {
+ var overlappingGaps []*containers.RBNode[gap]
+ gaps.Subrange(
+ func(gap gap) int {
+ switch {
+ case gap.End <= runBeg:
+ return 1
+ case runEnd <= gap.Beg:
+ return -1
+ default:
+ return 0
+ }
+ },
+ func(node *containers.RBNode[gap]) bool {
+ overlappingGaps = append(overlappingGaps, node)
+ return true
+ })
+ if len(overlappingGaps) == 0 {
+ return
+ }
+ gapsBeg := overlappingGaps[0].Value.Beg
+ gapsEnd := overlappingGaps[len(overlappingGaps)-1].Value.End
+ for _, gap := range overlappingGaps {
+ gaps.Delete(gap)
+ }
+ if gapsBeg < runBeg {
+ gaps.Insert(gap{
+ Beg: gapsBeg,
+ End: runBeg,
+ })
+ }
+ if gapsEnd > runEnd {
+ gaps.Insert(gap{
+ Beg: runEnd,
+ End: gapsEnd,
+ })
+ }
+ })
+
+ // Step 2: Fill each gap.
+ if gaps.Len() == 0 {
+ return
+ }
+ potentialItems := o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx)
+ gaps.Range(func(rbNode *containers.RBNode[gap]) bool {
+ gap := rbNode.Value
+ last := gap.Beg
+ o._walkRange(
+ ctx,
+ potentialItems,
+ treeID, objID, typ, gap.Beg, gap.End,
+ func(k btrfsprim.Key, v keyio.ItemPtr, runBeg, runEnd uint64) {
+ // TODO: This is dumb and greedy.
+ if last < runBeg {
+ // log an error
+ wantKey.Key.OffsetLow = last
+ wantKey.Key.OffsetHigh = runBeg
+ wantCtx := withWant(ctx, logFieldItemWant, reason, wantKey)
+ o.wantAugment(wantCtx, wantKey, nil)
+ }
+ wantKey.Key.OffsetLow = gap.Beg
+ wantKey.Key.OffsetHigh = gap.End
+ wantCtx := withWant(ctx, logFieldItemWant, reason, wantKey)
+ o.wantAugment(wantCtx, wantKey, o.rebuilt.Tree(wantCtx, treeID).LeafToRoots(wantCtx, v.Node))
+ last = runEnd
+ })
+ if last < gap.End {
+ // log an error
+ wantKey.Key.OffsetLow = last
+ wantKey.Key.OffsetHigh = gap.End
+ wantCtx := withWant(ctx, logFieldItemWant, reason, wantKey)
+ o.wantAugment(wantCtx, wantKey, nil)
+ }
+ return true
+ })
+}
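[Editor's note: Step 1 above is interval subtraction; for illustration, the same operation written against a sorted slice of gaps rather than an RBTree. subtractRun is a sketch reusing the gap type, not part of the package.]

    // Subtract the run [runBeg,runEnd) from each disjoint [Beg,End) gap.
    func subtractRun(gaps []gap, runBeg, runEnd uint64) []gap {
    	var out []gap
    	for _, g := range gaps {
    		if g.End <= runBeg || runEnd <= g.Beg {
    			out = append(out, g) // no overlap; keep unchanged
    			continue
    		}
    		if g.Beg < runBeg { // leading remainder survives
    			out = append(out, gap{Beg: g.Beg, End: runBeg})
    		}
    		if runEnd < g.End { // trailing remainder survives
    			out = append(out, gap{Beg: runEnd, End: g.End})
    		}
    	}
    	return out
    }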
+
+// wantCSum implements rebuildCallbacks.
+//
+// interval is [beg, end)
+func (o *rebuilder) wantCSum(ctx context.Context, reason string, inodeTree, inode btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) {
+ inodeWant := WantWithTree{
+ TreeID: inodeTree,
+ Key: Want{
+ ObjectID: inode,
+ ItemType: btrfsitem.INODE_ITEM_KEY,
+ OffsetType: offsetExact,
+ OffsetLow: 0,
+ },
+ }
+ inodeCtx := withWant(ctx, logFieldItemWant, reason, inodeWant)
+ if !o._wantOff(inodeCtx, inodeWant) {
+ o.enqueueRetry(inodeTree)
+ return
+ }
+ inodePtr, ok := o.rebuilt.Tree(inodeCtx, inodeTree).Items(inodeCtx).Load(inodeWant.Key.Key())
+ if !ok {
+ panic(fmt.Errorf("should not happen: could not load key: %v", inodeWant))
+ }
+ inodeFlags, ok := o.keyIO.Flags[inodePtr]
+ if !ok {
+ panic(fmt.Errorf("should not happen: INODE_ITEM did not have flags recorded"))
+ }
+ if inodeFlags.Err != nil {
+ o.fsErr(inodeCtx, inodeFlags.Err)
+ return
+ }
+
+ if inodeFlags.NoDataSum {
+ return
+ }
+
+ o._wantRange(
+ ctx, reason,
+ btrfsprim.CSUM_TREE_OBJECTID, btrfsprim.EXTENT_CSUM_OBJECTID, btrfsprim.EXTENT_CSUM_KEY,
+ uint64(roundDown(beg, btrfssum.BlockSize)), uint64(roundUp(end, btrfssum.BlockSize)))
+}
+
+// wantFileExt implements rebuildCallbacks.
+func (o *rebuilder) wantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) {
+ o._wantRange(
+ ctx, reason,
+ treeID, ino, btrfsprim.EXTENT_DATA_KEY,
+ 0, uint64(size))
+}
diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wanttyp.go b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wanttyp.go
new file mode 100644
index 0000000..2b471fe
--- /dev/null
+++ b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wanttyp.go
@@ -0,0 +1,107 @@
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package rebuildnodes
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/datawire/dlib/dlog"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
+)
+
+type wantOffsetType int8
+
+const (
+ offsetAny = wantOffsetType(iota)
+ offsetExact
+ offsetRange
+ offsetName
+)
+
+type Want struct {
+ ObjectID btrfsprim.ObjID
+ ItemType btrfsprim.ItemType
+ OffsetType wantOffsetType
+ OffsetLow uint64
+ OffsetHigh uint64
+ OffsetName string
+}
+
+func (a Want) Compare(b Want) int {
+ if d := containers.NativeCompare(a.ObjectID, b.ObjectID); d != 0 {
+ return d
+ }
+ if d := containers.NativeCompare(a.ItemType, b.ItemType); d != 0 {
+ return d
+ }
+ if d := containers.NativeCompare(a.OffsetType, b.OffsetType); d != 0 {
+ return d
+ }
+ if d := containers.NativeCompare(a.OffsetLow, b.OffsetLow); d != 0 {
+ return d
+ }
+ if d := containers.NativeCompare(a.OffsetHigh, b.OffsetHigh); d != 0 {
+ return d
+ }
+ if d := containers.NativeCompare(a.OffsetName, b.OffsetName); d != 0 {
+ return d
+ }
+ return 0
+}
+
+func (o Want) Key() btrfsprim.Key {
+ return btrfsprim.Key{
+ ObjectID: o.ObjectID,
+ ItemType: o.ItemType,
+ Offset: o.OffsetLow,
+ }
+}
+
+func wantFromKey(k btrfsprim.Key) Want {
+ return Want{
+ ObjectID: k.ObjectID,
+ ItemType: k.ItemType,
+ OffsetType: offsetExact,
+ OffsetLow: k.Offset,
+ }
+}
+
+func (o Want) String() string {
+ switch o.OffsetType {
+ case offsetAny:
+ return fmt.Sprintf("{%v %v ?}", o.ObjectID, o.ItemType)
+ case offsetExact:
+ return fmt.Sprintf("{%v %v %v}", o.ObjectID, o.ItemType, o.OffsetLow)
+ case offsetRange:
+ return fmt.Sprintf("{%v %v %v-%v}", o.ObjectID, o.ItemType, o.OffsetLow, o.OffsetHigh)
+ case offsetName:
+ return fmt.Sprintf("{%v %v name=%q}", o.ObjectID, o.ItemType, o.OffsetName)
+ default:
+ panic(fmt.Errorf("should not happen: OffsetType=%#v", o.OffsetType))
+ }
+}
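[Editor's note: for illustration, the four cases render along these lines; the object ID and item-type names are hypothetical, and the exact rendering depends on the btrfsprim Stringers.]

    // offsetAny:   {257 EXTENT_DATA ?}
    // offsetExact: {257 EXTENT_DATA 0}
    // offsetRange: {257 EXTENT_DATA 0-4096}
    // offsetName:  {257 DIR_INDEX name="hello.txt"}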
+
+type WantWithTree struct {
+ TreeID btrfsprim.ObjID
+ Key Want
+}
+
+func (o WantWithTree) String() string {
+ return fmt.Sprintf("tree=%v key=%v", o.TreeID, o.Key)
+}
+
+const (
+ logFieldItemWant = "btrfsinspect.rebuild-nodes.rebuild.want"
+ logFieldTreeWant = "btrfsinspect.rebuild-nodes.rebuild.add-tree.want"
+)
+
+func withWant(ctx context.Context, logField, reason string, wantKey WantWithTree) context.Context {
+ ctx = dlog.WithField(ctx, logField+".reason", reason)
+ ctx = dlog.WithField(ctx, logField+".key", wantKey)
+ return ctx
+}
diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/scan.go b/cmd/btrfs-rec/inspect/rebuildtrees/scan.go
new file mode 100644
index 0000000..17949ab
--- /dev/null
+++ b/cmd/btrfs-rec/inspect/rebuildtrees/scan.go
@@ -0,0 +1,77 @@
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package rebuildnodes
+
+import (
+ "context"
+ "time"
+
+ "github.com/datawire/dlib/dlog"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
+ "git.lukeshu.com/btrfs-progs-ng/lib/maps"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
+)
+
+func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.ScanDevicesResult) (btrfstree.Superblock, graph.Graph, *keyio.Handle, error) {
+ dlog.Info(ctx, "Reading superblock...")
+ sb, err := fs.Superblock()
+ if err != nil {
+ return btrfstree.Superblock{}, graph.Graph{}, nil, err
+ }
+
+ dlog.Infof(ctx, "Reading node data from FS...")
+
+ var stats textui.Portion[int]
+ stats.D = countNodes(scanResults)
+ progressWriter := textui.NewProgress[textui.Portion[int]](
+ dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.read.substep", "read-nodes"),
+ dlog.LogLevelInfo, textui.Tunable(1*time.Second))
+
+ nodeGraph := graph.New(*sb)
+ keyIO := keyio.NewHandle(fs, *sb)
+
+ progressWriter.Set(stats)
+ for _, devResults := range scanResults {
+ for _, laddr := range maps.SortedKeys(devResults.FoundNodes) {
+ if err := ctx.Err(); err != nil {
+ return btrfstree.Superblock{}, graph.Graph{}, nil, err
+ }
+ nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, *sb, laddr, btrfstree.NodeExpectations{
+ LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr},
+ })
+ if err != nil {
+ btrfstree.FreeNodeRef(nodeRef)
+ return btrfstree.Superblock{}, graph.Graph{}, nil, err
+ }
+
+ nodeGraph.InsertNode(nodeRef)
+ keyIO.InsertNode(nodeRef)
+
+ btrfstree.FreeNodeRef(nodeRef)
+
+ stats.N++
+ progressWriter.Set(stats)
+ }
+ }
+ if stats.N != stats.D {
+ panic("should not happen")
+ }
+ progressWriter.Done()
+ dlog.Info(ctx, "... done reading node data")
+
+ if err := nodeGraph.FinalCheck(ctx, fs, *sb); err != nil {
+ return btrfstree.Superblock{}, graph.Graph{}, nil, err
+ }
+ keyIO.SetGraph(*nodeGraph)
+
+ return *sb, *nodeGraph, keyIO, nil
+}
diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/util.go b/cmd/btrfs-rec/inspect/rebuildtrees/util.go
new file mode 100644
index 0000000..9d91f23
--- /dev/null
+++ b/cmd/btrfs-rec/inspect/rebuildtrees/util.go
@@ -0,0 +1,31 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package rebuildnodes
+
+import (
+ "golang.org/x/exp/constraints"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
+)
+
+func countNodes(nodeScanResults btrfsinspect.ScanDevicesResult) int {
+ var cnt int
+ for _, devResults := range nodeScanResults {
+ cnt += len(devResults.FoundNodes)
+ }
+ return cnt
+}
+
+func roundDown[T constraints.Integer](n, d T) T {
+ return (n / d) * d
+}
+
+func roundUp[T constraints.Integer](n, d T) T {
+ return ((n + d - 1) / d) * d
+}
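[Editor's note: wantCSum uses these helpers to round its byte range out to checksum-block boundaries; illustrative values, assuming a 4096-byte block size for btrfssum.BlockSize:]

    var (
    	_ = roundDown(int64(5000), 4096) // == 4096
    	_ = roundUp(int64(5000), 4096)   // == 8192
    	_ = roundUp(int64(4096), 4096)   // == 4096 (already aligned)
    )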
+
+func discardOK[T any](val T, _ bool) T {
+ return val
+}