From 8530a8b7b5f9f27bb5b5c319f285bb5a26470285 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Fri, 6 Jan 2023 02:03:50 -0700 Subject: rebuildnodes: Rework to be clear about what went wrong when reading --- .../btrfsinspect/rebuildnodes/btrees/tree.go | 4 ++-- .../btrfsinspect/rebuildnodes/keyio/keyio.go | 14 ++++++++----- .../btrfsinspect/rebuildnodes/rebuild.go | 24 +++------------------- 3 files changed, 14 insertions(+), 28 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go index c9d0fa4..f870121 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go @@ -329,10 +329,10 @@ func (tree *RebuiltTree) COWDistance(parentID btrfsprim.ObjID) (dist int, ok boo } // ReadItem reads an item from a tree. -func (tree *RebuiltTree) ReadItem(ctx context.Context, key btrfsprim.Key) (item btrfsitem.Item, ok bool) { +func (tree *RebuiltTree) ReadItem(ctx context.Context, key btrfsprim.Key) btrfsitem.Item { ptr, ok := tree.Items(ctx).Load(key) if !ok { - return nil, false + panic(fmt.Errorf("should not happen: btrees.RebuiltTree.ReadItem called for not-included key: %v", key)) } return tree.forrest.keyIO.ReadItem(ctx, ptr) } diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go index b4ab645..9e3b144 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go @@ -152,13 +152,17 @@ func (o *Handle) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr) *disk return ref } -func (o *Handle) ReadItem(ctx context.Context, ptr ItemPtr) (item btrfsitem.Item, ok bool) { - if o.graph.Nodes[ptr.Node].Level != 0 || ptr.Idx < 0 { - return nil, false +func (o *Handle) ReadItem(ctx context.Context, ptr ItemPtr) btrfsitem.Item { + if o.graph.Nodes[ptr.Node].Level != 0 { + panic(fmt.Errorf("should not happen: keyio.Handle.ReadItem called for non-leaf node@%v", ptr.Node)) + } + if ptr.Idx < 0 { + panic(fmt.Errorf("should not happen: keyio.Handle.ReadItem called for negative item index: %v", ptr.Idx)) } items := o.readNode(ctx, ptr.Node).Data.BodyLeaf if ptr.Idx >= len(items) { - return nil, false + panic(fmt.Errorf("should not happen: keyio.Handle.ReadItem called for out-of-bounds item index: index=%v len=%v", + ptr.Idx, len(items))) } - return items[ptr.Idx].Body, true + return items[ptr.Idx].Body } diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go index 624441f..d11ca58 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go @@ -91,12 +91,6 @@ func NewRebuilder(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspec return o, nil } -func (o *rebuilder) ioErr(ctx context.Context, err error) { - err = fmt.Errorf("should not happen: i/o error: %w", err) - dlog.Error(ctx, err) - panic(err) -} - func (o *rebuilder) ListRoots() map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr] { return o.rebuilt.ListRoots() } @@ -174,13 +168,9 @@ func (o *rebuilder) Rebuild(_ctx context.Context) error { return err } itemCtx := dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.process.item", key) - itemBody, ok := o.rebuilt.Tree(itemCtx, key.TreeID).ReadItem(itemCtx, key.Key) - if !ok { - o.ioErr(itemCtx, fmt.Errorf("could not read previously read item: %v", 
key)) - } itemChan <- keyAndBody{ keyAndTree: key, - Body: itemBody, + Body: o.rebuilt.Tree(itemCtx, key.TreeID).ReadItem(itemCtx, key.Key), } } return nil @@ -285,11 +275,7 @@ func (o *rebuilder) cbLookupRoot(ctx context.Context, tree btrfsprim.ObjID) (off o.enqueueRetry() return 0, btrfsitem.Root{}, false } - itemBody, ok := o.rebuilt.Tree(ctx, key.TreeID).ReadItem(ctx, key.Key) - if !ok { - o.ioErr(ctx, fmt.Errorf("could not read previously read item: %v", key)) - } - switch itemBody := itemBody.(type) { + switch itemBody := o.rebuilt.Tree(ctx, key.TreeID).ReadItem(ctx, key.Key).(type) { case *btrfsitem.Root: return btrfsprim.Generation(key.Offset), *itemBody, true case *btrfsitem.Error: @@ -310,11 +296,7 @@ func (o *rebuilder) cbLookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id b o.enqueueRetry() return 0, false } - itemBody, ok := o.rebuilt.Tree(ctx, key.TreeID).ReadItem(ctx, key.Key) - if !ok { - o.ioErr(ctx, fmt.Errorf("could not read previously read item: %v", key)) - } - switch itemBody := itemBody.(type) { + switch itemBody := o.rebuilt.Tree(ctx, key.TreeID).ReadItem(ctx, key.Key).(type) { case *btrfsitem.UUIDMap: return itemBody.ObjID, true case *btrfsitem.Error: -- cgit v1.2.3-2-g168b From 5a3785b5f43f17ee8766260732523cf65470fbe8 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Fri, 6 Jan 2023 02:09:56 -0700 Subject: rebuildnodes: Explicitly track whether curKey.Key is valid --- .../btrfsinspect/rebuildnodes/rebuild.go | 24 ++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go index d11ca58..931b9f3 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go @@ -54,7 +54,10 @@ type rebuilder struct { rebuilt *btrees.RebuiltForrest - curKey keyAndTree + curKey struct { + TreeID btrfsprim.ObjID + Key containers.Optional[btrfsprim.Key] + } treeQueue containers.Set[btrfsprim.ObjID] itemQueue containers.Set[keyAndTree] augmentQueue map[btrfsprim.ObjID]*treeAugmentQueue @@ -133,12 +136,12 @@ func (o *rebuilder) Rebuild(_ctx context.Context) error { o.treeQueue = make(containers.Set[btrfsprim.ObjID]) // Because trees can be wildly different sizes, it's impossible to have a meaningful // progress percentage here. 
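		// With no single current item during this substep, curKey.Key is
		// marked as not-OK, so enqueueRetry() re-queues the whole tree on
		// o.treeQueue rather than an individual item on o.itemQueue.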
- for _, treeID := range maps.SortedKeys(treeQueue) { + o.curKey.Key.OK = false + for _, o.curKey.TreeID = range maps.SortedKeys(treeQueue) { if err := _ctx.Err(); err != nil { return err } - o.curKey = keyAndTree{TreeID: treeID} - o.rebuilt.Tree(stepCtx, treeID) + o.rebuilt.Tree(stepCtx, o.curKey.TreeID) } } runtime.GC() @@ -177,9 +180,11 @@ func (o *rebuilder) Rebuild(_ctx context.Context) error { }) grp.Go("cpu", func(stepCtx context.Context) error { defer progressWriter.Done() + o.curKey.Key.OK = true for item := range itemChan { itemCtx := dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.process.item", item.keyAndTree) - o.curKey = item.keyAndTree + o.curKey.TreeID = item.TreeID + o.curKey.Key.Val = item.Key handleItem(o, itemCtx, item.TreeID, btrfstree.Item{ Key: item.Key, Body: item.Body, @@ -242,10 +247,13 @@ func (o *rebuilder) Rebuild(_ctx context.Context) error { } func (o *rebuilder) enqueueRetry() { - if o.curKey.Key == (btrfsprim.Key{}) { - o.treeQueue.Insert(o.curKey.TreeID) + if o.curKey.Key.OK { + o.itemQueue.Insert(keyAndTree{ + TreeID: o.curKey.TreeID, + Key: o.curKey.Key.Val, + }) } else { - o.itemQueue.Insert(o.curKey) + o.treeQueue.Insert(o.curKey.TreeID) } } -- cgit v1.2.3-2-g168b From c9549dc423a38839d9f8b919877009abba74c607 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Fri, 6 Jan 2023 02:20:29 -0700 Subject: rebuildnodes: Move each substep in to its own method --- .../btrfsinspect/rebuildnodes/rebuild.go | 219 +++++++++++---------- 1 file changed, 117 insertions(+), 102 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go index 931b9f3..da3d79f 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go @@ -129,120 +129,135 @@ func (o *rebuilder) Rebuild(_ctx context.Context) error { for passNum := 0; len(o.treeQueue) > 0 || len(o.itemQueue) > 0 || len(o.augmentQueue) > 0; passNum++ { passCtx := dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.rebuild.pass", passNum) - // Add items to the queue (drain o.treeQueue, fill o.itemQueue) - if true { - stepCtx := dlog.WithField(passCtx, "btrfsinspect.rebuild-nodes.rebuild.substep", "collect-items") - treeQueue := o.treeQueue - o.treeQueue = make(containers.Set[btrfsprim.ObjID]) - // Because trees can be wildly different sizes, it's impossible to have a meaningful - // progress percentage here. - o.curKey.Key.OK = false - for _, o.curKey.TreeID = range maps.SortedKeys(treeQueue) { - if err := _ctx.Err(); err != nil { - return err - } - o.rebuilt.Tree(stepCtx, o.curKey.TreeID) - } + // Crawl trees (Drain o.treeQueue, fill o.itemQueue). 
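		// Each pass runs three substeps in sequence (crawl trees, process
		// items, apply augments); the outer loop repeats until treeQueue,
		// itemQueue, and augmentQueue are all empty.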
+ if err := o.processTreeQueue(passCtx); err != nil { + return err } runtime.GC() - // Handle items in the queue (drain o.itemQueue, fill o.augmentQueue and o.treeQueue) - if true { - stepCtx := dlog.WithField(passCtx, "btrfsinspect.rebuild-nodes.rebuild.substep", "process-items") - itemQueue := maps.Keys(o.itemQueue) - o.itemQueue = make(containers.Set[keyAndTree]) - sort.Slice(itemQueue, func(i, j int) bool { - return itemQueue[i].Compare(itemQueue[j]) < 0 - }) - var progress itemStats - progress.D = len(itemQueue) - progressWriter := textui.NewProgress[itemStats](stepCtx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) - stepCtx = dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress) - type keyAndBody struct { - keyAndTree - Body btrfsitem.Item + // Process items (drain o.itemQueue, fill o.augmentQueue and o.treeQueue). + if err := o.processItemQueue(passCtx); err != nil { + return err + } + runtime.GC() + + // Apply augments (drain o.augmentQueue, fill o.itemQueue). + if err := o.processAugmentQueue(passCtx); err != nil { + return err + } + runtime.GC() + } + return nil +} + +// processTreeQueue drains o.treeQueue, filling o.itemQueue. +func (o *rebuilder) processTreeQueue(ctx context.Context) error { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "collect-items") + treeQueue := o.treeQueue + o.treeQueue = make(containers.Set[btrfsprim.ObjID]) + // Because trees can be wildly different sizes, it's impossible to have a meaningful + // progress percentage here. + o.curKey.Key.OK = false + for _, o.curKey.TreeID = range maps.SortedKeys(treeQueue) { + if err := ctx.Err(); err != nil { + return err + } + o.rebuilt.Tree(ctx, o.curKey.TreeID) + } + return nil +} + +// processItemQueue drains o.itemQueue, filling o.augmentQueue and o.treeQueue. 
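//
// Items are read from their nodes on an "io" goroutine and handled on a "cpu"
// goroutine; handling an item may queue augments, and a ROOT_ITEM item puts
// the tree it describes back on o.treeQueue.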
+func (o *rebuilder) processItemQueue(ctx context.Context) error { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "process-items") + itemQueue := maps.Keys(o.itemQueue) + o.itemQueue = make(containers.Set[keyAndTree]) + sort.Slice(itemQueue, func(i, j int) bool { + return itemQueue[i].Compare(itemQueue[j]) < 0 + }) + var progress itemStats + progress.D = len(itemQueue) + progressWriter := textui.NewProgress[itemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress) + type keyAndBody struct { + keyAndTree + Body btrfsitem.Item + } + itemChan := make(chan keyAndBody, textui.Tunable(300)) // average items-per-node≈100; let's have a buffer of ~3 nodes + grp := dgroup.NewGroup(ctx, dgroup.GroupConfig{}) + grp.Go("io", func(ctx context.Context) error { + defer close(itemChan) + for _, key := range itemQueue { + if err := ctx.Err(); err != nil { + return err } - itemChan := make(chan keyAndBody, textui.Tunable(300)) // average items-per-node≈100; let's have a buffer of ~3 nodes - grp := dgroup.NewGroup(stepCtx, dgroup.GroupConfig{}) - grp.Go("io", func(stepCtx context.Context) error { - defer close(itemChan) - for _, key := range itemQueue { - if err := stepCtx.Err(); err != nil { - return err - } - itemCtx := dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.process.item", key) - itemChan <- keyAndBody{ - keyAndTree: key, - Body: o.rebuilt.Tree(itemCtx, key.TreeID).ReadItem(itemCtx, key.Key), - } - } - return nil - }) - grp.Go("cpu", func(stepCtx context.Context) error { - defer progressWriter.Done() - o.curKey.Key.OK = true - for item := range itemChan { - itemCtx := dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.process.item", item.keyAndTree) - o.curKey.TreeID = item.TreeID - o.curKey.Key.Val = item.Key - handleItem(o, itemCtx, item.TreeID, btrfstree.Item{ - Key: item.Key, - Body: item.Body, - }) - if item.ItemType == btrfsitem.ROOT_ITEM_KEY { - o.treeQueue.Insert(item.ObjectID) - } - progress.N++ - progress.NumAugments = o.numAugments - progress.NumFailures = o.numAugmentFailures - progress.NumAugmentTrees = len(o.augmentQueue) - progressWriter.Set(progress) - } - return nil + itemCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.process.item", key) + itemChan <- keyAndBody{ + keyAndTree: key, + Body: o.rebuilt.Tree(itemCtx, key.TreeID).ReadItem(itemCtx, key.Key), + } + } + return nil + }) + grp.Go("cpu", func(ctx context.Context) error { + defer progressWriter.Done() + o.curKey.Key.OK = true + for item := range itemChan { + itemCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.process.item", item.keyAndTree) + o.curKey.TreeID = item.TreeID + o.curKey.Key.Val = item.Key + handleItem(o, itemCtx, item.TreeID, btrfstree.Item{ + Key: item.Key, + Body: item.Body, }) - if err := grp.Wait(); err != nil { - return err + if item.ItemType == btrfsitem.ROOT_ITEM_KEY { + o.treeQueue.Insert(item.ObjectID) } + progress.N++ + progress.NumAugments = o.numAugments + progress.NumFailures = o.numAugmentFailures + progress.NumAugmentTrees = len(o.augmentQueue) + progressWriter.Set(progress) } - runtime.GC() + return nil + }) + return grp.Wait() +} - // Apply augments (drain o.augmentQueue, fill o.itemQueue) - if true { - stepCtx := dlog.WithField(passCtx, "btrfsinspect.rebuild-nodes.rebuild.substep", "apply-augments") - resolvedAugments := make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], len(o.augmentQueue)) - var progress 
textui.Portion[int] - for _, treeID := range maps.SortedKeys(o.augmentQueue) { - if err := _ctx.Err(); err != nil { - return err - } - treeCtx := dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID) - resolvedAugments[treeID] = o.resolveTreeAugments(treeCtx, treeID) - progress.D += len(resolvedAugments[treeID]) - } - o.augmentQueue = make(map[btrfsprim.ObjID]*treeAugmentQueue) - o.numAugments = 0 - o.numAugmentFailures = 0 - runtime.GC() - progressWriter := textui.NewProgress[textui.Portion[int]](stepCtx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) - stepCtx = dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress) - for _, treeID := range maps.SortedKeys(resolvedAugments) { - treeCtx := dlog.WithField(stepCtx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID) - for _, nodeAddr := range maps.SortedKeys(resolvedAugments[treeID]) { - if err := _ctx.Err(); err != nil { - progressWriter.Set(progress) - progressWriter.Done() - return err - } - progressWriter.Set(progress) - o.rebuilt.Tree(treeCtx, treeID).AddRoot(treeCtx, nodeAddr) - progress.N++ - } +// processAugmentQueue drains o.augmentQueue, filling o.itemQueue. +func (o *rebuilder) processAugmentQueue(ctx context.Context) error { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "apply-augments") + resolvedAugments := make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], len(o.augmentQueue)) + var progress textui.Portion[int] + for _, treeID := range maps.SortedKeys(o.augmentQueue) { + if err := ctx.Err(); err != nil { + return err + } + treeCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID) + resolvedAugments[treeID] = o.resolveTreeAugments(treeCtx, treeID) + progress.D += len(resolvedAugments[treeID]) + } + o.augmentQueue = make(map[btrfsprim.ObjID]*treeAugmentQueue) + o.numAugments = 0 + o.numAugmentFailures = 0 + runtime.GC() + progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress) + for _, treeID := range maps.SortedKeys(resolvedAugments) { + treeCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID) + for _, nodeAddr := range maps.SortedKeys(resolvedAugments[treeID]) { + if err := ctx.Err(); err != nil { + progressWriter.Set(progress) + progressWriter.Done() + return err } progressWriter.Set(progress) - progressWriter.Done() + o.rebuilt.Tree(treeCtx, treeID).AddRoot(treeCtx, nodeAddr) + progress.N++ } - runtime.GC() } + progressWriter.Set(progress) + progressWriter.Done() return nil } -- cgit v1.2.3-2-g168b From 0ffd1a5540201c4b0161b7fea32564adff01f4b2 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Fri, 6 Jan 2023 02:49:05 -0700 Subject: rebuildnodes: Having the treeAugmentQueue instance be named treeQueue is confusing Since there's also Rebuilder.treeQueue. 
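The two colliding names, as they stand in rebuild.go at this point in the
series (other fields elided):

    type rebuilder struct {
        // ...
        treeQueue    containers.Set[btrfsprim.ObjID]
        itemQueue    containers.Set[keyAndTree]
        augmentQueue map[btrfsprim.ObjID]*treeAugmentQueue
        // ...
    }

    type treeAugmentQueue struct {
        keyBuf strings.Builder
        zero   map[string]struct{}
        single map[string]btrfsvol.LogicalAddr
        multi  map[string]containers.Set[btrfsvol.LogicalAddr]
    }

so a local or receiver named treeQueue could refer either to the set of queued
tree IDs or to a per-tree augment queue; renaming the treeAugmentQueue
receiver from treeQueue to queue removes the ambiguity.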
--- .../btrfsinspect/rebuildnodes/rebuild.go | 48 +++++++++++----------- 1 file changed, 24 insertions(+), 24 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go index da3d79f..a088d84 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go @@ -498,21 +498,21 @@ func shortenWantKey(wantKey string) string { return wantKey[sp+1:] } -func (treeQueue *treeAugmentQueue) has(wantKey string) bool { - if treeQueue != nil { +func (queue *treeAugmentQueue) has(wantKey string) bool { + if queue != nil { wantKey = shortenWantKey(wantKey) - if treeQueue.zero != nil { - if _, ok := treeQueue.zero[wantKey]; ok { + if queue.zero != nil { + if _, ok := queue.zero[wantKey]; ok { return true } } - if treeQueue.single != nil { - if _, ok := treeQueue.single[wantKey]; ok { + if queue.single != nil { + if _, ok := queue.single[wantKey]; ok { return true } } - if treeQueue.multi != nil { - if _, ok := treeQueue.multi[wantKey]; ok { + if queue.multi != nil { + if _, ok := queue.multi[wantKey]; ok { return true } } @@ -520,37 +520,37 @@ func (treeQueue *treeAugmentQueue) has(wantKey string) bool { return false } -func (treeQueue *treeAugmentQueue) store(wantKey string, choices containers.Set[btrfsvol.LogicalAddr]) { +func (queue *treeAugmentQueue) store(wantKey string, choices containers.Set[btrfsvol.LogicalAddr]) { if len(choices) == 0 && (strings.Contains(wantKey, " name=") || strings.Contains(wantKey, "-")) { // This wantKey is unlikely to come up again, so it's not worth storing a negative result. return } wantKey = shortenWantKey(wantKey) - beg := treeQueue.keyBuf.Len() - if treeQueue.keyBuf.Cap()-beg < len(wantKey) { - treeQueue.keyBuf.Reset() - treeQueue.keyBuf.Grow(textui.Tunable(4096)) + beg := queue.keyBuf.Len() + if queue.keyBuf.Cap()-beg < len(wantKey) { + queue.keyBuf.Reset() + queue.keyBuf.Grow(textui.Tunable(4096)) beg = 0 } - treeQueue.keyBuf.WriteString(wantKey) - wantKey = treeQueue.keyBuf.String()[beg:] + queue.keyBuf.WriteString(wantKey) + wantKey = queue.keyBuf.String()[beg:] switch len(choices) { case 0: - if treeQueue.zero == nil { - treeQueue.zero = make(map[string]struct{}) + if queue.zero == nil { + queue.zero = make(map[string]struct{}) } - treeQueue.zero[wantKey] = struct{}{} + queue.zero[wantKey] = struct{}{} case 1: - if treeQueue.single == nil { - treeQueue.single = make(map[string]btrfsvol.LogicalAddr) + if queue.single == nil { + queue.single = make(map[string]btrfsvol.LogicalAddr) } - treeQueue.single[wantKey] = choices.TakeOne() + queue.single[wantKey] = choices.TakeOne() default: - if treeQueue.multi == nil { - treeQueue.multi = make(map[string]containers.Set[btrfsvol.LogicalAddr]) + if queue.multi == nil { + queue.multi = make(map[string]containers.Set[btrfsvol.LogicalAddr]) } - treeQueue.multi[wantKey] = choices + queue.multi[wantKey] = choices } } -- cgit v1.2.3-2-g168b From 6f3ec83248920f5638108fc672696996459b6beb Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Fri, 6 Jan 2023 02:51:59 -0700 Subject: rebuildnodes: Split rebuild.go in to separate files --- .../btrfsinspect/rebuildnodes/rebuild.go | 431 --------------------- .../btrfsinspect/rebuildnodes/rebuild_treecb.go | 75 ++++ .../btrfsinspect/rebuildnodes/rebuild_wantcb.go | 387 ++++++++++++++++++ 3 files changed, 462 insertions(+), 431 deletions(-) create mode 100644 lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go create mode 100644 
lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wantcb.go (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go index a088d84..902d810 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go @@ -5,7 +5,6 @@ package rebuildnodes import ( - "bytes" "context" "fmt" "runtime" @@ -19,7 +18,6 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" @@ -272,66 +270,6 @@ func (o *rebuilder) enqueueRetry() { } } -func (o *rebuilder) cbAddedItem(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) { - if handleWouldBeNoOp(key.ItemType) { - return - } - o.itemQueue.Insert(keyAndTree{ - TreeID: tree, - Key: key, - }) -} - -func (o *rebuilder) cbLookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.reason", "tree Root") - key := keyAndTree{ - TreeID: btrfsprim.ROOT_TREE_OBJECTID, - Key: btrfsprim.Key{ - ObjectID: tree, - ItemType: btrfsitem.ROOT_ITEM_KEY, - }, - } - wantKey := fmt.Sprintf("tree=%v key={%v %v ?}", key.TreeID, key.ObjectID, key.ItemType) - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.key", wantKey) - key.Key, ok = o._want(ctx, key.TreeID, wantKey, key.ObjectID, key.ItemType) - if !ok { - o.enqueueRetry() - return 0, btrfsitem.Root{}, false - } - switch itemBody := o.rebuilt.Tree(ctx, key.TreeID).ReadItem(ctx, key.Key).(type) { - case *btrfsitem.Root: - return btrfsprim.Generation(key.Offset), *itemBody, true - case *btrfsitem.Error: - o.fsErr(ctx, fmt.Errorf("error decoding item: %v: %w", key, itemBody.Err)) - return 0, btrfsitem.Root{}, false - default: - // This is a panic because the item decoder should not emit ROOT_ITEM items as anything but - // btrfsitem.Root or btrfsitem.Error without this code also being updated. - panic(fmt.Errorf("should not happen: ROOT_ITEM item has unexpected type: %T", itemBody)) - } -} - -func (o *rebuilder) cbLookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.reason", "resolve parent UUID") - key := keyAndTree{TreeID: btrfsprim.UUID_TREE_OBJECTID, Key: btrfsitem.UUIDToKey(uuid)} - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.key", key.String()) - if !o._wantOff(ctx, key.TreeID, key.String(), key.Key) { - o.enqueueRetry() - return 0, false - } - switch itemBody := o.rebuilt.Tree(ctx, key.TreeID).ReadItem(ctx, key.Key).(type) { - case *btrfsitem.UUIDMap: - return itemBody.ObjID, true - case *btrfsitem.Error: - o.fsErr(ctx, fmt.Errorf("error decoding item: %v: %w", key, itemBody.Err)) - return 0, false - default: - // This is a panic because the item decoder should not emit UUID_SUBVOL items as anything but - // btrfsitem.UUIDMap or btrfsitem.Error without this code also being updated. 
- panic(fmt.Errorf("should not happen: UUID_SUBVOL item has unexpected type: %T", itemBody)) - } -} - func (o *rebuilder) resolveTreeAugments(ctx context.Context, treeID btrfsprim.ObjID) containers.Set[btrfsvol.LogicalAddr] { // Define an algorithm that takes several lists of items, and returns a // set of those items such that each input list contains zero or one of @@ -571,372 +509,3 @@ func (o *rebuilder) wantAugment(ctx context.Context, treeID btrfsprim.ObjID, wan o.numAugments++ } } - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -// fsErr implements rebuildCallbacks. -func (o *rebuilder) fsErr(ctx context.Context, e error) { - dlog.Errorf(ctx, "filesystem error: %v", e) -} - -// want implements rebuildCallbacks. -func (o *rebuilder) want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) - wantKey := fmt.Sprintf("tree=%v key={%v %v ?}", treeID, objID, typ) - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) - o._want(ctx, treeID, wantKey, objID, typ) -} - -func (o *rebuilder) _want(ctx context.Context, treeID btrfsprim.ObjID, wantKey string, objID btrfsprim.ObjID, typ btrfsprim.ItemType) (key btrfsprim.Key, ok bool) { - if o.rebuilt.Tree(ctx, treeID) == nil { - o.enqueueRetry() - return btrfsprim.Key{}, false - } - - // check if we already have it - - tgt := btrfsprim.Key{ - ObjectID: objID, - ItemType: typ, - } - if key, _, ok := o.rebuilt.Tree(ctx, treeID).Items(ctx).Search(func(key btrfsprim.Key, _ keyio.ItemPtr) int { - key.Offset = 0 - return tgt.Compare(key) - }); ok { - return key, true - } - - // OK, we need to insert it - - if o.hasAugment(treeID, wantKey) { - return btrfsprim.Key{}, false - } - wants := make(containers.Set[btrfsvol.LogicalAddr]) - o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx).Subrange( - func(k btrfsprim.Key, _ keyio.ItemPtr) int { k.Offset = 0; return tgt.Compare(k) }, - func(_ btrfsprim.Key, v keyio.ItemPtr) bool { - wants.InsertFrom(o.rebuilt.Tree(ctx, treeID).LeafToRoots(ctx, v.Node)) - return true - }) - o.wantAugment(ctx, treeID, wantKey, wants) - return btrfsprim.Key{}, false -} - -// wantOff implements rebuildCallbacks. 
-func (o *rebuilder) wantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) { - key := btrfsprim.Key{ - ObjectID: objID, - ItemType: typ, - Offset: off, - } - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) - wantKey := keyAndTree{TreeID: treeID, Key: key}.String() - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) - o._wantOff(ctx, treeID, wantKey, key) -} - -func (o *rebuilder) _wantOff(ctx context.Context, treeID btrfsprim.ObjID, wantKey string, tgt btrfsprim.Key) (ok bool) { - if o.rebuilt.Tree(ctx, treeID) == nil { - o.enqueueRetry() - return false - } - - // check if we already have it - - if _, ok := o.rebuilt.Tree(ctx, treeID).Items(ctx).Load(tgt); ok { - return true - } - - // OK, we need to insert it - - if o.hasAugment(treeID, wantKey) { - return false - } - wants := make(containers.Set[btrfsvol.LogicalAddr]) - o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx).Subrange( - func(k btrfsprim.Key, _ keyio.ItemPtr) int { return tgt.Compare(k) }, - func(_ btrfsprim.Key, v keyio.ItemPtr) bool { - wants.InsertFrom(o.rebuilt.Tree(ctx, treeID).LeafToRoots(ctx, v.Node)) - return true - }) - o.wantAugment(ctx, treeID, wantKey, wants) - return false -} - -func (o *rebuilder) _wantFunc(ctx context.Context, treeID btrfsprim.ObjID, wantKey string, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(keyio.ItemPtr) bool) { - if o.rebuilt.Tree(ctx, treeID) == nil { - o.enqueueRetry() - return - } - - // check if we already have it - - tgt := btrfsprim.Key{ - ObjectID: objID, - ItemType: typ, - } - found := false - o.rebuilt.Tree(ctx, treeID).Items(ctx).Subrange( - func(key btrfsprim.Key, _ keyio.ItemPtr) int { - key.Offset = 0 - return tgt.Compare(key) - }, - func(_ btrfsprim.Key, itemPtr keyio.ItemPtr) bool { - if fn(itemPtr) { - found = true - } - return !found - }) - if found { - return - } - - // OK, we need to insert it - - if o.hasAugment(treeID, wantKey) { - return - } - wants := make(containers.Set[btrfsvol.LogicalAddr]) - o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx).Subrange( - func(k btrfsprim.Key, _ keyio.ItemPtr) int { k.Offset = 0; return tgt.Compare(k) }, - func(k btrfsprim.Key, v keyio.ItemPtr) bool { - if fn(v) { - wants.InsertFrom(o.rebuilt.Tree(ctx, treeID).LeafToRoots(ctx, v.Node)) - } - return true - }) - o.wantAugment(ctx, treeID, wantKey, wants) -} - -// wantDirIndex implements rebuildCallbacks. 
-func (o *rebuilder) wantDirIndex(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, name []byte) { - typ := btrfsitem.DIR_INDEX_KEY - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) - wantKey := fmt.Sprintf("tree=%v key={%v %v ?} name=%q", treeID, objID, typ, name) - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) - o._wantFunc(ctx, treeID, wantKey, objID, typ, func(ptr keyio.ItemPtr) bool { - itemName, ok := o.keyIO.Names[ptr] - return ok && bytes.Equal(itemName, name) - }) -} - -func (o *rebuilder) _walkRange( - ctx context.Context, - items *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr], - treeID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, - beg, end uint64, - fn func(key btrfsprim.Key, ptr keyio.ItemPtr, beg, end uint64), -) { - min := btrfsprim.Key{ - ObjectID: objID, - ItemType: typ, - Offset: 0, // *NOT* `beg` - } - max := btrfsprim.Key{ - ObjectID: objID, - ItemType: typ, - Offset: end - 1, - } - items.Subrange( - func(runKey btrfsprim.Key, _ keyio.ItemPtr) int { - switch { - case min.Compare(runKey) < 0: - return 1 - case max.Compare(runKey) > 0: - return -1 - default: - return 0 - } - }, - func(runKey btrfsprim.Key, runPtr keyio.ItemPtr) bool { - runSizeAndErr, ok := o.keyIO.Sizes[runPtr] - if !ok { - panic(fmt.Errorf("should not happen: %v (%v) did not have a size recorded", - runPtr, keyAndTree{TreeID: treeID, Key: runKey})) - } - if runSizeAndErr.Err != nil { - o.fsErr(ctx, fmt.Errorf("get size: %v (%v): %w", - runPtr, keyAndTree{TreeID: treeID, Key: runKey}, - runSizeAndErr.Err)) - return true - } - runSize := runSizeAndErr.Size - if runSize == 0 { - return true - } - runBeg := runKey.Offset - runEnd := runBeg + runSize - if runEnd <= beg { - return true - } - - fn(runKey, runPtr, runBeg, runEnd) - return true - }) -} - -type gap struct { - // range is [Beg,End) - Beg, End uint64 -} - -// Compare implements containers.Ordered. -func (a gap) Compare(b gap) int { - return containers.NativeCompare(a.Beg, b.Beg) -} - -func (o *rebuilder) _wantRange( - ctx context.Context, - treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, - beg, end uint64, -) { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", - fmt.Sprintf("tree=%v key={%v %v ?}", treeID, objID, typ)) - - if o.rebuilt.Tree(ctx, treeID) == nil { - o.enqueueRetry() - return - } - - // Step 1: Build a listing of the gaps. - // - // Start with a gap of the whole range, then subtract each run - // from it. - gaps := new(containers.RBTree[gap]) - gaps.Insert(gap{ - Beg: beg, - End: end, - }) - o._walkRange( - ctx, - o.rebuilt.Tree(ctx, treeID).Items(ctx), - treeID, objID, typ, beg, end, - func(runKey btrfsprim.Key, _ keyio.ItemPtr, runBeg, runEnd uint64) { - var overlappingGaps []*containers.RBNode[gap] - gaps.Subrange( - func(gap gap) int { - switch { - case gap.End <= runBeg: - return 1 - case runEnd <= gap.Beg: - return -1 - default: - return 0 - } - }, - func(node *containers.RBNode[gap]) bool { - overlappingGaps = append(overlappingGaps, node) - return true - }) - if len(overlappingGaps) == 0 { - return - } - gapsBeg := overlappingGaps[0].Value.Beg - gapsEnd := overlappingGaps[len(overlappingGaps)-1].Value.End - for _, gap := range overlappingGaps { - gaps.Delete(gap) - } - if gapsBeg < runBeg { - gaps.Insert(gap{ - Beg: gapsBeg, - End: runBeg, - }) - } - if gapsEnd > runEnd { - gaps.Insert(gap{ - Beg: runEnd, - End: gapsEnd, - }) - } - }) - - // Step 2: Fill each gap. 
- if gaps.Len() == 0 { - return - } - potentialItems := o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx) - gaps.Range(func(rbNode *containers.RBNode[gap]) bool { - gap := rbNode.Value - last := gap.Beg - o._walkRange( - ctx, - potentialItems, - treeID, objID, typ, gap.Beg, gap.End, - func(k btrfsprim.Key, v keyio.ItemPtr, runBeg, runEnd uint64) { - // TODO: This is dumb and greedy. - if last < runBeg { - // log an error - wantKey := fmt.Sprintf("tree=%v key={%v %v %v-%v}", treeID, objID, typ, last, runBeg) - if !o.hasAugment(treeID, wantKey) { - wantCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) - o.wantAugment(wantCtx, treeID, wantKey, nil) - } - } - wantKey := fmt.Sprintf("tree=%v key={%v %v %v-%v}", treeID, objID, typ, gap.Beg, gap.End) - if !o.hasAugment(treeID, wantKey) { - wantCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) - o.wantAugment(wantCtx, treeID, wantKey, o.rebuilt.Tree(wantCtx, treeID).LeafToRoots(wantCtx, v.Node)) - } - last = runEnd - }) - if last < gap.End { - // log an error - wantKey := fmt.Sprintf("tree=%v key={%v, %v, %v-%v}", treeID, objID, typ, last, gap.End) - if !o.hasAugment(treeID, wantKey) { - wantCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) - o.wantAugment(wantCtx, treeID, wantKey, nil) - } - } - return true - }) -} - -// func implements rebuildCallbacks. -// -// interval is [beg, end) -func (o *rebuilder) wantCSum(ctx context.Context, reason string, inodeTree, inode btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) - - inodeKey := keyAndTree{ - TreeID: inodeTree, - Key: btrfsprim.Key{ - ObjectID: inode, - ItemType: btrfsitem.INODE_ITEM_KEY, - Offset: 0, - }, - } - inodeCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", inodeKey.String()) - if !o._wantOff(inodeCtx, inodeKey.TreeID, inodeKey.String(), inodeKey.Key) { - o.enqueueRetry() - return - } - inodePtr, ok := o.rebuilt.Tree(inodeCtx, inodeKey.TreeID).Items(inodeCtx).Load(inodeKey.Key) - if !ok { - panic(fmt.Errorf("should not happen: could not load key: %v", inodeKey)) - } - inodeFlags, ok := o.keyIO.Flags[inodePtr] - if !ok { - panic(fmt.Errorf("should not happen: INODE_ITEM did not have flags recorded")) - } - if inodeFlags.Err != nil { - o.fsErr(inodeCtx, inodeFlags.Err) - return - } - - if inodeFlags.NoDataSum { - return - } - - beg = roundDown(beg, btrfssum.BlockSize) - end = roundUp(end, btrfssum.BlockSize) - const treeID = btrfsprim.CSUM_TREE_OBJECTID - o._wantRange(ctx, treeID, btrfsprim.EXTENT_CSUM_OBJECTID, btrfsprim.EXTENT_CSUM_KEY, - uint64(beg), uint64(end)) -} - -// wantFileExt implements rebuildCallbacks. 
-func (o *rebuilder) wantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) - o._wantRange(ctx, treeID, ino, btrfsprim.EXTENT_DATA_KEY, - 0, uint64(size)) -} diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go new file mode 100644 index 0000000..7266777 --- /dev/null +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go @@ -0,0 +1,75 @@ +// Copyright (C) 2022-2023 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package rebuildnodes + +import ( + "context" + "fmt" + + "github.com/datawire/dlib/dlog" + + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" +) + +func (o *rebuilder) cbAddedItem(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) { + if handleWouldBeNoOp(key.ItemType) { + return + } + o.itemQueue.Insert(keyAndTree{ + TreeID: tree, + Key: key, + }) +} + +func (o *rebuilder) cbLookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.reason", "tree Root") + key := keyAndTree{ + TreeID: btrfsprim.ROOT_TREE_OBJECTID, + Key: btrfsprim.Key{ + ObjectID: tree, + ItemType: btrfsitem.ROOT_ITEM_KEY, + }, + } + wantKey := fmt.Sprintf("tree=%v key={%v %v ?}", key.TreeID, key.ObjectID, key.ItemType) + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.key", wantKey) + key.Key, ok = o._want(ctx, key.TreeID, wantKey, key.ObjectID, key.ItemType) + if !ok { + o.enqueueRetry() + return 0, btrfsitem.Root{}, false + } + switch itemBody := o.rebuilt.Tree(ctx, key.TreeID).ReadItem(ctx, key.Key).(type) { + case *btrfsitem.Root: + return btrfsprim.Generation(key.Offset), *itemBody, true + case *btrfsitem.Error: + o.fsErr(ctx, fmt.Errorf("error decoding item: %v: %w", key, itemBody.Err)) + return 0, btrfsitem.Root{}, false + default: + // This is a panic because the item decoder should not emit ROOT_ITEM items as anything but + // btrfsitem.Root or btrfsitem.Error without this code also being updated. + panic(fmt.Errorf("should not happen: ROOT_ITEM item has unexpected type: %T", itemBody)) + } +} + +func (o *rebuilder) cbLookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.reason", "resolve parent UUID") + key := keyAndTree{TreeID: btrfsprim.UUID_TREE_OBJECTID, Key: btrfsitem.UUIDToKey(uuid)} + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.key", key.String()) + if !o._wantOff(ctx, key.TreeID, key.String(), key.Key) { + o.enqueueRetry() + return 0, false + } + switch itemBody := o.rebuilt.Tree(ctx, key.TreeID).ReadItem(ctx, key.Key).(type) { + case *btrfsitem.UUIDMap: + return itemBody.ObjID, true + case *btrfsitem.Error: + o.fsErr(ctx, fmt.Errorf("error decoding item: %v: %w", key, itemBody.Err)) + return 0, false + default: + // This is a panic because the item decoder should not emit UUID_SUBVOL items as anything but + // btrfsitem.UUIDMap or btrfsitem.Error without this code also being updated. 
+ panic(fmt.Errorf("should not happen: UUID_SUBVOL item has unexpected type: %T", itemBody)) + } +} diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wantcb.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wantcb.go new file mode 100644 index 0000000..7ea9c85 --- /dev/null +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wantcb.go @@ -0,0 +1,387 @@ +// Copyright (C) 2022-2023 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package rebuildnodes + +import ( + "bytes" + "context" + "fmt" + + "github.com/datawire/dlib/dlog" + + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio" + "git.lukeshu.com/btrfs-progs-ng/lib/containers" +) + +// fsErr implements rebuildCallbacks. +func (o *rebuilder) fsErr(ctx context.Context, e error) { + dlog.Errorf(ctx, "filesystem error: %v", e) +} + +// want implements rebuildCallbacks. +func (o *rebuilder) want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) + wantKey := fmt.Sprintf("tree=%v key={%v %v ?}", treeID, objID, typ) + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) + o._want(ctx, treeID, wantKey, objID, typ) +} + +func (o *rebuilder) _want(ctx context.Context, treeID btrfsprim.ObjID, wantKey string, objID btrfsprim.ObjID, typ btrfsprim.ItemType) (key btrfsprim.Key, ok bool) { + if o.rebuilt.Tree(ctx, treeID) == nil { + o.enqueueRetry() + return btrfsprim.Key{}, false + } + + // check if we already have it + + tgt := btrfsprim.Key{ + ObjectID: objID, + ItemType: typ, + } + if key, _, ok := o.rebuilt.Tree(ctx, treeID).Items(ctx).Search(func(key btrfsprim.Key, _ keyio.ItemPtr) int { + key.Offset = 0 + return tgt.Compare(key) + }); ok { + return key, true + } + + // OK, we need to insert it + + if o.hasAugment(treeID, wantKey) { + return btrfsprim.Key{}, false + } + wants := make(containers.Set[btrfsvol.LogicalAddr]) + o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx).Subrange( + func(k btrfsprim.Key, _ keyio.ItemPtr) int { k.Offset = 0; return tgt.Compare(k) }, + func(_ btrfsprim.Key, v keyio.ItemPtr) bool { + wants.InsertFrom(o.rebuilt.Tree(ctx, treeID).LeafToRoots(ctx, v.Node)) + return true + }) + o.wantAugment(ctx, treeID, wantKey, wants) + return btrfsprim.Key{}, false +} + +// wantOff implements rebuildCallbacks. 
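//
// Unlike want(), wantOff() asks for an exact key, offset included, rather
// than matching any offset.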
+func (o *rebuilder) wantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) { + key := btrfsprim.Key{ + ObjectID: objID, + ItemType: typ, + Offset: off, + } + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) + wantKey := keyAndTree{TreeID: treeID, Key: key}.String() + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) + o._wantOff(ctx, treeID, wantKey, key) +} + +func (o *rebuilder) _wantOff(ctx context.Context, treeID btrfsprim.ObjID, wantKey string, tgt btrfsprim.Key) (ok bool) { + if o.rebuilt.Tree(ctx, treeID) == nil { + o.enqueueRetry() + return false + } + + // check if we already have it + + if _, ok := o.rebuilt.Tree(ctx, treeID).Items(ctx).Load(tgt); ok { + return true + } + + // OK, we need to insert it + + if o.hasAugment(treeID, wantKey) { + return false + } + wants := make(containers.Set[btrfsvol.LogicalAddr]) + o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx).Subrange( + func(k btrfsprim.Key, _ keyio.ItemPtr) int { return tgt.Compare(k) }, + func(_ btrfsprim.Key, v keyio.ItemPtr) bool { + wants.InsertFrom(o.rebuilt.Tree(ctx, treeID).LeafToRoots(ctx, v.Node)) + return true + }) + o.wantAugment(ctx, treeID, wantKey, wants) + return false +} + +func (o *rebuilder) _wantFunc(ctx context.Context, treeID btrfsprim.ObjID, wantKey string, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(keyio.ItemPtr) bool) { + if o.rebuilt.Tree(ctx, treeID) == nil { + o.enqueueRetry() + return + } + + // check if we already have it + + tgt := btrfsprim.Key{ + ObjectID: objID, + ItemType: typ, + } + found := false + o.rebuilt.Tree(ctx, treeID).Items(ctx).Subrange( + func(key btrfsprim.Key, _ keyio.ItemPtr) int { + key.Offset = 0 + return tgt.Compare(key) + }, + func(_ btrfsprim.Key, itemPtr keyio.ItemPtr) bool { + if fn(itemPtr) { + found = true + } + return !found + }) + if found { + return + } + + // OK, we need to insert it + + if o.hasAugment(treeID, wantKey) { + return + } + wants := make(containers.Set[btrfsvol.LogicalAddr]) + o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx).Subrange( + func(k btrfsprim.Key, _ keyio.ItemPtr) int { k.Offset = 0; return tgt.Compare(k) }, + func(k btrfsprim.Key, v keyio.ItemPtr) bool { + if fn(v) { + wants.InsertFrom(o.rebuilt.Tree(ctx, treeID).LeafToRoots(ctx, v.Node)) + } + return true + }) + o.wantAugment(ctx, treeID, wantKey, wants) +} + +// wantDirIndex implements rebuildCallbacks. 
+func (o *rebuilder) wantDirIndex(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, name []byte) { + typ := btrfsitem.DIR_INDEX_KEY + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) + wantKey := fmt.Sprintf("tree=%v key={%v %v ?} name=%q", treeID, objID, typ, name) + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) + o._wantFunc(ctx, treeID, wantKey, objID, typ, func(ptr keyio.ItemPtr) bool { + itemName, ok := o.keyIO.Names[ptr] + return ok && bytes.Equal(itemName, name) + }) +} + +func (o *rebuilder) _walkRange( + ctx context.Context, + items *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr], + treeID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, + beg, end uint64, + fn func(key btrfsprim.Key, ptr keyio.ItemPtr, beg, end uint64), +) { + min := btrfsprim.Key{ + ObjectID: objID, + ItemType: typ, + Offset: 0, // *NOT* `beg` + } + max := btrfsprim.Key{ + ObjectID: objID, + ItemType: typ, + Offset: end - 1, + } + items.Subrange( + func(runKey btrfsprim.Key, _ keyio.ItemPtr) int { + switch { + case min.Compare(runKey) < 0: + return 1 + case max.Compare(runKey) > 0: + return -1 + default: + return 0 + } + }, + func(runKey btrfsprim.Key, runPtr keyio.ItemPtr) bool { + runSizeAndErr, ok := o.keyIO.Sizes[runPtr] + if !ok { + panic(fmt.Errorf("should not happen: %v (%v) did not have a size recorded", + runPtr, keyAndTree{TreeID: treeID, Key: runKey})) + } + if runSizeAndErr.Err != nil { + o.fsErr(ctx, fmt.Errorf("get size: %v (%v): %w", + runPtr, keyAndTree{TreeID: treeID, Key: runKey}, + runSizeAndErr.Err)) + return true + } + runSize := runSizeAndErr.Size + if runSize == 0 { + return true + } + runBeg := runKey.Offset + runEnd := runBeg + runSize + if runEnd <= beg { + return true + } + + fn(runKey, runPtr, runBeg, runEnd) + return true + }) +} + +type gap struct { + // range is [Beg,End) + Beg, End uint64 +} + +// Compare implements containers.Ordered. +func (a gap) Compare(b gap) int { + return containers.NativeCompare(a.Beg, b.Beg) +} + +func (o *rebuilder) _wantRange( + ctx context.Context, + treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, + beg, end uint64, +) { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", + fmt.Sprintf("tree=%v key={%v %v ?}", treeID, objID, typ)) + + if o.rebuilt.Tree(ctx, treeID) == nil { + o.enqueueRetry() + return + } + + // Step 1: Build a listing of the gaps. + // + // Start with a gap of the whole range, then subtract each run + // from it. + gaps := new(containers.RBTree[gap]) + gaps.Insert(gap{ + Beg: beg, + End: end, + }) + o._walkRange( + ctx, + o.rebuilt.Tree(ctx, treeID).Items(ctx), + treeID, objID, typ, beg, end, + func(runKey btrfsprim.Key, _ keyio.ItemPtr, runBeg, runEnd uint64) { + var overlappingGaps []*containers.RBNode[gap] + gaps.Subrange( + func(gap gap) int { + switch { + case gap.End <= runBeg: + return 1 + case runEnd <= gap.Beg: + return -1 + default: + return 0 + } + }, + func(node *containers.RBNode[gap]) bool { + overlappingGaps = append(overlappingGaps, node) + return true + }) + if len(overlappingGaps) == 0 { + return + } + gapsBeg := overlappingGaps[0].Value.Beg + gapsEnd := overlappingGaps[len(overlappingGaps)-1].Value.End + for _, gap := range overlappingGaps { + gaps.Delete(gap) + } + if gapsBeg < runBeg { + gaps.Insert(gap{ + Beg: gapsBeg, + End: runBeg, + }) + } + if gapsEnd > runEnd { + gaps.Insert(gap{ + Beg: runEnd, + End: gapsEnd, + }) + } + }) + + // Step 2: Fill each gap. 
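	//
	// For example, if the wanted range is [0,12) and the tree's existing
	// items cover [0,4) and [8,12), step 1 leaves the single gap [4,8);
	// step 2 then walks the tree's potential items for runs overlapping
	// [4,8), greedily requesting an augment for each run found and
	// recording a failed want for any portion no candidate run covers.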
+ if gaps.Len() == 0 { + return + } + potentialItems := o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx) + gaps.Range(func(rbNode *containers.RBNode[gap]) bool { + gap := rbNode.Value + last := gap.Beg + o._walkRange( + ctx, + potentialItems, + treeID, objID, typ, gap.Beg, gap.End, + func(k btrfsprim.Key, v keyio.ItemPtr, runBeg, runEnd uint64) { + // TODO: This is dumb and greedy. + if last < runBeg { + // log an error + wantKey := fmt.Sprintf("tree=%v key={%v %v %v-%v}", treeID, objID, typ, last, runBeg) + if !o.hasAugment(treeID, wantKey) { + wantCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) + o.wantAugment(wantCtx, treeID, wantKey, nil) + } + } + wantKey := fmt.Sprintf("tree=%v key={%v %v %v-%v}", treeID, objID, typ, gap.Beg, gap.End) + if !o.hasAugment(treeID, wantKey) { + wantCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) + o.wantAugment(wantCtx, treeID, wantKey, o.rebuilt.Tree(wantCtx, treeID).LeafToRoots(wantCtx, v.Node)) + } + last = runEnd + }) + if last < gap.End { + // log an error + wantKey := fmt.Sprintf("tree=%v key={%v, %v, %v-%v}", treeID, objID, typ, last, gap.End) + if !o.hasAugment(treeID, wantKey) { + wantCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) + o.wantAugment(wantCtx, treeID, wantKey, nil) + } + } + return true + }) +} + +// func implements rebuildCallbacks. +// +// interval is [beg, end) +func (o *rebuilder) wantCSum(ctx context.Context, reason string, inodeTree, inode btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) + + inodeKey := keyAndTree{ + TreeID: inodeTree, + Key: btrfsprim.Key{ + ObjectID: inode, + ItemType: btrfsitem.INODE_ITEM_KEY, + Offset: 0, + }, + } + inodeCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", inodeKey.String()) + if !o._wantOff(inodeCtx, inodeKey.TreeID, inodeKey.String(), inodeKey.Key) { + o.enqueueRetry() + return + } + inodePtr, ok := o.rebuilt.Tree(inodeCtx, inodeKey.TreeID).Items(inodeCtx).Load(inodeKey.Key) + if !ok { + panic(fmt.Errorf("should not happen: could not load key: %v", inodeKey)) + } + inodeFlags, ok := o.keyIO.Flags[inodePtr] + if !ok { + panic(fmt.Errorf("should not happen: INODE_ITEM did not have flags recorded")) + } + if inodeFlags.Err != nil { + o.fsErr(inodeCtx, inodeFlags.Err) + return + } + + if inodeFlags.NoDataSum { + return + } + + beg = roundDown(beg, btrfssum.BlockSize) + end = roundUp(end, btrfssum.BlockSize) + const treeID = btrfsprim.CSUM_TREE_OBJECTID + o._wantRange(ctx, treeID, btrfsprim.EXTENT_CSUM_OBJECTID, btrfsprim.EXTENT_CSUM_KEY, + uint64(beg), uint64(end)) +} + +// wantFileExt implements rebuildCallbacks. 
+func (o *rebuilder) wantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) + o._wantRange(ctx, treeID, ino, btrfsprim.EXTENT_DATA_KEY, + 0, uint64(size)) +} -- cgit v1.2.3-2-g168b From e62111bae947070586aa5ca3c98f7ec13ba3ab88 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Fri, 6 Jan 2023 04:16:10 -0700 Subject: rebuildnodes: Don't have wantKeys be stringly-typed --- .../btrfsinspect/rebuildnodes/rebuild.go | 62 +++---- .../btrfsinspect/rebuildnodes/rebuild_treecb.go | 37 ++-- .../btrfsinspect/rebuildnodes/rebuild_wantcb.go | 197 +++++++++++---------- .../btrfsinspect/rebuildnodes/rebuild_wanttyp.go | 107 +++++++++++ 4 files changed, 250 insertions(+), 153 deletions(-) create mode 100644 lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wanttyp.go (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go index 902d810..6d610ee 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go @@ -9,7 +9,6 @@ import ( "fmt" "runtime" "sort" - "strings" "time" "github.com/datawire/dlib/dgroup" @@ -64,10 +63,9 @@ type rebuilder struct { } type treeAugmentQueue struct { - keyBuf strings.Builder - zero map[string]struct{} - single map[string]btrfsvol.LogicalAddr - multi map[string]containers.Set[btrfsvol.LogicalAddr] + zero map[Want]struct{} + single map[Want]btrfsvol.LogicalAddr + multi map[Want]containers.Set[btrfsvol.LogicalAddr] } type Rebuilder interface { @@ -401,7 +399,9 @@ func (o *rebuilder) resolveTreeAugments(ctx context.Context, treeID btrfsprim.Ob wantKeys := append( maps.Keys(o.augmentQueue[treeID].single), maps.Keys(o.augmentQueue[treeID].multi)...) - sort.Strings(wantKeys) + sort.Slice(wantKeys, func(i, j int) bool { + return wantKeys[i].Cmp(wantKeys[j]) < 0 + }) for _, wantKey := range wantKeys { list, ok := o.augmentQueue[treeID].multi[wantKey] if !ok { @@ -418,27 +418,14 @@ func (o *rebuilder) resolveTreeAugments(ctx context.Context, treeID btrfsprim.Ob // Free some memory o.augmentQueue[treeID].single = nil o.augmentQueue[treeID].multi = nil - o.augmentQueue[treeID].keyBuf.Reset() return ret } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -func shortenWantKey(wantKey string) string { - if !strings.HasPrefix(wantKey, "tree=") { - panic("should not happen") - } - sp := strings.IndexByte(wantKey, ' ') - if sp < 0 { - panic("should not happen") - } - return wantKey[sp+1:] -} - -func (queue *treeAugmentQueue) has(wantKey string) bool { +func (queue *treeAugmentQueue) has(wantKey Want) bool { if queue != nil { - wantKey = shortenWantKey(wantKey) if queue.zero != nil { if _, ok := queue.zero[wantKey]; ok { return true @@ -458,49 +445,40 @@ func (queue *treeAugmentQueue) has(wantKey string) bool { return false } -func (queue *treeAugmentQueue) store(wantKey string, choices containers.Set[btrfsvol.LogicalAddr]) { - if len(choices) == 0 && (strings.Contains(wantKey, " name=") || strings.Contains(wantKey, "-")) { - // This wantKey is unlikely to come up again, so it's not worth storing a negative result. 
+func (queue *treeAugmentQueue) store(wantKey Want, choices containers.Set[btrfsvol.LogicalAddr]) { + if len(choices) == 0 && wantKey.OffsetType > offsetExact { + // This wantKey is unlikely to come up again, so it's + // not worth the RAM of storing a negative result. return } - wantKey = shortenWantKey(wantKey) - beg := queue.keyBuf.Len() - if queue.keyBuf.Cap()-beg < len(wantKey) { - queue.keyBuf.Reset() - queue.keyBuf.Grow(textui.Tunable(4096)) - beg = 0 - } - queue.keyBuf.WriteString(wantKey) - wantKey = queue.keyBuf.String()[beg:] - switch len(choices) { case 0: if queue.zero == nil { - queue.zero = make(map[string]struct{}) + queue.zero = make(map[Want]struct{}) } queue.zero[wantKey] = struct{}{} case 1: if queue.single == nil { - queue.single = make(map[string]btrfsvol.LogicalAddr) + queue.single = make(map[Want]btrfsvol.LogicalAddr) } queue.single[wantKey] = choices.TakeOne() default: if queue.multi == nil { - queue.multi = make(map[string]containers.Set[btrfsvol.LogicalAddr]) + queue.multi = make(map[Want]containers.Set[btrfsvol.LogicalAddr]) } queue.multi[wantKey] = choices } } -func (o *rebuilder) hasAugment(treeID btrfsprim.ObjID, wantKey string) bool { - return o.augmentQueue[treeID].has(wantKey) +func (o *rebuilder) hasAugment(wantKey WantWithTree) bool { + return o.augmentQueue[wantKey.TreeID].has(wantKey.Key) } -func (o *rebuilder) wantAugment(ctx context.Context, treeID btrfsprim.ObjID, wantKey string, choices containers.Set[btrfsvol.LogicalAddr]) { - if o.augmentQueue[treeID] == nil { - o.augmentQueue[treeID] = new(treeAugmentQueue) +func (o *rebuilder) wantAugment(ctx context.Context, wantKey WantWithTree, choices containers.Set[btrfsvol.LogicalAddr]) { + if o.augmentQueue[wantKey.TreeID] == nil { + o.augmentQueue[wantKey.TreeID] = new(treeAugmentQueue) } - o.augmentQueue[treeID].store(wantKey, choices) + o.augmentQueue[wantKey.TreeID].store(wantKey.Key, choices) if len(choices) == 0 { dlog.Error(ctx, "could not find wanted item") o.numAugmentFailures++ diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go index 7266777..827aa5c 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go @@ -8,8 +8,6 @@ import ( "context" "fmt" - "github.com/datawire/dlib/dlog" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" ) @@ -25,26 +23,25 @@ func (o *rebuilder) cbAddedItem(ctx context.Context, tree btrfsprim.ObjID, key b } func (o *rebuilder) cbLookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.reason", "tree Root") - key := keyAndTree{ + wantKey := WantWithTree{ TreeID: btrfsprim.ROOT_TREE_OBJECTID, - Key: btrfsprim.Key{ - ObjectID: tree, - ItemType: btrfsitem.ROOT_ITEM_KEY, + Key: Want{ + ObjectID: tree, + ItemType: btrfsitem.ROOT_ITEM_KEY, + OffsetType: offsetAny, }, } - wantKey := fmt.Sprintf("tree=%v key={%v %v ?}", key.TreeID, key.ObjectID, key.ItemType) - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.key", wantKey) - key.Key, ok = o._want(ctx, key.TreeID, wantKey, key.ObjectID, key.ItemType) + ctx = withWant(ctx, logFieldTreeWant, "tree Root", wantKey) + foundKey, ok := o._want(ctx, wantKey) if !ok { o.enqueueRetry() return 0, btrfsitem.Root{}, false } - switch itemBody := o.rebuilt.Tree(ctx, 
key.TreeID).ReadItem(ctx, key.Key).(type) { + switch itemBody := o.rebuilt.Tree(ctx, wantKey.TreeID).ReadItem(ctx, foundKey).(type) { case *btrfsitem.Root: - return btrfsprim.Generation(key.Offset), *itemBody, true + return btrfsprim.Generation(foundKey.Offset), *itemBody, true case *btrfsitem.Error: - o.fsErr(ctx, fmt.Errorf("error decoding item: %v: %w", key, itemBody.Err)) + o.fsErr(ctx, fmt.Errorf("error decoding item: %v: %w", foundKey, itemBody.Err)) return 0, btrfsitem.Root{}, false default: // This is a panic because the item decoder should not emit ROOT_ITEM items as anything but @@ -54,18 +51,20 @@ func (o *rebuilder) cbLookupRoot(ctx context.Context, tree btrfsprim.ObjID) (off } func (o *rebuilder) cbLookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.reason", "resolve parent UUID") - key := keyAndTree{TreeID: btrfsprim.UUID_TREE_OBJECTID, Key: btrfsitem.UUIDToKey(uuid)} - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.key", key.String()) - if !o._wantOff(ctx, key.TreeID, key.String(), key.Key) { + wantKey := WantWithTree{ + TreeID: btrfsprim.UUID_TREE_OBJECTID, + Key: wantFromKey(btrfsitem.UUIDToKey(uuid)), + } + ctx = withWant(ctx, logFieldTreeWant, "resolve parent UUID", wantKey) + if !o._wantOff(ctx, wantKey) { o.enqueueRetry() return 0, false } - switch itemBody := o.rebuilt.Tree(ctx, key.TreeID).ReadItem(ctx, key.Key).(type) { + switch itemBody := o.rebuilt.Tree(ctx, wantKey.TreeID).ReadItem(ctx, wantKey.Key.Key()).(type) { case *btrfsitem.UUIDMap: return itemBody.ObjID, true case *btrfsitem.Error: - o.fsErr(ctx, fmt.Errorf("error decoding item: %v: %w", key, itemBody.Err)) + o.fsErr(ctx, fmt.Errorf("error decoding item: %v: %w", wantKey, itemBody.Err)) return 0, false default: // This is a panic because the item decoder should not emit UUID_SUBVOL items as anything but diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wantcb.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wantcb.go index 7ea9c85..c57b2bb 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wantcb.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wantcb.go @@ -26,25 +26,28 @@ func (o *rebuilder) fsErr(ctx context.Context, e error) { // want implements rebuildCallbacks. 
func (o *rebuilder) want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) - wantKey := fmt.Sprintf("tree=%v key={%v %v ?}", treeID, objID, typ) - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) - o._want(ctx, treeID, wantKey, objID, typ) + wantKey := WantWithTree{ + TreeID: treeID, + Key: Want{ + ObjectID: objID, + ItemType: typ, + OffsetType: offsetAny, + }, + } + ctx = withWant(ctx, logFieldItemWant, reason, wantKey) + o._want(ctx, wantKey) } -func (o *rebuilder) _want(ctx context.Context, treeID btrfsprim.ObjID, wantKey string, objID btrfsprim.ObjID, typ btrfsprim.ItemType) (key btrfsprim.Key, ok bool) { - if o.rebuilt.Tree(ctx, treeID) == nil { +func (o *rebuilder) _want(ctx context.Context, wantKey WantWithTree) (key btrfsprim.Key, ok bool) { + if o.rebuilt.Tree(ctx, wantKey.TreeID) == nil { o.enqueueRetry() return btrfsprim.Key{}, false } // check if we already have it - tgt := btrfsprim.Key{ - ObjectID: objID, - ItemType: typ, - } - if key, _, ok := o.rebuilt.Tree(ctx, treeID).Items(ctx).Search(func(key btrfsprim.Key, _ keyio.ItemPtr) int { + tgt := wantKey.Key.Key() + if key, _, ok := o.rebuilt.Tree(ctx, wantKey.TreeID).Items(ctx).Search(func(key btrfsprim.Key, _ keyio.ItemPtr) int { key.Offset = 0 return tgt.Compare(key) }); ok { @@ -53,62 +56,80 @@ func (o *rebuilder) _want(ctx context.Context, treeID btrfsprim.ObjID, wantKey s // OK, we need to insert it - if o.hasAugment(treeID, wantKey) { + if o.hasAugment(wantKey) { return btrfsprim.Key{}, false } wants := make(containers.Set[btrfsvol.LogicalAddr]) - o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx).Subrange( - func(k btrfsprim.Key, _ keyio.ItemPtr) int { k.Offset = 0; return tgt.Compare(k) }, + o.rebuilt.Tree(ctx, wantKey.TreeID).PotentialItems(ctx).Subrange( + func(k btrfsprim.Key, _ keyio.ItemPtr) int { + k.Offset = 0 + return tgt.Compare(k) + }, func(_ btrfsprim.Key, v keyio.ItemPtr) bool { - wants.InsertFrom(o.rebuilt.Tree(ctx, treeID).LeafToRoots(ctx, v.Node)) + wants.InsertFrom(o.rebuilt.Tree(ctx, wantKey.TreeID).LeafToRoots(ctx, v.Node)) return true }) - o.wantAugment(ctx, treeID, wantKey, wants) + o.wantAugment(ctx, wantKey, wants) return btrfsprim.Key{}, false } // wantOff implements rebuildCallbacks. 
func (o *rebuilder) wantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) { - key := btrfsprim.Key{ - ObjectID: objID, - ItemType: typ, - Offset: off, + wantKey := WantWithTree{ + TreeID: treeID, + Key: Want{ + ObjectID: objID, + ItemType: typ, + OffsetType: offsetExact, + OffsetLow: off, + }, } - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) - wantKey := keyAndTree{TreeID: treeID, Key: key}.String() - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) - o._wantOff(ctx, treeID, wantKey, key) + ctx = withWant(ctx, logFieldItemWant, reason, wantKey) + o._wantOff(ctx, wantKey) } -func (o *rebuilder) _wantOff(ctx context.Context, treeID btrfsprim.ObjID, wantKey string, tgt btrfsprim.Key) (ok bool) { - if o.rebuilt.Tree(ctx, treeID) == nil { +func (o *rebuilder) _wantOff(ctx context.Context, wantKey WantWithTree) (ok bool) { + if o.rebuilt.Tree(ctx, wantKey.TreeID) == nil { o.enqueueRetry() return false } // check if we already have it - if _, ok := o.rebuilt.Tree(ctx, treeID).Items(ctx).Load(tgt); ok { + tgt := wantKey.Key.Key() + if _, ok := o.rebuilt.Tree(ctx, wantKey.TreeID).Items(ctx).Load(tgt); ok { return true } // OK, we need to insert it - if o.hasAugment(treeID, wantKey) { + if o.hasAugment(wantKey) { return false } wants := make(containers.Set[btrfsvol.LogicalAddr]) - o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx).Subrange( + o.rebuilt.Tree(ctx, wantKey.TreeID).PotentialItems(ctx).Subrange( func(k btrfsprim.Key, _ keyio.ItemPtr) int { return tgt.Compare(k) }, func(_ btrfsprim.Key, v keyio.ItemPtr) bool { - wants.InsertFrom(o.rebuilt.Tree(ctx, treeID).LeafToRoots(ctx, v.Node)) + wants.InsertFrom(o.rebuilt.Tree(ctx, wantKey.TreeID).LeafToRoots(ctx, v.Node)) return true }) - o.wantAugment(ctx, treeID, wantKey, wants) + o.wantAugment(ctx, wantKey, wants) return false } -func (o *rebuilder) _wantFunc(ctx context.Context, treeID btrfsprim.ObjID, wantKey string, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(keyio.ItemPtr) bool) { +// wantDirIndex implements rebuildCallbacks. 
+func (o *rebuilder) wantDirIndex(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, name []byte) { + wantKey := WantWithTree{ + TreeID: treeID, + Key: Want{ + ObjectID: objID, + ItemType: btrfsitem.DIR_INDEX_KEY, + OffsetType: offsetName, + OffsetName: string(name), + }, + } + ctx = withWant(ctx, logFieldItemWant, reason, wantKey) + if o.rebuilt.Tree(ctx, treeID) == nil { o.enqueueRetry() return @@ -116,18 +137,15 @@ func (o *rebuilder) _wantFunc(ctx context.Context, treeID btrfsprim.ObjID, wantK // check if we already have it - tgt := btrfsprim.Key{ - ObjectID: objID, - ItemType: typ, - } + tgt := wantKey.Key.Key() found := false o.rebuilt.Tree(ctx, treeID).Items(ctx).Subrange( func(key btrfsprim.Key, _ keyio.ItemPtr) int { key.Offset = 0 return tgt.Compare(key) }, - func(_ btrfsprim.Key, itemPtr keyio.ItemPtr) bool { - if fn(itemPtr) { + func(_ btrfsprim.Key, ptr keyio.ItemPtr) bool { + if itemName, ok := o.keyIO.Names[ptr]; ok && bytes.Equal(itemName, name) { found = true } return !found @@ -138,31 +156,22 @@ func (o *rebuilder) _wantFunc(ctx context.Context, treeID btrfsprim.ObjID, wantK // OK, we need to insert it - if o.hasAugment(treeID, wantKey) { + if o.hasAugment(wantKey) { return } wants := make(containers.Set[btrfsvol.LogicalAddr]) o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx).Subrange( - func(k btrfsprim.Key, _ keyio.ItemPtr) int { k.Offset = 0; return tgt.Compare(k) }, - func(k btrfsprim.Key, v keyio.ItemPtr) bool { - if fn(v) { - wants.InsertFrom(o.rebuilt.Tree(ctx, treeID).LeafToRoots(ctx, v.Node)) + func(key btrfsprim.Key, _ keyio.ItemPtr) int { + key.Offset = 0 + return tgt.Compare(key) + }, + func(_ btrfsprim.Key, ptr keyio.ItemPtr) bool { + if itemName, ok := o.keyIO.Names[ptr]; ok && bytes.Equal(itemName, name) { + wants.InsertFrom(o.rebuilt.Tree(ctx, treeID).LeafToRoots(ctx, ptr.Node)) } return true }) - o.wantAugment(ctx, treeID, wantKey, wants) -} - -// wantDirIndex implements rebuildCallbacks. -func (o *rebuilder) wantDirIndex(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, name []byte) { - typ := btrfsitem.DIR_INDEX_KEY - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) - wantKey := fmt.Sprintf("tree=%v key={%v %v ?} name=%q", treeID, objID, typ, name) - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) - o._wantFunc(ctx, treeID, wantKey, objID, typ, func(ptr keyio.ItemPtr) bool { - itemName, ok := o.keyIO.Names[ptr] - return ok && bytes.Equal(itemName, name) - }) + o.wantAugment(ctx, wantKey, wants) } func (o *rebuilder) _walkRange( @@ -231,12 +240,20 @@ func (a gap) Compare(b gap) int { } func (o *rebuilder) _wantRange( - ctx context.Context, + ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, beg, end uint64, ) { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", - fmt.Sprintf("tree=%v key={%v %v ?}", treeID, objID, typ)) + wantKey := WantWithTree{ + TreeID: treeID, + Key: Want{ + ObjectID: objID, + ItemType: typ, + OffsetType: offsetAny, + }, + } + ctx = withWant(ctx, logFieldItemWant, reason, wantKey) + wantKey.Key.OffsetType = offsetRange if o.rebuilt.Tree(ctx, treeID) == nil { o.enqueueRetry() @@ -311,26 +328,23 @@ func (o *rebuilder) _wantRange( // TODO: This is dumb and greedy. 
if last < runBeg { // log an error - wantKey := fmt.Sprintf("tree=%v key={%v %v %v-%v}", treeID, objID, typ, last, runBeg) - if !o.hasAugment(treeID, wantKey) { - wantCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) - o.wantAugment(wantCtx, treeID, wantKey, nil) - } - } - wantKey := fmt.Sprintf("tree=%v key={%v %v %v-%v}", treeID, objID, typ, gap.Beg, gap.End) - if !o.hasAugment(treeID, wantKey) { - wantCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) - o.wantAugment(wantCtx, treeID, wantKey, o.rebuilt.Tree(wantCtx, treeID).LeafToRoots(wantCtx, v.Node)) + wantKey.Key.OffsetLow = last + wantKey.Key.OffsetHigh = runBeg + wantCtx := withWant(ctx, logFieldItemWant, reason, wantKey) + o.wantAugment(wantCtx, wantKey, nil) } + wantKey.Key.OffsetLow = gap.Beg + wantKey.Key.OffsetHigh = gap.End + wantCtx := withWant(ctx, logFieldItemWant, reason, wantKey) + o.wantAugment(wantCtx, wantKey, o.rebuilt.Tree(wantCtx, treeID).LeafToRoots(wantCtx, v.Node)) last = runEnd }) if last < gap.End { // log an error - wantKey := fmt.Sprintf("tree=%v key={%v, %v, %v-%v}", treeID, objID, typ, last, gap.End) - if !o.hasAugment(treeID, wantKey) { - wantCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", wantKey) - o.wantAugment(wantCtx, treeID, wantKey, nil) - } + wantKey.Key.OffsetLow = last + wantKey.Key.OffsetHigh = gap.End + wantCtx := withWant(ctx, logFieldItemWant, reason, wantKey) + o.wantAugment(wantCtx, wantKey, nil) } return true }) @@ -340,24 +354,23 @@ func (o *rebuilder) _wantRange( // // interval is [beg, end) func (o *rebuilder) wantCSum(ctx context.Context, reason string, inodeTree, inode btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) - - inodeKey := keyAndTree{ + inodeWant := WantWithTree{ TreeID: inodeTree, - Key: btrfsprim.Key{ - ObjectID: inode, - ItemType: btrfsitem.INODE_ITEM_KEY, - Offset: 0, + Key: Want{ + ObjectID: inode, + ItemType: btrfsitem.INODE_ITEM_KEY, + OffsetType: offsetExact, + OffsetLow: 0, }, } - inodeCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.key", inodeKey.String()) - if !o._wantOff(inodeCtx, inodeKey.TreeID, inodeKey.String(), inodeKey.Key) { + inodeCtx := withWant(ctx, logFieldItemWant, reason, inodeWant) + if !o._wantOff(inodeCtx, inodeWant) { o.enqueueRetry() return } - inodePtr, ok := o.rebuilt.Tree(inodeCtx, inodeKey.TreeID).Items(inodeCtx).Load(inodeKey.Key) + inodePtr, ok := o.rebuilt.Tree(inodeCtx, inodeTree).Items(inodeCtx).Load(inodeWant.Key.Key()) if !ok { - panic(fmt.Errorf("should not happen: could not load key: %v", inodeKey)) + panic(fmt.Errorf("should not happen: could not load key: %v", inodeWant)) } inodeFlags, ok := o.keyIO.Flags[inodePtr] if !ok { @@ -372,16 +385,16 @@ func (o *rebuilder) wantCSum(ctx context.Context, reason string, inodeTree, inod return } - beg = roundDown(beg, btrfssum.BlockSize) - end = roundUp(end, btrfssum.BlockSize) - const treeID = btrfsprim.CSUM_TREE_OBJECTID - o._wantRange(ctx, treeID, btrfsprim.EXTENT_CSUM_OBJECTID, btrfsprim.EXTENT_CSUM_KEY, - uint64(beg), uint64(end)) + o._wantRange( + ctx, reason, + btrfsprim.CSUM_TREE_OBJECTID, btrfsprim.EXTENT_CSUM_OBJECTID, btrfsprim.EXTENT_CSUM_KEY, + uint64(roundDown(beg, btrfssum.BlockSize)), uint64(roundUp(end, btrfssum.BlockSize))) } // wantFileExt implements rebuildCallbacks. 
func (o *rebuilder) wantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.want.reason", reason) - o._wantRange(ctx, treeID, ino, btrfsprim.EXTENT_DATA_KEY, + o._wantRange( + ctx, reason, + treeID, ino, btrfsprim.EXTENT_DATA_KEY, 0, uint64(size)) } diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wanttyp.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wanttyp.go new file mode 100644 index 0000000..eb1bb49 --- /dev/null +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wanttyp.go @@ -0,0 +1,107 @@ +// Copyright (C) 2022-2023 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package rebuildnodes + +import ( + "context" + "fmt" + + "github.com/datawire/dlib/dlog" + + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" + "git.lukeshu.com/btrfs-progs-ng/lib/containers" +) + +type wantOffsetType int8 + +const ( + offsetAny = wantOffsetType(iota) + offsetExact + offsetRange + offsetName +) + +type Want struct { + ObjectID btrfsprim.ObjID + ItemType btrfsprim.ItemType + OffsetType wantOffsetType + OffsetLow uint64 + OffsetHigh uint64 + OffsetName string +} + +func (a Want) Cmp(b Want) int { + if d := containers.NativeCompare(a.ObjectID, b.ObjectID); d != 0 { + return d + } + if d := containers.NativeCompare(a.ItemType, b.ItemType); d != 0 { + return d + } + if d := containers.NativeCompare(a.OffsetType, b.OffsetType); d != 0 { + return d + } + if d := containers.NativeCompare(a.OffsetLow, b.OffsetLow); d != 0 { + return d + } + if d := containers.NativeCompare(a.OffsetHigh, b.OffsetHigh); d != 0 { + return d + } + if d := containers.NativeCompare(a.OffsetName, b.OffsetName); d != 0 { + return d + } + return 0 +} + +func (o Want) Key() btrfsprim.Key { + return btrfsprim.Key{ + ObjectID: o.ObjectID, + ItemType: o.ItemType, + Offset: o.OffsetLow, + } +} + +func wantFromKey(k btrfsprim.Key) Want { + return Want{ + ObjectID: k.ObjectID, + ItemType: k.ItemType, + OffsetType: offsetExact, + OffsetLow: k.Offset, + } +} + +func (o Want) String() string { + switch o.OffsetType { + case offsetAny: + return fmt.Sprintf("{%v %v ?}", o.ObjectID, o.ItemType) + case offsetExact: + return fmt.Sprintf("{%v %v %v}", o.ObjectID, o.ItemType, o.OffsetLow) + case offsetRange: + return fmt.Sprintf("{%v %v %v-%v}", o.ObjectID, o.ItemType, o.OffsetLow, o.OffsetHigh) + case offsetName: + return fmt.Sprintf("{%v %v name=%q}", o.ObjectID, o.ItemType, o.OffsetName) + default: + panic(fmt.Errorf("should not happen: OffsetType=%#v", o.OffsetType)) + } +} + +type WantWithTree struct { + TreeID btrfsprim.ObjID + Key Want +} + +func (o WantWithTree) String() string { + return fmt.Sprintf("tree=%v key=%v", o.TreeID, o.Key) +} + +const ( + logFieldItemWant = "btrfsinspect.rebuild-nodes.rebuild.want" + logFieldTreeWant = "btrfsinspect.rebuild-nodes.rebuild.add-tree.want" +) + +func withWant(ctx context.Context, logField, reason string, wantKey WantWithTree) context.Context { + ctx = dlog.WithField(ctx, logField+".reason", reason) + ctx = dlog.WithField(ctx, logField+".key", wantKey) + return ctx +} -- cgit v1.2.3-2-g168b From a6aa47f2eb6cceb83b77477d6f9bd5f25561911f Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Fri, 6 Jan 2023 04:26:37 -0700 Subject: rebuildnodes/btrees: Take a Callbacks interface, instead of func pointers --- .../btrfsinspect/rebuildnodes/btrees/forrest.go | 28 +++++++++------------- .../btrfsinspect/rebuildnodes/btrees/tree.go | 2 +- 
.../btrfsinspect/rebuildnodes/rebuild.go | 3 +-- .../btrfsinspect/rebuildnodes/rebuild_treecb.go | 9 ++++--- 4 files changed, 19 insertions(+), 23 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go index 3eeea7f..1d3b693 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go @@ -21,6 +21,12 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/textui" ) +type Callbacks interface { + AddedItem(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) + LookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) + LookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) +} + // RebuiltForrest is an abstraction for rebuilding and accessing // potentially broken btrees. // @@ -56,11 +62,7 @@ type RebuiltForrest struct { sb btrfstree.Superblock graph pkggraph.Graph keyIO *keyio.Handle - - // static callbacks - cbAddedItem func(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) - cbLookupRoot func(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) - cbLookupUUID func(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) + cb Callbacks // mutable trees typedsync.Map[btrfsprim.ObjID, *RebuiltTree] @@ -71,20 +73,12 @@ type RebuiltForrest struct { // NewRebuiltForrest returns a new RebuiltForrest instance. All of // the callbacks must be non-nil. -func NewRebuiltForrest( - sb btrfstree.Superblock, graph pkggraph.Graph, keyIO *keyio.Handle, - cbAddedItem func(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key), - cbLookupRoot func(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool), - cbLookupUUID func(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool), -) *RebuiltForrest { +func NewRebuiltForrest(sb btrfstree.Superblock, graph pkggraph.Graph, keyIO *keyio.Handle, cb Callbacks) *RebuiltForrest { return &RebuiltForrest{ sb: sb, graph: graph, keyIO: keyIO, - - cbAddedItem: cbAddedItem, - cbLookupRoot: cbLookupRoot, - cbLookupUUID: cbLookupUUID, + cb: cb, leafs: containers.ARCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]]{ MaxLen: textui.Tunable(8), @@ -150,7 +144,7 @@ func (ts *RebuiltForrest) addTree(ctx context.Context, treeID btrfsprim.ObjID, s dlog.Error(ctx, "failed to add tree: add ROOT_TREE") return false } - rootOff, rootItem, ok := ts.cbLookupRoot(ctx, treeID) + rootOff, rootItem, ok := ts.cb.LookupRoot(ctx, treeID) if !ok { dlog.Error(ctx, "failed to add tree: lookup ROOT_ITEM") return false @@ -162,7 +156,7 @@ func (ts *RebuiltForrest) addTree(ctx context.Context, treeID btrfsprim.ObjID, s if !ts.addTree(ctx, btrfsprim.UUID_TREE_OBJECTID, stack) { return false } - parentID, ok := ts.cbLookupUUID(ctx, rootItem.ParentUUID) + parentID, ok := ts.cb.LookupUUID(ctx, rootItem.ParentUUID) if !ok { dlog.Error(ctx, "failed to add tree: lookup UUID") return false diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go index f870121..9f5a5e6 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go @@ -288,7 +288,7 @@ func (tree *RebuiltTree) AddRoot(ctx context.Context, rootNode 
btrfsvol.LogicalA progressWriter.Set(stats) for _, itemKey := range tree.forrest.graph.Nodes[leaf].Items { - tree.forrest.cbAddedItem(ctx, tree.ID, itemKey) + tree.forrest.cb.AddedItem(ctx, tree.ID, itemKey) stats.AddedItems++ progressWriter.Set(stats) } diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go index 6d610ee..ffa8d59 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go @@ -85,8 +85,7 @@ func NewRebuilder(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspec graph: nodeGraph, keyIO: keyIO, } - o.rebuilt = btrees.NewRebuiltForrest(sb, nodeGraph, keyIO, - o.cbAddedItem, o.cbLookupRoot, o.cbLookupUUID) + o.rebuilt = btrees.NewRebuiltForrest(sb, nodeGraph, keyIO, o) return o, nil } diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go index 827aa5c..ffb1ef0 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go @@ -12,7 +12,8 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" ) -func (o *rebuilder) cbAddedItem(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) { +// AddedItem implements btrees.Callbacks. +func (o *rebuilder) AddedItem(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) { if handleWouldBeNoOp(key.ItemType) { return } @@ -22,7 +23,8 @@ func (o *rebuilder) cbAddedItem(ctx context.Context, tree btrfsprim.ObjID, key b }) } -func (o *rebuilder) cbLookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) { +// LookupRoot implements btrees.Callbacks. +func (o *rebuilder) LookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) { wantKey := WantWithTree{ TreeID: btrfsprim.ROOT_TREE_OBJECTID, Key: Want{ @@ -50,7 +52,8 @@ func (o *rebuilder) cbLookupRoot(ctx context.Context, tree btrfsprim.ObjID) (off } } -func (o *rebuilder) cbLookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) { +// LookupUUID implements btrees.Callbacks. 
+func (o *rebuilder) LookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) { wantKey := WantWithTree{ TreeID: btrfsprim.UUID_TREE_OBJECTID, Key: wantFromKey(btrfsitem.UUIDToKey(uuid)), -- cgit v1.2.3-2-g168b From a4eb75f4a869b0737c919968fec5b90d9d5b1af7 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Fri, 6 Jan 2023 13:13:08 -0700 Subject: rebuildnodes: rebuild.go: Tidy up --- .../btrfsinspect/rebuildnodes/rebuild.go | 79 ++++++++++++---------- 1 file changed, 45 insertions(+), 34 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go index ffa8d59..472f6cd 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go @@ -93,21 +93,8 @@ func (o *rebuilder) ListRoots() map[btrfsprim.ObjID]containers.Set[btrfsvol.Logi return o.rebuilt.ListRoots() } -type itemStats struct { - textui.Portion[int] - NumAugments int - NumFailures int - NumAugmentTrees int -} - -func (s itemStats) String() string { - // return textui.Sprintf("%v (queued %v augments and %v failures across %v trees)", - return textui.Sprintf("%v (aug:%v fail:%v trees:%v)", - s.Portion, s.NumAugments, s.NumFailures, s.NumAugmentTrees) -} - -func (o *rebuilder) Rebuild(_ctx context.Context) error { - _ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.step", "rebuild") +func (o *rebuilder) Rebuild(ctx context.Context) error { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.step", "rebuild") // Initialize o.itemQueue = make(containers.Set[keyAndTree]) @@ -121,59 +108,80 @@ func (o *rebuilder) Rebuild(_ctx context.Context) error { btrfsprim.BLOCK_GROUP_TREE_OBJECTID, ) + // Run for passNum := 0; len(o.treeQueue) > 0 || len(o.itemQueue) > 0 || len(o.augmentQueue) > 0; passNum++ { - passCtx := dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.rebuild.pass", passNum) + ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.pass", passNum) // Crawl trees (Drain o.treeQueue, fill o.itemQueue). - if err := o.processTreeQueue(passCtx); err != nil { + if err := o.processTreeQueue(ctx); err != nil { return err } runtime.GC() // Process items (drain o.itemQueue, fill o.augmentQueue and o.treeQueue). - if err := o.processItemQueue(passCtx); err != nil { + if err := o.processItemQueue(ctx); err != nil { return err } runtime.GC() // Apply augments (drain o.augmentQueue, fill o.itemQueue). - if err := o.processAugmentQueue(passCtx); err != nil { + if err := o.processAugmentQueue(ctx); err != nil { return err } runtime.GC() } + return nil } // processTreeQueue drains o.treeQueue, filling o.itemQueue. func (o *rebuilder) processTreeQueue(ctx context.Context) error { ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "collect-items") - treeQueue := o.treeQueue + + queue := maps.SortedKeys(o.treeQueue) o.treeQueue = make(containers.Set[btrfsprim.ObjID]) + // Because trees can be wildly different sizes, it's impossible to have a meaningful // progress percentage here. 
o.curKey.Key.OK = false - for _, o.curKey.TreeID = range maps.SortedKeys(treeQueue) { + for _, o.curKey.TreeID = range queue { if err := ctx.Err(); err != nil { return err } o.rebuilt.Tree(ctx, o.curKey.TreeID) } + return nil } +type itemStats struct { + textui.Portion[int] + NumAugments int + NumFailures int + NumAugmentTrees int +} + +func (s itemStats) String() string { + // return textui.Sprintf("%v (queued %v augments and %v failures across %v trees)", + return textui.Sprintf("%v (aug:%v fail:%v trees:%v)", + s.Portion, s.NumAugments, s.NumFailures, s.NumAugmentTrees) +} + // processItemQueue drains o.itemQueue, filling o.augmentQueue and o.treeQueue. func (o *rebuilder) processItemQueue(ctx context.Context) error { ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "process-items") - itemQueue := maps.Keys(o.itemQueue) + + queue := maps.Keys(o.itemQueue) o.itemQueue = make(containers.Set[keyAndTree]) - sort.Slice(itemQueue, func(i, j int) bool { - return itemQueue[i].Compare(itemQueue[j]) < 0 + sort.Slice(queue, func(i, j int) bool { + return queue[i].Compare(queue[j]) < 0 }) + var progress itemStats - progress.D = len(itemQueue) + progress.D = len(queue) progressWriter := textui.NewProgress[itemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress) + type keyAndBody struct { keyAndTree Body btrfsitem.Item @@ -182,14 +190,14 @@ func (o *rebuilder) processItemQueue(ctx context.Context) error { grp := dgroup.NewGroup(ctx, dgroup.GroupConfig{}) grp.Go("io", func(ctx context.Context) error { defer close(itemChan) - for _, key := range itemQueue { + for _, key := range queue { if err := ctx.Err(); err != nil { return err } - itemCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.process.item", key) + ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.process.item", key) itemChan <- keyAndBody{ keyAndTree: key, - Body: o.rebuilt.Tree(itemCtx, key.TreeID).ReadItem(itemCtx, key.Key), + Body: o.rebuilt.Tree(ctx, key.TreeID).ReadItem(ctx, key.Key), } } return nil @@ -198,10 +206,10 @@ func (o *rebuilder) processItemQueue(ctx context.Context) error { defer progressWriter.Done() o.curKey.Key.OK = true for item := range itemChan { - itemCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.process.item", item.keyAndTree) + ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.process.item", item.keyAndTree) o.curKey.TreeID = item.TreeID o.curKey.Key.Val = item.Key - handleItem(o, itemCtx, item.TreeID, btrfstree.Item{ + handleItem(o, ctx, item.TreeID, btrfstree.Item{ Key: item.Key, Body: item.Body, }) @@ -222,24 +230,26 @@ func (o *rebuilder) processItemQueue(ctx context.Context) error { // processAugmentQueue drains o.augmentQueue, filling o.itemQueue. 
func (o *rebuilder) processAugmentQueue(ctx context.Context) error { ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "apply-augments") + resolvedAugments := make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], len(o.augmentQueue)) var progress textui.Portion[int] for _, treeID := range maps.SortedKeys(o.augmentQueue) { if err := ctx.Err(); err != nil { return err } - treeCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID) - resolvedAugments[treeID] = o.resolveTreeAugments(treeCtx, treeID) + ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID) + resolvedAugments[treeID] = o.resolveTreeAugments(ctx, treeID) progress.D += len(resolvedAugments[treeID]) } o.augmentQueue = make(map[btrfsprim.ObjID]*treeAugmentQueue) o.numAugments = 0 o.numAugmentFailures = 0 runtime.GC() + progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress) for _, treeID := range maps.SortedKeys(resolvedAugments) { - treeCtx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID) + ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID) for _, nodeAddr := range maps.SortedKeys(resolvedAugments[treeID]) { if err := ctx.Err(); err != nil { progressWriter.Set(progress) @@ -247,12 +257,13 @@ func (o *rebuilder) processAugmentQueue(ctx context.Context) error { return err } progressWriter.Set(progress) - o.rebuilt.Tree(treeCtx, treeID).AddRoot(treeCtx, nodeAddr) + o.rebuilt.Tree(ctx, treeID).AddRoot(ctx, nodeAddr) progress.N++ } } progressWriter.Set(progress) progressWriter.Done() + return nil } -- cgit v1.2.3-2-g168b From f99c76b597e37a94f6a23345c36f42e70e71c67c Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Fri, 6 Jan 2023 13:08:27 -0700 Subject: rebuildnodes: Add a settle-items step --- .../btrfsinspect/rebuildnodes/btrees/tree.go | 4 +- .../btrfsinspect/rebuildnodes/rebuild.go | 113 +++++++++++++++++---- .../btrfsinspect/rebuildnodes/rebuild_treecb.go | 5 +- .../btrfsinspect/rebuildnodes/rebuild_wanttyp.go | 2 +- lib/textui/log.go | 5 +- 5 files changed, 100 insertions(+), 29 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go index 9f5a5e6..6318d86 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go @@ -200,7 +200,7 @@ func (tree *RebuiltTree) items(ctx context.Context, cache containers.Map[btrfspr index.Store(itemKey, newPtr) stats.NumItems++ } else { - if tree.shouldReplace(oldPtr.Node, newPtr.Node) { + if tree.ShouldReplace(oldPtr.Node, newPtr.Node) { index.Store(itemKey, newPtr) } stats.NumDups++ @@ -218,7 +218,7 @@ func (tree *RebuiltTree) items(ctx context.Context, cache containers.Map[btrfspr }) } -func (tree *RebuiltTree) shouldReplace(oldNode, newNode btrfsvol.LogicalAddr) bool { +func (tree *RebuiltTree) ShouldReplace(oldNode, newNode btrfsvol.LogicalAddr) bool { oldDist, _ := tree.COWDistance(tree.forrest.graph.Nodes[oldNode].Owner) newDist, _ := tree.COWDistance(tree.forrest.graph.Nodes[newNode].Owner) switch { diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go index 472f6cd..9f7ce9f 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go +++ 
b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go @@ -56,7 +56,8 @@ type rebuilder struct { Key containers.Optional[btrfsprim.Key] } treeQueue containers.Set[btrfsprim.ObjID] - itemQueue containers.Set[keyAndTree] + addedItemQueue containers.Set[keyAndTree] + settledItemQueue containers.Set[keyAndTree] augmentQueue map[btrfsprim.ObjID]*treeAugmentQueue numAugments int numAugmentFailures int @@ -97,7 +98,8 @@ func (o *rebuilder) Rebuild(ctx context.Context) error { ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.step", "rebuild") // Initialize - o.itemQueue = make(containers.Set[keyAndTree]) + o.addedItemQueue = make(containers.Set[keyAndTree]) + o.settledItemQueue = make(containers.Set[keyAndTree]) o.augmentQueue = make(map[btrfsprim.ObjID]*treeAugmentQueue) // Seed the queue @@ -109,22 +111,29 @@ func (o *rebuilder) Rebuild(ctx context.Context) error { ) // Run - for passNum := 0; len(o.treeQueue) > 0 || len(o.itemQueue) > 0 || len(o.augmentQueue) > 0; passNum++ { + for passNum := 0; len(o.treeQueue) > 0 || len(o.addedItemQueue) > 0 || len(o.settledItemQueue) > 0 || len(o.augmentQueue) > 0; passNum++ { ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.pass", passNum) - // Crawl trees (Drain o.treeQueue, fill o.itemQueue). + // Crawl trees (Drain o.treeQueue, fill o.addedItemQueue). if err := o.processTreeQueue(ctx); err != nil { return err } runtime.GC() - // Process items (drain o.itemQueue, fill o.augmentQueue and o.treeQueue). - if err := o.processItemQueue(ctx); err != nil { - return err + if len(o.addedItemQueue) > 0 { + // Settle items (drain o.addedItemQueue, fill o.augmentQueue and o.settledItemQueue). + if err := o.processAddedItemQueue(ctx); err != nil { + return err + } + } else { + // Process items (drain o.settledItemQueue, fill o.augmentQueue and o.treeQueue). + if err := o.processSettledItemQueue(ctx); err != nil { + return err + } } runtime.GC() - // Apply augments (drain o.augmentQueue, fill o.itemQueue). + // Apply augments (drain o.augmentQueue, fill o.addedItemQueue). if err := o.processAugmentQueue(ctx); err != nil { return err } @@ -134,7 +143,7 @@ func (o *rebuilder) Rebuild(ctx context.Context) error { return nil } -// processTreeQueue drains o.treeQueue, filling o.itemQueue. +// processTreeQueue drains o.treeQueue, filling o.addedItemQueue. func (o *rebuilder) processTreeQueue(ctx context.Context) error { ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "collect-items") @@ -148,38 +157,98 @@ func (o *rebuilder) processTreeQueue(ctx context.Context) error { if err := ctx.Err(); err != nil { return err } - o.rebuilt.Tree(ctx, o.curKey.TreeID) + // This will call o.AddedItem as nescessary, which + // inserts to o.addedItemQueue. + _ = o.rebuilt.Tree(ctx, o.curKey.TreeID) + } + + return nil +} + +type settleItemStats struct { + textui.Portion[int] + NumAugments int + NumAugmentTrees int +} + +func (s settleItemStats) String() string { + // return textui.Sprintf("%v (queued %v augments across %v trees)", + return textui.Sprintf("%v (aug:%v trees:%v)", + s.Portion, s.NumAugments, s.NumAugmentTrees) +} + +// processAddedItemQueue drains o.addedItemQueue, filling o.augmentQueue and o.settledItemQueue. 
+func (o *rebuilder) processAddedItemQueue(ctx context.Context) error { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "settle-items") + + queue := maps.Keys(o.addedItemQueue) + o.addedItemQueue = make(containers.Set[keyAndTree]) + sort.Slice(queue, func(i, j int) bool { + return queue[i].Compare(queue[j]) < 0 + }) + + var progress settleItemStats + progress.D = len(queue) + progressWriter := textui.NewProgress[settleItemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress) + + for i, key := range queue { + progress.N = i + progressWriter.Set(progress) + + ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.settle.item", key) + tree := o.rebuilt.Tree(ctx, key.TreeID) + incPtr, ok := tree.Items(ctx).Load(key.Key) + if !ok { + panic(fmt.Errorf("should not happen: failed to load already-added item: %v", key)) + } + excPtr, ok := tree.PotentialItems(ctx).Load(key.Key) + if ok && tree.ShouldReplace(incPtr.Node, excPtr.Node) { + wantKey := WantWithTree{ + TreeID: key.TreeID, + Key: wantFromKey(key.Key), + } + o.wantAugment(ctx, wantKey, tree.LeafToRoots(ctx, excPtr.Node)) + progress.NumAugments = o.numAugments + progress.NumAugmentTrees = len(o.augmentQueue) + progressWriter.Set(progress) + } else if !handleWouldBeNoOp(key.ItemType) { + o.settledItemQueue.Insert(key) + } } + progress.N = len(queue) + progressWriter.Set(progress) + progressWriter.Done() return nil } -type itemStats struct { +type processItemStats struct { textui.Portion[int] NumAugments int NumFailures int NumAugmentTrees int } -func (s itemStats) String() string { +func (s processItemStats) String() string { // return textui.Sprintf("%v (queued %v augments and %v failures across %v trees)", return textui.Sprintf("%v (aug:%v fail:%v trees:%v)", s.Portion, s.NumAugments, s.NumFailures, s.NumAugmentTrees) } -// processItemQueue drains o.itemQueue, filling o.augmentQueue and o.treeQueue. -func (o *rebuilder) processItemQueue(ctx context.Context) error { +// processSettledItemQueue drains o.settledItemQueue, filling o.augmentQueue and o.treeQueue. +func (o *rebuilder) processSettledItemQueue(ctx context.Context) error { ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "process-items") - queue := maps.Keys(o.itemQueue) - o.itemQueue = make(containers.Set[keyAndTree]) + queue := maps.Keys(o.settledItemQueue) + o.settledItemQueue = make(containers.Set[keyAndTree]) sort.Slice(queue, func(i, j int) bool { return queue[i].Compare(queue[j]) < 0 }) - var progress itemStats + var progress processItemStats progress.D = len(queue) - progressWriter := textui.NewProgress[itemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) + progressWriter := textui.NewProgress[processItemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress) type keyAndBody struct { @@ -227,7 +296,7 @@ func (o *rebuilder) processItemQueue(ctx context.Context) error { return grp.Wait() } -// processAugmentQueue drains o.augmentQueue, filling o.itemQueue. +// processAugmentQueue drains o.augmentQueue, filling o.addedItemQueue. 
func (o *rebuilder) processAugmentQueue(ctx context.Context) error { ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "apply-augments") @@ -257,6 +326,8 @@ func (o *rebuilder) processAugmentQueue(ctx context.Context) error { return err } progressWriter.Set(progress) + // This will call o.AddedItem as nescessary, which + // inserts to o.addedItemQueue. o.rebuilt.Tree(ctx, treeID).AddRoot(ctx, nodeAddr) progress.N++ } @@ -269,7 +340,7 @@ func (o *rebuilder) processAugmentQueue(ctx context.Context) error { func (o *rebuilder) enqueueRetry() { if o.curKey.Key.OK { - o.itemQueue.Insert(keyAndTree{ + o.settledItemQueue.Insert(keyAndTree{ TreeID: o.curKey.TreeID, Key: o.curKey.Key.Val, }) @@ -410,7 +481,7 @@ func (o *rebuilder) resolveTreeAugments(ctx context.Context, treeID btrfsprim.Ob maps.Keys(o.augmentQueue[treeID].single), maps.Keys(o.augmentQueue[treeID].multi)...) sort.Slice(wantKeys, func(i, j int) bool { - return wantKeys[i].Cmp(wantKeys[j]) < 0 + return wantKeys[i].Compare(wantKeys[j]) < 0 }) for _, wantKey := range wantKeys { list, ok := o.augmentQueue[treeID].multi[wantKey] diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go index ffb1ef0..eb0204c 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go @@ -14,10 +14,7 @@ import ( // AddedItem implements btrees.Callbacks. func (o *rebuilder) AddedItem(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) { - if handleWouldBeNoOp(key.ItemType) { - return - } - o.itemQueue.Insert(keyAndTree{ + o.addedItemQueue.Insert(keyAndTree{ TreeID: tree, Key: key, }) diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wanttyp.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wanttyp.go index eb1bb49..2b471fe 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wanttyp.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wanttyp.go @@ -32,7 +32,7 @@ type Want struct { OffsetName string } -func (a Want) Cmp(b Want) int { +func (a Want) Compare(b Want) int { if d := containers.NativeCompare(a.ObjectID, b.ObjectID); d != 0 { return d } diff --git a/lib/textui/log.go b/lib/textui/log.go index 2bcd9af..b4dcea4 100644 --- a/lib/textui/log.go +++ b/lib/textui/log.go @@ -311,7 +311,10 @@ func fieldOrd(key string) int { case "btrfsinspect.rebuild-nodes.rebuild.substep.progress": return -47 // step=rebuild, substep=collect-items (1/3) - // step=rebuild, substep=process-items (2/3) + // step=rebuild, substep=settle-items (2a/3) + case "btrfsinspect.rebuild-nodes.rebuild.settle.item": + return -25 + // step=rebuild, substep=process-items (2b/3) case "btrfsinspect.rebuild-nodes.rebuild.process.item": return -25 // step=rebuild, substep=apply-augments (3/3) -- cgit v1.2.3-2-g168b From 6097c4c634b8644e4ac1e0520f39a6bc501d227d Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sat, 7 Jan 2023 14:03:47 -0700 Subject: rebuildnodes: Don't deadlock if the cpu goroutine panics --- lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go index 9f7ce9f..bc4d7c9 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go @@ -264,10 +264,14 @@ func (o *rebuilder) processSettledItemQueue(ctx context.Context) 
error { return err } ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.process.item", key) - itemChan <- keyAndBody{ + item := keyAndBody{ keyAndTree: key, Body: o.rebuilt.Tree(ctx, key.TreeID).ReadItem(ctx, key.Key), } + select { + case itemChan <- item: + case <-ctx.Done(): + } } return nil }) -- cgit v1.2.3-2-g168b From 26a5fcfca56bb8041223078ea04768de9df8378f Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sun, 8 Jan 2023 14:31:49 -0700 Subject: rebuildnodes/btrees: Fuss with logging --- lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go | 1 + 1 file changed, 1 insertion(+) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go index 6318d86..b13bb98 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go @@ -270,6 +270,7 @@ func (tree *RebuiltTree) AddRoot(ctx context.Context, rootNode btrfsvol.LogicalA tree.mu.Lock() defer tree.mu.Unlock() ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-root", fmt.Sprintf("tree=%v rootNode=%v", tree.ID, rootNode)) + dlog.Info(ctx, "adding root...") leafToRoots := tree.leafToRoots(ctx) -- cgit v1.2.3-2-g168b From 6945af3e19a9c3ee52229463de4c136ebca44a7e Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sun, 8 Jan 2023 18:02:07 -0700 Subject: rebuildnodes/btrees: Move cache-flush code from tree.go to forrest.go --- lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go | 11 ++++++++++- lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go | 7 +------ 2 files changed, 11 insertions(+), 7 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go index 1d3b693..4f4e05d 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go @@ -112,7 +112,7 @@ func (ts *RebuiltForrest) addTree(ctx context.Context, treeID btrfsprim.ObjID, s defer func() { if !ok { // Store a negative cache of this. tree.AddRoot() for the ROOT or UUID - // trees will invalidate the negative cache. + // trees will call .flushNegativeCache(). ts.trees.Store(treeID, nil) } }() @@ -177,6 +177,15 @@ func (ts *RebuiltForrest) addTree(ctx context.Context, treeID btrfsprim.ObjID, s return true } +func (ts *RebuiltForrest) flushNegativeCache() { + ts.trees.Range(func(treeID btrfsprim.ObjID, tree *RebuiltTree) bool { + if tree == nil { + ts.trees.Delete(treeID) + } + return true + }) +} + // ListRoots returns a listing of all initialized trees and their root // nodes. 
// diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go index b13bb98..d64d7d2 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go @@ -303,12 +303,7 @@ func (tree *RebuiltTree) AddRoot(ctx context.Context, rootNode btrfsvol.LogicalA tree.forrest.excItems.Delete(tree.ID) // force re-gen if (tree.ID == btrfsprim.ROOT_TREE_OBJECTID || tree.ID == btrfsprim.UUID_TREE_OBJECTID) && stats.AddedItems > 0 { - tree.forrest.trees.Range(func(otherTreeID btrfsprim.ObjID, otherTree *RebuiltTree) bool { - if otherTree == nil { - tree.forrest.trees.Delete(otherTreeID) - } - return true - }) + tree.forrest.flushNegativeCache() } } -- cgit v1.2.3-2-g168b From 2b72cef74ea368b39d976866944d8279293be9de Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sun, 8 Jan 2023 18:02:07 -0700 Subject: rebuildnodes/btrees: Have the concurrency story for .trees make more sense --- .../btrfsinspect/rebuildnodes/btrees/forrest.go | 38 ++++++++++-------- .../btrfsinspect/rebuildnodes/btrees/nestedlock.go | 45 ++++++++++++++++++++++ .../btrfsinspect/rebuildnodes/btrees/tree.go | 2 +- .../btrfsinspect/rebuildnodes/rebuild.go | 6 +-- 4 files changed, 70 insertions(+), 21 deletions(-) create mode 100644 lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/nestedlock.go (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go index 4f4e05d..9ec2849 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go @@ -7,7 +7,6 @@ package btrees import ( "context" - "git.lukeshu.com/go/typedsync" "github.com/datawire/dlib/dlog" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" @@ -65,7 +64,8 @@ type RebuiltForrest struct { cb Callbacks // mutable - trees typedsync.Map[btrfsprim.ObjID, *RebuiltTree] + treesMu nestedMutex + trees map[btrfsprim.ObjID]*RebuiltTree // must hold .treesMu to access leafs containers.ARCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]] incItems containers.ARCache[btrfsprim.ObjID, *itemIndex] excItems containers.ARCache[btrfsprim.ObjID, *itemIndex] @@ -80,6 +80,7 @@ func NewRebuiltForrest(sb btrfstree.Superblock, graph pkggraph.Graph, keyIO *key keyIO: keyIO, cb: cb, + trees: make(map[btrfsprim.ObjID]*RebuiltTree), leafs: containers.ARCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]]{ MaxLen: textui.Tunable(8), }, @@ -98,22 +99,23 @@ func NewRebuiltForrest(sb btrfstree.Superblock, graph pkggraph.Graph, keyIO *key // // The tree is initialized with the normal root node of the tree. func (ts *RebuiltForrest) Tree(ctx context.Context, treeID btrfsprim.ObjID) *RebuiltTree { + ctx = ts.treesMu.Lock(ctx) + defer ts.treesMu.Unlock() if !ts.addTree(ctx, treeID, nil) { return nil } - tree, _ := ts.trees.Load(treeID) - return tree + return ts.trees[treeID] } func (ts *RebuiltForrest) addTree(ctx context.Context, treeID btrfsprim.ObjID, stack []btrfsprim.ObjID) (ok bool) { - if tree, ok := ts.trees.Load(treeID); ok { + if tree, ok := ts.trees[treeID]; ok { return tree != nil } defer func() { if !ok { // Store a negative cache of this. tree.AddRoot() for the ROOT or UUID // trees will call .flushNegativeCache(). 
- ts.trees.Store(treeID, nil) + ts.trees[treeID] = nil } }() stack = append(stack, treeID) @@ -165,11 +167,11 @@ func (ts *RebuiltForrest) addTree(ctx context.Context, treeID btrfsprim.ObjID, s dlog.Error(ctx, "failed to add tree: add parent tree") return false } - tree.Parent, _ = ts.trees.Load(parentID) + tree.Parent = ts.trees[parentID] } } - ts.trees.Store(treeID, tree) + ts.trees[treeID] = tree if root != 0 { tree.AddRoot(ctx, root) } @@ -177,13 +179,14 @@ func (ts *RebuiltForrest) addTree(ctx context.Context, treeID btrfsprim.ObjID, s return true } -func (ts *RebuiltForrest) flushNegativeCache() { - ts.trees.Range(func(treeID btrfsprim.ObjID, tree *RebuiltTree) bool { +func (ts *RebuiltForrest) flushNegativeCache(ctx context.Context) { + _ = ts.treesMu.Lock(ctx) + defer ts.treesMu.Unlock() + for treeID, tree := range ts.trees { if tree == nil { - ts.trees.Delete(treeID) + delete(ts.trees, treeID) } - return true - }) + } } // ListRoots returns a listing of all initialized trees and their root @@ -191,13 +194,14 @@ func (ts *RebuiltForrest) flushNegativeCache() { // // Do not mutate the set of roots for a tree; it is a pointer to the // RebuiltForrest's internal set! -func (ts *RebuiltForrest) ListRoots() map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr] { +func (ts *RebuiltForrest) ListRoots(ctx context.Context) map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr] { + _ = ts.treesMu.Lock(ctx) + defer ts.treesMu.Unlock() ret := make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr]) - ts.trees.Range(func(treeID btrfsprim.ObjID, tree *RebuiltTree) bool { + for treeID, tree := range ts.trees { if tree != nil { ret[treeID] = tree.Roots } - return true - }) + } return ret } diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/nestedlock.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/nestedlock.go new file mode 100644 index 0000000..c1ffa18 --- /dev/null +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/nestedlock.go @@ -0,0 +1,45 @@ +// Copyright (C) 2023 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package btrees + +import ( + "context" + "sync" +) + +// A nestedMutex is like a sync.Mutex, but while it is locked by call +// 'A', may be simultaneously locked by subsequent calls if the +// subsequent calls use a Context descended from the one returned by +// the 'A' call to .Lock(). +type nestedMutex struct { + inner sync.Mutex + depth int +} + +type nestedMutexCtxKey struct{} + +// Lock locks the mutex. It is invalid to use a Context returned from +// Lock in a different goroutine than the one it was created in. It +// is invalid to use a Context returned from Lock after the mutex has +// subsequently become unlocked. +func (m *nestedMutex) Lock(ctx context.Context) context.Context { + if other, ok := ctx.Value(nestedMutexCtxKey{}).(*nestedMutex); ok && other == m { + m.depth++ + return ctx + } + m.inner.Lock() + return context.WithValue(ctx, nestedMutexCtxKey{}, m) +} + +// Unlock unlocks the mutex. It is invalid to call Unlock if the +// mutex is not already locked. It is invalid to call Unlock from +// multiple goroutines simultaneously. 
+func (m *nestedMutex) Unlock() { + if m.depth > 0 { + m.depth-- + } else { + m.inner.Unlock() + } +} diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go index d64d7d2..9ddd3c5 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go @@ -303,7 +303,7 @@ func (tree *RebuiltTree) AddRoot(ctx context.Context, rootNode btrfsvol.LogicalA tree.forrest.excItems.Delete(tree.ID) // force re-gen if (tree.ID == btrfsprim.ROOT_TREE_OBJECTID || tree.ID == btrfsprim.UUID_TREE_OBJECTID) && stats.AddedItems > 0 { - tree.forrest.flushNegativeCache() + tree.forrest.flushNegativeCache(ctx) } } diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go index bc4d7c9..dc78c2e 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go @@ -71,7 +71,7 @@ type treeAugmentQueue struct { type Rebuilder interface { Rebuild(context.Context) error - ListRoots() map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr] + ListRoots(context.Context) map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr] } func NewRebuilder(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) (Rebuilder, error) { @@ -90,8 +90,8 @@ func NewRebuilder(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspec return o, nil } -func (o *rebuilder) ListRoots() map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr] { - return o.rebuilt.ListRoots() +func (o *rebuilder) ListRoots(ctx context.Context) map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr] { + return o.rebuilt.ListRoots(ctx) } func (o *rebuilder) Rebuild(ctx context.Context) error { -- cgit v1.2.3-2-g168b From e85771c7b6b89dd84b636d75d182fc0910ce87de Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Wed, 11 Jan 2023 17:50:44 -0700 Subject: rebuildnodes: Load nodes in order Should speed things up, and should fix some determinism issues. 
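A note on the idiom this change depends on: ranging over a Go map visits keys in a randomized order, so iterating FoundNodes directly is nondeterministic and scatters node reads across the disk, while sorting the keys first makes every run process nodes in the same ascending order. The sketch below is illustrative only; the real helper lives in lib/maps and is presumably generic over key types, so its exact signature may differ. It is specialized to int64 keys here just to stay self-contained, and the hunk below switches the scan loop to the equivalent maps.SortedKeys call.

package main

import (
	"fmt"
	"sort"
)

// sortedKeys returns the keys of m in ascending order so that callers can
// iterate the map deterministically; Go's built-in range order over a map
// is deliberately randomized between runs.
func sortedKeys(m map[int64]struct{}) []int64 {
	keys := make([]int64, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
	return keys
}

func main() {
	found := map[int64]struct{}{1048576: {}, 65536: {}, 262144: {}}
	for _, laddr := range sortedKeys(found) {
		fmt.Println(laddr) // always prints 65536, then 262144, then 1048576
	}
}
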
--- lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go index 632ed70..17949ab 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go @@ -17,6 +17,7 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph" "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio" "git.lukeshu.com/btrfs-progs-ng/lib/containers" + "git.lukeshu.com/btrfs-progs-ng/lib/maps" "git.lukeshu.com/btrfs-progs-ng/lib/textui" ) @@ -40,7 +41,7 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.Sca progressWriter.Set(stats) for _, devResults := range scanResults { - for laddr := range devResults.FoundNodes { + for _, laddr := range maps.SortedKeys(devResults.FoundNodes) { if err := ctx.Err(); err != nil { return btrfstree.Superblock{}, graph.Graph{}, nil, err } -- cgit v1.2.3-2-g168b From 3d12765e7433825bb4ac3cd8e9b9130ce3528840 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Mon, 13 Feb 2023 01:44:34 -0700 Subject: rebuildnodes/graph: Fix typo in a comment --- lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go index c04fec0..2a97ec8 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go @@ -44,7 +44,7 @@ func (n Node) String() string { } type Edge struct { - // It is invalid both 'FromRoot' and 'FromNode' to be + // It is invalid for both 'FromRoot' and 'FromNode' to be // non-zero. If both are zero, then the Edge is from the // superblock. 
FromRoot btrfsvol.LogicalAddr -- cgit v1.2.3-2-g168b From 25cbf5fbe8c5be5f3c3dabd694668fa7454a05b9 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Mon, 13 Feb 2023 01:44:29 -0700 Subject: rebuildnodes/btrees: slices.RemoveAllFunc mutates the source slice --- lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'lib') diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go index 9ddd3c5..eab3eb2 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go @@ -15,7 +15,6 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - pkggraph "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph" "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio" "git.lukeshu.com/btrfs-progs-ng/lib/containers" "git.lukeshu.com/btrfs-progs-ng/lib/maps" @@ -99,10 +98,10 @@ func (tree *RebuiltTree) indexNode(ctx context.Context, node btrfsvol.LogicalAdd // tree.leafToRoots stack = append(stack, node) var roots containers.Set[btrfsvol.LogicalAddr] - kps := slices.RemoveAllFunc(tree.forrest.graph.EdgesTo[node], func(kp *pkggraph.Edge) bool { - return !tree.isOwnerOK(tree.forrest.graph.Nodes[kp.FromNode].Owner, tree.forrest.graph.Nodes[kp.FromNode].Generation) - }) - for _, kp := range kps { + for _, kp := range tree.forrest.graph.EdgesTo[node] { + if !tree.isOwnerOK(tree.forrest.graph.Nodes[kp.FromNode].Owner, tree.forrest.graph.Nodes[kp.FromNode].Generation) { + continue + } tree.indexNode(ctx, kp.FromNode, index, progress, stack) if len(index[kp.FromNode]) > 0 { if roots == nil { -- cgit v1.2.3-2-g168b
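
The last change above deserves a word of explanation. An in-place filter builds its result by reusing the input's backing array, so filtering tree.forrest.graph.EdgesTo[node] rewrites the shared edge list that later callers read; skipping unwanted edges inside the loop, as the patch now does, leaves the source slice untouched. The following is a minimal sketch of that difference, under the assumption that the project's slices.RemoveAllFunc behaves like the in-place filter shown here; its exact implementation may differ.

package main

import "fmt"

// removeAllFunc filters s in place: it reuses s's backing array, so the
// caller's view of the original data gets clobbered. This is only an
// illustration of the failure mode named in the commit message; the
// project's slices.RemoveAllFunc may differ in detail.
func removeAllFunc(s []int, drop func(int) bool) []int {
	kept := s[:0]
	for _, v := range s {
		if !drop(v) {
			kept = append(kept, v)
		}
	}
	return kept
}

func main() {
	edges := []int{1, 2, 3, 4}

	kept := removeAllFunc(edges, func(v int) bool { return v%2 == 0 })
	fmt.Println(kept)  // [1 3]
	fmt.Println(edges) // [1 3 3 4]: the shared source slice was rewritten

	// The pattern the patch switches to: leave the source alone and just
	// skip unwanted entries while iterating.
	edges = []int{1, 2, 3, 4}
	for _, v := range edges {
		if v%2 == 0 {
			continue // analogous to the !tree.isOwnerOK(...) check
		}
		fmt.Println(v) // prints 1, then 3; edges itself is untouched
	}
}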