summaryrefslogtreecommitdiff
path: root/lib/btrfsprogs/btrfsinspect/rebuildnodes
diff options
context:
space:
mode:
Diffstat (limited to 'lib/btrfsprogs/btrfsinspect/rebuildnodes')
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuildnodes.go102
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuilttrees.go89
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit.go139
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit_test.go82
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s4_reattach.go161
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go157
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go (renamed from lib/btrfsprogs/btrfsinspect/rebuildnodes/loops.go)57
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/orphans.go166
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go619
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go338
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go206
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/treeancestors.go76
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go42
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/uuidmap.go128
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/uuidmap/scan.go73
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/uuidmap/uuidmap.go55
-rw-r--r--lib/btrfsprogs/btrfsinspect/rebuildnodes/visualizenodes.go300
17 files changed, 1481 insertions, 1309 deletions
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuildnodes.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuildnodes.go
deleted file mode 100644
index 1a9f487..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuildnodes.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-/*
-import (
- "context"
- "errors"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsutil"
- "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
-)
-
-func RebuildNodes(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) (map[btrfsvol.LogicalAddr]*RebuiltNode, error) {
- uuidMap, err := buildUUIDMap(ctx, fs, nodeScanResults)
- if err != nil {
- return nil, err
- }
-
- nfs := &RebuiltTrees{
- inner: fs,
- uuidMap: uuidMap,
- }
-
- orphanedNodes, badNodes, treeAncestors, err := classifyNodes(ctx, nfs, nodeScanResults)
- if err != nil {
- return nil, err
- }
-
- uuidMap.considerAncestors(ctx, treeAncestors)
-
- rebuiltNodes, err := reInitBrokenNodes(ctx, nfs, badNodes)
- if err != nil {
- return nil, err
- }
-
- if err := reAttachNodes(ctx, nfs, orphanedNodes, rebuiltNodes); err != nil {
- return nil, err
- }
-
- return rebuiltNodes, nil
-}
-
-func spanOfTreePath(fs _FS, path btrfstree.TreePath) (btrfsprim.Key, btrfsprim.Key) {
- // tree root error
- if len(path) == 0 {
- return btrfsprim.Key{}, maxKey
- }
-
- // item error
- if path.Node(-1).ToNodeAddr == 0 {
- // If we got an item error, then the node is readable
- node, _ := fs.ReadNode(path.Parent())
- key := node.Data.BodyLeaf[path.Node(-1).FromItemIdx].Key
- return key, key
- }
-
- // node error
- //
- // assume that path.Node(-1).ToNodeAddr is not readable, but that path.Node(-2).ToNodeAddr is.
- if len(path) == 1 {
- return btrfsprim.Key{}, maxKey
- }
- parentNode, _ := fs.ReadNode(path.Parent())
- low := parentNode.Data.BodyInternal[path.Node(-1).FromItemIdx].Key
- var high btrfsprim.Key
- if path.Node(-1).FromItemIdx+1 < len(parentNode.Data.BodyInternal) {
- high = keyMm(parentNode.Data.BodyInternal[path.Node(-1).FromItemIdx+1].Key)
- } else {
- parentPath := path.Parent().DeepCopy()
- _, high = spanOfTreePath(fs, parentPath)
- }
- return low, high
-}
-
-func getChunkTreeUUID(ctx context.Context, fs _FS) (btrfsprim.UUID, bool) {
- ctx, cancel := context.WithCancel(ctx)
- var ret btrfsprim.UUID
- var retOK bool
- btrfsutil.WalkAllTrees(ctx, fs, btrfsutil.WalkAllTreesHandler{
- TreeWalkHandler: btrfstree.TreeWalkHandler{
- Node: func(path btrfstree.TreePath, node *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) error {
- ret = node.Data.Head.ChunkTreeUUID
- retOK = true
- cancel()
- return nil
- },
- },
- Err: func(err *btrfsutil.WalkError) {
- // do nothing
- },
- })
- return ret, retOK
-}
-*/
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuilttrees.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuilttrees.go
deleted file mode 100644
index 7e1a7e9..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuilttrees.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-/*
-import (
- "context"
- "fmt"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
-)
-
-type RebuiltTrees struct {
- inner *btrfs.FS
- uuidMap uuidMap
- nodes map[btrfsvol.LogicalAddr]*RebuiltNode
-}
-
-type _FS interface {
- diskio.File[btrfsvol.LogicalAddr]
- btrfstree.NodeFile
- btrfstree.NodeSource
- btrfstree.TreeOperator
-}
-
-// diskio.File
-
-func (fs *RebuiltTrees) Name() string { return fs.inner.Name() }
-func (fs *RebuiltTrees) Size() btrfsvol.LogicalAddr { return fs.inner.Size() }
-func (fs *RebuiltTrees) Close() error { return fs.inner.Close() }
-func (fs *RebuiltTrees) ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error) {
- sb, err := fs.Superblock()
- if err != nil {
- return 0, err
- }
- if rebuilt, ok := fs.nodes[off]; ok && len(p) == int(sb.NodeSize) {
- rebuilt.Node.Head.Checksum, err = rebuilt.Node.CalculateChecksum()
- if err != nil {
- panic(fmt.Errorf("should not happen: %w", err))
- }
- bs, err := rebuilt.Node.MarshalBinary()
- if err != nil {
- panic(fmt.Errorf("should not happen: %w", err))
- }
- if len(p) != len(bs) {
- panic(fmt.Errorf("should not happen: sb.NodeSize=%v but node marshaled to %v", sb.NodeSize, len(bs)))
- }
- return copy(p, bs), nil
- }
- return fs.inner.ReadAt(p, off)
-}
-func (fs *RebuiltTrees) WriteAt(p []byte, off btrfsvol.LogicalAddr) (int, error) {
- return fs.inner.WriteAt(p, off)
-}
-
-// btrfstree.NodeFile
-
-func (fs *RebuiltTrees) ParentTree(tree btrfsprim.ObjID) (btrfsprim.ObjID, bool) {
- return fs.uuidMap.ParentTree(tree)
-}
-
-// btrfstree.NodeSource
-
-func (fs *RebuiltTrees) Superblock() (*btrfstree.Superblock, error) { return fs.inner.Superblock() }
-func (fs *RebuiltTrees) ReadNode(path btrfstree.TreePath) (*diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node], error) {
- return btrfstree.FSReadNode(fs, path)
-}
-
-// btrfstree.TreeOperator
-
-func (fs *RebuiltTrees) TreeWalk(ctx context.Context, treeID btrfsprim.ObjID, errHandle func(*btrfstree.TreeError), cbs btrfstree.TreeWalkHandler) {
- btrfstree.TreeOperatorImpl{NodeSource: fs}.TreeWalk(ctx, treeID, errHandle, cbs)
-}
-func (fs *RebuiltTrees) TreeLookup(treeID btrfsprim.ObjID, key btrfsprim.Key) (btrfstree.Item, error) {
- return btrfstree.TreeOperatorImpl{NodeSource: fs}.TreeLookup(treeID, key)
-}
-func (fs *RebuiltTrees) TreeSearch(treeID btrfsprim.ObjID, fn func(key btrfsprim.Key, size uint32) int) (btrfstree.Item, error) {
- return btrfstree.TreeOperatorImpl{NodeSource: fs}.TreeSearch(treeID, fn)
-}
-func (fs *RebuiltTrees) TreeSearchAll(treeID btrfsprim.ObjID, fn func(key btrfsprim.Key, size uint32) int) ([]btrfstree.Item, error) {
- return btrfstree.TreeOperatorImpl{NodeSource: fs}.TreeSearchAll(treeID, fn)
-}
-*/
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit.go
deleted file mode 100644
index 5983e2f..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-/*
-import (
- "fmt"
- "reflect"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/slices"
-)
-
-type RebuiltNode struct {
- Errs containers.Set[string]
- MinKey, MaxKey btrfsprim.Key
- InTrees containers.Set[btrfsprim.ObjID]
- btrfstree.Node
-}
-
-func (a RebuiltNode) Compat(b RebuiltNode) bool {
- a.Node.Head.Generation = b.Node.Head.Generation
- return reflect.DeepEqual(a.Node, b.Node)
-}
-
-func (a RebuiltNode) Merge(b RebuiltNode) (RebuiltNode, error) {
- if !a.Compat(b) {
- switch {
- case a.Node.Head.Generation > b.Node.Head.Generation:
- return a, nil
- case a.Node.Head.Generation < b.Node.Head.Generation:
- return b, nil
- default:
- return a, fmt.Errorf("mismatch: %v != %v", a, b)
- }
- }
-
- // take the broadest region
- if a.MinKey.Cmp(b.MinKey) > 0 { // if a.MinKey > b.MinKey {
- a.MinKey = b.MinKey // take the min of the two
- }
- if a.MaxKey.Cmp(b.MaxKey) < 0 { // if a.MaxKey < b.MaxKey {
- a.MaxKey = b.MaxKey // take the min of the two
- }
-
- // take the highest generation
- a.Node.Head.Generation = slices.Max(a.Node.Head.Generation, b.Node.Head.Generation)
-
- // take the union
- a.InTrees.InsertFrom(b.InTrees)
- a.Errs.InsertFrom(b.Errs)
-
- return a, nil
-}
-
-func reInitBrokenNodes(ctx context.Context, fs _FS, badNodes []badNode) (map[btrfsvol.LogicalAddr]*RebuiltNode, error) {
- dlog.Info(ctx, "Re-initializing bad nodes...")
-
- sb, err := fs.Superblock()
- if err != nil {
- return nil, err
- }
- chunkTreeUUID, ok := getChunkTreeUUID(ctx, fs)
- if !ok {
- return nil, fmt.Errorf("could not look up chunk tree UUID")
- }
-
- sort.Slice(badNodes, func(i, j int) bool {
- iGen := badNodes[i].Path.Node(-1).ToNodeGeneration
- jGen := badNodes[j].Path.Node(-1).ToNodeGeneration
- switch {
- case iGen < jGen:
- return true
- case iGen > jGen:
- return false
- default:
- iAddr := badNodes[i].Path.Node(-1).ToNodeAddr
- jAddr := badNodes[j].Path.Node(-1).ToNodeAddr
- return iAddr < jAddr
- }
- })
-
- lastPct := -1
- progress := func(done int) {
- pct := int(100 * float64(done) / float64(len(badNodes)))
- if pct != lastPct || done == len(badNodes) {
- dlog.Infof(ctx, "... %v%% (%v/%v)",
- pct, done, len(badNodes))
- lastPct = pct
- }
- }
-
- rebuiltNodes := make(map[btrfsvol.LogicalAddr]*RebuiltNode)
- for i, badNode := range badNodes {
- progress(i)
- path := badNode.Path
-
- min, max := spanOfTreePath(fs, path)
- node := RebuiltNode{
- Errs: containers.NewSet[string](
- badNode.Err,
- ),
- MinKey: min,
- MaxKey: max,
- InTrees: containers.NewSet[btrfsprim.ObjID](
- path.Node(-1).FromTree,
- ),
- Node: btrfstree.Node{
- Size: sb.NodeSize,
- ChecksumType: sb.ChecksumType,
- Head: btrfstree.NodeHeader{
- MetadataUUID: sb.EffectiveMetadataUUID(),
- Addr: path.Node(-1).ToNodeAddr,
- ChunkTreeUUID: chunkTreeUUID,
- //Owner: TBD, // see RebuiltNode.InTrees
- Generation: path.Node(-1).ToNodeGeneration,
- Level: path.Node(-1).ToNodeLevel,
- },
- },
- }
- if other, ok := rebuiltNodes[path.Node(-1).ToNodeAddr]; ok {
- *other, err = other.Merge(node)
- if err != nil {
- dlog.Errorf(ctx, "... %v", err)
- }
- } else {
- rebuiltNodes[path.Node(-1).ToNodeAddr] = &node
- }
- }
- progress(len(badNodes))
-
- dlog.Infof(ctx, "... initialized %d nodes", len(rebuiltNodes))
- return rebuiltNodes, nil
-}
-*/
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit_test.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit_test.go
deleted file mode 100644
index 865cd7e..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes_test
-
-/*
-import (
- "strings"
- "testing"
-
- "git.lukeshu.com/go/lowmemjson"
- "github.com/stretchr/testify/assert"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
-)
-
-func TestEncodeRebuiltNodes(t *testing.T) {
- dat := map[btrfsvol.LogicalAddr]*rebuildnodes.RebuiltNode{
- 100007133184: {
- Errs: containers.NewSet[string](
- "btrfs.ReadNode: node@0x0000001748e3c000: expected generation\u003c=6596014 but claims to be generation=6596025",
- ),
- MinKey: btrfsprim.Key{},
- MaxKey: btrfsprim.Key{},
- InTrees: containers.NewSet[btrfsprim.ObjID](
- 257,
- ),
- Node: btrfstree.Node{},
- },
- }
- var buf strings.Builder
- assert.NoError(t, lowmemjson.Encode(&lowmemjson.ReEncoder{
- Out: &buf,
-
- Indent: "\t",
- ForceTrailingNewlines: true,
- }, dat))
- assert.Equal(t, `{
- "100007133184": {
- "Errs": [
- "btrfs.ReadNode: node@0x0000001748e3c000: expected generation\u003c=6596014 but claims to be generation=6596025"
- ],
- "MinKey": {
- "ObjectID": 0,
- "ItemType": 0,
- "Offset": 0
- },
- "MaxKey": {
- "ObjectID": 0,
- "ItemType": 0,
- "Offset": 0
- },
- "InTrees": [
- 257
- ],
- "Size": 0,
- "ChecksumType": 0,
- "Head": {
- "Checksum": "0000000000000000000000000000000000000000000000000000000000000000",
- "MetadataUUID": "00000000-0000-0000-0000-000000000000",
- "Addr": 0,
- "Flags": 0,
- "BackrefRev": 0,
- "ChunkTreeUUID": "00000000-0000-0000-0000-000000000000",
- "Generation": 0,
- "Owner": 0,
- "NumItems": 0,
- "Level": 0
- },
- "BodyInternal": null,
- "BodyLeaf": null,
- "Padding": null
- }
-}
-`, buf.String())
-}
-*/
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s4_reattach.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s4_reattach.go
deleted file mode 100644
index a78d964..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s4_reattach.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-/*
-import (
- "context"
- "sort"
-
- "github.com/datawire/dlib/dlog"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/maps"
-)
-
-func (a RebuiltNode) ContainsWholeRegion(min, max btrfsprim.Key) int {
- switch {
- case min.Cmp(a.MinKey) < 0:
- // 'a' is too far right
- return -1
- case max.Cmp(a.MaxKey) > 0:
- // 'a' is too far left
- return 1
- default:
- // just right
- return 0
- }
-}
-
-func reAttachNodes(ctx context.Context, fs _FS, orphanedNodes containers.Set[btrfsvol.LogicalAddr], rebuiltNodes map[btrfsvol.LogicalAddr]*RebuiltNode) error {
- dlog.Info(ctx, "Attaching orphaned nodes to rebuilt nodes...")
-
- sb, err := fs.Superblock()
- if err != nil {
- return err
- }
-
- // Index 'rebuiltNodes' for fast lookups.
- dlog.Info(ctx, "... indexing rebuilt nodes...")
- var byLevel [][]*RebuiltNode
- for _, node := range rebuiltNodes {
- for int(node.Head.Level) >= len(byLevel) {
- byLevel = append(byLevel, []*RebuiltNode(nil))
- }
- byLevel[node.Head.Level] = append(byLevel[node.Head.Level], node)
- }
- for _, slice := range byLevel {
- sort.Slice(slice, func(i, j int) bool {
- return slice[i].MinKey.Cmp(slice[j].MinKey) < 0
- })
- }
- dlog.Info(ctx, "... done indexing")
-
- // Attach orphanedNodes to the gaps.
- dlog.Info(ctx, "... attaching nodes...")
- lastPct := -1
- progress := func(done int) {
- pct := int(100 * float64(done) / float64(len(orphanedNodes)))
- if pct != lastPct || done == len(orphanedNodes) {
- dlog.Infof(ctx, "... %v%% (%v/%v)",
- pct, done, len(orphanedNodes))
- lastPct = pct
- }
- }
- numAttached := 0
- for i, foundLAddr := range maps.SortedKeys(orphanedNodes) {
- progress(i)
- foundRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, *sb, foundLAddr, btrfstree.NodeExpectations{
- LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: foundLAddr},
- })
- if foundRef == nil {
- return err
- }
- foundMinKey, ok := foundRef.Data.MinItem()
- if !ok {
- continue
- }
- foundMaxKey, ok := foundRef.Data.MaxItem()
- if !ok {
- continue
- }
-
- // `trees` is the set of trees that the node may be
- // placed in; '0' is a wildcard that means "any tree".
- // We still keep track of the others, in order to try
- // to avoid using the wildcard.
- trees := make(containers.Set[btrfsprim.ObjID])
- tree := foundRef.Data.Head.Owner
- for {
- trees.Insert(tree)
- var ok bool
- tree, ok = fs.ParentTree(tree)
- if !ok {
- // error; accept anything
- trees.Insert(0)
- break
- }
- if tree == 0 {
- // end of the line
- break
- }
- }
- attached := make(containers.Set[btrfsprim.ObjID])
- for level := int(foundRef.Data.Head.Level) + 1; level < len(byLevel) && len(attached) == 0; level++ {
- for _, parent := range byLevel[level] {
- if parent.ContainsWholeRegion(foundMinKey, foundMaxKey) != 0 {
- continue
- }
- if parent.Node.Head.Generation < foundRef.Data.Head.Generation {
- continue
- }
- if !trees.HasAny(parent.InTrees) {
- continue
- }
- parent.BodyInternal = append(parent.BodyInternal, btrfstree.KeyPointer{
- Key: foundMinKey,
- BlockPtr: foundLAddr,
- Generation: foundRef.Data.Head.Generation,
- })
- attached.InsertFrom(parent.InTrees)
- }
- }
- if _, wildcard := trees[0]; wildcard && len(attached) == 0 {
- for level := int(foundRef.Data.Head.Level) + 1; level < len(byLevel) && len(attached) == 0; level++ {
- for _, parent := range byLevel[level] {
- if parent.ContainsWholeRegion(foundMinKey, foundMaxKey) != 0 {
- continue
- }
- if parent.Node.Head.Generation < foundRef.Data.Head.Generation {
- continue
- }
- parent.BodyInternal = append(parent.BodyInternal, btrfstree.KeyPointer{
- Key: foundMinKey,
- BlockPtr: foundLAddr,
- Generation: foundRef.Data.Head.Generation,
- })
- attached.InsertFrom(parent.InTrees)
- }
- }
- }
-
- if len(attached) > 0 {
- numAttached++
- } else {
- dlog.Errorf(ctx, "could not find a broken node to attach node to reattach node@%v to",
- foundRef.Addr)
- }
- }
- progress(len(orphanedNodes))
- dlog.Info(ctx, "... ... done attaching")
-
- dlog.Infof(ctx, "... re-attached %d nodes (%v%% success rate)",
- numAttached, int(100*float64(numAttached)/float64(len(orphanedNodes))))
- return nil
-}
-*/
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go
new file mode 100644
index 0000000..7ae3b89
--- /dev/null
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go
@@ -0,0 +1,157 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package graph
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
+ "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
+)
+
+type Node struct {
+ Level uint8
+ Generation btrfsprim.Generation
+ Owner btrfsprim.ObjID
+ NumItems uint32
+ MinItem btrfsprim.Key
+ MaxItem btrfsprim.Key
+}
+
+type Edge struct {
+ FromTree btrfsprim.ObjID
+ FromNode btrfsvol.LogicalAddr
+ FromItem int
+
+ ToNode btrfsvol.LogicalAddr
+ ToLevel uint8
+ ToKey btrfsprim.Key
+ ToGeneration btrfsprim.Generation
+}
+
+func (kp Edge) String() string {
+ return fmt.Sprintf(`{t:%v,n:%v}[%d]->{n:%v,l:%v,g:%v,k:(%d,%v,%d)}`,
+ kp.FromTree, kp.FromNode, kp.FromItem,
+ kp.ToNode, kp.ToLevel, kp.ToGeneration,
+ kp.ToKey.ObjectID,
+ kp.ToKey.ItemType,
+ kp.ToKey.Offset)
+}
+
+type Graph struct {
+ Nodes map[btrfsvol.LogicalAddr]Node
+ BadNodes map[btrfsvol.LogicalAddr]error
+ EdgesFrom map[btrfsvol.LogicalAddr][]*Edge
+ EdgesTo map[btrfsvol.LogicalAddr][]*Edge
+}
+
+func (g Graph) insertEdge(kp Edge) {
+ ptr := &kp
+ if kp.ToNode == 0 {
+ panic("kp.ToNode should not be zero")
+ }
+ g.EdgesFrom[kp.FromNode] = append(g.EdgesFrom[kp.FromNode], ptr)
+ g.EdgesTo[kp.ToNode] = append(g.EdgesTo[kp.ToNode], ptr)
+}
+
+func (g Graph) insertTreeRoot(sb btrfstree.Superblock, treeID btrfsprim.ObjID) {
+ treeInfo, err := btrfstree.LookupTreeRoot(nil, sb, treeID)
+ if err != nil {
+ // This shouldn't ever happen for treeIDs that are
+ // mentioned directly in the superblock; which are the
+ // only trees for which we should call
+ // .insertTreeRoot().
+ panic(fmt.Errorf("LookupTreeRoot(%v): %w", treeID, err))
+ }
+ if treeInfo.RootNode == 0 {
+ return
+ }
+ g.insertEdge(Edge{
+ FromTree: treeID,
+ ToNode: treeInfo.RootNode,
+ ToLevel: treeInfo.Level,
+ ToGeneration: treeInfo.Generation,
+ })
+}
+
+func New(sb btrfstree.Superblock) *Graph {
+ g := &Graph{
+ Nodes: make(map[btrfsvol.LogicalAddr]Node),
+ BadNodes: make(map[btrfsvol.LogicalAddr]error),
+ EdgesFrom: make(map[btrfsvol.LogicalAddr][]*Edge),
+ EdgesTo: make(map[btrfsvol.LogicalAddr][]*Edge),
+ }
+
+ // These 4 trees are mentioned directly in the superblock, so
+ // they are always seen.
+ g.insertTreeRoot(sb, btrfsprim.ROOT_TREE_OBJECTID)
+ g.insertTreeRoot(sb, btrfsprim.CHUNK_TREE_OBJECTID)
+ g.insertTreeRoot(sb, btrfsprim.TREE_LOG_OBJECTID)
+ g.insertTreeRoot(sb, btrfsprim.BLOCK_GROUP_TREE_OBJECTID)
+
+ return g
+}
+
+func (g *Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) {
+ for _, item := range nodeRef.Data.BodyLeaf {
+ switch itemBody := item.Body.(type) {
+ case btrfsitem.Root:
+ g.insertEdge(Edge{
+ FromTree: item.Key.ObjectID,
+ ToNode: itemBody.ByteNr,
+ ToLevel: itemBody.Level,
+ ToGeneration: itemBody.Generation,
+ })
+ }
+ }
+
+ g.Nodes[nodeRef.Addr] = Node{
+ Level: nodeRef.Data.Head.Level,
+ Generation: nodeRef.Data.Head.Generation,
+ Owner: nodeRef.Data.Head.Owner,
+ NumItems: nodeRef.Data.Head.NumItems,
+ MinItem: discardOK(nodeRef.Data.MinItem()),
+ MaxItem: discardOK(nodeRef.Data.MaxItem()),
+ }
+ for i, kp := range nodeRef.Data.BodyInternal {
+ g.insertEdge(Edge{
+ FromTree: nodeRef.Data.Head.Owner,
+ FromNode: nodeRef.Addr,
+ FromItem: i,
+ ToNode: kp.BlockPtr,
+ ToLevel: nodeRef.Data.Head.Level - 1,
+ ToKey: kp.Key,
+ ToGeneration: kp.Generation,
+ })
+ }
+}
+
+func (g *Graph) FinalCheck(fs diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock, progress func(n, d int)) error {
+ total := len(g.EdgesTo)
+ done := 0
+ progress(done, total)
+ for laddr := range g.EdgesTo {
+ if _, ok := g.Nodes[laddr]; !ok {
+ _, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, sb, laddr, btrfstree.NodeExpectations{
+ LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr},
+ })
+ if err == nil {
+ return fmt.Errorf("node@%v exists but was not in node scan results", laddr)
+ }
+ g.BadNodes[laddr] = err
+ }
+ done++
+ progress(done, total)
+ }
+ return nil
+}
+
+func discardOK[T any](val T, _ bool) T {
+ return val
+}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/loops.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go
index fc71558..e76985c 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/loops.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go
@@ -2,48 +2,37 @@
//
// SPDX-License-Identifier: GPL-2.0-or-later
-package rebuildnodes
+package graph
import (
- "context"
"fmt"
"io"
"strings"
- "github.com/datawire/dlib/dlog"
+ "github.com/datawire/dlib/derror"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
"git.lukeshu.com/btrfs-progs-ng/lib/maps"
"git.lukeshu.com/btrfs-progs-ng/lib/slices"
)
-func ShowLoops(ctx context.Context, out io.Writer, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) error {
- scanData, err := ScanDevices(ctx, fs, nodeScanResults)
- if err != nil {
- return err
- }
-
- dlog.Info(ctx, "Collecting orphans...")
+func (g Graph) ShowLoops(out io.Writer) {
orphans := make(containers.Set[btrfsvol.LogicalAddr])
- for node := range scanData.Nodes {
- if len(scanData.EdgesTo[node]) == 0 {
+ for node := range g.Nodes {
+ if len(g.EdgesTo[node]) == 0 {
orphans.Insert(node)
}
}
- dlog.Info(ctx, "Walking graph...")
- loopWalk(out, *scanData, 0)
+ loopWalk(out, g, 0)
for _, orphan := range maps.SortedKeys(orphans) {
- loopWalk(out, *scanData, orphan)
+ loopWalk(out, g, orphan)
}
-
- return nil
}
-func loopWalk(out io.Writer, scanData scanResult, stack ...btrfsvol.LogicalAddr) {
+func loopWalk(out io.Writer, scanData Graph, stack ...btrfsvol.LogicalAddr) {
for _, kp := range scanData.EdgesFrom[stack[len(stack)-1]] {
childStack := append(stack, kp.ToNode)
if slices.Contains(kp.ToNode, stack) {
@@ -54,7 +43,7 @@ func loopWalk(out io.Writer, scanData scanResult, stack ...btrfsvol.LogicalAddr)
}
}
-func nodeRender(scanData scanResult, node btrfsvol.LogicalAddr) []string {
+func nodeRender(scanData Graph, node btrfsvol.LogicalAddr) []string {
if node == 0 {
return []string{"root"}
} else if nodeData, ok := scanData.Nodes[node]; ok {
@@ -82,7 +71,7 @@ func nodeRender(scanData scanResult, node btrfsvol.LogicalAddr) []string {
}
}
-func edgeRender(scanData scanResult, kp kpData) []string {
+func edgeRender(scanData Graph, kp Edge) []string {
a := fmt.Sprintf("[%d]={", kp.FromItem)
b := strings.Repeat(" ", len(a))
ret := []string{
@@ -111,7 +100,7 @@ func edgeRender(scanData scanResult, kp kpData) []string {
return ret
}
-func loopRender(out io.Writer, scanData scanResult, stack ...btrfsvol.LogicalAddr) {
+func loopRender(out io.Writer, scanData Graph, stack ...btrfsvol.LogicalAddr) {
var lines []string
add := func(suffixes []string) {
curLen := 0
@@ -152,3 +141,25 @@ func loopRender(out io.Writer, scanData scanResult, stack ...btrfsvol.LogicalAdd
fmt.Fprintln(out, " "+line)
}
}
+
+func checkNodeExpectations(kp Edge, toNode Node) error {
+ var errs derror.MultiError
+ if toNode.Level != kp.ToLevel {
+ errs = append(errs, fmt.Errorf("kp.level=%v != node.level=%v",
+ kp.ToLevel, toNode.Level))
+ }
+ if toNode.Generation != kp.ToGeneration {
+ errs = append(errs, fmt.Errorf("kp.generation=%v != node.generation=%v",
+ kp.ToGeneration, toNode.Generation))
+ }
+ if toNode.NumItems == 0 {
+ errs = append(errs, fmt.Errorf("node.num_items=0"))
+ } else if kp.ToKey != (btrfsprim.Key{}) && toNode.MinItem != kp.ToKey {
+ errs = append(errs, fmt.Errorf("kp.key=%v != node.items[0].key=%v",
+ kp.ToKey, toNode.MinItem))
+ }
+ if len(errs) > 0 {
+ return errs
+ }
+ return nil
+}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/orphans.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/orphans.go
new file mode 100644
index 0000000..51313a9
--- /dev/null
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/orphans.go
@@ -0,0 +1,166 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package rebuildnodes
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/datawire/dlib/dlog"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
+ "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
+ "git.lukeshu.com/btrfs-progs-ng/lib/maps"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
+)
+
+func listRoots(graph graph.Graph, leaf btrfsvol.LogicalAddr) containers.Set[btrfsvol.LogicalAddr] {
+ kps := graph.EdgesTo[leaf]
+ if len(kps) == 0 {
+ return containers.NewSet(leaf)
+ }
+ ret := make(containers.Set[btrfsvol.LogicalAddr])
+ for _, kp := range kps {
+ ret.InsertFrom(listRoots(graph, kp.FromNode))
+ }
+ return ret
+}
+
+func walk(graph graph.Graph, root btrfsvol.LogicalAddr, fn func(btrfsvol.LogicalAddr) bool) {
+ if _, ok := graph.Nodes[root]; !ok {
+ return
+ }
+ if !fn(root) {
+ return
+ }
+ for _, kp := range graph.EdgesFrom[root] {
+ walk(graph, kp.ToNode, fn)
+ }
+}
+
+type keyAndTree struct {
+ btrfsprim.Key
+ TreeID btrfsprim.ObjID
+}
+
+func (a keyAndTree) Cmp(b keyAndTree) int {
+ if d := a.Key.Cmp(b.Key); d != 0 {
+ return d
+ }
+ return containers.NativeCmp(a.TreeID, b.TreeID)
+}
+
+type crawlStats struct {
+ DoneOrphans int
+ TotalOrphans int
+
+ VisitedNodes int
+ FoundLeafs int
+}
+
+func (s crawlStats) String() string {
+ return fmt.Sprintf("... crawling orphans %v%% (%v/%v); visited %d nodes, found %d leaf nodes",
+ int(100*float64(s.DoneOrphans)/float64(s.TotalOrphans)),
+ s.DoneOrphans, s.TotalOrphans, s.VisitedNodes, s.FoundLeafs)
+}
+
+type readStats struct {
+ DoneLeafNodes int
+ TotalLeafNodes int
+}
+
+func (s readStats) String() string {
+ return fmt.Sprintf("... reading leafs %v%% (%v/%v)",
+ int(100*float64(s.DoneLeafNodes)/float64(s.TotalLeafNodes)),
+ s.DoneLeafNodes, s.TotalLeafNodes)
+}
+
+func indexOrphans(ctx context.Context, fs diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock, graph graph.Graph) (
+ orphans containers.Set[btrfsvol.LogicalAddr],
+ leaf2orphans map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr],
+ key2leaf *containers.SortedMap[keyAndTree, btrfsvol.LogicalAddr],
+ err error,
+) {
+ dlog.Info(ctx, "... counting orphans")
+ orphans = make(containers.Set[btrfsvol.LogicalAddr])
+ for node := range graph.Nodes {
+ if len(graph.EdgesTo[node]) == 0 {
+ orphans.Insert(node)
+ }
+ }
+
+ leaf2orphans = make(map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr])
+ visited := make(containers.Set[btrfsvol.LogicalAddr])
+
+ done := 0
+ crawlProgressWriter := textui.NewProgress[crawlStats](ctx, dlog.LogLevelInfo, 1*time.Second)
+ progress := func() {
+ crawlProgressWriter.Set(crawlStats{
+ DoneOrphans: done,
+ TotalOrphans: len(orphans),
+ VisitedNodes: len(visited),
+ FoundLeafs: len(leaf2orphans),
+ })
+ }
+ progress()
+ for _, orphan := range maps.SortedKeys(orphans) {
+ walk(graph, orphan, func(node btrfsvol.LogicalAddr) bool {
+ if visited.Has(node) {
+ return false
+ }
+ visited.Insert(node)
+ if graph.Nodes[node].Level == 0 {
+ if roots := listRoots(graph, node); !roots.Has(0) {
+ leaf2orphans[node] = roots
+ }
+ }
+ progress()
+ return true
+ })
+ done++
+ progress()
+ }
+ crawlProgressWriter.Done()
+
+ key2leaf = new(containers.SortedMap[keyAndTree, btrfsvol.LogicalAddr])
+ done = 0
+ readProgressWriter := textui.NewProgress[readStats](ctx, dlog.LogLevelInfo, 1*time.Second)
+ progress = func() {
+ readProgressWriter.Set(readStats{
+ DoneLeafNodes: done,
+ TotalLeafNodes: len(leaf2orphans),
+ })
+ }
+ progress()
+ for _, laddr := range maps.SortedKeys(leaf2orphans) {
+ nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, sb, laddr, btrfstree.NodeExpectations{
+ LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr},
+ Level: containers.Optional[uint8]{OK: true, Val: 0},
+ })
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ for _, item := range nodeRef.Data.BodyLeaf {
+ k := keyAndTree{
+ Key: item.Key,
+ TreeID: nodeRef.Data.Head.Owner,
+ }
+ if cur, ok := key2leaf.Load(k); !ok || graph.Nodes[cur].Generation < nodeRef.Data.Head.Generation {
+ key2leaf.Store(k, laddr)
+ }
+ }
+ done++
+ progress()
+ }
+ readProgressWriter.Done()
+
+ return orphans, leaf2orphans, key2leaf, nil
+}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
new file mode 100644
index 0000000..ef50653
--- /dev/null
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
@@ -0,0 +1,619 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package rebuildnodes
+
+import (
+ "context"
+ "fmt"
+ "sort"
+
+ "github.com/datawire/dlib/dlog"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildmappings"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/uuidmap"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsutil"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
+ "git.lukeshu.com/btrfs-progs-ng/lib/maps"
+)
+
+type Rebuilder struct {
+ raw *btrfs.FS
+ inner interface {
+ btrfstree.TreeOperator
+ Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.LogicalAddr) ([]btrfsprim.Key, error)
+ }
+ sb btrfstree.Superblock
+
+ graph graph.Graph
+ uuidMap uuidmap.UUIDMap
+ csums containers.RBTree[containers.NativeOrdered[btrfsvol.LogicalAddr], btrfsinspect.SysExtentCSum]
+ orphans containers.Set[btrfsvol.LogicalAddr]
+ leaf2orphans map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]
+ key2leaf containers.SortedMap[keyAndTree, btrfsvol.LogicalAddr]
+
+ augments map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr]
+
+ pendingAugments map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int
+}
+
+func RebuildNodes(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) (map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], error) {
+ scanData, err := ScanDevices(ctx, fs, nodeScanResults) // ScanDevices does its own logging
+ if err != nil {
+ return nil, err
+ }
+
+ dlog.Info(ctx, "Reading superblock...")
+ sb, err := fs.Superblock()
+ if err != nil {
+ return nil, err
+ }
+
+ dlog.Info(ctx, "Indexing checksums...")
+ csums, _ := rebuildmappings.ExtractLogicalSums(ctx, nodeScanResults)
+ if csums == nil {
+ csums = new(containers.RBTree[containers.NativeOrdered[btrfsvol.LogicalAddr], btrfsinspect.SysExtentCSum])
+ }
+
+ dlog.Info(ctx, "Indexing orphans...")
+ orphans, leaf2orphans, key2leaf, err := indexOrphans(ctx, fs, *sb, *scanData.nodeGraph)
+ if err != nil {
+ return nil, err
+ }
+
+ dlog.Info(ctx, "Rebuilding node tree...")
+ o := &Rebuilder{
+ raw: fs,
+ inner: btrfsutil.NewBrokenTrees(ctx, fs),
+ sb: *sb,
+
+ graph: *scanData.nodeGraph,
+ uuidMap: *scanData.uuidMap,
+ csums: *csums,
+ orphans: orphans,
+ leaf2orphans: leaf2orphans,
+ key2leaf: *key2leaf,
+
+ augments: make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr]),
+ }
+ if err := o.rebuild(ctx); err != nil {
+ return nil, err
+ }
+
+ return o.augments, nil
+}
+
+func (o *Rebuilder) ioErr(ctx context.Context, err error) {
+ err = fmt.Errorf("should not happen: i/o error: %w", err)
+ dlog.Error(ctx, err)
+ panic(err)
+}
+
+func (o *Rebuilder) rebuild(ctx context.Context) error {
+ passNum := 0
+ dlog.Infof(ctx, "... pass %d: scanning for implied items", passNum)
+ o.pendingAugments = make(map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int)
+ btrfsutil.WalkAllTrees(ctx, o.inner, btrfsutil.WalkAllTreesHandler{
+ Err: func(*btrfsutil.WalkError) {},
+ TreeWalkHandler: btrfstree.TreeWalkHandler{
+ Item: func(path btrfstree.TreePath, item btrfstree.Item) error {
+ handleItem(o, ctx, path[0].FromTree, item)
+ return nil
+ },
+ },
+ })
+ for len(o.pendingAugments) > 0 {
+ // Apply the augments, keeping track of what keys are added to what tree.
+ dlog.Infof(ctx, "... pass %d: augmenting trees to add implied items", passNum)
+ newKeys := make(map[btrfsprim.ObjID][]btrfsprim.Key)
+ for _, treeID := range maps.SortedKeys(o.pendingAugments) {
+ dlog.Infof(ctx, "... ... augmenting tree %v:", treeID)
+ treeAugments := o.resolveTreeAugments(ctx, o.pendingAugments[treeID])
+ for _, nodeAddr := range maps.SortedKeys(treeAugments) {
+ added, err := o.inner.Augment(treeID, nodeAddr)
+ if err != nil {
+ dlog.Errorf(ctx, "error augmenting: %v", err)
+ continue
+ }
+ newKeys[treeID] = append(newKeys[treeID], added...)
+
+ set := o.augments[treeID]
+ if set == nil {
+ set = make(containers.Set[btrfsvol.LogicalAddr])
+ o.augments[treeID] = set
+ }
+ set.Insert(nodeAddr)
+ }
+ }
+ // Clear the list of pending augments.
+ o.pendingAugments = make(map[btrfsprim.ObjID][]map[btrfsvol.LogicalAddr]int)
+ passNum++
+ // Call handleItem() for each of the added keys.
+ dlog.Infof(ctx, "... pass %d: scanning for implied items", passNum)
+ for _, treeID := range maps.SortedKeys(newKeys) {
+ for _, key := range newKeys[treeID] {
+ item, err := o.inner.TreeLookup(treeID, key)
+ if err != nil {
+ o.ioErr(ctx, fmt.Errorf("error looking up already-inserted item: tree=%v key=%v: %w",
+ treeID, key, err))
+ }
+ handleItem(o, ctx, treeID, item)
+ }
+ }
+ }
+ return nil
+}
+
+func (o *Rebuilder) resolveTreeAugments(ctx context.Context, listsWithDistances []map[btrfsvol.LogicalAddr]int) containers.Set[btrfsvol.LogicalAddr] {
+ distances := make(map[btrfsvol.LogicalAddr]int)
+ generations := make(map[btrfsvol.LogicalAddr]btrfsprim.Generation)
+ lists := make([]containers.Set[btrfsvol.LogicalAddr], len(listsWithDistances))
+ for i, listWithDistances := range listsWithDistances {
+ lists[i] = make(containers.Set[btrfsvol.LogicalAddr], len(listWithDistances))
+ for item, dist := range listWithDistances {
+ lists[i].Insert(item)
+ distances[item] = dist
+ generations[item] = o.graph.Nodes[item].Generation
+ }
+ }
+
+ // Define an algorithm that takes several lists of items, and returns a
+ // set of those items such that each input list contains zero or one of
+ // the items from your return set. The same item may appear in multiple
+ // of the input lists.
+ //
+ // > Example 1: Given the input lists
+ // >
+ // > 0: [A, B]
+ // > 2: [A, C]
+ // >
+ // > legal solutions would be `[]`, `[A]`, `[B]`, `[C]`, or `[B, C]`. It
+ // > would not be legal to return `[A, B]` or `[A, C]`.
+ //
+ // > Example 2: Given the input lists
+ // >
+ // > 1: [A, B]
+ // > 2: [A]
+ // > 3: [B]
+ // >
+ // > legal solutions would be `[]`, `[A]` or `[B]`. It would not be legal
+ // > to return `[A, B]`.
+ //
+ // The algorithm should optimize for the following goals:
+ //
+ // - We prefer that each input list have an item in the return set.
+ //
+ // > In Example 1, while `[]`, `[B]`, and `[C]` are permissible
+ // > solutions, they are not optimal, because one or both of the input
+ // > lists are not represented.
+ // >
+ // > It may be the case that it is not possible to represent all lists
+ // > in the result; in Example 2, either list 2 or list 3 must be
+ // > unrepresented.
+ //
+ // - Each item has a non-negative scalar "distance" score, we prefer
+ // lower distances. Distance scores are comparable; 0 is preferred,
+ // and a distance of 4 is twice as bad as a distance of 2.
+ //
+ // - Each item has a "generation" score, we prefer higher generations.
+ // Generation scores should not be treated as a linear scale; the
+ // magnitude of deltas is meaningless; only the sign of a delta is
+ // meaningful.
+ //
+ // > So it would be wrong to say something like
+ // >
+ // > desirability = (-a*distance) + (b*generation) // for some constants `a` and `b`
+ // >
+ // > because `generation` can't be used that way
+ //
+ // - We prefer items that appear in more lists over items that appear in
+ // fewer lists.
+ //
+ // The relative priority of these 4 goals is undefined; preferably the
+ // algorithm should be defined in a way that makes it easy to adjust the
+ // relative priorities.
+
+ ret := make(containers.Set[btrfsvol.LogicalAddr])
+ illegal := make(containers.Set[btrfsvol.LogicalAddr]) // cannot-be-accepted and already-accepted
+ accept := func(item btrfsvol.LogicalAddr) {
+ ret.Insert(item)
+ for _, list := range lists {
+ if list.Has(item) {
+ illegal.InsertFrom(list)
+ }
+ }
+ }
+
+ counts := make(map[btrfsvol.LogicalAddr]int)
+ for _, list := range lists {
+ for item := range list {
+ counts[item] = counts[item] + 1
+ }
+ }
+
+ sortedItems := maps.Keys(distances)
+ sort.Slice(sortedItems, func(i, j int) bool {
+ iItem, jItem := sortedItems[i], sortedItems[j]
+ if counts[iItem] != counts[jItem] {
+ return counts[iItem] > counts[jItem] // reverse this check; higher counts should sort lower
+ }
+ if distances[iItem] != distances[jItem] {
+ return distances[iItem] < distances[jItem]
+ }
+ if generations[iItem] != generations[jItem] {
+ return generations[iItem] > generations[jItem] // reverse this check; higher generations should sort lower
+ }
+ return iItem < jItem // laddr is as good a tiebreaker as anything
+ })
+ for _, item := range sortedItems {
+ if !illegal.Has(item) {
+ accept(item)
+ }
+ }
+
+ for i, list := range lists {
+ dlog.Infof(ctx, "... ... ... %d: %v: %v", i, list.Intersection(ret).TakeOne(), maps.SortedKeys(list))
+ }
+
+ return ret
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// _NodeFile is a subset of btrfstree.NodeFile.
+type _NodeFile interface {
+ ParentTree(btrfsprim.ObjID) (btrfsprim.ObjID, bool)
+}
+
+func treeDistance(fs _NodeFile, tree, leaf btrfsprim.ObjID) (int, bool) {
+ dist := 0
+ for {
+ if tree == leaf {
+ return dist, true
+ }
+
+ parentTree, ok := fs.ParentTree(tree)
+ if !ok {
+ // Failed to look up parent info.
+ return 0, false
+ }
+ if parentTree == 0 {
+ // End of the line.
+ return 0, false
+ }
+
+ tree = parentTree
+ dist++
+ }
+}
+
+func (o *Rebuilder) wantAugment(ctx context.Context, treeID btrfsprim.ObjID, choices containers.Set[btrfsvol.LogicalAddr]) {
+ choicesWithDist := make(map[btrfsvol.LogicalAddr]int)
+ for choice := range choices {
+ if dist, ok := treeDistance(o.uuidMap, treeID, o.graph.Nodes[choice].Owner); ok {
+ choicesWithDist[choice] = dist
+ }
+ }
+ if len(choicesWithDist) == 0 {
+ dlog.Errorf(ctx, "augment(tree=%v): could not find wanted item", treeID)
+ return
+ }
+ dlog.Infof(ctx, "augment(tree=%v): %v", treeID, maps.SortedKeys(choicesWithDist))
+ o.pendingAugments[treeID] = append(o.pendingAugments[treeID], choicesWithDist)
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// fsErr implements rebuildCallbacks.
+func (o *Rebuilder) fsErr(ctx context.Context, e error) {
+ dlog.Errorf(ctx, "filesystem error: %v", e)
+}
+
+// want implements rebuildCallbacks.
+func (o *Rebuilder) want(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) {
+ // check if we already have it
+
+ tgt := btrfsprim.Key{
+ ObjectID: objID,
+ ItemType: typ,
+ }
+ if _, err := o.inner.TreeSearch(treeID, func(key btrfsprim.Key, _ uint32) int {
+ key.Offset = 0
+ return tgt.Cmp(key)
+ }); err == nil {
+ return
+ }
+
+ // OK, we need to insert it
+
+ ctx = dlog.WithField(ctx, "want_key", fmt.Sprintf("tree=%v key={%v %v ?}", treeID, objID, typ))
+ wants := make(containers.Set[btrfsvol.LogicalAddr])
+ o.key2leaf.Subrange(
+ func(k keyAndTree, _ btrfsvol.LogicalAddr) int { k.Key.Offset = 0; return tgt.Cmp(k.Key) },
+ func(_ keyAndTree, v btrfsvol.LogicalAddr) bool {
+ wants.InsertFrom(o.leaf2orphans[v])
+ return true
+ })
+ o.wantAugment(ctx, treeID, wants)
+}
+
+// wantOff implements rebuildCallbacks.
+func (o *Rebuilder) wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) {
+ // check if we already have it
+
+ tgt := btrfsprim.Key{
+ ObjectID: objID,
+ ItemType: typ,
+ Offset: off,
+ }
+ if _, err := o.inner.TreeLookup(treeID, tgt); err == nil {
+ return
+ }
+
+ // OK, we need to insert it
+
+ ctx = dlog.WithField(ctx, "want_key", fmt.Sprintf("tree=%v key=%v", treeID, tgt))
+ wants := make(containers.Set[btrfsvol.LogicalAddr])
+ o.key2leaf.Subrange(
+ func(k keyAndTree, _ btrfsvol.LogicalAddr) int { return tgt.Cmp(k.Key) },
+ func(_ keyAndTree, v btrfsvol.LogicalAddr) bool {
+ wants.InsertFrom(o.leaf2orphans[v])
+ return true
+ })
+ o.wantAugment(ctx, treeID, wants)
+}
+
+// wantFunc implements rebuildCallbacks.
+func (o *Rebuilder) wantFunc(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(btrfsitem.Item) bool) {
+ // check if we already have it
+
+ tgt := btrfsprim.Key{
+ ObjectID: objID,
+ ItemType: typ,
+ }
+ items, _ := o.inner.TreeSearchAll(treeID, func(key btrfsprim.Key, _ uint32) int {
+ key.Offset = 0
+ return tgt.Cmp(key)
+ })
+ for _, item := range items {
+ if fn(item.Body) {
+ return
+ }
+ }
+
+ // OK, we need to insert it
+
+ ctx = dlog.WithField(ctx, "want_key", fmt.Sprintf("tree=%v key=%v +func", treeID, tgt))
+ wants := make(containers.Set[btrfsvol.LogicalAddr])
+ o.key2leaf.Subrange(
+ func(k keyAndTree, _ btrfsvol.LogicalAddr) int { k.Key.Offset = 0; return tgt.Cmp(k.Key) },
+ func(k keyAndTree, v btrfsvol.LogicalAddr) bool {
+ nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](o.raw, o.sb, v, btrfstree.NodeExpectations{
+ LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: v},
+ Generation: containers.Optional[btrfsprim.Generation]{OK: true, Val: o.graph.Nodes[v].Generation},
+ })
+ if err != nil {
+ o.ioErr(ctx, err)
+ }
+ for _, item := range nodeRef.Data.BodyLeaf {
+ if k.Key == item.Key && fn(item.Body) {
+ wants.InsertFrom(o.leaf2orphans[v])
+ }
+ }
+ return true
+ })
+ o.wantAugment(ctx, treeID, wants)
+}
+
+// func implements rebuildCallbacks.
+//
+// interval is [beg, end)
+func (o *Rebuilder) wantCSum(ctx context.Context, beg, end btrfsvol.LogicalAddr) {
+ for beg < end {
+ // check if we already have it
+ if run, err := btrfs.LookupCSum(o.inner, o.sb.ChecksumType, beg); err == nil {
+ // we already have it
+ beg = run.Addr.Add(run.Size())
+ } else {
+ // we need to insert it
+ ctx := dlog.WithField(ctx, "want_key", fmt.Sprintf("csum for laddr=%v", beg))
+ rbNode := o.csums.Search(func(item btrfsinspect.SysExtentCSum) int {
+ switch {
+ case item.Sums.Addr > beg:
+ return -1
+ case item.Sums.Addr.Add(item.Sums.Size()) <= beg:
+ return 1
+ default:
+ return 0
+ }
+
+ })
+ if rbNode == nil {
+ o.wantAugment(ctx, btrfsprim.CSUM_TREE_OBJECTID, nil) // log an error
+ beg += btrfssum.BlockSize
+ continue
+ }
+ run := rbNode.Value.Sums
+ key := keyAndTree{
+ Key: rbNode.Value.Key,
+ TreeID: btrfsprim.CSUM_TREE_OBJECTID,
+ }
+ leaf, ok := o.key2leaf.Load(key)
+ if !ok {
+ // This is a panic because if we found it in `o.csums` then it has
+ // to be in some Node, and if we didn't find it from
+ // btrfs.LookupCSum(), then that Node must be an orphan.
+ panic(fmt.Errorf("should not happen: no orphan contains %v", key.Key))
+ }
+ o.wantAugment(ctx, key.TreeID, o.leaf2orphans[leaf])
+
+ beg = run.Addr.Add(run.Size())
+ }
+ }
+}
+
+// wantFileExt implements rebuildCallbacks.
+func (o *Rebuilder) wantFileExt(ctx context.Context, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) {
+ min := btrfsprim.Key{
+ ObjectID: ino,
+ ItemType: btrfsitem.EXTENT_DATA_KEY,
+ Offset: 0,
+ }
+ max := btrfsprim.Key{
+ ObjectID: ino,
+ ItemType: btrfsitem.EXTENT_DATA_KEY,
+ Offset: uint64(size - 1),
+ }
+ exts, _ := o.inner.TreeSearchAll(treeID, func(key btrfsprim.Key, _ uint32) int {
+ switch {
+ case min.Cmp(key) < 0:
+ return 1
+ case max.Cmp(key) > 0:
+ return -1
+ default:
+ return 0
+ }
+ })
+
+ type gap struct {
+ // range is [Beg,End)
+ Beg, End int64
+ }
+ gaps := &containers.RBTree[containers.NativeOrdered[int64], gap]{
+ KeyFn: func(gap gap) containers.NativeOrdered[int64] {
+ return containers.NativeOrdered[int64]{Val: gap.Beg}
+ },
+ }
+ gaps.Insert(gap{
+ Beg: 0,
+ End: size,
+ })
+ for _, ext := range exts {
+ switch extBody := ext.Body.(type) {
+ case btrfsitem.FileExtent:
+ extBeg := int64(ext.Key.Offset)
+ extSize, err := extBody.Size()
+ if err != nil {
+ o.fsErr(ctx, fmt.Errorf("FileExtent: tree=%v key=%v: %w", treeID, ext.Key, err))
+ continue
+ }
+ extEnd := extBeg + extSize
+ overlappingGaps := gaps.SearchRange(func(gap gap) int {
+ switch {
+ case gap.End <= extBeg:
+ return 1
+ case extEnd <= gap.Beg:
+ return -1
+ default:
+ return 0
+ }
+ })
+ if len(overlappingGaps) == 0 {
+ continue
+ }
+ beg := overlappingGaps[0].Beg
+ end := overlappingGaps[len(overlappingGaps)-1].End
+ for _, gap := range overlappingGaps {
+ gaps.Delete(containers.NativeOrdered[int64]{Val: gap.Beg})
+ }
+ if beg < extBeg {
+ gaps.Insert(gap{
+ Beg: beg,
+ End: extBeg,
+ })
+ }
+ if end > extEnd {
+ gaps.Insert(gap{
+ Beg: extEnd,
+ End: end,
+ })
+ }
+ case btrfsitem.Error:
+ o.fsErr(ctx, fmt.Errorf("error decoding item: tree=%v key=%v: %w", treeID, ext.Key, extBody.Err))
+ default:
+ // This is a panic because the item decoder should not emit EXTENT_DATA
+ // items as anything but btrfsitem.FileExtent or btrfsitem.Error without
+ // this code also being updated.
+ panic(fmt.Errorf("should not happen: EXTENT_DATA item has unexpected type: %T", extBody))
+ }
+ }
+ _ = gaps.Walk(func(rbNode *containers.RBNode[gap]) error {
+ gap := rbNode.Value
+ min := btrfsprim.Key{
+ ObjectID: ino,
+ ItemType: btrfsitem.EXTENT_DATA_KEY,
+ Offset: 0,
+ }
+ max := btrfsprim.Key{
+ ObjectID: ino,
+ ItemType: btrfsitem.EXTENT_DATA_KEY,
+ Offset: uint64(gap.End - 1),
+ }
+ ctx := dlog.WithField(ctx, "want_key", fmt.Sprintf("file extent for tree=%v inode=%v bytes [%v, %v)", treeID, ino, gap.Beg, gap.End))
+ wants := make(containers.Set[btrfsvol.LogicalAddr])
+ o.key2leaf.Subrange(
+ func(k keyAndTree, _ btrfsvol.LogicalAddr) int {
+ switch {
+ case min.Cmp(k.Key) < 0:
+ return 1
+ case max.Cmp(k.Key) > 0:
+ return -1
+ default:
+ return 0
+ }
+ },
+ func(k keyAndTree, v btrfsvol.LogicalAddr) bool {
+ nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](o.raw, o.sb, v, btrfstree.NodeExpectations{
+ LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: v},
+ Generation: containers.Optional[btrfsprim.Generation]{OK: true, Val: o.graph.Nodes[v].Generation},
+ })
+ if err != nil {
+ o.ioErr(ctx, fmt.Errorf("error reading previously read node@%v: %w", v, err))
+ }
+ for _, item := range nodeRef.Data.BodyLeaf {
+ if k.Key != item.Key {
+ continue
+ }
+ switch itemBody := item.Body.(type) {
+ case btrfsitem.FileExtent:
+ itemBeg := int64(item.Key.Offset)
+ itemSize, err := itemBody.Size()
+ if err != nil {
+ o.fsErr(ctx, fmt.Errorf("FileExtent: tree=%v key=%v: %w", treeID, item.Key, err))
+ continue
+ }
+ itemEnd := itemBeg + itemSize
+ // We're being greedy and "wanting" any extent that has any overlap with the
+ // gap. But maybe instead we should only want extents that are
+ // *entirely* within the gap. I'll have to run it on real filesystems
+ // to see what works better.
+ //
+ // TODO(lukeshu): Re-evaluate whether being greedy here is the right
+ // thing.
+ if itemEnd > gap.Beg && itemBeg < gap.End {
+ wants.InsertFrom(o.leaf2orphans[v])
+ }
+ case btrfsitem.Error:
+ o.fsErr(ctx, fmt.Errorf("error decoding item: tree=%v key=%v: %w", treeID, item.Key, itemBody.Err))
+ default:
+ // This is a panic because the item decoder should not emit EXTENT_DATA
+ // items as anything but btrfsitem.FileExtent or btrfsitem.Error without
+ // this code also being updated.
+ panic(fmt.Errorf("should not happen: EXTENT_DATA item has unexpected type: %T", itemBody))
+ }
+ }
+ return true
+ })
+ o.wantAugment(ctx, treeID, wants)
+ return nil
+ })
+}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go
new file mode 100644
index 0000000..976716d
--- /dev/null
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go
@@ -0,0 +1,338 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package rebuildnodes
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+
+ "github.com/datawire/dlib/dlog"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+)
+
+type rebuildCallbacks interface {
+ fsErr(ctx context.Context, e error)
+ want(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType)
+ wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64)
+ wantFunc(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(btrfsitem.Item) bool)
+ wantCSum(ctx context.Context, beg, end btrfsvol.LogicalAddr) // interval is [beg, end)
+ wantFileExt(ctx context.Context, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64)
+}
+
+func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, item btrfstree.Item) {
+ ctx = dlog.WithField(ctx, "tree", treeID)
+ ctx = dlog.WithField(ctx, "key", item.Key)
+
+ // Notionally, just express the relationships shown in
+ // https://btrfs.wiki.kernel.org/index.php/File:References.png (from the page
+ // https://btrfs.wiki.kernel.org/index.php/Data_Structures )
+ switch body := item.Body.(type) {
+ case btrfsitem.BlockGroup:
+ o.want(dlog.WithField(ctx, "wants", "Chunk"),
+ btrfsprim.CHUNK_TREE_OBJECTID,
+ body.ChunkObjectID,
+ btrfsitem.CHUNK_ITEM_KEY)
+ o.wantOff(dlog.WithField(ctx, "wants", "FreeSpaceInfo"),
+ btrfsprim.FREE_SPACE_TREE_OBJECTID,
+ item.Key.ObjectID,
+ btrfsitem.FREE_SPACE_INFO_KEY,
+ item.Key.Offset)
+ case btrfsitem.Chunk:
+ o.want(dlog.WithField(ctx, "wants", "owning Root"),
+ btrfsprim.ROOT_TREE_OBJECTID,
+ body.Head.Owner,
+ btrfsitem.ROOT_ITEM_KEY)
+ case btrfsitem.Dev:
+ // nothing
+ case btrfsitem.DevExtent:
+ o.wantOff(dlog.WithField(ctx, "wants", "Chunk"),
+ body.ChunkTree,
+ body.ChunkObjectID,
+ btrfsitem.CHUNK_ITEM_KEY,
+ uint64(body.ChunkOffset))
+ case btrfsitem.DevStats:
+ // nothing
+ case btrfsitem.DirEntry:
+ // containing-directory
+ o.wantOff(dlog.WithField(ctx, "wants", "containing dir inode"),
+ treeID,
+ item.Key.ObjectID,
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ // siblings
+ switch item.Key.ItemType {
+ case btrfsitem.DIR_ITEM_KEY:
+ o.wantFunc(dlog.WithField(ctx, "wants", "corresponding DIR_INDEX"),
+ treeID,
+ item.Key.ObjectID,
+ btrfsitem.DIR_INDEX_KEY,
+ func(peerItem btrfsitem.Item) bool {
+ return reflect.DeepEqual(item, peerItem)
+ })
+ case btrfsitem.DIR_INDEX_KEY:
+ o.wantOff(dlog.WithField(ctx, "wants", "corresponding DIR_ITEM"),
+ treeID,
+ item.Key.ObjectID,
+ btrfsitem.DIR_ITEM_KEY,
+ btrfsitem.NameHash(body.Name))
+ case btrfsitem.XATTR_ITEM_KEY:
+ // nothing
+ default:
+ // This is a panic because the item decoder should not emit a
+ // btrfsitem.DirEntry for other item types without this code also being
+ // updated.
+ panic(fmt.Errorf("should not happen: DirEntry: unexpected ItemType=%v", item.Key.ItemType))
+ }
+ // item-within-directory
+ if body.Location != (btrfsprim.Key{}) {
+ switch body.Location.ItemType {
+ case btrfsitem.INODE_ITEM_KEY:
+ o.wantOff(dlog.WithField(ctx, "wants", "item being pointed to"),
+ treeID,
+ body.Location.ObjectID,
+ body.Location.ItemType,
+ body.Location.Offset)
+ o.wantOff(dlog.WithField(ctx, "wants", "backref from item being pointed to"),
+ treeID,
+ body.Location.ObjectID,
+ btrfsitem.INODE_REF_KEY,
+ uint64(item.Key.ObjectID))
+ case btrfsitem.ROOT_ITEM_KEY:
+ o.want(dlog.WithField(ctx, "wants", "Root of subvolume being pointed to"),
+ btrfsprim.ROOT_TREE_OBJECTID,
+ body.Location.ObjectID,
+ body.Location.ItemType)
+ default:
+ o.fsErr(ctx, fmt.Errorf("DirEntry: unexpected .Location.ItemType=%v", body.Location.ItemType))
+ }
+ }
+ case btrfsitem.Empty:
+ // nothing
+ case btrfsitem.Extent:
+ //if body.Head.Flags.Has(btrfsitem.EXTENT_FLAG_TREE_BLOCK) {
+ // // Supposedly this flag indicates that that
+ // // body.Info.Key identifies a node by the
+ // // first key in the node. But nothing in the
+ // // kernel ever reads this, so who knows if it
+ // // always gets updated correctly?
+ //}
+ for i, ref := range body.Refs {
+ switch refBody := ref.Body.(type) {
+ case nil:
+ // nothing
+ case btrfsitem.ExtentDataRef:
+ o.wantOff(dlog.WithField(ctx, "wants", "referencing Inode"),
+ refBody.Root,
+ refBody.ObjectID,
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ o.wantOff(dlog.WithField(ctx, "wants", "referencing FileExtent"),
+ refBody.Root,
+ refBody.ObjectID,
+ btrfsitem.EXTENT_DATA_KEY,
+ uint64(refBody.Offset))
+ case btrfsitem.SharedDataRef:
+ // nothing
+ default:
+ // This is a panic because the item decoder should not emit a new
+ // type to ref.Body without this code also being updated.
+ panic(fmt.Errorf("should not happen: Extent: unexpected .Refs[%d].Body type %T", i, refBody))
+ }
+ }
+ case btrfsitem.ExtentCSum:
+ // nothing
+ case btrfsitem.ExtentDataRef:
+ o.want(dlog.WithField(ctx, "wants", "Extent being referenced"),
+ btrfsprim.EXTENT_TREE_OBJECTID,
+ item.Key.ObjectID,
+ btrfsitem.EXTENT_ITEM_KEY)
+ o.wantOff(dlog.WithField(ctx, "wants", "referencing Inode"),
+ body.Root,
+ body.ObjectID,
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ o.wantOff(dlog.WithField(ctx, "wants", "referencing FileExtent"),
+ body.Root,
+ body.ObjectID,
+ btrfsitem.EXTENT_DATA_KEY,
+ uint64(body.Offset))
+ case btrfsitem.FileExtent:
+ o.wantOff(dlog.WithField(ctx, "wants", "containing Inode"),
+ treeID,
+ item.Key.ObjectID,
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ switch body.Type {
+ case btrfsitem.FILE_EXTENT_INLINE:
+ // nothing
+ case btrfsitem.FILE_EXTENT_REG, btrfsitem.FILE_EXTENT_PREALLOC:
+ // TODO: Check if inodeBody.Flags.Has(btrfsitem.INODE_NODATASUM)
+ o.wantCSum(dlog.WithField(ctx, "wants", "data sum"),
+ roundDown(body.BodyExtent.DiskByteNr, btrfssum.BlockSize),
+ roundUp(body.BodyExtent.DiskByteNr.Add(body.BodyExtent.DiskNumBytes), btrfssum.BlockSize))
+ default:
+ o.fsErr(ctx, fmt.Errorf("FileExtent: unexpected body.Type=%v", body.Type))
+ }
+ case btrfsitem.FreeSpaceBitmap:
+ o.wantOff(dlog.WithField(ctx, "wants", "FreeSpaceInfo"),
+ treeID,
+ item.Key.ObjectID,
+ btrfsitem.FREE_SPACE_INFO_KEY,
+ item.Key.Offset)
+ case btrfsitem.FreeSpaceHeader:
+ o.wantOff(dlog.WithField(ctx, "wants", ".Location"),
+ treeID,
+ body.Location.ObjectID,
+ body.Location.ItemType,
+ body.Location.Offset)
+ case btrfsitem.FreeSpaceInfo:
+ if body.Flags.Has(btrfsitem.FREE_SPACE_USING_BITMAPS) {
+ o.wantOff(dlog.WithField(ctx, "wants", "FreeSpaceBitmap"),
+ treeID,
+ item.Key.ObjectID,
+ btrfsitem.FREE_SPACE_BITMAP_KEY,
+ item.Key.Offset)
+ }
+ case btrfsitem.Inode:
+ o.want(dlog.WithField(ctx, "wants", "backrefs"),
+ treeID, // TODO: validate the number of these against body.NLink
+ item.Key.ObjectID,
+ btrfsitem.INODE_REF_KEY)
+ o.wantFileExt(dlog.WithField(ctx, "wants", "FileExtents"),
+ treeID, item.Key.ObjectID, body.Size)
+ if body.BlockGroup != 0 {
+ o.want(dlog.WithField(ctx, "wants", "BlockGroup"),
+ btrfsprim.EXTENT_TREE_OBJECTID,
+ body.BlockGroup,
+ btrfsitem.BLOCK_GROUP_ITEM_KEY)
+ }
+ case btrfsitem.InodeRefs:
+ o.wantOff(dlog.WithField(ctx, "wants", "child Inode"),
+ treeID,
+ item.Key.ObjectID,
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ o.wantOff(dlog.WithField(ctx, "wants", "parent Inode"),
+ treeID,
+ btrfsprim.ObjID(item.Key.Offset),
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ for _, ref := range body {
+ o.wantOff(dlog.WithField(ctx, "wants", "DIR_ITEM"),
+ treeID,
+ btrfsprim.ObjID(item.Key.Offset),
+ btrfsitem.DIR_ITEM_KEY,
+ btrfsitem.NameHash(ref.Name))
+ o.wantOff(dlog.WithField(ctx, "wants", "DIR_INDEX"),
+ treeID,
+ btrfsprim.ObjID(item.Key.Offset),
+ btrfsitem.DIR_INDEX_KEY,
+ uint64(ref.Index))
+ }
+ case btrfsitem.Metadata:
+ for i, ref := range body.Refs {
+ switch refBody := ref.Body.(type) {
+ case nil:
+ // nothing
+ case btrfsitem.ExtentDataRef:
+ o.wantOff(dlog.WithField(ctx, "wants", "referencing INode"),
+ refBody.Root,
+ refBody.ObjectID,
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ o.wantOff(dlog.WithField(ctx, "wants", "referencing FileExtent"),
+ refBody.Root,
+ refBody.ObjectID,
+ btrfsitem.EXTENT_DATA_KEY,
+ uint64(refBody.Offset))
+ case btrfsitem.SharedDataRef:
+ // nothing
+ default:
+ // This is a panic because the item decoder should not emit a new
+ // type to ref.Body without this code also being updated.
+ panic(fmt.Errorf("should not happen: Metadata: unexpected .Refs[%d].Body type %T", i, refBody))
+ }
+ }
+ case btrfsitem.Root:
+ if body.RootDirID != 0 {
+ o.wantOff(dlog.WithField(ctx, "wants", "root directory"),
+ item.Key.ObjectID,
+ body.RootDirID,
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ }
+ case btrfsitem.RootRef:
+ var otherType btrfsprim.ItemType
+ var parent, child btrfsprim.ObjID
+ switch item.Key.ItemType {
+ case btrfsitem.ROOT_REF_KEY:
+ otherType = btrfsitem.ROOT_BACKREF_KEY
+ parent = item.Key.ObjectID
+ child = btrfsprim.ObjID(item.Key.Offset)
+ case btrfsitem.ROOT_BACKREF_KEY:
+ otherType = btrfsitem.ROOT_REF_KEY
+ parent = btrfsprim.ObjID(item.Key.Offset)
+ child = item.Key.ObjectID
+ default:
+ // This is a panic because the item decoder should not emit a
+ // btrfsitem.RootRef for other item types without this code also being
+ // updated.
+ panic(fmt.Errorf("should not happen: RootRef: unexpected ItemType=%v", item.Key.ItemType))
+ }
+ // sibling
+ o.wantOff(dlog.WithField(ctx, "wants", fmt.Sprintf("corresponding %v", otherType)),
+ treeID,
+ btrfsprim.ObjID(item.Key.Offset),
+ otherType,
+ uint64(item.Key.ObjectID))
+ // parent
+ o.want(dlog.WithField(ctx, "wants", "parent subvolume: Root"),
+ treeID,
+ parent,
+ btrfsitem.ROOT_ITEM_KEY)
+ o.wantOff(dlog.WithField(ctx, "wants", "parent subvolume: Inode of parent dir"),
+ parent,
+ body.DirID,
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ o.wantOff(dlog.WithField(ctx, "wants", "parent subvolume: DIR_ITEM in parent dir"),
+ parent,
+ body.DirID,
+ btrfsitem.DIR_ITEM_KEY,
+ btrfsitem.NameHash(body.Name))
+ o.wantOff(dlog.WithField(ctx, "wants", "parent subvolume: DIR_INDEX in parent dir"),
+ parent,
+ body.DirID,
+ btrfsitem.DIR_INDEX_KEY,
+ uint64(body.Sequence))
+ // child
+ o.want(dlog.WithField(ctx, "wants", "child subvolume: Root"),
+ treeID,
+ child,
+ btrfsitem.ROOT_ITEM_KEY)
+ case btrfsitem.SharedDataRef:
+ o.want(dlog.WithField(ctx, "wants", "Extent"),
+ btrfsprim.EXTENT_TREE_OBJECTID,
+ item.Key.ObjectID,
+ btrfsitem.EXTENT_ITEM_KEY)
+ case btrfsitem.UUIDMap:
+ o.want(dlog.WithField(ctx, "wants", "subvolume Root"),
+ btrfsprim.ROOT_TREE_OBJECTID,
+ body.ObjID,
+ btrfsitem.ROOT_ITEM_KEY)
+ case btrfsitem.Error:
+ o.fsErr(ctx, fmt.Errorf("error decoding item: %w", body.Err))
+ default:
+ // This is a panic because the item decoder should not emit new types without this
+ // code also being updated.
+ panic(fmt.Errorf("should not happen: unexpected item type: %T", body))
+ }
+}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go
index c32ae8e..3575534 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go
@@ -7,87 +7,34 @@ package rebuildnodes
import (
"context"
"fmt"
+ "time"
"github.com/datawire/dlib/dlog"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/uuidmap"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
"git.lukeshu.com/btrfs-progs-ng/lib/maps"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
-type nodeData struct {
- Level uint8
- Generation btrfsprim.Generation
- Owner btrfsprim.ObjID
- NumItems uint32
- MinItem btrfsprim.Key
- MaxItem btrfsprim.Key
-}
-
-type kpData struct {
- FromTree btrfsprim.ObjID
- FromNode btrfsvol.LogicalAddr
- FromItem int
-
- ToNode btrfsvol.LogicalAddr
- ToLevel uint8
- ToKey btrfsprim.Key
- ToGeneration btrfsprim.Generation
-}
-
-func (kp kpData) String() string {
- return fmt.Sprintf(`{t:%v,n:%v}[%d]->{n:%v,l:%v,g:%v,k:(%d,%v,%d)}`,
- kp.FromTree, kp.FromNode, kp.FromItem,
- kp.ToNode, kp.ToLevel, kp.ToGeneration,
- kp.ToKey.ObjectID,
- kp.ToKey.ItemType,
- kp.ToKey.Offset)
-}
-
-type nodeGraph struct {
- Nodes map[btrfsvol.LogicalAddr]nodeData
- BadNodes map[btrfsvol.LogicalAddr]error
- EdgesFrom map[btrfsvol.LogicalAddr][]*kpData
- EdgesTo map[btrfsvol.LogicalAddr][]*kpData
-}
-
-func (g nodeGraph) insertEdge(kp kpData) {
- ptr := &kp
- if kp.ToNode == 0 {
- panic("kp.ToNode should not be zero")
- }
- g.EdgesFrom[kp.FromNode] = append(g.EdgesFrom[kp.FromNode], ptr)
- g.EdgesTo[kp.ToNode] = append(g.EdgesTo[kp.ToNode], ptr)
+type scanResult struct {
+ uuidMap *uuidmap.UUIDMap
+ nodeGraph *graph.Graph
}
-func (g nodeGraph) insertTreeRoot(sb btrfstree.Superblock, treeID btrfsprim.ObjID) {
- treeInfo, err := btrfstree.LookupTreeRoot(nil, sb, treeID)
- if err != nil {
- // This shouldn't ever happen for treeIDs that are
- // mentioned directly in the superblock; which are the
- // only trees for which we should call
- // .insertTreeRoot().
- panic(fmt.Errorf("LookupTreeRoot(%v): %w", treeID, err))
- }
- if treeInfo.RootNode == 0 {
- return
- }
- g.insertEdge(kpData{
- FromTree: treeID,
- ToNode: treeInfo.RootNode,
- ToLevel: treeInfo.Level,
- ToGeneration: treeInfo.Generation,
- })
+type scanStats struct {
+ N, D int
}
-type scanResult struct {
- uuidMap
- nodeGraph
+func (s scanStats) String() string {
+ return fmt.Sprintf("... %v%% (%v/%v)",
+ int(100*float64(s.N)/float64(s.D)),
+ s.N, s.D)
}
func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.ScanDevicesResult) (*scanResult, error) {
@@ -98,51 +45,19 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.Sca
return nil, err
}
- lastPct := -1
total := countNodes(scanResults)
done := 0
- progress := func() {
- pct := int(100 * float64(done) / float64(total))
- if pct != lastPct || done == total {
- dlog.Infof(ctx, "... %v%% (%v/%v)",
- pct, done, total)
- lastPct = pct
- }
+ progressWriter := textui.NewProgress[scanStats](ctx, dlog.LogLevelInfo, 1*time.Second)
+ progress := func(done, total int) {
+ progressWriter.Set(scanStats{N: done, D: total})
}
ret := &scanResult{
- uuidMap: uuidMap{
- ObjID2UUID: make(map[btrfsprim.ObjID]btrfsprim.UUID),
- UUID2ObjID: make(map[btrfsprim.UUID]btrfsprim.ObjID),
- TreeParent: make(map[btrfsprim.ObjID]btrfsprim.UUID),
-
- SeenTrees: make(containers.Set[btrfsprim.ObjID]),
- },
- nodeGraph: nodeGraph{
- Nodes: make(map[btrfsvol.LogicalAddr]nodeData),
- BadNodes: make(map[btrfsvol.LogicalAddr]error),
- EdgesFrom: make(map[btrfsvol.LogicalAddr][]*kpData),
- EdgesTo: make(map[btrfsvol.LogicalAddr][]*kpData),
- },
+ uuidMap: uuidmap.New(),
+ nodeGraph: graph.New(*sb),
}
- // These 4 trees are mentioned directly in the superblock, so
- // they are always seen.
- //
- // 1
- ret.SeenTrees.Insert(btrfsprim.ROOT_TREE_OBJECTID)
- ret.insertTreeRoot(*sb, btrfsprim.ROOT_TREE_OBJECTID)
- // 2
- ret.SeenTrees.Insert(btrfsprim.CHUNK_TREE_OBJECTID)
- ret.insertTreeRoot(*sb, btrfsprim.CHUNK_TREE_OBJECTID)
- // 3
- ret.SeenTrees.Insert(btrfsprim.TREE_LOG_OBJECTID)
- ret.insertTreeRoot(*sb, btrfsprim.TREE_LOG_OBJECTID)
- // 4
- ret.SeenTrees.Insert(btrfsprim.BLOCK_GROUP_TREE_OBJECTID)
- ret.insertTreeRoot(*sb, btrfsprim.BLOCK_GROUP_TREE_OBJECTID)
-
- progress()
+ progress(done, total)
for _, devResults := range scanResults {
for laddr := range devResults.FoundNodes {
nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, *sb, laddr, btrfstree.NodeExpectations{
@@ -152,95 +67,34 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.Sca
return nil, err
}
- // UUID map rebuilding
- for _, item := range nodeRef.Data.BodyLeaf {
- switch itemBody := item.Body.(type) {
- case btrfsitem.Root:
- if err := maybeSet("ObjID2UUID", ret.ObjID2UUID, item.Key.ObjectID, itemBody.UUID); err != nil {
- return nil, err
- }
- if itemBody.UUID != (btrfsprim.UUID{}) {
- if err := maybeSet("UUID2ObjID", ret.UUID2ObjID, itemBody.UUID, item.Key.ObjectID); err != nil {
- return nil, err
- }
- }
- if err := maybeSet("ParentUUID", ret.TreeParent, item.Key.ObjectID, itemBody.ParentUUID); err != nil {
- return nil, err
- }
- ret.SeenTrees.Insert(item.Key.ObjectID)
- // graph building
- ret.insertEdge(kpData{
- FromTree: item.Key.ObjectID,
- ToNode: itemBody.ByteNr,
- ToLevel: itemBody.Level,
- ToGeneration: itemBody.Generation,
- })
- case btrfsitem.UUIDMap:
- uuid := btrfsitem.KeyToUUID(item.Key)
- if err := maybeSet("ObjID2UUID", ret.ObjID2UUID, itemBody.ObjID, uuid); err != nil {
- return nil, err
- }
- if err := maybeSet("UUID2ObjID", ret.UUID2ObjID, uuid, itemBody.ObjID); err != nil {
- return nil, err
- }
- }
- }
- ret.SeenTrees.Insert(nodeRef.Data.Head.Owner)
-
- // graph building
- ret.Nodes[laddr] = nodeData{
- Level: nodeRef.Data.Head.Level,
- Generation: nodeRef.Data.Head.Generation,
- Owner: nodeRef.Data.Head.Owner,
- NumItems: nodeRef.Data.Head.NumItems,
- MinItem: func() btrfsprim.Key { k, _ := nodeRef.Data.MinItem(); return k }(),
- MaxItem: func() btrfsprim.Key { k, _ := nodeRef.Data.MaxItem(); return k }(),
- }
- for i, kp := range nodeRef.Data.BodyInternal {
- ret.insertEdge(kpData{
- FromTree: nodeRef.Data.Head.Owner,
- FromNode: laddr,
- FromItem: i,
- ToNode: kp.BlockPtr,
- ToLevel: nodeRef.Data.Head.Level - 1,
- ToKey: kp.Key,
- ToGeneration: kp.Generation,
- })
+ if err := ret.uuidMap.InsertNode(nodeRef); err != nil {
+ return nil, err
}
+ ret.nodeGraph.InsertNode(nodeRef)
+
done++
- progress()
+ progress(done, total)
}
}
-
if done != total {
panic("should not happen")
}
+ progressWriter.Done()
- missing := ret.missingRootItems()
+ missing := ret.uuidMap.MissingRootItems()
if len(missing) > 0 {
dlog.Errorf(ctx, "... could not find root items for %d trees: %v", len(missing), maps.SortedKeys(missing))
}
dlog.Info(ctx, "... done reading node data")
+ progressWriter = textui.NewProgress[scanStats](ctx, dlog.LogLevelInfo, 1*time.Second)
dlog.Infof(ctx, "Checking keypointers for dead-ends...")
- total = len(ret.EdgesTo)
- done = 0
- progress()
- for laddr := range ret.EdgesTo {
- if _, ok := ret.Nodes[laddr]; !ok {
- _, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, *sb, laddr, btrfstree.NodeExpectations{
- LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr},
- })
- if err == nil {
- return nil, fmt.Errorf("node@%v exists but was not in node scan results", laddr)
- }
- ret.BadNodes[laddr] = err
- }
- done++
- progress()
+ if err := ret.nodeGraph.FinalCheck(fs, *sb, progress); err != nil {
+ return nil, err
}
+ progressWriter.Done()
dlog.Info(ctx, "... done checking keypointers")
return ret, nil
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/treeancestors.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/treeancestors.go
deleted file mode 100644
index 42228ae..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/treeancestors.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-/*
-import (
- "context"
-
- //"github.com/datawire/dlib/dlog"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
-)
-
-func getTreeAncestors(ctx context.Context, scanData scanResult) map[btrfsprim.ObjID]containers.Set[btrfsprim.ObjID] {
- treeAncestors := make(map[btrfsprim.ObjID]containers.Set[btrfsprim.ObjID])
-
- for laddr, node := range scanData.Nodes {
- if _, ok := treeAncestors[node.Owner]; !ok {
- treeAncestors[node.Owner] = make(containers.Set[btrfsprim.ObjID])
- }
- for _, edge := range scanData.EdgesTo[laddr] {
- if edge.FromTree != node.Owner {
- if err := checkNodeExpectations(*edge, node); err != nil {
- //dlog.Errorf(ctx, "... ignoring keypointer %v because %v", edge.String(), err)
- } else {
- treeAncestors[node.Owner].Insert(edge.FromTree)
- }
- }
- }
- }
-
- return treeAncestors
-}
-
-func (m uuidMap) considerAncestors(ctx context.Context, treeAncestors map[btrfsprim.ObjID]containers.Set[btrfsprim.ObjID]) {
- if missing := m.missingRootItems(); len(missing) == 0 {
- return
- } else {
- dlog.Infof(ctx, "Rebuilding %d root items...", len(missing))
- }
-
- fa := newFullAncestorLister(m, treeAncestors)
-
- for _, missingRoot := range maps.SortedKeys(m.missingRootItems()) {
- if _, ok := m.TreeParent[missingRoot]; ok {
- // This one is incomplete because it doesn't have a UUID, not
- // because it doesn't have a parent.
- continue
- }
- potentialParents := make(containers.Set[btrfsprim.ObjID])
- potentialParents.InsertFrom(fa.GetFullAncestors(missingRoot))
- for _, ancestor := range maps.SortedKeys(fa.GetFullAncestors(missingRoot)) {
- potentialParents.DeleteFrom(fa.GetFullAncestors(ancestor))
- }
- if len(potentialParents) == 1 {
- parent := potentialParents.TakeOne()
- dlog.Infof(ctx, "... the parent of %v is %v", missingRoot, parent)
- parentUUID, ok := m.ObjID2UUID[parent]
- if !ok {
- dlog.Errorf(ctx, "... but can't synthesize a root item because UUID of %v is unknown", parent)
- continue
- }
- m.TreeParent[missingRoot] = parentUUID
- }
- }
-
- if missing := m.missingRootItems(); len(missing) > 0 {
- dlog.Errorf(ctx, "... could not rebuild root items for %d trees: %v", len(missing), maps.SortedKeys(missing))
- } else {
- dlog.Info(ctx, "... rebuilt all root items")
- }
-}
-*/
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go
index 7e0eab1..8c43dad 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go
@@ -5,43 +5,11 @@
package rebuildnodes
import (
- "fmt"
+ "golang.org/x/exp/constraints"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
)
-func ptrTo[T any](x T) *T {
- return &x
-}
-
-func maybeSet[K, V comparable](name string, m map[K]V, k K, v V) error {
- if other, conflict := m[k]; conflict && other != v {
- return fmt.Errorf("conflict: %s %v can't have both %v and %v", name, k, other, v)
- }
- m[k] = v
- return nil
-}
-
-/*
-var maxKey = btrfsprim.Key{
- ObjectID: math.MaxUint64,
- ItemType: math.MaxUint8,
- Offset: math.MaxUint64,
-}
-
-func keyMm(key btrfsprim.Key) btrfsprim.Key {
- switch {
- case key.Offset > 0:
- key.Offset--
- case key.ItemType > 0:
- key.ItemType--
- case key.ObjectID > 0:
- key.ObjectID--
- }
- return key
-}
-*/
-
func countNodes(nodeScanResults btrfsinspect.ScanDevicesResult) int {
var cnt int
for _, devResults := range nodeScanResults {
@@ -49,3 +17,11 @@ func countNodes(nodeScanResults btrfsinspect.ScanDevicesResult) int {
}
return cnt
}
+
+func roundDown[T constraints.Integer](n, d T) T {
+ return (n / d) * d
+}
+
+func roundUp[T constraints.Integer](n, d T) T {
+ return ((n + d - 1) / d) * d
+}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/uuidmap.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/uuidmap.go
deleted file mode 100644
index 7be903a..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/uuidmap.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-import (
- "fmt"
- "strings"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/maps"
-)
-
-type uuidMap struct {
- ObjID2UUID map[btrfsprim.ObjID]btrfsprim.UUID
- UUID2ObjID map[btrfsprim.UUID]btrfsprim.ObjID
- TreeParent map[btrfsprim.ObjID]btrfsprim.UUID
-
- SeenTrees containers.Set[btrfsprim.ObjID]
-}
-
-func (m uuidMap) missingRootItems() containers.Set[btrfsprim.ObjID] {
- missing := make(containers.Set[btrfsprim.ObjID])
- for treeID := range m.SeenTrees {
- if _, ok := m.ObjID2UUID[treeID]; !ok && treeID != btrfsprim.ROOT_TREE_OBJECTID {
- missing.Insert(treeID)
- continue
- }
- if _, ok := m.TreeParent[treeID]; !ok && treeID >= btrfsprim.FIRST_FREE_OBJECTID && treeID <= btrfsprim.LAST_FREE_OBJECTID {
- missing.Insert(treeID)
- }
- }
- return missing
-}
-
-// ParentTree implements btrfstree.NodeFile.
-func (m uuidMap) ParentTree(tree btrfsprim.ObjID) (btrfsprim.ObjID, bool) {
- if tree < btrfsprim.FIRST_FREE_OBJECTID || tree > btrfsprim.LAST_FREE_OBJECTID {
- // no parent
- return 0, true
- }
- parentUUID, ok := m.TreeParent[tree]
- if !ok {
- // could not look up parent info
- return 0, false
- }
- if parentUUID == (btrfsprim.UUID{}) {
- // no parent
- return 0, true
- }
- parentObjID, ok := m.UUID2ObjID[parentUUID]
- if !ok {
- // could not look up parent info
- return 0, false
- }
- return parentObjID, true
-}
-
-type fullAncestorLister struct {
- uuidMap uuidMap
- treeAncestors map[btrfsprim.ObjID]containers.Set[btrfsprim.ObjID]
-
- memos map[btrfsprim.ObjID]containers.Set[btrfsprim.ObjID]
-}
-
-func newFullAncestorLister(uuidMap uuidMap, treeAncestors map[btrfsprim.ObjID]containers.Set[btrfsprim.ObjID]) fullAncestorLister {
- return fullAncestorLister{
- uuidMap: uuidMap,
- treeAncestors: treeAncestors,
- memos: make(map[btrfsprim.ObjID]containers.Set[btrfsprim.ObjID]),
- }
-}
-
-type loopError []btrfsprim.ObjID
-
-func (le loopError) Error() string {
- var buf strings.Builder
- buf.WriteString("loop: ")
- for i, treeID := range le {
- if i > 0 {
- buf.WriteString("->")
- }
- fmt.Fprintf(&buf, "%d", treeID)
- }
- return buf.String()
-}
-
-func (fa fullAncestorLister) GetFullAncestors(child btrfsprim.ObjID) containers.Set[btrfsprim.ObjID] {
- if memoized, ok := fa.memos[child]; ok {
- if memoized == nil {
- panic(loopError{child})
- }
- return memoized
- }
- fa.memos[child] = nil
- defer func() {
- if r := recover(); r != nil {
- if le, ok := r.(loopError); ok {
- r = append(loopError{child}, le...)
- }
- panic(r)
- }
- }()
-
- ret := make(containers.Set[btrfsprim.ObjID])
- defer func() {
- fa.memos[child] = ret
- }()
-
- // Try to use '.uuidMap' instead of '.treeAncestors' if possible.
- knownAncestors := make(containers.Set[btrfsprim.ObjID])
- if parent, ok := fa.uuidMap.ParentTree(child); ok {
- if parent == 0 {
- return ret
- }
- knownAncestors.Insert(parent)
- } else {
- knownAncestors.InsertFrom(fa.treeAncestors[child])
- }
-
- for _, ancestor := range maps.SortedKeys(knownAncestors) {
- ret.Insert(ancestor)
- ret.InsertFrom(fa.GetFullAncestors(ancestor))
- }
- return ret
-}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/uuidmap/scan.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/uuidmap/scan.go
new file mode 100644
index 0000000..98ed097
--- /dev/null
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/uuidmap/scan.go
@@ -0,0 +1,73 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package uuidmap
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
+ "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
+)
+
+func maybeSet[K, V comparable](name string, m map[K]V, k K, v V) error {
+ if other, conflict := m[k]; conflict && other != v {
+ return fmt.Errorf("conflict: %s %v can't have both %v and %v", name, k, other, v)
+ }
+ m[k] = v
+ return nil
+}
+
+func New() *UUIDMap {
+ ret := &UUIDMap{
+ ObjID2UUID: make(map[btrfsprim.ObjID]btrfsprim.UUID),
+ UUID2ObjID: make(map[btrfsprim.UUID]btrfsprim.ObjID),
+ TreeParent: make(map[btrfsprim.ObjID]btrfsprim.UUID),
+
+ SeenTrees: make(containers.Set[btrfsprim.ObjID]),
+ }
+
+ // These 4 trees are mentioned directly in the superblock, so
+ // they are always seen.
+ ret.SeenTrees.Insert(btrfsprim.ROOT_TREE_OBJECTID)
+ ret.SeenTrees.Insert(btrfsprim.CHUNK_TREE_OBJECTID)
+ ret.SeenTrees.Insert(btrfsprim.TREE_LOG_OBJECTID)
+ ret.SeenTrees.Insert(btrfsprim.BLOCK_GROUP_TREE_OBJECTID)
+
+ return ret
+}
+
+func (m *UUIDMap) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) error {
+ for _, item := range nodeRef.Data.BodyLeaf {
+ switch itemBody := item.Body.(type) {
+ case btrfsitem.Root:
+ if err := maybeSet("ObjID2UUID", m.ObjID2UUID, item.Key.ObjectID, itemBody.UUID); err != nil {
+ return err
+ }
+ if itemBody.UUID != (btrfsprim.UUID{}) {
+ if err := maybeSet("UUID2ObjID", m.UUID2ObjID, itemBody.UUID, item.Key.ObjectID); err != nil {
+ return err
+ }
+ }
+ if err := maybeSet("ParentUUID", m.TreeParent, item.Key.ObjectID, itemBody.ParentUUID); err != nil {
+ return err
+ }
+ m.SeenTrees.Insert(item.Key.ObjectID)
+ case btrfsitem.UUIDMap:
+ uuid := btrfsitem.KeyToUUID(item.Key)
+ if err := maybeSet("ObjID2UUID", m.ObjID2UUID, itemBody.ObjID, uuid); err != nil {
+ return err
+ }
+ if err := maybeSet("UUID2ObjID", m.UUID2ObjID, uuid, itemBody.ObjID); err != nil {
+ return err
+ }
+ }
+ }
+ m.SeenTrees.Insert(nodeRef.Data.Head.Owner)
+ return nil
+}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/uuidmap/uuidmap.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/uuidmap/uuidmap.go
new file mode 100644
index 0000000..32a34d1
--- /dev/null
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/uuidmap/uuidmap.go
@@ -0,0 +1,55 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package uuidmap
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
+)
+
+type UUIDMap struct {
+ ObjID2UUID map[btrfsprim.ObjID]btrfsprim.UUID
+ UUID2ObjID map[btrfsprim.UUID]btrfsprim.ObjID
+ TreeParent map[btrfsprim.ObjID]btrfsprim.UUID
+
+ SeenTrees containers.Set[btrfsprim.ObjID]
+}
+
+func (m UUIDMap) MissingRootItems() containers.Set[btrfsprim.ObjID] {
+ missing := make(containers.Set[btrfsprim.ObjID])
+ for treeID := range m.SeenTrees {
+ if _, ok := m.ObjID2UUID[treeID]; !ok && treeID != btrfsprim.ROOT_TREE_OBJECTID {
+ missing.Insert(treeID)
+ continue
+ }
+ if _, ok := m.TreeParent[treeID]; !ok && treeID >= btrfsprim.FIRST_FREE_OBJECTID && treeID <= btrfsprim.LAST_FREE_OBJECTID {
+ missing.Insert(treeID)
+ }
+ }
+ return missing
+}
+
+// ParentTree implements btrfstree.NodeFile.
+func (m UUIDMap) ParentTree(tree btrfsprim.ObjID) (btrfsprim.ObjID, bool) {
+ if tree < btrfsprim.FIRST_FREE_OBJECTID || tree > btrfsprim.LAST_FREE_OBJECTID {
+ // no parent
+ return 0, true
+ }
+ parentUUID, ok := m.TreeParent[tree]
+ if !ok {
+ // could not look up parent info
+ return 0, false
+ }
+ if parentUUID == (btrfsprim.UUID{}) {
+ // no parent
+ return 0, true
+ }
+ parentObjID, ok := m.UUID2ObjID[parentUUID]
+ if !ok {
+ // could not look up parent info
+ return 0, false
+ }
+ return parentObjID, true
+}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/visualizenodes.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/visualizenodes.go
deleted file mode 100644
index fc9d19b..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/visualizenodes.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-import (
- "archive/zip"
- "context"
- "fmt"
- "html"
- "io"
- "strings"
-
- "github.com/datawire/dlib/derror"
- "github.com/datawire/dlib/dlog"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/maps"
-)
-
-func getCliques(scanData scanResult) map[btrfsprim.ObjID]*containers.Set[btrfsprim.ObjID] {
- cliques := make(map[btrfsprim.ObjID]*containers.Set[btrfsprim.ObjID])
-
- // UUID map
- lister := newFullAncestorLister(scanData.uuidMap, map[btrfsprim.ObjID]containers.Set[btrfsprim.ObjID]{})
- for _, treeID := range maps.SortedKeys(scanData.uuidMap.SeenTrees) {
- clique := ptrTo(make(containers.Set[btrfsprim.ObjID]))
- clique.Insert(treeID)
- clique.InsertFrom(lister.GetFullAncestors(treeID))
- for _, id := range maps.SortedKeys(*clique) {
- if existingClique, ok := cliques[id]; ok {
- clique.InsertFrom(*existingClique)
- }
- cliques[id] = clique
- }
- }
-
- // node graph
- for _, laddr := range maps.SortedKeys(scanData.nodeGraph.Nodes) {
- clique := ptrTo(make(containers.Set[btrfsprim.ObjID]))
- clique.Insert(scanData.nodeGraph.Nodes[laddr].Owner)
- for _, edge := range scanData.nodeGraph.EdgesTo[laddr] {
- clique.Insert(edge.FromTree)
- }
- for _, id := range maps.SortedKeys(*clique) {
- if existingClique, ok := cliques[id]; ok {
- clique.InsertFrom(*existingClique)
- }
- cliques[id] = clique
- }
- }
- return cliques
-}
-
-func getCliqueID(cliques map[btrfsprim.ObjID]*containers.Set[btrfsprim.ObjID], treeID btrfsprim.ObjID) btrfsprim.ObjID {
- clique, ok := cliques[treeID]
- if !ok {
- panic(fmt.Errorf("tree ID %v was not in .SeenTrees", treeID))
- }
- return maps.SortedKeys(*clique)[0]
-}
-
-func VisualizeNodes(ctx context.Context, out io.Writer, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) error {
- scanData, err := ScanDevices(ctx, fs, nodeScanResults)
- if err != nil {
- return err
- }
-
- ////////////////////////////////////////////////////////////////////////////////////////////
-
- dlog.Info(ctx, "Building cliques...")
- cliques := getCliques(*scanData)
- cliqueIDs := make(containers.Set[btrfsprim.ObjID])
- for treeID := range cliques {
- cliqueIDs.Insert(getCliqueID(cliques, treeID))
- }
- dlog.Infof(ctx, "... built %d cliques of %d trees", len(cliqueIDs), len(cliques))
-
- ////////////////////////////////////////////////////////////////////////////////////////////
-
- dlog.Info(ctx, "Building graphviz graphs...")
-
- type graph struct {
- nodes map[btrfsprim.ObjID]containers.Set[string] // keyed by treeID
- badNodes containers.Set[string]
- edges containers.Set[string]
- }
- graphs := make(map[btrfsprim.ObjID]graph, len(cliques)) // keyed by cliqueID
- for cliqueID := range cliqueIDs {
- graphs[cliqueID] = graph{
- nodes: make(map[btrfsprim.ObjID]containers.Set[string]),
- badNodes: make(containers.Set[string]),
- edges: make(containers.Set[string]),
- }
- }
-
- dlog.Infof(ctx, "... processing %d nodes...", len(scanData.Nodes))
-
- for _, laddr := range maps.SortedKeys(scanData.Nodes) {
- nodeData := scanData.Nodes[laddr]
- cliqueID := getCliqueID(cliques, nodeData.Owner)
- graph, ok := graphs[cliqueID]
- if !ok {
- panic(cliqueID)
- }
- if graph.nodes[nodeData.Owner] == nil {
- graph.nodes[nodeData.Owner] = make(containers.Set[string])
- }
-
- var buf strings.Builder
- fmt.Fprintf(&buf, `n%d [shape=record label="{laddr=%v\lgen=%v\llvl=%v\litems=%v\l|{`,
- laddr,
- laddr,
- nodeData.Generation,
- nodeData.Level,
- nodeData.NumItems)
- if nodeData.Level == 0 {
- buf.WriteString("leaf")
- } else {
- for i := uint32(0); i < nodeData.NumItems; i++ {
- if i == 0 {
- fmt.Fprintf(&buf, "<p%[1]d>%[1]d", i)
- } else {
- fmt.Fprintf(&buf, "|<p%[1]d>%[1]d", i)
- }
- }
- }
- buf.WriteString(`}}"]`)
- graph.nodes[nodeData.Owner].Insert(buf.String())
-
- if len(scanData.EdgesTo[laddr]) == 0 {
- graph.edges.Insert(fmt.Sprintf("orphanRoot -> n%d", laddr))
- }
-
- graphs[cliqueID] = graph
- }
-
- dlog.Infof(ctx, "... processing %d bad nodes...", len(scanData.BadNodes))
-
- for _, laddr := range maps.SortedKeys(scanData.BadNodes) {
- cliqueIDs := make(containers.Set[btrfsprim.ObjID])
- for _, edge := range scanData.EdgesTo[laddr] {
- cliqueIDs.Insert(getCliqueID(cliques, edge.FromTree))
- }
- if len(cliqueIDs) != 1 {
- dlog.Errorf(ctx, "couldn't assign bad node@%v to a clique: %v", laddr, maps.SortedKeys(cliqueIDs))
- continue
- }
-
- cliqueID := cliqueIDs.TakeOne()
- graph, ok := graphs[cliqueID]
- if !ok {
- panic(cliqueID)
- }
- graph.badNodes.Insert(fmt.Sprintf(`n%d [shape=star color=red label="%v\l\lerr=%s"]`,
- laddr, laddr, gvEscape(scanData.BadNodes[laddr].Error()+"\n")))
- graphs[cliqueID] = graph
- }
-
- numEdges := 0
- for _, kps := range scanData.EdgesFrom {
- numEdges += (len(kps))
- }
- dlog.Infof(ctx, "... processing %d keypointers...", numEdges)
-
- for _, laddr := range maps.SortedKeys(scanData.EdgesFrom) {
- for _, kp := range scanData.EdgesFrom[laddr] {
- cliqueID := getCliqueID(cliques, kp.FromTree)
- graph, ok := graphs[cliqueID]
- if !ok {
- panic(cliqueID)
- }
-
- var buf strings.Builder
- if kp.FromNode == 0 {
- if graph.nodes[kp.FromTree] == nil {
- graph.nodes[kp.FromTree] = make(containers.Set[string])
- }
- graph.nodes[kp.FromTree].Insert(fmt.Sprintf(`t%d [label="root of %s"]`, kp.FromTree, gvEscape(kp.FromTree.String())))
- fmt.Fprintf(&buf, "t%d", kp.FromTree)
- } else {
- fmt.Fprintf(&buf, "n%d:p%d", kp.FromNode, kp.FromItem)
- }
- fmt.Fprintf(&buf, ` -> n%d`, kp.ToNode)
-
- var err error
- toNode, ok := scanData.Nodes[kp.ToNode]
- if !ok {
- err = scanData.BadNodes[kp.ToNode]
- } else {
- err = checkNodeExpectations(*kp, toNode)
- }
- if err != nil {
- fmt.Fprintf(&buf, ` [label="key=(%d,%v,%d) gen=%v\l\lerr=%s" color=red]`,
- kp.ToKey.ObjectID,
- kp.ToKey.ItemType,
- kp.ToKey.Offset,
- kp.ToGeneration,
- gvEscape(err.Error()+"\n"))
- }
-
- graph.edges.Insert(buf.String())
- graphs[cliqueID] = graph
- }
- }
-
- ////////////////////////////////////////////////////////////////////////////////////////////
-
- dlog.Info(ctx, "Writing graphviz output...")
-
- zw := zip.NewWriter(out)
- for _, cliqueID := range maps.SortedKeys(graphs) {
- if err := func() (err error) {
- graph := graphs[cliqueID]
-
- buf, err := zw.Create(fmt.Sprintf("%d.dot", cliqueID))
- if err != nil {
- return err
- }
- if _, err := fmt.Fprintf(buf, "strict digraph clique%d {\n", cliqueID); err != nil {
- return err
- }
- if _, err := fmt.Fprintf(buf, "rankdir=LR;\n nodesep=0.1;\n ranksep=25;\n splines=line;\n"); err != nil {
- return err
- }
- for _, treeID := range maps.SortedKeys(graph.nodes) {
- nodes := graph.nodes[treeID]
-
- if _, err := fmt.Fprintf(buf, " subgraph cluster_tree%d {\n", treeID); err != nil {
- return err
- }
- for _, node := range maps.SortedKeys(nodes) {
- if _, err := fmt.Fprintf(buf, " %s;\n", node); err != nil {
- return err
- }
- }
- if _, err := fmt.Fprintln(buf, " }"); err != nil {
- return err
- }
- }
- for _, node := range maps.SortedKeys(graph.badNodes) {
- if _, err := fmt.Fprintf(buf, " %s;\n", node); err != nil {
- return err
- }
- }
- for _, edge := range maps.SortedKeys(graph.edges) {
- if _, err := fmt.Fprintf(buf, " %s;\n", edge); err != nil {
- return err
- }
- }
-
- if _, err := fmt.Fprintln(buf, "}"); err != nil {
- return err
- }
- return nil
- }(); err != nil {
- return err
- }
- }
- if err := zw.Close(); err != nil {
- return err
- }
-
- dlog.Info(ctx, "... done writing")
-
- return nil
-}
-
-func gvEscape(str string) string {
- str = html.EscapeString(str)
- str = strings.ReplaceAll(str, "\n", `\l`)
- str = strings.ReplaceAll(str, `\n`, `\l`)
- return str
-}
-
-func checkNodeExpectations(kp kpData, toNode nodeData) error {
- var errs derror.MultiError
- if toNode.Level != kp.ToLevel {
- errs = append(errs, fmt.Errorf("kp.level=%v != node.level=%v",
- kp.ToLevel, toNode.Level))
- }
- if toNode.Generation != kp.ToGeneration {
- errs = append(errs, fmt.Errorf("kp.generation=%v != node.generation=%v",
- kp.ToGeneration, toNode.Generation))
- }
- if toNode.NumItems == 0 {
- errs = append(errs, fmt.Errorf("node.num_items=0"))
- } else if kp.ToKey != (btrfsprim.Key{}) && toNode.MinItem != kp.ToKey {
- errs = append(errs, fmt.Errorf("kp.key=%v != node.items[0].key=%v",
- kp.ToKey, toNode.MinItem))
- }
- if len(errs) > 0 {
- return errs
- }
- return nil
-}