author     Luke Shumaker <lukeshu@lukeshu.com>   2022-10-17 00:31:38 -0600
committer  Luke Shumaker <lukeshu@lukeshu.com>   2022-12-20 20:02:11 -0700
commit     6c03becc595c9938644e767c852a2627d4a265d3 (patch)
tree       f5003fe1b9b22f7c97b98fa9b888606869bc2031 /lib
parent     580189ee3e43d5595a8700ee21b8900b0dd5389d (diff)
rebuildnodes: wip: New rebuild-nodes implementation
Diffstat (limited to 'lib')
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuildnodes.go    102
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuilttrees.go     89
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit.go       139
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit_test.go   82
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s4_reattach.go     161
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/orphans.go             102
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go             107
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go       322
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/treeancestors.go        76
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go                 30
-rw-r--r--  lib/btrfsprogs/btrfsutil/broken_btree.go                          6
11 files changed, 547 insertions, 669 deletions
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuildnodes.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuildnodes.go
deleted file mode 100644
index 1a9f487..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuildnodes.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-/*
-import (
- "context"
- "errors"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsutil"
- "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
-)
-
-func RebuildNodes(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) (map[btrfsvol.LogicalAddr]*RebuiltNode, error) {
- uuidMap, err := buildUUIDMap(ctx, fs, nodeScanResults)
- if err != nil {
- return nil, err
- }
-
- nfs := &RebuiltTrees{
- inner: fs,
- uuidMap: uuidMap,
- }
-
- orphanedNodes, badNodes, treeAncestors, err := classifyNodes(ctx, nfs, nodeScanResults)
- if err != nil {
- return nil, err
- }
-
- uuidMap.considerAncestors(ctx, treeAncestors)
-
- rebuiltNodes, err := reInitBrokenNodes(ctx, nfs, badNodes)
- if err != nil {
- return nil, err
- }
-
- if err := reAttachNodes(ctx, nfs, orphanedNodes, rebuiltNodes); err != nil {
- return nil, err
- }
-
- return rebuiltNodes, nil
-}
-
-func spanOfTreePath(fs _FS, path btrfstree.TreePath) (btrfsprim.Key, btrfsprim.Key) {
- // tree root error
- if len(path) == 0 {
- return btrfsprim.Key{}, maxKey
- }
-
- // item error
- if path.Node(-1).ToNodeAddr == 0 {
- // If we got an item error, then the node is readable
- node, _ := fs.ReadNode(path.Parent())
- key := node.Data.BodyLeaf[path.Node(-1).FromItemIdx].Key
- return key, key
- }
-
- // node error
- //
- // assume that path.Node(-1).ToNodeAddr is not readable, but that path.Node(-2).ToNodeAddr is.
- if len(path) == 1 {
- return btrfsprim.Key{}, maxKey
- }
- parentNode, _ := fs.ReadNode(path.Parent())
- low := parentNode.Data.BodyInternal[path.Node(-1).FromItemIdx].Key
- var high btrfsprim.Key
- if path.Node(-1).FromItemIdx+1 < len(parentNode.Data.BodyInternal) {
- high = keyMm(parentNode.Data.BodyInternal[path.Node(-1).FromItemIdx+1].Key)
- } else {
- parentPath := path.Parent().DeepCopy()
- _, high = spanOfTreePath(fs, parentPath)
- }
- return low, high
-}
-
-func getChunkTreeUUID(ctx context.Context, fs _FS) (btrfsprim.UUID, bool) {
- ctx, cancel := context.WithCancel(ctx)
- var ret btrfsprim.UUID
- var retOK bool
- btrfsutil.WalkAllTrees(ctx, fs, btrfsutil.WalkAllTreesHandler{
- TreeWalkHandler: btrfstree.TreeWalkHandler{
- Node: func(path btrfstree.TreePath, node *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) error {
- ret = node.Data.Head.ChunkTreeUUID
- retOK = true
- cancel()
- return nil
- },
- },
- Err: func(err *btrfsutil.WalkError) {
- // do nothing
- },
- })
- return ret, retOK
-}
-*/
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuilttrees.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuilttrees.go
deleted file mode 100644
index 7e1a7e9..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuilttrees.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-/*
-import (
- "context"
- "fmt"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
-)
-
-type RebuiltTrees struct {
- inner *btrfs.FS
- uuidMap uuidMap
- nodes map[btrfsvol.LogicalAddr]*RebuiltNode
-}
-
-type _FS interface {
- diskio.File[btrfsvol.LogicalAddr]
- btrfstree.NodeFile
- btrfstree.NodeSource
- btrfstree.TreeOperator
-}
-
-// diskio.File
-
-func (fs *RebuiltTrees) Name() string { return fs.inner.Name() }
-func (fs *RebuiltTrees) Size() btrfsvol.LogicalAddr { return fs.inner.Size() }
-func (fs *RebuiltTrees) Close() error { return fs.inner.Close() }
-func (fs *RebuiltTrees) ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error) {
- sb, err := fs.Superblock()
- if err != nil {
- return 0, err
- }
- if rebuilt, ok := fs.nodes[off]; ok && len(p) == int(sb.NodeSize) {
- rebuilt.Node.Head.Checksum, err = rebuilt.Node.CalculateChecksum()
- if err != nil {
- panic(fmt.Errorf("should not happen: %w", err))
- }
- bs, err := rebuilt.Node.MarshalBinary()
- if err != nil {
- panic(fmt.Errorf("should not happen: %w", err))
- }
- if len(p) != len(bs) {
- panic(fmt.Errorf("should not happen: sb.NodeSize=%v but node marshaled to %v", sb.NodeSize, len(bs)))
- }
- return copy(p, bs), nil
- }
- return fs.inner.ReadAt(p, off)
-}
-func (fs *RebuiltTrees) WriteAt(p []byte, off btrfsvol.LogicalAddr) (int, error) {
- return fs.inner.WriteAt(p, off)
-}
-
-// btrfstree.NodeFile
-
-func (fs *RebuiltTrees) ParentTree(tree btrfsprim.ObjID) (btrfsprim.ObjID, bool) {
- return fs.uuidMap.ParentTree(tree)
-}
-
-// btrfstree.NodeSource
-
-func (fs *RebuiltTrees) Superblock() (*btrfstree.Superblock, error) { return fs.inner.Superblock() }
-func (fs *RebuiltTrees) ReadNode(path btrfstree.TreePath) (*diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node], error) {
- return btrfstree.FSReadNode(fs, path)
-}
-
-// btrfstree.TreeOperator
-
-func (fs *RebuiltTrees) TreeWalk(ctx context.Context, treeID btrfsprim.ObjID, errHandle func(*btrfstree.TreeError), cbs btrfstree.TreeWalkHandler) {
- btrfstree.TreeOperatorImpl{NodeSource: fs}.TreeWalk(ctx, treeID, errHandle, cbs)
-}
-func (fs *RebuiltTrees) TreeLookup(treeID btrfsprim.ObjID, key btrfsprim.Key) (btrfstree.Item, error) {
- return btrfstree.TreeOperatorImpl{NodeSource: fs}.TreeLookup(treeID, key)
-}
-func (fs *RebuiltTrees) TreeSearch(treeID btrfsprim.ObjID, fn func(key btrfsprim.Key, size uint32) int) (btrfstree.Item, error) {
- return btrfstree.TreeOperatorImpl{NodeSource: fs}.TreeSearch(treeID, fn)
-}
-func (fs *RebuiltTrees) TreeSearchAll(treeID btrfsprim.ObjID, fn func(key btrfsprim.Key, size uint32) int) ([]btrfstree.Item, error) {
- return btrfstree.TreeOperatorImpl{NodeSource: fs}.TreeSearchAll(treeID, fn)
-}
-*/
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit.go
deleted file mode 100644
index 5983e2f..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-/*
-import (
- "fmt"
- "reflect"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/slices"
-)
-
-type RebuiltNode struct {
- Errs containers.Set[string]
- MinKey, MaxKey btrfsprim.Key
- InTrees containers.Set[btrfsprim.ObjID]
- btrfstree.Node
-}
-
-func (a RebuiltNode) Compat(b RebuiltNode) bool {
- a.Node.Head.Generation = b.Node.Head.Generation
- return reflect.DeepEqual(a.Node, b.Node)
-}
-
-func (a RebuiltNode) Merge(b RebuiltNode) (RebuiltNode, error) {
- if !a.Compat(b) {
- switch {
- case a.Node.Head.Generation > b.Node.Head.Generation:
- return a, nil
- case a.Node.Head.Generation < b.Node.Head.Generation:
- return b, nil
- default:
- return a, fmt.Errorf("mismatch: %v != %v", a, b)
- }
- }
-
- // take the broadest region
- if a.MinKey.Cmp(b.MinKey) > 0 { // if a.MinKey > b.MinKey {
- a.MinKey = b.MinKey // take the min of the two
- }
- if a.MaxKey.Cmp(b.MaxKey) < 0 { // if a.MaxKey < b.MaxKey {
-		a.MaxKey = b.MaxKey // take the max of the two
- }
-
- // take the highest generation
- a.Node.Head.Generation = slices.Max(a.Node.Head.Generation, b.Node.Head.Generation)
-
- // take the union
- a.InTrees.InsertFrom(b.InTrees)
- a.Errs.InsertFrom(b.Errs)
-
- return a, nil
-}
-
-func reInitBrokenNodes(ctx context.Context, fs _FS, badNodes []badNode) (map[btrfsvol.LogicalAddr]*RebuiltNode, error) {
- dlog.Info(ctx, "Re-initializing bad nodes...")
-
- sb, err := fs.Superblock()
- if err != nil {
- return nil, err
- }
- chunkTreeUUID, ok := getChunkTreeUUID(ctx, fs)
- if !ok {
- return nil, fmt.Errorf("could not look up chunk tree UUID")
- }
-
- sort.Slice(badNodes, func(i, j int) bool {
- iGen := badNodes[i].Path.Node(-1).ToNodeGeneration
- jGen := badNodes[j].Path.Node(-1).ToNodeGeneration
- switch {
- case iGen < jGen:
- return true
- case iGen > jGen:
- return false
- default:
- iAddr := badNodes[i].Path.Node(-1).ToNodeAddr
- jAddr := badNodes[j].Path.Node(-1).ToNodeAddr
- return iAddr < jAddr
- }
- })
-
- lastPct := -1
- progress := func(done int) {
- pct := int(100 * float64(done) / float64(len(badNodes)))
- if pct != lastPct || done == len(badNodes) {
- dlog.Infof(ctx, "... %v%% (%v/%v)",
- pct, done, len(badNodes))
- lastPct = pct
- }
- }
-
- rebuiltNodes := make(map[btrfsvol.LogicalAddr]*RebuiltNode)
- for i, badNode := range badNodes {
- progress(i)
- path := badNode.Path
-
- min, max := spanOfTreePath(fs, path)
- node := RebuiltNode{
- Errs: containers.NewSet[string](
- badNode.Err,
- ),
- MinKey: min,
- MaxKey: max,
- InTrees: containers.NewSet[btrfsprim.ObjID](
- path.Node(-1).FromTree,
- ),
- Node: btrfstree.Node{
- Size: sb.NodeSize,
- ChecksumType: sb.ChecksumType,
- Head: btrfstree.NodeHeader{
- MetadataUUID: sb.EffectiveMetadataUUID(),
- Addr: path.Node(-1).ToNodeAddr,
- ChunkTreeUUID: chunkTreeUUID,
- //Owner: TBD, // see RebuiltNode.InTrees
- Generation: path.Node(-1).ToNodeGeneration,
- Level: path.Node(-1).ToNodeLevel,
- },
- },
- }
- if other, ok := rebuiltNodes[path.Node(-1).ToNodeAddr]; ok {
- *other, err = other.Merge(node)
- if err != nil {
- dlog.Errorf(ctx, "... %v", err)
- }
- } else {
- rebuiltNodes[path.Node(-1).ToNodeAddr] = &node
- }
- }
- progress(len(badNodes))
-
- dlog.Infof(ctx, "... initialized %d nodes", len(rebuiltNodes))
- return rebuiltNodes, nil
-}
-*/
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit_test.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit_test.go
deleted file mode 100644
index 865cd7e..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes_test
-
-/*
-import (
- "strings"
- "testing"
-
- "git.lukeshu.com/go/lowmemjson"
- "github.com/stretchr/testify/assert"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
-)
-
-func TestEncodeRebuiltNodes(t *testing.T) {
- dat := map[btrfsvol.LogicalAddr]*rebuildnodes.RebuiltNode{
- 100007133184: {
- Errs: containers.NewSet[string](
- "btrfs.ReadNode: node@0x0000001748e3c000: expected generation\u003c=6596014 but claims to be generation=6596025",
- ),
- MinKey: btrfsprim.Key{},
- MaxKey: btrfsprim.Key{},
- InTrees: containers.NewSet[btrfsprim.ObjID](
- 257,
- ),
- Node: btrfstree.Node{},
- },
- }
- var buf strings.Builder
- assert.NoError(t, lowmemjson.Encode(&lowmemjson.ReEncoder{
- Out: &buf,
-
- Indent: "\t",
- ForceTrailingNewlines: true,
- }, dat))
- assert.Equal(t, `{
- "100007133184": {
- "Errs": [
- "btrfs.ReadNode: node@0x0000001748e3c000: expected generation\u003c=6596014 but claims to be generation=6596025"
- ],
- "MinKey": {
- "ObjectID": 0,
- "ItemType": 0,
- "Offset": 0
- },
- "MaxKey": {
- "ObjectID": 0,
- "ItemType": 0,
- "Offset": 0
- },
- "InTrees": [
- 257
- ],
- "Size": 0,
- "ChecksumType": 0,
- "Head": {
- "Checksum": "0000000000000000000000000000000000000000000000000000000000000000",
- "MetadataUUID": "00000000-0000-0000-0000-000000000000",
- "Addr": 0,
- "Flags": 0,
- "BackrefRev": 0,
- "ChunkTreeUUID": "00000000-0000-0000-0000-000000000000",
- "Generation": 0,
- "Owner": 0,
- "NumItems": 0,
- "Level": 0
- },
- "BodyInternal": null,
- "BodyLeaf": null,
- "Padding": null
- }
-}
-`, buf.String())
-}
-*/
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s4_reattach.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s4_reattach.go
deleted file mode 100644
index a78d964..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s4_reattach.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-/*
-import (
- "context"
- "sort"
-
- "github.com/datawire/dlib/dlog"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/maps"
-)
-
-func (a RebuiltNode) ContainsWholeRegion(min, max btrfsprim.Key) int {
- switch {
- case min.Cmp(a.MinKey) < 0:
- // 'a' is too far right
- return -1
- case max.Cmp(a.MaxKey) > 0:
- // 'a' is too far left
- return 1
- default:
- // just right
- return 0
- }
-}
-
-func reAttachNodes(ctx context.Context, fs _FS, orphanedNodes containers.Set[btrfsvol.LogicalAddr], rebuiltNodes map[btrfsvol.LogicalAddr]*RebuiltNode) error {
- dlog.Info(ctx, "Attaching orphaned nodes to rebuilt nodes...")
-
- sb, err := fs.Superblock()
- if err != nil {
- return err
- }
-
- // Index 'rebuiltNodes' for fast lookups.
- dlog.Info(ctx, "... indexing rebuilt nodes...")
- var byLevel [][]*RebuiltNode
- for _, node := range rebuiltNodes {
- for int(node.Head.Level) >= len(byLevel) {
- byLevel = append(byLevel, []*RebuiltNode(nil))
- }
- byLevel[node.Head.Level] = append(byLevel[node.Head.Level], node)
- }
- for _, slice := range byLevel {
- sort.Slice(slice, func(i, j int) bool {
- return slice[i].MinKey.Cmp(slice[j].MinKey) < 0
- })
- }
- dlog.Info(ctx, "... done indexing")
-
- // Attach orphanedNodes to the gaps.
- dlog.Info(ctx, "... attaching nodes...")
- lastPct := -1
- progress := func(done int) {
- pct := int(100 * float64(done) / float64(len(orphanedNodes)))
- if pct != lastPct || done == len(orphanedNodes) {
- dlog.Infof(ctx, "... %v%% (%v/%v)",
- pct, done, len(orphanedNodes))
- lastPct = pct
- }
- }
- numAttached := 0
- for i, foundLAddr := range maps.SortedKeys(orphanedNodes) {
- progress(i)
- foundRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, *sb, foundLAddr, btrfstree.NodeExpectations{
- LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: foundLAddr},
- })
- if foundRef == nil {
- return err
- }
- foundMinKey, ok := foundRef.Data.MinItem()
- if !ok {
- continue
- }
- foundMaxKey, ok := foundRef.Data.MaxItem()
- if !ok {
- continue
- }
-
- // `trees` is the set of trees that the node may be
- // placed in; '0' is a wildcard that means "any tree".
- // We still keep track of the others, in order to try
- // to avoid using the wildcard.
- trees := make(containers.Set[btrfsprim.ObjID])
- tree := foundRef.Data.Head.Owner
- for {
- trees.Insert(tree)
- var ok bool
- tree, ok = fs.ParentTree(tree)
- if !ok {
- // error; accept anything
- trees.Insert(0)
- break
- }
- if tree == 0 {
- // end of the line
- break
- }
- }
- attached := make(containers.Set[btrfsprim.ObjID])
- for level := int(foundRef.Data.Head.Level) + 1; level < len(byLevel) && len(attached) == 0; level++ {
- for _, parent := range byLevel[level] {
- if parent.ContainsWholeRegion(foundMinKey, foundMaxKey) != 0 {
- continue
- }
- if parent.Node.Head.Generation < foundRef.Data.Head.Generation {
- continue
- }
- if !trees.HasAny(parent.InTrees) {
- continue
- }
- parent.BodyInternal = append(parent.BodyInternal, btrfstree.KeyPointer{
- Key: foundMinKey,
- BlockPtr: foundLAddr,
- Generation: foundRef.Data.Head.Generation,
- })
- attached.InsertFrom(parent.InTrees)
- }
- }
- if _, wildcard := trees[0]; wildcard && len(attached) == 0 {
- for level := int(foundRef.Data.Head.Level) + 1; level < len(byLevel) && len(attached) == 0; level++ {
- for _, parent := range byLevel[level] {
- if parent.ContainsWholeRegion(foundMinKey, foundMaxKey) != 0 {
- continue
- }
- if parent.Node.Head.Generation < foundRef.Data.Head.Generation {
- continue
- }
- parent.BodyInternal = append(parent.BodyInternal, btrfstree.KeyPointer{
- Key: foundMinKey,
- BlockPtr: foundLAddr,
- Generation: foundRef.Data.Head.Generation,
- })
- attached.InsertFrom(parent.InTrees)
- }
- }
- }
-
- if len(attached) > 0 {
- numAttached++
- } else {
- dlog.Errorf(ctx, "could not find a broken node to attach node to reattach node@%v to",
- foundRef.Addr)
- }
- }
- progress(len(orphanedNodes))
- dlog.Info(ctx, "... ... done attaching")
-
- dlog.Infof(ctx, "... re-attached %d nodes (%v%% success rate)",
- numAttached, int(100*float64(numAttached)/float64(len(orphanedNodes))))
- return nil
-}
-*/
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/orphans.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/orphans.go
new file mode 100644
index 0000000..3d375f0
--- /dev/null
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/orphans.go
@@ -0,0 +1,102 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package rebuildnodes
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
+ "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
+)
+
+func listRoots(graph nodeGraph, leaf btrfsvol.LogicalAddr) containers.Set[btrfsvol.LogicalAddr] {
+ kps := graph.EdgesTo[leaf]
+ if len(kps) == 0 {
+ return containers.NewSet(leaf)
+ }
+ ret := make(containers.Set[btrfsvol.LogicalAddr])
+ for _, kp := range kps {
+ ret.InsertFrom(listRoots(graph, kp.FromNode))
+ }
+ return ret
+}
+
+func walk(graph nodeGraph, root btrfsvol.LogicalAddr, fn func(btrfsvol.LogicalAddr) bool) {
+ if _, ok := graph.Nodes[root]; !ok {
+ return
+ }
+ if !fn(root) {
+ return
+ }
+ for _, kp := range graph.EdgesFrom[root] {
+ walk(graph, kp.ToNode, fn)
+ }
+}
+
+type keyAndTree struct {
+ btrfsprim.Key
+ TreeID btrfsprim.ObjID
+}
+
+func (a keyAndTree) Cmp(b keyAndTree) int {
+ if d := a.Key.Cmp(b.Key); d != 0 {
+ return d
+ }
+ return containers.NativeCmp(a.TreeID, b.TreeID)
+}
+
+func indexOrphans(fs diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock, graph nodeGraph) (
+ orphans containers.Set[btrfsvol.LogicalAddr],
+ leaf2orphans map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr],
+ key2leaf *containers.SortedMap[keyAndTree, btrfsvol.LogicalAddr],
+ err error,
+) {
+ orphans = make(containers.Set[btrfsvol.LogicalAddr])
+ for node := range graph.Nodes {
+ if len(graph.EdgesTo[node]) == 0 {
+ orphans.Insert(node)
+ }
+ }
+
+ leaf2orphans = make(map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr])
+ visited := make(containers.Set[btrfsvol.LogicalAddr])
+ for orphan := range orphans {
+ walk(graph, orphan, func(node btrfsvol.LogicalAddr) bool {
+ if visited.Has(node) {
+ return false
+ }
+ visited.Insert(node)
+ if graph.Nodes[node].Level == 0 {
+ if roots := listRoots(graph, node); !roots.Has(0) {
+ leaf2orphans[node] = roots
+ }
+ }
+ return true
+ })
+ }
+
+ key2leaf = new(containers.SortedMap[keyAndTree, btrfsvol.LogicalAddr])
+ for laddr := range leaf2orphans {
+ nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, sb, laddr, btrfstree.NodeExpectations{
+ LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr},
+ Level: containers.Optional[uint8]{OK: true, Val: 0},
+ })
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ for _, item := range nodeRef.Data.BodyLeaf {
+ k := keyAndTree{
+ Key: item.Key,
+ TreeID: nodeRef.Data.Head.Owner,
+ }
+ if cur, ok := key2leaf.Load(k); !ok || graph.Nodes[cur].Generation < nodeRef.Data.Head.Generation {
+ key2leaf.Store(k, laddr)
+ }
+ }
+ }
+ return orphans, leaf2orphans, key2leaf, nil
+}
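
The orphan indexing above hinges on listRoots: starting from a leaf, it follows every incoming key-pointer edge upward and returns the set of node addresses that nothing points to; indexOrphans then records that root set in leaf2orphans for every leaf whose roots do not include address zero. A minimal self-contained sketch of the recursion (the real nodeGraph and containers.Set types live elsewhere in this package; the toyGraph below is a hypothetical stand-in) behaves like this:

package main

import "fmt"

// toyGraph is a hypothetical stand-in for the package's nodeGraph; it only
// keeps the EdgesTo index that listRoots needs: child address -> parents.
type toyGraph struct {
	EdgesTo map[int64][]int64
}

// listRoots mirrors rebuildnodes.listRoots: a node with no incoming edges is
// its own root; otherwise its roots are the union of its parents' roots.
func listRoots(g toyGraph, node int64) map[int64]bool {
	parents := g.EdgesTo[node]
	if len(parents) == 0 {
		return map[int64]bool{node: true}
	}
	ret := make(map[int64]bool)
	for _, parent := range parents {
		for root := range listRoots(g, parent) {
			ret[root] = true
		}
	}
	return ret
}

func main() {
	// Leaf 1 is pointed to by two dangling internal nodes, 10 and 11;
	// neither of those is pointed to by anything, so both are its roots.
	g := toyGraph{EdgesTo: map[int64][]int64{1: {10, 11}}}
	fmt.Println(listRoots(g, 1)) // map[10:true 11:true]
}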
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
new file mode 100644
index 0000000..536b61d
--- /dev/null
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
@@ -0,0 +1,107 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package rebuildnodes
+
+import (
+ "context"
+
+ "github.com/datawire/dlib/dlog"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsutil"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
+)
+
+type Rebuilder struct {
+ inner interface {
+ btrfstree.TreeOperator
+ Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.LogicalAddr) ([]btrfsprim.Key, error)
+ }
+
+ orphans containers.Set[btrfsvol.LogicalAddr]
+ leaf2orphans map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]
+ key2leaf containers.SortedMap[keyAndTree, btrfsvol.LogicalAddr]
+
+ augments map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr]
+}
+
+func RebuildNodes(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) (map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], error) {
+ scanData, err := ScanDevices(ctx, fs, nodeScanResults) // ScanDevices does its own logging
+ if err != nil {
+ return nil, err
+ }
+
+ dlog.Info(ctx, "Reading superblock...")
+ sb, err := fs.Superblock()
+ if err != nil {
+ return nil, err
+ }
+
+ dlog.Info(ctx, "Indexing orphans...")
+ orphans, leaf2orphans, key2leaf, err := indexOrphans(fs, *sb, scanData.nodeGraph)
+ if err != nil {
+ return nil, err
+ }
+
+ dlog.Info(ctx, "Rebuilding node tree...")
+ o := &Rebuilder{
+ inner: btrfsutil.NewBrokenTrees(ctx, fs),
+
+ orphans: orphans,
+ leaf2orphans: leaf2orphans,
+ key2leaf: *key2leaf,
+
+ augments: make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr]),
+ }
+ if err := o.rebuild(ctx); err != nil {
+ return nil, err
+ }
+
+ return o.augments, nil
+}
+
+func (o *Rebuilder) rebuild(ctx context.Context) error {
+ // TODO
+ //btrfsutil.WalkAllTrees(ctx, o.inner)
+ handleItem(o, ctx, 0, btrfstree.Item{})
+ return nil
+}
+
+// err implements rebuildCallbacks.
+func (o *Rebuilder) err(ctx context.Context, e error) {
+ // TODO
+}
+
+// want implements rebuildCallbacks.
+func (o *Rebuilder) want(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) {
+ // TODO
+}
+
+// wantOff implements rebuildCallbacks.
+func (o *Rebuilder) wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) {
+ // TODO
+}
+
+// wantFunc implements rebuildCallbacks.
+func (o *Rebuilder) wantFunc(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(btrfsitem.Item) bool) {
+ // TODO
+}
+
+// wantCSum implements rebuildCallbacks.
+//
+// interval is [beg, end)
+func (o *Rebuilder) wantCSum(ctx context.Context, beg, end btrfsvol.LogicalAddr) {
+ // TODO
+}
+
+// wantFileExt implements rebuildCallbacks.
+func (o *Rebuilder) wantFileExt(ctx context.Context, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) {
+ // TODO
+}
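
RebuildNodes is the new public entry point: it processes the device-scan results into a node graph, indexes orphans, and then runs the (still mostly stubbed-out) Rebuilder, returning the set of node addresses each tree would need grafted back in. The command-line wiring is not part of this commit, so the following caller is only a hypothetical sketch of how it might be driven:

package example // hypothetical caller, not part of this change

import (
	"context"

	"github.com/datawire/dlib/dlog"

	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes"
)

// rebuildAndReport feeds a previously-obtained ScanDevicesResult to the new
// entry point and logs, per tree, how many node addresses it proposes to
// re-attach.
func rebuildAndReport(ctx context.Context, fs *btrfs.FS, scan btrfsinspect.ScanDevicesResult) error {
	augments, err := rebuildnodes.RebuildNodes(ctx, fs, scan)
	if err != nil {
		return err
	}
	for treeID, addrs := range augments {
		dlog.Infof(ctx, "tree %v: %d node(s) to re-attach", treeID, len(addrs))
	}
	return nil
}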
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go
new file mode 100644
index 0000000..8247651
--- /dev/null
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go
@@ -0,0 +1,322 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package rebuildnodes
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+
+ "github.com/datawire/dlib/dlog"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+)
+
+type rebuildCallbacks interface {
+ err(ctx context.Context, e error)
+ want(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType)
+ wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64)
+ wantFunc(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(btrfsitem.Item) bool)
+ wantCSum(ctx context.Context, beg, end btrfsvol.LogicalAddr) // interval is [beg, end)
+ wantFileExt(ctx context.Context, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64)
+}
+
+func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, item btrfstree.Item) {
+ ctx = dlog.WithField(ctx, "tree", treeID)
+ ctx = dlog.WithField(ctx, "key", item.Key)
+
+ // Notionally, just express the relationships shown in
+ // https://btrfs.wiki.kernel.org/index.php/File:References.png (from the page
+ // https://btrfs.wiki.kernel.org/index.php/Data_Structures )
+ switch body := item.Body.(type) {
+ case btrfsitem.BlockGroup:
+ o.want(dlog.WithField(ctx, "wants", "Chunk"),
+ btrfsprim.CHUNK_TREE_OBJECTID,
+ body.ChunkObjectID,
+ btrfsitem.CHUNK_ITEM_KEY)
+ o.wantOff(dlog.WithField(ctx, "wants", "FreeSpaceInfo"),
+ btrfsprim.FREE_SPACE_TREE_OBJECTID,
+ item.Key.ObjectID,
+ btrfsitem.FREE_SPACE_INFO_KEY,
+ item.Key.Offset)
+ case btrfsitem.Chunk:
+ o.want(dlog.WithField(ctx, "wants", "owning Root"),
+ btrfsprim.ROOT_TREE_OBJECTID,
+ body.Head.Owner,
+ btrfsitem.ROOT_ITEM_KEY)
+ case btrfsitem.Dev:
+ // nothing
+ case btrfsitem.DevExtent:
+ o.wantOff(dlog.WithField(ctx, "wants", "Chunk"),
+ body.ChunkTree,
+ body.ChunkObjectID,
+ btrfsitem.CHUNK_ITEM_KEY,
+ uint64(body.ChunkOffset))
+ case btrfsitem.DevStats:
+ // nothing
+ case btrfsitem.DirEntry:
+ // containing-directory
+ o.wantOff(dlog.WithField(ctx, "wants", "containing dir inode"),
+ treeID,
+ item.Key.ObjectID,
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ // siblings
+ switch item.Key.ItemType {
+ case btrfsitem.DIR_ITEM_KEY:
+ o.wantFunc(dlog.WithField(ctx, "wants", "corresponding DIR_INDEX"),
+ treeID,
+ item.Key.ObjectID,
+ btrfsitem.DIR_INDEX_KEY,
+ func(peerItem btrfsitem.Item) bool {
+ return reflect.DeepEqual(item, peerItem)
+ })
+ case btrfsitem.DIR_INDEX_KEY:
+ o.wantOff(dlog.WithField(ctx, "wants", "corresponding DIR_ITEM"),
+ treeID,
+ item.Key.ObjectID,
+ btrfsitem.DIR_ITEM_KEY,
+ btrfsitem.NameHash(body.Name))
+ case btrfsitem.XATTR_ITEM_KEY:
+ // nothing
+ }
+ // item-within-directory
+ if body.Location != (btrfsprim.Key{}) {
+ switch body.Location.ItemType {
+ case btrfsitem.INODE_ITEM_KEY:
+ o.wantOff(dlog.WithField(ctx, "wants", "item being pointed to"),
+ treeID,
+ body.Location.ObjectID,
+ body.Location.ItemType,
+ body.Location.Offset)
+ o.wantOff(dlog.WithField(ctx, "wants", "backref from item being pointed to"),
+ treeID,
+ body.Location.ObjectID,
+ btrfsitem.INODE_REF_KEY,
+ uint64(item.Key.ObjectID))
+ case btrfsitem.ROOT_ITEM_KEY:
+ o.want(dlog.WithField(ctx, "wants", "Root of subvolume being pointed to"),
+ btrfsprim.ROOT_TREE_OBJECTID,
+ body.Location.ObjectID,
+ body.Location.ItemType)
+ default:
+ o.err(ctx, fmt.Errorf("DirEntry: unexpected .Location.ItemType: %v", body.Location.ItemType))
+ }
+ }
+ case btrfsitem.Empty:
+ // nothing
+ case btrfsitem.Extent:
+ //if body.Head.Flags.Has(btrfsitem.EXTENT_FLAG_TREE_BLOCK) {
+ // // Supposedly this flag indicates that that
+ // // body.Info.Key identifies a node by the
+ // // first key in the node. But nothing in the
+ // // kernel ever reads this, so who knows if it
+ // // always gets updated correctly?
+ //}
+ for _, ref := range body.Refs {
+ switch refBody := ref.Body.(type) {
+ case nil:
+ // nothing
+ case btrfsitem.ExtentDataRef:
+ o.wantOff(dlog.WithField(ctx, "wants", "referencing Inode"),
+ refBody.Root,
+ refBody.ObjectID,
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ o.wantOff(dlog.WithField(ctx, "wants", "referencing FileExtent"),
+ refBody.Root,
+ refBody.ObjectID,
+ btrfsitem.EXTENT_DATA_KEY,
+ uint64(refBody.Offset))
+ case btrfsitem.SharedDataRef:
+ // nothing
+ default:
+ panic("should not happen")
+ }
+ }
+ case btrfsitem.ExtentCSum:
+ // nothing
+ case btrfsitem.ExtentDataRef:
+ o.want(dlog.WithField(ctx, "wants", "Extent being referenced"),
+ btrfsprim.EXTENT_TREE_OBJECTID,
+ item.Key.ObjectID,
+ btrfsitem.EXTENT_ITEM_KEY)
+ o.wantOff(dlog.WithField(ctx, "wants", "referencing Inode"),
+ body.Root,
+ body.ObjectID,
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ o.wantOff(dlog.WithField(ctx, "wants", "referencing FileExtent"),
+ body.Root,
+ body.ObjectID,
+ btrfsitem.EXTENT_DATA_KEY,
+ uint64(body.Offset))
+ case btrfsitem.FileExtent:
+ o.wantOff(dlog.WithField(ctx, "wants", "containing Inode"),
+ treeID,
+ item.Key.ObjectID,
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ switch body.Type {
+ case btrfsitem.FILE_EXTENT_INLINE:
+ // nothing
+ case btrfsitem.FILE_EXTENT_REG, btrfsitem.FILE_EXTENT_PREALLOC:
+ // TODO: Check if inodeBody.Flags.Has(btrfsitem.INODE_NODATASUM)
+ o.wantCSum(dlog.WithField(ctx, "wants", "data sum"),
+ roundDown(body.BodyExtent.DiskByteNr, btrfssum.BlockSize),
+ roundUp(body.BodyExtent.DiskByteNr.Add(body.BodyExtent.DiskNumBytes), btrfssum.BlockSize))
+ default:
+ o.err(ctx, fmt.Errorf("FileExtent: unexpected body.Type: %v", body.Type))
+ }
+ case btrfsitem.FreeSpaceBitmap:
+ o.wantOff(dlog.WithField(ctx, "wants", "FreeSpaceInfo"),
+ treeID,
+ item.Key.ObjectID,
+ btrfsitem.FREE_SPACE_INFO_KEY,
+ item.Key.Offset)
+ case btrfsitem.FreeSpaceHeader:
+ o.wantOff(dlog.WithField(ctx, "wants", ".Location"),
+ treeID,
+ body.Location.ObjectID,
+ body.Location.ItemType,
+ body.Location.Offset)
+ case btrfsitem.FreeSpaceInfo:
+ if body.Flags.Has(btrfsitem.FREE_SPACE_USING_BITMAPS) {
+ o.wantOff(dlog.WithField(ctx, "wants", "FreeSpaceBitmap"),
+ treeID,
+ item.Key.ObjectID,
+ btrfsitem.FREE_SPACE_BITMAP_KEY,
+ item.Key.Offset)
+ }
+ case btrfsitem.Inode:
+ o.want(dlog.WithField(ctx, "wants", "backrefs"),
+ treeID, // TODO: validate the number of these against body.NLink
+ item.Key.ObjectID,
+ btrfsitem.INODE_REF_KEY)
+ o.wantFileExt(dlog.WithField(ctx, "wants", "FileExtents"),
+ treeID, item.Key.ObjectID, body.Size)
+ if body.BlockGroup != 0 {
+ o.want(dlog.WithField(ctx, "wants", "BlockGroup"),
+ btrfsprim.EXTENT_TREE_OBJECTID,
+ body.BlockGroup,
+ btrfsitem.BLOCK_GROUP_ITEM_KEY)
+ }
+ case btrfsitem.InodeRefs:
+ o.wantOff(dlog.WithField(ctx, "wants", "child Inode"),
+ treeID,
+ item.Key.ObjectID,
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ o.wantOff(dlog.WithField(ctx, "wants", "parent Inode"),
+ treeID,
+ btrfsprim.ObjID(item.Key.Offset),
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ for _, ref := range body {
+ o.wantOff(dlog.WithField(ctx, "wants", "DIR_ITEM"),
+ treeID,
+ btrfsprim.ObjID(item.Key.Offset),
+ btrfsitem.DIR_ITEM_KEY,
+ btrfsitem.NameHash(ref.Name))
+ o.wantOff(dlog.WithField(ctx, "wants", "DIR_INDEX"),
+ treeID,
+ btrfsprim.ObjID(item.Key.Offset),
+ btrfsitem.DIR_INDEX_KEY,
+ uint64(ref.Index))
+ }
+ case btrfsitem.Metadata:
+ for _, ref := range body.Refs {
+ switch refBody := ref.Body.(type) {
+ case nil:
+ // nothing
+ case btrfsitem.ExtentDataRef:
+ o.wantOff(dlog.WithField(ctx, "wants", "referencing INode"),
+ refBody.Root,
+ refBody.ObjectID,
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ o.wantOff(dlog.WithField(ctx, "wants", "referencing FileExtent"),
+ refBody.Root,
+ refBody.ObjectID,
+ btrfsitem.EXTENT_DATA_KEY,
+ uint64(refBody.Offset))
+ case btrfsitem.SharedDataRef:
+ // nothing
+ default:
+ panic("should not happen")
+ }
+ }
+ case btrfsitem.Root:
+ if body.RootDirID != 0 {
+ o.wantOff(dlog.WithField(ctx, "wants", "root directory"),
+ item.Key.ObjectID,
+ body.RootDirID,
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ }
+ case btrfsitem.RootRef:
+ var otherType btrfsprim.ItemType
+ var parent, child btrfsprim.ObjID
+ switch item.Key.ItemType {
+ case btrfsitem.ROOT_REF_KEY:
+ otherType = btrfsitem.ROOT_BACKREF_KEY
+ parent = item.Key.ObjectID
+ child = btrfsprim.ObjID(item.Key.Offset)
+ case btrfsitem.ROOT_BACKREF_KEY:
+ otherType = btrfsitem.ROOT_REF_KEY
+ parent = btrfsprim.ObjID(item.Key.Offset)
+ child = item.Key.ObjectID
+ default:
+ panic("should not happen")
+ }
+ // sibling
+ o.wantOff(dlog.WithField(ctx, "wants", fmt.Sprintf("corresponding %v", otherType)),
+ treeID,
+ btrfsprim.ObjID(item.Key.Offset),
+ otherType,
+ uint64(item.Key.ObjectID))
+ // parent
+ o.want(dlog.WithField(ctx, "wants", "parent subvolume: Root"),
+ treeID,
+ parent,
+ btrfsitem.ROOT_ITEM_KEY)
+ o.wantOff(dlog.WithField(ctx, "wants", "parent subvolume: Inode of parent dir"),
+ parent,
+ body.DirID,
+ btrfsitem.INODE_ITEM_KEY,
+ 0)
+ o.wantOff(dlog.WithField(ctx, "wants", "parent subvolume: DIR_ITEM in parent dir"),
+ parent,
+ body.DirID,
+ btrfsitem.DIR_ITEM_KEY,
+ btrfsitem.NameHash(body.Name))
+ o.wantOff(dlog.WithField(ctx, "wants", "parent subvolume: DIR_INDEX in parent dir"),
+ parent,
+ body.DirID,
+ btrfsitem.DIR_INDEX_KEY,
+ uint64(body.Sequence))
+ // child
+ o.want(dlog.WithField(ctx, "wants", "child subvolume: Root"),
+ treeID,
+ child,
+ btrfsitem.ROOT_ITEM_KEY)
+ case btrfsitem.SharedDataRef:
+ o.want(dlog.WithField(ctx, "wants", "Extent"),
+ btrfsprim.EXTENT_TREE_OBJECTID,
+ item.Key.ObjectID,
+ btrfsitem.EXTENT_ITEM_KEY)
+ case btrfsitem.UUIDMap:
+ o.want(dlog.WithField(ctx, "wants", "subvolume Root"),
+ btrfsprim.ROOT_TREE_OBJECTID,
+ body.ObjID,
+ btrfsitem.ROOT_ITEM_KEY)
+ default:
+ panic(fmt.Errorf("unsupported item type: %T", body))
+ }
+}
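
handleItem itself only announces dependencies; deciding what to do with them is left to whatever implements rebuildCallbacks (for now, the stubbed Rebuilder methods in rebuild.go above). Because the interface is unexported, a test double has to live in this package. A hypothetical recorder, sketched here under the assumption that btrfstree.Item and btrfsitem.BlockGroup can be constructed literally as shown, would capture the two wants produced by a BLOCK_GROUP_ITEM:

package rebuildnodes

// Hypothetical in-package sketch, not part of this change.
import (
	"context"
	"fmt"

	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
)

// recordingCallbacks remembers every want/wantOff that handleItem emits and
// ignores the rest of the interface.
type recordingCallbacks struct {
	calls []string
}

func (r *recordingCallbacks) err(ctx context.Context, e error) {}
func (r *recordingCallbacks) want(ctx context.Context, treeID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) {
	r.calls = append(r.calls, fmt.Sprintf("want tree=%v obj=%v type=%v", treeID, objID, typ))
}
func (r *recordingCallbacks) wantOff(ctx context.Context, treeID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) {
	r.calls = append(r.calls, fmt.Sprintf("wantOff tree=%v obj=%v type=%v off=%v", treeID, objID, typ, off))
}
func (r *recordingCallbacks) wantFunc(ctx context.Context, treeID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(btrfsitem.Item) bool) {
}
func (r *recordingCallbacks) wantCSum(ctx context.Context, beg, end btrfsvol.LogicalAddr) {}
func (r *recordingCallbacks) wantFileExt(ctx context.Context, treeID, ino btrfsprim.ObjID, size int64) {
}

// exampleBlockGroup shows the dispatch: a BlockGroup item yields exactly two
// calls, one for its Chunk and one for its FreeSpaceInfo (see the BlockGroup
// case above).
func exampleBlockGroup(ctx context.Context) []string {
	r := new(recordingCallbacks)
	handleItem(r, ctx, btrfsprim.EXTENT_TREE_OBJECTID, btrfstree.Item{
		Key:  btrfsprim.Key{ObjectID: 13631488, ItemType: btrfsitem.BLOCK_GROUP_ITEM_KEY, Offset: 8388608},
		Body: btrfsitem.BlockGroup{ChunkObjectID: 256},
	})
	return r.calls
}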
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/treeancestors.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/treeancestors.go
deleted file mode 100644
index 42228ae..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/treeancestors.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package rebuildnodes
-
-/*
-import (
- "context"
-
- //"github.com/datawire/dlib/dlog"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
-)
-
-func getTreeAncestors(ctx context.Context, scanData scanResult) map[btrfsprim.ObjID]containers.Set[btrfsprim.ObjID] {
- treeAncestors := make(map[btrfsprim.ObjID]containers.Set[btrfsprim.ObjID])
-
- for laddr, node := range scanData.Nodes {
- if _, ok := treeAncestors[node.Owner]; !ok {
- treeAncestors[node.Owner] = make(containers.Set[btrfsprim.ObjID])
- }
- for _, edge := range scanData.EdgesTo[laddr] {
- if edge.FromTree != node.Owner {
- if err := checkNodeExpectations(*edge, node); err != nil {
- //dlog.Errorf(ctx, "... ignoring keypointer %v because %v", edge.String(), err)
- } else {
- treeAncestors[node.Owner].Insert(edge.FromTree)
- }
- }
- }
- }
-
- return treeAncestors
-}
-
-func (m uuidMap) considerAncestors(ctx context.Context, treeAncestors map[btrfsprim.ObjID]containers.Set[btrfsprim.ObjID]) {
- if missing := m.missingRootItems(); len(missing) == 0 {
- return
- } else {
- dlog.Infof(ctx, "Rebuilding %d root items...", len(missing))
- }
-
- fa := newFullAncestorLister(m, treeAncestors)
-
- for _, missingRoot := range maps.SortedKeys(m.missingRootItems()) {
- if _, ok := m.TreeParent[missingRoot]; ok {
- // This one is incomplete because it doesn't have a UUID, not
- // because it doesn't have a parent.
- continue
- }
- potentialParents := make(containers.Set[btrfsprim.ObjID])
- potentialParents.InsertFrom(fa.GetFullAncestors(missingRoot))
- for _, ancestor := range maps.SortedKeys(fa.GetFullAncestors(missingRoot)) {
- potentialParents.DeleteFrom(fa.GetFullAncestors(ancestor))
- }
- if len(potentialParents) == 1 {
- parent := potentialParents.TakeOne()
- dlog.Infof(ctx, "... the parent of %v is %v", missingRoot, parent)
- parentUUID, ok := m.ObjID2UUID[parent]
- if !ok {
- dlog.Errorf(ctx, "... but can't synthesize a root item because UUID of %v is unknown", parent)
- continue
- }
- m.TreeParent[missingRoot] = parentUUID
- }
- }
-
- if missing := m.missingRootItems(); len(missing) > 0 {
- dlog.Errorf(ctx, "... could not rebuild root items for %d trees: %v", len(missing), maps.SortedKeys(missing))
- } else {
- dlog.Info(ctx, "... rebuilt all root items")
- }
-}
-*/
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go
index f1033e6..bdaa0c4 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go
@@ -7,6 +7,8 @@ package rebuildnodes
import (
"fmt"
+ "golang.org/x/exp/constraints"
+
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
)
@@ -18,26 +20,6 @@ func maybeSet[K, V comparable](name string, m map[K]V, k K, v V) error {
return nil
}
-/*
-var maxKey = btrfsprim.Key{
- ObjectID: math.MaxUint64,
- ItemType: math.MaxUint8,
- Offset: math.MaxUint64,
-}
-
-func keyMm(key btrfsprim.Key) btrfsprim.Key {
- switch {
- case key.Offset > 0:
- key.Offset--
- case key.ItemType > 0:
- key.ItemType--
- case key.ObjectID > 0:
- key.ObjectID--
- }
- return key
-}
-*/
-
func countNodes(nodeScanResults btrfsinspect.ScanDevicesResult) int {
var cnt int
for _, devResults := range nodeScanResults {
@@ -45,3 +27,11 @@ func countNodes(nodeScanResults btrfsinspect.ScanDevicesResult) int {
}
return cnt
}
+
+func roundDown[T constraints.Integer](n, d T) T {
+ return (n / d) * d
+}
+
+func roundUp[T constraints.Integer](n, d T) T {
+ return ((n + d - 1) / d) * d
+}
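
The two rounding helpers exist for the wantCSum call in rebuild_graph.go, which widens a file extent's on-disk byte range outward to whole checksum blocks. A quick standalone illustration (instantiated at int64, and assuming the usual 4 KiB value for btrfssum.BlockSize):

package main

import "fmt"

// Same shape as the generic helpers added above, fixed to int64 so that the
// example stands alone.
func roundDown(n, d int64) int64 { return (n / d) * d }
func roundUp(n, d int64) int64   { return ((n + d - 1) / d) * d }

func main() {
	// An extent starting and ending mid-block gets widened to whole
	// 4096-byte checksum blocks before its sums are requested.
	diskByteNr := int64(12_345_678)
	diskNumBytes := int64(10_000)
	fmt.Println(roundDown(diskByteNr, 4096))            // 12345344
	fmt.Println(roundUp(diskByteNr+diskNumBytes, 4096)) // 12357632
}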
diff --git a/lib/btrfsprogs/btrfsutil/broken_btree.go b/lib/btrfsprogs/btrfsutil/broken_btree.go
index 0660a46..8833937 100644
--- a/lib/btrfsprogs/btrfsutil/broken_btree.go
+++ b/lib/btrfsprogs/btrfsutil/broken_btree.go
@@ -94,6 +94,7 @@ func NewBrokenTrees(ctx context.Context, inner *btrfs.FS) interface {
btrfstree.TreeOperator
Superblock() (*btrfstree.Superblock, error)
ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error)
+ Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.LogicalAddr) ([]btrfsprim.Key, error)
} {
return &brokenTrees{
ctx: ctx,
@@ -315,3 +316,8 @@ func (bt *brokenTrees) Superblock() (*btrfstree.Superblock, error) {
func (bt *brokenTrees) ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error) {
return bt.inner.ReadAt(p, off)
}
+
+func (bt *brokenTrees) Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.LogicalAddr) ([]btrfsprim.Key, error) {
+ // TODO
+ return nil, nil
+}