diff options
author | Luke Shumaker <lukeshu@lukeshu.com> | 2023-02-28 14:05:27 -0700 |
---|---|---|
committer | Luke Shumaker <lukeshu@lukeshu.com> | 2023-03-14 19:45:07 -0600 |
commit | 8c8c6c27552f8554ba014c34d684cb90538ef65b (patch) | |
tree | f3a53ed194e29b516b52770e4949a1e508fad6a7 /lib/btrfsprogs/btrfsinspect/rebuildnodes | |
parent | 34bf167ef33c57b4d6767273f1d265971a4693b9 (diff) |
Move files around [ci-skip]
Diffstat (limited to 'lib/btrfsprogs/btrfsinspect/rebuildnodes')
13 files changed, 0 insertions, 2831 deletions
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go deleted file mode 100644 index dbbc6eb..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com> -// -// SPDX-License-Identifier: GPL-2.0-or-later - -package btrees - -import ( - "context" - - "github.com/datawire/dlib/dlog" - - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - pkggraph "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio" - "git.lukeshu.com/btrfs-progs-ng/lib/containers" - "git.lukeshu.com/btrfs-progs-ng/lib/slices" - "git.lukeshu.com/btrfs-progs-ng/lib/textui" -) - -type Callbacks interface { - AddedItem(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) - AddedRoot(ctx context.Context, tree btrfsprim.ObjID, root btrfsvol.LogicalAddr) - LookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) - LookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) -} - -// RebuiltForrest is an abstraction for rebuilding and accessing -// potentially broken btrees. -// -// It is conceptually a btrfstree.TreeOperator, and adds similar -// broken-tree handling to btrfsutil.BrokenForrest. However, the API -// is different thant btrfstree.TreeOperator, and is much more -// efficient than btrfsutil.BrokenForrest. 
-// -// The efficiency improvements are possible because of the API -// differences, which are necessary for how it is used in -// rebuildnodes: -// -// - it consumes an already-read graph.Graph instead of reading the -// graph itself -// -// - it does not use `btrfstree.TreePath` -// -// - it does not keep track of errors encountered in a tree -// -// Additionally, it provides some functionality that -// btrfsutil.BrokenForrest does not: -// -// - it provides a .LeafToRoots() method to advise on what -// additional roots should be added -// -// - it provides a .COWDistance() method to compare how related two -// trees are -// -// A zero RebuiltForrest is invalid; it must be initialized with -// NewRebuiltForrest(). -type RebuiltForrest struct { - // static - sb btrfstree.Superblock - graph pkggraph.Graph - keyIO *keyio.Handle - cb Callbacks - - // mutable - treesMu nestedMutex - trees map[btrfsprim.ObjID]*RebuiltTree // must hold .treesMu to access - leafs containers.ARCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]] - incItems containers.ARCache[btrfsprim.ObjID, *itemIndex] - excItems containers.ARCache[btrfsprim.ObjID, *itemIndex] -} - -// NewRebuiltForrest returns a new RebuiltForrest instance. All of -// the callbacks must be non-nil. -func NewRebuiltForrest(sb btrfstree.Superblock, graph pkggraph.Graph, keyIO *keyio.Handle, cb Callbacks) *RebuiltForrest { - return &RebuiltForrest{ - sb: sb, - graph: graph, - keyIO: keyIO, - cb: cb, - - trees: make(map[btrfsprim.ObjID]*RebuiltTree), - leafs: containers.ARCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]]{ - MaxLen: textui.Tunable(8), - }, - incItems: containers.ARCache[btrfsprim.ObjID, *itemIndex]{ - MaxLen: textui.Tunable(8), - }, - excItems: containers.ARCache[btrfsprim.ObjID, *itemIndex]{ - MaxLen: textui.Tunable(8), - }, - } -} - -// Tree returns a given tree, initializing it if nescessary. 
If it is -// unable to initialize the tree, then nil is returned, and nothing is -// done to the forrest. -// -// The tree is initialized with the normal root node of the tree. -func (ts *RebuiltForrest) Tree(ctx context.Context, treeID btrfsprim.ObjID) *RebuiltTree { - ctx = ts.treesMu.Lock(ctx) - defer ts.treesMu.Unlock() - if !ts.addTree(ctx, treeID, nil) { - return nil - } - return ts.trees[treeID] -} - -func (ts *RebuiltForrest) addTree(ctx context.Context, treeID btrfsprim.ObjID, stack []btrfsprim.ObjID) (ok bool) { - if tree, ok := ts.trees[treeID]; ok { - return tree != nil - } - defer func() { - if !ok { - // Store a negative cache of this. tree.AddRoot() for the ROOT or UUID - // trees will call .flushNegativeCache(). - ts.trees[treeID] = nil - } - }() - stack = append(stack, treeID) - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree", stack) - dlog.Info(ctx, "adding tree...") - if slices.Contains(treeID, stack[:len(stack)-1]) { - dlog.Errorf(ctx, "failed to add tree: loop detected: %v", stack) - return false - } - - tree := &RebuiltTree{ - ID: treeID, - Roots: make(containers.Set[btrfsvol.LogicalAddr]), - forrest: ts, - } - var root btrfsvol.LogicalAddr - switch treeID { - case btrfsprim.ROOT_TREE_OBJECTID: - root = ts.sb.RootTree - case btrfsprim.CHUNK_TREE_OBJECTID: - root = ts.sb.ChunkTree - case btrfsprim.TREE_LOG_OBJECTID: - root = ts.sb.LogTree - case btrfsprim.BLOCK_GROUP_TREE_OBJECTID: - root = ts.sb.BlockGroupRoot - default: - if !ts.addTree(ctx, btrfsprim.ROOT_TREE_OBJECTID, stack) { - dlog.Error(ctx, "failed to add tree: add ROOT_TREE") - return false - } - rootOff, rootItem, ok := ts.cb.LookupRoot(ctx, treeID) - if !ok { - dlog.Error(ctx, "failed to add tree: lookup ROOT_ITEM") - return false - } - root = rootItem.ByteNr - tree.UUID = rootItem.UUID - if rootItem.ParentUUID != (btrfsprim.UUID{}) { - tree.ParentGen = rootOff - if !ts.addTree(ctx, btrfsprim.UUID_TREE_OBJECTID, stack) { - return false - } - parentID, ok := 
ts.cb.LookupUUID(ctx, rootItem.ParentUUID) - if !ok { - dlog.Error(ctx, "failed to add tree: lookup UUID") - return false - } - if !ts.addTree(ctx, parentID, stack) { - dlog.Error(ctx, "failed to add tree: add parent tree") - return false - } - tree.Parent = ts.trees[parentID] - } - } - - ts.trees[treeID] = tree - if root != 0 { - tree.AddRoot(ctx, root) - } - - return true -} - -func (ts *RebuiltForrest) flushNegativeCache(ctx context.Context) { - _ = ts.treesMu.Lock(ctx) - defer ts.treesMu.Unlock() - for treeID, tree := range ts.trees { - if tree == nil { - delete(ts.trees, treeID) - } - } -} - -// ListRoots returns a listing of all initialized trees and their root -// nodes. -// -// Do not mutate the set of roots for a tree; it is a pointer to the -// RebuiltForrest's internal set! -func (ts *RebuiltForrest) ListRoots(ctx context.Context) map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr] { - _ = ts.treesMu.Lock(ctx) - defer ts.treesMu.Unlock() - ret := make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr]) - for treeID, tree := range ts.trees { - if tree != nil { - ret[treeID] = tree.Roots - } - } - return ret -} diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/nestedlock.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/nestedlock.go deleted file mode 100644 index c1ffa18..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/nestedlock.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (C) 2023 Luke Shumaker <lukeshu@lukeshu.com> -// -// SPDX-License-Identifier: GPL-2.0-or-later - -package btrees - -import ( - "context" - "sync" -) - -// A nestedMutex is like a sync.Mutex, but while it is locked by call -// 'A', may be simultaneously locked by subsequent calls if the -// subsequent calls use a Context descended from the one returned by -// the 'A' call to .Lock(). -type nestedMutex struct { - inner sync.Mutex - depth int -} - -type nestedMutexCtxKey struct{} - -// Lock locks the mutex. 
It is invalid to use a Context returned from -// Lock in a different goroutine than the one it was created in. It -// is invalid to use a Context returned from Lock after the mutex has -// subsequently become unlocked. -func (m *nestedMutex) Lock(ctx context.Context) context.Context { - if other, ok := ctx.Value(nestedMutexCtxKey{}).(*nestedMutex); ok && other == m { - m.depth++ - return ctx - } - m.inner.Lock() - return context.WithValue(ctx, nestedMutexCtxKey{}, m) -} - -// Unlock unlocks the mutex. It is invalid to call Unlock if the -// mutex is not already locked. It is invalid to call Unlock from -// multiple goroutines simultaneously. -func (m *nestedMutex) Unlock() { - if m.depth > 0 { - m.depth-- - } else { - m.inner.Unlock() - } -} diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go deleted file mode 100644 index 39d8871..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go +++ /dev/null @@ -1,357 +0,0 @@ -// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com> -// -// SPDX-License-Identifier: GPL-2.0-or-later - -package btrees - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/datawire/dlib/dlog" - - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio" - "git.lukeshu.com/btrfs-progs-ng/lib/containers" - "git.lukeshu.com/btrfs-progs-ng/lib/maps" - "git.lukeshu.com/btrfs-progs-ng/lib/slices" - "git.lukeshu.com/btrfs-progs-ng/lib/textui" -) - -type RebuiltTree struct { - // static - ID btrfsprim.ObjID - UUID btrfsprim.UUID - Parent *RebuiltTree - ParentGen btrfsprim.Generation // offset of this tree's root item - forrest *RebuiltForrest - - // mutable - mu sync.RWMutex - Roots containers.Set[btrfsvol.LogicalAddr] - // There are 3 more mutable "members" that 
are protected by - // `mu`; but they live in a shared LRUcache. They are all - // derived from tree.Roots, which is why it's OK if they get - // evicted. - // - // 1. tree.leafToRoots() = tree.forrest.leafs.Load(tree.ID) - // 2. tree.Items() = tree.forrest.incItems.Load(tree.ID) - // 3. tree.PotentialItems() = tree.forrest.excItems.Load(tree.ID) -} - -// LRU member 1: .leafToRoots() //////////////////////////////////////////////////////////////////////////////////////// - -// leafToRoots returns all leafs (lvl=0) in the filesystem that pass -// .isOwnerOK, whether or not they're in the tree. -func (tree *RebuiltTree) leafToRoots(ctx context.Context) map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] { - return containers.LoadOrElse[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]](&tree.forrest.leafs, tree.ID, func(btrfsprim.ObjID) map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-nodes", fmt.Sprintf("tree=%v", tree.ID)) - - nodeToRoots := make(map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]) - - var stats textui.Portion[int] - stats.D = len(tree.forrest.graph.Nodes) - progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) - progress := func() { - stats.N = len(nodeToRoots) - progressWriter.Set(stats) - } - - progress() - for _, node := range maps.SortedKeys(tree.forrest.graph.Nodes) { - tree.indexNode(ctx, node, nodeToRoots, progress, nil) - } - progressWriter.Done() - - ret := make(map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]) - for node, roots := range nodeToRoots { - if tree.forrest.graph.Nodes[node].Level == 0 && len(roots) > 0 { - ret[node] = roots - } - } - return ret - }) -} - -func (tree *RebuiltTree) indexNode(ctx context.Context, node btrfsvol.LogicalAddr, index map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr], progress func(), 
stack []btrfsvol.LogicalAddr) { - defer progress() - if err := ctx.Err(); err != nil { - return - } - if _, done := index[node]; done { - return - } - if slices.Contains(node, stack) { - // This is a panic because tree.forrest.graph.FinalCheck() should - // have already checked for loops. - panic("loop") - } - if !tree.isOwnerOK(tree.forrest.graph.Nodes[node].Owner, tree.forrest.graph.Nodes[node].Generation) { - index[node] = nil - return - } - - // tree.leafToRoots - stack = append(stack, node) - var roots containers.Set[btrfsvol.LogicalAddr] - for _, kp := range tree.forrest.graph.EdgesTo[node] { - if !tree.isOwnerOK(tree.forrest.graph.Nodes[kp.FromNode].Owner, tree.forrest.graph.Nodes[kp.FromNode].Generation) { - continue - } - tree.indexNode(ctx, kp.FromNode, index, progress, stack) - if len(index[kp.FromNode]) > 0 { - if roots == nil { - roots = make(containers.Set[btrfsvol.LogicalAddr]) - } - roots.InsertFrom(index[kp.FromNode]) - } - } - if roots == nil { - roots = containers.NewSet[btrfsvol.LogicalAddr](node) - } - index[node] = roots -} - -// isOwnerOK returns whether it is permissible for a node with -// .Head.Owner=owner to be in this tree. -func (tree *RebuiltTree) isOwnerOK(owner btrfsprim.ObjID, gen btrfsprim.Generation) bool { - for { - if owner == tree.ID { - return true - } - if tree.Parent == nil || gen >= tree.ParentGen { - return false - } - tree = tree.Parent - } -} - -// LRU members 2 and 3: .Items() and .PotentialItems() ///////////////////////////////////////////////////////////////// - -// Items returns a map of the items contained in this tree. -// -// Do not mutate the returned map; it is a pointer to the -// RebuiltTree's internal map! 
-func (tree *RebuiltTree) Items(ctx context.Context) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-inc-items", fmt.Sprintf("tree=%v", tree.ID)) - return tree.items(ctx, &tree.forrest.incItems, tree.Roots.HasAny) -} - -// PotentialItems returns a map of items that could be added to this -// tree with .AddRoot(). -// -// Do not mutate the returned map; it is a pointer to the -// RebuiltTree's internal map! -func (tree *RebuiltTree) PotentialItems(ctx context.Context) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-exc-items", fmt.Sprintf("tree=%v", tree.ID)) - return tree.items(ctx, &tree.forrest.excItems, - func(roots containers.Set[btrfsvol.LogicalAddr]) bool { - return !tree.Roots.HasAny(roots) - }) -} - -type itemIndex = containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] - -type itemStats struct { - Leafs textui.Portion[int] - NumItems int - NumDups int -} - -func (s itemStats) String() string { - return textui.Sprintf("%v (%v items, %v dups)", - s.Leafs, s.NumItems, s.NumDups) -} - -func (tree *RebuiltTree) items(ctx context.Context, cache containers.Map[btrfsprim.ObjID, *itemIndex], - leafFn func(roots containers.Set[btrfsvol.LogicalAddr]) bool, -) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] { - tree.mu.RLock() - defer tree.mu.RUnlock() - - return containers.LoadOrElse(cache, tree.ID, func(btrfsprim.ObjID) *itemIndex { - var leafs []btrfsvol.LogicalAddr - for leaf, roots := range tree.leafToRoots(ctx) { - if leafFn(roots) { - leafs = append(leafs, leaf) - } - } - slices.Sort(leafs) - - var stats itemStats - stats.Leafs.D = len(leafs) - progressWriter := textui.NewProgress[itemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) - - index := new(containers.SortedMap[btrfsprim.Key, keyio.ItemPtr]) - for i, leaf := range leafs { - stats.Leafs.N = i - progressWriter.Set(stats) - for j, itemKey := 
range tree.forrest.graph.Nodes[leaf].Items { - newPtr := keyio.ItemPtr{ - Node: leaf, - Idx: j, - } - if oldPtr, exists := index.Load(itemKey); !exists { - index.Store(itemKey, newPtr) - stats.NumItems++ - } else { - if tree.ShouldReplace(oldPtr.Node, newPtr.Node) { - index.Store(itemKey, newPtr) - } - stats.NumDups++ - } - progressWriter.Set(stats) - } - } - if stats.Leafs.N > 0 { - stats.Leafs.N = len(leafs) - progressWriter.Set(stats) - progressWriter.Done() - } - - return index - }) -} - -func (tree *RebuiltTree) ShouldReplace(oldNode, newNode btrfsvol.LogicalAddr) bool { - oldDist, _ := tree.COWDistance(tree.forrest.graph.Nodes[oldNode].Owner) - newDist, _ := tree.COWDistance(tree.forrest.graph.Nodes[newNode].Owner) - switch { - case newDist < oldDist: - // Replace the old one with the new lower-dist one. - return true - case newDist > oldDist: - // Retain the old lower-dist one. - return false - default: - oldGen := tree.forrest.graph.Nodes[oldNode].Generation - newGen := tree.forrest.graph.Nodes[newNode].Generation - switch { - case newGen > oldGen: - // Replace the old one with the new higher-gen one. - return true - case newGen < oldGen: - // Retain the old higher-gen one. - return false - default: - // TODO: This is a panic because I'm not really sure what the - // best way to handle this is, and so if this happens I want the - // program to crash and force me to figure out how to handle it. 
- panic(fmt.Errorf("dup nodes in tree=%v: old=%v=%v ; new=%v=%v", - tree.ID, - oldNode, tree.forrest.graph.Nodes[oldNode], - newNode, tree.forrest.graph.Nodes[newNode])) - } - } -} - -// .AddRoot() ////////////////////////////////////////////////////////////////////////////////////////////////////////// - -type rootStats struct { - Leafs textui.Portion[int] - AddedLeafs int - AddedItems int -} - -func (s rootStats) String() string { - return textui.Sprintf("%v (added %v leafs, added %v items)", - s.Leafs, s.AddedLeafs, s.AddedItems) -} - -// AddRoot adds an additional root node to the tree. It is useful to -// call .AddRoot() to re-attach part of the tree that has been broken -// off. -func (tree *RebuiltTree) AddRoot(ctx context.Context, rootNode btrfsvol.LogicalAddr) { - tree.mu.Lock() - defer tree.mu.Unlock() - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-root", fmt.Sprintf("tree=%v rootNode=%v", tree.ID, rootNode)) - dlog.Info(ctx, "adding root...") - - leafToRoots := tree.leafToRoots(ctx) - - var stats rootStats - stats.Leafs.D = len(leafToRoots) - progressWriter := textui.NewProgress[rootStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) - for i, leaf := range maps.SortedKeys(leafToRoots) { - stats.Leafs.N = i - progressWriter.Set(stats) - - if tree.Roots.HasAny(leafToRoots[leaf]) || !leafToRoots[leaf].Has(rootNode) { - continue - } - - stats.AddedLeafs++ - progressWriter.Set(stats) - - for _, itemKey := range tree.forrest.graph.Nodes[leaf].Items { - tree.forrest.cb.AddedItem(ctx, tree.ID, itemKey) - stats.AddedItems++ - progressWriter.Set(stats) - } - } - stats.Leafs.N = len(leafToRoots) - progressWriter.Set(stats) - progressWriter.Done() - - tree.Roots.Insert(rootNode) - tree.forrest.incItems.Delete(tree.ID) // force re-gen - tree.forrest.excItems.Delete(tree.ID) // force re-gen - - if (tree.ID == btrfsprim.ROOT_TREE_OBJECTID || tree.ID == btrfsprim.UUID_TREE_OBJECTID) && stats.AddedItems > 0 { - 
tree.forrest.flushNegativeCache(ctx) - } - tree.forrest.cb.AddedRoot(ctx, tree.ID, rootNode) -} - -// main public API ///////////////////////////////////////////////////////////////////////////////////////////////////// - -// COWDistance returns how many COW-snapshots down the 'tree' is from -// the 'parent'. -func (tree *RebuiltTree) COWDistance(parentID btrfsprim.ObjID) (dist int, ok bool) { - for { - if parentID == tree.ID { - return dist, true - } - if tree.Parent == nil { - return 0, false - } - tree = tree.Parent - dist++ - } -} - -// ReadItem reads an item from a tree. -func (tree *RebuiltTree) ReadItem(ctx context.Context, key btrfsprim.Key) btrfsitem.Item { - ptr, ok := tree.Items(ctx).Load(key) - if !ok { - panic(fmt.Errorf("should not happen: btrees.RebuiltTree.ReadItem called for not-included key: %v", key)) - } - return tree.forrest.keyIO.ReadItem(ctx, ptr) -} - -// LeafToRoots returns the list of potential roots (to pass to -// .AddRoot) that include a given leaf-node. -func (tree *RebuiltTree) LeafToRoots(ctx context.Context, leaf btrfsvol.LogicalAddr) containers.Set[btrfsvol.LogicalAddr] { - if tree.forrest.graph.Nodes[leaf].Level != 0 { - panic(fmt.Errorf("should not happen: (tree=%v).LeafToRoots(leaf=%v): not a leaf", - tree.ID, leaf)) - } - tree.mu.RLock() - defer tree.mu.RUnlock() - ret := make(containers.Set[btrfsvol.LogicalAddr]) - for root := range tree.leafToRoots(ctx)[leaf] { - if tree.Roots.Has(root) { - panic(fmt.Errorf("should not happen: (tree=%v).LeafToRoots(leaf=%v): tree contains root=%v but not leaf", - tree.ID, leaf, root)) - } - ret.Insert(root) - } - if len(ret) == 0 { - return nil - } - return ret -} diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go deleted file mode 100644 index 2a97ec8..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com> 
-// -// SPDX-License-Identifier: GPL-2.0-or-later - -package graph - -import ( - "context" - "fmt" - "reflect" - "time" - - "github.com/datawire/dlib/dlog" - - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/containers" - "git.lukeshu.com/btrfs-progs-ng/lib/diskio" - "git.lukeshu.com/btrfs-progs-ng/lib/maps" - "git.lukeshu.com/btrfs-progs-ng/lib/slices" - "git.lukeshu.com/btrfs-progs-ng/lib/textui" -) - -type Node struct { - Level uint8 - Generation btrfsprim.Generation - Owner btrfsprim.ObjID - NumItems uint32 - MinItem btrfsprim.Key - MaxItem btrfsprim.Key - Items []btrfsprim.Key -} - -func (n Node) String() string { - if reflect.ValueOf(n).IsZero() { - return "{}" - } - return fmt.Sprintf(`{lvl:%v, gen:%v, tree:%v, cnt:%v, min:(%v,%v,%v), max:(%v,%v,%v)}`, - n.Level, n.Generation, n.Owner, n.NumItems, - n.MinItem.ObjectID, n.MinItem.ItemType, n.MinItem.Offset, - n.MaxItem.ObjectID, n.MaxItem.ItemType, n.MaxItem.Offset) -} - -type Edge struct { - // It is invalid for both 'FromRoot' and 'FromNode' to be - // non-zero. If both are zero, then the Edge is from the - // superblock. 
- FromRoot btrfsvol.LogicalAddr - FromNode btrfsvol.LogicalAddr - FromItem int // only valid if one of FromRoot or FromNode is non-zero - - FromTree btrfsprim.ObjID - - ToNode btrfsvol.LogicalAddr - ToLevel uint8 - ToKey btrfsprim.Key - ToGeneration btrfsprim.Generation -} - -func (kp Edge) String() string { - var from string - switch { - case kp.FromRoot != 0: - from = fmt.Sprintf("root@%v[%d]:%v", - kp.FromRoot, kp.FromItem, kp.FromTree) - case kp.FromNode != 0: - from = fmt.Sprintf("{node:%v, tree:%v}[%d]", - kp.FromNode, kp.FromTree, kp.FromItem) - default: - from = fmt.Sprintf("superblock:%v", kp.FromTree) - } - return fmt.Sprintf(`%s -> {n:%v,l:%v,g:%v,k:(%v,%v,%v)}`, - from, - kp.ToNode, kp.ToLevel, kp.ToGeneration, - kp.ToKey.ObjectID, - kp.ToKey.ItemType, - kp.ToKey.Offset) -} - -type Graph struct { - Nodes map[btrfsvol.LogicalAddr]Node - BadNodes map[btrfsvol.LogicalAddr]error - EdgesFrom map[btrfsvol.LogicalAddr][]*Edge - EdgesTo map[btrfsvol.LogicalAddr][]*Edge -} - -func (g Graph) insertEdge(ptr *Edge) { - if ptr.ToNode == 0 { - panic("kp.ToNode should not be zero") - } - if ptr.FromRoot != 0 && ptr.FromNode != 0 { - panic("kp.FromRoot and kp.FromNode should not both be set") - } - if (ptr.FromRoot == 0 && ptr.FromNode == 0) && ptr.FromItem != 0 { - panic("kp.FromItem should only be set if either kp.FromRoot or kp.FromItem is set") - } - g.EdgesFrom[ptr.FromNode] = append(g.EdgesFrom[ptr.FromNode], ptr) - g.EdgesTo[ptr.ToNode] = append(g.EdgesTo[ptr.ToNode], ptr) -} - -func (g Graph) insertTreeRoot(sb btrfstree.Superblock, treeID btrfsprim.ObjID) { - treeInfo, err := btrfstree.LookupTreeRoot(nil, sb, treeID) - if err != nil { - // This shouldn't ever happen for treeIDs that are - // mentioned directly in the superblock; which are the - // only trees for which we should call - // .insertTreeRoot(). 
- panic(fmt.Errorf("LookupTreeRoot(%v): %w", treeID, err)) - } - if treeInfo.RootNode == 0 { - return - } - g.insertEdge(&Edge{ - FromTree: treeID, - ToNode: treeInfo.RootNode, - ToLevel: treeInfo.Level, - ToGeneration: treeInfo.Generation, - }) -} - -func New(sb btrfstree.Superblock) *Graph { - g := &Graph{ - Nodes: make(map[btrfsvol.LogicalAddr]Node), - BadNodes: make(map[btrfsvol.LogicalAddr]error), - EdgesFrom: make(map[btrfsvol.LogicalAddr][]*Edge), - EdgesTo: make(map[btrfsvol.LogicalAddr][]*Edge), - } - - // These 4 trees are mentioned directly in the superblock, so - // they are always seen. - g.insertTreeRoot(sb, btrfsprim.ROOT_TREE_OBJECTID) - g.insertTreeRoot(sb, btrfsprim.CHUNK_TREE_OBJECTID) - g.insertTreeRoot(sb, btrfsprim.TREE_LOG_OBJECTID) - g.insertTreeRoot(sb, btrfsprim.BLOCK_GROUP_TREE_OBJECTID) - - return g -} - -func (g Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) { - nodeData := Node{ - Level: nodeRef.Data.Head.Level, - Generation: nodeRef.Data.Head.Generation, - Owner: nodeRef.Data.Head.Owner, - NumItems: nodeRef.Data.Head.NumItems, - MinItem: discardOK(nodeRef.Data.MinItem()), - MaxItem: discardOK(nodeRef.Data.MaxItem()), - } - - if nodeRef.Data.Head.Level == 0 { - cnt := 0 - for _, item := range nodeRef.Data.BodyLeaf { - if _, ok := item.Body.(*btrfsitem.Root); ok { - cnt++ - } - } - kps := make([]Edge, 0, cnt) - keys := make([]btrfsprim.Key, len(nodeRef.Data.BodyLeaf)) - nodeData.Items = keys - g.Nodes[nodeRef.Addr] = nodeData - for i, item := range nodeRef.Data.BodyLeaf { - keys[i] = item.Key - if itemBody, ok := item.Body.(*btrfsitem.Root); ok { - kps = append(kps, Edge{ - FromRoot: nodeRef.Addr, - FromItem: i, - FromTree: item.Key.ObjectID, - ToNode: itemBody.ByteNr, - ToLevel: itemBody.Level, - ToGeneration: itemBody.Generation, - }) - g.insertEdge(&kps[len(kps)-1]) - } - } - } else { - g.Nodes[nodeRef.Addr] = nodeData - kps := make([]Edge, len(nodeRef.Data.BodyInternal)) - for i, kp := range 
nodeRef.Data.BodyInternal { - kps[i] = Edge{ - FromNode: nodeRef.Addr, - FromItem: i, - FromTree: nodeRef.Data.Head.Owner, - ToNode: kp.BlockPtr, - ToLevel: nodeRef.Data.Head.Level - 1, - ToKey: kp.Key, - ToGeneration: kp.Generation, - } - g.insertEdge(&kps[i]) - } - } -} - -func (g Graph) FinalCheck(ctx context.Context, fs diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) error { - var stats textui.Portion[int] - _ctx := ctx - - ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.read.substep", "check-keypointers") - dlog.Info(_ctx, "Checking keypointers for dead-ends...") - progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) - stats.D = len(g.EdgesTo) - progressWriter.Set(stats) - for laddr := range g.EdgesTo { - if _, ok := g.Nodes[laddr]; !ok { - _, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, sb, laddr, btrfstree.NodeExpectations{ - LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr}, - }) - if err == nil { - progressWriter.Done() - return fmt.Errorf("node@%v exists but was not in node scan results", laddr) - } - g.BadNodes[laddr] = err - } - stats.N++ - progressWriter.Set(stats) - } - progressWriter.Done() - dlog.Info(ctx, "... 
done checking keypointers") - - ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.read.substep", "check-for-loops") - dlog.Info(_ctx, "Checking for btree loops...") - stats.D = len(g.Nodes) - stats.N = 0 - progressWriter = textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) - progressWriter.Set(stats) - visited := make(containers.Set[btrfsvol.LogicalAddr], len(g.Nodes)) - numLoops := 0 - var checkNode func(node btrfsvol.LogicalAddr, stack []btrfsvol.LogicalAddr) - checkNode = func(node btrfsvol.LogicalAddr, stack []btrfsvol.LogicalAddr) { - defer func() { - stats.N = len(visited) - progressWriter.Set(stats) - }() - if visited.Has(node) { - return - } - if slices.Contains(node, stack) { - numLoops++ - dlog.Error(ctx, "loop:") - for _, line := range g.renderLoop(append(stack, node)) { - dlog.Errorf(ctx, " %s", line) - } - return - } - stack = append(stack, node) - for _, kp := range g.EdgesTo[node] { - checkNode(kp.FromNode, stack) - } - visited.Insert(node) - } - for _, node := range maps.SortedKeys(g.Nodes) { - checkNode(node, nil) - } - progressWriter.Done() - if numLoops > 0 { - return fmt.Errorf("%d btree loops", numLoops) - } - dlog.Info(_ctx, "... 
done checking for loops") - - return nil -} - -func discardOK[T any](val T, _ bool) T { - return val -} diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go deleted file mode 100644 index 0e51805..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/loops.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com> -// -// SPDX-License-Identifier: GPL-2.0-or-later - -package graph - -import ( - "fmt" - "strings" - - "github.com/datawire/dlib/derror" - - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" -) - -func (g Graph) renderNode(node btrfsvol.LogicalAddr) []string { - if node == 0 { - return []string{"root"} - } else if nodeData, ok := g.Nodes[node]; ok { - return []string{ - fmt.Sprintf("{addr: %v,", node), - fmt.Sprintf(" level: %v,", nodeData.Level), - fmt.Sprintf(" gen: %v,", nodeData.Generation), - fmt.Sprintf(" num_items: %v,", nodeData.NumItems), - fmt.Sprintf(" min_item: {%d,%v,%d},", - nodeData.MinItem.ObjectID, - nodeData.MinItem.ItemType, - nodeData.MinItem.Offset), - fmt.Sprintf(" max_item: {%d,%v,%d}}", - nodeData.MaxItem.ObjectID, - nodeData.MaxItem.ItemType, - nodeData.MaxItem.Offset), - } - } else if nodeErr, ok := g.BadNodes[node]; ok { - return []string{ - fmt.Sprintf("{addr:%v,", node), - fmt.Sprintf(" err:%q}", nodeErr.Error()), - } - } else { - panic("should not happen") - } -} - -func (g Graph) renderEdge(kp Edge) []string { - a := fmt.Sprintf("[%d]={", kp.FromItem) - b := strings.Repeat(" ", len(a)) - ret := []string{ - a + fmt.Sprintf("ToNode: %v,", kp.ToNode), - b + fmt.Sprintf("ToLevel: %v,", kp.ToLevel), - b + fmt.Sprintf("ToGen: %v,", kp.ToGeneration), - b + fmt.Sprintf("ToKey: {%d,%v,%d}}", - kp.ToKey.ObjectID, - kp.ToKey.ItemType, - kp.ToKey.Offset), - } - - var err error - if toNode, ok := g.Nodes[kp.ToNode]; !ok { - err = g.BadNodes[kp.ToNode] - } else { - err 
= checkNodeExpectations(kp, toNode) - } - if err != nil { - c := strings.Repeat(" ", len(a)-1) - ret = append(ret, - c+"^", - c+"`-err="+strings.ReplaceAll(err.Error(), "\n", "\n"+c+" "), - ) - } - return ret -} - -func (g Graph) renderLoop(stack []btrfsvol.LogicalAddr) []string { - var lines []string - add := func(suffixes []string) { - curLen := 0 - for _, line := range lines { - if len(line) > curLen { - curLen = len(line) - } - } - for i, suffix := range suffixes { - if len(lines) <= i { - lines = append(lines, "") - } - if len(lines[i]) < curLen { - if i == 0 { - lines[i] += strings.Repeat("-", curLen-len(lines[i])-1) + ">" - } else { - lines[i] += strings.Repeat(" ", curLen-len(lines[i])) - } - } - lines[i] += suffix - } - } - - for i, node := range stack { - if i > 0 { - for _, kp := range g.EdgesTo[node] { - if kp.FromNode == stack[i-1] { - add(g.renderEdge(*kp)) - break - } - } - } - add(g.renderNode(node)) - } - - return lines -} - -func checkNodeExpectations(kp Edge, toNode Node) error { - var errs derror.MultiError - if toNode.Level != kp.ToLevel { - errs = append(errs, fmt.Errorf("kp.level=%v != node.level=%v", - kp.ToLevel, toNode.Level)) - } - if toNode.Generation != kp.ToGeneration { - errs = append(errs, fmt.Errorf("kp.generation=%v != node.generation=%v", - kp.ToGeneration, toNode.Generation)) - } - if toNode.NumItems == 0 { - errs = append(errs, fmt.Errorf("node.num_items=0")) - } else if kp.ToKey != (btrfsprim.Key{}) && toNode.MinItem != kp.ToKey { - errs = append(errs, fmt.Errorf("kp.key=%v != node.items[0].key=%v", - kp.ToKey, toNode.MinItem)) - } - if len(errs) > 0 { - return errs - } - return nil -} diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go deleted file mode 100644 index 56da32d..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com> -// -// 
SPDX-License-Identifier: GPL-2.0-or-later - -package keyio - -import ( - "context" - "fmt" - "sync" - - "github.com/datawire/dlib/dlog" - - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph" - "git.lukeshu.com/btrfs-progs-ng/lib/containers" - "git.lukeshu.com/btrfs-progs-ng/lib/diskio" - "git.lukeshu.com/btrfs-progs-ng/lib/textui" -) - -type ItemPtr struct { - Node btrfsvol.LogicalAddr - Idx int -} - -func (ptr ItemPtr) String() string { - return fmt.Sprintf("node@%v[%v]", ptr.Node, ptr.Idx) -} - -type SizeAndErr struct { - Size uint64 - Err error -} - -type FlagsAndErr struct { - NoDataSum bool - Err error -} - -type Handle struct { - rawFile diskio.File[btrfsvol.LogicalAddr] - sb btrfstree.Superblock - graph graph.Graph - - Flags map[ItemPtr]FlagsAndErr // INODE_ITEM - Names map[ItemPtr][]byte // DIR_INDEX - Sizes map[ItemPtr]SizeAndErr // EXTENT_CSUM and EXTENT_DATA - - mu sync.Mutex - cache containers.ARCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]] -} - -func NewHandle(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) *Handle { - return &Handle{ - rawFile: file, - sb: sb, - - Flags: make(map[ItemPtr]FlagsAndErr), - Names: make(map[ItemPtr][]byte), - Sizes: make(map[ItemPtr]SizeAndErr), - - cache: containers.ARCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]]{ - MaxLen: textui.Tunable(8), - OnRemove: func(_ btrfsvol.LogicalAddr, nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) { - btrfstree.FreeNodeRef(nodeRef) - }, - }, - } -} - -func (o *Handle) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) { - for i, item := range nodeRef.Data.BodyLeaf { - ptr := ItemPtr{ - Node: nodeRef.Addr, - Idx: i, - } - switch 
itemBody := item.Body.(type) { - case *btrfsitem.Inode: - o.Flags[ptr] = FlagsAndErr{ - NoDataSum: itemBody.Flags.Has(btrfsitem.INODE_NODATASUM), - Err: nil, - } - case *btrfsitem.DirEntry: - if item.Key.ItemType == btrfsprim.DIR_INDEX_KEY { - o.Names[ptr] = append([]byte(nil), itemBody.Name...) - } - case *btrfsitem.ExtentCSum: - o.Sizes[ptr] = SizeAndErr{ - Size: uint64(itemBody.Size()), - Err: nil, - } - case *btrfsitem.FileExtent: - size, err := itemBody.Size() - o.Sizes[ptr] = SizeAndErr{ - Size: uint64(size), - Err: err, - } - case *btrfsitem.Error: - switch item.Key.ItemType { - case btrfsprim.INODE_ITEM_KEY: - o.Flags[ptr] = FlagsAndErr{ - Err: fmt.Errorf("error decoding item: ptr=%v (tree=%v key=%v): %w", - ptr, nodeRef.Data.Head.Owner, item.Key, itemBody.Err), - } - case btrfsprim.EXTENT_CSUM_KEY, btrfsprim.EXTENT_DATA_KEY: - o.Sizes[ptr] = SizeAndErr{ - Err: fmt.Errorf("error decoding item: ptr=%v (tree=%v key=%v): %w", - ptr, nodeRef.Data.Head.Owner, item.Key, itemBody.Err), - } - } - } - } -} - -func (o *Handle) SetGraph(graph graph.Graph) { - o.graph = graph -} - -func (o *Handle) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node] { - if cached, ok := o.cache.Load(laddr); ok { - dlog.Tracef(ctx, "cache-hit node@%v", laddr) - return cached - } - - graphInfo, ok := o.graph.Nodes[laddr] - if !ok { - panic(fmt.Errorf("should not happen: node@%v is not mentioned in the in-memory graph", laddr)) - } - - dlog.Debugf(ctx, "cache-miss node@%v, reading...", laddr) - ref, err := btrfstree.ReadNode(o.rawFile, o.sb, laddr, btrfstree.NodeExpectations{ - LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr}, - Level: containers.Optional[uint8]{OK: true, Val: graphInfo.Level}, - Generation: containers.Optional[btrfsprim.Generation]{OK: true, Val: graphInfo.Generation}, - Owner: func(treeID btrfsprim.ObjID) error { - if treeID != graphInfo.Owner { - return fmt.Errorf("expected owner=%v but 
claims to have owner=%v", - graphInfo.Owner, treeID) - } - return nil - }, - MinItem: containers.Optional[btrfsprim.Key]{OK: true, Val: graphInfo.MinItem}, - MaxItem: containers.Optional[btrfsprim.Key]{OK: true, Val: graphInfo.MaxItem}, - }) - if err != nil { - panic(fmt.Errorf("should not happen: i/o error: %w", err)) - } - - o.cache.Store(laddr, ref) - - return ref -} - -func (o *Handle) ReadItem(ctx context.Context, ptr ItemPtr) btrfsitem.Item { - o.mu.Lock() - defer o.mu.Unlock() - if o.graph.Nodes[ptr.Node].Level != 0 { - panic(fmt.Errorf("should not happen: keyio.Handle.ReadItem called for non-leaf node@%v", ptr.Node)) - } - if ptr.Idx < 0 { - panic(fmt.Errorf("should not happen: keyio.Handle.ReadItem called for negative item index: %v", ptr.Idx)) - } - items := o.readNode(ctx, ptr.Node).Data.BodyLeaf - if ptr.Idx >= len(items) { - panic(fmt.Errorf("should not happen: keyio.Handle.ReadItem called for out-of-bounds item index: index=%v len=%v", - ptr.Idx, len(items))) - } - return items[ptr.Idx].Body.CloneItem() -} diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go deleted file mode 100644 index cf334a0..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go +++ /dev/null @@ -1,583 +0,0 @@ -// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com> -// -// SPDX-License-Identifier: GPL-2.0-or-later - -package rebuildnodes - -import ( - "context" - "fmt" - "runtime" - "sort" - "time" - - "github.com/datawire/dlib/dgroup" - "github.com/datawire/dlib/dlog" - - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees" - 
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio" - "git.lukeshu.com/btrfs-progs-ng/lib/containers" - "git.lukeshu.com/btrfs-progs-ng/lib/maps" - "git.lukeshu.com/btrfs-progs-ng/lib/textui" -) - -type keyAndTree struct { - btrfsprim.Key - TreeID btrfsprim.ObjID -} - -func (a keyAndTree) Compare(b keyAndTree) int { - if d := containers.NativeCompare(a.TreeID, b.TreeID); d != 0 { - return d - } - return a.Key.Compare(b.Key) -} - -func (o keyAndTree) String() string { - return fmt.Sprintf("tree=%v key=%v", o.TreeID, o.Key) -} - -type rebuilder struct { - sb btrfstree.Superblock - graph graph.Graph - keyIO *keyio.Handle - - rebuilt *btrees.RebuiltForrest - - curKey struct { - TreeID btrfsprim.ObjID - Key containers.Optional[btrfsprim.Key] - } - treeQueue containers.Set[btrfsprim.ObjID] - retryItemQueue map[btrfsprim.ObjID]containers.Set[keyAndTree] - addedItemQueue containers.Set[keyAndTree] - settledItemQueue containers.Set[keyAndTree] - augmentQueue map[btrfsprim.ObjID]*treeAugmentQueue - numAugments int - numAugmentFailures int -} - -type treeAugmentQueue struct { - zero map[Want]struct{} - single map[Want]btrfsvol.LogicalAddr - multi map[Want]containers.Set[btrfsvol.LogicalAddr] -} - -type Rebuilder interface { - Rebuild(context.Context) error - ListRoots(context.Context) map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr] -} - -func NewRebuilder(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) (Rebuilder, error) { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.step", "read-fs-data") - sb, nodeGraph, keyIO, err := ScanDevices(ctx, fs, nodeScanResults) // ScanDevices does its own logging - if err != nil { - return nil, err - } - - o := &rebuilder{ - sb: sb, - graph: nodeGraph, - keyIO: keyIO, - } - o.rebuilt = btrees.NewRebuiltForrest(sb, nodeGraph, keyIO, o) - return o, nil -} - -func (o *rebuilder) 
ListRoots(ctx context.Context) map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr] { - return o.rebuilt.ListRoots(ctx) -} - -func (o *rebuilder) Rebuild(ctx context.Context) error { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.step", "rebuild") - - // Initialize - o.retryItemQueue = make(map[btrfsprim.ObjID]containers.Set[keyAndTree]) - o.addedItemQueue = make(containers.Set[keyAndTree]) - o.settledItemQueue = make(containers.Set[keyAndTree]) - o.augmentQueue = make(map[btrfsprim.ObjID]*treeAugmentQueue) - - // Seed the queue - o.treeQueue = containers.NewSet[btrfsprim.ObjID]( - btrfsprim.ROOT_TREE_OBJECTID, - btrfsprim.CHUNK_TREE_OBJECTID, - // btrfsprim.TREE_LOG_OBJECTID, // TODO(lukeshu): Special LOG_TREE handling - btrfsprim.BLOCK_GROUP_TREE_OBJECTID, - ) - - // Run - for passNum := 0; len(o.treeQueue) > 0 || len(o.addedItemQueue) > 0 || len(o.settledItemQueue) > 0 || len(o.augmentQueue) > 0; passNum++ { - ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.pass", passNum) - - // Crawl trees (Drain o.treeQueue, fill o.addedItemQueue). - if err := o.processTreeQueue(ctx); err != nil { - return err - } - runtime.GC() - - if len(o.addedItemQueue) > 0 { - // Settle items (drain o.addedItemQueue, fill o.augmentQueue and o.settledItemQueue). - if err := o.processAddedItemQueue(ctx); err != nil { - return err - } - } else { - // Process items (drain o.settledItemQueue, fill o.augmentQueue and o.treeQueue). - if err := o.processSettledItemQueue(ctx); err != nil { - return err - } - } - runtime.GC() - - // Apply augments (drain o.augmentQueue (and maybe o.retryItemQueue), fill o.addedItemQueue). - if err := o.processAugmentQueue(ctx); err != nil { - return err - } - runtime.GC() - } - - return nil -} - -// processTreeQueue drains o.treeQueue, filling o.addedItemQueue. 
-func (o *rebuilder) processTreeQueue(ctx context.Context) error { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "collect-items") - - queue := maps.SortedKeys(o.treeQueue) - o.treeQueue = make(containers.Set[btrfsprim.ObjID]) - - // Because trees can be wildly different sizes, it's impossible to have a meaningful - // progress percentage here. - o.curKey.Key.OK = false - for _, o.curKey.TreeID = range queue { - if err := ctx.Err(); err != nil { - return err - } - // This will call o.AddedItem as nescessary, which - // inserts to o.addedItemQueue. - _ = o.rebuilt.Tree(ctx, o.curKey.TreeID) - } - - return nil -} - -type settleItemStats struct { - textui.Portion[int] - NumAugments int - NumAugmentTrees int -} - -func (s settleItemStats) String() string { - // return textui.Sprintf("%v (queued %v augments across %v trees)", - return textui.Sprintf("%v (aug:%v trees:%v)", - s.Portion, s.NumAugments, s.NumAugmentTrees) -} - -// processAddedItemQueue drains o.addedItemQueue, filling o.augmentQueue and o.settledItemQueue. 
-func (o *rebuilder) processAddedItemQueue(ctx context.Context) error { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "settle-items") - - queue := maps.Keys(o.addedItemQueue) - o.addedItemQueue = make(containers.Set[keyAndTree]) - sort.Slice(queue, func(i, j int) bool { - return queue[i].Compare(queue[j]) < 0 - }) - - var progress settleItemStats - progress.D = len(queue) - progressWriter := textui.NewProgress[settleItemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress) - - for i, key := range queue { - progress.N = i - progressWriter.Set(progress) - - ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.settle.item", key) - tree := o.rebuilt.Tree(ctx, key.TreeID) - incPtr, ok := tree.Items(ctx).Load(key.Key) - if !ok { - panic(fmt.Errorf("should not happen: failed to load already-added item: %v", key)) - } - excPtr, ok := tree.PotentialItems(ctx).Load(key.Key) - if ok && tree.ShouldReplace(incPtr.Node, excPtr.Node) { - wantKey := WantWithTree{ - TreeID: key.TreeID, - Key: wantFromKey(key.Key), - } - o.wantAugment(ctx, wantKey, tree.LeafToRoots(ctx, excPtr.Node)) - progress.NumAugments = o.numAugments - progress.NumAugmentTrees = len(o.augmentQueue) - progressWriter.Set(progress) - } else if !handleWouldBeNoOp(key.ItemType) { - o.settledItemQueue.Insert(key) - } - } - - progress.N = len(queue) - progressWriter.Set(progress) - progressWriter.Done() - return nil -} - -type processItemStats struct { - textui.Portion[int] - NumAugments int - NumFailures int - NumAugmentTrees int -} - -func (s processItemStats) String() string { - // return textui.Sprintf("%v (queued %v augments and %v failures across %v trees)", - return textui.Sprintf("%v (aug:%v fail:%v trees:%v)", - s.Portion, s.NumAugments, s.NumFailures, s.NumAugmentTrees) -} - -// processSettledItemQueue drains o.settledItemQueue, filling o.augmentQueue and o.treeQueue. 
-func (o *rebuilder) processSettledItemQueue(ctx context.Context) error { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "process-items") - - queue := maps.Keys(o.settledItemQueue) - o.settledItemQueue = make(containers.Set[keyAndTree]) - sort.Slice(queue, func(i, j int) bool { - return queue[i].Compare(queue[j]) < 0 - }) - - var progress processItemStats - progress.D = len(queue) - progressWriter := textui.NewProgress[processItemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress) - - type keyAndBody struct { - keyAndTree - Body btrfsitem.Item - } - itemChan := make(chan keyAndBody, textui.Tunable(300)) // average items-per-node≈100; let's have a buffer of ~3 nodes - grp := dgroup.NewGroup(ctx, dgroup.GroupConfig{}) - grp.Go("io", func(ctx context.Context) error { - defer close(itemChan) - for _, key := range queue { - if err := ctx.Err(); err != nil { - return err - } - ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.process.item", key) - item := keyAndBody{ - keyAndTree: key, - Body: o.rebuilt.Tree(ctx, key.TreeID).ReadItem(ctx, key.Key), - } - select { - case itemChan <- item: - case <-ctx.Done(): - } - } - return nil - }) - grp.Go("cpu", func(ctx context.Context) error { - defer progressWriter.Done() - o.curKey.Key.OK = true - for item := range itemChan { - ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.process.item", item.keyAndTree) - o.curKey.TreeID = item.TreeID - o.curKey.Key.Val = item.Key - handleItem(o, ctx, item.TreeID, btrfstree.Item{ - Key: item.Key, - Body: item.Body, - }) - item.Body.Free() - if item.ItemType == btrfsitem.ROOT_ITEM_KEY { - o.treeQueue.Insert(item.ObjectID) - } - progress.N++ - progress.NumAugments = o.numAugments - progress.NumFailures = o.numAugmentFailures - progress.NumAugmentTrees = len(o.augmentQueue) - progressWriter.Set(progress) - } - return nil - }) - return 
grp.Wait() -} - -// processAugmentQueue drains o.augmentQueue (and maybe o.retryItemQueue), filling o.addedItemQueue. -func (o *rebuilder) processAugmentQueue(ctx context.Context) error { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "apply-augments") - - resolvedAugments := make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], len(o.augmentQueue)) - var progress textui.Portion[int] - for _, treeID := range maps.SortedKeys(o.augmentQueue) { - if err := ctx.Err(); err != nil { - return err - } - ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID) - resolvedAugments[treeID] = o.resolveTreeAugments(ctx, treeID) - progress.D += len(resolvedAugments[treeID]) - } - o.augmentQueue = make(map[btrfsprim.ObjID]*treeAugmentQueue) - o.numAugments = 0 - o.numAugmentFailures = 0 - runtime.GC() - - progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress) - for _, treeID := range maps.SortedKeys(resolvedAugments) { - ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID) - for _, nodeAddr := range maps.SortedKeys(resolvedAugments[treeID]) { - if err := ctx.Err(); err != nil { - progressWriter.Set(progress) - progressWriter.Done() - return err - } - progressWriter.Set(progress) - // This will call o.AddedItem as nescessary, which - // inserts to o.addedItemQueue. 
- o.rebuilt.Tree(ctx, treeID).AddRoot(ctx, nodeAddr) - progress.N++ - } - } - progressWriter.Set(progress) - progressWriter.Done() - - return nil -} - -func (o *rebuilder) enqueueRetry(ifTreeID btrfsprim.ObjID) { - if o.curKey.Key.OK { - if o.retryItemQueue[ifTreeID] == nil { - o.retryItemQueue[ifTreeID] = make(containers.Set[keyAndTree]) - } - o.retryItemQueue[ifTreeID].Insert(keyAndTree{ - TreeID: o.curKey.TreeID, - Key: o.curKey.Key.Val, - }) - } else { - o.treeQueue.Insert(o.curKey.TreeID) - } -} - -func (o *rebuilder) resolveTreeAugments(ctx context.Context, treeID btrfsprim.ObjID) containers.Set[btrfsvol.LogicalAddr] { - // Define an algorithm that takes several lists of items, and returns a - // set of those items such that each input list contains zero or one of - // the items from your return set. The same item may appear in multiple - // of the input lists. - - type ChoiceInfo struct { - Count int - Distance int - Generation btrfsprim.Generation - } - choices := make(map[btrfsvol.LogicalAddr]ChoiceInfo) - // o.augmentQueue[treeID].zero is optimized storage for lists - // with zero items. Go ahead and free that memory up. - o.augmentQueue[treeID].zero = nil - // o.augmentQueue[treeID].single is optimized storage for - // lists with exactly 1 item. - for _, choice := range o.augmentQueue[treeID].single { - if old, ok := choices[choice]; ok { - old.Count++ - choices[choice] = old - } else { - choices[choice] = ChoiceInfo{ - Count: 1, - Distance: discardOK(o.rebuilt.Tree(ctx, treeID).COWDistance(o.graph.Nodes[choice].Owner)), - Generation: o.graph.Nodes[choice].Generation, - } - } - } - // o.augmentQueue[treeID].multi is the main list storage. 
- for _, list := range o.augmentQueue[treeID].multi { - for choice := range list { - if old, ok := choices[choice]; ok { - old.Count++ - choices[choice] = old - } else { - choices[choice] = ChoiceInfo{ - Count: 1, - Distance: discardOK(o.rebuilt.Tree(ctx, treeID).COWDistance(o.graph.Nodes[choice].Owner)), - Generation: o.graph.Nodes[choice].Generation, - } - } - } - } - - // > Example 1: Given the input lists - // > - // > 0: [A, B] - // > 2: [A, C] - // > - // > legal solutions would be `[]`, `[A]`, `[B]`, `[C]`, or `[B, C]`. It - // > would not be legal to return `[A, B]` or `[A, C]`. - // - // > Example 2: Given the input lists - // > - // > 1: [A, B] - // > 2: [A] - // > 3: [B] - // > - // > legal solution would be `[]`, `[A]` or `[B]`. It would not be legal - // > to return `[A, B]`. - // - // The algorithm should optimize for the following goals: - // - // - We prefer that each input list have an item in the return set. - // - // > In Example 1, while `[]`, `[B]`, and `[C]` are permissible - // > solutions, they are not optimal, because one or both of the input - // > lists are not represented. - // > - // > It may be the case that it is not possible to represent all lists - // > in the result; in Example 2, either list 2 or list 3 must be - // > unrepresented. - // - // - Each item has a non-negative scalar "distance" score, we prefer - // lower distances. Distance scores are comparable; 0 is preferred, - // and a distance of 4 is twice as bad as a distance of 2. - // - // - Each item has a "generation" score, we prefer higher generations. - // Generation scores should not be treated as a linear scale; the - // magnitude of deltas is meaningless; only the sign of a delta is - // meaningful. 
- // - // > So it would be wrong to say something like - // > - // > desirability = (-a*distance) + (b*generation) // for some constants `a` and `b` - // > - // > because `generation` can't be used that way - // - // - We prefer items that appear in more lists over items that appear in - // fewer lists. - // - // The relative priority of these 4 goals is undefined; preferably the - // algorithm should be defined in a way that makes it easy to adjust the - // relative priorities. - - ret := make(containers.Set[btrfsvol.LogicalAddr]) - illegal := make(containers.Set[btrfsvol.LogicalAddr]) // cannot-be-accepted and already-accepted - accept := func(item btrfsvol.LogicalAddr) { - ret.Insert(item) - for _, list := range o.augmentQueue[treeID].multi { - if list.Has(item) { - illegal.InsertFrom(list) - } - } - } - - sortedItems := maps.Keys(choices) - sort.Slice(sortedItems, func(i, j int) bool { - iItem, jItem := sortedItems[i], sortedItems[j] - if choices[iItem].Count != choices[jItem].Count { - return choices[iItem].Count > choices[jItem].Count // reverse this check; higher counts should sort lower - } - if choices[iItem].Distance != choices[jItem].Distance { - return choices[iItem].Distance < choices[jItem].Distance - } - if choices[iItem].Generation != choices[jItem].Generation { - return choices[iItem].Generation > choices[jItem].Generation // reverse this check; higher generations should sort lower - } - return iItem < jItem // laddr is as good a tiebreaker as anything - }) - for _, item := range sortedItems { - if !illegal.Has(item) { - accept(item) - } - } - - // Log our result - wantKeys := append( - maps.Keys(o.augmentQueue[treeID].single), - maps.Keys(o.augmentQueue[treeID].multi)...) 
- sort.Slice(wantKeys, func(i, j int) bool { - return wantKeys[i].Compare(wantKeys[j]) < 0 - }) - for _, wantKey := range wantKeys { - list, ok := o.augmentQueue[treeID].multi[wantKey] - if !ok { - list = containers.NewSet[btrfsvol.LogicalAddr](o.augmentQueue[treeID].single[wantKey]) - } - chose := list.Intersection(ret) - switch { - case len(chose) == 0: - dlog.Infof(ctx, "lists[%q]: chose (none) from %v", wantKey, maps.SortedKeys(list)) - case len(list) > 1: - dlog.Infof(ctx, "lists[%q]: chose %v from %v", wantKey, chose.TakeOne(), maps.SortedKeys(list)) - default: - dlog.Debugf(ctx, "lists[%q]: chose %v from %v", wantKey, chose.TakeOne(), maps.SortedKeys(list)) - } - } - - // Free some memory - o.augmentQueue[treeID].single = nil - o.augmentQueue[treeID].multi = nil - - return ret -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -func (queue *treeAugmentQueue) has(wantKey Want) bool { - if queue != nil { - if queue.zero != nil { - if _, ok := queue.zero[wantKey]; ok { - return true - } - } - if queue.single != nil { - if _, ok := queue.single[wantKey]; ok { - return true - } - } - if queue.multi != nil { - if _, ok := queue.multi[wantKey]; ok { - return true - } - } - } - return false -} - -func (queue *treeAugmentQueue) store(wantKey Want, choices containers.Set[btrfsvol.LogicalAddr]) { - if len(choices) == 0 && wantKey.OffsetType > offsetExact { - // This wantKey is unlikely to come up again, so it's - // not worth the RAM of storing a negative result. 
- return - } - switch len(choices) { - case 0: - if queue.zero == nil { - queue.zero = make(map[Want]struct{}) - } - queue.zero[wantKey] = struct{}{} - case 1: - if queue.single == nil { - queue.single = make(map[Want]btrfsvol.LogicalAddr) - } - queue.single[wantKey] = choices.TakeOne() - default: - if queue.multi == nil { - queue.multi = make(map[Want]containers.Set[btrfsvol.LogicalAddr]) - } - queue.multi[wantKey] = choices - } -} - -func (o *rebuilder) hasAugment(wantKey WantWithTree) bool { - return o.augmentQueue[wantKey.TreeID].has(wantKey.Key) -} - -func (o *rebuilder) wantAugment(ctx context.Context, wantKey WantWithTree, choices containers.Set[btrfsvol.LogicalAddr]) { - if o.augmentQueue[wantKey.TreeID] == nil { - o.augmentQueue[wantKey.TreeID] = new(treeAugmentQueue) - } - o.augmentQueue[wantKey.TreeID].store(wantKey.Key, choices) - if len(choices) == 0 { - o.numAugmentFailures++ - dlog.Debug(ctx, "ERR: could not find wanted item") - } else { - o.numAugments++ - dlog.Debugf(ctx, "choices=%v", maps.SortedKeys(choices)) - } -} diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go deleted file mode 100644 index 710030c..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go +++ /dev/null @@ -1,367 +0,0 @@ -// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com> -// -// SPDX-License-Identifier: GPL-2.0-or-later - -package rebuildnodes - -import ( - "context" - "fmt" - - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" -) - -type rebuildCallbacks interface { - fsErr(ctx context.Context, e error) - want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) - wantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID 
btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) - wantDirIndex(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, name []byte) - wantCSum(ctx context.Context, reason string, inodeTree, inodeItem btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) // interval is [beg, end) - wantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) -} - -// handleWouldBeNoOp returns whether or not a call to handleItem for a -// given item type would be a no-op. -func handleWouldBeNoOp(typ btrfsprim.ItemType) bool { - switch typ { - case // btrfsitem.Dev - btrfsprim.DEV_ITEM_KEY, - // btrfsitem.DevStats - btrfsprim.PERSISTENT_ITEM_KEY, - // btrfsitem.Empty - btrfsprim.ORPHAN_ITEM_KEY, - btrfsprim.TREE_BLOCK_REF_KEY, - btrfsprim.SHARED_BLOCK_REF_KEY, - btrfsprim.FREE_SPACE_EXTENT_KEY, - btrfsprim.QGROUP_RELATION_KEY, - // btrfsite.ExtentCSum - btrfsprim.EXTENT_CSUM_KEY: - return true - default: - return false - } -} - -func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, item btrfstree.Item) { - // Notionally, just express the relationships shown in - // https://btrfs.wiki.kernel.org/index.php/File:References.png (from the page - // https://btrfs.wiki.kernel.org/index.php/Data_Structures ) - switch body := item.Body.(type) { - case *btrfsitem.BlockGroup: - o.want(ctx, "Chunk", - btrfsprim.CHUNK_TREE_OBJECTID, - body.ChunkObjectID, - btrfsitem.CHUNK_ITEM_KEY) - o.wantOff(ctx, "FreeSpaceInfo", - btrfsprim.FREE_SPACE_TREE_OBJECTID, - item.Key.ObjectID, - btrfsitem.FREE_SPACE_INFO_KEY, - item.Key.Offset) - case *btrfsitem.Chunk: - o.want(ctx, "owning Root", - btrfsprim.ROOT_TREE_OBJECTID, - body.Head.Owner, - btrfsitem.ROOT_ITEM_KEY) - case *btrfsitem.Dev: - // nothing - case *btrfsitem.DevExtent: - o.wantOff(ctx, "Chunk", - body.ChunkTree, - body.ChunkObjectID, - btrfsitem.CHUNK_ITEM_KEY, - uint64(body.ChunkOffset)) - case *btrfsitem.DevStats: - // nothing - case 
*btrfsitem.DirEntry: - // containing-directory - o.wantOff(ctx, "containing dir inode", - treeID, - item.Key.ObjectID, - btrfsitem.INODE_ITEM_KEY, - 0) - // siblings - switch item.Key.ItemType { - case btrfsitem.DIR_ITEM_KEY: - o.wantDirIndex(ctx, "corresponding DIR_INDEX", - treeID, - item.Key.ObjectID, - body.Name) - case btrfsitem.DIR_INDEX_KEY: - o.wantOff(ctx, "corresponding DIR_ITEM", - treeID, - item.Key.ObjectID, - btrfsitem.DIR_ITEM_KEY, - btrfsitem.NameHash(body.Name)) - case btrfsitem.XATTR_ITEM_KEY: - // nothing - default: - // This is a panic because the item decoder should not emit a - // btrfsitem.DirEntry for other item types without this code also being - // updated. - panic(fmt.Errorf("should not happen: DirEntry: unexpected ItemType=%v", item.Key.ItemType)) - } - // item-within-directory - if body.Location != (btrfsprim.Key{}) { - switch body.Location.ItemType { - case btrfsitem.INODE_ITEM_KEY: - o.wantOff(ctx, "item being pointed to", - treeID, - body.Location.ObjectID, - body.Location.ItemType, - body.Location.Offset) - o.wantOff(ctx, "backref from item being pointed to", - treeID, - body.Location.ObjectID, - btrfsitem.INODE_REF_KEY, - uint64(item.Key.ObjectID)) - case btrfsitem.ROOT_ITEM_KEY: - o.want(ctx, "Root of subvolume being pointed to", - btrfsprim.ROOT_TREE_OBJECTID, - body.Location.ObjectID, - body.Location.ItemType) - default: - o.fsErr(ctx, fmt.Errorf("DirEntry: unexpected .Location.ItemType=%v", body.Location.ItemType)) - } - } - case *btrfsitem.Empty: - // nothing - case *btrfsitem.Extent: - // if body.Head.Flags.Has(btrfsitem.EXTENT_FLAG_TREE_BLOCK) { - // // Supposedly this flag indicates that - // // body.Info.Key identifies a node by the - // // first key in the node. But nothing in the - // // kernel ever reads this, so who knows if it - // // always gets updated correctly? 
- // } - for i, ref := range body.Refs { - switch refBody := ref.Body.(type) { - case nil: - // nothing - case *btrfsitem.ExtentDataRef: - o.wantOff(ctx, "referencing Inode", - refBody.Root, - refBody.ObjectID, - btrfsitem.INODE_ITEM_KEY, - 0) - o.wantOff(ctx, "referencing FileExtent", - refBody.Root, - refBody.ObjectID, - btrfsitem.EXTENT_DATA_KEY, - uint64(refBody.Offset)) - case *btrfsitem.SharedDataRef: - // nothing - default: - // This is a panic because the item decoder should not emit a new - // type to ref.Body without this code also being updated. - panic(fmt.Errorf("should not happen: Extent: unexpected .Refs[%d].Body type %T", i, refBody)) - } - } - case *btrfsitem.ExtentCSum: - // nothing - case *btrfsitem.ExtentDataRef: - o.want(ctx, "Extent being referenced", - btrfsprim.EXTENT_TREE_OBJECTID, - item.Key.ObjectID, - btrfsitem.EXTENT_ITEM_KEY) - o.wantOff(ctx, "referencing Inode", - body.Root, - body.ObjectID, - btrfsitem.INODE_ITEM_KEY, - 0) - o.wantOff(ctx, "referencing FileExtent", - body.Root, - body.ObjectID, - btrfsitem.EXTENT_DATA_KEY, - uint64(body.Offset)) - case *btrfsitem.FileExtent: - o.wantOff(ctx, "containing Inode", - treeID, - item.Key.ObjectID, - btrfsitem.INODE_ITEM_KEY, - 0) - switch body.Type { - case btrfsitem.FILE_EXTENT_INLINE: - // nothing - case btrfsitem.FILE_EXTENT_REG, btrfsitem.FILE_EXTENT_PREALLOC: - // NB: o.wantCSum checks inodeBody.Flags.Has(btrfsitem.INODE_NODATASUM) for us. 
- o.wantCSum(ctx, "data sum", - treeID, item.Key.ObjectID, - body.BodyExtent.DiskByteNr, - body.BodyExtent.DiskByteNr.Add(body.BodyExtent.DiskNumBytes)) - default: - o.fsErr(ctx, fmt.Errorf("FileExtent: unexpected body.Type=%v", body.Type)) - } - case *btrfsitem.FreeSpaceBitmap: - o.wantOff(ctx, "FreeSpaceInfo", - treeID, - item.Key.ObjectID, - btrfsitem.FREE_SPACE_INFO_KEY, - item.Key.Offset) - case *btrfsitem.FreeSpaceHeader: - o.wantOff(ctx, ".Location", - treeID, - body.Location.ObjectID, - body.Location.ItemType, - body.Location.Offset) - case *btrfsitem.FreeSpaceInfo: - if body.Flags.Has(btrfsitem.FREE_SPACE_USING_BITMAPS) { - o.wantOff(ctx, "FreeSpaceBitmap", - treeID, - item.Key.ObjectID, - btrfsitem.FREE_SPACE_BITMAP_KEY, - item.Key.Offset) - } - case *btrfsitem.Inode: - o.want(ctx, "backrefs", - treeID, // TODO: validate the number of these against body.NLink - item.Key.ObjectID, - btrfsitem.INODE_REF_KEY) - o.wantFileExt(ctx, "FileExtents", - treeID, item.Key.ObjectID, body.Size) - if body.BlockGroup != 0 { - o.want(ctx, "BlockGroup", - btrfsprim.EXTENT_TREE_OBJECTID, - body.BlockGroup, - btrfsitem.BLOCK_GROUP_ITEM_KEY) - } - case *btrfsitem.InodeRefs: - o.wantOff(ctx, "child Inode", - treeID, - item.Key.ObjectID, - btrfsitem.INODE_ITEM_KEY, - 0) - o.wantOff(ctx, "parent Inode", - treeID, - btrfsprim.ObjID(item.Key.Offset), - btrfsitem.INODE_ITEM_KEY, - 0) - for _, ref := range body.Refs { - o.wantOff(ctx, "DIR_ITEM", - treeID, - btrfsprim.ObjID(item.Key.Offset), - btrfsitem.DIR_ITEM_KEY, - btrfsitem.NameHash(ref.Name)) - o.wantOff(ctx, "DIR_INDEX", - treeID, - btrfsprim.ObjID(item.Key.Offset), - btrfsitem.DIR_INDEX_KEY, - uint64(ref.Index)) - } - case *btrfsitem.Metadata: - for i, ref := range body.Refs { - switch refBody := ref.Body.(type) { - case nil: - // nothing - case *btrfsitem.ExtentDataRef: - o.wantOff(ctx, "referencing INode", - refBody.Root, - refBody.ObjectID, - btrfsitem.INODE_ITEM_KEY, - 0) - o.wantOff(ctx, "referencing FileExtent", - 
refBody.Root, - refBody.ObjectID, - btrfsitem.EXTENT_DATA_KEY, - uint64(refBody.Offset)) - case *btrfsitem.SharedDataRef: - // nothing - default: - // This is a panic because the item decoder should not emit a new - // type to ref.Body without this code also being updated. - panic(fmt.Errorf("should not happen: Metadata: unexpected .Refs[%d].Body type %T", i, refBody)) - } - } - case *btrfsitem.Root: - if body.RootDirID != 0 { - o.wantOff(ctx, "root directory", - item.Key.ObjectID, - body.RootDirID, - btrfsitem.INODE_ITEM_KEY, - 0) - } - if body.UUID != (btrfsprim.UUID{}) { - key := btrfsitem.UUIDToKey(body.UUID) - o.wantOff(ctx, "uuid", - btrfsprim.UUID_TREE_OBJECTID, - key.ObjectID, - key.ItemType, - key.Offset) - } - if body.ParentUUID != (btrfsprim.UUID{}) { - key := btrfsitem.UUIDToKey(body.ParentUUID) - o.wantOff(ctx, "parent uuid", - btrfsprim.UUID_TREE_OBJECTID, - key.ObjectID, - key.ItemType, - key.Offset) - } - case *btrfsitem.RootRef: - var otherType btrfsprim.ItemType - var parent, child btrfsprim.ObjID - switch item.Key.ItemType { - case btrfsitem.ROOT_REF_KEY: - otherType = btrfsitem.ROOT_BACKREF_KEY - parent = item.Key.ObjectID - child = btrfsprim.ObjID(item.Key.Offset) - case btrfsitem.ROOT_BACKREF_KEY: - otherType = btrfsitem.ROOT_REF_KEY - parent = btrfsprim.ObjID(item.Key.Offset) - child = item.Key.ObjectID - default: - // This is a panic because the item decoder should not emit a - // btrfsitem.RootRef for other item types without this code also being - // updated. 
- panic(fmt.Errorf("should not happen: RootRef: unexpected ItemType=%v", item.Key.ItemType)) - } - // sibling - o.wantOff(ctx, fmt.Sprintf("corresponding %v", otherType), - treeID, - btrfsprim.ObjID(item.Key.Offset), - otherType, - uint64(item.Key.ObjectID)) - // parent - o.want(ctx, "parent subvolume: Root", - treeID, - parent, - btrfsitem.ROOT_ITEM_KEY) - o.wantOff(ctx, "parent subvolume: Inode of parent dir", - parent, - body.DirID, - btrfsitem.INODE_ITEM_KEY, - 0) - o.wantOff(ctx, "parent subvolume: DIR_ITEM in parent dir", - parent, - body.DirID, - btrfsitem.DIR_ITEM_KEY, - btrfsitem.NameHash(body.Name)) - o.wantOff(ctx, "parent subvolume: DIR_INDEX in parent dir", - parent, - body.DirID, - btrfsitem.DIR_INDEX_KEY, - uint64(body.Sequence)) - // child - o.want(ctx, "child subvolume: Root", - treeID, - child, - btrfsitem.ROOT_ITEM_KEY) - case *btrfsitem.SharedDataRef: - o.want(ctx, "Extent", - btrfsprim.EXTENT_TREE_OBJECTID, - item.Key.ObjectID, - btrfsitem.EXTENT_ITEM_KEY) - case *btrfsitem.UUIDMap: - o.want(ctx, "subvolume Root", - btrfsprim.ROOT_TREE_OBJECTID, - body.ObjID, - btrfsitem.ROOT_ITEM_KEY) - case *btrfsitem.Error: - o.fsErr(ctx, fmt.Errorf("error decoding item: %w", body.Err)) - default: - // This is a panic because the item decoder should not emit new types without this - // code also being updated. 
		// This is a panic because the item decoder should not emit new types without this
		// code also being updated.
		panic(fmt.Errorf("should not happen: unexpected item type: %T", body))
	}
}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go deleted file mode 100644 index 492436b..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_treecb.go +++ /dev/null @@ -1,86 +0,0 @@
// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later

package rebuildnodes

import (
	"context"
	"fmt"

	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
)

// AddedItem implements btrees.Callbacks.
//
// It records the (tree, key) pair of an item that has just become
// available, queueing it for processing by the rebuild loop.
func (o *rebuilder) AddedItem(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) {
	o.addedItemQueue.Insert(keyAndTree{
		TreeID: tree,
		Key:    key,
	})
}

// AddedRoot implements btrees.Callbacks.
//
// When a tree gains a new root node, items whose earlier lookups in
// that tree failed (parked in retryItemQueue) are re-queued, since the
// new root may now make them resolvable.
func (o *rebuilder) AddedRoot(ctx context.Context, tree btrfsprim.ObjID, root btrfsvol.LogicalAddr) {
	if retries := o.retryItemQueue[tree]; retries != nil {
		o.addedItemQueue.InsertFrom(retries)
	}
}

// LookupRoot implements btrees.Callbacks.
func (o *rebuilder) LookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) {
	// Look for the ROOT_ITEM for `tree` in the ROOT_TREE.  The offset
	// is left as offsetAny because a ROOT_ITEM's key offset is its
	// generation, which is not known up front.
	wantKey := WantWithTree{
		TreeID: btrfsprim.ROOT_TREE_OBJECTID,
		Key: Want{
			ObjectID:   tree,
			ItemType:   btrfsitem.ROOT_ITEM_KEY,
			OffsetType: offsetAny,
		},
	}
	ctx = withWant(ctx, logFieldTreeWant, "tree Root", wantKey)
	foundKey, ok := o._want(ctx, wantKey)
	if !ok {
		// Not available yet; retry this lookup once the ROOT_TREE
		// gains more items/roots.
		o.enqueueRetry(btrfsprim.ROOT_TREE_OBJECTID)
		return 0, btrfsitem.Root{}, false
	}
	itemBody := o.rebuilt.Tree(ctx, wantKey.TreeID).ReadItem(ctx, foundKey)
	defer itemBody.Free()
	switch itemBody := itemBody.(type) {
	case *btrfsitem.Root:
		// The found key's offset is the root's generation.
		return btrfsprim.Generation(foundKey.Offset), *itemBody, true
	case *btrfsitem.Error:
		o.fsErr(ctx, fmt.Errorf("error decoding item: %v: %w", foundKey, itemBody.Err))
		return 0, btrfsitem.Root{}, false
	default:
		// This is a panic because the item decoder should not emit ROOT_ITEM items as anything but
		// btrfsitem.Root or btrfsitem.Error without this code also being updated.
		panic(fmt.Errorf("should not happen: ROOT_ITEM item has unexpected type: %T", itemBody))
	}
}

// LookupUUID implements btrees.Callbacks.
func (o *rebuilder) LookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) {
	// The UUID tree maps a subvolume UUID to its ObjID; build the
	// exact key for this UUID and require an exact-offset match.
	wantKey := WantWithTree{
		TreeID: btrfsprim.UUID_TREE_OBJECTID,
		Key:    wantFromKey(btrfsitem.UUIDToKey(uuid)),
	}
	ctx = withWant(ctx, logFieldTreeWant, "resolve parent UUID", wantKey)
	if !o._wantOff(ctx, wantKey) {
		// Not available yet; retry once the UUID tree has grown.
		o.enqueueRetry(btrfsprim.UUID_TREE_OBJECTID)
		return 0, false
	}
	itemBody := o.rebuilt.Tree(ctx, wantKey.TreeID).ReadItem(ctx, wantKey.Key.Key())
	defer itemBody.Free()
	switch itemBody := itemBody.(type) {
	case *btrfsitem.UUIDMap:
		return itemBody.ObjID, true
	case *btrfsitem.Error:
		o.fsErr(ctx, fmt.Errorf("error decoding item: %v: %w", wantKey, itemBody.Err))
		return 0, false
	default:
		// This is a panic because the item decoder should not emit UUID_SUBVOL items as anything but
		// btrfsitem.UUIDMap or btrfsitem.Error without this code also being updated.
		panic(fmt.Errorf("should not happen: UUID_SUBVOL item has unexpected type: %T", itemBody))
	}
}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wantcb.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wantcb.go deleted file mode 100644 index adf3cff..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wantcb.go +++ /dev/null @@ -1,400 +0,0 @@
// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later

package rebuildnodes

import (
	"bytes"
	"context"
	"fmt"

	"github.com/datawire/dlib/dlog"

	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio"
	"git.lukeshu.com/btrfs-progs-ng/lib/containers"
)

// fsErr implements rebuildCallbacks.
// fsErr reports a (non-fatal) filesystem inconsistency; the rebuild is
// best-effort over a corrupt filesystem, so these are logged rather
// than returned.
func (o *rebuilder) fsErr(ctx context.Context, e error) {
	dlog.Errorf(ctx, "filesystem error: %v", e)
}

// want implements rebuildCallbacks.
//
// It requests that an item with the given object ID and item type (at
// any offset) exist in the given tree.
func (o *rebuilder) want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) {
	wantKey := WantWithTree{
		TreeID: treeID,
		Key: Want{
			ObjectID:   objID,
			ItemType:   typ,
			OffsetType: offsetAny,
		},
	}
	ctx = withWant(ctx, logFieldItemWant, reason, wantKey)
	o._want(ctx, wantKey)
}

// _want is the shared implementation behind want and LookupRoot: if an
// item matching wantKey (ignoring the key offset) already exists in the
// rebuilt tree it returns that item's key; otherwise it records the set
// of candidate node roots that could supply such an item as a desired
// augmentation, and returns ok=false.
func (o *rebuilder) _want(ctx context.Context, wantKey WantWithTree) (key btrfsprim.Key, ok bool) {
	if o.rebuilt.Tree(ctx, wantKey.TreeID) == nil {
		// The tree itself isn't rebuildable yet; try again later.
		o.enqueueRetry(wantKey.TreeID)
		return btrfsprim.Key{}, false
	}

	// check if we already have it

	tgt := wantKey.Key.Key()
	if key, _, ok := o.rebuilt.Tree(ctx, wantKey.TreeID).Items(ctx).Search(func(key btrfsprim.Key, _ keyio.ItemPtr) int {
		// Zero the offset so that any offset compares equal to tgt.
		key.Offset = 0
		return tgt.Compare(key)
	}); ok {
		return key, true
	}

	// OK, we need to insert it

	if o.hasAugment(wantKey) {
		// An augment for this want is already queued; don't duplicate it.
		return btrfsprim.Key{}, false
	}
	wants := make(containers.Set[btrfsvol.LogicalAddr])
	o.rebuilt.Tree(ctx, wantKey.TreeID).PotentialItems(ctx).Subrange(
		func(k btrfsprim.Key, _ keyio.ItemPtr) int {
			k.Offset = 0
			return tgt.Compare(k)
		},
		func(_ btrfsprim.Key, v keyio.ItemPtr) bool {
			// Any root that reaches a leaf holding a matching item
			// would satisfy this want.
			wants.InsertFrom(o.rebuilt.Tree(ctx, wantKey.TreeID).LeafToRoots(ctx, v.Node))
			return true
		})
	o.wantAugment(ctx, wantKey, wants)
	return btrfsprim.Key{}, false
}

// wantOff implements rebuildCallbacks.
// wantOff is like want, but the key offset must match exactly.
func (o *rebuilder) wantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) {
	wantKey := WantWithTree{
		TreeID: treeID,
		Key: Want{
			ObjectID:   objID,
			ItemType:   typ,
			OffsetType: offsetExact,
			OffsetLow:  off,
		},
	}
	ctx = withWant(ctx, logFieldItemWant, reason, wantKey)
	o._wantOff(ctx, wantKey)
}

// _wantOff is the exact-key analogue of _want: it returns true if the
// exact key already exists in the rebuilt tree, and otherwise records
// the candidate roots that could supply it as a desired augmentation.
func (o *rebuilder) _wantOff(ctx context.Context, wantKey WantWithTree) (ok bool) {
	if o.rebuilt.Tree(ctx, wantKey.TreeID) == nil {
		// The tree itself isn't rebuildable yet; try again later.
		o.enqueueRetry(wantKey.TreeID)
		return false
	}

	// check if we already have it

	tgt := wantKey.Key.Key()
	if _, ok := o.rebuilt.Tree(ctx, wantKey.TreeID).Items(ctx).Load(tgt); ok {
		return true
	}

	// OK, we need to insert it

	if o.hasAugment(wantKey) {
		// Already queued; don't duplicate.
		return false
	}
	wants := make(containers.Set[btrfsvol.LogicalAddr])
	o.rebuilt.Tree(ctx, wantKey.TreeID).PotentialItems(ctx).Subrange(
		func(k btrfsprim.Key, _ keyio.ItemPtr) int { return tgt.Compare(k) },
		func(_ btrfsprim.Key, v keyio.ItemPtr) bool {
			wants.InsertFrom(o.rebuilt.Tree(ctx, wantKey.TreeID).LeafToRoots(ctx, v.Node))
			return true
		})
	o.wantAugment(ctx, wantKey, wants)
	return false
}

// wantDirIndex implements rebuildCallbacks.
// wantDirIndex requests that a DIR_INDEX item for the given directory
// and file name exist; since the key offset of a DIR_INDEX is its
// sequence number (not derivable from the name), candidates are matched
// by comparing the recorded item name instead of the offset.
func (o *rebuilder) wantDirIndex(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, name []byte) {
	wantKey := WantWithTree{
		TreeID: treeID,
		Key: Want{
			ObjectID:   objID,
			ItemType:   btrfsitem.DIR_INDEX_KEY,
			OffsetType: offsetName,
			OffsetName: string(name),
		},
	}
	ctx = withWant(ctx, logFieldItemWant, reason, wantKey)

	if o.rebuilt.Tree(ctx, treeID) == nil {
		o.enqueueRetry(treeID)
		return
	}

	// check if we already have it

	tgt := wantKey.Key.Key()
	found := false
	o.rebuilt.Tree(ctx, treeID).Items(ctx).Subrange(
		func(key btrfsprim.Key, _ keyio.ItemPtr) int {
			// Match on (ObjectID, ItemType) only; the offset is a
			// sequence number we cannot predict.
			key.Offset = 0
			return tgt.Compare(key)
		},
		func(_ btrfsprim.Key, ptr keyio.ItemPtr) bool {
			if itemName, ok := o.keyIO.Names[ptr]; ok && bytes.Equal(itemName, name) {
				found = true
			}
			// Stop iterating as soon as a name match is found.
			return !found
		})
	if found {
		return
	}

	// OK, we need to insert it

	if o.hasAugment(wantKey) {
		return
	}
	wants := make(containers.Set[btrfsvol.LogicalAddr])
	o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx).Subrange(
		func(key btrfsprim.Key, _ keyio.ItemPtr) int {
			key.Offset = 0
			return tgt.Compare(key)
		},
		func(_ btrfsprim.Key, ptr keyio.ItemPtr) bool {
			if itemName, ok := o.keyIO.Names[ptr]; ok && bytes.Equal(itemName, name) {
				wants.InsertFrom(o.rebuilt.Tree(ctx, treeID).LeafToRoots(ctx, ptr.Node))
			}
			return true
		})
	o.wantAugment(ctx, wantKey, wants)
}

// _walkRange calls fn for each run-item in `items` with the given
// object ID and item type whose [runBeg,runEnd) byte range overlaps
// [beg,end).  Run sizes come from o.keyIO.Sizes; items whose size
// failed to decode are reported via fsErr and skipped.
func (o *rebuilder) _walkRange(
	ctx context.Context,
	items *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr],
	treeID, objID btrfsprim.ObjID, typ btrfsprim.ItemType,
	beg, end uint64,
	fn func(key btrfsprim.Key, ptr keyio.ItemPtr, beg, end uint64),
) {
	min := btrfsprim.Key{
		ObjectID: objID,
		ItemType: typ,
		Offset:   0, // *NOT* `beg`; a run starting before `beg` may still overlap it
	}
	max := btrfsprim.Key{
		ObjectID: objID,
		ItemType: typ,
		Offset:   end - 1,
	}
	items.Subrange(
		// NOTE(review): the sign convention here (return 1 when
		// min<runKey, -1 when max>runKey) is dictated by
		// containers.SortedMap.Subrange — confirm against that
		// package's docs before touching this comparator.
		func(runKey btrfsprim.Key, _ keyio.ItemPtr) int {
			switch {
			case min.Compare(runKey) < 0:
				return 1
			case max.Compare(runKey) > 0:
				return -1
			default:
				return 0
			}
		},
		func(runKey btrfsprim.Key, runPtr keyio.ItemPtr) bool {
			runSizeAndErr, ok := o.keyIO.Sizes[runPtr]
			if !ok {
				panic(fmt.Errorf("should not happen: %v (%v) did not have a size recorded",
					runPtr, keyAndTree{TreeID: treeID, Key: runKey}))
			}
			if runSizeAndErr.Err != nil {
				o.fsErr(ctx, fmt.Errorf("get size: %v (%v): %w",
					runPtr, keyAndTree{TreeID: treeID, Key: runKey},
					runSizeAndErr.Err))
				return true
			}
			runSize := runSizeAndErr.Size
			if runSize == 0 {
				return true
			}
			runBeg := runKey.Offset
			runEnd := runBeg + runSize
			if runEnd <= beg {
				// Run ends before the requested range; skip.
				return true
			}

			fn(runKey, runPtr, runBeg, runEnd)
			return true
		})
}

// gap is a half-open byte range [Beg,End) that is not yet covered by
// any item run.
type gap struct {
	// range is [Beg,End)
	Beg, End uint64
}

// Compare implements containers.Ordered.
func (a gap) Compare(b gap) int {
	return containers.NativeCompare(a.Beg, b.Beg)
}

// _wantRange requests that items of the given type fully cover the byte
// range [beg,end) in the given tree: it computes the gaps left by items
// already in the rebuilt tree, then records augments for potential items
// that would fill each gap.
func (o *rebuilder) _wantRange(
	ctx context.Context, reason string,
	treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType,
	beg, end uint64,
) {
	wantKey := WantWithTree{
		TreeID: treeID,
		Key: Want{
			ObjectID:   objID,
			ItemType:   typ,
			OffsetType: offsetAny,
		},
	}
	ctx = withWant(ctx, logFieldItemWant, reason, wantKey)
	// Switch to offsetRange after logging; OffsetLow/OffsetHigh are
	// filled in per-gap below.
	wantKey.Key.OffsetType = offsetRange

	if o.rebuilt.Tree(ctx, treeID) == nil {
		o.enqueueRetry(treeID)
		return
	}

	// Step 1: Build a listing of the gaps.
	//
	// Start with a gap of the whole range, then subtract each run
	// from it.
	gaps := new(containers.RBTree[gap])
	gaps.Insert(gap{
		Beg: beg,
		End: end,
	})
	o._walkRange(
		ctx,
		o.rebuilt.Tree(ctx, treeID).Items(ctx),
		treeID, objID, typ, beg, end,
		func(runKey btrfsprim.Key, _ keyio.ItemPtr, runBeg, runEnd uint64) {
			// Collect all gaps overlapping this run, then replace
			// them with (at most) the leading and trailing slivers
			// the run does not cover.
			var overlappingGaps []*containers.RBNode[gap]
			gaps.Subrange(
				func(gap gap) int {
					switch {
					case gap.End <= runBeg:
						return 1
					case runEnd <= gap.Beg:
						return -1
					default:
						return 0
					}
				},
				func(node *containers.RBNode[gap]) bool {
					overlappingGaps = append(overlappingGaps, node)
					return true
				})
			if len(overlappingGaps) == 0 {
				return
			}
			gapsBeg := overlappingGaps[0].Value.Beg
			gapsEnd := overlappingGaps[len(overlappingGaps)-1].Value.End
			for _, gap := range overlappingGaps {
				gaps.Delete(gap)
			}
			if gapsBeg < runBeg {
				gaps.Insert(gap{
					Beg: gapsBeg,
					End: runBeg,
				})
			}
			if gapsEnd > runEnd {
				gaps.Insert(gap{
					Beg: runEnd,
					End: gapsEnd,
				})
			}
		})

	// Step 2: Fill each gap.
	if gaps.Len() == 0 {
		return
	}
	potentialItems := o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx)
	gaps.Range(func(rbNode *containers.RBNode[gap]) bool {
		gap := rbNode.Value
		last := gap.Beg
		o._walkRange(
			ctx,
			potentialItems,
			treeID, objID, typ, gap.Beg, gap.End,
			func(k btrfsprim.Key, v keyio.ItemPtr, runBeg, runEnd uint64) {
				// TODO: This is dumb and greedy.
				if last < runBeg {
					// log an error
					wantKey.Key.OffsetLow = last
					wantKey.Key.OffsetHigh = runBeg
					wantCtx := withWant(ctx, logFieldItemWant, reason, wantKey)
					o.wantAugment(wantCtx, wantKey, nil)
				}
				wantKey.Key.OffsetLow = gap.Beg
				wantKey.Key.OffsetHigh = gap.End
				wantCtx := withWant(ctx, logFieldItemWant, reason, wantKey)
				o.wantAugment(wantCtx, wantKey, o.rebuilt.Tree(wantCtx, treeID).LeafToRoots(wantCtx, v.Node))
				last = runEnd
			})
		if last < gap.End {
			// log an error
			wantKey.Key.OffsetLow = last
			wantKey.Key.OffsetHigh = gap.End
			wantCtx := withWant(ctx, logFieldItemWant, reason, wantKey)
			o.wantAugment(wantCtx, wantKey, nil)
		}
		return true
	})
}

// wantCSum implements rebuildCallbacks.
//
// interval is [beg, end)
//
// It requests EXTENT_CSUM coverage for the given logical-address range,
// unless the owning inode has the NODATASUM flag set.
func (o *rebuilder) wantCSum(ctx context.Context, reason string, inodeTree, inode btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) {
	// First, we must have the owning INODE_ITEM in order to read its
	// flags.
	inodeWant := WantWithTree{
		TreeID: inodeTree,
		Key: Want{
			ObjectID:   inode,
			ItemType:   btrfsitem.INODE_ITEM_KEY,
			OffsetType: offsetExact,
			OffsetLow:  0,
		},
	}
	inodeCtx := withWant(ctx, logFieldItemWant, reason, inodeWant)
	if !o._wantOff(inodeCtx, inodeWant) {
		o.enqueueRetry(inodeTree)
		return
	}
	inodePtr, ok := o.rebuilt.Tree(inodeCtx, inodeTree).Items(inodeCtx).Load(inodeWant.Key.Key())
	if !ok {
		panic(fmt.Errorf("should not happen: could not load key: %v", inodeWant))
	}
	inodeFlags, ok := o.keyIO.Flags[inodePtr]
	if !ok {
		panic(fmt.Errorf("should not happen: INODE_ITEM did not have flags recorded"))
	}
	if inodeFlags.Err != nil {
		o.fsErr(inodeCtx, inodeFlags.Err)
		return
	}

	if inodeFlags.NoDataSum {
		// NODATASUM inodes have no checksums to want.
		return
	}

	// Checksums are stored in whole blocks, so widen the interval to
	// block boundaries.
	o._wantRange(
		ctx, reason,
		btrfsprim.CSUM_TREE_OBJECTID, btrfsprim.EXTENT_CSUM_OBJECTID, btrfsprim.EXTENT_CSUM_KEY,
		uint64(roundDown(beg, btrfssum.BlockSize)), uint64(roundUp(end, btrfssum.BlockSize)))
}

// wantFileExt implements rebuildCallbacks.
// wantFileExt requests EXTENT_DATA coverage for bytes [0,size) of the
// given inode.
func (o *rebuilder) wantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) {
	o._wantRange(
		ctx, reason,
		treeID, ino, btrfsprim.EXTENT_DATA_KEY,
		0, uint64(size))
}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wanttyp.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wanttyp.go deleted file mode 100644 index 2b471fe..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_wanttyp.go +++ /dev/null @@ -1,107 +0,0 @@
// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later

package rebuildnodes

import (
	"context"
	"fmt"

	"github.com/datawire/dlib/dlog"

	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
	"git.lukeshu.com/btrfs-progs-ng/lib/containers"
)

// wantOffsetType says how a Want matches a candidate key's offset.
type wantOffsetType int8

const (
	offsetAny   = wantOffsetType(iota) // any offset matches
	offsetExact                        // offset must equal OffsetLow
	offsetRange                        // offset in [OffsetLow,OffsetHigh) (see _wantRange)
	offsetName                         // matched by item name (OffsetName), not by offset
)

// Want describes a desired item key: an exact object ID and item type,
// plus a flexible offset criterion selected by OffsetType.
type Want struct {
	ObjectID   btrfsprim.ObjID
	ItemType   btrfsprim.ItemType
	OffsetType wantOffsetType
	OffsetLow  uint64
	OffsetHigh uint64
	OffsetName string
}

// Compare implements containers.Ordered, comparing each field in
// declaration order.
func (a Want) Compare(b Want) int {
	if d := containers.NativeCompare(a.ObjectID, b.ObjectID); d != 0 {
		return d
	}
	if d := containers.NativeCompare(a.ItemType, b.ItemType); d != 0 {
		return d
	}
	if d := containers.NativeCompare(a.OffsetType, b.OffsetType); d != 0 {
		return d
	}
	if d := containers.NativeCompare(a.OffsetLow, b.OffsetLow); d != 0 {
		return d
	}
	if d := containers.NativeCompare(a.OffsetHigh, b.OffsetHigh); d != 0 {
		return d
	}
	if d := containers.NativeCompare(a.OffsetName, b.OffsetName); d != 0 {
		return d
	}
	return 0
}

// Key converts the Want to a concrete key, using OffsetLow as the
// offset (callers that match on other criteria zero or ignore it).
func (o Want) Key() btrfsprim.Key {
	return btrfsprim.Key{
		ObjectID: o.ObjectID,
		ItemType: o.ItemType,
		Offset:   o.OffsetLow,
	}
}

// wantFromKey builds an exact-offset Want for the given concrete key.
func wantFromKey(k btrfsprim.Key) Want {
	return Want{
		ObjectID:   k.ObjectID,
		ItemType:   k.ItemType,
		OffsetType: offsetExact,
		OffsetLow:  k.Offset,
	}
}

// String implements fmt.Stringer, rendering the offset according to
// OffsetType.
func (o Want) String() string {
	switch o.OffsetType {
	case offsetAny:
		return fmt.Sprintf("{%v %v ?}", o.ObjectID, o.ItemType)
	case offsetExact:
		return fmt.Sprintf("{%v %v %v}", o.ObjectID, o.ItemType, o.OffsetLow)
	case offsetRange:
		return fmt.Sprintf("{%v %v %v-%v}", o.ObjectID, o.ItemType, o.OffsetLow, o.OffsetHigh)
	case offsetName:
		return fmt.Sprintf("{%v %v name=%q}", o.ObjectID, o.ItemType, o.OffsetName)
	default:
		panic(fmt.Errorf("should not happen: OffsetType=%#v", o.OffsetType))
	}
}

// WantWithTree pairs a Want with the tree it is wanted in.
type WantWithTree struct {
	TreeID btrfsprim.ObjID
	Key    Want
}

// String implements fmt.Stringer.
func (o WantWithTree) String() string {
	return fmt.Sprintf("tree=%v key=%v", o.TreeID, o.Key)
}

// Log-field prefixes for item-level and tree-level wants.
const (
	logFieldItemWant = "btrfsinspect.rebuild-nodes.rebuild.want"
	logFieldTreeWant = "btrfsinspect.rebuild-nodes.rebuild.add-tree.want"
)

// withWant annotates ctx with the reason and key of a want, so that all
// logging done while pursuing it is attributed to it.
func withWant(ctx context.Context, logField, reason string, wantKey WantWithTree) context.Context {
	ctx = dlog.WithField(ctx, logField+".reason", reason)
	ctx = dlog.WithField(ctx, logField+".key", wantKey)
	return ctx
}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go deleted file mode 100644 index 17949ab..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go +++ /dev/null @@ -1,77 +0,0 @@
// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later

package rebuildnodes

import (
	"context"
	"time"

	"github.com/datawire/dlib/dlog"

	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio"
	"git.lukeshu.com/btrfs-progs-ng/lib/containers"
	"git.lukeshu.com/btrfs-progs-ng/lib/maps"
	"git.lukeshu.com/btrfs-progs-ng/lib/textui"
)

// ScanDevices re-reads every node that the device scan located, and
// builds the node graph and key-IO handle used by the rebuild, with a
// progress meter.  It returns the superblock alongside them.
func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.ScanDevicesResult) (btrfstree.Superblock, graph.Graph, *keyio.Handle, error) {
	dlog.Info(ctx, "Reading superblock...")
	sb, err := fs.Superblock()
	if err != nil {
		return btrfstree.Superblock{}, graph.Graph{}, nil, err
	}

	dlog.Infof(ctx, "Reading node data from FS...")

	// Progress denominator: total node count across all devices.
	var stats textui.Portion[int]
	stats.D = countNodes(scanResults)
	progressWriter := textui.NewProgress[textui.Portion[int]](
		dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.read.substep", "read-nodes"),
		dlog.LogLevelInfo, textui.Tunable(1*time.Second))

	nodeGraph := graph.New(*sb)
	keyIO := keyio.NewHandle(fs, *sb)

	progressWriter.Set(stats)
	for _, devResults := range scanResults {
		// Iterate in sorted address order for deterministic output.
		for _, laddr := range maps.SortedKeys(devResults.FoundNodes) {
			// Honor cancellation between node reads.
			if err := ctx.Err(); err != nil {
				return btrfstree.Superblock{}, graph.Graph{}, nil, err
			}
			nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, *sb, laddr, btrfstree.NodeExpectations{
				LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr},
			})
			if err != nil {
				btrfstree.FreeNodeRef(nodeRef)
				return btrfstree.Superblock{}, graph.Graph{}, nil, err
			}

			nodeGraph.InsertNode(nodeRef)
			keyIO.InsertNode(nodeRef)

			// The node's data has been copied into the graph/keyIO;
			// release the ref promptly to bound memory use.
			btrfstree.FreeNodeRef(nodeRef)

			stats.N++
			progressWriter.Set(stats)
		}
	}
	if stats.N != stats.D {
		panic("should not happen")
	}
	progressWriter.Done()
	dlog.Info(ctx, "... done reading node data")

	if err := nodeGraph.FinalCheck(ctx, fs, *sb); err != nil {
		return btrfstree.Superblock{}, graph.Graph{}, nil, err
	}
	keyIO.SetGraph(*nodeGraph)

	return *sb, *nodeGraph, keyIO, nil
}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go deleted file mode 100644 index 9d91f23..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go +++ /dev/null @@ -1,31 +0,0 @@
// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later

package rebuildnodes

import (
	"golang.org/x/exp/constraints"

	"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
)

// countNodes returns the total number of nodes found across all
// scanned devices.
func countNodes(nodeScanResults btrfsinspect.ScanDevicesResult) int {
	var cnt int
	for _, devResults := range nodeScanResults {
		cnt += len(devResults.FoundNodes)
	}
	return cnt
}

// roundDown rounds n down to the nearest multiple of d.
func roundDown[T constraints.Integer](n, d T) T {
	return (n / d) * d
}

// roundUp rounds n up to the nearest multiple of d.
func roundUp[T constraints.Integer](n, d T) T {
	return ((n + d - 1) / d) * d
}

// discardOK discards the `ok` from a (value, ok) pair.
func discardOK[T any](val T, _ bool) T {
	return val
}