-rw-r--r--   cmd/btrfs-rec/inspect_dbgsums.go                                 |  18
-rw-r--r--   cmd/btrfs-rec/inspect_rebuildmappings.go                         |  62
-rw-r--r--   cmd/btrfs-rec/inspect_scandevices.go                             |  48
-rw-r--r--   lib/btrfsprogs/btrfsinspect/rebuildmappings.go                   | 130
-rw-r--r--   lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go   | 164
5 files changed, 221 insertions, 201 deletions
diff --git a/cmd/btrfs-rec/inspect_dbgsums.go b/cmd/btrfs-rec/inspect_dbgsums.go
index 8e230f4..b46a305 100644
--- a/cmd/btrfs-rec/inspect_dbgsums.go
+++ b/cmd/btrfs-rec/inspect_dbgsums.go
@@ -18,7 +18,6 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
"git.lukeshu.com/btrfs-progs-ng/lib/maps"
)
@@ -37,19 +36,10 @@ func init() {
ctx := cmd.Context()
dlog.Infof(ctx, "Reading %q...", args[0])
- scanResults, err := func() (btrfsinspect.ScanDevicesResult, error) {
- fh, err := os.Open(args[0])
- if err != nil {
- return nil, err
- }
- var scanResults btrfsinspect.ScanDevicesResult
- buf := bufio.NewReader(fh)
- if err := lowmemjson.DecodeThenEOF(buf, &scanResults); err != nil {
- return nil, err
- }
- _ = fh.Close()
- return scanResults, nil
- }()
+ scanResults, err := readScanResults(args[0])
+ if err != nil {
+ return err
+ }
dlog.Infof(ctx, "... done reading %q", args[0])
dlog.Info(ctx, "Mapping the logical address space...")
diff --git a/cmd/btrfs-rec/inspect_rebuildmappings.go b/cmd/btrfs-rec/inspect_rebuildmappings.go
index 5b252d5..ce90139 100644
--- a/cmd/btrfs-rec/inspect_rebuildmappings.go
+++ b/cmd/btrfs-rec/inspect_rebuildmappings.go
@@ -5,19 +5,17 @@
package main
import (
- "encoding/json"
- "fmt"
+ "bufio"
"io"
"os"
+ "git.lukeshu.com/go/lowmemjson"
"github.com/datawire/dlib/dlog"
"github.com/datawire/ocibuild/pkg/cliutil"
"github.com/spf13/cobra"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
- "git.lukeshu.com/btrfs-progs-ng/lib/maps"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildmappings"
)
func init() {
@@ -38,25 +36,15 @@ func init() {
RunE: func(fs *btrfs.FS, cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
- scanResultsBytes, err := os.ReadFile(args[0])
+ dlog.Infof(ctx, "Reading %q...", args[0])
+ scanResults, err := readScanResults(args[0])
if err != nil {
return err
}
- var scanResults map[btrfsvol.DeviceID]btrfsinspect.ScanOneDeviceResult
- if err := json.Unmarshal(scanResultsBytes, &scanResults); err != nil {
- return err
- }
+ dlog.Infof(ctx, "... done reading %q", args[0])
- devices := fs.LV.PhysicalVolumes()
- for _, devID := range maps.SortedKeys(scanResults) {
- dev, ok := devices[devID]
- if !ok {
- return fmt.Errorf("device ID %v mentioned in %q is not part of the filesystem",
- devID, args[0])
- }
- dlog.Infof(ctx, "Rebuilding mappings from results on device %v...",
- dev.Name())
- scanResults[devID].AddToLV(ctx, fs, dev)
+ if err := rebuildmappings.RebuildMappings(ctx, fs, scanResults); err != nil {
+ return err
}
dlog.Infof(ctx, "Writing reconstructed mappings to stdout...")
@@ -69,26 +57,18 @@ func init() {
})
}
-func writeMappingsJSON(w io.Writer, fs *btrfs.FS) error {
- mappings := fs.LV.Mappings()
- if _, err := io.WriteString(w, "[\n"); err != nil {
- return err
- }
- for i, mapping := range mappings {
- suffix := ","
- if i == len(mappings)-1 {
- suffix = ""
+func writeMappingsJSON(w io.Writer, fs *btrfs.FS) (err error) {
+ buffer := bufio.NewWriter(w)
+ defer func() {
+ if _err := buffer.Flush(); err == nil && _err != nil {
+ err = _err
}
- bs, err := json.Marshal(mapping)
- if err != nil {
- return err
- }
- if _, err := fmt.Printf(" %s%s\n", bs, suffix); err != nil {
- return err
- }
- }
- if _, err := io.WriteString(w, "]\n"); err != nil {
- return err
- }
- return nil
+ }()
+ return lowmemjson.Encode(&lowmemjson.ReEncoder{
+ Out: buffer,
+
+ Indent: "\t",
+ ForceTrailingNewlines: true,
+ CompactIfUnder: 120,
+ }, fs.LV.Mappings())
}
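
The rewritten writeMappingsJSON (like the writeScanResults helper added below) relies on a named return value plus a deferred Flush, so a buffered-write failure still surfaces without clobbering an earlier error. A minimal standalone sketch of that idiom, using only the standard library (the writeGreeting function is made up for illustration, not part of the patch):

    package main

    import (
    	"bufio"
    	"fmt"
    	"io"
    	"os"
    )

    // writeGreeting buffers its output and reports a Flush failure through
    // the named return value, but only if the body itself succeeded.
    func writeGreeting(w io.Writer) (err error) {
    	buffer := bufio.NewWriter(w)
    	defer func() {
    		if _err := buffer.Flush(); err == nil && _err != nil {
    			err = _err
    		}
    	}()
    	_, err = fmt.Fprintln(buffer, "hello")
    	return err
    }

    func main() {
    	if err := writeGreeting(os.Stdout); err != nil {
    		fmt.Fprintln(os.Stderr, "error:", err)
    	}
    }
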
diff --git a/cmd/btrfs-rec/inspect_scandevices.go b/cmd/btrfs-rec/inspect_scandevices.go
index a7a7fb2..37bafe4 100644
--- a/cmd/btrfs-rec/inspect_scandevices.go
+++ b/cmd/btrfs-rec/inspect_scandevices.go
@@ -6,6 +6,7 @@ package main
import (
"bufio"
+ "io"
"os"
"git.lukeshu.com/go/lowmemjson"
@@ -24,11 +25,6 @@ func init() {
Args: cliutil.WrapPositionalArgs(cobra.NoArgs),
},
RunE: func(fs *btrfs.FS, cmd *cobra.Command, _ []string) (err error) {
- maybeSetErr := func(_err error) {
- if err == nil && _err != nil {
- err = _err
- }
- }
ctx := cmd.Context()
results, err := btrfsinspect.ScanDevices(ctx, fs)
@@ -37,17 +33,37 @@ func init() {
}
dlog.Info(ctx, "Writing scan results to stdout...")
- buffer := bufio.NewWriter(os.Stdout)
- defer func() {
- maybeSetErr(buffer.Flush())
- }()
- return lowmemjson.Encode(&lowmemjson.ReEncoder{
- Out: buffer,
-
- Indent: "\t",
- ForceTrailingNewlines: true,
- CompactIfUnder: 16,
- }, results)
+ return writeScanResults(os.Stdout, results)
},
})
}
+
+func writeScanResults(w io.Writer, results btrfsinspect.ScanDevicesResult) (err error) {
+ buffer := bufio.NewWriter(w)
+ defer func() {
+ if _err := buffer.Flush(); err == nil && _err != nil {
+ err = _err
+ }
+ }()
+ return lowmemjson.Encode(&lowmemjson.ReEncoder{
+ Out: buffer,
+
+ Indent: "\t",
+ ForceTrailingNewlines: true,
+ CompactIfUnder: 16,
+ }, results)
+}
+
+func readScanResults(filename string) (btrfsinspect.ScanDevicesResult, error) {
+ fh, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ var scanResults btrfsinspect.ScanDevicesResult
+ buf := bufio.NewReader(fh)
+ if err := lowmemjson.DecodeThenEOF(buf, &scanResults); err != nil {
+ return nil, err
+ }
+ _ = fh.Close()
+ return scanResults, nil
+}
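
readScanResults replaces the old os.ReadFile + json.Unmarshal slurp with a streaming lowmemjson decode over a bufio.Reader, so the (potentially large) scan-results file never has to sit in memory as one raw byte slice. A standalone sketch of the same shape, generalized over any target type (readJSONFile and counts.json are hypothetical; only the lowmemjson.DecodeThenEOF call is taken from the patch):

    package main

    import (
    	"bufio"
    	"fmt"
    	"os"

    	"git.lukeshu.com/go/lowmemjson"
    )

    // readJSONFile streams a JSON document from filename into a value of
    // type T, requiring that nothing follow the document (DecodeThenEOF).
    func readJSONFile[T any](filename string) (T, error) {
    	var val T
    	fh, err := os.Open(filename)
    	if err != nil {
    		return val, err
    	}
    	defer fh.Close()
    	if err := lowmemjson.DecodeThenEOF(bufio.NewReader(fh), &val); err != nil {
    		return val, err
    	}
    	return val, nil
    }

    func main() {
    	counts, err := readJSONFile[map[string]int]("counts.json")
    	if err != nil {
    		fmt.Fprintln(os.Stderr, "error:", err)
    		os.Exit(1)
    	}
    	fmt.Println(counts)
    }
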
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings.go
deleted file mode 100644
index bbfd6b7..0000000
--- a/lib/btrfsprogs/btrfsinspect/rebuildmappings.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package btrfsinspect
-
-import (
- "context"
- "sort"
-
- "github.com/datawire/dlib/dlog"
-
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/maps"
-)
-
-func (found ScanOneDeviceResult) AddToLV(ctx context.Context, fs *btrfs.FS, dev *btrfs.Device) {
- sb, _ := dev.Superblock()
-
- total := len(found.FoundChunks) + len(found.FoundDevExtents)
- for _, paddrs := range found.FoundNodes {
- total += len(paddrs)
- }
- lastProgress := -1
- done := 0
- printProgress := func() {
- pct := int(100 * float64(done) / float64(total))
- if pct != lastProgress || done == total {
- dlog.Infof(ctx, "... dev[%q] added %v%% of the mappings (%v/%v=>%v)",
- dev.Name(), pct, done, total, len(fs.LV.Mappings()))
- lastProgress = pct
- }
- }
- printProgress()
-
- for _, chunk := range found.FoundChunks {
- for _, mapping := range chunk.Chunk.Mappings(chunk.Key) {
- if err := fs.LV.AddMapping(mapping); err != nil {
- dlog.Errorf(ctx, "... dev[%q] error: adding chunk: %v",
- dev.Name(), err)
- }
- done++
- printProgress()
- }
- }
-
- for _, ext := range found.FoundDevExtents {
- if err := fs.LV.AddMapping(ext.DevExt.Mapping(ext.Key)); err != nil {
- dlog.Errorf(ctx, "... dev[%q] error: adding devext: %v",
- dev.Name(), err)
- }
- done++
- printProgress()
- }
-
- // Do the nodes last to avoid bloating the mappings table too
- // much. (Because nodes are numerous and small, while the
- // others are few and large; so it is likely that many of the
- // nodes will be subsumed by other things.)
- //
- // Sort them so that progress numbers are predictable.
- for _, laddr := range maps.SortedKeys(found.FoundNodes) {
- for _, paddr := range found.FoundNodes[laddr] {
- if err := fs.LV.AddMapping(btrfsvol.Mapping{
- LAddr: laddr,
- PAddr: btrfsvol.QualifiedPhysicalAddr{
- Dev: sb.DevItem.DevID,
- Addr: paddr,
- },
- Size: btrfsvol.AddrDelta(sb.NodeSize),
- SizeLocked: false,
- }); err != nil {
- dlog.Errorf(ctx, "... dev[%q] error: adding node ident: %v",
- dev.Name(), err)
- }
- done++
- printProgress()
- }
- }
-
- // Use block groups to add missing flags (and as a hint to
- // combine node entries).
- //
- // First dedup them, because they change for allocations and
- // CoW means that they'll bounce around a lot, so you likely
- // have oodles of duplicates?
- type blockgroup struct {
- LAddr btrfsvol.LogicalAddr
- Size btrfsvol.AddrDelta
- Flags btrfsvol.BlockGroupFlags
- }
- bgsSet := make(map[blockgroup]struct{})
- for _, bg := range found.FoundBlockGroups {
- bgsSet[blockgroup{
- LAddr: btrfsvol.LogicalAddr(bg.Key.ObjectID),
- Size: btrfsvol.AddrDelta(bg.Key.Offset),
- Flags: bg.BG.Flags,
- }] = struct{}{}
- }
- bgsOrdered := maps.Keys(bgsSet)
- sort.Slice(bgsOrdered, func(i, j int) bool {
- return bgsOrdered[i].LAddr < bgsOrdered[j].LAddr
- })
- for _, bg := range bgsOrdered {
- otherLAddr, otherPAddr := fs.LV.ResolveAny(bg.LAddr, bg.Size)
- if otherLAddr < 0 || otherPAddr.Addr < 0 {
- dlog.Errorf(ctx, "... dev[%q] error: could not pair blockgroup laddr=%v (size=%v flags=%v) with a mapping",
- dev.Name(), bg.LAddr, bg.Size, bg.Flags)
- continue
- }
-
- offsetWithinChunk := otherLAddr.Sub(bg.LAddr)
- mapping := btrfsvol.Mapping{
- LAddr: bg.LAddr,
- PAddr: otherPAddr.Add(-offsetWithinChunk),
- Size: bg.Size,
- SizeLocked: true,
- Flags: containers.Optional[btrfsvol.BlockGroupFlags]{
- OK: true,
- Val: bg.Flags,
- },
- }
- if err := fs.LV.AddMapping(mapping); err != nil {
- dlog.Errorf(ctx, "... dev[%q] error: adding flags from blockgroup: %v",
- dev.Name(), err)
- }
- }
-}
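
The deleted AddToLV throttled its log output by only printing when the integer percentage changed (or when the count reached the total), so per-item progress stays readable even across tens of thousands of nodes. A tiny standalone sketch of that throttling idiom (the loop and totals here are invented for illustration):

    package main

    import "fmt"

    func main() {
    	const total = 50000
    	lastProgress := -1
    	for done := 0; done <= total; done++ {
    		pct := int(100 * float64(done) / float64(total))
    		if pct != lastProgress || done == total {
    			fmt.Printf("... added %v%% of the mappings (%v/%v)\n", pct, done, total)
    			lastProgress = pct
    		}
    	}
    }
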
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go
new file mode 100644
index 0000000..ce3b7ed
--- /dev/null
+++ b/lib/btrfsprogs/btrfsinspect/rebuildmappings/rebuildmappings.go
@@ -0,0 +1,164 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package rebuildmappings
+
+import (
+ "context"
+ "fmt"
+ "sort"
+
+ "github.com/datawire/dlib/dlog"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
+ "git.lukeshu.com/btrfs-progs-ng/lib/maps"
+)
+
+func getNodeSize(fs *btrfs.FS) (btrfsvol.AddrDelta, error) {
+ sb, err := fs.Superblock()
+ if err != nil {
+ return 0, err
+ }
+ return btrfsvol.AddrDelta(sb.NodeSize), nil
+}
+
+type blockgroup struct {
+ LAddr btrfsvol.LogicalAddr
+ Size btrfsvol.AddrDelta
+ Flags btrfsvol.BlockGroupFlags
+}
+
+func dedupBlockGroups(scanResults btrfsinspect.ScanDevicesResult) []blockgroup {
+ bgsSet := make(map[blockgroup]struct{})
+ for _, devResults := range scanResults {
+ for _, bg := range devResults.FoundBlockGroups {
+ bgsSet[blockgroup{
+ LAddr: btrfsvol.LogicalAddr(bg.Key.ObjectID),
+ Size: btrfsvol.AddrDelta(bg.Key.Offset),
+ Flags: bg.BG.Flags,
+ }] = struct{}{}
+ }
+ }
+ bgsOrdered := maps.Keys(bgsSet)
+ sort.Slice(bgsOrdered, func(i, j int) bool {
+ return bgsOrdered[i].LAddr < bgsOrdered[j].LAddr
+ })
+ return bgsOrdered
+}
+
+func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.ScanDevicesResult) error {
+ nodeSize, err := getNodeSize(fs)
+ if err != nil {
+ return err
+ }
+
+ var numChunks, numDevExts, numBlockGroups, numNodes int
+ devIDs := maps.SortedKeys(scanResults)
+ devices := fs.LV.PhysicalVolumes()
+ for _, devID := range devIDs {
+ if _, ok := devices[devID]; !ok {
+ return fmt.Errorf("device ID %v mentioned in scan results is not part of the filesystem", devID)
+ }
+ devResults := scanResults[devID]
+ numChunks += len(devResults.FoundChunks)
+ numDevExts += len(devResults.FoundDevExtents)
+ numBlockGroups += len(devResults.FoundBlockGroups)
+ for _, paddrs := range devResults.FoundNodes {
+ numNodes += len(paddrs)
+ }
+ }
+ dlog.Infof(ctx, "plan: 1/5 process %d chunks", numChunks)
+ dlog.Infof(ctx, "plan: 2/5 process %d device extents", numDevExts)
+ dlog.Infof(ctx, "plan: 3/5 process %d nodes", numNodes)
+ dlog.Infof(ctx, "plan: 4/5 process %d block groups", numBlockGroups)
+ dlog.Infof(ctx, "plan: 5/5 process sums")
+
+ dlog.Infof(ctx, "1/5: Processing %d chunks...", numChunks)
+ for _, devID := range devIDs {
+ devResults := scanResults[devID]
+ for _, chunk := range devResults.FoundChunks {
+ for _, mapping := range chunk.Chunk.Mappings(chunk.Key) {
+ if err := fs.LV.AddMapping(mapping); err != nil {
+ dlog.Errorf(ctx, "... error: adding chunk: %v", err)
+ }
+ }
+ }
+ }
+ dlog.Info(ctx, "... done processing chunks")
+
+ dlog.Infof(ctx, "2/5: Processing %d device extents...", numDevExts)
+ for _, devID := range devIDs {
+ devResults := scanResults[devID]
+ for _, ext := range devResults.FoundDevExtents {
+ if err := fs.LV.AddMapping(ext.DevExt.Mapping(ext.Key)); err != nil {
+ dlog.Errorf(ctx, "... error: adding devext: %v", err)
+ }
+ }
+ }
+ dlog.Info(ctx, "... done processing device extents")
+
+ // Do the nodes "last" to avoid bloating the mappings table
+ // too much. (Because nodes are numerous and small, while the
+ // others are few and large; so it is likely that many of the
+ // nodes will be subsumed by other things.)
+ dlog.Infof(ctx, "3/5: Processing %d nodes...", numNodes)
+ for _, devID := range devIDs {
+ devResults := scanResults[devID]
+ // Sort them so that progress numbers are predictable.
+ for _, laddr := range maps.SortedKeys(devResults.FoundNodes) {
+ for _, paddr := range devResults.FoundNodes[laddr] {
+ if err := fs.LV.AddMapping(btrfsvol.Mapping{
+ LAddr: laddr,
+ PAddr: btrfsvol.QualifiedPhysicalAddr{
+ Dev: devID,
+ Addr: paddr,
+ },
+ Size: nodeSize,
+ SizeLocked: false,
+ }); err != nil {
+ dlog.Errorf(ctx, "... error: adding node ident: %v", err)
+ }
+ }
+ }
+ }
+ dlog.Info(ctx, "... done processing nodes")
+
+ // Use block groups to add missing flags (and as a hint to
+ // combine node entries).
+ dlog.Infof(ctx, "4/5: Processing %d block groups...", numBlockGroups)
+ // First dedup them, because they change for allocations and
+ // CoW means that they'll bounce around a lot, so you likely
+ // have oodles of duplicates?
+ bgsOrdered := dedupBlockGroups(scanResults)
+ for _, bg := range bgsOrdered {
+ otherLAddr, otherPAddr := fs.LV.ResolveAny(bg.LAddr, bg.Size)
+ if otherLAddr < 0 || otherPAddr.Addr < 0 {
+ dlog.Errorf(ctx, "... error: could not pair blockgroup laddr=%v (size=%v flags=%v) with a mapping",
+ bg.LAddr, bg.Size, bg.Flags)
+ continue
+ }
+
+ offsetWithinChunk := otherLAddr.Sub(bg.LAddr)
+ mapping := btrfsvol.Mapping{
+ LAddr: bg.LAddr,
+ PAddr: otherPAddr.Add(-offsetWithinChunk),
+ Size: bg.Size,
+ SizeLocked: true,
+ Flags: containers.Optional[btrfsvol.BlockGroupFlags]{
+ OK: true,
+ Val: bg.Flags,
+ },
+ }
+ if err := fs.LV.AddMapping(mapping); err != nil {
+ dlog.Errorf(ctx, "... error: adding flags from blockgroup: %v", err)
+ }
+ }
+ dlog.Info(ctx, "... done processing block groups")
+
+ dlog.Infof(ctx, "5/5: Processing sums: TODO")
+ return nil
+}
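
dedupBlockGroups leans on the fact that a struct of comparable fields can key a Go map, so duplicates collapse for free, and sorting the surviving keys afterwards keeps the processing order (and the log output) deterministic. A standalone sketch of that dedup-then-sort shape, using a made-up span type rather than the patch's blockgroup:

    package main

    import (
    	"fmt"
    	"sort"
    )

    type span struct {
    	Start int
    	Size  int
    }

    // dedupSpans collapses duplicate spans via a map keyed by the struct
    // itself, then returns the unique spans in ascending Start order.
    func dedupSpans(in []span) []span {
    	set := make(map[span]struct{}, len(in))
    	for _, s := range in {
    		set[s] = struct{}{}
    	}
    	out := make([]span, 0, len(set))
    	for s := range set {
    		out = append(out, s)
    	}
    	sort.Slice(out, func(i, j int) bool { return out[i].Start < out[j].Start })
    	return out
    }

    func main() {
    	fmt.Println(dedupSpans([]span{{0, 10}, {20, 5}, {0, 10}}))
    }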