author     Luke Shumaker <lukeshu@lukeshu.com>    2022-07-06 01:48:48 -0600
committer  Luke Shumaker <lukeshu@lukeshu.com>    2022-07-08 00:15:58 -0600
commit     5647659f27f8aa18bc10ca4742f8856162325d5c (patch)
tree       249fd2d4ea5e0e8bf6dab24d305a4daa5bf0cc6e /cmd
parent     b392ad64a8fd04d20b35ad21d2d4ea3ff2778e3f (diff)
file reading!
Diffstat (limited to 'cmd')
-rw-r--r--  cmd/btrfs-mount/subvol.go       110
-rw-r--r--  cmd/btrfs-mount/subvol_fuse.go   18
2 files changed, 124 insertions, 4 deletions
diff --git a/cmd/btrfs-mount/subvol.go b/cmd/btrfs-mount/subvol.go
index 4e741ad..1d5e9db 100644
--- a/cmd/btrfs-mount/subvol.go
+++ b/cmd/btrfs-mount/subvol.go
@@ -3,7 +3,9 @@ package main
 import (
 	"context"
 	"fmt"
+	"io"
 	"reflect"
+	"sort"
 	"sync"

 	"github.com/datawire/dlib/dcontext"
@@ -35,9 +37,15 @@ type dir struct {
 	ChildrenByIndex map[uint64]btrfsitem.DirEntry
 }

+type fileExtent struct {
+	OffsetWithinFile int64
+	btrfsitem.FileExtent
+}
+
 type file struct {
 	fullInode
-	// TODO
+	Extents []fileExtent
+	FS      *btrfs.FS
 }

 type Subvolume struct {
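
An aside on the fileExtent type added above: because btrfsitem.FileExtent is embedded rather than given a field name, its fields and methods are promoted onto fileExtent, which is what lets later hunks call extent.Size() and read extent.Type directly. A minimal sketch of that promotion, using hypothetical stand-in types (span, payload) rather than anything from this repository:

package main

import "fmt"

// payload stands in for btrfsitem.FileExtent: it knows its own size.
type payload struct{ Len int64 }

func (p payload) Size() (int64, error) { return p.Len, nil }

// span stands in for fileExtent: a file offset plus an embedded payload.
type span struct {
	Off     int64
	payload // embedded, so payload's fields and methods are promoted
}

func main() {
	s := span{Off: 4096, payload: payload{Len: 512}}
	n, _ := s.Size()      // promoted method; no s.payload.Size() needed
	fmt.Println(s.Off, n) // 4096 512
}
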
@@ -296,7 +304,8 @@ func (sv *Subvolume) loadFile(inode btrfs.ObjID) (*file, error) {
 			return
 		}
 		val.fullInode = *fullInode
-		// TODO
+		val.FS = sv.FS
+		val.populate()
 		return
 	})
 	if val.Inode == 0 {
@@ -304,3 +313,100 @@ func (sv *Subvolume) loadFile(inode btrfs.ObjID) (*file, error) {
 	}
 	return val, nil
 }
+
+func (ret *file) populate() {
+	for _, item := range ret.OtherItems {
+		switch item.Head.Key.ItemType {
+		case btrfsitem.INODE_REF_KEY:
+			// TODO
+		case btrfsitem.EXTENT_DATA_KEY:
+			ret.Extents = append(ret.Extents, fileExtent{
+				OffsetWithinFile: int64(item.Head.Key.Offset),
+				FileExtent:       item.Body.(btrfsitem.FileExtent),
+			})
+		default:
+			panic(fmt.Errorf("TODO: handle item type %v", item.Head.Key.ItemType))
+		}
+	}
+
+	// These should already be sorted, because of the nature of
+	// the btree; but this is a recovery tool for corrupt
+	// filesystems, so go ahead and ensure that it's sorted.
+	sort.Slice(ret.Extents, func(i, j int) bool {
+		return ret.Extents[i].OffsetWithinFile < ret.Extents[j].OffsetWithinFile
+	})
+
+	pos := int64(0)
+	for _, extent := range ret.Extents {
+		if extent.OffsetWithinFile != pos {
+			if extent.OffsetWithinFile > pos {
+				ret.Errs = append(ret.Errs, fmt.Errorf("extent gap from %v to %v",
+					pos, extent.OffsetWithinFile))
+			} else {
+				ret.Errs = append(ret.Errs, fmt.Errorf("extent overlap from %v to %v",
+					extent.OffsetWithinFile, pos))
+			}
+		}
+		size, err := extent.Size()
+		if err != nil {
+			ret.Errs = append(ret.Errs, fmt.Errorf("extent %v: %w", extent.OffsetWithinFile, err))
+		}
+		pos += size
+	}
+	if ret.InodeItem != nil && pos != ret.InodeItem.Size {
+		if ret.InodeItem.Size > pos {
+			ret.Errs = append(ret.Errs, fmt.Errorf("extent gap from %v to %v",
+				pos, ret.InodeItem.Size))
+		} else {
+			ret.Errs = append(ret.Errs, fmt.Errorf("extent mapped past end of file from %v to %v",
+				ret.InodeItem.Size, pos))
+		}
+	}
+}
+
+func (file *file) ReadAt(dat []byte, off int64) (int, error) {
+	// These stateless maybe-short-reads each do an O(n) extent
+	// lookup, so reading a file is O(n^2), but we expect n to be
+	// small, so whatev. Turn file.Extents into an rbtree if it
+	// becomes a problem.
+	done := 0
+	for done < len(dat) {
+		n, err := file.maybeShortReadAt(dat[done:], off+int64(done))
+		done += n
+		if err != nil {
+			return done, err
+		}
+	}
+	return done, nil
+}
+
+func (file *file) maybeShortReadAt(dat []byte, off int64) (int, error) {
+	for _, extent := range file.Extents {
+		extBeg := extent.OffsetWithinFile
+		if extBeg > off {
+			break
+		}
+		extLen, err := extent.Size()
+		if err != nil {
+			continue
+		}
+		extEnd := extBeg + extLen
+		if extEnd <= off {
+			continue
+		}
+		offsetWithinExt := off - extent.OffsetWithinFile
+		readSize := util.Min(int64(len(dat)), extLen-offsetWithinExt)
+		switch extent.Type {
+		case btrfsitem.FILE_EXTENT_INLINE:
+			return copy(dat, extent.BodyInline[offsetWithinExt:offsetWithinExt+readSize]), nil
+		case btrfsitem.FILE_EXTENT_REG, btrfsitem.FILE_EXTENT_PREALLOC:
+			return file.FS.ReadAt(dat[:readSize],
+				extent.BodyExtent.DiskByteNr.
+					Add(extent.BodyExtent.Offset).
+					Add(btrfsvol.AddrDelta(offsetWithinExt)))
+		}
+	}
+	return 0, fmt.Errorf("read: could not map position %v", off)
+}
+
+var _ io.ReaderAt = (*file)(nil)
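
Stepping back from the diff, here is a minimal, self-contained sketch of the same read pattern with the btrfs-specific types swapped out for hypothetical in-memory ones (flatFile and extent are illustrative names, not from this repository): a sorted extent table, a maybeShortReadAt that serves only what the covering extent holds, and a ReadAt loop that stitches the short reads together and reports an error for unmapped positions.

package main

import (
	"fmt"
	"sort"
)

// extent is a stand-in for fileExtent: a byte range of file content.
type extent struct {
	Off  int64  // offset within the file
	Body []byte // the bytes backing that range
}

type flatFile struct {
	Extents []extent // kept sorted by Off, as file.populate does
}

// maybeShortReadAt copies from the single extent covering off, possibly
// returning fewer bytes than len(dat); unmapped positions are an error.
func (f *flatFile) maybeShortReadAt(dat []byte, off int64) (int, error) {
	for _, ext := range f.Extents {
		if ext.Off > off {
			break
		}
		end := ext.Off + int64(len(ext.Body))
		if end <= off {
			continue
		}
		return copy(dat, ext.Body[off-ext.Off:]), nil
	}
	return 0, fmt.Errorf("read: could not map position %v", off)
}

// ReadAt assembles a full read out of maybe-short reads; each iteration
// is a fresh O(n) scan of the extent table, mirroring file.ReadAt above.
func (f *flatFile) ReadAt(dat []byte, off int64) (int, error) {
	done := 0
	for done < len(dat) {
		n, err := f.maybeShortReadAt(dat[done:], off+int64(done))
		done += n
		if err != nil {
			return done, err
		}
	}
	return done, nil
}

func main() {
	f := &flatFile{Extents: []extent{
		{Off: 6, Body: []byte("world")},
		{Off: 0, Body: []byte("hello ")},
	}}
	sort.Slice(f.Extents, func(i, j int) bool { return f.Extents[i].Off < f.Extents[j].Off })

	buf := make([]byte, 11)
	n, err := f.ReadAt(buf, 0)
	fmt.Println(n, err, string(buf[:n])) // 11 <nil> hello world
}

Because each call is stateless, the pattern also fits the io.ReaderAt contract, which explicitly permits parallel ReadAt calls on the same source.
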
diff --git a/cmd/btrfs-mount/subvol_fuse.go b/cmd/btrfs-mount/subvol_fuse.go
index 7500e0b..6f3f267 100644
--- a/cmd/btrfs-mount/subvol_fuse.go
+++ b/cmd/btrfs-mount/subvol_fuse.go
@@ -197,11 +197,25 @@ func (sv *Subvolume) OpenFile(_ context.Context, op *fuseops.OpenFileOp) error {
 	return nil
 }
 func (sv *Subvolume) ReadFile(_ context.Context, op *fuseops.ReadFileOp) error {
-	_, ok := sv.fileHandles.Load(op.Handle)
+	state, ok := sv.fileHandles.Load(op.Handle)
 	if !ok {
 		return syscall.EBADF
 	}
-	return syscall.ENOSYS
+
+	size := op.Size
+	var dat []byte
+	if op.Dst != nil {
+		size = util.Min(int64(len(op.Dst)), size)
+		dat = op.Dst
+	} else {
+		dat = make([]byte, op.Size)
+		op.Data = [][]byte{dat}
+	}
+
+	var err error
+	op.BytesRead, err = state.File.ReadAt(dat, op.Offset)
+
+	return err
 }
 func (sv *Subvolume) ReleaseFileHandle(_ context.Context, op *fuseops.ReleaseFileHandleOp) error {
 	_, ok := sv.fileHandles.LoadAndDelete(op.Handle)
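
Since *file now satisfies io.ReaderAt, it also composes with standard-library helpers outside the FUSE path. A small sketch, assuming nothing beyond the standard library (bytes.Reader stands in for *file, and readRange is a hypothetical helper): pulling a byte range out of any io.ReaderAt with io.NewSectionReader and io.ReadFull, roughly what ReadFile does by hand with op.Dst / op.Data above.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// readRange returns up to size bytes starting at off from any io.ReaderAt.
func readRange(r io.ReaderAt, off, size int64) ([]byte, error) {
	buf := make([]byte, size)
	n, err := io.ReadFull(io.NewSectionReader(r, off, size), buf)
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		err = nil // treat running out of bytes as an ordinary short read
	}
	return buf[:n], err
}

func main() {
	// bytes.Reader implements io.ReaderAt, standing in for *file here.
	r := bytes.NewReader([]byte("hello world"))
	dat, err := readRange(r, 6, 16)
	fmt.Printf("%q %v\n", dat, err) // "world" <nil>
}

Treating io.ErrUnexpectedEOF as a short read is the usual convention when the requested range runs past end of file.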