summary refs log tree commit diff
path: root/cmd/btrfs-mount/subvol.go
diff options
context:
space:
mode:
authorLuke Shumaker <lukeshu@lukeshu.com>2022-07-06 03:05:07 -0600
committerLuke Shumaker <lukeshu@lukeshu.com>2022-07-08 00:15:59 -0600
commitcc09287d752ec6d9c4d4876ab41c4e5859b90f78 (patch)
tree62be91b774959caf88bc160cf5ef6d244c6429c3 /cmd/btrfs-mount/subvol.go
parent5647659f27f8aa18bc10ca4742f8856162325d5c (diff)
Move subvol.go to btrfs/io3_fs.go
Diffstat (limited to 'cmd/btrfs-mount/subvol.go')
-rw-r--r--cmd/btrfs-mount/subvol.go412
1 files changed, 0 insertions, 412 deletions
diff --git a/cmd/btrfs-mount/subvol.go b/cmd/btrfs-mount/subvol.go
deleted file mode 100644
index 1d5e9db..0000000
--- a/cmd/btrfs-mount/subvol.go
+++ /dev/null
@@ -1,412 +0,0 @@
-package main
-
-import (
- "context"
- "fmt"
- "io"
- "reflect"
- "sort"
- "sync"
-
- "github.com/datawire/dlib/dcontext"
- "github.com/datawire/dlib/derror"
- "github.com/datawire/dlib/dlog"
- "github.com/jacobsa/fuse"
- "github.com/jacobsa/fuse/fuseutil"
-
- "lukeshu.com/btrfs-tools/pkg/btrfs"
- "lukeshu.com/btrfs-tools/pkg/btrfs/btrfsitem"
- "lukeshu.com/btrfs-tools/pkg/btrfs/btrfsvol"
- "lukeshu.com/btrfs-tools/pkg/util"
-)
-
// bareInode pairs an inode number with its parsed INODE_ITEM, plus any
// errors accumulated while loading it.  A nil InodeItem together with a
// non-empty Errs marks a failed load (see loadBareInode).
type bareInode struct {
	Inode     btrfs.ObjID
	InodeItem *btrfsitem.Inode
	Errs      derror.MultiError
}
-
// fullInode is a bareInode plus every other tree item that shares the
// inode's object ID (directory entries, extents, inode refs, ...), as
// collected by loadFullInode.
type fullInode struct {
	bareInode
	OtherItems []btrfs.Item
}
-
// dir is a fullInode whose directory entries have been indexed two
// ways — by name (from DIR_ITEM items) and by directory index (from
// DIR_INDEX items) — by (*dir).populate.
type dir struct {
	fullInode
	ChildrenByName  map[string]btrfsitem.DirEntry
	ChildrenByIndex map[uint64]btrfsitem.DirEntry
}
-
// fileExtent is an EXTENT_DATA item tagged with the byte offset within
// the file that it maps (taken from the item key's Offset).
type fileExtent struct {
	OffsetWithinFile int64
	btrfsitem.FileExtent
}
-
// file is a fullInode with its extents collected and sorted by
// (*file).populate, ready to be read through the io.ReaderAt
// implementation below.  FS is retained for resolving on-disk extents.
type file struct {
	fullInode
	Extents []fileExtent
	FS      *btrfs.FS
}
-
// Subvolume serves a single btrfs subvolume (identified by TreeID)
// read-only over FUSE at Mountpoint.
type Subvolume struct {
	FS         *btrfs.FS
	DeviceName string
	Mountpoint string
	TreeID     btrfs.ObjID

	// rootOnce guards the lazy one-time lookup of this subvolume's
	// ROOT_ITEM; rootVal/rootErr hold the cached result (see init).
	rootOnce sync.Once
	rootVal  btrfsitem.Root
	rootErr  error

	// Caches for the successively richer parsed views of an inode.
	bareInodeCache LRUCache[btrfs.ObjID, *bareInode]
	fullInodeCache LRUCache[btrfs.ObjID, *fullInode]
	dirCache       LRUCache[btrfs.ObjID, *dir]
	fileCache      LRUCache[btrfs.ObjID, *file]

	subvolumeFUSE
}
-
-func (sv *Subvolume) Run(ctx context.Context) error {
- mount, err := fuse.Mount(
- sv.Mountpoint,
- fuseutil.NewFileSystemServer(sv),
- &fuse.MountConfig{
- OpContext: ctx,
- ErrorLogger: dlog.StdLogger(ctx, dlog.LogLevelError),
- DebugLogger: dlog.StdLogger(ctx, dlog.LogLevelDebug),
-
- FSName: sv.DeviceName,
- Subtype: "btrfs",
-
- ReadOnly: true,
- })
- if err != nil {
- return err
- }
- return mount.Join(dcontext.HardContext(ctx))
-}
-
-func (sv *Subvolume) init() {
- sv.rootOnce.Do(func() {
- sb, err := sv.FS.Superblock()
- if err != nil {
- sv.rootErr = err
- return
- }
-
- root, err := sv.FS.TreeLookup(sb.Data.RootTree, btrfs.Key{
- ObjectID: sv.TreeID,
- ItemType: btrfsitem.ROOT_ITEM_KEY,
- Offset: 0,
- })
- if err != nil {
- sv.rootErr = err
- return
- }
-
- rootBody, ok := root.Body.(btrfsitem.Root)
- if !ok {
- sv.rootErr = fmt.Errorf("FS_TREE_ ROOT_ITEM has malformed body")
- return
- }
-
- sv.rootVal = rootBody
- })
-}
-
-func (sv *Subvolume) getRootInode() (btrfs.ObjID, error) {
- sv.init()
- return sv.rootVal.RootDirID, sv.rootErr
-}
-
-func (sv *Subvolume) getFSTree() (btrfsvol.LogicalAddr, error) {
- sv.init()
- return sv.rootVal.ByteNr, sv.rootErr
-}
-
// loadBareInode fetches (through the cache) just the INODE_ITEM for the
// given inode.  A cached entry with a nil InodeItem marks a failed load;
// in that case the accumulated errors are returned.
func (sv *Subvolume) loadBareInode(inode btrfs.ObjID) (*bareInode, error) {
	val := sv.bareInodeCache.GetOrElse(inode, func() (val *bareInode) {
		val = &bareInode{
			Inode: inode,
		}
		tree, err := sv.getFSTree()
		if err != nil {
			val.Errs = append(val.Errs, err)
			return
		}
		item, err := sv.FS.TreeLookup(tree, btrfs.Key{
			ObjectID: inode,
			ItemType: btrfsitem.INODE_ITEM_KEY,
			Offset:   0,
		})
		if err != nil {
			val.Errs = append(val.Errs, err)
			return
		}

		itemBody, ok := item.Body.(btrfsitem.Inode)
		if !ok {
			val.Errs = append(val.Errs, fmt.Errorf("malformed inode"))
			return
		}
		val.InodeItem = &itemBody

		return
	})
	// nil InodeItem means the load failed; surface the recorded errors.
	if val.InodeItem == nil {
		return nil, val.Errs
	}
	return val, nil
}
-
// loadFullInode fetches (through the cache) the INODE_ITEM plus every
// other item keyed under the given inode's object ID.  Errors are
// accumulated rather than fatal: a partial result is still returned as
// long as at least one item was found (recovery-tool behavior).
func (sv *Subvolume) loadFullInode(inode btrfs.ObjID) (*fullInode, error) {
	val := sv.fullInodeCache.GetOrElse(inode, func() (val *fullInode) {
		val = &fullInode{
			bareInode: bareInode{
				Inode: inode,
			},
		}
		tree, err := sv.getFSTree()
		if err != nil {
			val.Errs = append(val.Errs, err)
			return
		}
		items, err := sv.FS.TreeSearchAll(tree, func(key btrfs.Key) int {
			return util.CmpUint(inode, key.ObjectID)
		})
		if err != nil {
			val.Errs = append(val.Errs, err)
			// Keep going if the search returned some items anyway.
			if len(items) == 0 {
				return
			}
		}
		for _, item := range items {
			switch item.Head.Key.ItemType {
			case btrfsitem.INODE_ITEM_KEY:
				itemBody := item.Body.(btrfsitem.Inode)
				// Tolerate duplicate INODE_ITEMs only if they agree.
				if val.InodeItem != nil {
					if !reflect.DeepEqual(itemBody, *val.InodeItem) {
						val.Errs = append(val.Errs, fmt.Errorf("multiple inodes"))
					}
					continue
				}
				val.InodeItem = &itemBody
			default:
				val.OtherItems = append(val.OtherItems, item)
			}
		}
		return
	})
	// Nothing at all was loaded; surface the recorded errors.
	if val.InodeItem == nil && val.OtherItems == nil {
		return nil, val.Errs
	}
	return val, nil
}
-
-func (sv *Subvolume) loadDir(inode btrfs.ObjID) (*dir, error) {
- val := sv.dirCache.GetOrElse(inode, func() (val *dir) {
- val = new(dir)
- fullInode, err := sv.loadFullInode(inode)
- if err != nil {
- val.Errs = append(val.Errs, err)
- return
- }
- val.fullInode = *fullInode
- val.populate()
- return
- })
- if val.Inode == 0 {
- return nil, val.Errs
- }
- return val, nil
-}
-
// populate builds ChildrenByName and ChildrenByIndex from the DIR_ITEM
// and DIR_INDEX items in ret.OtherItems, cross-checks the two indexes
// against each other, and records any inconsistencies in ret.Errs.
// Entries missing from one index are synthesized from the other, so
// both maps end up covering the same set of children.
func (ret *dir) populate() {
	ret.ChildrenByName = make(map[string]btrfsitem.DirEntry)
	ret.ChildrenByIndex = make(map[uint64]btrfsitem.DirEntry)
	for _, item := range ret.OtherItems {
		switch item.Head.Key.ItemType {
		case btrfsitem.INODE_REF_KEY:
			// TODO
		case btrfsitem.DIR_ITEM_KEY:
			body := item.Body.(btrfsitem.DirEntries)
			if len(body) != 1 {
				ret.Errs = append(ret.Errs, fmt.Errorf("multiple direntries in single DIR_ITEM?"))
				continue
			}
			for _, entry := range body {
				// DIR_ITEMs are keyed by the crc32c of the entry's
				// name; verify the key against the actual name.
				namehash := btrfsitem.NameHash(entry.Name)
				if namehash != item.Head.Key.Offset {
					ret.Errs = append(ret.Errs, fmt.Errorf("direntry crc32c mismatch: key=%#x crc32c(%q)=%#x",
						item.Head.Key.Offset, entry.Name, namehash))
					continue
				}
				if other, exists := ret.ChildrenByName[string(entry.Name)]; exists {
					// Tolerate exact duplicates; flag conflicting ones.
					if !reflect.DeepEqual(entry, other) {
						ret.Errs = append(ret.Errs, fmt.Errorf("multiple instances of direntry name %q", entry.Name))
					}
					continue
				}
				ret.ChildrenByName[string(entry.Name)] = entry
			}
		case btrfsitem.DIR_INDEX_KEY:
			// DIR_INDEX items are keyed by the entry's directory index.
			index := item.Head.Key.Offset
			body := item.Body.(btrfsitem.DirEntries)
			if len(body) != 1 {
				ret.Errs = append(ret.Errs, fmt.Errorf("multiple direntries in single DIR_INDEX?"))
				continue
			}
			for _, entry := range body {
				if other, exists := ret.ChildrenByIndex[index]; exists {
					if !reflect.DeepEqual(entry, other) {
						ret.Errs = append(ret.Errs, fmt.Errorf("multiple instances of direntry index %v", index))
					}
					continue
				}
				ret.ChildrenByIndex[index] = entry
			}
		//case btrfsitem.XATTR_ITEM_KEY:
		default:
			panic(fmt.Errorf("TODO: handle item type %v", item.Head.Key.ItemType))
		}
	}
	// Cross-check: every by-index entry should also appear by name.
	entriesWithIndexes := make(map[string]struct{})
	// NOTE(review): synthesized indexes start at 2 — presumably indexes
	// 0/1 are reserved; confirm against the btrfs on-disk format.
	nextIndex := uint64(2)
	for index, entry := range ret.ChildrenByIndex {
		if index+1 > nextIndex {
			nextIndex = index + 1
		}
		entriesWithIndexes[string(entry.Name)] = struct{}{}
		if other, exists := ret.ChildrenByName[string(entry.Name)]; !exists {
			ret.Errs = append(ret.Errs, fmt.Errorf("missing by-name direntry for %q", entry.Name))
			ret.ChildrenByName[string(entry.Name)] = entry
		} else if !reflect.DeepEqual(entry, other) {
			ret.Errs = append(ret.Errs, fmt.Errorf("direntry index %v and direntry name %q disagree", index, entry.Name))
			// When the two indexes disagree, the by-index entry wins.
			ret.ChildrenByName[string(entry.Name)] = entry
		}
	}
	// Reverse direction: synthesize indexes (in deterministic name
	// order) for by-name entries that have no by-index counterpart.
	for _, name := range util.SortedMapKeys(ret.ChildrenByName) {
		if _, exists := entriesWithIndexes[name]; !exists {
			ret.Errs = append(ret.Errs, fmt.Errorf("missing by-index direntry for %q", name))
			ret.ChildrenByIndex[nextIndex] = ret.ChildrenByName[name]
			nextIndex++
		}
	}
	return
}
-
-func (sv *Subvolume) loadFile(inode btrfs.ObjID) (*file, error) {
- val := sv.fileCache.GetOrElse(inode, func() (val *file) {
- val = new(file)
- fullInode, err := sv.loadFullInode(inode)
- if err != nil {
- val.Errs = append(val.Errs, err)
- return
- }
- val.fullInode = *fullInode
- val.FS = sv.FS
- val.populate()
- return
- })
- if val.Inode == 0 {
- return nil, val.Errs
- }
- return val, nil
-}
-
// populate extracts the EXTENT_DATA items from ret.OtherItems into
// ret.Extents, sorts them by file offset, and validates that the
// extents tile the file exactly (no gaps, no overlaps) up to the size
// recorded in the inode.  Problems are recorded in ret.Errs rather
// than aborting, since this is a recovery tool for corrupt filesystems.
func (ret *file) populate() {
	for _, item := range ret.OtherItems {
		switch item.Head.Key.ItemType {
		case btrfsitem.INODE_REF_KEY:
			// TODO
		case btrfsitem.EXTENT_DATA_KEY:
			// The item key's Offset is the byte position within the file.
			ret.Extents = append(ret.Extents, fileExtent{
				OffsetWithinFile: int64(item.Head.Key.Offset),
				FileExtent:       item.Body.(btrfsitem.FileExtent),
			})
		default:
			panic(fmt.Errorf("TODO: handle item type %v", item.Head.Key.ItemType))
		}
	}

	// These should already be sorted, because of the nature of
	// the btree; but this is a recovery tool for corrupt
	// filesystems, so go ahead and ensure that it's sorted.
	sort.Slice(ret.Extents, func(i, j int) bool {
		return ret.Extents[i].OffsetWithinFile < ret.Extents[j].OffsetWithinFile
	})

	// Walk the sorted extents, checking that each begins exactly where
	// the previous one ended.
	pos := int64(0)
	for _, extent := range ret.Extents {
		if extent.OffsetWithinFile != pos {
			if extent.OffsetWithinFile > pos {
				ret.Errs = append(ret.Errs, fmt.Errorf("extent gap from %v to %v",
					pos, extent.OffsetWithinFile))
			} else {
				ret.Errs = append(ret.Errs, fmt.Errorf("extent overlap from %v to %v",
					extent.OffsetWithinFile, pos))
			}
		}
		size, err := extent.Size()
		if err != nil {
			ret.Errs = append(ret.Errs, fmt.Errorf("extent %v: %w", extent.OffsetWithinFile, err))
		}
		pos += size
	}
	// The extents should cover exactly the inode's recorded size.
	if ret.InodeItem != nil && pos != ret.InodeItem.Size {
		if ret.InodeItem.Size > pos {
			ret.Errs = append(ret.Errs, fmt.Errorf("extent gap from %v to %v",
				pos, ret.InodeItem.Size))
		} else {
			ret.Errs = append(ret.Errs, fmt.Errorf("extent mapped past end of file from %v to %v",
				ret.InodeItem.Size, pos))
		}
	}
}
-
-func (file *file) ReadAt(dat []byte, off int64) (int, error) {
- // These stateles maybe-short-reads each do an O(n) extent
- // lookup, so reading a file is O(n^2), but we expect n to be
- // small, so whatev. Turn file.Extents it in to an rbtree if
- // it becomes a problem.
- done := 0
- for done < len(dat) {
- n, err := file.maybeShortReadAt(dat[done:], off+int64(done))
- done += n
- if err != nil {
- return done, err
- }
- }
- return done, nil
-}
-
// maybeShortReadAt reads as many bytes of dat as can be satisfied from
// the single extent containing offset off.  The read may be short;
// ReadAt loops to complete it.  If no extent maps off, an error is
// returned.
func (file *file) maybeShortReadAt(dat []byte, off int64) (int, error) {
	for _, extent := range file.Extents {
		extBeg := extent.OffsetWithinFile
		// Extents are sorted by offset (see populate), so once we pass
		// off there is nothing left to find.
		if extBeg > off {
			break
		}
		extLen, err := extent.Size()
		if err != nil {
			// Skip extents whose size can't be determined (corrupt FS);
			// if no other extent maps off, the error return below fires.
			continue
		}
		extEnd := extBeg + extLen
		if extEnd <= off {
			continue
		}
		offsetWithinExt := off - extent.OffsetWithinFile
		readSize := util.Min(int64(len(dat)), extLen-offsetWithinExt)
		switch extent.Type {
		case btrfsitem.FILE_EXTENT_INLINE:
			// Inline extents carry their data directly in the item body.
			return copy(dat, extent.BodyInline[offsetWithinExt:offsetWithinExt+readSize]), nil
		case btrfsitem.FILE_EXTENT_REG, btrfsitem.FILE_EXTENT_PREALLOC:
			// Regular/prealloc extents point at a logical disk address.
			return file.FS.ReadAt(dat[:readSize],
				extent.BodyExtent.DiskByteNr.
					Add(extent.BodyExtent.Offset).
					Add(btrfsvol.AddrDelta(offsetWithinExt)))
		}
	}
	return 0, fmt.Errorf("read: could not map position %v", off)
}

// Compile-time assertion that *file satisfies io.ReaderAt.
var _ io.ReaderAt = (*file)(nil)