Diffstat (limited to 'cmd')
-rw-r--r-- | cmd/btrfs-mount/lru.go         |  73
-rw-r--r-- | cmd/btrfs-mount/main.go        |   6
-rw-r--r-- | cmd/btrfs-mount/subvol.go      | 412
-rw-r--r-- | cmd/btrfs-mount/subvol_fuse.go |  51
4 files changed, 43 insertions, 499 deletions
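
Before the raw diff, a small illustrative sketch of the `GetOrElse` idiom implemented by the cache wrapper removed below (`cmd/btrfs-mount/lru.go`). The map-backed `cache` type here is a stand-in I wrote for illustration; only the shape of `GetOrElse` (retry `Get` until the value populated by `fn()` is resident) comes from the deleted code, which wrapped `hashicorp/golang-lru`'s ARC cache instead.

```go
package main

import "fmt"

// cache is a toy stand-in for the deleted LRUCache wrapper; it is unbounded
// and not concurrency-safe, unlike the ARC cache the real code used.
type cache[K comparable, V any] struct{ m map[K]V }

func (c *cache[K, V]) Get(k K) (V, bool) { v, ok := c.m[k]; return v, ok }

func (c *cache[K, V]) Add(k K, v V) {
	if c.m == nil {
		c.m = make(map[K]V)
	}
	c.m[k] = v
}

// GetOrElse mirrors the loop in the deleted lru.go: keep calling Get, filling
// the cache with fn() until a lookup succeeds, so the value returned is the
// one actually resident in the cache.
func (c *cache[K, V]) GetOrElse(k K, fn func() V) V {
	var v V
	var ok bool
	for v, ok = c.Get(k); !ok; v, ok = c.Get(k) {
		c.Add(k, fn())
	}
	return v
}

func main() {
	var c cache[string, int]
	fmt.Println(c.GetOrElse("answer", func() int { return 42 })) // prints 42
}
```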
diff --git a/cmd/btrfs-mount/lru.go b/cmd/btrfs-mount/lru.go
deleted file mode 100644
index bf91c5e..0000000
--- a/cmd/btrfs-mount/lru.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package main
-
-import (
-	"sync"
-
-	lru "github.com/hashicorp/golang-lru"
-)
-
-type LRUCache[K comparable, V any] struct {
-	initOnce sync.Once
-	inner    *lru.ARCCache
-}
-
-func (c *LRUCache[K, V]) init() {
-	c.initOnce.Do(func() {
-		c.inner, _ = lru.NewARC(128)
-	})
-}
-
-func (c *LRUCache[K, V]) Add(key K, value V) {
-	c.init()
-	c.inner.Add(key, value)
-}
-func (c *LRUCache[K, V]) Contains(key K) bool {
-	c.init()
-	return c.inner.Contains(key)
-}
-func (c *LRUCache[K, V]) Get(key K) (value V, ok bool) {
-	c.init()
-	_value, ok := c.inner.Get(key)
-	if ok {
-		value = _value.(V)
-	}
-	return value, ok
-}
-func (c *LRUCache[K, V]) Keys() []K {
-	c.init()
-	untyped := c.inner.Keys()
-	typed := make([]K, len(untyped))
-	for i := range untyped {
-		typed[i] = untyped[i].(K)
-	}
-	return typed
-}
-func (c *LRUCache[K, V]) Len() int {
-	c.init()
-	return c.inner.Len()
-}
-func (c *LRUCache[K, V]) Peek(key K) (value V, ok bool) {
-	c.init()
-	_value, ok := c.inner.Peek(key)
-	if ok {
-		value = _value.(V)
-	}
-	return value, ok
-}
-func (c *LRUCache[K, V]) Purge() {
-	c.init()
-	c.inner.Purge()
-}
-func (c *LRUCache[K, V]) Remove(key K) {
-	c.init()
-	c.inner.Remove(key)
-}
-
-func (c *LRUCache[K, V]) GetOrElse(key K, fn func() V) V {
-	var value V
-	var ok bool
-	for value, ok = c.Get(key); !ok; value, ok = c.Get(key) {
-		c.Add(key, fn())
-	}
-	return value
-}
diff --git a/cmd/btrfs-mount/main.go b/cmd/btrfs-mount/main.go
index e059351..64cae30 100644
--- a/cmd/btrfs-mount/main.go
+++ b/cmd/btrfs-mount/main.go
@@ -71,10 +71,12 @@ func Main(ctx context.Context, mountpoint string, imgfilenames ...string) (err e
 	grp.Go("main", func(ctx context.Context) error {
 		defer atomic.StoreUint32(&mounted, 0)
 		rootSubvol := &Subvolume{
-			FS: fs,
+			Subvolume: btrfs.Subvolume{
+				FS:     fs,
+				TreeID: btrfs.FS_TREE_OBJECTID,
+			},
 			DeviceName: tryAbs(imgfilenames[0]),
 			Mountpoint: mountpoint,
-			TreeID: btrfs.FS_TREE_OBJECTID,
 		}
 		return rootSubvol.Run(ctx)
 	})
diff --git a/cmd/btrfs-mount/subvol.go b/cmd/btrfs-mount/subvol.go
deleted file mode 100644
index 1d5e9db..0000000
--- a/cmd/btrfs-mount/subvol.go
+++ /dev/null
@@ -1,412 +0,0 @@
-package main
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"reflect"
-	"sort"
-	"sync"
-
-	"github.com/datawire/dlib/dcontext"
-	"github.com/datawire/dlib/derror"
-	"github.com/datawire/dlib/dlog"
-	"github.com/jacobsa/fuse"
-	"github.com/jacobsa/fuse/fuseutil"
-
-	"lukeshu.com/btrfs-tools/pkg/btrfs"
-	"lukeshu.com/btrfs-tools/pkg/btrfs/btrfsitem"
-	"lukeshu.com/btrfs-tools/pkg/btrfs/btrfsvol"
-	"lukeshu.com/btrfs-tools/pkg/util"
-)
-
-type bareInode struct {
-	Inode     btrfs.ObjID
-	InodeItem *btrfsitem.Inode
-	Errs      derror.MultiError
-}
-
-type fullInode struct {
-	bareInode
-	OtherItems []btrfs.Item
-}
-
-type dir struct {
-	fullInode
-	ChildrenByName  map[string]btrfsitem.DirEntry
-	ChildrenByIndex map[uint64]btrfsitem.DirEntry
-}
-
-type fileExtent struct {
-	OffsetWithinFile int64
-	btrfsitem.FileExtent
-}
-
-type file struct {
-	fullInode
-	Extents []fileExtent
-	FS      *btrfs.FS
-}
-
-type Subvolume struct {
-	FS         *btrfs.FS
-	DeviceName string
-	Mountpoint string
-	TreeID     btrfs.ObjID
-
-	rootOnce sync.Once
-	rootVal  btrfsitem.Root
-	rootErr  error
-
-	bareInodeCache LRUCache[btrfs.ObjID, *bareInode]
-	fullInodeCache LRUCache[btrfs.ObjID, *fullInode]
-	dirCache       LRUCache[btrfs.ObjID, *dir]
-	fileCache      LRUCache[btrfs.ObjID, *file]
-
-	subvolumeFUSE
-}
-
-func (sv *Subvolume) Run(ctx context.Context) error {
-	mount, err := fuse.Mount(
-		sv.Mountpoint,
-		fuseutil.NewFileSystemServer(sv),
-		&fuse.MountConfig{
-			OpContext:   ctx,
-			ErrorLogger: dlog.StdLogger(ctx, dlog.LogLevelError),
-			DebugLogger: dlog.StdLogger(ctx, dlog.LogLevelDebug),
-
-			FSName:  sv.DeviceName,
-			Subtype: "btrfs",
-
-			ReadOnly: true,
-		})
-	if err != nil {
-		return err
-	}
-	return mount.Join(dcontext.HardContext(ctx))
-}
-
-func (sv *Subvolume) init() {
-	sv.rootOnce.Do(func() {
-		sb, err := sv.FS.Superblock()
-		if err != nil {
-			sv.rootErr = err
-			return
-		}
-
-		root, err := sv.FS.TreeLookup(sb.Data.RootTree, btrfs.Key{
-			ObjectID: sv.TreeID,
-			ItemType: btrfsitem.ROOT_ITEM_KEY,
-			Offset:   0,
-		})
-		if err != nil {
-			sv.rootErr = err
-			return
-		}
-
-		rootBody, ok := root.Body.(btrfsitem.Root)
-		if !ok {
-			sv.rootErr = fmt.Errorf("FS_TREE_ ROOT_ITEM has malformed body")
-			return
-		}
-
-		sv.rootVal = rootBody
-	})
-}
-
-func (sv *Subvolume) getRootInode() (btrfs.ObjID, error) {
-	sv.init()
-	return sv.rootVal.RootDirID, sv.rootErr
-}
-
-func (sv *Subvolume) getFSTree() (btrfsvol.LogicalAddr, error) {
-	sv.init()
-	return sv.rootVal.ByteNr, sv.rootErr
-}
-
-func (sv *Subvolume) loadBareInode(inode btrfs.ObjID) (*bareInode, error) {
-	val := sv.bareInodeCache.GetOrElse(inode, func() (val *bareInode) {
-		val = &bareInode{
-			Inode: inode,
-		}
-		tree, err := sv.getFSTree()
-		if err != nil {
-			val.Errs = append(val.Errs, err)
-			return
-		}
-		item, err := sv.FS.TreeLookup(tree, btrfs.Key{
-			ObjectID: inode,
-			ItemType: btrfsitem.INODE_ITEM_KEY,
-			Offset:   0,
-		})
-		if err != nil {
-			val.Errs = append(val.Errs, err)
-			return
-		}
-
-		itemBody, ok := item.Body.(btrfsitem.Inode)
-		if !ok {
-			val.Errs = append(val.Errs, fmt.Errorf("malformed inode"))
-			return
-		}
-		val.InodeItem = &itemBody
-
-		return
-	})
-	if val.InodeItem == nil {
-		return nil, val.Errs
-	}
-	return val, nil
-}
-
-func (sv *Subvolume) loadFullInode(inode btrfs.ObjID) (*fullInode, error) {
-	val := sv.fullInodeCache.GetOrElse(inode, func() (val *fullInode) {
-		val = &fullInode{
-			bareInode: bareInode{
-				Inode: inode,
-			},
-		}
-		tree, err := sv.getFSTree()
-		if err != nil {
-			val.Errs = append(val.Errs, err)
-			return
-		}
-		items, err := sv.FS.TreeSearchAll(tree, func(key btrfs.Key) int {
-			return util.CmpUint(inode, key.ObjectID)
-		})
-		if err != nil {
-			val.Errs = append(val.Errs, err)
-			if len(items) == 0 {
-				return
-			}
-		}
-		for _, item := range items {
-			switch item.Head.Key.ItemType {
-			case btrfsitem.INODE_ITEM_KEY:
-				itemBody := item.Body.(btrfsitem.Inode)
-				if val.InodeItem != nil {
-					if !reflect.DeepEqual(itemBody, *val.InodeItem) {
-						val.Errs = append(val.Errs, fmt.Errorf("multiple inodes"))
-					}
-					continue
-				}
-				val.InodeItem = &itemBody
-			default:
-				val.OtherItems = append(val.OtherItems, item)
-			}
-		}
-		return
-	})
-	if val.InodeItem == nil && val.OtherItems == nil {
-		return nil, val.Errs
-	}
-	return val, nil
-}
-
-func (sv *Subvolume) loadDir(inode btrfs.ObjID) (*dir, error) {
-	val := sv.dirCache.GetOrElse(inode, func() (val *dir) {
-		val = new(dir)
-		fullInode, err := sv.loadFullInode(inode)
-		if err != nil {
-			val.Errs = append(val.Errs, err)
-			return
-		}
-		val.fullInode = *fullInode
-		val.populate()
-		return
-	})
-	if val.Inode == 0 {
-		return nil, val.Errs
-	}
-	return val, nil
-}
-
-func (ret *dir) populate() {
-	ret.ChildrenByName = make(map[string]btrfsitem.DirEntry)
-	ret.ChildrenByIndex = make(map[uint64]btrfsitem.DirEntry)
-	for _, item := range ret.OtherItems {
-		switch item.Head.Key.ItemType {
-		case btrfsitem.INODE_REF_KEY:
-			// TODO
-		case btrfsitem.DIR_ITEM_KEY:
-			body := item.Body.(btrfsitem.DirEntries)
-			if len(body) != 1 {
-				ret.Errs = append(ret.Errs, fmt.Errorf("multiple direntries in single DIR_ITEM?"))
-				continue
-			}
-			for _, entry := range body {
-				namehash := btrfsitem.NameHash(entry.Name)
-				if namehash != item.Head.Key.Offset {
-					ret.Errs = append(ret.Errs, fmt.Errorf("direntry crc32c mismatch: key=%#x crc32c(%q)=%#x",
-						item.Head.Key.Offset, entry.Name, namehash))
-					continue
-				}
-				if other, exists := ret.ChildrenByName[string(entry.Name)]; exists {
-					if !reflect.DeepEqual(entry, other) {
-						ret.Errs = append(ret.Errs, fmt.Errorf("multiple instances of direntry name %q", entry.Name))
-					}
-					continue
-				}
-				ret.ChildrenByName[string(entry.Name)] = entry
-			}
-		case btrfsitem.DIR_INDEX_KEY:
-			index := item.Head.Key.Offset
-			body := item.Body.(btrfsitem.DirEntries)
-			if len(body) != 1 {
-				ret.Errs = append(ret.Errs, fmt.Errorf("multiple direntries in single DIR_INDEX?"))
-				continue
-			}
-			for _, entry := range body {
-				if other, exists := ret.ChildrenByIndex[index]; exists {
-					if !reflect.DeepEqual(entry, other) {
-						ret.Errs = append(ret.Errs, fmt.Errorf("multiple instances of direntry index %v", index))
-					}
-					continue
-				}
-				ret.ChildrenByIndex[index] = entry
-			}
-		//case btrfsitem.XATTR_ITEM_KEY:
-		default:
-			panic(fmt.Errorf("TODO: handle item type %v", item.Head.Key.ItemType))
-		}
-	}
-	entriesWithIndexes := make(map[string]struct{})
-	nextIndex := uint64(2)
-	for index, entry := range ret.ChildrenByIndex {
-		if index+1 > nextIndex {
-			nextIndex = index + 1
-		}
-		entriesWithIndexes[string(entry.Name)] = struct{}{}
-		if other, exists := ret.ChildrenByName[string(entry.Name)]; !exists {
-			ret.Errs = append(ret.Errs, fmt.Errorf("missing by-name direntry for %q", entry.Name))
-			ret.ChildrenByName[string(entry.Name)] = entry
-		} else if !reflect.DeepEqual(entry, other) {
-			ret.Errs = append(ret.Errs, fmt.Errorf("direntry index %v and direntry name %q disagree", index, entry.Name))
-			ret.ChildrenByName[string(entry.Name)] = entry
-		}
-	}
-	for _, name := range util.SortedMapKeys(ret.ChildrenByName) {
-		if _, exists := entriesWithIndexes[name]; !exists {
-			ret.Errs = append(ret.Errs, fmt.Errorf("missing by-index direntry for %q", name))
-			ret.ChildrenByIndex[nextIndex] = ret.ChildrenByName[name]
-			nextIndex++
-		}
-	}
-	return
-}
-
-func (sv *Subvolume) loadFile(inode btrfs.ObjID) (*file, error) {
-	val := sv.fileCache.GetOrElse(inode, func() (val *file) {
-		val = new(file)
-		fullInode, err := sv.loadFullInode(inode)
-		if err != nil {
-			val.Errs = append(val.Errs, err)
-			return
-		}
-		val.fullInode = *fullInode
-		val.FS = sv.FS
-		val.populate()
-		return
-	})
-	if val.Inode == 0 {
-		return nil, val.Errs
-	}
-	return val, nil
-}
-
-func (ret *file) populate() {
-	for _, item := range ret.OtherItems {
-		switch item.Head.Key.ItemType {
-		case btrfsitem.INODE_REF_KEY:
-			// TODO
-		case btrfsitem.EXTENT_DATA_KEY:
-			ret.Extents = append(ret.Extents, fileExtent{
-				OffsetWithinFile: int64(item.Head.Key.Offset),
-				FileExtent:       item.Body.(btrfsitem.FileExtent),
-			})
-		default:
-			panic(fmt.Errorf("TODO: handle item type %v", item.Head.Key.ItemType))
-		}
-	}
-
-	// These should already be sorted, because of the nature of
-	// the btree; but this is a recovery tool for corrupt
-	// filesystems, so go ahead and ensure that it's sorted.
-	sort.Slice(ret.Extents, func(i, j int) bool {
-		return ret.Extents[i].OffsetWithinFile < ret.Extents[j].OffsetWithinFile
-	})
-
-	pos := int64(0)
-	for _, extent := range ret.Extents {
-		if extent.OffsetWithinFile != pos {
-			if extent.OffsetWithinFile > pos {
-				ret.Errs = append(ret.Errs, fmt.Errorf("extent gap from %v to %v",
-					pos, extent.OffsetWithinFile))
-			} else {
-				ret.Errs = append(ret.Errs, fmt.Errorf("extent overlap from %v to %v",
-					extent.OffsetWithinFile, pos))
-			}
-		}
-		size, err := extent.Size()
-		if err != nil {
-			ret.Errs = append(ret.Errs, fmt.Errorf("extent %v: %w", extent.OffsetWithinFile, err))
-		}
-		pos += size
-	}
-	if ret.InodeItem != nil && pos != ret.InodeItem.Size {
-		if ret.InodeItem.Size > pos {
-			ret.Errs = append(ret.Errs, fmt.Errorf("extent gap from %v to %v",
-				pos, ret.InodeItem.Size))
-		} else {
-			ret.Errs = append(ret.Errs, fmt.Errorf("extent mapped past end of file from %v to %v",
-				ret.InodeItem.Size, pos))
-		}
-	}
-}
-
-func (file *file) ReadAt(dat []byte, off int64) (int, error) {
-	// These stateles maybe-short-reads each do an O(n) extent
-	// lookup, so reading a file is O(n^2), but we expect n to be
-	// small, so whatev.  Turn file.Extents it in to an rbtree if
-	// it becomes a problem.
-	done := 0
-	for done < len(dat) {
-		n, err := file.maybeShortReadAt(dat[done:], off+int64(done))
-		done += n
-		if err != nil {
-			return done, err
-		}
-	}
-	return done, nil
-}
-
-func (file *file) maybeShortReadAt(dat []byte, off int64) (int, error) {
-	for _, extent := range file.Extents {
-		extBeg := extent.OffsetWithinFile
-		if extBeg > off {
-			break
-		}
-		extLen, err := extent.Size()
-		if err != nil {
-			continue
-		}
-		extEnd := extBeg + extLen
-		if extEnd <= off {
-			continue
-		}
-		offsetWithinExt := off - extent.OffsetWithinFile
-		readSize := util.Min(int64(len(dat)), extLen-offsetWithinExt)
-		switch extent.Type {
-		case btrfsitem.FILE_EXTENT_INLINE:
-			return copy(dat, extent.BodyInline[offsetWithinExt:offsetWithinExt+readSize]), nil
-		case btrfsitem.FILE_EXTENT_REG, btrfsitem.FILE_EXTENT_PREALLOC:
-			return file.FS.ReadAt(dat[:readSize],
-				extent.BodyExtent.DiskByteNr.
-					Add(extent.BodyExtent.Offset).
-					Add(btrfsvol.AddrDelta(offsetWithinExt)))
-		}
-	}
-	return 0, fmt.Errorf("read: could not map position %v", off)
-}
-
-var _ io.ReaderAt = (*file)(nil)
diff --git a/cmd/btrfs-mount/subvol_fuse.go b/cmd/btrfs-mount/subvol_fuse.go
index 6f3f267..1163e9f 100644
--- a/cmd/btrfs-mount/subvol_fuse.go
+++ b/cmd/btrfs-mount/subvol_fuse.go
@@ -6,6 +6,9 @@ import (
 	"sync/atomic"
 	"syscall"
 
+	"github.com/datawire/dlib/dcontext"
+	"github.com/datawire/dlib/dlog"
+	"github.com/jacobsa/fuse"
 	"github.com/jacobsa/fuse/fuseops"
 	"github.com/jacobsa/fuse/fuseutil"
 
@@ -15,21 +18,45 @@ import (
 )
 
 type dirState struct {
-	Dir *dir
+	Dir *btrfs.Dir
 }
 
 type fileState struct {
-	File *file
+	File *btrfs.File
 }
 
-type subvolumeFUSE struct {
+type Subvolume struct {
+	btrfs.Subvolume
+	DeviceName string
+	Mountpoint string
+
 	fuseutil.NotImplementedFileSystem
 	lastHandle  uint64
 	dirHandles  util.SyncMap[fuseops.HandleID, *dirState]
 	fileHandles util.SyncMap[fuseops.HandleID, *fileState]
 }
 
-func (sv *subvolumeFUSE) newHandle() fuseops.HandleID {
+func (sv *Subvolume) Run(ctx context.Context) error {
+	mount, err := fuse.Mount(
+		sv.Mountpoint,
+		fuseutil.NewFileSystemServer(sv),
+		&fuse.MountConfig{
+			OpContext:   ctx,
+			ErrorLogger: dlog.StdLogger(ctx, dlog.LogLevelError),
+			DebugLogger: dlog.StdLogger(ctx, dlog.LogLevelDebug),
+
+			FSName:  sv.DeviceName,
+			Subtype: "btrfs",
+
+			ReadOnly: true,
+		})
+	if err != nil {
+		return err
+	}
+	return mount.Join(dcontext.HardContext(ctx))
+}
+
+func (sv *Subvolume) newHandle() fuseops.HandleID {
 	return fuseops.HandleID(atomic.AddUint64(&sv.lastHandle, 1))
 }
 
@@ -73,14 +100,14 @@ func (sv *Subvolume) StatFS(_ context.Context, op *fuseops.StatFSOp) error {
 
 func (sv *Subvolume) LookUpInode(_ context.Context, op *fuseops.LookUpInodeOp) error {
 	if op.Parent == fuseops.RootInodeID {
-		parent, err := sv.getRootInode()
+		parent, err := sv.GetRootInode()
 		if err != nil {
 			return err
 		}
 		op.Parent = fuseops.InodeID(parent)
 	}
 
-	dir, err := sv.loadDir(btrfs.ObjID(op.Parent))
+	dir, err := sv.LoadDir(btrfs.ObjID(op.Parent))
 	if err != nil {
 		return err
 	}
@@ -91,7 +118,7 @@ func (sv *Subvolume) LookUpInode(_ context.Context, op *fuseops.LookUpInodeOp) e
 	if entry.Location.ItemType != btrfsitem.INODE_ITEM_KEY {
 		return fmt.Errorf("child %q is not an inode: %w", op.Name, syscall.ENOSYS)
 	}
-	bareInode, err := sv.loadBareInode(entry.Location.ObjectID)
+	bareInode, err := sv.LoadBareInode(entry.Location.ObjectID)
 	if err != nil {
 		return err
 	}
@@ -105,14 +132,14 @@ func (sv *Subvolume) LookUpInode(_ context.Context, op *fuseops.LookUpInodeOp) e
 
 func (sv *Subvolume) GetInodeAttributes(_ context.Context, op *fuseops.GetInodeAttributesOp) error {
 	if op.Inode == fuseops.RootInodeID {
-		inode, err := sv.getRootInode()
+		inode, err := sv.GetRootInode()
 		if err != nil {
 			return err
 		}
 		op.Inode = fuseops.InodeID(inode)
 	}
 
-	bareInode, err := sv.loadBareInode(btrfs.ObjID(op.Inode))
+	bareInode, err := sv.LoadBareInode(btrfs.ObjID(op.Inode))
 	if err != nil {
 		return err
 	}
@@ -123,14 +150,14 @@ func (sv *Subvolume) GetInodeAttributes(_ context.Context, op *fuseops.GetInodeA
 
 func (sv *Subvolume) OpenDir(_ context.Context, op *fuseops.OpenDirOp) error {
 	if op.Inode == fuseops.RootInodeID {
-		inode, err := sv.getRootInode()
+		inode, err := sv.GetRootInode()
 		if err != nil {
 			return err
 		}
 		op.Inode = fuseops.InodeID(inode)
 	}
 
-	dir, err := sv.loadDir(btrfs.ObjID(op.Inode))
+	dir, err := sv.LoadDir(btrfs.ObjID(op.Inode))
 	if err != nil {
 		return err
 	}
@@ -184,7 +211,7 @@ func (sv *Subvolume) ReleaseDirHandle(_ context.Context, op *fuseops.ReleaseDirH
 }
 
 func (sv *Subvolume) OpenFile(_ context.Context, op *fuseops.OpenFileOp) error {
-	file, err := sv.loadFile(btrfs.ObjID(op.Inode))
+	file, err := sv.LoadFile(btrfs.ObjID(op.Inode))
 	if err != nil {
 		return err
 	}
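
As a quick orientation to the refactor, here is a minimal sketch (not part of the commit) of how cmd/btrfs-mount is wired up after this change: the command-level `Subvolume` embeds the library's `btrfs.Subvolume` (which now carries `FS` and `TreeID`) and keeps only the FUSE-facing pieces (`DeviceName`, `Mountpoint`, `Run`). The struct fields, `btrfs.FS_TREE_OBJECTID`, and `Run` are taken from the main.go and subvol_fuse.go hunks above; the `mountRoot` helper and its signature are invented for illustration.

```go
package main

import (
	"context"

	"lukeshu.com/btrfs-tools/pkg/btrfs"
)

// mountRoot builds the FUSE-facing Subvolume for the top-level FS tree and
// serves it read-only until ctx is cancelled or the mount is unmounted.
func mountRoot(ctx context.Context, fs *btrfs.FS, devName, mountpoint string) error {
	rootSubvol := &Subvolume{
		Subvolume: btrfs.Subvolume{
			FS:     fs,
			TreeID: btrfs.FS_TREE_OBJECTID,
		},
		DeviceName: devName,
		Mountpoint: mountpoint,
	}
	// Run mounts the subvolume over FUSE and blocks until unmount
	// (see the Run method added to subvol_fuse.go above).
	return rootSubvol.Run(ctx)
}
```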