summaryrefslogtreecommitdiff
path: root/lib/btrfsprogs
diff options
context:
space:
mode:
authorLuke Shumaker <lukeshu@lukeshu.com>2022-07-10 17:24:51 -0600
committerLuke Shumaker <lukeshu@lukeshu.com>2022-07-11 00:44:30 -0600
commitbde202f286461ab575dc7e3d83f996d9a5f4a6ec (patch)
tree64782c354c15f64a164996125a06c1bca30c9aa7 /lib/btrfsprogs
parentd2da99882ea49cc67780c0255bf624698898e7fe (diff)
Have a go at rearranging things in to a lib/btrfsprogs
Diffstat (limited to 'lib/btrfsprogs')
-rw-r--r--lib/btrfsprogs/btrfsinspect/mount.go407
-rw-r--r--lib/btrfsprogs/btrfsinspect/print_tree.go436
-rw-r--r--lib/btrfsprogs/btrfsrepair/clearnodes.go91
-rw-r--r--lib/btrfsprogs/btrfsutil/open.go28
-rw-r--r--lib/btrfsprogs/btrfsutil/scan.go55
-rw-r--r--lib/btrfsprogs/btrfsutil/walk.go119
6 files changed, 1136 insertions, 0 deletions
diff --git a/lib/btrfsprogs/btrfsinspect/mount.go b/lib/btrfsprogs/btrfsinspect/mount.go
new file mode 100644
index 0000000..641bc64
--- /dev/null
+++ b/lib/btrfsprogs/btrfsinspect/mount.go
@@ -0,0 +1,407 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package btrfsinspect
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+ "syscall"
+
+ "github.com/datawire/dlib/dcontext"
+ "github.com/datawire/dlib/dgroup"
+ "github.com/datawire/dlib/dlog"
+ "github.com/jacobsa/fuse"
+ "github.com/jacobsa/fuse/fuseops"
+ "github.com/jacobsa/fuse/fuseutil"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/linux"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+func MountRO(ctx context.Context, fs *btrfs.FS, mountpoint string) error {
+ pvs := fs.LV.PhysicalVolumes()
+ if len(pvs) < 1 {
+ return errors.New("no devices")
+ }
+
+ deviceName := pvs[util.SortedMapKeys(pvs)[0]].Name()
+ if abs, err := filepath.Abs(deviceName); err == nil {
+ deviceName = abs
+ }
+
+ rootSubvol := &subvolume{
+ Subvolume: btrfs.Subvolume{
+ FS: fs,
+ TreeID: btrfs.FS_TREE_OBJECTID,
+ },
+ DeviceName: deviceName,
+ Mountpoint: mountpoint,
+ }
+ return rootSubvol.Run(ctx)
+}
+
+func fuseMount(ctx context.Context, mountpoint string, server fuse.Server, cfg *fuse.MountConfig) error {
+ grp := dgroup.NewGroup(ctx, dgroup.GroupConfig{
+ // Allow mountHandle.Join() returning to cause the
+ // "unmount" goroutine to quit.
+ ShutdownOnNonError: true,
+ })
+ mounted := uint32(1)
+ grp.Go("unmount", func(ctx context.Context) error {
+ <-ctx.Done()
+ var err error
+ var gotNil bool
+ // Keep retrying, because the FS might be busy.
+ for atomic.LoadUint32(&mounted) != 0 {
+ if _err := fuse.Unmount(mountpoint); _err == nil {
+ gotNil = true
+ } else if !gotNil {
+ err = _err
+ }
+ }
+ if gotNil {
+ return nil
+ }
+ return err
+ })
+ grp.Go("mount", func(ctx context.Context) error {
+ defer atomic.StoreUint32(&mounted, 0)
+
+ cfg.OpContext = ctx
+ cfg.ErrorLogger = dlog.StdLogger(ctx, dlog.LogLevelError)
+ cfg.DebugLogger = dlog.StdLogger(ctx, dlog.LogLevelDebug)
+
+ mountHandle, err := fuse.Mount(mountpoint, server, cfg)
+ if err != nil {
+ return err
+ }
+ dlog.Infof(ctx, "mounted %q", mountpoint)
+ return mountHandle.Join(dcontext.HardContext(ctx))
+ })
+ return grp.Wait()
+}
+
+type dirState struct {
+ Dir *btrfs.Dir
+}
+
+type fileState struct {
+ File *btrfs.File
+}
+
+type subvolume struct {
+ btrfs.Subvolume
+ DeviceName string
+ Mountpoint string
+
+ fuseutil.NotImplementedFileSystem
+ lastHandle uint64
+ dirHandles util.SyncMap[fuseops.HandleID, *dirState]
+ fileHandles util.SyncMap[fuseops.HandleID, *fileState]
+
+ subvolMu sync.Mutex
+ subvols map[string]struct{}
+ grp *dgroup.Group
+}
+
+func (sv *subvolume) Run(ctx context.Context) error {
+ sv.grp = dgroup.NewGroup(ctx, dgroup.GroupConfig{})
+ sv.grp.Go("self", func(ctx context.Context) error {
+ cfg := &fuse.MountConfig{
+ FSName: sv.DeviceName,
+ Subtype: "btrfs",
+
+ ReadOnly: true,
+
+ Options: map[string]string{
+ "allow_other": "",
+ },
+ }
+ return fuseMount(ctx, sv.Mountpoint, fuseutil.NewFileSystemServer(sv), cfg)
+ })
+ return sv.grp.Wait()
+}
+
+func (sv *subvolume) newHandle() fuseops.HandleID {
+ return fuseops.HandleID(atomic.AddUint64(&sv.lastHandle, 1))
+}
+
+func inodeItemToFUSE(itemBody btrfsitem.Inode) fuseops.InodeAttributes {
+ return fuseops.InodeAttributes{
+ Size: uint64(itemBody.Size),
+ Nlink: uint32(itemBody.NLink),
+ Mode: uint32(itemBody.Mode),
+ //RDev: itemBody.Rdev, // jacobsa/fuse doesn't expose rdev
+ Atime: itemBody.ATime.ToStd(),
+ Mtime: itemBody.MTime.ToStd(),
+ Ctime: itemBody.CTime.ToStd(),
+ //Crtime: itemBody.OTime,
+ Uid: uint32(itemBody.UID),
+ Gid: uint32(itemBody.GID),
+ }
+}
+
+func (sv *subvolume) LoadDir(inode btrfs.ObjID) (val *btrfs.Dir, err error) {
+ val, err = sv.Subvolume.LoadDir(inode)
+ if val != nil {
+ haveSubvolumes := false
+ for _, index := range util.SortedMapKeys(val.ChildrenByIndex) {
+ entry := val.ChildrenByIndex[index]
+ if entry.Location.ItemType == btrfsitem.ROOT_ITEM_KEY {
+ haveSubvolumes = true
+ break
+ }
+ }
+ if haveSubvolumes {
+ abspath, _err := val.AbsPath()
+ if _err != nil {
+ return
+ }
+ sv.subvolMu.Lock()
+ for _, index := range util.SortedMapKeys(val.ChildrenByIndex) {
+ entry := val.ChildrenByIndex[index]
+ if entry.Location.ItemType != btrfsitem.ROOT_ITEM_KEY {
+ continue
+ }
+ if sv.subvols == nil {
+ sv.subvols = make(map[string]struct{})
+ }
+ subMountpoint := filepath.Join(abspath, string(entry.Name))
+ if _, alreadyMounted := sv.subvols[subMountpoint]; !alreadyMounted {
+ sv.subvols[subMountpoint] = struct{}{}
+ workerName := fmt.Sprintf("%d-%s", val.Inode, filepath.Base(subMountpoint))
+ sv.grp.Go(workerName, func(ctx context.Context) error {
+ subSv := &subvolume{
+ Subvolume: btrfs.Subvolume{
+ FS: sv.FS,
+ TreeID: entry.Location.ObjectID,
+ },
+ DeviceName: sv.DeviceName,
+ Mountpoint: filepath.Join(sv.Mountpoint, subMountpoint[1:]),
+ }
+ return subSv.Run(ctx)
+ })
+ }
+ }
+ sv.subvolMu.Unlock()
+ }
+ }
+ return
+}
+
+func (sv *subvolume) StatFS(_ context.Context, op *fuseops.StatFSOp) error {
+ // See linux.git/fs/btrfs/super.c:btrfs_statfs()
+ sb, err := sv.FS.Superblock()
+ if err != nil {
+ return err
+ }
+
+ op.IoSize = sb.Data.SectorSize
+ op.BlockSize = sb.Data.SectorSize
+ op.Blocks = sb.Data.TotalBytes / uint64(sb.Data.SectorSize) // TODO: adjust for RAID type
+ //op.BlocksFree = TODO
+
+ // btrfs doesn't have a fixed number of inodes
+ op.Inodes = 0
+ op.InodesFree = 0
+
+ // jacobsa/fuse doesn't expose namelen, instead hard-coding it
+ // to 255. Which is fine by us, because that's what it is for
+ // btrfs.
+
+ return nil
+}
+
+func (sv *subvolume) LookUpInode(_ context.Context, op *fuseops.LookUpInodeOp) error {
+ if op.Parent == fuseops.RootInodeID {
+ parent, err := sv.GetRootInode()
+ if err != nil {
+ return err
+ }
+ op.Parent = fuseops.InodeID(parent)
+ }
+
+ dir, err := sv.LoadDir(btrfs.ObjID(op.Parent))
+ if err != nil {
+ return err
+ }
+ entry, ok := dir.ChildrenByName[op.Name]
+ if !ok {
+ return syscall.ENOENT
+ }
+ if entry.Location.ItemType != btrfsitem.INODE_ITEM_KEY {
+ // Subvolume
+ //
+ // Because each subvolume has its own pool of inodes
+ // (as in 2 different subvolumes can have files with
+		// the same inode number), so to represent that to FUSE
+ // we need to have this be a full separate mountpoint.
+ //
+ // I'd want to return EIO or EINTR or something here,
+ // but both the FUSE userspace tools and the kernel
+ // itself stat the mountpoint before mounting it, so
+ // we've got to return something bogus here to let
+ // that mount happen.
+ op.Entry = fuseops.ChildInodeEntry{
+ Child: 2, // an inode number that a real file will never have
+ Attributes: fuseops.InodeAttributes{
+ Nlink: 1,
+ Mode: uint32(linux.ModeFmtDir | 0700),
+ },
+ }
+ return nil
+ }
+ bareInode, err := sv.LoadBareInode(entry.Location.ObjectID)
+ if err != nil {
+ return err
+ }
+ op.Entry = fuseops.ChildInodeEntry{
+ Child: fuseops.InodeID(entry.Location.ObjectID),
+ Generation: fuseops.GenerationNumber(bareInode.InodeItem.Sequence),
+ Attributes: inodeItemToFUSE(*bareInode.InodeItem),
+ }
+ return nil
+}
+
+func (sv *subvolume) GetInodeAttributes(_ context.Context, op *fuseops.GetInodeAttributesOp) error {
+ if op.Inode == fuseops.RootInodeID {
+ inode, err := sv.GetRootInode()
+ if err != nil {
+ return err
+ }
+ op.Inode = fuseops.InodeID(inode)
+ }
+
+ bareInode, err := sv.LoadBareInode(btrfs.ObjID(op.Inode))
+ if err != nil {
+ return err
+ }
+
+ op.Attributes = inodeItemToFUSE(*bareInode.InodeItem)
+ return nil
+}
+
+func (sv *subvolume) OpenDir(_ context.Context, op *fuseops.OpenDirOp) error {
+ if op.Inode == fuseops.RootInodeID {
+ inode, err := sv.GetRootInode()
+ if err != nil {
+ return err
+ }
+ op.Inode = fuseops.InodeID(inode)
+ }
+
+ dir, err := sv.LoadDir(btrfs.ObjID(op.Inode))
+ if err != nil {
+ return err
+ }
+ handle := sv.newHandle()
+ sv.dirHandles.Store(handle, &dirState{
+ Dir: dir,
+ })
+ op.Handle = handle
+ return nil
+}
+func (sv *subvolume) ReadDir(_ context.Context, op *fuseops.ReadDirOp) error {
+ state, ok := sv.dirHandles.Load(op.Handle)
+ if !ok {
+ return syscall.EBADF
+ }
+ origOffset := op.Offset
+ for _, index := range util.SortedMapKeys(state.Dir.ChildrenByIndex) {
+ if index < uint64(origOffset) {
+ continue
+ }
+ entry := state.Dir.ChildrenByIndex[index]
+ n := fuseutil.WriteDirent(op.Dst[op.BytesRead:], fuseutil.Dirent{
+ Offset: fuseops.DirOffset(index + 1),
+ Inode: fuseops.InodeID(entry.Location.ObjectID),
+ Name: string(entry.Name),
+ Type: map[btrfsitem.FileType]fuseutil.DirentType{
+ btrfsitem.FT_UNKNOWN: fuseutil.DT_Unknown,
+ btrfsitem.FT_REG_FILE: fuseutil.DT_File,
+ btrfsitem.FT_DIR: fuseutil.DT_Directory,
+ btrfsitem.FT_CHRDEV: fuseutil.DT_Char,
+ btrfsitem.FT_BLKDEV: fuseutil.DT_Block,
+ btrfsitem.FT_FIFO: fuseutil.DT_FIFO,
+ btrfsitem.FT_SOCK: fuseutil.DT_Socket,
+ btrfsitem.FT_SYMLINK: fuseutil.DT_Link,
+ }[entry.Type],
+ })
+ if n == 0 {
+ break
+ }
+ op.BytesRead += n
+ }
+ return nil
+}
+func (sv *subvolume) ReleaseDirHandle(_ context.Context, op *fuseops.ReleaseDirHandleOp) error {
+ _, ok := sv.dirHandles.LoadAndDelete(op.Handle)
+ if !ok {
+ return syscall.EBADF
+ }
+ return nil
+}
+
+func (sv *subvolume) OpenFile(_ context.Context, op *fuseops.OpenFileOp) error {
+ file, err := sv.LoadFile(btrfs.ObjID(op.Inode))
+ if err != nil {
+ return err
+ }
+ handle := sv.newHandle()
+ sv.fileHandles.Store(handle, &fileState{
+ File: file,
+ })
+ op.Handle = handle
+ op.KeepPageCache = true
+ return nil
+}
+func (sv *subvolume) ReadFile(_ context.Context, op *fuseops.ReadFileOp) error {
+ state, ok := sv.fileHandles.Load(op.Handle)
+ if !ok {
+ return syscall.EBADF
+ }
+
+ var dat []byte
+ if op.Dst != nil {
+ size := util.Min(int64(len(op.Dst)), op.Size)
+ dat = op.Dst[:size]
+ } else {
+ dat = make([]byte, op.Size)
+ op.Data = [][]byte{dat}
+ }
+
+ var err error
+ op.BytesRead, err = state.File.ReadAt(dat, op.Offset)
+ if errors.Is(err, io.EOF) {
+ err = nil
+ }
+
+ return err
+}
+func (sv *subvolume) ReleaseFileHandle(_ context.Context, op *fuseops.ReleaseFileHandleOp) error {
+ _, ok := sv.fileHandles.LoadAndDelete(op.Handle)
+ if !ok {
+ return syscall.EBADF
+ }
+ return nil
+}
+
+func (sv *subvolume) ReadSymlink(_ context.Context, op *fuseops.ReadSymlinkOp) error {
+ return syscall.ENOSYS
+}
+
+func (sv *subvolume) GetXattr(_ context.Context, op *fuseops.GetXattrOp) error { return syscall.ENOSYS }
+func (sv *subvolume) ListXattr(_ context.Context, op *fuseops.ListXattrOp) error {
+ return syscall.ENOSYS
+}
+
+func (sv *subvolume) Destroy() {}
diff --git a/lib/btrfsprogs/btrfsinspect/print_tree.go b/lib/btrfsprogs/btrfsinspect/print_tree.go
new file mode 100644
index 0000000..72ff13e
--- /dev/null
+++ b/lib/btrfsprogs/btrfsinspect/print_tree.go
@@ -0,0 +1,436 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package btrfsinspect
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+func DumpTrees(fs *btrfs.FS) error {
+ superblock, err := fs.Superblock()
+ if err != nil {
+ return err
+ }
+
+ if superblock.Data.RootTree != 0 {
+ fmt.Printf("root tree\n")
+ if err := printTree(fs, btrfs.ROOT_TREE_OBJECTID); err != nil {
+ return err
+ }
+ }
+ if superblock.Data.ChunkTree != 0 {
+ fmt.Printf("chunk tree\n")
+ if err := printTree(fs, btrfs.CHUNK_TREE_OBJECTID); err != nil {
+ return err
+ }
+ }
+ if superblock.Data.LogTree != 0 {
+ fmt.Printf("log root tree\n")
+ if err := printTree(fs, btrfs.TREE_LOG_OBJECTID); err != nil {
+ return err
+ }
+ }
+ if superblock.Data.BlockGroupRoot != 0 {
+ fmt.Printf("block group tree\n")
+ if err := printTree(fs, btrfs.BLOCK_GROUP_TREE_OBJECTID); err != nil {
+ return err
+ }
+ }
+ if err := fs.TreeWalk(btrfs.ROOT_TREE_OBJECTID, btrfs.TreeWalkHandler{
+ Item: func(_ btrfs.TreePath, item btrfs.Item) error {
+ if item.Head.Key.ItemType != btrfsitem.ROOT_ITEM_KEY {
+ return nil
+ }
+ treeName, ok := map[btrfs.ObjID]string{
+ btrfs.ROOT_TREE_OBJECTID: "root",
+ btrfs.EXTENT_TREE_OBJECTID: "extent",
+ btrfs.CHUNK_TREE_OBJECTID: "chunk",
+ btrfs.DEV_TREE_OBJECTID: "device",
+ btrfs.FS_TREE_OBJECTID: "fs",
+ btrfs.ROOT_TREE_DIR_OBJECTID: "directory",
+ btrfs.CSUM_TREE_OBJECTID: "checksum",
+ btrfs.ORPHAN_OBJECTID: "orphan",
+ btrfs.TREE_LOG_OBJECTID: "log",
+ btrfs.TREE_LOG_FIXUP_OBJECTID: "log fixup",
+ btrfs.TREE_RELOC_OBJECTID: "reloc",
+ btrfs.DATA_RELOC_TREE_OBJECTID: "data reloc",
+ btrfs.EXTENT_CSUM_OBJECTID: "extent checksum",
+ btrfs.QUOTA_TREE_OBJECTID: "quota",
+ btrfs.UUID_TREE_OBJECTID: "uuid",
+ btrfs.FREE_SPACE_TREE_OBJECTID: "free space",
+ btrfs.MULTIPLE_OBJECTIDS: "multiple",
+ btrfs.BLOCK_GROUP_TREE_OBJECTID: "block group",
+ }[item.Head.Key.ObjectID]
+ if !ok {
+ treeName = "file"
+ }
+ fmt.Printf("%v tree %v \n", treeName, fmtKey(item.Head.Key))
+ return printTree(fs, item.Head.Key.ObjectID)
+ },
+ }); err != nil {
+ return err
+ }
+ fmt.Printf("total bytes %v\n", superblock.Data.TotalBytes)
+ fmt.Printf("bytes used %v\n", superblock.Data.BytesUsed)
+ fmt.Printf("uuid %v\n", superblock.Data.FSUUID)
+ return nil
+}
+
+// printTree mimics btrfs-progs
+// kernel-shared/print-tree.c:btrfs_print_tree() and
+// kernel-shared/print-tree.c:btrfs_print_leaf()
+func printTree(fs *btrfs.FS, treeID btrfs.ObjID) error {
+ return fs.TreeWalk(treeID, btrfs.TreeWalkHandler{
+ Node: func(path btrfs.TreePath, nodeRef *util.Ref[btrfsvol.LogicalAddr, btrfs.Node], err error) error {
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error: %v: %v\n", path, err)
+ }
+ if nodeRef != nil {
+ printHeaderInfo(nodeRef.Data)
+ }
+ return nil
+ },
+ PreKeyPointer: func(_ btrfs.TreePath, item btrfs.KeyPointer) error {
+ fmt.Printf("\t%v block %v gen %v\n",
+ fmtKey(item.Key),
+ item.BlockPtr,
+ item.Generation)
+ return nil
+ },
+ Item: func(path btrfs.TreePath, item btrfs.Item) error {
+ i := path[len(path)-1].ItemIdx
+ fmt.Printf("\titem %v %v itemoff %v itemsize %v\n",
+ i,
+ fmtKey(item.Head.Key),
+ item.Head.DataOffset,
+ item.Head.DataSize)
+ switch body := item.Body.(type) {
+ case btrfsitem.FreeSpaceHeader:
+ fmt.Printf("\t\tlocation %v\n", fmtKey(body.Location))
+ fmt.Printf("\t\tcache generation %v entries %v bitmaps %v\n",
+ body.Generation, body.NumEntries, body.NumBitmaps)
+ case btrfsitem.Inode:
+ fmt.Printf(""+
+ "\t\tgeneration %v transid %v size %v nbytes %v\n"+
+ "\t\tblock group %v mode %o links %v uid %v gid %v rdev %v\n"+
+ "\t\tsequence %v flags %v\n",
+ body.Generation, body.TransID, body.Size, body.NumBytes,
+ body.BlockGroup, body.Mode, body.NLink, body.UID, body.GID, body.RDev,
+ body.Sequence, body.Flags)
+ fmt.Printf("\t\tatime %v\n", fmtTime(body.ATime))
+ fmt.Printf("\t\tctime %v\n", fmtTime(body.CTime))
+ fmt.Printf("\t\tmtime %v\n", fmtTime(body.MTime))
+ fmt.Printf("\t\totime %v\n", fmtTime(body.OTime))
+ case btrfsitem.InodeRef:
+ fmt.Printf("\t\tindex %v namelen %v name: %s\n",
+ body.Index, body.NameLen, body.Name)
+ //case btrfsitem.INODE_EXTREF_KEY:
+ // // TODO
+ case btrfsitem.DirEntries:
+ for _, dir := range body {
+ fmt.Printf("\t\tlocation %v type %v\n",
+ fmtKey(dir.Location), dir.Type)
+ fmt.Printf("\t\ttransid %v data_len %v name_len %v\n",
+ dir.TransID, dir.DataLen, dir.NameLen)
+ fmt.Printf("\t\tname: %s\n", dir.Name)
+ if len(dir.Data) > 0 {
+ fmt.Printf("\t\tdata %v\n", dir.Data)
+ }
+ }
+ //case btrfsitem.DIR_LOG_INDEX_KEY, btrfsitem.DIR_LOG_ITEM_KEY:
+ // // TODO
+ case btrfsitem.Root:
+ fmt.Printf("\t\tgeneration %v root_dirid %v bytenr %d byte_limit %v bytes_used %v\n",
+ body.Generation, body.RootDirID, body.ByteNr, body.ByteLimit, body.BytesUsed)
+ fmt.Printf("\t\tlast_snapshot %v flags %v refs %v\n",
+ body.LastSnapshot, body.Flags, body.Refs)
+ fmt.Printf("\t\tdrop_progress %v drop_level %v\n",
+ fmtKey(body.DropProgress), body.DropLevel)
+ fmt.Printf("\t\tlevel %v generation_v2 %v\n",
+ body.Level, body.GenerationV2)
+ if body.Generation == body.GenerationV2 {
+ fmt.Printf("\t\tuuid %v\n", body.UUID)
+ fmt.Printf("\t\tparent_uuid %v\n", body.ParentUUID)
+ fmt.Printf("\t\treceived_uuid %v\n", body.ReceivedUUID)
+ fmt.Printf("\t\tctransid %v otransid %v stransid %v rtransid %v\n",
+ body.CTransID, body.OTransID, body.STransID, body.RTransID)
+ fmt.Printf("\t\tctime %v\n", fmtTime(body.CTime))
+ fmt.Printf("\t\totime %v\n", fmtTime(body.OTime))
+ fmt.Printf("\t\tstime %v\n", fmtTime(body.STime))
+ fmt.Printf("\t\trtime %v\n", fmtTime(body.RTime))
+ }
+ case btrfsitem.RootRef:
+ var tag string
+ switch item.Head.Key.ItemType {
+ case btrfsitem.ROOT_REF_KEY:
+ tag = "ref"
+ case btrfsitem.ROOT_BACKREF_KEY:
+ tag = "backref"
+ default:
+ tag = fmt.Sprintf("(error: unhandled RootRef item type: %v)", item.Head.Key.ItemType)
+ }
+ fmt.Printf("\t\troot %v key dirid %v sequence %v name %s\n",
+ tag, body.DirID, body.Sequence, body.Name)
+ case btrfsitem.Extent:
+ fmt.Printf("\t\trefs %v gen %v flags %v\n",
+ body.Head.Refs, body.Head.Generation, body.Head.Flags)
+ if body.Head.Flags.Has(btrfsitem.EXTENT_FLAG_TREE_BLOCK) {
+ fmt.Printf("\t\ttree block %v level %v\n",
+ fmtKey(body.Info.Key), body.Info.Level)
+ }
+ printExtentInlineRefs(body.Refs)
+ case btrfsitem.Metadata:
+ fmt.Printf("\t\trefs %v gen %v flags %v\n",
+ body.Head.Refs, body.Head.Generation, body.Head.Flags)
+ fmt.Printf("\t\ttree block skinny level %v\n", item.Head.Key.Offset)
+ printExtentInlineRefs(body.Refs)
+ //case btrfsitem.EXTENT_DATA_REF_KEY:
+ // // TODO
+ //case btrfsitem.SHARED_DATA_REF_KEY:
+ // // TODO
+ case btrfsitem.ExtentCSum:
+ sb, _ := fs.Superblock()
+ sectorSize := btrfsvol.AddrDelta(sb.Data.SectorSize)
+
+ start := btrfsvol.LogicalAddr(item.Head.Key.Offset)
+ itemSize := btrfsvol.AddrDelta(len(body.Sums)) * sectorSize
+ fmt.Printf("\t\trange start %d end %d length %d",
+ start, start.Add(itemSize), itemSize)
+ sumsPerLine := util.Max(1, len(btrfssum.CSum{})/body.ChecksumSize/2)
+
+ pos := start
+ for i, sum := range body.Sums {
+ if i%sumsPerLine == 0 {
+ fmt.Printf("\n\t\t")
+ } else {
+ fmt.Printf(" ")
+ }
+ fmt.Printf("[%d] 0x%s", pos, sum.Fmt(sb.Data.ChecksumType))
+ pos = pos.Add(sectorSize)
+ }
+ fmt.Printf("\n")
+ case btrfsitem.FileExtent:
+ fmt.Printf("\t\tgeneration %v type %v\n",
+ body.Generation, body.Type)
+ switch body.Type {
+ case btrfsitem.FILE_EXTENT_INLINE:
+ fmt.Printf("\t\tinline extent data size %v ram_bytes %v compression %v\n",
+ len(body.BodyInline), body.RAMBytes, body.Compression)
+ case btrfsitem.FILE_EXTENT_PREALLOC:
+ fmt.Printf("\t\tprealloc data disk byte %v nr %v\n",
+ body.BodyExtent.DiskByteNr,
+ body.BodyExtent.DiskNumBytes)
+ fmt.Printf("\t\tprealloc data offset %v nr %v\n",
+ body.BodyExtent.Offset,
+ body.BodyExtent.NumBytes)
+ case btrfsitem.FILE_EXTENT_REG:
+ fmt.Printf("\t\textent data disk byte %d nr %d\n",
+ body.BodyExtent.DiskByteNr,
+ body.BodyExtent.DiskNumBytes)
+ fmt.Printf("\t\textent data offset %d nr %d ram %v\n",
+ body.BodyExtent.Offset,
+ body.BodyExtent.NumBytes,
+ body.RAMBytes)
+ fmt.Printf("\t\textent compression %v\n",
+ body.Compression)
+ default:
+ fmt.Printf("\t\t(error) unknown file extent type %v", body.Type)
+ }
+ case btrfsitem.BlockGroup:
+ fmt.Printf("\t\tblock group used %v chunk_objectid %v flags %v\n",
+ body.Used, body.ChunkObjectID, body.Flags)
+ case btrfsitem.FreeSpaceInfo:
+ fmt.Printf("\t\tfree space info extent count %v flags %v\n",
+ body.ExtentCount, body.Flags)
+ case btrfsitem.FreeSpaceBitmap:
+ fmt.Printf("\t\tfree space bitmap\n")
+ case btrfsitem.Chunk:
+ fmt.Printf("\t\tlength %d owner %d stripe_len %v type %v\n",
+ body.Head.Size, body.Head.Owner, body.Head.StripeLen, body.Head.Type)
+ fmt.Printf("\t\tio_align %v io_width %v sector_size %v\n",
+ body.Head.IOOptimalAlign, body.Head.IOOptimalWidth, body.Head.IOMinSize)
+ fmt.Printf("\t\tnum_stripes %v sub_stripes %v\n",
+ body.Head.NumStripes, body.Head.SubStripes)
+ for i, stripe := range body.Stripes {
+ fmt.Printf("\t\t\tstripe %v devid %d offset %d\n",
+ i, stripe.DeviceID, stripe.Offset)
+ fmt.Printf("\t\t\tdev_uuid %v\n",
+ stripe.DeviceUUID)
+ }
+ case btrfsitem.Dev:
+ fmt.Printf(""+
+ "\t\tdevid %d total_bytes %v bytes_used %v\n"+
+ "\t\tio_align %v io_width %v sector_size %v type %v\n"+
+ "\t\tgeneration %v start_offset %v dev_group %v\n"+
+ "\t\tseek_speed %v bandwidth %v\n"+
+ "\t\tuuid %v\n"+
+ "\t\tfsid %v\n",
+ body.DevID, body.NumBytes, body.NumBytesUsed,
+ body.IOOptimalAlign, body.IOOptimalWidth, body.IOMinSize, body.Type,
+ body.Generation, body.StartOffset, body.DevGroup,
+ body.SeekSpeed, body.Bandwidth,
+ body.DevUUID,
+ body.FSUUID)
+ case btrfsitem.DevExtent:
+ fmt.Printf(""+
+ "\t\tdev extent chunk_tree %v\n"+
+ "\t\tchunk_objectid %v chunk_offset %d length %d\n"+
+ "\t\tchunk_tree_uuid %v\n",
+ body.ChunkTree, body.ChunkObjectID, body.ChunkOffset, body.Length,
+ body.ChunkTreeUUID)
+ //case btrfsitem.QGROUP_STATUS_KEY:
+ // // TODO
+ //case btrfsitem.QGROUP_INFO_KEY:
+ // // TODO
+ //case btrfsitem.QGROUP_LIMIT_KEY:
+ // // TODO
+ case btrfsitem.UUIDMap:
+ fmt.Printf("\t\tsubvol_id %d\n", body.ObjID)
+ //case btrfsitem.STRING_ITEM_KEY:
+ // // TODO
+ case btrfsitem.DevStats:
+ fmt.Printf("\t\tpersistent item objectid %v offset %v\n",
+ item.Head.Key.ObjectID.Format(item.Head.Key.ItemType), item.Head.Key.Offset)
+ switch item.Head.Key.ObjectID {
+ case btrfs.DEV_STATS_OBJECTID:
+ fmt.Printf("\t\tdevice stats\n")
+ fmt.Printf("\t\twrite_errs %v read_errs %v flush_errs %v corruption_errs %v generation %v\n",
+ body.Values[btrfsitem.DEV_STAT_WRITE_ERRS],
+ body.Values[btrfsitem.DEV_STAT_READ_ERRS],
+ body.Values[btrfsitem.DEV_STAT_FLUSH_ERRS],
+ body.Values[btrfsitem.DEV_STAT_CORRUPTION_ERRS],
+ body.Values[btrfsitem.DEV_STAT_GENERATION_ERRS])
+ default:
+ fmt.Printf("\t\tunknown persistent item objectid %v\n", item.Head.Key.ObjectID)
+ }
+ //case btrfsitem.TEMPORARY_ITEM_KEY:
+ // // TODO
+ case btrfsitem.Empty:
+ switch item.Head.Key.ItemType {
+ case btrfsitem.ORPHAN_ITEM_KEY: // 48
+ fmt.Printf("\t\torphan item\n")
+ case btrfsitem.TREE_BLOCK_REF_KEY: // 176
+ fmt.Printf("\t\ttree block backref\n")
+ case btrfsitem.SHARED_BLOCK_REF_KEY: // 182
+ fmt.Printf("\t\tshared block backref\n")
+ case btrfsitem.FREE_SPACE_EXTENT_KEY: // 199
+ fmt.Printf("\t\tfree space extent\n")
+ case btrfsitem.QGROUP_RELATION_KEY: // 246
+ // do nothing
+ //case btrfsitem.EXTENT_REF_V0_KEY:
+ // fmt.Printf("\t\textent ref v0 (deprecated)\n")
+ //case btrfsitem.CSUM_ITEM_KEY:
+ // fmt.Printf("\t\tcsum item\n")
+ default:
+ fmt.Printf("\t\t(error) unhandled empty item type: %v\n", item.Head.Key.ItemType)
+ }
+ case btrfsitem.Error:
+ fmt.Printf("\t\t(error) error item: %v\n", body.Err)
+ default:
+ fmt.Printf("\t\t(error) unhandled item type: %T\n", body)
+ }
+ return nil
+ },
+ })
+}
+
+// printHeaderInfo mimics btrfs-progs kernel-shared/print-tree.c:print_header_info()
+func printHeaderInfo(node btrfs.Node) {
+ var typename string
+ if node.Head.Level > 0 { // internal node
+ typename = "node"
+ fmt.Printf("node %v level %v items %v free space %v",
+ node.Head.Addr,
+ node.Head.Level,
+ node.Head.NumItems,
+ node.MaxItems()-node.Head.NumItems)
+ } else { // leaf node
+ typename = "leaf"
+ fmt.Printf("leaf %d items %v free space %v",
+ node.Head.Addr,
+ node.Head.NumItems,
+ node.LeafFreeSpace())
+ }
+ fmt.Printf(" generation %v owner %v\n",
+ node.Head.Generation,
+ node.Head.Owner)
+
+ fmt.Printf("%v %d flags %v backref revision %v\n",
+ typename,
+ node.Head.Addr,
+ node.Head.Flags,
+ node.Head.BackrefRev)
+
+ fmt.Printf("checksum stored %v\n", node.Head.Checksum.Fmt(node.ChecksumType))
+ if calcSum, err := node.CalculateChecksum(); err != nil {
+ fmt.Printf("checksum calced %v\n", err)
+ } else {
+ fmt.Printf("checksum calced %v\n", calcSum.Fmt(node.ChecksumType))
+ }
+
+ fmt.Printf("fs uuid %v\n", node.Head.MetadataUUID)
+ fmt.Printf("chunk uuid %v\n", node.Head.ChunkTreeUUID)
+}
+
+// printExtentInlineRefs mimics part of btrfs-progs kernel-shared/print-tree.c:print_extent_item()
+func printExtentInlineRefs(refs []btrfsitem.ExtentInlineRef) {
+ for _, ref := range refs {
+ switch subitem := ref.Body.(type) {
+ case nil:
+ switch ref.Type {
+ case btrfsitem.TREE_BLOCK_REF_KEY:
+ fmt.Printf("\t\ttree block backref root %v\n",
+ btrfs.ObjID(ref.Offset))
+ case btrfsitem.SHARED_BLOCK_REF_KEY:
+ fmt.Printf("\t\tshared block backref parent %v\n",
+ ref.Offset)
+ default:
+ fmt.Printf("\t\t(error) unexpected empty sub-item type: %v\n", ref.Type)
+ }
+ case btrfsitem.ExtentDataRef:
+ fmt.Printf("\t\textent data backref root %v objectid %v offset %v count %v\n",
+ subitem.Root, subitem.ObjectID, subitem.Offset, subitem.Count)
+ case btrfsitem.SharedDataRef:
+ fmt.Printf("\t\tshared data backref parent %v count %v\n",
+ ref.Offset, subitem.Count)
+ default:
+ fmt.Printf("\t\t(error) unexpected sub-item type: %T\n", subitem)
+ }
+ }
+}
+
+// mimics print-tree.c:btrfs_print_key()
+func fmtKey(key btrfs.Key) string {
+ var out strings.Builder
+ fmt.Fprintf(&out, "key (%v %v", key.ObjectID.Format(key.ItemType), key.ItemType)
+ switch key.ItemType {
+ case btrfsitem.QGROUP_RELATION_KEY: //TODO, btrfsitem.QGROUP_INFO_KEY, btrfsitem.QGROUP_LIMIT_KEY:
+ panic("not implemented")
+ case btrfsitem.UUID_SUBVOL_KEY, btrfsitem.UUID_RECEIVED_SUBVOL_KEY:
+ fmt.Fprintf(&out, " %#08x)", key.Offset)
+ case btrfsitem.ROOT_ITEM_KEY:
+ fmt.Fprintf(&out, " %v)", btrfs.ObjID(key.Offset))
+ default:
+ if key.Offset == util.MaxUint64pp-1 {
+ fmt.Fprintf(&out, " -1)")
+ } else {
+ fmt.Fprintf(&out, " %v)", key.Offset)
+ }
+ }
+ return out.String()
+}
+
+func fmtTime(t btrfs.Time) string {
+ return fmt.Sprintf("%v.%v (%v)",
+ t.Sec, t.NSec, t.ToStd().Format("2006-01-02 15:04:05"))
+}
diff --git a/lib/btrfsprogs/btrfsrepair/clearnodes.go b/lib/btrfsprogs/btrfsrepair/clearnodes.go
new file mode 100644
index 0000000..595fef0
--- /dev/null
+++ b/lib/btrfsprogs/btrfsrepair/clearnodes.go
@@ -0,0 +1,91 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package btrfsrepair
+
+import (
+ "errors"
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsutil"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+func ClearBadNodes(fs *btrfs.FS) error {
+ var uuidsInited bool
+ var metadataUUID, chunkTreeUUID btrfs.UUID
+
+ var treeName string
+ var treeID btrfs.ObjID
+ btrfsutil.WalkAllTrees(fs, btrfsutil.WalkAllTreesHandler{
+ PreTree: func(name string, id btrfs.ObjID) {
+ treeName = name
+ treeID = id
+ },
+ Err: func(err error) {
+ fmt.Printf("error: %v\n", err)
+ },
+ UnsafeNodes: true,
+ TreeWalkHandler: btrfs.TreeWalkHandler{
+ Node: func(path btrfs.TreePath, node *util.Ref[btrfsvol.LogicalAddr, btrfs.Node], err error) error {
+ if err == nil {
+ if !uuidsInited {
+ metadataUUID = node.Data.Head.MetadataUUID
+ chunkTreeUUID = node.Data.Head.ChunkTreeUUID
+ uuidsInited = true
+ }
+ return nil
+ }
+ if !errors.Is(err, btrfs.ErrNotANode) {
+ err = btrfsutil.WalkErr{
+ TreeName: treeName,
+ Path: path,
+ Err: err,
+ }
+ fmt.Printf("error: %v\n", err)
+ return nil
+ }
+ origErr := err
+ if !uuidsInited {
+ // TODO(lukeshu): Is there a better way to get the chunk
+ // tree UUID?
+ return fmt.Errorf("cannot repair node@%v: not (yet?) sure what the chunk tree UUID is", node.Addr)
+ }
+ node.Data = btrfs.Node{
+ Size: node.Data.Size,
+ ChecksumType: node.Data.ChecksumType,
+ Head: btrfs.NodeHeader{
+ //Checksum: filled below,
+ MetadataUUID: metadataUUID,
+ Addr: node.Addr,
+ Flags: btrfs.NodeWritten,
+ BackrefRev: btrfs.MixedBackrefRev,
+ ChunkTreeUUID: chunkTreeUUID,
+ Generation: 0,
+ Owner: treeID,
+ NumItems: 0,
+ Level: path[len(path)-1].NodeLevel,
+ },
+ }
+ node.Data.Head.Checksum, err = node.Data.CalculateChecksum()
+ if err != nil {
+ return btrfsutil.WalkErr{
+ TreeName: treeName,
+ Path: path,
+ Err: err,
+ }
+ }
+ if err := node.Write(); err != nil {
+ return err
+ }
+
+ fmt.Printf("fixed node@%v (err was %v)\n", node.Addr, origErr)
+ return nil
+ },
+ },
+ })
+ return nil
+}
diff --git a/lib/btrfsprogs/btrfsutil/open.go b/lib/btrfsprogs/btrfsutil/open.go
new file mode 100644
index 0000000..cc081a6
--- /dev/null
+++ b/lib/btrfsprogs/btrfsutil/open.go
@@ -0,0 +1,28 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package btrfsutil
+
+import (
+ "fmt"
+ "os"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+)
+
+func Open(flag int, filenames ...string) (*btrfs.FS, error) {
+ fs := new(btrfs.FS)
+ for _, filename := range filenames {
+ fh, err := os.OpenFile(filename, flag, 0)
+ if err != nil {
+ _ = fs.Close()
+ return nil, fmt.Errorf("file %q: %w", filename, err)
+ }
+ if err := fs.AddDevice(&btrfs.Device{File: fh}); err != nil {
+ _ = fs.Close()
+ return nil, fmt.Errorf("file %q: %w", filename, err)
+ }
+ }
+ return fs, nil
+}
diff --git a/lib/btrfsprogs/btrfsutil/scan.go b/lib/btrfsprogs/btrfsutil/scan.go
new file mode 100644
index 0000000..d83525c
--- /dev/null
+++ b/lib/btrfsprogs/btrfsutil/scan.go
@@ -0,0 +1,55 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package btrfsutil
+
+import (
+ "errors"
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// ScanForNodes mimics btrfs-progs
+// cmds/rescue-chunk-recover.c:scan_one_device(), except rather than
+// doing something itself when it finds a node, it simply calls a
+// callback function.
+func ScanForNodes(dev *btrfs.Device, sb btrfs.Superblock, fn func(*util.Ref[btrfsvol.PhysicalAddr, btrfs.Node], error), prog func(btrfsvol.PhysicalAddr)) error {
+ devSize, err := dev.Size()
+ if err != nil {
+ return err
+ }
+
+ if sb.NodeSize < sb.SectorSize {
+ return fmt.Errorf("node_size(%v) < sector_size(%v)",
+ sb.NodeSize, sb.SectorSize)
+ }
+
+ for pos := btrfsvol.PhysicalAddr(0); pos+btrfsvol.PhysicalAddr(sb.NodeSize) < devSize; pos += btrfsvol.PhysicalAddr(sb.SectorSize) {
+ if util.InSlice(pos, btrfs.SuperblockAddrs) {
+ //fmt.Printf("sector@%v is a superblock\n", pos)
+ continue
+ }
+
+ if prog != nil {
+ prog(pos)
+ }
+
+ nodeRef, err := btrfs.ReadNode[btrfsvol.PhysicalAddr](dev, sb, pos, nil)
+ if err != nil && errors.Is(err, btrfs.ErrNotANode) {
+ continue
+ }
+ fn(nodeRef, err)
+
+ pos += btrfsvol.PhysicalAddr(sb.NodeSize) - btrfsvol.PhysicalAddr(sb.SectorSize)
+ }
+
+ if prog != nil {
+ prog(devSize)
+ }
+
+ return nil
+}
diff --git a/lib/btrfsprogs/btrfsutil/walk.go b/lib/btrfsprogs/btrfsutil/walk.go
new file mode 100644
index 0000000..0c54384
--- /dev/null
+++ b/lib/btrfsprogs/btrfsutil/walk.go
@@ -0,0 +1,119 @@
+// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package btrfsutil
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+type WalkErr struct {
+ TreeName string
+ Path btrfs.TreePath
+ Err error
+}
+
+func (e WalkErr) Unwrap() error { return e.Err }
+
+func (e WalkErr) Error() string {
+ if len(e.Path) == 0 {
+ return fmt.Sprintf("%v: %v", e.TreeName, e.Err)
+ }
+ return fmt.Sprintf("%v: %v: %v", e.TreeName, e.Path, e.Err)
+}
+
+type WalkAllTreesHandler struct {
+ Err func(error)
+ // Callbacks for entire trees
+ PreTree func(name string, id btrfs.ObjID)
+ PostTree func(name string, id btrfs.ObjID)
+ // Callbacks for nodes or smaller
+ UnsafeNodes bool
+ btrfs.TreeWalkHandler
+}
+
+// WalkAllTrees walks all trees in a *btrfs.FS. Rather than returning
+// an error, it calls errCb each time an error is encountered. The
+// error will always be of type WalkErr.
+func WalkAllTrees(fs *btrfs.FS, cbs WalkAllTreesHandler) {
+ var treeName string
+ handleErr := func(path btrfs.TreePath, err error) {
+ cbs.Err(WalkErr{
+ TreeName: treeName,
+ Path: path,
+ Err: err,
+ })
+ }
+
+ trees := []struct {
+ Name string
+ ID btrfs.ObjID
+ }{
+ {
+ Name: "root tree",
+ ID: btrfs.ROOT_TREE_OBJECTID,
+ },
+ {
+ Name: "chunk tree",
+ ID: btrfs.CHUNK_TREE_OBJECTID,
+ },
+ {
+ Name: "log tree",
+ ID: btrfs.TREE_LOG_OBJECTID,
+ },
+ {
+ Name: "block group tree",
+ ID: btrfs.BLOCK_GROUP_TREE_OBJECTID,
+ },
+ }
+ origItem := cbs.Item
+ cbs.Item = func(path btrfs.TreePath, item btrfs.Item) error {
+ if item.Head.Key.ItemType == btrfsitem.ROOT_ITEM_KEY {
+ trees = append(trees, struct {
+ Name string
+ ID btrfs.ObjID
+ }{
+ Name: fmt.Sprintf("tree %v (via %v %v)",
+ item.Head.Key.ObjectID.Format(0), treeName, path),
+ ID: item.Head.Key.ObjectID,
+ })
+ }
+ if origItem != nil {
+ return origItem(path, item)
+ }
+ return nil
+ }
+
+ if !cbs.UnsafeNodes {
+ origNode := cbs.Node
+ cbs.Node = func(path btrfs.TreePath, node *util.Ref[btrfsvol.LogicalAddr, btrfs.Node], err error) error {
+ if err != nil {
+ handleErr(path, err)
+ }
+ if node != nil && origNode != nil {
+ return origNode(path, node, nil)
+ }
+ return nil
+ }
+ }
+
+ for i := 0; i < len(trees); i++ {
+ tree := trees[i]
+ treeName = tree.Name
+ if cbs.PreTree != nil {
+ cbs.PreTree(treeName, tree.ID)
+ }
+ if err := fs.TreeWalk(tree.ID, cbs.TreeWalkHandler); err != nil {
+ handleErr(nil, err)
+ }
+ if cbs.PostTree != nil {
+ cbs.PostTree(treeName, tree.ID)
+ }
+ }
+}