author     Luke Shumaker <lukeshu@lukeshu.com>    2022-06-05 16:46:34 -0600
committer  Luke Shumaker <lukeshu@lukeshu.com>    2022-06-05 16:46:34 -0600
commit     74b109b0a75ae6648f9381252d8beb5ce6025df3 (patch)
tree       64b8b2250c67f749566761bab56676bed9c33846 /pkg
parent     e134a9fbd0d8ae43e2d24c5aabad8bf6a16190ed (diff)
factor out a btrfsmisc package
Diffstat (limited to 'pkg')
-rw-r--r--  pkg/btrfs/io1_device.go                                    7
-rw-r--r--  pkg/btrfs/types_btree.go                                  10
-rw-r--r--  pkg/btrfsmisc/fsck.go (renamed from pkg/btrfs/fsck.go)    16
-rw-r--r--  pkg/btrfsmisc/print_tree.go                              340
-rw-r--r--  pkg/util/generic.go (renamed from pkg/btrfs/util.go)       8
5 files changed, 362 insertions, 19 deletions
diff --git a/pkg/btrfs/io1_device.go b/pkg/btrfs/io1_device.go
index efd8cb9..55b7525 100644
--- a/pkg/btrfs/io1_device.go
+++ b/pkg/btrfs/io1_device.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ "lukeshu.com/btrfs-tools/pkg/binstruct"
"lukeshu.com/btrfs-tools/pkg/util"
)
@@ -19,7 +20,7 @@ func (dev Device) Size() (PhysicalAddr, error) {
return PhysicalAddr(fi.Size()), nil
}
-var superblockAddrs = []PhysicalAddr{
+var SuperblockAddrs = []PhysicalAddr{
0x00_0001_0000, // 64KiB
0x00_0400_0000, // 64MiB
0x40_0000_0000, // 256GiB
@@ -30,7 +31,7 @@ func (dev *Device) ReadAt(dat []byte, paddr PhysicalAddr) (int, error) {
}
func (dev *Device) Superblocks() ([]util.Ref[PhysicalAddr, Superblock], error) {
- const superblockSize = 0x1000
+ superblockSize := PhysicalAddr(binstruct.StaticSize(Superblock{}))
sz, err := dev.Size()
if err != nil {
@@ -38,7 +39,7 @@ func (dev *Device) Superblocks() ([]util.Ref[PhysicalAddr, Superblock], error) {
}
var ret []util.Ref[PhysicalAddr, Superblock]
- for i, addr := range superblockAddrs {
+ for i, addr := range SuperblockAddrs {
if addr+superblockSize <= sz {
superblock := util.Ref[PhysicalAddr, Superblock]{
File: dev,
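
The io1_device.go change replaces the hard-coded superblock size of 0x1000 with one derived from the struct layout via binstruct.StaticSize, and exports superblockAddrs as SuperblockAddrs so that code outside the btrfs package (the new btrfsmisc package below) can reference the mirror locations. A minimal sketch of such an external caller, using only identifiers that appear in this commit; the surrounding program is assumed:

    package main

    import (
        "fmt"

        "lukeshu.com/btrfs-tools/pkg/btrfs"
        "lukeshu.com/btrfs-tools/pkg/util"
    )

    func main() {
        // 64KiB: the primary superblock mirror location.
        addr := btrfs.PhysicalAddr(0x00_0001_0000)
        // InSlice and SuperblockAddrs are both exported by this commit.
        if util.InSlice(addr, btrfs.SuperblockAddrs) {
            fmt.Printf("0x%x is a superblock mirror location\n", addr)
        }
    }
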
diff --git a/pkg/btrfs/types_btree.go b/pkg/btrfs/types_btree.go
index d65e599..96b069a 100644
--- a/pkg/btrfs/types_btree.go
+++ b/pkg/btrfs/types_btree.go
@@ -137,18 +137,18 @@ func (node *Node) UnmarshalBinary(nodeBuf []byte) (int, error) {
dataOff := binstruct.StaticSize(NodeHeader{}) + int(item.Head.DataOffset)
dataSize := int(item.Head.DataSize)
if dataOff+dataSize > len(nodeBuf) {
- return max(n, lastRead), fmt.Errorf("(leaf): item references byte %d, but node only has %d bytes",
+ return util.Max(n, lastRead), fmt.Errorf("(leaf): item references byte %d, but node only has %d bytes",
dataOff+dataSize, len(nodeBuf))
}
dataBuf := nodeBuf[dataOff : dataOff+dataSize]
- firstRead = min(firstRead, dataOff)
- lastRead = max(lastRead, dataOff+dataSize)
+ firstRead = util.Min(firstRead, dataOff)
+ lastRead = util.Max(lastRead, dataOff+dataSize)
item.Body = btrfsitem.UnmarshalItem(item.Head.Key, dataBuf)
node.BodyLeaf = append(node.BodyLeaf, item)
}
node.Padding = nodeBuf[n:firstRead]
- return max(n, lastRead), nil
+ return util.Max(n, lastRead), nil
}
}
@@ -201,7 +201,7 @@ func (node Node) MarshalBinary() ([]byte, error) {
return ret, err
}
dataOff := binstruct.StaticSize(NodeHeader{}) + int(item.Head.DataOffset)
- minData = min(minData, dataOff)
+ minData = util.Min(minData, dataOff)
if copy(ret[dataOff:], dat) < len(dat) {
return ret, fmt.Errorf("btrfs.Node.MarshalBinary: need at least %d bytes, but .Size is only %d",
dataOff+len(dat), node.Size)
diff --git a/pkg/btrfs/fsck.go b/pkg/btrfsmisc/fsck.go
index 33ccd44..527329d 100644
--- a/pkg/btrfs/fsck.go
+++ b/pkg/btrfsmisc/fsck.go
@@ -1,15 +1,17 @@
-package btrfs
+package btrfsmisc
import (
"fmt"
"lukeshu.com/btrfs-tools/pkg/binstruct"
+ "lukeshu.com/btrfs-tools/pkg/btrfs"
+ "lukeshu.com/btrfs-tools/pkg/util"
)
// ScanForNodes mimics btrfs-progs
// cmds/rescue-chunk-recover.c:scan_one_device(), except it doesn't do
// anything but log when it finds a node.
-func ScanForNodes(dev *Device, sb Superblock) error {
+func ScanForNodes(dev *btrfs.Device, sb btrfs.Superblock) error {
devSize, err := dev.Size()
if err != nil {
return err
@@ -21,15 +23,15 @@ func ScanForNodes(dev *Device, sb Superblock) error {
}
nodeBuf := make([]byte, sb.NodeSize)
- for pos := PhysicalAddr(0); pos+PhysicalAddr(sb.SectorSize) < devSize; pos += PhysicalAddr(sb.SectorSize) {
- if inSlice(pos, superblockAddrs) {
+ for pos := btrfs.PhysicalAddr(0); pos+btrfs.PhysicalAddr(sb.SectorSize) < devSize; pos += btrfs.PhysicalAddr(sb.SectorSize) {
+ if util.InSlice(pos, btrfs.SuperblockAddrs) {
fmt.Printf("sector@%d is a superblock\n", pos)
continue
}
if _, err := dev.ReadAt(nodeBuf, pos); err != nil {
return fmt.Errorf("sector@%d: %w", pos, err)
}
- var nodeHeader NodeHeader
+ var nodeHeader btrfs.NodeHeader
if _, err := binstruct.Unmarshal(nodeBuf, &nodeHeader); err != nil {
return fmt.Errorf("sector@%d: %w", pos, err)
}
@@ -37,7 +39,7 @@ func ScanForNodes(dev *Device, sb Superblock) error {
//fmt.Printf("sector@%d does not look like a node\n", pos)
continue
}
- if !nodeHeader.Checksum.Equal(CRC32c(nodeBuf[0x20:])) {
+ if !nodeHeader.Checksum.Equal(btrfs.CRC32c(nodeBuf[0x20:])) {
fmt.Printf("sector@%d looks like a node but is corrupt (checksum doesn't match)\n", pos)
continue
}
@@ -45,7 +47,7 @@ func ScanForNodes(dev *Device, sb Superblock) error {
fmt.Printf("node@%d: physical_addr=0x%0X logical_addr=0x%0X generation=%d owner=%v level=%d\n",
pos, pos, nodeHeader.Addr, nodeHeader.Generation, nodeHeader.Owner, nodeHeader.Level)
- pos += PhysicalAddr(sb.NodeSize) - PhysicalAddr(sb.SectorSize)
+ pos += btrfs.PhysicalAddr(sb.NodeSize) - btrfs.PhysicalAddr(sb.SectorSize)
}
return nil
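
With ScanForNodes relocated into btrfsmisc, a caller now imports both packages and passes in a *btrfs.Device along with a parsed btrfs.Superblock. A hedged sketch follows: how a Device is opened is not shown in this commit, so the caller is assumed to supply one, and the .Data field on the util.Ref values returned by Superblocks() is assumed to hold the parsed superblock, mirroring how print_tree.go reads nodeRef.Data:

    package example

    import (
        "fmt"

        "lukeshu.com/btrfs-tools/pkg/btrfs"
        "lukeshu.com/btrfs-tools/pkg/btrfsmisc"
    )

    // scanDevice logs every node found on dev, using the primary
    // superblock's node/sector sizes.
    func scanDevice(dev *btrfs.Device) error {
        sbs, err := dev.Superblocks()
        if err != nil {
            return err
        }
        if len(sbs) == 0 {
            return fmt.Errorf("no superblocks found")
        }
        // sbs[0].Data holding the parsed Superblock is an assumption (see above).
        return btrfsmisc.ScanForNodes(dev, sbs[0].Data)
    }
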
diff --git a/pkg/btrfsmisc/print_tree.go b/pkg/btrfsmisc/print_tree.go
new file mode 100644
index 0000000..b7c3103
--- /dev/null
+++ b/pkg/btrfsmisc/print_tree.go
@@ -0,0 +1,340 @@
+package btrfsmisc
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "lukeshu.com/btrfs-tools/pkg/btrfs"
+ "lukeshu.com/btrfs-tools/pkg/btrfs/btrfsitem"
+ "lukeshu.com/btrfs-tools/pkg/util"
+)
+
+// PrintTree mimics btrfs-progs
+// kernel-shared/print-tree.c:btrfs_print_tree() and
+// kernel-shared/print-tree.c:btrfs_print_leaf()
+func PrintTree(fs *btrfs.FS, root btrfs.LogicalAddr) error {
+ nodeRef, err := fs.ReadNode(root)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error: %v\n", err)
+ return nil
+ }
+ node := nodeRef.Data
+ printHeaderInfo(node)
+ if node.Head.Level > 0 { // internal
+ for _, item := range node.BodyInternal {
+ fmt.Printf("\t%s block %d gen %d\n",
+ FmtKey(item.Key),
+ item.BlockPtr,
+ item.Generation)
+ }
+ for _, item := range node.BodyInternal {
+ if err := PrintTree(fs, item.BlockPtr); err != nil {
+ return err
+ }
+ }
+ } else { // leaf
+ for i, item := range node.BodyLeaf {
+ fmt.Printf("\titem %d %s itemoff %d itemsize %d\n",
+ i,
+ FmtKey(item.Head.Key),
+ item.Head.DataOffset,
+ item.Head.DataSize)
+ switch body := item.Body.(type) {
+ case btrfsitem.FreeSpaceHeader:
+ fmt.Printf("\t\tlocation %s\n", FmtKey(body.Location))
+ fmt.Printf("\t\tcache generation %d entries %d bitmaps %d\n",
+ body.Generation, body.NumEntries, body.NumBitmaps)
+ case btrfsitem.Inode:
+ fmt.Printf(""+
+ "\t\tgeneration %d transid %d size %d nbytes %d\n"+
+ "\t\tblock group %d mode %o links %d uid %d gid %d rdev %d\n"+
+ "\t\tsequence %d flags %v\n",
+ body.Generation, body.TransID, body.Size, body.NumBytes,
+ body.BlockGroup, body.Mode, body.NLink, body.UID, body.GID, body.RDev,
+ body.Sequence, body.Flags)
+ fmt.Printf("\t\tatime %s\n", fmtTime(body.ATime))
+ fmt.Printf("\t\tctime %s\n", fmtTime(body.CTime))
+ fmt.Printf("\t\tmtime %s\n", fmtTime(body.MTime))
+ fmt.Printf("\t\totime %s\n", fmtTime(body.OTime))
+ case btrfsitem.InodeRefList:
+ for _, ref := range body {
+ fmt.Printf("\t\tindex %d namelen %d name: %s\n",
+ ref.Index, ref.NameLen, ref.Name)
+ }
+ //case btrfsitem.INODE_EXTREF_KEY:
+ // // TODO
+ case btrfsitem.DirList:
+ for _, dir := range body {
+ fmt.Printf("\t\tlocation %s type %v\n",
+ FmtKey(dir.Location), dir.Type)
+ fmt.Printf("\t\ttransid %d data_len %d name_len %d\n",
+ dir.TransID, dir.DataLen, dir.NameLen)
+ fmt.Printf("\t\tname: %s\n", dir.Name)
+ if len(dir.Data) > 0 {
+ fmt.Printf("\t\tdata %s\n", dir.Data)
+ }
+ }
+ //case btrfsitem.DIR_LOG_INDEX_KEY, btrfsitem.DIR_LOG_ITEM_KEY:
+ // // TODO
+ case btrfsitem.Root:
+ fmt.Printf("\t\tgeneration %d root_dirid %d bytenr %d byte_limit %d bytes_used %d\n",
+ body.Generation, body.RootDirID, body.ByteNr, body.ByteLimit, body.BytesUsed)
+ fmt.Printf("\t\tlast_snapshot %d flags %s refs %d\n",
+ body.LastSnapshot, body.Flags, body.Refs)
+ fmt.Printf("\t\tdrop_progress %s drop_level %d\n",
+ FmtKey(body.DropProgress), body.DropLevel)
+ fmt.Printf("\t\tlevel %d generation_v2 %d\n",
+ body.Level, body.GenerationV2)
+ if body.Generation == body.GenerationV2 {
+ fmt.Printf("\t\tuuid %s\n", body.UUID)
+ fmt.Printf("\t\tparent_uuid %s\n", body.ParentUUID)
+ fmt.Printf("\t\treceived_uuid %s\n", body.ReceivedUUID)
+ fmt.Printf("\t\tctransid %d otransid %d stransid %d rtransid %d\n",
+ body.CTransID, body.OTransID, body.STransID, body.RTransID)
+ fmt.Printf("\t\tctime %s\n", fmtTime(body.CTime))
+ fmt.Printf("\t\totime %s\n", fmtTime(body.OTime))
+ fmt.Printf("\t\tstime %s\n", fmtTime(body.STime))
+ fmt.Printf("\t\trtime %s\n", fmtTime(body.RTime))
+ }
+ //case btrfsitem.ROOT_REF_KEY:
+ // // TODO
+ //case btrfsitem.ROOT_BACKREF_KEY:
+ // // TODO
+ case btrfsitem.Extent:
+ fmt.Printf("\t\trefs %d gen %d flags %v\n",
+ body.Head.Refs, body.Head.Generation, body.Head.Flags)
+ if body.Head.Flags.Has(btrfsitem.EXTENT_FLAG_TREE_BLOCK) {
+ fmt.Printf("\t\ttree block %s level %d\n",
+ FmtKey(body.Info.Key), body.Info.Level)
+ }
+ printExtentInlineRefs(body.Refs)
+ case btrfsitem.Metadata:
+ fmt.Printf("\t\trefs %d gen %d flags %v\n",
+ body.Head.Refs, body.Head.Generation, body.Head.Flags)
+ fmt.Printf("\t\ttree block skinny level %d\n", item.Head.Key.Offset)
+ printExtentInlineRefs(body.Refs)
+ //case btrfsitem.EXTENT_DATA_REF_KEY:
+ // // TODO
+ //case btrfsitem.SHARED_DATA_REF_KEY:
+ // // TODO
+ //case btrfsitem.EXTENT_CSUM_KEY:
+ // // TODO
+ case btrfsitem.FileExtent:
+ fmt.Printf("\t\tgeneration %d type %v\n",
+ body.Generation, body.Type)
+ switch body.Type {
+ case btrfsitem.FILE_EXTENT_INLINE:
+ fmt.Printf("\t\tinline extent data size %d ram_bytes %d compression %v\n",
+ len(body.BodyInline), body.RAMBytes, body.Compression)
+ case btrfsitem.FILE_EXTENT_PREALLOC:
+ fmt.Printf("\t\tprealloc data disk byte %d nr %d\n",
+ body.BodyPrealloc.DiskByteNr,
+ body.BodyPrealloc.DiskNumBytes)
+ fmt.Printf("\t\tprealloc data offset %d nr %d\n",
+ body.BodyPrealloc.Offset,
+ body.BodyPrealloc.NumBytes)
+ case btrfsitem.FILE_EXTENT_REG:
+ fmt.Printf("\t\textent data disk byte %d nr %d\n",
+ body.BodyReg.DiskByteNr,
+ body.BodyReg.DiskNumBytes)
+ fmt.Printf("\t\textenti data offset %d nr %d ram %d\n",
+ body.BodyReg.Offset,
+ body.BodyReg.NumBytes,
+ body.RAMBytes)
+ fmt.Printf("\t\textent compression %v\n",
+ body.Compression)
+ default:
+ fmt.Printf("\t\t(error) unknown file extent type %v", body.Type)
+ }
+ case btrfsitem.BlockGroup:
+ fmt.Printf("\t\tblock group used %d chunk_objectid %d flags %v\n",
+ body.Used, body.ChunkObjectID, body.Flags)
+ case btrfsitem.FreeSpaceInfo:
+ fmt.Printf("\t\tfree space info extent count %d flags %d\n",
+ body.ExtentCount, body.Flags)
+ case btrfsitem.FreeSpaceBitmap:
+ fmt.Printf("\t\tfree space bitmap\n")
+ case btrfsitem.Chunk:
+ fmt.Printf("\t\tlength %d owner %d stripe_len %d type %v\n",
+ body.Size, body.Owner, body.StripeLen, body.Type)
+ fmt.Printf("\t\tio_align %d io_width %d sector_size %d\n",
+ body.IOOptimalAlign, body.IOOptimalWidth, body.IOMinSize)
+ fmt.Printf("\t\tnum_stripes %d sub_stripes %d\n",
+ body.NumStripes, body.SubStripes)
+ for i, stripe := range body.Stripes {
+ fmt.Printf("\t\t\tstripe %d devid %d offset %d\n",
+ i, stripe.DeviceID, stripe.Offset)
+ fmt.Printf("\t\t\tdev_uuid %s\n",
+ stripe.DeviceUUID)
+ }
+ case btrfsitem.Dev:
+ fmt.Printf(""+
+ "\t\tdevid %d total_bytes %d bytes_used %d\n"+
+ "\t\tio_align %d io_width %d sector_size %d type %d\n"+
+ "\t\tgeneration %d start_offset %d dev_group %d\n"+
+ "\t\tseek_speed %d bandwidth %d\n"+
+ "\t\tuuid %s\n"+
+ "\t\tfsid %s\n",
+ body.DeviceID, body.NumBytes, body.NumBytesUsed,
+ body.IOOptimalAlign, body.IOOptimalWidth, body.IOMinSize, body.Type,
+ body.Generation, body.StartOffset, body.DevGroup,
+ body.SeekSpeed, body.Bandwidth,
+ body.DevUUID,
+ body.FSUUID)
+ case btrfsitem.DevExtent:
+ fmt.Printf(""+
+ "\t\tdev extent chunk_tree %d\n"+
+ "\t\tchunk_objectid %d chunk_offset %d length %d\n"+
+ "\t\tchunk_tree_uuid %s\n",
+ body.ChunkTree, body.ChunkObjectID, body.ChunkOffset, body.Length,
+ body.ChunkTreeUUID)
+ //case btrfsitem.QGROUP_STATUS_KEY:
+ // // TODO
+ //case btrfsitem.QGROUP_INFO_KEY:
+ // // TODO
+ //case btrfsitem.QGROUP_LIMIT_KEY:
+ // // TODO
+ case btrfsitem.UUIDMap:
+ for _, subvolID := range body {
+ fmt.Printf("\t\tsubvol_id %d\n",
+ subvolID)
+ }
+ //case btrfsitem.STRING_ITEM_KEY:
+ // // TODO
+ case btrfsitem.DevStats:
+ fmt.Printf("\t\tpersistent item objectid %s offset %d\n",
+ item.Head.Key.ObjectID.Format(item.Head.Key.ItemType), item.Head.Key.Offset)
+ switch item.Head.Key.ObjectID {
+ case btrfs.DEV_STATS_OBJECTID:
+ fmt.Printf("\t\tdevice stats\n")
+ fmt.Printf("\t\twrite_errs %d read_errs %d flush_errs %d corruption_errs %d generation %d\n",
+ body.Values[btrfsitem.DEV_STAT_WRITE_ERRS],
+ body.Values[btrfsitem.DEV_STAT_READ_ERRS],
+ body.Values[btrfsitem.DEV_STAT_FLUSH_ERRS],
+ body.Values[btrfsitem.DEV_STAT_CORRUPTION_ERRS],
+ body.Values[btrfsitem.DEV_STAT_GENERATION_ERRS])
+ default:
+ fmt.Printf("\t\tunknown persistent item objectid %d\n", item.Head.Key.ObjectID)
+ }
+ //case btrfsitem.TEMPORARY_ITEM_KEY:
+ // // TODO
+ case btrfsitem.Empty:
+ switch item.Head.Key.ItemType {
+ case btrfsitem.ORPHAN_ITEM_KEY: // 48
+ fmt.Printf("\t\torphan item\n")
+ case btrfsitem.TREE_BLOCK_REF_KEY: // 176
+ fmt.Printf("\t\ttree block backref\n")
+ case btrfsitem.SHARED_BLOCK_REF_KEY: // 182
+ fmt.Printf("\t\tshared block backref\n")
+ case btrfsitem.FREE_SPACE_EXTENT_KEY: // 199
+ fmt.Printf("\t\tfree space extent\n")
+ case btrfsitem.QGROUP_RELATION_KEY: // 246
+ // do nothing
+ //case btrfsitem.EXTENT_REF_V0_KEY:
+ // fmt.Printf("\t\textent ref v0 (deprecated)\n")
+ //case btrfsitem.CSUM_ITEM_KEY:
+ // fmt.Printf("\t\tcsum item\n")
+ default:
+ fmt.Printf("\t\t(error) unhandled empty item type: %v\n", item.Head.Key.ItemType)
+ }
+ case btrfsitem.Error:
+ fmt.Printf("\t\t(error) error item: %v\n", body.Err)
+ default:
+ fmt.Printf("\t\t(error) unhandled item type: %T\n", body)
+ }
+ }
+ }
+ return nil
+}
+
+// printHeaderInfo mimics btrfs-progs kernel-shared/print-tree.c:print_header_info()
+func printHeaderInfo(node btrfs.Node) {
+ var typename string
+ if node.Head.Level > 0 { // internal node
+ typename = "node"
+ fmt.Printf("node %d level %d items %d free space %d",
+ node.Head.Addr,
+ node.Head.Level,
+ node.Head.NumItems,
+ node.MaxItems()-node.Head.NumItems)
+ } else { // leaf node
+ typename = "leaf"
+ fmt.Printf("leaf %d items %d free space %d",
+ node.Head.Addr,
+ node.Head.NumItems,
+ node.LeafFreeSpace())
+ }
+ fmt.Printf(" generation %d owner %v\n",
+ node.Head.Generation,
+ node.Head.Owner)
+
+ fmt.Printf("%s %d flags %s backref revision %d\n",
+ typename,
+ node.Head.Addr,
+ node.Head.Flags,
+ node.Head.BackrefRev)
+
+ fmt.Printf("checksum stored %x\n", node.Head.Checksum)
+ if calcSum, err := node.CalculateChecksum(); err != nil {
+ fmt.Printf("checksum calced %v\n", err)
+ } else {
+ fmt.Printf("checksum calced %x\n", calcSum)
+ }
+
+ fmt.Printf("fs uuid %s\n", node.Head.MetadataUUID)
+ fmt.Printf("chunk uuid %s\n", node.Head.ChunkTreeUUID)
+}
+
+// printExtentInlineRefs mimics part of btrfs-progs kernel-shared/print-tree.c:print_extent_item()
+func printExtentInlineRefs(refs []btrfsitem.ExtentInlineRef) {
+ for _, ref := range refs {
+ switch subitem := ref.Body.(type) {
+ case btrfsitem.Empty:
+ switch ref.Type {
+ case btrfsitem.TREE_BLOCK_REF_KEY:
+ fmt.Printf("\t\ttree block backref root %v\n",
+ btrfs.ObjID(ref.Offset))
+ case btrfsitem.SHARED_BLOCK_REF_KEY:
+ fmt.Printf("\t\tshared block backref parent %d\n",
+ ref.Offset)
+ default:
+ fmt.Printf("\t\t(error) unexpected empty sub-item type: %v\n", ref.Type)
+ }
+ case btrfsitem.ExtentDataRef:
+ fmt.Printf("\t\textent data backref root %v objectid %d offset %d count %d\n",
+ subitem.Root, subitem.ObjectID, subitem.Offset, subitem.Count)
+ case btrfsitem.SharedDataRef:
+ fmt.Printf("\t\tshared data backref parent %d count %d\n",
+ ref.Offset, subitem.Count)
+ default:
+ fmt.Printf("\t\t(error) unexpected sub-item type: %T\n", subitem)
+ }
+ }
+}
+
+// FmtKey mimics btrfs-progs kernel-shared/print-tree.c:btrfs_print_key()
+func FmtKey(key btrfs.Key) string {
+ var out strings.Builder
+ fmt.Fprintf(&out, "key (%s %v", key.ObjectID.Format(key.ItemType), key.ItemType)
+ switch key.ItemType {
+ case btrfsitem.QGROUP_RELATION_KEY: //TODO, btrfsitem.QGROUP_INFO_KEY, btrfsitem.QGROUP_LIMIT_KEY:
+ panic("not implemented")
+ case btrfsitem.UUID_SUBVOL_KEY, btrfsitem.UUID_RECEIVED_SUBVOL_KEY:
+ fmt.Fprintf(&out, " 0x%016x)", key.Offset)
+ case btrfsitem.ROOT_ITEM_KEY:
+ fmt.Fprintf(&out, " %v)", btrfs.ObjID(key.Offset))
+ default:
+ if key.Offset == util.MaxUint64pp-1 {
+ fmt.Fprintf(&out, " -1)")
+ } else {
+ fmt.Fprintf(&out, " %d)", key.Offset)
+ }
+ }
+ return out.String()
+}
+
+func fmtTime(t btrfs.Time) string {
+ return fmt.Sprintf("%d.%d (%s)",
+ t.Sec, t.NSec, t.ToStd().Format("2006-01-02 15:04:05"))
+}
diff --git a/pkg/btrfs/util.go b/pkg/util/generic.go
index 671d6fc..79096ab 100644
--- a/pkg/btrfs/util.go
+++ b/pkg/util/generic.go
@@ -1,10 +1,10 @@
-package btrfs
+package util
import (
"golang.org/x/exp/constraints"
)
-func inSlice[T comparable](needle T, haystack []T) bool {
+func InSlice[T comparable](needle T, haystack []T) bool {
for _, straw := range haystack {
if needle == straw {
return true
@@ -13,14 +13,14 @@ func inSlice[T comparable](needle T, haystack []T) bool {
return false
}
-func max[T constraints.Ordered](a, b T) T {
+func Max[T constraints.Ordered](a, b T) T {
if a > b {
return a
}
return b
}
-func min[T constraints.Ordered](a, b T) T {
+func Min[T constraints.Ordered](a, b T) T {
if a < b {
return a
}
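
Finally, the helpers that were private to package btrfs are now exported generics in pkg/util, so any package in the tree can use them. A small sketch exercising them exactly as defined above:

    package example

    import (
        "fmt"

        "lukeshu.com/btrfs-tools/pkg/util"
    )

    func demo() {
        fmt.Println(util.Max(3, 7))                        // 7
        fmt.Println(util.Min(2.5, 1.0))                    // 1
        fmt.Println(util.InSlice("b", []string{"a", "b"})) // true
    }
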