Diffstat (limited to 'lib')
-rw-r--r--  lib/containers/syncpool.go   33
-rw-r--r--  lib/containers/syncvalue.go  67
2 files changed, 100 insertions, 0 deletions
diff --git a/lib/containers/syncpool.go b/lib/containers/syncpool.go
new file mode 100644
index 0000000..cb5398d
--- /dev/null
+++ b/lib/containers/syncpool.go
@@ -0,0 +1,33 @@
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package containers
+
+import (
+	"sync"
+)
+
+type SyncPool[T any] struct {
+	New func() T
+
+	inner sync.Pool
+}
+
+func (p *SyncPool[T]) Get() (val T, ok bool) {
+	_val := p.inner.Get()
+	switch {
+	case _val != nil:
+		//nolint:forcetypeassert // Typed wrapper around untyped lib.
+		return _val.(T), true
+	case p.New != nil:
+		return p.New(), true
+	default:
+		var zero T
+		return zero, false
+	}
+}
+
+func (p *SyncPool[T]) Put(val T) {
+	p.inner.Put(val)
+}
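
A minimal sketch of how the new SyncPool[T] might be used, assuming a hypothetical import path "example.com/lib/containers" (substitute the repository's real module path). Because New is set, Get never reports ok=false here; Put hands the value back to the underlying sync.Pool for reuse by a later Get.

	package main

	import (
		"bytes"
		"fmt"

		"example.com/lib/containers" // hypothetical import path
	)

	func main() {
		// A pool of reusable *bytes.Buffer values.
		pool := containers.SyncPool[*bytes.Buffer]{
			New: func() *bytes.Buffer { return new(bytes.Buffer) },
		}

		buf, ok := pool.Get() // pool is empty, so New is called; ok == true
		fmt.Println(ok)       // true
		buf.WriteString("hello")

		buf.Reset()
		pool.Put(buf) // recycle the buffer; a later Get may return it without allocating
	}
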
diff --git a/lib/containers/syncvalue.go b/lib/containers/syncvalue.go
new file mode 100644
index 0000000..160db3c
--- /dev/null
+++ b/lib/containers/syncvalue.go
@@ -0,0 +1,67 @@
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package containers
+
+import (
+	"sync"
+)
+
+// SyncValue is a typed equivalent of sync/atomic.Value.
+//
+// It is not actually a wrapper around sync/atomic.Value for
+// allocation-performance reasons.
+type SyncValue[T comparable] struct {
+	mu  sync.Mutex
+	ok  bool
+	val T
+}
+
+// This uses a dumb mutex-based solution because
+//
+// 1. Performance is good enough, because in the fast-path mutexes
+//    use the same compare-and-swap as sync/atomic.Value; and because
+//    all of these methods are short, we're unlikely to hit the
+//    mutex's slow path.
+//
+// 2. We could use sync/atomic.Pointer[T], which by itself would have
+//    the same performance characteristics as sync/atomic.Value but
+//    without the benefit of runtime_procPin()/runtime_procUnpin().
+//    We want to avoid that because it means we're doing an
+//    allocation for every store/swap; avoiding that is our whole
+//    reason for not just wrapping sync/atomic.Value. So then we'd
+//    want to use a SyncPool to reuse allocations; but (1) that adds
+//    more sync-overhead, and (2) it also gets trickier because we'd
+//    have to be careful about not adding a pointer back to the pool
+//    when load has grabbed the pointer but not yet dereferenced it.
+
+func (v *SyncValue[T]) Load() (val T, ok bool) {
+	v.mu.Lock()
+	defer v.mu.Unlock()
+	return v.val, v.ok
+}
+
+func (v *SyncValue[T]) Store(val T) {
+	v.mu.Lock()
+	defer v.mu.Unlock()
+	v.val, v.ok = val, true
+}
+
+func (v *SyncValue[T]) Swap(newV T) (oldV T, oldOK bool) {
+	v.mu.Lock()
+	defer v.mu.Unlock()
+	oldV, oldOK = v.val, v.ok
+	v.val, v.ok = newV, true
+	return
+}
+
+func (v *SyncValue[T]) CompareAndSwap(oldV, newV T) (swapped bool) {
+	v.mu.Lock()
+	defer v.mu.Unlock()
+	if !v.ok || v.val != oldV {
+		return false
+	}
+	v.val = newV
+	return true
+}
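
And a companion sketch for SyncValue[T], under the same hypothetical import path, walking through the Load/Store/Swap/CompareAndSwap semantics: Load reports ok=false until the first Store, Swap returns the previous value, and CompareAndSwap only succeeds when a value is already set and equal to the old argument.

	package main

	import (
		"fmt"

		"example.com/lib/containers" // hypothetical import path
	)

	func main() {
		var v containers.SyncValue[string] // the zero value is ready to use

		_, ok := v.Load() // nothing stored yet
		fmt.Println(ok)   // false

		v.Store("a")
		old, wasSet := v.Swap("b") // store "b", report what was there before
		fmt.Println(old, wasSet)   // a true

		fmt.Println(v.CompareAndSwap("b", "c")) // true: current value is "b"
		fmt.Println(v.CompareAndSwap("b", "d")) // false: current value is now "c"
	}

Unlike sync/atomic.Value, this works for any comparable T and, per the rationale comment above, avoids a heap allocation on every Store/Swap.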