Diffstat (limited to 'lib/containers/lrucache.go')
-rw-r--r--  lib/containers/lrucache.go  251
1 file changed, 174 insertions, 77 deletions
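
The new code implements a 'Cache' interface and consumes a 'Source' interface plus the LinkedList/LinkedListEntry containers, all defined elsewhere in lib/containers and not shown in this diff. As a reading aid, here is a minimal sketch of what those two interfaces presumably look like, inferred purely from how they are used below (the actual definitions may differ):

// Sketch inferred from usage in this diff; not the actual
// definitions from lib/containers. Assumes "context" is imported.

// Source loads values into the cache and flushes them back out.
type Source[K comparable, V any] interface {
	// Load fills *v with the value for key k.
	Load(ctx context.Context, k K, v *V)
	// Flush writes any changes to *v back to the backing store.
	Flush(ctx context.Context, v *V)
}

// Cache is a key/value cache whose entries are pinned between
// Acquire and Release.
type Cache[K comparable, V any] interface {
	// Acquire returns the value for k, pinning it until the
	// matching Release call.
	Acquire(ctx context.Context, k K) *V
	// Release unpins a value previously returned by Acquire.
	Release(k K)
	// Delete removes k, blocking while k is pinned.
	Delete(k K)
	// Flush writes all cached values back through the Source.
	Flush(ctx context.Context)
}
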
diff --git a/lib/containers/lrucache.go b/lib/containers/lrucache.go
index 94094b9..d2aff41 100644
--- a/lib/containers/lrucache.go
+++ b/lib/containers/lrucache.go
@@ -4,112 +4,209 @@
package containers
+import (
+ "context"
+ "fmt"
+ "sync"
+)
+
+// NewLRUCache returns a new thread-safe Cache with a simple
+// Least-Recently-Used eviction policy.
+//
+// It is invalid (runtime-panic) to call NewLRUCache with a
+// non-positive capacity or a nil source.
+//
+//nolint:predeclared // 'cap' is the best name for it.
+func NewLRUCache[K comparable, V any](cap int, src Source[K, V]) Cache[K, V] {
+ if cap <= 0 {
+ panic(fmt.Errorf("containers.NewLRUCache: invalid capacity: %v", cap))
+ }
+ if src == nil {
+ panic(fmt.Errorf("containers.NewLRUCache: nil source"))
+ }
+ ret := &lruCache[K, V]{
+ cap: cap,
+ src: src,
+
+ byName: make(map[K]*LinkedListEntry[lruEntry[K, V]], cap),
+ }
+ for i := 0; i < cap; i++ {
+ ret.unused.Store(new(LinkedListEntry[lruEntry[K, V]]))
+ }
+ return ret
+}
+
type lruEntry[K comparable, V any] struct {
key K
val V
+
+ refs int
+ del chan struct{} // non-nil if a delete is waiting on .refs to drop to zero
}
-// lruCache is a NON-thread-safe cache with Least Recently Used
-// eviction.
-//
-// lruCache is non-thread-safe and unexported because it only exists
-// for internal use by the more sophisticated ARCache.
-//
-// Similarly, it does not implement some common features of an LRU
-// cache, such as specifying a maximum size (instead requiring the
-// `.EvictOldest` method to be called), because it is only meant for
-// use by the ARCache; which does not need such features.
type lruCache[K comparable, V any] struct {
- // OnRemove is (if non-nil) called *after* removal whenever
- // an entry is removed, for any reason:
- //
- // - evicted by .EvictOldest()
- // - replaced by .Store(k, v)
- // - deleted by .Delete(k)
- OnRemove func(K, V)
- // OnEvict is (if non-nil) called *after* removal whenever an
- // entry is evicted by .EvictOldest(). If both OnEvict and
- // OnRemove are set, OnRemove is called first.
- OnEvict func(K, V)
-
- byAge LinkedList[lruEntry[K, V]]
- byName map[K]*LinkedListEntry[lruEntry[K, V]]
+ cap int
+ src Source[K, V]
+
+ mu sync.Mutex
+
+ // Pinned entries are in .byName, but not in any LinkedList.
+ unused LinkedList[lruEntry[K, V]]
+ evictable LinkedList[lruEntry[K, V]] // only entries with .refs==0
+ byName map[K]*LinkedListEntry[lruEntry[K, V]]
+
+ waiters LinkedList[chan struct{}]
}
-var _ Map[int, string] = (*lruCache[int, string])(nil)
+// Blocking primitives /////////////////////////////////////////////////////////
-func (c *lruCache[K, V]) rem(entry *LinkedListEntry[lruEntry[K, V]]) {
- k, v := entry.Value.key, entry.Value.val
- delete(c.byName, entry.Value.key)
- c.byAge.Delete(entry)
- if c.OnRemove != nil {
- c.OnRemove(k, v)
+// Because of pinning, there might not actually be an available entry
+// for us to use/evict. If we need an entry to use or evict, we'll
+// call waitForAvail to block until there is an entry that is either
+// unused or evictable. We'll give waiters FIFO priority.
+func (c *lruCache[K, V]) waitForAvail() {
+ if !(c.unused.IsEmpty() && c.evictable.IsEmpty()) {
+ return
}
+ ch := make(chan struct{})
+ c.waiters.Store(&LinkedListEntry[chan struct{}]{Value: ch})
+ c.mu.Unlock()
+ <-ch
+ c.mu.Lock()
}
-func (c *lruCache[K, V]) evict(entry *LinkedListEntry[lruEntry[K, V]]) {
- k, v := entry.Value.key, entry.Value.val
- c.rem(entry)
- if c.OnEvict != nil {
- c.OnEvict(k, v)
+// notifyAvail is called when an entry becomes unused or evictable,
+// and wakes up the highest-priority .waitForAvail() waiter (if there
+// is one).
+func (c *lruCache[K, V]) notifyAvail() {
+ waiter := c.waiters.Oldest
+ if waiter == nil {
+ return
}
+ c.waiters.Delete(waiter)
+ close(waiter.Value)
}
-// EvictOldest deletes the oldest entry in the cache.
-//
-// It is a panic to call EvictOldest if the cache is empty.
-func (c *lruCache[K, V]) EvictOldest() {
- c.evict(c.byAge.Oldest())
+// Calling .Delete(k) on an entry that is pinned needs to block until
+// the entry is no longer pinned.
+func (c *lruCache[K, V]) unlockAndWaitForDel(entry *LinkedListEntry[lruEntry[K, V]]) {
+ if entry.Value.del == nil {
+ entry.Value.del = make(chan struct{})
+ }
+ ch := entry.Value.del
+ c.mu.Unlock()
+ <-ch
}
-// Store a key/value pair in to the cache.
-func (c *lruCache[K, V]) Store(k K, v V) {
- if c.byName == nil {
- c.byName = make(map[K]*LinkedListEntry[lruEntry[K, V]])
- } else if old, ok := c.byName[k]; ok {
- c.rem(old)
+// notifyOfDel unblocks any calls to .Delete(k), notifying them that
+// the entry has been deleted and they can now return.
+func (*lruCache[K, V]) notifyOfDel(entry *LinkedListEntry[lruEntry[K, V]]) {
+ if entry.Value.del != nil {
+ close(entry.Value.del)
+ entry.Value.del = nil
}
- c.byName[k] = c.byAge.Store(lruEntry[K, V]{key: k, val: v})
}
-// Load an entry from the cache, recording a "use" for the purposes of
-// "least-recently-used" eviction.
-func (c *lruCache[K, V]) Load(k K) (v V, ok bool) {
- entry, ok := c.byName[k]
- if !ok {
- var zero V
- return zero, false
+// Main implementation /////////////////////////////////////////////////////////
+
+// lruReplace is the LRU(c) replacement policy. It returns an entry
+// that is not in any list.
+func (c *lruCache[K, V]) lruReplace() *LinkedListEntry[lruEntry[K, V]] {
+ c.waitForAvail()
+
+ // If the cache isn't full, no need to do an eviction.
+ if entry := c.unused.Oldest; entry != nil {
+ c.unused.Delete(entry)
+ return entry
}
- c.byAge.MoveToNewest(entry)
- return entry.Value.val, true
+
+ // Replace the oldest entry.
+ entry := c.evictable.Oldest
+ c.evictable.Delete(entry)
+ delete(c.byName, entry.Value.key)
+ return entry
}
-// Peek is like Load, but doesn't count as a "use" for
-// "least-recently-used".
-func (c *lruCache[K, V]) Peek(k K) (v V, ok bool) {
- entry, ok := c.byName[k]
- if !ok {
- var zero V
- return zero, false
+// Acquire implements the 'Cache' interface.
+func (c *lruCache[K, V]) Acquire(ctx context.Context, k K) *V {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ entry := c.byName[k]
+ if entry != nil {
+ if entry.Value.refs == 0 {
+ c.evictable.Delete(entry)
+ }
+ entry.Value.refs++
+ } else {
+ entry = c.lruReplace()
+
+ entry.Value.key = k
+ c.src.Load(ctx, k, &entry.Value.val)
+ entry.Value.refs = 1
+
+ c.byName[k] = entry
}
- return entry.Value.val, true
-}
-// Has returns whether an entry is present in the cache. Calling Has
-// does not count as a "use" for "least-recently-used".
-func (c *lruCache[K, V]) Has(k K) bool {
- _, ok := c.byName[k]
- return ok
+ return &entry.Value.val
}
-// Delete an entry from the cache.
+// Delete implements the 'Cache' interface.
func (c *lruCache[K, V]) Delete(k K) {
- if entry, ok := c.byName[k]; ok {
- c.rem(entry)
+ c.mu.Lock()
+
+ entry := c.byName[k]
+ if entry == nil {
+ c.mu.Unlock()
+ return
+ }
+ if entry.Value.refs > 0 {
+ // Let .Release(k) do the deletion when the
+ // refcount drops to 0.
+ c.unlockAndWaitForDel(entry)
+ return
+ }
+ delete(c.byName, k)
+ c.evictable.Delete(entry)
+ c.unused.Store(entry)
+
+ // No need to call c.notifyAvail(); if we were able to delete
+ // it, it was already available.
+
+ c.mu.Unlock()
+}
+
+// Release implements the 'Cache' interface.
+func (c *lruCache[K, V]) Release(k K) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ entry := c.byName[k]
+ if entry == nil || entry.Value.refs <= 0 {
+ panic(fmt.Errorf("containers.lruCache.Release called on key that is not held: %v", k))
+ }
+
+ entry.Value.refs--
+ if entry.Value.refs == 0 {
+ if entry.Value.del != nil {
+ delete(c.byName, k)
+ c.unused.Store(entry)
+ c.notifyOfDel(entry)
+ } else {
+ c.evictable.Store(entry)
+ }
+ c.notifyAvail()
}
}
-// Len returns the number of entries in the cache.
-func (c *lruCache[K, V]) Len() int {
- return len(c.byName)
+// Flush implements the 'Cache' interface.
+func (c *lruCache[K, V]) Flush(ctx context.Context) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ for _, entry := range c.byName {
+ c.src.Flush(ctx, &entry.Value.val)
+ }
+ for entry := c.unused.Oldest; entry != nil; entry = entry.Newer {
+ c.src.Flush(ctx, &entry.Value.val)
+ }
}