// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package lru

import (
	"errors"
	"sync"

	"github.com/hashicorp/golang-lru/v2/simplelru"
)
const (
	// Default2QRecentRatio is the fraction of a 2Q cache reserved for
	// entries that were added recently but have been accessed only once.
	Default2QRecentRatio = 0.25

	// Default2QGhostEntries is the fraction of a 2Q cache used for the
	// ghost list, which remembers keys recently evicted from the
	// recent list.
	Default2QGhostEntries = 0.50
)
| 22 | + |
| 23 | +// TwoQueueCache is a thread-safe fixed size 2Q cache. |
| 24 | +// 2Q is an enhancement over the standard LRU cache |
| 25 | +// in that it tracks both frequently and recently used |
| 26 | +// entries separately. This avoids a burst in access to new |
| 27 | +// entries from evicting frequently used entries. It adds some |
| 28 | +// additional tracking overhead to the standard LRU cache, and is |
| 29 | +// computationally about 2x the cost, and adds some metadata over |
| 30 | +// head. The ARCCache is similar, but does not require setting any |
| 31 | +// parameters. |
| 32 | +type TwoQueueCache[K comparable, V any] struct { |
| 33 | + size int |
| 34 | + recentSize int |
| 35 | + recentRatio float64 |
| 36 | + ghostRatio float64 |
| 37 | + |
| 38 | + recent simplelru.LRUCache[K, V] |
| 39 | + frequent simplelru.LRUCache[K, V] |
| 40 | + recentEvict simplelru.LRUCache[K, struct{}] |
| 41 | + lock sync.RWMutex |
| 42 | +} |
| 43 | + |
| 44 | +// New2Q creates a new TwoQueueCache using the default |
| 45 | +// values for the parameters. |
| 46 | +func New2Q[K comparable, V any](size int) (*TwoQueueCache[K, V], error) { |
| 47 | + return New2QParams[K, V](size, Default2QRecentRatio, Default2QGhostEntries) |
| 48 | +} |
| 49 | + |
| 50 | +// New2QParams creates a new TwoQueueCache using the provided |
| 51 | +// parameter values. |
| 52 | +func New2QParams[K comparable, V any](size int, recentRatio, ghostRatio float64) (*TwoQueueCache[K, V], error) { |
| 53 | + if size <= 0 { |
| 54 | + return nil, errors.New("invalid size") |
| 55 | + } |
| 56 | + if recentRatio < 0.0 || recentRatio > 1.0 { |
| 57 | + return nil, errors.New("invalid recent ratio") |
| 58 | + } |
| 59 | + if ghostRatio < 0.0 || ghostRatio > 1.0 { |
| 60 | + return nil, errors.New("invalid ghost ratio") |
| 61 | + } |
| 62 | + |
| 63 | + // Determine the sub-sizes |
| 64 | + recentSize := int(float64(size) * recentRatio) |
| 65 | + evictSize := int(float64(size) * ghostRatio) |
| 66 | + |
| 67 | + // Allocate the LRUs |
| 68 | + recent, err := simplelru.NewLRU[K, V](size, nil) |
| 69 | + if err != nil { |
| 70 | + return nil, err |
| 71 | + } |
| 72 | + frequent, err := simplelru.NewLRU[K, V](size, nil) |
| 73 | + if err != nil { |
| 74 | + return nil, err |
| 75 | + } |
| 76 | + recentEvict, err := simplelru.NewLRU[K, struct{}](evictSize, nil) |
| 77 | + if err != nil { |
| 78 | + return nil, err |
| 79 | + } |
| 80 | + |
| 81 | + // Initialize the cache |
| 82 | + c := &TwoQueueCache[K, V]{ |
| 83 | + size: size, |
| 84 | + recentSize: recentSize, |
| 85 | + recentRatio: recentRatio, |
| 86 | + ghostRatio: ghostRatio, |
| 87 | + recent: recent, |
| 88 | + frequent: frequent, |
| 89 | + recentEvict: recentEvict, |
| 90 | + } |
| 91 | + return c, nil |
| 92 | +} |
| 93 | + |
| 94 | +// Get looks up a key's value from the cache. |
| 95 | +func (c *TwoQueueCache[K, V]) Get(key K) (value V, ok bool) { |
| 96 | + c.lock.Lock() |
| 97 | + defer c.lock.Unlock() |
| 98 | + |
| 99 | + // Check if this is a frequent value |
| 100 | + if val, ok := c.frequent.Get(key); ok { |
| 101 | + return val, ok |
| 102 | + } |
| 103 | + |
| 104 | + // If the value is contained in recent, then we |
| 105 | + // promote it to frequent |
| 106 | + if val, ok := c.recent.Peek(key); ok { |
| 107 | + c.recent.Remove(key) |
| 108 | + c.frequent.Add(key, val) |
| 109 | + return val, ok |
| 110 | + } |
| 111 | + |
| 112 | + // No hit |
| 113 | + return |
| 114 | +} |
| 115 | + |
| 116 | +// Add adds a value to the cache. |
| 117 | +func (c *TwoQueueCache[K, V]) Add(key K, value V) { |
| 118 | + c.lock.Lock() |
| 119 | + defer c.lock.Unlock() |
| 120 | + |
| 121 | + // Check if the value is frequently used already, |
| 122 | + // and just update the value |
| 123 | + if c.frequent.Contains(key) { |
| 124 | + c.frequent.Add(key, value) |
| 125 | + return |
| 126 | + } |
| 127 | + |
| 128 | + // Check if the value is recently used, and promote |
| 129 | + // the value into the frequent list |
| 130 | + if c.recent.Contains(key) { |
| 131 | + c.recent.Remove(key) |
| 132 | + c.frequent.Add(key, value) |
| 133 | + return |
| 134 | + } |
| 135 | + |
| 136 | + // If the value was recently evicted, add it to the |
| 137 | + // frequently used list |
| 138 | + if c.recentEvict.Contains(key) { |
| 139 | + c.ensureSpace(true) |
| 140 | + c.recentEvict.Remove(key) |
| 141 | + c.frequent.Add(key, value) |
| 142 | + return |
| 143 | + } |
| 144 | + |
| 145 | + // Add to the recently seen list |
| 146 | + c.ensureSpace(false) |
| 147 | + c.recent.Add(key, value) |
| 148 | +} |
| 149 | + |
| 150 | +// ensureSpace is used to ensure we have space in the cache |
| 151 | +func (c *TwoQueueCache[K, V]) ensureSpace(recentEvict bool) { |
| 152 | + // If we have space, nothing to do |
| 153 | + recentLen := c.recent.Len() |
| 154 | + freqLen := c.frequent.Len() |
| 155 | + if recentLen+freqLen < c.size { |
| 156 | + return |
| 157 | + } |
| 158 | + |
| 159 | + // If the recent buffer is larger than |
| 160 | + // the target, evict from there |
| 161 | + if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { |
| 162 | + k, _, _ := c.recent.RemoveOldest() |
| 163 | + c.recentEvict.Add(k, struct{}{}) |
| 164 | + return |
| 165 | + } |
| 166 | + |
| 167 | + // Remove from the frequent list otherwise |
| 168 | + c.frequent.RemoveOldest() |
| 169 | +} |
| 170 | + |
| 171 | +// Len returns the number of items in the cache. |
| 172 | +func (c *TwoQueueCache[K, V]) Len() int { |
| 173 | + c.lock.RLock() |
| 174 | + defer c.lock.RUnlock() |
| 175 | + return c.recent.Len() + c.frequent.Len() |
| 176 | +} |
| 177 | + |
| 178 | +// Resize changes the cache size. |
| 179 | +func (c *TwoQueueCache[K, V]) Resize(size int) (evicted int) { |
| 180 | + c.lock.Lock() |
| 181 | + defer c.lock.Unlock() |
| 182 | + |
| 183 | + // Recalculate the sub-sizes |
| 184 | + recentSize := int(float64(size) * c.recentRatio) |
| 185 | + evictSize := int(float64(size) * c.ghostRatio) |
| 186 | + c.size = size |
| 187 | + c.recentSize = recentSize |
| 188 | + |
| 189 | + // ensureSpace |
| 190 | + diff := c.recent.Len() + c.frequent.Len() - size |
| 191 | + if diff < 0 { |
| 192 | + diff = 0 |
| 193 | + } |
| 194 | + for i := 0; i < diff; i++ { |
| 195 | + c.ensureSpace(true) |
| 196 | + } |
| 197 | + |
| 198 | + // Reallocate the LRUs |
| 199 | + c.recent.Resize(size) |
| 200 | + c.frequent.Resize(size) |
| 201 | + c.recentEvict.Resize(evictSize) |
| 202 | + |
| 203 | + return diff |
| 204 | +} |
| 205 | + |
| 206 | +// Keys returns a slice of the keys in the cache. |
| 207 | +// The frequently used keys are first in the returned slice. |
| 208 | +func (c *TwoQueueCache[K, V]) Keys() []K { |
| 209 | + c.lock.RLock() |
| 210 | + defer c.lock.RUnlock() |
| 211 | + k1 := c.frequent.Keys() |
| 212 | + k2 := c.recent.Keys() |
| 213 | + return append(k1, k2...) |
| 214 | +} |
| 215 | + |
| 216 | +// Values returns a slice of the values in the cache. |
| 217 | +// The frequently used values are first in the returned slice. |
| 218 | +func (c *TwoQueueCache[K, V]) Values() []V { |
| 219 | + c.lock.RLock() |
| 220 | + defer c.lock.RUnlock() |
| 221 | + v1 := c.frequent.Values() |
| 222 | + v2 := c.recent.Values() |
| 223 | + return append(v1, v2...) |
| 224 | +} |
| 225 | + |
| 226 | +// Remove removes the provided key from the cache. |
| 227 | +func (c *TwoQueueCache[K, V]) Remove(key K) { |
| 228 | + c.lock.Lock() |
| 229 | + defer c.lock.Unlock() |
| 230 | + if c.frequent.Remove(key) { |
| 231 | + return |
| 232 | + } |
| 233 | + if c.recent.Remove(key) { |
| 234 | + return |
| 235 | + } |
| 236 | + if c.recentEvict.Remove(key) { |
| 237 | + return |
| 238 | + } |
| 239 | +} |
| 240 | + |
| 241 | +// Purge is used to completely clear the cache. |
| 242 | +func (c *TwoQueueCache[K, V]) Purge() { |
| 243 | + c.lock.Lock() |
| 244 | + defer c.lock.Unlock() |
| 245 | + c.recent.Purge() |
| 246 | + c.frequent.Purge() |
| 247 | + c.recentEvict.Purge() |
| 248 | +} |
| 249 | + |
| 250 | +// Contains is used to check if the cache contains a key |
| 251 | +// without updating recency or frequency. |
| 252 | +func (c *TwoQueueCache[K, V]) Contains(key K) bool { |
| 253 | + c.lock.RLock() |
| 254 | + defer c.lock.RUnlock() |
| 255 | + return c.frequent.Contains(key) || c.recent.Contains(key) |
| 256 | +} |
| 257 | + |
| 258 | +// Peek is used to inspect the cache value of a key |
| 259 | +// without updating recency or frequency. |
| 260 | +func (c *TwoQueueCache[K, V]) Peek(key K) (value V, ok bool) { |
| 261 | + c.lock.RLock() |
| 262 | + defer c.lock.RUnlock() |
| 263 | + if val, ok := c.frequent.Peek(key); ok { |
| 264 | + return val, ok |
| 265 | + } |
| 266 | + return c.recent.Peek(key) |
| 267 | +} |