
Commit b1107e7

Merge branch 'master' into generic
2 parents: faaa8b2 + 42f3676

5 files changed: +63 −1 lines

cache.go

Lines changed: 7 additions & 0 deletions
@@ -115,6 +115,13 @@ func (c *Cache[T]) Get(key string) *Item[T] {
 	return item
 }
 
+// Same as Get but does not promote the value. This essentially circumvents the
+// "least recently used" aspect of this cache. To some degree, it's akin to a
+// "peek".
+func (c *Cache[T]) GetWithoutPromote(key string) *Item[T] {
+	return c.bucket(key).get(key)
+}
+
 // Used when the cache was created with the Track() configuration option.
 // Avoid otherwise
 func (c *Cache[T]) TrackingGet(key string) TrackedItem[T] {
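
For reference, a minimal usage sketch of the new method. This is not part of the commit; the import path is an assumption for the generic version, and the Configure/New/Set/Value calls mirror the generic API exercised in the tests below.

package main

import (
	"fmt"
	"time"

	"github.com/karlseguin/ccache/v3" // assumed import path for the generic version
)

func main() {
	cache := ccache.New(ccache.Configure[int]())
	cache.Set("answer", 42, time.Minute)

	// GetWithoutPromote returns the item without counting the read as a "use",
	// so repeated peeks do not protect the entry from LRU pruning.
	if item := cache.GetWithoutPromote("answer"); item != nil {
		fmt.Println(item.Value()) // 42
	}
}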

cache_test.go

Lines changed: 14 additions & 0 deletions
@@ -138,6 +138,20 @@ func Test_CachePromotedItemsDontGetPruned(t *testing.T) {
 	assert.Equal(t, cache.Get("11").Value(), 11)
 }
 
+func Test_GetWithoutPromoteDoesNotPromote(t *testing.T) {
+	cache := New(Configure[int]().ItemsToPrune(10).GetsPerPromote(1))
+	for i := 0; i < 500; i++ {
+		cache.Set(strconv.Itoa(i), i, time.Minute)
+	}
+	cache.SyncUpdates()
+	cache.GetWithoutPromote("9")
+	cache.SyncUpdates()
+	cache.GC()
+	assert.Equal(t, cache.Get("9"), nil)
+	assert.Equal(t, cache.Get("10").Value(), 10)
+	assert.Equal(t, cache.Get("11").Value(), 11)
+}
+
 func Test_CacheTrackerDoesNotCleanupHeldInstance(t *testing.T) {
 	cache := New(Configure[int]().ItemsToPrune(11).Track())
 	item0 := cache.TrackingSet("0", 0, time.Minute)

layeredcache.go

Lines changed: 10 additions & 0 deletions
@@ -75,6 +75,13 @@ func (c *LayeredCache[T]) Get(primary, secondary string) *Item[T] {
 	return item
 }
 
+// Same as Get but does not promote the value. This essentially circumvents the
+// "least recently used" aspect of this cache. To some degree, it's akin to a
+// "peek".
+func (c *LayeredCache[T]) GetWithoutPromote(primary, secondary string) *Item[T] {
+	return c.bucket(primary).get(primary, secondary)
+}
+
 func (c *LayeredCache[T]) ForEachFunc(primary string, matches func(key string, item *Item[T]) bool) {
 	c.bucket(primary).forEachFunc(primary, matches)
 }
@@ -345,6 +352,9 @@ func (c *LayeredCache[T]) gc() int {
 		c.bucket(item.group).delete(item.group, item.key)
 		c.size -= item.size
 		c.list.Remove(node)
+		if c.onDelete != nil {
+			c.onDelete(item)
+		}
 		item.promotions = -2
 		dropped += 1
 	}
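
The gc change above makes the layered cache invoke the configured OnDelete callback when its garbage collector evicts an item, as the test added below verifies. A sketch of wiring that callback up; the import path and the use of SyncUpdates to flush the worker goroutine are assumptions, not part of this commit.

package main

import (
	"fmt"
	"time"

	"github.com/karlseguin/ccache/v3" // assumed import path for the generic version
)

func main() {
	removals := 0
	cache := ccache.Layered(ccache.Configure[string]().
		MaxSize(100).
		ItemsToPrune(10).
		OnDelete(func(item *ccache.Item[string]) {
			// Invoked when an item is removed; with this commit, evictions
			// performed by the layered cache's GC trigger it as well.
			removals++
		}))

	cache.Set("users", "1", "alice", time.Minute)
	cache.Delete("users", "1")
	cache.SyncUpdates() // removals are processed asynchronously by a worker
	fmt.Println("OnDelete calls:", removals)
}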

layeredcache_test.go

Lines changed: 24 additions & 1 deletion
@@ -188,6 +188,20 @@ func Test_LayeredCache_PromotedItemsDontGetPruned(t *testing.T) {
 	assert.Equal(t, cache.Get("11", "a").Value(), 11)
 }
 
+func Test_LayeredCache_GetWithoutPromoteDoesNotPromote(t *testing.T) {
+	cache := Layered(Configure[int]().ItemsToPrune(10).GetsPerPromote(1))
+	for i := 0; i < 500; i++ {
+		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
+	}
+	cache.SyncUpdates()
+	cache.GetWithoutPromote("9", "a")
+	cache.SyncUpdates()
+	cache.GC()
+	assert.Equal(t, cache.Get("9", "a"), nil)
+	assert.Equal(t, cache.Get("10", "a").Value(), 10)
+	assert.Equal(t, cache.Get("11", "a").Value(), 11)
+}
+
 func Test_LayeredCache_TrackerDoesNotCleanupHeldInstance(t *testing.T) {
 	cache := Layered(Configure[int]().ItemsToPrune(10).Track())
 	item0 := cache.TrackingSet("0", "a", 0, time.Minute)
@@ -207,13 +221,21 @@ func Test_LayeredCache_TrackerDoesNotCleanupHeldInstance(t *testing.T) {
 }
 
 func Test_LayeredCache_RemovesOldestItemWhenFull(t *testing.T) {
-	cache := Layered(Configure[int]().MaxSize(5).ItemsToPrune(1))
+	onDeleteFnCalled := false
+	onDeleteFn := func(item *Item[int]) {
+		if item.key == "a" {
+			onDeleteFnCalled = true
+		}
+	}
+	cache := Layered(Configure[int]().MaxSize(5).ItemsToPrune(1).OnDelete(onDeleteFn))
+
 	cache.Set("xx", "a", 23, time.Minute)
 	for i := 0; i < 7; i++ {
 		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
 	}
 	cache.Set("xx", "b", 9001, time.Minute)
 	cache.SyncUpdates()
+
 	assert.Equal(t, cache.Get("xx", "a"), nil)
 	assert.Equal(t, cache.Get("0", "a"), nil)
 	assert.Equal(t, cache.Get("1", "a"), nil)
@@ -222,6 +244,7 @@ func Test_LayeredCache_RemovesOldestItemWhenFull(t *testing.T) {
 	assert.Equal(t, cache.Get("xx", "b").Value(), 9001)
 	assert.Equal(t, cache.GetDropped(), 4)
 	assert.Equal(t, cache.GetDropped(), 0)
+	assert.Equal(t, onDeleteFnCalled, true)
 }
 
 func Test_LayeredCache_ResizeOnTheFly(t *testing.T) {

readme.md

Lines changed: 8 additions & 0 deletions
@@ -1,4 +1,9 @@
 # CCache
+
+Generic version is on the way:
+https://github.com/karlseguin/ccache/tree/generic
+
+
 CCache is an LRU Cache, written in Go, focused on supporting high concurrency.
 
 Lock contention on the list is reduced by:
@@ -64,6 +69,9 @@ The returned `*Item` exposes a number of methods:
 
 By returning expired items, CCache lets you decide if you want to serve stale content or not. For example, you might decide to serve up slightly stale content (< 30 seconds old) while re-fetching newer data in the background. You might also decide to serve up infinitely stale content if you're unable to get new data from your source.
 
+### GetWithoutPromote
+Same as `Get` but does not "promote" the value, which is to say it circumvents the "lru" aspect of this cache. It should only be used in limited cases, such as peeking at the value.
+
 ### Set
 `Set` expects the key, value and ttl:
 
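
The readme context retained in this hunk describes serving stale values while refreshing in the background. A hedged sketch of that pattern: Item.Expired() is part of the item API, while fetchFresh and the import path are hypothetical stand-ins for illustration.

package main

import (
	"time"

	"github.com/karlseguin/ccache/v3" // assumed import path for the generic version
)

// fetchFresh is a hypothetical helper standing in for your real data source.
func fetchFresh(key string) string { return "fresh value for " + key }

func getWithStaleFallback(cache *ccache.Cache[string], key string) string {
	item := cache.Get(key)
	if item == nil {
		// Nothing cached at all: fetch synchronously.
		value := fetchFresh(key)
		cache.Set(key, value, 30*time.Second)
		return value
	}
	if item.Expired() {
		// Serve the stale value now and refresh in the background.
		// (A real implementation would deduplicate concurrent refreshes.)
		go func() {
			cache.Set(key, fetchFresh(key), 30*time.Second)
		}()
	}
	return item.Value()
}

func main() {
	cache := ccache.New(ccache.Configure[string]())
	_ = getWithStaleFallback(cache, "profile:42")
}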
