@@ -53,8 +53,10 @@ type CacheStats interface {
 	GetTotalGets() int64 // the number of gets against the near cache
 	GetCachePrunes() int64 // the number of times the near cache was pruned
 	GetCachePrunesDuration() time.Duration // the duration of all prunes
-	GetCacheExpires() int64 // the number of times the near cache had expiry event
+	GetCacheEntriesPruned() int64 // the actual number of cache entries that were pruned
+	GetCacheExpires() int64 // the number of times the near cache expired entries
 	GetCacheExpiresDuration() time.Duration // the duration of all expires
+	GetCacheEntriesExpired() int64 // the actual number of cache entries that were expired
 	Size() int // the number of entries in the near cache
 	SizeBytes() int64 // the number of bytes used by the entries (keys and values) in the near cache
 	ResetStats() // reset the stats for the near cache, not including Size() or SizeBytes()
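The new accessors separate how many times an operation ran from how many entries it touched: GetCachePrunes and GetCacheExpires count prune and expiry passes, while GetCacheEntriesPruned and GetCacheEntriesExpired count the individual entries those passes removed. A minimal sketch of reading them, assuming it compiles in the same package as CacheStats with "fmt" imported; the helper name is hypothetical:

```go
// logNearCacheStats prints the pass counters next to the per-entry counters so the
// average number of entries removed per prune or expiry pass is easy to see.
func logNearCacheStats(stats CacheStats) {
	fmt.Printf("prunes=%d entriesPruned=%d expires=%d entriesExpired=%d size=%d\n",
		stats.GetCachePrunes(), stats.GetCacheEntriesPruned(),
		stats.GetCacheExpires(), stats.GetCacheEntriesExpired(), stats.Size())
}
```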
@@ -64,16 +66,20 @@ type localCacheImpl[K comparable, V any] struct {
 	Name    string
 	options *localCacheOptions
 	sync.Mutex
-	data               map[K]*localCacheEntry[K, V]
-	cacheHits          int64
-	cacheMisses        int64
-	cacheMissesNannos  int64
-	cachePuts          int64
-	cachePrunes        int64
-	cachePrunesNannos  int64
-	cacheExpires       int64
-	cacheExpiresNannos int64
-	cacheMemory        int64
+	data                map[K]*localCacheEntry[K, V]
+	expiryMap           map[int64]*[]K
+	nextExpiry          time.Time
+	cacheHits           int64
+	cacheMisses         int64
+	cacheMissesNannos   int64
+	cachePuts           int64
+	cacheEntriesPruned  int64
+	cachePrunes         int64
+	cachePrunesNannos   int64
+	cacheEntriesExpired int64
+	cacheExpires        int64
+	cacheExpiresNannos  int64
+	cacheMemory         int64
 }

 type localCacheEntry[K comparable, V any] struct {
@@ -82,6 +88,7 @@ type localCacheEntry[K comparable, V any] struct {
 	ttl        time.Duration
 	insertTime time.Time
 	lastAccess time.Time
+	expiresAt  time.Time
 }

 type pair[K comparable] struct {
@@ -123,6 +130,7 @@ func (l *localCacheImpl[K, V]) PutWithExpiry(key K, value V, ttl time.Duration)
 	newEntry := newLocalCacheEntry[K, V](key, value, ttl)

 	l.updateEntrySize(newEntry, 1)
+	l.registerExpiry(newEntry)

 	prev, ok := l.data[key]

@@ -185,6 +193,7 @@ func (l *localCacheImpl[K, V]) Remove(key K) *V {
 	v, ok := l.data[key]

 	if ok {
+		l.removeExpiry(key)
 		delete(l.data, key)
 		l.updateEntrySize(v, -1)
 		return &v.value
@@ -219,6 +228,7 @@ func (l *localCacheImpl[K, V]) Clear() {
 	defer l.Unlock()

 	l.data = make(map[K]*localCacheEntry[K, V], 0)
+	l.expiryMap = make(map[int64]*[]K, 0)
 	l.updateCacheMemory(0)
 }

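Clear also reinitializes expiryMap, which maps an expiry bucket (Unix milliseconds) to a pointer to a slice of keys; storing a pointer lets registerExpiry append through it without writing the slice header back into the map. A standalone sketch of that idiom, with illustrative names only (not part of this change):

```go
package main

import "fmt"

func main() {
	// buckets maps an expiry time (unix millis) to a pointer to the keys due at that time.
	buckets := make(map[int64]*[]string)

	add := func(bucket int64, key string) {
		if v, ok := buckets[bucket]; ok {
			// appending through the pointer grows the stored slice in place,
			// with no second map assignment needed
			*v = append(*v, key)
			return
		}
		keys := []string{key}
		buckets[bucket] = &keys
	}

	add(1024, "a")
	add(1024, "b")
	add(1280, "c")
	fmt.Println(*buckets[1024], *buckets[1280]) // [a b] [c]
}
```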
@@ -232,26 +242,56 @@ func (l *localCacheImpl[K, V]) GetStats() CacheStats {
 }

 // expireEntries goes through the map to see if any entries have expired due to ttl.
+// this is done in buckets of 1/4 second so as to be more efficient. this means the
+// minimum expiry duration is 1/4 of a second.
 func (l *localCacheImpl[K, V]) expireEntries() {
+	if len(l.expiryMap) == 0 {
+		return
+	}
+
 	var (
-		keysToDelete = make([]K, 0)
-		start        = time.Now()
+		bucketsToRemove = make([]int64, 0)
+		expiryKeys      = make([]int64, len(l.expiryMap))
+		start           = time.Now()
+		startUnixMillis = start.UnixMilli()
+		index           = 0
 	)

-	// check for cache expiry
-	for k, v := range l.data {
-		if v.ttl > 0 && start.Sub(v.insertTime) > v.ttl {
-			keysToDelete = append(keysToDelete, k)
-		}
+	if start.Before(l.nextExpiry) {
+		return
 	}

-	// delete all the keys that were flagged from the expiry, this may be enough to free up space
-	for _, k := range keysToDelete {
-		l.updateEntrySize(l.data[k], -1)
-		delete(l.data, k)
+	// get the keys from the map and sort them so we process the earliest first
+	for key := range l.expiryMap {
+		expiryKeys[index] = key
+		index++
 	}

-	if len(keysToDelete) > 0 {
+	sort.Slice(expiryKeys, func(p, q int) bool {
+		return expiryKeys[p] < expiryKeys[q]
+	})
+
+	for _, expireTime := range expiryKeys {
+		if expireTime < startUnixMillis {
+			// this bucket is due, so expire all entries registered under it
+			if v, ok := l.expiryMap[expireTime]; ok {
+				bucketsToRemove = append(bucketsToRemove, expireTime)
+				for _, k := range *v {
+					l.updateEntrySize(l.data[k], -1)
+					atomic.AddInt64(&l.cacheEntriesExpired, 1)
+					delete(l.data, k)
+				}
+			}
+		}
+	}
+
+	if len(bucketsToRemove) > 0 {
+		l.nextExpiry = time.Now().Add(time.Duration(256) * time.Millisecond)
+
+		for _, b := range bucketsToRemove {
+			delete(l.expiryMap, b)
+		}
+
 		l.registerExpireNanos(time.Since(start).Nanoseconds())
 	}
 }
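The rewritten expireEntries never scans the whole data map: registerExpiry groups keys into expiryMap buckets keyed by the entry's expiresAt in Unix milliseconds, with the TTL first floored to a 256ms multiple, and expireEntries only visits buckets whose time has passed, at most once per nextExpiry window. A standalone sketch of the bucket computation, assuming the entries are inserted at the same instant; the names here are illustrative, not part of the change:

```go
package main

import (
	"fmt"
	"time"
)

// bucketKey mirrors how registerExpiry derives an expiryMap key: the insert time
// plus the TTL floored to a 256ms multiple, expressed in Unix milliseconds.
func bucketKey(insert time.Time, ttl time.Duration) int64 {
	flooredTTL := time.Duration(ttl.Milliseconds()&^0xFF) * time.Millisecond
	return insert.Add(flooredTTL).UnixMilli()
}

func main() {
	now := time.Now()
	// TTLs of 300ms and 400ms both floor to 256ms, so their keys share one bucket;
	// 600ms floors to 512ms and lands in a bucket of its own.
	fmt.Println(bucketKey(now, 300*time.Millisecond) == bucketKey(now, 400*time.Millisecond)) // true
	fmt.Println(bucketKey(now, 300*time.Millisecond) == bucketKey(now, 600*time.Millisecond)) // false
}
```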
@@ -300,24 +340,35 @@ func (l *localCacheImpl[K, V]) pruneEntries() {
 				break
 			}
 			l.updateEntrySize(l.data[v.key], -1)
+			atomic.AddInt64(&l.cacheEntriesPruned, 1)
+			l.removeExpiry(v.key)
 			delete(l.data, v.key)
 		}
 	}
 }

 func newLocalCacheEntry[K comparable, V any](key K, value V, ttl time.Duration) *localCacheEntry[K, V] {
-	return &localCacheEntry[K, V]{
+	now := time.Now()
+	entry := &localCacheEntry[K, V]{
 		key:        key,
 		value:      value,
 		ttl:        ttl,
-		insertTime: time.Now(),
+		insertTime: now,
 	}
+	if ttl > 0 {
+		// granularity of expiry is a minimum of 256ms
+		entry.expiresAt = now.Add(getMillisBucket(ttl))
+	}
+
+	return entry
 }

 func newLocalCache[K comparable, V any](name string, options ...func(localCache *localCacheOptions)) *localCacheImpl[K, V] {
 	cache := &localCacheImpl[K, V]{
-		Name: name,
-		data: make(map[K]*localCacheEntry[K, V], 0),
+		Name:       name,
+		data:       make(map[K]*localCacheEntry[K, V], 0),
+		expiryMap:  make(map[int64]*[]K, 0),
+		nextExpiry: time.Now().Add(time.Duration(256) * time.Millisecond),
 		options: &localCacheOptions{
 			TTL:       0,
 			HighUnits: 0,
@@ -432,6 +483,14 @@ func (l *localCacheImpl[K, V]) GetCachePuts() int64 {
 	return l.cachePuts
 }

+func (l *localCacheImpl[K, V]) GetCacheEntriesExpired() int64 {
+	return l.cacheEntriesExpired
+}
+
+func (l *localCacheImpl[K, V]) GetCacheEntriesPruned() int64 {
+	return l.cacheEntriesPruned
+}
+
 func (l *localCacheImpl[K, V]) GetCachePrunes() int64 {
 	return l.cachePrunes
 }
@@ -462,26 +521,32 @@ func (l *localCacheImpl[K, V]) GetHitRate() float32 {

 func (l *localCacheImpl[K, V]) ResetStats() {
 	atomic.StoreInt64(&l.cachePrunesNannos, 0)
+	atomic.StoreInt64(&l.cacheExpiresNannos, 0)
 	atomic.StoreInt64(&l.cacheMissesNannos, 0)
 	atomic.StoreInt64(&l.cachePrunes, 0)
 	atomic.StoreInt64(&l.cacheHits, 0)
 	atomic.StoreInt64(&l.cacheMisses, 0)
 	atomic.StoreInt64(&l.cachePuts, 0)
+	atomic.StoreInt64(&l.cacheEntriesExpired, 0)
+	atomic.StoreInt64(&l.cacheEntriesPruned, 0)
 }

 func (l *localCacheImpl[K, V]) String() string {
 	return fmt.Sprintf("localCache{name=%s, options=%v, stats=CacheStats{puts=%v, gets=%v, hits=%v, misses=%v, " +
-		"missesDuration=%v, hitRate=%v%%, prunes=%v, prunesDuration=%v, expires=%v, expiresDuration=%v, size=%v, memoryUsed=%v}}",
+		"missesDuration=%v, hitRate=%v%%, prunes=%v, prunesDuration=%v, entriesPruned=%v, expires=%v, expiresDuration=%v, entriesExpired=%v, size=%v, memoryUsed=%v}}",
 		l.Name, l.options, l.GetCachePuts(), l.GetTotalGets(), l.GetCacheHits(), l.GetCacheMisses(),
-		l.GetCacheMissesDuration(), l.GetHitRate()*100, l.GetCachePrunes(), l.GetCachePrunesDuration(),
-		l.GetCacheExpires(), l.GetCacheExpiresDuration(), l.Size(), formatMemory(l.cacheMemory))
+		l.GetCacheMissesDuration(), l.GetHitRate()*100,
+		l.GetCachePrunes(), l.GetCachePrunesDuration(), l.GetCacheEntriesPruned(),
+		l.GetCacheExpires(), l.GetCacheExpiresDuration(), l.GetCacheEntriesExpired(),
+		l.Size(), formatMemory(l.cacheMemory))
 }

 // updateEntrySize updates the cacheMemory size based upon a local entry. The sign indicates to either remove or add.
 func (l *localCacheImpl[K, V]) updateEntrySize(entry *localCacheEntry[K, V], sign int) {
 	var size = int64(unsafe.Sizeof(entry.key)) + int64(unsafe.Sizeof(entry.value)) +
 		int64(unsafe.Sizeof(entry.lastAccess)) + int64(unsafe.Sizeof(entry.ttl)) +
-		int64(unsafe.Sizeof(entry.insertTime)) + int64(unsafe.Sizeof(entry))
+		int64(unsafe.Sizeof(entry.insertTime)) + int64(unsafe.Sizeof(entry.expiresAt)) +
+		int64(unsafe.Sizeof(entry))
 	l.updateCacheMemory(int64(sign) * size)
 }

@@ -498,3 +563,58 @@ func formatMemory(bytesValue int64) string {
 	}
 	return printer.Sprintf("%-.1fGB", float64(bytesValue)/1024/1024/1024)
 }
+
+func (l *localCacheImpl[K, V]) registerExpiry(entry *localCacheEntry[K, V]) {
+	if entry.ttl > 0 {
+		// get the expires time in unix millis and key on this
+		expiresAtMillis := entry.expiresAt.UnixMilli()
+
+		// see if we can find an entry for the expires time as millis
+		v, ok := l.expiryMap[expiresAtMillis]
+		if !ok {
+			// create a new map entry
+			newSlice := []K{entry.key}
+			l.expiryMap[expiresAtMillis] = &newSlice
+		} else {
+			// append to the existing one
+			*v = append(*v, entry.key)
+		}
+	}
+}
+
+func (l *localCacheImpl[K, V]) removeExpiry(k K) {
+	// find the entry for the key and process it if it exists
+	if entry, ok1 := l.data[k]; ok1 {
+		if entry.ttl > 0 {
+			expiresAtMillis := entry.expiresAt.UnixMilli()
+
+			// see if we can find an entry for the expires time as millis
+			v, ok := l.expiryMap[expiresAtMillis]
+			if ok {
+				// an entry exists for the expiry, so remove the key from the slice
+				existingKeys := *v
+
+				if len(existingKeys) == 1 {
+					// delete the TTL map entry as no keys are left in the slice
+					delete(l.expiryMap, expiresAtMillis)
+					return
+				}
+
+				newSlice := existingKeys[:0]
+
+				for _, key := range existingKeys {
+					if key != entry.key {
+						newSlice = append(newSlice, key)
+					}
+				}
+
+				*v = newSlice
+			}
+		}
+	}
+}
+
+// getMillisBucket returns the ttl in buckets of 256ms for expiry.
+func getMillisBucket(ttl time.Duration) time.Duration {
+	return time.Duration(ttl.Milliseconds()&^0xFF) * time.Millisecond
+}
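A standalone demonstration of getMillisBucket's flooring behaviour: clearing the low 8 bits of the millisecond count rounds a TTL down to the nearest 256ms multiple, so TTLs under 256ms collapse to zero. The sample values below are illustrative only:

```go
package main

import (
	"fmt"
	"time"
)

// getMillisBucket is copied from the change above: it floors a ttl to a 256ms multiple.
func getMillisBucket(ttl time.Duration) time.Duration {
	return time.Duration(ttl.Milliseconds()&^0xFF) * time.Millisecond
}

func main() {
	for _, ttl := range []time.Duration{
		100 * time.Millisecond, // -> 0s
		300 * time.Millisecond, // -> 256ms
		time.Second,            // -> 768ms
		2 * time.Second,        // -> 1.792s
	} {
		fmt.Printf("%v -> %v\n", ttl, getMillisBucket(ttl))
	}
}
```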