Skip to content

Commit 20dc7a0

Browse files
committed
Additional tests and optimisations
1 parent 5680146 commit 20dc7a0

File tree

4 files changed

+226
-10
lines changed

4 files changed

+226
-10
lines changed

cache.go

Lines changed: 18 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,8 @@ type CacheMap struct {
1414

1515
// A "thread" safe string to anything map
1616
type CacheMapShared struct {
17-
shutdown chan bool
17+
shutdown chan struct{}
18+
ticker *time.Ticker
1819
cleanupCycle time.Duration
1920
items map[string]*Item
2021
sync.RWMutex // Read Write mutex, guards access to internal map.
@@ -43,9 +44,20 @@ func (m CacheMap) Close() {
4344
}
4445
}
4546

46-
// Close stops the cleanup
47-
func (ms CacheMapShared) Close() {
48-
ms.shutdown <- true
47+
// Close stops the cleanup background goroutine and ticker for this shard
48+
func (ms *CacheMapShared) Close() {
49+
// Protect against double-close: close is safe only once; recover if already closed.
50+
if ms.shutdown != nil {
51+
select {
52+
case <-ms.shutdown:
53+
// already closed
54+
default:
55+
close(ms.shutdown)
56+
}
57+
}
58+
if ms.ticker != nil {
59+
ms.ticker.Stop()
60+
}
4961
}
5062

5163
// Returns shard under given key
@@ -130,14 +142,14 @@ func (m CacheMap) Remove(key string) {
130142
}
131143

132144
// Removes an element from the map
133-
func (ms CacheMapShared) Remove(key string) {
145+
// Remove deletes the element stored under key from this shard.
//
// The unlock is deferred because ms.remove invokes the item's
// user-supplied onDelete callback; if that callback panics, an
// inline Unlock would never run and the shard would stay locked
// forever.
func (ms *CacheMapShared) Remove(key string) {
	ms.Lock()
	defer ms.Unlock()
	ms.remove(key)
}
138150

139151
// Removes an element from the map
140-
func (ms CacheMapShared) remove(key string) {
152+
func (ms *CacheMapShared) remove(key string) {
141153
if itm, ok := ms.items[key]; ok && itm.onDelete != nil {
142154
itm.onDelete(itm)
143155
}

cache_bench_test.go

Lines changed: 94 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,94 @@
1+
package ttlmap_test
2+
3+
import (
4+
"math/rand"
5+
"strconv"
6+
"testing"
7+
"time"
8+
9+
"github.com/packaged/ttlmap"
10+
)
11+
12+
// helper to prepopulate cache with n keys
13+
func prepopulate(c ttlmap.CacheMap, n int) []string {
14+
keys := make([]string, n)
15+
for i := 0; i < n; i++ {
16+
k := "k" + strconv.Itoa(i)
17+
c.Set(k, i, nil)
18+
keys[i] = k
19+
}
20+
return keys
21+
}
22+
23+
func BenchmarkSet(b *testing.B) {
24+
cache := ttlmap.New(ttlmap.WithCleanupDuration(time.Hour)) // disable cleanup overhead
25+
b.ResetTimer()
26+
for i := 0; i < b.N; i++ {
27+
cache.Set("key"+strconv.Itoa(i), i, nil)
28+
}
29+
}
30+
31+
func BenchmarkGetHit(b *testing.B) {
32+
cache := ttlmap.New(ttlmap.WithCleanupDuration(time.Hour))
33+
keys := prepopulate(cache, 1024)
34+
// rotate through keys
35+
b.ResetTimer()
36+
idx := 0
37+
for i := 0; i < b.N; i++ {
38+
if idx == len(keys) {
39+
idx = 0
40+
}
41+
cache.Get(keys[idx])
42+
idx++
43+
}
44+
}
45+
46+
func BenchmarkGetMiss(b *testing.B) {
47+
cache := ttlmap.New(ttlmap.WithCleanupDuration(time.Hour))
48+
b.ResetTimer()
49+
for i := 0; i < b.N; i++ {
50+
cache.Get("missing-" + strconv.Itoa(i))
51+
}
52+
}
53+
54+
func BenchmarkParallelSet(b *testing.B) {
55+
cache := ttlmap.New(ttlmap.WithCleanupDuration(time.Hour))
56+
b.ResetTimer()
57+
b.RunParallel(func(pb *testing.PB) {
58+
// local counter to avoid contention on atomics
59+
j := 0
60+
for pb.Next() {
61+
cache.Set("pset-"+strconv.Itoa(j), j, nil)
62+
j++
63+
}
64+
})
65+
}
66+
67+
func BenchmarkParallelGetHit(b *testing.B) {
68+
cache := ttlmap.New(ttlmap.WithCleanupDuration(time.Hour))
69+
keys := prepopulate(cache, 4096)
70+
b.ResetTimer()
71+
b.RunParallel(func(pb *testing.PB) {
72+
r := rand.New(rand.NewSource(time.Now().UnixNano()))
73+
for pb.Next() {
74+
cache.Get(keys[r.Intn(len(keys))])
75+
}
76+
})
77+
}
78+
79+
func BenchmarkMixedRW(b *testing.B) {
80+
cache := ttlmap.New(ttlmap.WithCleanupDuration(time.Hour))
81+
keys := prepopulate(cache, 2048)
82+
b.ResetTimer()
83+
b.RunParallel(func(pb *testing.PB) {
84+
r := rand.New(rand.NewSource(time.Now().UnixNano()))
85+
for pb.Next() {
86+
if r.Intn(10) == 0 { // ~10% writes
87+
k := keys[r.Intn(len(keys))]
88+
cache.Set(k, r.Int(), nil)
89+
} else {
90+
cache.Get(keys[r.Intn(len(keys))])
91+
}
92+
}
93+
})
94+
}

cleanup.go

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -26,15 +26,20 @@ func (ms *CacheMapShared) Cleanup() {
2626
}
2727

2828
func (ms *CacheMapShared) initCleanup(dur time.Duration) {
29-
ticker := time.Tick(dur)
30-
go (func() {
29+
// Initialize shutdown channel once
30+
if ms.shutdown == nil {
31+
ms.shutdown = make(chan struct{})
32+
}
33+
// Use NewTicker so we can Stop it later to avoid ticker leaks
34+
ms.ticker = time.NewTicker(dur)
35+
go func() {
3136
for {
3237
select {
3338
case <-ms.shutdown:
3439
return
35-
case <-ticker:
40+
case <-ms.ticker.C:
3641
ms.Cleanup()
3742
}
3843
}
39-
})()
44+
}()
4045
}

ttlmap_test.go

Lines changed: 105 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,105 @@
1+
package ttlmap_test
2+
3+
import (
4+
"sync"
5+
"testing"
6+
"time"
7+
8+
"github.com/packaged/ttlmap"
9+
)
10+
11+
func TestHasAndExpiry(t *testing.T) {
12+
cache := ttlmap.New(ttlmap.WithDefaultTTL(50*time.Millisecond), ttlmap.WithCleanupDuration(10*time.Millisecond))
13+
if cache.Has("missing") {
14+
t.Fatalf("expected Has to be false for missing key")
15+
}
16+
cache.Set("a", 1, nil)
17+
if !cache.Has("a") {
18+
t.Fatalf("expected Has to be true right after Set")
19+
}
20+
time.Sleep(70 * time.Millisecond)
21+
if cache.Has("a") {
22+
t.Fatalf("expected Has to be false after TTL expiry")
23+
}
24+
}
25+
26+
func TestGetExpiryAndGetItemCopy(t *testing.T) {
27+
ttl := 80 * time.Millisecond
28+
cache := ttlmap.New(ttlmap.WithDefaultTTL(ttl), ttlmap.WithCleanupDuration(10*time.Millisecond))
29+
cache.Set("k", "v", &ttl)
30+
31+
exp1 := cache.GetExpiry("k")
32+
if exp1 == nil {
33+
t.Fatalf("expected expiry for existing key")
34+
}
35+
// Missing key should return nil
36+
if exp := cache.GetExpiry("missing"); exp != nil {
37+
t.Fatalf("expected nil expiry for missing key")
38+
}
39+
40+
// Get a copy and Touch it; the underlying stored item should not change expiry
41+
itm, ok := cache.GetItem("k")
42+
if !ok || itm == nil {
43+
t.Fatalf("expected GetItem to succeed")
44+
}
45+
// Touch the returned copy
46+
itm.Touch()
47+
exp2 := cache.GetExpiry("k")
48+
if exp2 == nil || !exp2.Equal(*exp1) {
49+
t.Fatalf("expected stored item expiry to remain unchanged after touching copy")
50+
}
51+
}
52+
53+
func TestFlush(t *testing.T) {
54+
cache := ttlmap.New(ttlmap.WithDefaultTTL(500 * time.Millisecond))
55+
cache.Set("x", 1, nil)
56+
cache.Set("y", 2, nil)
57+
if !cache.Has("x") || !cache.Has("y") {
58+
t.Fatalf("expected keys to exist before Flush")
59+
}
60+
cache.Flush()
61+
if cache.Has("x") || cache.Has("y") {
62+
t.Fatalf("expected keys to be removed after Flush")
63+
}
64+
}
65+
66+
func TestCloseIdempotent(t *testing.T) {
67+
cache := ttlmap.New(ttlmap.WithCleanupDuration(5 * time.Millisecond))
68+
// Should not panic on repeated Close
69+
cache.Close()
70+
cache.Close()
71+
}
72+
73+
func TestBackgroundUpdateSingleFlight(t *testing.T) {
74+
cache := ttlmap.New(ttlmap.WithDefaultTTL(20 * time.Millisecond))
75+
key := "sf"
76+
cache.Set(key, 1, nil)
77+
78+
var calls int
79+
var mu sync.Mutex
80+
start := make(chan struct{})
81+
var wg sync.WaitGroup
82+
// Launch many contenders; only one updater should run at a time for the key
83+
n := 50
84+
wg.Add(n)
85+
for i := 0; i < n; i++ {
86+
go func() {
87+
defer wg.Done()
88+
<-start
89+
cache.BackgroundUpdate(key, func() (interface{}, error) {
90+
mu.Lock()
91+
calls++
92+
mu.Unlock()
93+
time.Sleep(5 * time.Millisecond)
94+
return 2, nil
95+
})
96+
}()
97+
}
98+
close(start)
99+
wg.Wait()
100+
101+
// Only one should have executed; the rest return immediately because the key is locked
102+
if calls != 1 {
103+
t.Fatalf("expected exactly one updater call, got %d", calls)
104+
}
105+
}

0 commit comments

Comments
 (0)