Skip to content

Commit 40275a3

Browse files
committed
Ability to dynamically SetMaxSize
To support this, rather than adding another field/channel like `getDroppedReq`, I added a `control` channel that can be used for these miscellaneous interactions with the worker. The control channel also takes over the role of the `donec` channel.
1 parent d9aec58 commit 40275a3

File tree

4 files changed

+135
-38
lines changed

4 files changed

+135
-38
lines changed

cache.go

Lines changed: 40 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -8,17 +8,24 @@ import (
88
"time"
99
)
1010

// The cache exposes a single generic 'control' channel for sending
// messages to its worker goroutine. The types below are the messages
// that may travel over that channel.

// getDropped requests the number of items dropped since the last
// request; the worker sends its answer back on res.
type getDropped struct {
	res chan int
}

// setMaxSize instructs the worker to adopt a new maximum cache size.
type setMaxSize struct {
	size int64
}
19+
1120
type Cache struct {
1221
*Configuration
13-
list *list.List
14-
size int64
15-
buckets []*bucket
16-
bucketMask uint32
17-
deletables chan *Item
18-
promotables chan *Item
19-
donec chan struct{}
20-
getDroppedReq chan struct{}
21-
getDroppedRes chan int
22+
list *list.List
23+
size int64
24+
buckets []*bucket
25+
bucketMask uint32
26+
deletables chan *Item
27+
promotables chan *Item
28+
control chan interface{}
2229
}
2330

2431
// Create a new cache with the specified configuration
@@ -29,8 +36,7 @@ func New(config *Configuration) *Cache {
2936
Configuration: config,
3037
bucketMask: uint32(config.buckets) - 1,
3138
buckets: make([]*bucket, config.buckets),
32-
getDroppedReq: make(chan struct{}),
33-
getDroppedRes: make(chan int),
39+
control: make(chan interface{}),
3440
}
3541
for i := 0; i < int(config.buckets); i++ {
3642
c.buckets[i] = &bucket{
@@ -138,20 +144,27 @@ func (c *Cache) Clear() {
138144
// is called are likely to panic
139145
func (c *Cache) Stop() {
	// Closing promotables tells the worker to exit; the worker closes
	// control on its way out, so this receive blocks until it is done.
	close(c.promotables)
	<-c.control
}
143149

144150
// Gets the number of items removed from the cache due to memory pressure since
145151
// the last time GetDropped was called
146152
func (c *Cache) GetDropped() int {
147-
c.getDroppedReq <- struct{}{}
148-
return <-c.getDroppedRes
153+
res := make(chan int)
154+
c.control <- getDropped{res: res}
155+
return <-res
156+
}
157+
158+
// Sets a new max size. That can result in a GC being run if the new maxium size
159+
// is smaller than the cached size
160+
func (c *Cache) SetMaxSize(size int64) {
161+
c.control <- setMaxSize{size}
149162
}
150163

151164
// restart recreates the channels the worker consumes and launches a
// fresh worker goroutine.
func (c *Cache) restart() {
	c.deletables = make(chan *Item, c.deleteBuffer)
	c.promotables = make(chan *Item, c.promoteBuffer)
	c.control = make(chan interface{})
	go c.worker()
}
157170

@@ -180,7 +193,7 @@ func (c *Cache) promote(item *Item) {
180193
}
181194

182195
func (c *Cache) worker() {
183-
defer close(c.donec)
196+
defer close(c.control)
184197
dropped := 0
185198
for {
186199
select {
@@ -193,9 +206,17 @@ func (c *Cache) worker() {
193206
}
194207
case item := <-c.deletables:
195208
c.doDelete(item)
196-
case _ = <-c.getDroppedReq:
197-
c.getDroppedRes <- dropped
198-
dropped = 0
209+
case control := <-c.control:
210+
switch msg := control.(type) {
211+
case getDropped:
212+
msg.res <- dropped
213+
dropped = 0
214+
case setMaxSize:
215+
c.maxSize = msg.size
216+
if c.size > c.maxSize {
217+
dropped += c.gc()
218+
}
219+
}
199220
}
200221
}
201222

cache_test.go

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -208,6 +208,38 @@ func (_ CacheTests) ReplaceChangesSize() {
208208
checkSize(cache, 5)
209209
}
210210

211+
func (_ CacheTests) ResizeOnTheFly() {
212+
cache := New(Configure().MaxSize(9).ItemsToPrune(1))
213+
for i := 0; i < 5; i++ {
214+
cache.Set(strconv.Itoa(i), i, time.Minute)
215+
}
216+
cache.SetMaxSize(3)
217+
time.Sleep(time.Millisecond * 10)
218+
Expect(cache.GetDropped()).To.Equal(2)
219+
Expect(cache.Get("0")).To.Equal(nil)
220+
Expect(cache.Get("1")).To.Equal(nil)
221+
Expect(cache.Get("2").Value()).To.Equal(2)
222+
Expect(cache.Get("3").Value()).To.Equal(3)
223+
Expect(cache.Get("4").Value()).To.Equal(4)
224+
225+
cache.Set("5", 5, time.Minute)
226+
time.Sleep(time.Millisecond * 5)
227+
Expect(cache.GetDropped()).To.Equal(1)
228+
Expect(cache.Get("2")).To.Equal(nil)
229+
Expect(cache.Get("3").Value()).To.Equal(3)
230+
Expect(cache.Get("4").Value()).To.Equal(4)
231+
Expect(cache.Get("5").Value()).To.Equal(5)
232+
233+
cache.SetMaxSize(10)
234+
cache.Set("6", 6, time.Minute)
235+
time.Sleep(time.Millisecond * 10)
236+
Expect(cache.GetDropped()).To.Equal(0)
237+
Expect(cache.Get("3").Value()).To.Equal(3)
238+
Expect(cache.Get("4").Value()).To.Equal(4)
239+
Expect(cache.Get("5").Value()).To.Equal(5)
240+
Expect(cache.Get("6").Value()).To.Equal(6)
241+
}
242+
211243
type SizedItem struct {
212244
id int
213245
s int64

layeredcache.go

Lines changed: 31 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -10,15 +10,13 @@ import (
1010

1111
type LayeredCache struct {
1212
*Configuration
13-
list *list.List
14-
buckets []*layeredBucket
15-
bucketMask uint32
16-
size int64
17-
deletables chan *Item
18-
promotables chan *Item
19-
donec chan struct{}
20-
getDroppedReq chan struct{}
21-
getDroppedRes chan int
13+
list *list.List
14+
buckets []*layeredBucket
15+
bucketMask uint32
16+
size int64
17+
deletables chan *Item
18+
promotables chan *Item
19+
control chan interface{}
2220
}
2321

2422
// Create a new layered cache with the specified configuration.
@@ -41,8 +39,7 @@ func Layered(config *Configuration) *LayeredCache {
4139
bucketMask: uint32(config.buckets) - 1,
4240
buckets: make([]*layeredBucket, config.buckets),
4341
deletables: make(chan *Item, config.deleteBuffer),
44-
getDroppedReq: make(chan struct{}),
45-
getDroppedRes: make(chan int),
42+
control: make(chan interface{}),
4643
}
4744
for i := 0; i < int(config.buckets); i++ {
4845
c.buckets[i] = &layeredBucket{
@@ -163,19 +160,26 @@ func (c *LayeredCache) Clear() {
163160

164161
func (c *LayeredCache) Stop() {
	// Closing promotables signals the worker to exit; it closes control
	// when done, which unblocks the receive below.
	close(c.promotables)
	<-c.control
}
168165

169166
// Gets the number of items removed from the cache due to memory pressure since
170167
// the last time GetDropped was called
171168
func (c *LayeredCache) GetDropped() int {
172-
c.getDroppedReq <- struct{}{}
173-
return <-c.getDroppedRes
169+
res := make(chan int)
170+
c.control <- getDropped{res: res}
171+
return <-res
172+
}
173+
174+
// Sets a new max size. That can result in a GC being run if the new maxium size
175+
// is smaller than the cached size
176+
func (c *LayeredCache) SetMaxSize(size int64) {
177+
c.control <- setMaxSize{size}
174178
}
175179

176180
// restart recreates the channels the worker consumes and launches a
// fresh worker goroutine. (deletables is not recreated here, matching
// the Layered constructor, which allocates it up front.)
func (c *LayeredCache) restart() {
	c.promotables = make(chan *Item, c.promoteBuffer)
	c.control = make(chan interface{})
	go c.worker()
}
181185

@@ -199,7 +203,7 @@ func (c *LayeredCache) promote(item *Item) {
199203
}
200204

201205
func (c *LayeredCache) worker() {
202-
defer close(c.donec)
206+
defer close(c.control)
203207
dropped := 0
204208
for {
205209
select {
@@ -220,9 +224,17 @@ func (c *LayeredCache) worker() {
220224
}
221225
c.list.Remove(item.element)
222226
}
223-
case _ = <-c.getDroppedReq:
224-
c.getDroppedRes <- dropped
225-
dropped = 0
227+
case control := <-c.control:
228+
switch msg := control.(type) {
229+
case getDropped:
230+
msg.res <- dropped
231+
dropped = 0
232+
case setMaxSize:
233+
c.maxSize = msg.size
234+
if c.size > c.maxSize {
235+
dropped += c.gc()
236+
}
237+
}
226238
}
227239
}
228240
}

layeredcache_test.go

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -174,6 +174,38 @@ func (_ LayeredCacheTests) RemovesOldestItemWhenFull() {
174174
Expect(cache.GetDropped()).To.Equal(0)
175175
}
176176

177+
func (_ LayeredCacheTests) ResizeOnTheFly() {
178+
cache := Layered(Configure().MaxSize(9).ItemsToPrune(1))
179+
for i := 0; i < 5; i++ {
180+
cache.Set(strconv.Itoa(i), "a", i, time.Minute)
181+
}
182+
cache.SetMaxSize(3)
183+
time.Sleep(time.Millisecond * 10)
184+
Expect(cache.GetDropped()).To.Equal(2)
185+
Expect(cache.Get("0", "a")).To.Equal(nil)
186+
Expect(cache.Get("1", "a")).To.Equal(nil)
187+
Expect(cache.Get("2", "a").Value()).To.Equal(2)
188+
Expect(cache.Get("3", "a").Value()).To.Equal(3)
189+
Expect(cache.Get("4", "a").Value()).To.Equal(4)
190+
191+
cache.Set("5", "a", 5, time.Minute)
192+
time.Sleep(time.Millisecond * 5)
193+
Expect(cache.GetDropped()).To.Equal(1)
194+
Expect(cache.Get("2", "a")).To.Equal(nil)
195+
Expect(cache.Get("3", "a").Value()).To.Equal(3)
196+
Expect(cache.Get("4", "a").Value()).To.Equal(4)
197+
Expect(cache.Get("5", "a").Value()).To.Equal(5)
198+
199+
cache.SetMaxSize(10)
200+
cache.Set("6", "a", 6, time.Minute)
201+
time.Sleep(time.Millisecond * 10)
202+
Expect(cache.GetDropped()).To.Equal(0)
203+
Expect(cache.Get("3", "a").Value()).To.Equal(3)
204+
Expect(cache.Get("4", "a").Value()).To.Equal(4)
205+
Expect(cache.Get("5", "a").Value()).To.Equal(5)
206+
Expect(cache.Get("6", "a").Value()).To.Equal(6)
207+
}
208+
177209
func newLayered() *LayeredCache {
178210
return Layered(Configure())
179211
}

0 commit comments

Comments
 (0)