Skip to content

Commit 3cb6b16

Browse files
committed
add sync.Pool for fringe and leaf
1 parent ccbc273 commit 3cb6b16

File tree

13 files changed

+479
-251
lines changed

13 files changed

+479
-251
lines changed

README.md

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -149,9 +149,7 @@ other methods unmodified to the underlying Table.
149149
Some delegated methods are pointless without a payload.
150150

151151
```golang
152-
type Lite struct {
153-
Table[struct{}]
154-
}
152+
type Lite struct { ... }
155153
// Lite is just a convenience wrapper for Table, instantiated with an
156154
// empty struct as payload. Lite is ideal for simple IP ACLs
157155
// (access-control-lists) with plain true/false results without a payload.

dumper.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ func (n *node[V]) dump(w io.Writer, path stridePath, depth int, is4 bool) {
9797
fmt.Fprintln(w)
9898

9999
// skip values if the payload is the empty struct
100-
if _, ok := any(n.prefixes.Items[0]).(struct{}); !ok {
100+
if _, ok := any(n.prefixes.Items[0]).(zeroStruct); !ok {
101101

102102
// print the values for this node
103103
fmt.Fprintf(w, "%svalues(#%d):", indent, nPfxCount)
@@ -147,7 +147,7 @@ func (n *node[V]) dump(w io.Writer, path stridePath, depth int, is4 bool) {
147147

148148
// Lite: val is the empty struct, don't print it
149149
switch any(pc.value).(type) {
150-
case struct{}:
150+
case zeroStruct:
151151
fmt.Fprintf(w, " %s:{%s}", addrFmt(addr, is4), pc.prefix)
152152
default:
153153
fmt.Fprintf(w, " %s:{%s, %v}", addrFmt(addr, is4), pc.prefix, pc.value)
@@ -169,7 +169,7 @@ func (n *node[V]) dump(w io.Writer, path stridePath, depth int, is4 bool) {
169169

170170
// Lite: val is the empty struct, don't print it
171171
switch any(pc.value).(type) {
172-
case struct{}:
172+
case zeroStruct:
173173
fmt.Fprintf(w, " %s:{%s}", addrFmt(addr, is4), fringePfx)
174174
default:
175175
fmt.Fprintf(w, " %s:{%s, %v}", addrFmt(addr, is4), fringePfx, pc.value)

example_lite_concurrent_test.go

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -89,15 +89,16 @@ func (lf *SyncLite) Delete(pfx netip.Prefix) {
8989
func ExampleLite_concurrent() {
9090
wg := sync.WaitGroup{}
9191

92-
syncTbl := NewSyncLite().WithPool()
92+
syncLite := NewSyncLite()
93+
syncLite.WithPool()
9394

9495
wg.Add(1)
9596
go func() {
9697
defer wg.Done()
9798
for range 1_000_000 {
9899
for _, s := range exampleIPs {
99100
ip := netip.MustParseAddr(s)
100-
_ = syncTbl.Contains(ip)
101+
_ = syncLite.Contains(ip)
101102
}
102103
}
103104
}()
@@ -108,7 +109,7 @@ func ExampleLite_concurrent() {
108109
for range 10_000 {
109110
for _, s := range examplePrefixes {
110111
pfx := netip.MustParsePrefix(s)
111-
syncTbl.Insert(pfx)
112+
syncLite.Insert(pfx)
112113
}
113114
}
114115
}()
@@ -119,7 +120,7 @@ func ExampleLite_concurrent() {
119120
for range 10_000 {
120121
for _, s := range examplePrefixes {
121122
pfx := netip.MustParsePrefix(s)
122-
syncTbl.Delete(pfx)
123+
syncLite.Delete(pfx)
123124
}
124125
}
125126
}()

lite.go

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,13 @@ import (
77
"net/netip"
88
)
99

10+
// zeroStruct is the empty payload type used by Lite instead of a bare
// struct{}, so the payload can carry methods (e.g. Clone).
type zeroStruct struct{}

// Clone implements the Cloner interface. zeroStruct carries no state,
// so cloning just yields another zero value.
func (zeroStruct) Clone() zeroStruct {
	var z zeroStruct
	return z
}
16+
1017
// Lite is just a convenience wrapper for Table, instantiated with an
1118
// empty struct as payload. Lite is ideal for simple IP ACLs
1219
// (access-control-lists) with plain true/false results without a payload.
@@ -20,7 +27,7 @@ import (
2027
// - Update
2128
// - UpdatePersist
2229
type Lite struct {
23-
Table[struct{}]
30+
Table[zeroStruct]
2431
}
2532

2633
// WithPool is an adapter for the underlying table.

multipool.go

Lines changed: 187 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,187 @@
1+
package bart
2+
3+
import (
4+
"net/netip"
5+
"sync"
6+
"sync/atomic"
7+
)
8+
9+
// multiPool groups sub-pools for internal node, leaf, and fringe types.
// Each sub-pool handles allocation, reuse, and statistics tracking
// for its corresponding node type.
//
// A nil *multiPool is valid: the get/put methods then fall back to
// plain allocation without pooling or statistics.
type multiPool[V any] struct {
	node   *nodePool[V]   // pool for internal trie nodes
	leaf   *leafPool[V]   // pool for prefix/value leaf nodes
	fringe *fringePool[V] // pool for fringe nodes
}
17+
18+
// newMultiPool initializes and returns a new pool structure with sub-pools
19+
// for internal, leaf, and fringe nodes.
20+
func newMultiPool[V any]() *multiPool[V] {
21+
return &multiPool[V]{
22+
node: newNodePool[V](),
23+
leaf: newLeafPool[V](),
24+
fringe: newFringePool[V](),
25+
}
26+
}
27+
28+
// getNode obtains a *node[V] from the pool.
29+
// If the parent pool is nil, a new instance is returned without tracking.
30+
func (mp *multiPool[V]) getNode() *node[V] {
31+
if mp == nil {
32+
return new(node[V])
33+
}
34+
mp.node.currentLive.Add(1)
35+
return mp.node.Get().(*node[V])
36+
}
37+
38+
// getLeaf obtains a *leafNode[V] from the pool, initialized with
39+
// a prefix and value. If the pool is nil, a fresh instance is created.
40+
func (mp *multiPool[V]) getLeaf(pfx netip.Prefix, val V) *leafNode[V] {
41+
if mp == nil {
42+
return &leafNode[V]{prefix: pfx, value: val}
43+
}
44+
mp.leaf.currentLive.Add(1)
45+
l := mp.leaf.Get().(*leafNode[V])
46+
l.prefix = pfx
47+
l.value = val
48+
return l
49+
}
50+
51+
// getFringe obtains a *fringeNode[V] from the pool, initialized with a value.
52+
// If the pool is nil, a new instance is returned without tracking.
53+
func (mp *multiPool[V]) getFringe(val V) *fringeNode[V] {
54+
if mp == nil {
55+
return &fringeNode[V]{value: val}
56+
}
57+
mp.fringe.currentLive.Add(1)
58+
f := mp.fringe.Get().(*fringeNode[V])
59+
f.value = val
60+
return f
61+
}
62+
63+
// putNode returns an internal node back to its pool for reuse.
64+
// If the pool is nil, the node is discarded.
65+
func (mp *multiPool[V]) putNode(n *node[V]) {
66+
if mp != nil {
67+
n.reset() // clear internal state but keep allocated memory
68+
mp.node.currentLive.Add(-1)
69+
mp.node.Put(n)
70+
}
71+
}
72+
73+
// putLeaf returns a leaf node back to its pool for reuse.
74+
// If the pool is nil, the node is discarded.
75+
func (mp *multiPool[V]) putLeaf(l *leafNode[V]) {
76+
if mp != nil {
77+
mp.leaf.currentLive.Add(-1)
78+
mp.leaf.Put(l)
79+
}
80+
}
81+
82+
// putFringe returns a fringe node back to its pool for reuse.
83+
// If the pool is nil, the node is discarded.
84+
func (mp *multiPool[V]) putFringe(f *fringeNode[V]) {
85+
if mp != nil {
86+
mp.fringe.currentLive.Add(-1)
87+
mp.fringe.Put(f)
88+
}
89+
}
90+
91+
// nodeStats returns the number of currently live (checked-out) nodes
92+
// and the total number of *node[V] objects ever allocated by this pool.
93+
func (mp *multiPool[V]) nodeStats() (live int64, total int64) {
94+
if mp == nil {
95+
return 0, 0
96+
}
97+
return mp.node.currentLive.Load(), mp.node.totalAllocated.Load()
98+
}
99+
100+
// leafStats returns the current number of in-use leaf nodes and
101+
// the total number created across the pool's lifetime.
102+
func (mp *multiPool[V]) leafStats() (live int64, total int64) {
103+
if mp == nil {
104+
return 0, 0
105+
}
106+
return mp.leaf.currentLive.Load(), mp.leaf.totalAllocated.Load()
107+
}
108+
109+
// fringeStats returns the current number of in-use fringe nodes and
// the total number created across the pool's lifetime.
// A nil receiver reports zero for both counters.
func (mp *multiPool[V]) fringeStats() (live int64, total int64) {
	if mp == nil {
		return 0, 0
	}
	return mp.fringe.currentLive.Load(), mp.fringe.totalAllocated.Load()
}
117+
118+
// ##################################################################
119+
120+
// nodePool is a type-safe wrapper around sync.Pool,
// specialized for managing *node[V] instances.
//
// It supports efficient memory reuse and tracks allocation
// and usage statistics to aid debugging and profiling.
type nodePool[V any] struct {
	sync.Pool // embedded pool holding *node[V] values

	totalAllocated atomic.Int64 // total number of *node[V] instances ever created
	currentLive    atomic.Int64 // number of currently checked-out (in-use) nodes
}
130+
131+
// newNodePool constructs and returns a nodePool with tracking enabled.
132+
func newNodePool[V any]() *nodePool[V] {
133+
np := &nodePool[V]{}
134+
np.New = func() any {
135+
np.totalAllocated.Add(1)
136+
return new(node[V])
137+
}
138+
return np
139+
}
140+
141+
// ##################################################################
142+
143+
// leafPool is a sync.Pool wrapper for *leafNode[V] objects.
// It tracks allocation and reuse statistics for monitoring purposes.
type leafPool[V any] struct {
	sync.Pool // embedded pool holding *leafNode[V] values

	totalAllocated atomic.Int64 // total number of *leafNode[V] instances ever created
	currentLive    atomic.Int64 // number of currently checked-out (in-use) leaves
}
150+
151+
// newLeafPool initializes a leafPool instance with a node constructor.
152+
func newLeafPool[V any]() *leafPool[V] {
153+
lp := &leafPool[V]{}
154+
lp.New = func() any {
155+
lp.totalAllocated.Add(1)
156+
return new(leafNode[V])
157+
}
158+
return lp
159+
}
160+
161+
// ##################################################################
162+
163+
// fringePool is a type-safe wrapper around sync.Pool,
// specialized for managing *fringeNode[V] instances.
//
// It efficiently reuses node memory and tracks statistics
// on allocations and active use for debugging and performance tuning.
type fringePool[V any] struct {
	sync.Pool // embedded pool holding *fringeNode[V] values

	totalAllocated atomic.Int64 // total number of *fringeNode[V] ever allocated
	currentLive    atomic.Int64 // number of nodes currently in use (not returned to pool)
}
174+
175+
// newFringePool creates and returns a new pool for *fringeNode[V] instances.
176+
//
177+
// The pool uses sync.Pool internally, and defines a New function
178+
// that creates new nodes with statistical tracking.
179+
func newFringePool[V any]() *fringePool[V] {
180+
fp := &fringePool[V]{}
181+
fp.New = func() any {
182+
fp.totalAllocated.Add(1)
183+
184+
return new(fringeNode[V])
185+
}
186+
return fp
187+
}

0 commit comments

Comments
 (0)