@@ -48,12 +48,18 @@ pub const BwTree = struct {
4848 root_pid : usize ,
4949 next_page_id : std .atomic .Value (usize ),
5050 allocator : std.mem.Allocator ,
51+ // Deferred reclamation: chains retired by consolidate() are parked here and
52+ // freed on the next consolidate() or deinit(). NOTE(review): this only delays reclamation; it does not verify that concurrent readers have actually quiesced.
53+ retired : std .ArrayList (* Page ),
54+ retired_mu : std.Thread.Mutex ,
5155
52- pub fn init (allocator : std.mem.Allocator ) BwTree {
56+ pub fn init (allocator : std.mem.Allocator ) ! BwTree {
5357 var tree : BwTree = undefined ;
5458 tree .allocator = allocator ;
5559 tree .root_pid = 0 ;
5660 tree .next_page_id = std .atomic .Value (usize ).init (1 );
61+ tree .retired = std .ArrayList (* Page ).init (allocator );
62+ tree .retired_mu = .{};
5763
5864 // Zero out all mapping slots
5965 var i : usize = 0 ;
@@ -62,15 +68,19 @@ pub const BwTree = struct {
6268 }
6369
6470 // Create root base page (empty)
65- const root_page = allocator .create (Page ) catch unreachable ;
66- const empty_entries = allocator .alloc (Entry , 0 ) catch unreachable ;
71+ const root_page = try allocator .create (Page );
72+ errdefer allocator .destroy (root_page );
73+ const empty_entries = try allocator .alloc (Entry , 0 );
6774 root_page .* = Page { .base = BasePage { .entries = empty_entries } };
6875 tree .mapping [0 ] = std .atomic .Value (usize ).init (@intFromPtr (root_page ));
6976
7077 return tree ;
7178 }
7279
7380 pub fn deinit (self : * BwTree ) void {
81+ // Free all retired chains first
82+ self .drainRetired ();
83+ self .retired .deinit ();
7484 var i : usize = 0 ;
7585 while (i < MAX_PAGES ) : (i += 1 ) {
7686 const ptr_val = self .mapping [i ].load (.acquire );
@@ -95,6 +105,16 @@ pub const BwTree = struct {
95105 }
96106 }
97107
108+ /// Drain the retired list — frees parked chains. NOTE(review): performs no epoch/quiescence check; callers must guarantee no reader still holds pointers into these chains before invoking this.
109+ fn drainRetired (self : * BwTree ) void {
110+ self .retired_mu .lock ();
111+ defer self .retired_mu .unlock ();
112+ for (self .retired .items ) | page | {
113+ self .freeChain (page );
114+ }
115+ self .retired .clearRetainingCapacity ();
116+ }
117+
98118 // ─── allocPage ───────────────────────────────────────────────────────
99119
100120 fn allocPage (self : * BwTree ) usize {
@@ -225,6 +245,10 @@ pub const BwTree = struct {
225245
226246 /// When delta chain exceeds MAX_DELTA_CHAIN, merge into a new base page.
227247 pub fn consolidate (self : * BwTree , page_id : usize ) void {
248+ // Drain chains retired by earlier consolidations. NOTE(review): "survived one
249+ // consolidation cycle" does not prove reader quiescence — a reader still holding a retired pointer across this call would dereference freed memory.
250+ self .drainRetired ();
251+
228252 const old = self .mapping [page_id ].load (.acquire );
229253 if (old == 0 ) return ;
230254
@@ -264,9 +288,14 @@ pub const BwTree = struct {
264288 .acq_rel ,
265289 .acquire ,
266290 ) == null ) {
267- // Success — old chain will be reclaimed by epoch-based GC.
268- // Do NOT free here — concurrent readers may still be traversing it.
269- // TODO: integrate with mvcc.zig epoch GC for safe reclamation.
291+ // Success — park old chain head for deferred reclamation.
292+ // Concurrent readers may still be traversing it; it will be freed
293+ // on the next consolidation cycle (two-phase epoch approach).
294+ self .retired_mu .lock ();
295+ defer self .retired_mu .unlock ();
296+ self .retired .append (page ) catch {
297+ // If we can't track it, leak it — better than use-after-free.
298+ };
270299 } else {
271300 // Another thread consolidated first; discard our work
272301 self .allocator .free (new_entries );
@@ -276,12 +305,17 @@ pub const BwTree = struct {
276305
277306 fn collectEntries (self : * BwTree , page : * Page , map : * std .AutoHashMap (u64 , Entry )) void {
278307 _ = self ;
279- // Walk to base first, then apply deltas in reverse (base → newest)
280- var stack : [256 ]* Page = undefined ;
308+ // Walk to base first, then apply deltas in reverse (base → newest).
309+ // Use a bounded stack — MAX_DELTA_CHAIN is 8, but under CAS contention
310+ // chains can temporarily grow longer. NOTE(review): if the cap is hit, the walk
311+ // breaks before reaching the base page, so the base page and oldest deltas are silently dropped — that is data loss for this page, not benign truncation.
312+ const STACK_CAP = 1024 ;
313+ var stack : [STACK_CAP ]* Page = undefined ;
281314 var depth : usize = 0 ;
282315 var cur : ? * Page = page ;
283316
284317 while (cur ) | p | {
318+ if (depth >= STACK_CAP ) break ; // safety bound
285319 stack [depth ] = p ;
286320 depth += 1 ;
287321 switch (p .* ) {
@@ -342,7 +376,7 @@ fn makeEntry(key: u64, doc_id: u64) Entry {
342376}
343377
344378test "bwtree insert and search" {
345- var tree = BwTree .init (std .testing .allocator );
379+ var tree = try BwTree .init (std .testing .allocator );
346380 defer tree .deinit ();
347381
348382 try tree .insert (10 , makeEntry (10 , 100 ));
@@ -366,7 +400,7 @@ test "bwtree insert and search" {
366400}
367401
368402test "bwtree delete" {
369- var tree = BwTree .init (std .testing .allocator );
403+ var tree = try BwTree .init (std .testing .allocator );
370404 defer tree .deinit ();
371405
372406 try tree .insert (10 , makeEntry (10 , 100 ));
@@ -385,7 +419,7 @@ test "bwtree delete" {
385419
386420test "bwtree consolidation" {
387421 // Use page_allocator — old chains deferred to epoch GC
388- var tree = BwTree .init (std .heap .page_allocator );
422+ var tree = try BwTree .init (std .heap .page_allocator );
389423
390424 // Insert enough entries to trigger consolidation (> MAX_DELTA_CHAIN = 8)
391425 var i : u64 = 0 ;
@@ -408,7 +442,7 @@ test "bwtree consolidation" {
408442test "bwtree concurrent inserts" {
409443 // Use page_allocator — consolidated chains are intentionally leaked
410444 // (deferred to epoch-based GC, not available in test context)
411- var tree = BwTree .init (std .heap .page_allocator );
445+ var tree = try BwTree .init (std .heap .page_allocator );
412446
413447 const NUM_THREADS = 4 ;
414448 const KEYS_PER_THREAD = 50 ;
0 commit comments