@@ -2998,6 +2998,10 @@ rb_mmtk_make_dfree_job(void (*dfree)(void *), void *data)
     job->kind = MMTK_FJOB_DFREE;
     job->as.dfree.dfree = dfree;
     job->as.dfree.data = data;
+
+    RUBY_DEBUG_LOG("Created dfree job %p. dfree: %p, data: %p\n",
+                   job, dfree, data);
+
     rb_mmtk_push_final_job(job);
 }
 
@@ -3011,6 +3015,9 @@ rb_mmtk_make_finalize_job(VALUE obj, VALUE finalizer_array)
     struct MMTk_FinalJob *job = (struct MMTk_FinalJob *)xmalloc(sizeof(struct MMTk_FinalJob));
     job->kind = MMTK_FJOB_FINALIZE;
 
+    RUBY_DEBUG_LOG("Created finalize job %p. obj: %p, finalizer_array: %p\n",
+                   job, (void *)obj, (void *)finalizer_array);
+
     VALUE observed_id = Qnil;
     if (FL_TEST(obj, FL_SEEN_OBJ_ID)) {
         // obj is technically dead already,
@@ -3364,16 +3371,22 @@ rb_mmtk_run_final_job(struct MMTk_FinalJob *job)
 {
     switch (job->kind) {
       case MMTK_FJOB_DFREE: {
+        RUBY_DEBUG_LOG("Running dfree job %p. dfree: %p, data: %p\n",
+                       job, job->as.dfree.dfree, job->as.dfree.data);
         job->as.dfree.dfree(job->as.dfree.data);
         break;
       }
       case MMTK_FJOB_FINALIZE: {
+        VALUE objid = job->as.finalize.observed_id;
+        VALUE table = job->as.finalize.finalizer_array;
+
+        RUBY_DEBUG_LOG("Running finalize job %p. observed_id: %p, table: %p\n",
+                       job, (void *)objid, (void *)table);
+
         if (rb_gc_obj_free_on_exit_started()) {
             rb_bug("Finalize job still exists after obj_free on exit has started.");
         }
 
-        VALUE objid = job->as.finalize.observed_id;
-        VALUE table = job->as.finalize.finalizer_array;
         rb_gc_run_obj_finalizer(objid, RARRAY_LEN(table), get_final, (void *)table);
 
         break;
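
For context, the dispatch above runs over a tagged union: as the diff itself shows, each MMTk_FinalJob carries a kind discriminator plus a per-kind payload in its as union. Below is a minimal, self-contained sketch of that layout with invented names (final_job, job_kind), not the real definitions from this codebase, purely to make the switch easier to follow.

/* Minimal sketch of a tagged-union final job, with invented names
 * (final_job, job_kind) rather than the real MMTk_FinalJob definition.
 * It only illustrates the kind/as layout the switch above dispatches on. */
#include <stdio.h>
#include <stdlib.h>

enum job_kind { JOB_DFREE, JOB_FINALIZE };

struct final_job {
    enum job_kind kind;
    union {
        struct { void (*dfree)(void *); void *data; } dfree;
        struct { long observed_id; } finalize;
    } as;
};

static void run_final_job(const struct final_job *job)
{
    switch (job->kind) {
      case JOB_DFREE:
        /* Call the stored destructor on the stored pointer. */
        job->as.dfree.dfree(job->as.dfree.data);
        break;
      case JOB_FINALIZE:
        /* A real implementation would run the Ruby-level finalizers here. */
        printf("finalize object id %ld\n", job->as.finalize.observed_id);
        break;
    }
}

int main(void)
{
    char *buf = malloc(16);
    struct final_job a = { .kind = JOB_DFREE,    .as.dfree    = { free, buf } };
    struct final_job b = { .kind = JOB_FINALIZE, .as.finalize = { 42 } };
    run_final_job(&a);
    run_final_job(&b);
    return 0;
}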
@@ -3580,7 +3593,13 @@ rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)
 #if USE_MMTK
     if (rb_mmtk_enabled_p()) {
         // Force to run finalizers, the MMTk style.
-        while (finalizer_table->num_entries) {
+        // We repeatedly vacate the finalizer table and run final jobs
+        // until the finalizer table is empty and there are no pending final jobs.
+        // Note: Final jobs are executed immediately after `GC.start`,
+        // and are also executed when interrupted after a GC triggered by allocation.
+        // Just in case the VM exits before the interrupts are handled,
+        // we explicitly drain the pending final jobs here.
+        while (finalizer_table->num_entries || heap_pages_deferred_final != 0) {
             // We move all elements from the finalizer_table to heap_pages_deferred_final.
             st_foreach(finalizer_table, rb_mmtk_evacuate_finalizer_table_on_exit_i, 0);
 
@@ -3590,6 +3609,9 @@ rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)
             // We need to repeat because a finalizer may register new finalizers.
         }
 
+        RUBY_ASSERT(finalizer_table->num_entries == 0);
+        RUBY_ASSERT(heap_pages_deferred_final == 0);
+
         // Tell the world that obj_free on exit has started.
         rb_gc_set_obj_free_on_exit_started();
 
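
The comment added above spells out the exit-time contract: keep evacuating the finalizer table into the deferred list and running the resulting jobs until both are empty, because a finalizer body may register new finalizers. The following stand-alone sketch uses made-up counters (table_entries, pending_jobs) in place of finalizer_table->num_entries and heap_pages_deferred_final; it is not the real implementation, only an illustration of why a single pass is not enough.

/* Stand-alone sketch of the shutdown drain loop, assuming made-up
 * counters: table_entries stands in for finalizer_table->num_entries and
 * pending_jobs for heap_pages_deferred_final. Not the real implementation. */
#include <stdio.h>

static int table_entries = 3;     /* finalizers still registered */
static int pending_jobs  = 0;     /* jobs evacuated but not yet run */
static int registered_extra = 0;

static void evacuate_table(void)
{
    /* Move every registered finalizer into the pending-job list. */
    pending_jobs += table_entries;
    table_entries = 0;
}

static void run_pending_jobs(void)
{
    while (pending_jobs > 0) {
        pending_jobs--;
        /* A finalizer body may itself register a new finalizer; model
         * that once so the outer loop needs a second pass. */
        if (!registered_extra) {
            registered_extra = 1;
            table_entries++;
        }
    }
}

int main(void)
{
    int passes = 0;
    /* Same shape as the shutdown loop: repeat until both are empty. */
    while (table_entries || pending_jobs) {
        evacuate_table();
        run_pending_jobs();
        passes++;
    }
    printf("drained in %d passes\n", passes);   /* prints 2 with the data above */
    return 0;
}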
@@ -10608,4 +10630,11 @@ rb_mmtk_newobj_raw(VALUE klass, VALUE flags, int wb_protected, size_t payload_size)
 {
     return rb_mmtk_newobj_of_inner(klass, flags, wb_protected, payload_size);
 }
+
+void
+rb_mmtk_gc_finalize_deferred_register(void)
+{
+    rb_objspace_t *objspace = rb_gc_get_objspace();
+    gc_finalize_deferred_register(objspace);
+}
 #endif
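
As far as this diff shows, the new rb_mmtk_gc_finalize_deferred_register is a thin exported wrapper that lets code outside this translation unit request deferred-finalization registration without exposing the file-local gc_finalize_deferred_register directly. A hypothetical sketch of that "static helper behind a public wrapper" pattern, with all names invented, looks like this.

/* Hypothetical illustration only: a file-static helper stays private to
 * this .c file and is reached from other translation units through a
 * thin public wrapper. Names below are invented, not Ruby APIs. */
#include <stdio.h>

static int module_state;                 /* stands in for the objspace */

static void helper_register(int *state)  /* stands in for the file-local helper */
{
    printf("deferred work registered on %p\n", (void *)state);
}

void module_register_deferred(void)      /* stands in for the exported wrapper */
{
    helper_register(&module_state);
}

int main(void)
{
    module_register_deferred();
    return 0;
}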