#include "ruby/internal/config.h"
#include "internal.h"
#include "internal/cmdlineopt.h"
#include "internal/gc.h"
#include "internal/imemo.h"
#include "internal/thread.h"
#include "internal/variable.h"
#include "iseq.h"
#include "ruby/ruby.h"
#include "ractor_core.h"
#include "vm_core.h"
#include "ruby/st.h"
#include "vm_sync.h"
#ifndef _WIN32
#include "stdatomic.h"
#endif
#ifdef __APPLE__
#include <sys/sysctl.h>
#endif
#ifdef __GNUC__
#define PREFETCH(addr, write_p) __builtin_prefetch(addr, write_p)
#define EXPECT(expr, val) __builtin_expect(expr, val)
#define ATTRIBUTE_UNUSED __attribute__((unused))
#else
#define PREFETCH(addr, write_p)
#define EXPECT(expr, val) (expr)
#define ATTRIBUTE_UNUSED
#endif
////////////////////////////////////////////////////////////////////////////////
// Workaround: Declare some data types defined elsewhere.
////////////////////////////////////////////////////////////////////////////////
// rb_objspace_t from gc.c
typedef struct rb_objspace rb_objspace_t;
#define rb_objspace (*rb_objspace_of(GET_VM()))
#define rb_objspace_of(vm) ((vm)->objspace)
// From ractor.c. gc.c also declares this function locally.
bool rb_obj_is_main_ractor(VALUE gv);
#if USE_MMTK
#include "internal/mmtk_support.h"
#include "internal/mmtk.h"
////////////////////////////////////////////////////////////////////////////////
// Mirror some data structures from mmtk-core.
// TODO: We are having problems generating the BumpPointer struct from mmtk-core.
// It should be generated automatically using cbindgen.
////////////////////////////////////////////////////////////////////////////////
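// Thread-local bump-pointer state: cursor is the address of the next
// allocation, and limit is the end of the current block.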
struct BumpPointer {
uintptr_t cursor;
uintptr_t limit;
};
////////////////////////////////////////////////////////////////////////////////
// Command line arguments
////////////////////////////////////////////////////////////////////////////////
const char *rb_mmtk_pre_arg_plan = NULL;
const char *rb_mmtk_post_arg_plan = NULL;
const char *rb_mmtk_chosen_plan = NULL;
bool rb_mmtk_plan_is_immix = false;
bool rb_mmtk_plan_uses_bump_pointer = false;
bool rb_mmtk_plan_implicitly_pinning = false;
bool rb_mmtk_use_barrier = false;
size_t rb_mmtk_pre_max_heap_size = 0;
size_t rb_mmtk_post_max_heap_size = 0;
bool rb_mmtk_max_heap_parse_error = false;
size_t rb_mmtk_max_heap_size = 0;
// Use up to 80% of memory for the heap
static const int rb_mmtk_heap_limit_percentage = 80;
////////////////////////////////////////////////////////////////////////////////
// Global and thread-local states.
////////////////////////////////////////////////////////////////////////////////
static bool mmtk_enable = false;
RubyBindingOptions ruby_binding_options;
MMTk_RubyUpcalls ruby_upcalls;
// TODO: Generate them as constants.
static uintptr_t mmtk_vo_bit_log_region_size;
static uintptr_t mmtk_vo_bit_base_addr;
bool rb_mmtk_obj_free_on_exit_started = false;
// DEBUG: Vanilla GC timing
static struct gc_timing {
bool enabled;
bool in_alloc_slow_path;
uint64_t gc_time_ns;
struct timespec last_enabled;
struct timespec last_gc_start;
uint64_t last_num_of_gc;
uint64_t last_vanilla_mark;
uint64_t last_vanilla_sweep;
} rb_mmtk_vanilla_timing;
// xmalloc accounting
struct rb_mmtk_xmalloc_accounting {
size_t malloc_total;
} rb_mmtk_xmalloc_accounting_t;
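// Global state shared between mutator threads and GC threads,
// used for the stop-the-world / start-the-world handshake.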
struct RubyMMTKGlobal {
pthread_mutex_t mutex;
pthread_cond_t cond_world_stopped;
pthread_cond_t cond_world_started;
size_t stopped_ractors;
size_t start_the_world_count;
} rb_mmtk_global = {
.mutex = PTHREAD_MUTEX_INITIALIZER,
.cond_world_stopped = PTHREAD_COND_INITIALIZER,
.cond_world_started = PTHREAD_COND_INITIALIZER,
.stopped_ractors = 0,
.start_the_world_count = 0,
};
struct rb_mmtk_address_buffer {
void **slots;
size_t len;
size_t capa;
};
#define RB_MMTK_VALUES_BUFFER_SIZE 4096
struct rb_mmtk_values_buffer {
VALUE objects[RB_MMTK_VALUES_BUFFER_SIZE];
size_t len;
};
struct rb_mmtk_mutator_local {
struct BumpPointer *immix_bump_pointer;
// for prefetching
uintptr_t last_new_cursor;
// for prefetching
uintptr_t last_meta_addr;
struct rb_mmtk_values_buffer obj_free_candidates;
struct rb_mmtk_values_buffer ppp_buffer;
};
#ifdef RB_THREAD_LOCAL_SPECIFIER
RB_THREAD_LOCAL_SPECIFIER struct MMTk_GCThreadTLS *rb_mmtk_gc_thread_tls;
RB_THREAD_LOCAL_SPECIFIER struct rb_mmtk_mutator_local rb_mmtk_mutator_local;
#else // RB_THREAD_LOCAL_SPECIFIER
#error We currently need language-supported TLS
#endif // RB_THREAD_LOCAL_SPECIFIER
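// Run func(arg) while holding rb_mmtk_global.mutex,
// aborting the process if the mutex cannot be locked or unlocked.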
static void
rb_mmtk_use_mmtk_global(void (*func)(void *), void* arg)
{
int err;
if ((err = pthread_mutex_lock(&rb_mmtk_global.mutex)) != 0) {
fprintf(stderr, "ERROR: cannot lock rb_mmtk_global.mutex: %s", strerror(err));
abort();
}
func(arg);
if ((err = pthread_mutex_unlock(&rb_mmtk_global.mutex)) != 0) {
fprintf(stderr, "ERROR: cannot release rb_mmtk_global.mutex: %s", strerror(err));
abort();
}
}
// Helper functions for rb_mmtk_values_buffer
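// Append obj to the buffer. Returns true when the buffer becomes full,
// in which case the caller is expected to flush it immediately.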
static bool
rb_mmtk_values_buffer_append(struct rb_mmtk_values_buffer *buffer, VALUE obj)
{
RUBY_ASSERT(buffer != NULL);
buffer->objects[buffer->len] = obj;
buffer->len++;
return buffer->len == RB_MMTK_VALUES_BUFFER_SIZE;
}
static void
rb_mmtk_values_buffer_clear(struct rb_mmtk_values_buffer *buffer)
{
buffer->len = 0;
// Just to be safe.
memset(buffer->objects, 0, sizeof(buffer->objects));
}
////////////////////////////////////////////////////////////////////////////////
// Query for enabled/disabled.
////////////////////////////////////////////////////////////////////////////////
bool
rb_mmtk_enabled_p(void)
{
return mmtk_enable;
}
////////////////////////////////////////////////////////////////////////////////
// MMTk binding initialization
////////////////////////////////////////////////////////////////////////////////
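// Create an MMTk mutator for the current thread and cache a pointer to its
// Immix bump pointer so the allocation fast path can reach it without
// indirection.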
void
rb_mmtk_bind_mutator(MMTk_VMMutatorThread cur_thread)
{
MMTk_Mutator *mutator = mmtk_bind_mutator((MMTk_VMMutatorThread)cur_thread);
cur_thread->mutator = mutator;
cur_thread->mutator_local = (void*)&rb_mmtk_mutator_local;
rb_mmtk_mutator_local.immix_bump_pointer = (struct BumpPointer*)((char*)mutator + mmtk_get_immix_bump_ptr_offset());
}
static size_t
rb_mmtk_system_physical_memory(void)
{
#ifdef __linux__
const long physical_pages = sysconf(_SC_PHYS_PAGES);
const long page_size = sysconf(_SC_PAGE_SIZE);
if (physical_pages == -1 || page_size == -1)
{
rb_bug("failed to get system physical memory size");
}
return (size_t) physical_pages * (size_t) page_size;
#elif defined(__APPLE__)
int mib[2];
mib[0] = CTL_HW;
mib[1] = HW_MEMSIZE; // total physical memory
int64_t physical_memory;
size_t length = sizeof(int64_t);
if (sysctl(mib, 2, &physical_memory, &length, NULL, 0) == -1)
{
rb_bug("failed to get system physical memory size");
}
return (size_t) physical_memory;
#else
#error no implementation of rb_mmtk_system_physical_memory on this platform
#endif
}
static size_t
rb_mmtk_available_system_memory(void)
{
/*
* If we're in a container, we should use the maximum container memory,
* otherwise each container will try to use all system memory. There's
* example logic for this in the JVM and SVM (see CgroupV1Subsystem
* and CgroupV2Subsystem).
*/
return rb_mmtk_system_physical_memory();
}
static void
set_default_options(MMTk_Builder *mmtk_builder)
{
mmtk_builder_set_plan(mmtk_builder, MMTK_DEFAULT_PLAN);
const size_t default_min = 1024 * 1024;
size_t default_max = rb_mmtk_available_system_memory() / 100 * rb_mmtk_heap_limit_percentage;
if (default_max < default_min) {
default_max = default_min;
}
mmtk_builder_set_dynamic_heap_size(mmtk_builder, default_min, default_max);
}
static void
apply_cmdline_options(MMTk_Builder *mmtk_builder)
{
if (rb_mmtk_chosen_plan != NULL) {
mmtk_builder_set_plan(mmtk_builder, rb_mmtk_chosen_plan);
}
if (rb_mmtk_max_heap_size > 0) {
mmtk_builder_set_fixed_heap_size(mmtk_builder, rb_mmtk_max_heap_size);
}
}
static void
set_variables_from_options(MMTk_Builder *mmtk_builder)
{
rb_mmtk_plan_is_immix = mmtk_builder_is_immix(mmtk_builder) || mmtk_builder_is_sticky_immix(mmtk_builder);
RUBY_DEBUG_LOG("mmtk_plan_is_immix = %d\n", rb_mmtk_plan_is_immix);
rb_mmtk_plan_uses_bump_pointer = rb_mmtk_plan_is_immix;
RUBY_DEBUG_LOG("mmtk_plan_uses_bump_pointer = %d\n", rb_mmtk_plan_uses_bump_pointer);
rb_mmtk_plan_implicitly_pinning = mmtk_builder_is_mark_sweep(mmtk_builder);
RUBY_DEBUG_LOG("mmtk_plan_implicitly_pinning = %d\n", rb_mmtk_plan_implicitly_pinning);
// We sometimes force barriers on or off to measure their impact.
const char* barrier_env_var = getenv("RB_MMTK_FORCE_BARRIER");
if (barrier_env_var != NULL) {
if (strcmp(barrier_env_var, "1") == 0) {
rb_mmtk_use_barrier = true;
fprintf(stderr, "WARNING: Force enabling barrier!\n");
} else {
rb_mmtk_use_barrier = false;
fprintf(stderr, "WARNING: Force disabling barrier!\n");
}
} else {
rb_mmtk_use_barrier = mmtk_builder_is_sticky_immix(mmtk_builder);
}
RUBY_DEBUG_LOG("rb_mmtk_use_barrier = %d\n", rb_mmtk_use_barrier);
}
void
rb_mmtk_main_thread_init(void)
{
// (1) Create the builder, using MMTk's built-in defaults.
MMTk_Builder *mmtk_builder = mmtk_builder_default();
// (2) Override MMTK defaults with Ruby defaults.
set_default_options(mmtk_builder);
// (3) Read MMTk environment options (e.g. MMTK_THREADS=100)
mmtk_builder_read_env_var_settings(mmtk_builder);
// (4) Apply cmdline or RUBYOPT options if set.
apply_cmdline_options(mmtk_builder);
// Set Ruby-level variables based on the options that were actually set.
set_variables_from_options(mmtk_builder);
#if RACTOR_CHECK_MODE
ruby_binding_options.ractor_check_mode = true;
// Ruby only needs a uint32_t for the ractor ID.
// But we make the object size a multiple of alignment.
ruby_binding_options.suffix_size = MMTK_MIN_OBJ_ALIGN > sizeof(uint32_t) ?
MMTK_MIN_OBJ_ALIGN : sizeof(uint32_t);
#else
ruby_binding_options.ractor_check_mode = false;
ruby_binding_options.suffix_size = 0;
#endif
mmtk_init_binding(mmtk_builder, &ruby_binding_options, &ruby_upcalls);
mmtk_vo_bit_base_addr = mmtk_get_vo_bit_base();
mmtk_vo_bit_log_region_size = mmtk_get_vo_bit_log_region_size();
}
////////////////////////////////////////////////////////////////////////////////
// Flushing and de-initialization
////////////////////////////////////////////////////////////////////////////////
static void rb_mmtk_flush_obj_free_candidates(struct rb_mmtk_values_buffer *buffer);
static void rb_mmtk_flush_ppp_buffer(struct rb_mmtk_values_buffer *buffer);
void
rb_mmtk_flush_mutator_local_buffers(MMTk_VMMutatorThread thread)
{
struct rb_mmtk_mutator_local *local = (struct rb_mmtk_mutator_local*)thread->mutator_local;
rb_mmtk_flush_obj_free_candidates(&local->obj_free_candidates);
rb_mmtk_flush_ppp_buffer(&local->ppp_buffer);
}
void
rb_mmtk_destroy_mutator(MMTk_VMMutatorThread cur_thread, bool at_fork)
{
if (!at_fork) {
// A thread only destroys its own mutator when it exits normally (not at fork).
// But after forking, only the forking thread continues to live in the child process.
// The living thread will call this function to close the mutators of all dead threads.
// So we skip the assertions at fork.
RUBY_ASSERT(cur_thread == GET_THREAD());
RUBY_ASSERT(cur_thread->mutator_local == &rb_mmtk_mutator_local);
}
rb_mmtk_flush_mutator_local_buffers(cur_thread);
MMTk_Mutator *mutator = cur_thread->mutator;
mmtk_destroy_mutator(mutator);
cur_thread->mutator = NULL;
cur_thread->mutator_local = NULL;
}
////////////////////////////////////////////////////////////////////////////////
// Object layout
////////////////////////////////////////////////////////////////////////////////
size_t
rb_mmtk_prefix_size(void)
{
return MMTK_OBJREF_OFFSET;
}
size_t
rb_mmtk_suffix_size(void)
{
// In RACTOR_CHECK_MODE, an additional hidden field is added to hold the Ractor ID.
return ruby_binding_options.suffix_size;
}
void
rb_mmtk_init_hidden_header(VALUE object, size_t payload_size)
{
RUBY_ASSERT(payload_size <= MMTK_HIDDEN_SIZE_MASK,
"payload size greater than MMTK_HIDDEN_SIZE_MASK. payload_size: %zu", payload_size);
struct MMTk_HiddenHeader *hidden_header = (struct MMTk_HiddenHeader*)(object - MMTK_OBJREF_OFFSET);
hidden_header->prefix = payload_size;
RUBY_ASSERT(mmtk_hidden_header_is_sane(hidden_header),
"Hidden header is not sane on construction. Object: %p, prefix: %zx",
(void*) object, hidden_header->prefix);
}
size_t
rb_mmtk_get_payload_size(VALUE object)
{
struct MMTk_HiddenHeader *hidden_header = (struct MMTk_HiddenHeader*)(object - MMTK_OBJREF_OFFSET);
size_t prefix = hidden_header->prefix;
RUBY_ASSERT(mmtk_hidden_header_is_sane(hidden_header),
"Hidden header is corrupted. Object: %p, prefix: %zx",
(void*) object, prefix);
return prefix & MMTK_HIDDEN_SIZE_MASK;
}
////////////////////////////////////////////////////////////////////////////////
// Allocation
////////////////////////////////////////////////////////////////////////////////
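// Allocation fast path: bump the thread-local Immix cursor.
// Returns NULL when the allocation does not fit, sending the caller to the slow path.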
static void*
rb_mmtk_immix_alloc_fast_bump_pointer(size_t size)
{
struct rb_mmtk_mutator_local *local = &rb_mmtk_mutator_local;
// TODO: verify the usefulness of this prefetching.
PREFETCH((void*)local->last_new_cursor, 1);
PREFETCH((void*)local->last_meta_addr, 1);
struct BumpPointer *immix_bump_pointer = local->immix_bump_pointer;
uintptr_t cursor = immix_bump_pointer->cursor;
uintptr_t limit = immix_bump_pointer->limit;
void *result = (void*)cursor;
uintptr_t new_cursor = cursor + size;
// Note: If the selected plan is not Immix, then both the cursor and the limit will always be
// 0. In that case this function will return NULL and the caller will try the slow path.
if (new_cursor > limit) {
return NULL;
} else {
immix_bump_pointer->cursor = new_cursor;
local->last_new_cursor = new_cursor; // save for prefetching
return result;
}
}
/// Wrap mmtk_alloc, but use fast path if possible.
static void*
rb_mmtk_alloc(size_t size, MMTk_AllocationSemantics semantics)
{
if (semantics == MMTK_ALLOCATION_SEMANTICS_DEFAULT && rb_mmtk_plan_uses_bump_pointer) {
// Try the fast path.
void *fast_result = rb_mmtk_immix_alloc_fast_bump_pointer(size);
if (fast_result != NULL) {
return fast_result;
}
}
// Fall back to the slow path.
void *result = mmtk_alloc(GET_THREAD()->mutator, size, MMTK_MIN_OBJ_ALIGN, 0, semantics);
return result;
}
#define RB_MMTK_USE_POST_ALLOC_FAST_PATH true
#define RB_MMTK_VO_BIT_SET_NON_ATOMIC true
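// Set the VO (valid-object) bit for a newly allocated object. Each
// 2^mmtk_vo_bit_log_region_size bytes of heap is covered by one bit of side
// metadata starting at mmtk_vo_bit_base_addr; we locate the byte and bit for
// the region containing obj and set it.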
static void
rb_mmtk_post_alloc_fast_immix(VALUE obj)
{
uintptr_t obj_addr = obj;
uintptr_t region_offset = obj_addr >> mmtk_vo_bit_log_region_size;
uintptr_t byte_offset = region_offset / 8;
uintptr_t bit_offset = region_offset % 8;
uintptr_t meta_byte_address = mmtk_vo_bit_base_addr + byte_offset;
uint8_t byte = 1 << bit_offset;
if (RB_MMTK_VO_BIT_SET_NON_ATOMIC) {
uint8_t *meta_byte_ptr = (uint8_t*)meta_byte_address;
*meta_byte_ptr |= byte;
} else {
volatile _Atomic uint8_t *meta_byte_ptr = (volatile _Atomic uint8_t*)meta_byte_address;
// relaxed: We don't use VO bits for synchronization during the mutator phase.
// When GC is triggered, the handshake between GC and mutator provides synchronization.
atomic_fetch_or_explicit(meta_byte_ptr, byte, memory_order_relaxed);
}
rb_mmtk_mutator_local.last_meta_addr = meta_byte_address;
}
/// Wrap mmtk_post_alloc, but use fast path if possible.
static void
rb_mmtk_post_alloc(VALUE obj, size_t mmtk_alloc_size, MMTk_AllocationSemantics semantics)
{
if (RB_MMTK_USE_POST_ALLOC_FAST_PATH && semantics == MMTK_ALLOCATION_SEMANTICS_DEFAULT && rb_mmtk_plan_is_immix) {
rb_mmtk_post_alloc_fast_immix(obj);
} else {
// Call post_alloc. This will initialize GC-specific metadata.
mmtk_post_alloc(GET_THREAD()->mutator, (void*)obj, mmtk_alloc_size, semantics);
}
}
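// Allocate a Ruby object. The MMTk-level allocation unit is laid out as
// [hidden header (prefix)][Ruby object payload][optional suffix],
// so the returned VALUE points prefix_size bytes past the allocation start.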
VALUE
rb_mmtk_alloc_obj(size_t mmtk_alloc_size, size_t size_pool_size, size_t prefix_size)
{
MMTk_AllocationSemantics semantics = mmtk_alloc_size <= MMTK_MAX_IMMIX_OBJECT_SIZE ? MMTK_ALLOCATION_SEMANTICS_DEFAULT
: MMTK_ALLOCATION_SEMANTICS_LOS;
// Allocate the object.
void *addr = rb_mmtk_alloc(mmtk_alloc_size, semantics);
// The Ruby-level object reference (i.e. VALUE) is at an offset from the MMTk-level
// allocation unit.
VALUE obj = (VALUE)addr + prefix_size;
// Store the Ruby-level object size before the object.
rb_mmtk_init_hidden_header(obj, size_pool_size);
rb_mmtk_post_alloc(obj, mmtk_alloc_size, semantics);
return obj;
}
////////////////////////////////////////////////////////////////////////////////
// Tracing
////////////////////////////////////////////////////////////////////////////////
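// Invoke the tracing closure installed by the current GC worker thread,
// returning the (possibly forwarded) reference of the traced object.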
static inline MMTk_ObjectReference
rb_mmtk_call_object_closure(MMTk_ObjectReference object, bool pin)
{
return rb_mmtk_gc_thread_tls->object_closure.c_function(rb_mmtk_gc_thread_tls->object_closure.rust_closure,
rb_mmtk_gc_thread_tls->gc_context,
object,
pin);
}
static inline void
rb_mmtk_mark(VALUE obj, bool pin)
{
rb_mmtk_assert_mmtk_worker();
RUBY_DEBUG_LOG("Marking: %s %s %p",
pin ? "(pin)" : " ",
RB_SPECIAL_CONST_P(obj) ? "(spc)" : " ",
(void*)obj);
RUBY_ASSERT(!RB_SPECIAL_CONST_P(obj));
rb_mmtk_call_object_closure((MMTk_ObjectReference)obj, pin);
}
// This function is used to visit and update all fields during tracing.
// It shall call both gc_mark_children and gc_update_object_references during copying GC.
static inline void
rb_mmtk_scan_object_ruby_style(MMTk_ObjectReference object)
{
rb_mmtk_assert_mmtk_worker();
VALUE obj = (VALUE)object;
// TODO: When mmtk-core can clear the VO bit (a.k.a. alloc-bit), we can remove this.
if (RB_BUILTIN_TYPE(obj) == T_NONE) {
return;
}
rb_mmtk_mark_children(obj);
rb_mmtk_update_object_references(obj);
}
// This is used to determine the pinning fields of potential pinning parents (PPPs).
// It should only call gc_mark_children.
static inline void
rb_mmtk_call_gc_mark_children(MMTk_ObjectReference object)
{
rb_mmtk_assert_mmtk_worker();
VALUE obj = (VALUE)object;
// TODO: When mmtk-core can clear the VO bit (a.k.a. alloc-bit), we can remove this.
if (RB_BUILTIN_TYPE(obj) == T_NONE) {
return;
}
rb_mmtk_mark_children(obj);
}
void
rb_mmtk_mark_movable(VALUE obj)
{
rb_mmtk_mark(obj, false);
}
void
rb_mmtk_mark_pin(VALUE obj)
{
rb_mmtk_mark(obj, true);
}
void
rb_mmtk_mark_and_move(VALUE *field)
{
VALUE obj = *field;
RUBY_ASSERT(!RB_SPECIAL_CONST_P(obj));
MMTk_ObjectReference old_ref = (MMTk_ObjectReference)obj;
MMTk_ObjectReference new_ref = rb_mmtk_call_object_closure(old_ref, false);
if (new_ref != old_ref) {
*field = (VALUE)new_ref;
}
}
bool
rb_mmtk_object_moved_p(VALUE value)
{
if (!SPECIAL_CONST_P(value)) {
MMTk_ObjectReference object = (MMTk_ObjectReference)value;
return rb_mmtk_call_object_closure(object, false) != object;
} else {
return false;
}
}
VALUE
rb_mmtk_maybe_forward(VALUE value)
{
if (!SPECIAL_CONST_P(value)) {
return (VALUE)rb_mmtk_call_object_closure((MMTk_ObjectReference)value, false);
} else {
return value;
}
}
////////////////////////////////////////////////////////////////////////////////
// PPP support
////////////////////////////////////////////////////////////////////////////////
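// PPPs (potential pinning parents) are objects that may pin the objects they
// refer to when traced, typically because native code keeps raw pointers to
// those children. Their fields are visited with gc_mark_children.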
// Return true if an object is a PPP when allocated.
// This does not include types that only become PPPs later in their lifetime, such as
// - Hash (when starting to compare keys by identity)
// - iseq (since ISEQ_COMPILE_DATA_ALLOC)
static bool
rb_mmtk_is_initially_ppp(VALUE obj)
{
RUBY_ASSERT(!rb_special_const_p(obj));
switch (RB_BUILTIN_TYPE(obj)) {
case T_DATA:
return true;
case T_IMEMO:
switch (imemo_type(obj)) {
case imemo_tmpbuf:
case imemo_ast:
case imemo_ifunc:
case imemo_memo:
case imemo_parser_strterm:
return true;
default:
return false;
}
default:
return false;
}
}
// Return true if a registered PPP is no longer a PPP. Return false otherwise.
// The return value doesn't matter for objects that are not registered as PPP.
static bool
rb_mmtk_is_no_longer_ppp(MMTk_ObjectReference objref)
{
// We no longer have any objects that become non-PPP during execution.
// But we keep this function just in case any use cases appear again in the future.
VALUE obj = (VALUE)objref;
RUBY_ASSERT(!rb_special_const_p(obj));
switch (RB_BUILTIN_TYPE(obj)) {
case T_IMEMO:
switch (imemo_type(obj)) {
default:
break;
}
default:
break;
}
return false;
}
static void
rb_mmtk_flush_ppp_buffer(struct rb_mmtk_values_buffer *buffer)
{
RUBY_ASSERT(buffer != NULL);
mmtk_register_ppps((MMTk_ObjectReference*)buffer->objects, buffer->len);
rb_mmtk_values_buffer_clear(buffer);
}
void
rb_mmtk_register_ppp(VALUE obj)
{
RUBY_ASSERT(!rb_special_const_p(obj));
struct rb_mmtk_values_buffer *buffer = &rb_mmtk_mutator_local.ppp_buffer;
if (rb_mmtk_values_buffer_append(buffer, obj)) {
rb_mmtk_flush_ppp_buffer(buffer);
}
}
void
rb_mmtk_maybe_register_initial_ppp(VALUE obj)
{
RUBY_ASSERT(!rb_special_const_p(obj));
if (rb_mmtk_is_initially_ppp(obj)) {
rb_mmtk_register_ppp(obj);
}
}
////////////////////////////////////////////////////////////////////////////////
// Finalization and exiting
////////////////////////////////////////////////////////////////////////////////
static void
rb_mmtk_flush_obj_free_candidates(struct rb_mmtk_values_buffer *buffer)
{
RUBY_ASSERT(buffer != NULL);
mmtk_add_obj_free_candidates((MMTk_ObjectReference*)buffer->objects, buffer->len);
rb_mmtk_values_buffer_clear(buffer);
}
void
rb_mmtk_register_obj_free_candidate(VALUE obj)
{
RUBY_DEBUG_LOG("Object registered for obj_free: %p: %s %s",
(void*)obj,
rb_type_str(RB_BUILTIN_TYPE(obj)),
RB_BUILTIN_TYPE(obj) == T_IMEMO ? rb_imemo_name(imemo_type(obj)) :
rb_obj_class(obj) == 0 ? "(null klass)" :
rb_class2name(rb_obj_class(obj))
);
struct rb_mmtk_values_buffer *buffer = &rb_mmtk_mutator_local.obj_free_candidates;
if (rb_mmtk_values_buffer_append(buffer, obj)) {
rb_mmtk_flush_obj_free_candidates(buffer);
}
}
static bool
rb_mmtk_is_initial_obj_free_candidate(VALUE obj)
{
// Any object that has non-trivial clean-up code in `obj_free`
// should be registered as "finalizable" with MMTk.
switch (RB_BUILTIN_TYPE(obj)) {
case T_OBJECT:
// FIXME: Ordinary objects can be non-embedded, too,
// but there are just too many such objects,
// and few of them have large buffers.
// Just let them leak for now.
// We'll prioritize eliminating the underlying buffer of ordinary objects.
return false;
case T_DATA:
// RTypedData with both RUBY_TYPED_EMBEDDABLE and RUBY_TYPED_DEFAULT_FREE do not need
// obj_free. However, this function is called in the early stage of allocation, so we can't
// make this decision yet. So we return `false` for now and let `rb_data_object_wrap` and
// `typed_data_alloc` decide whether to register the object as a candidate.
return false;
case T_MODULE:
case T_CLASS:
case T_HASH:
case T_REGEXP:
case T_FILE:
case T_ICLASS:
case T_BIGNUM:
case T_STRUCT:
// These types need obj_free.
return true;
case T_IMEMO:
switch (imemo_type(obj)) {
case imemo_ast:
rb_bug("imemo_ast is obsolete");
UNREACHABLE;
case imemo_callinfo:
case imemo_env:
case imemo_iseq:
case imemo_ment:
case imemo_tmpbuf:
// These imemos need obj_free.
return true;
default:
// Other imemos don't need obj_free.
return false;
}
case T_SYMBOL:
// Will be unregistered from the global symbol table during the weak reference processing phase.
return false;
case T_STRING:
// We use imemo:mmtk_strbuf (rb_mmtk_strbuf_t) as the underlying buffer.
return false;
case T_ARRAY:
// We use imemo:mmtk_objbuf (rb_mmtk_objbuf_t) as the underlying buffer.
return false;
case T_MATCH:
// We use imemo:mmtk_strbuf (rb_mmtk_strbuf_t) for its several underlying buffers.
return false;
case T_RATIONAL:
case T_COMPLEX:
case T_FLOAT:
// `obj_free` only increments counters for these types.
return false;
case T_NIL:
case T_FIXNUM:
case T_TRUE:
case T_FALSE:
// These are non-heap value types.
case T_MOVED:
// Should not see this when object is just created.
case T_NODE:
// GC doesn't handle T_NODE.
rb_bug("rb_mmtk_maybe_register_obj_free_candidate: unexpected data type 0x%x(%p) 0x%"PRIxVALUE,
BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
default:
rb_bug("rb_mmtk_maybe_register_obj_free_candidate: unknown data type 0x%x(%p) 0x%"PRIxVALUE,
BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
}
UNREACHABLE;
}
void
rb_mmtk_maybe_register_initial_obj_free_candidate(VALUE obj)
{
if (rb_mmtk_is_initial_obj_free_candidate(obj)) {
rb_mmtk_register_obj_free_candidate(obj);
}
}
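// Run obj_free for one object. When called at VM exit (on_exit), only T_DATA
// objects with a dfree function and T_FILE objects with an fptr are freed;
// threads, mutexes, fibers and the main Ractor are skipped.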
static void
rb_mmtk_call_obj_free_inner(VALUE obj, bool on_exit)
{
if (on_exit) {
switch (BUILTIN_TYPE(obj)) {
case T_DATA:
if (!DATA_PTR(obj) || !((struct RData*)obj)->dfree) {
RUBY_DEBUG_LOG("Skipped data without dfree: %p: %s", (void*)obj, rb_type_str(RB_BUILTIN_TYPE(obj)));
return;
}
if (rb_obj_is_thread(obj)) {
RUBY_DEBUG_LOG("Skipped thread: %p: %s", (void*)obj, rb_type_str(RB_BUILTIN_TYPE(obj)));
return;
}
if (rb_obj_is_mutex(obj)) {
RUBY_DEBUG_LOG("Skipped mutex: %p: %s", (void*)obj, rb_type_str(RB_BUILTIN_TYPE(obj)));
return;
}
if (rb_obj_is_fiber(obj)) {
RUBY_DEBUG_LOG("Skipped fiber: %p: %s", (void*)obj, rb_type_str(RB_BUILTIN_TYPE(obj)));
return;
}
if (rb_obj_is_main_ractor(obj)) {
RUBY_DEBUG_LOG("Skipped main ractor: %p: %s", (void*)obj, rb_type_str(RB_BUILTIN_TYPE(obj)));
return;
}
break;
case T_FILE:
if (!((struct RFile*)obj)->fptr) {
RUBY_DEBUG_LOG("Skipped file without fptr: %p: %s", (void*)obj, rb_type_str(RB_BUILTIN_TYPE(obj)));
return;
}
break;
default:
RUBY_DEBUG_LOG("Skipped obj-free candidate that is neither T_DATA nor T_FILE: %p: %s",
(void*)obj, rb_type_str(RB_BUILTIN_TYPE(obj)));
return;
}
}
RUBY_DEBUG_LOG("Freeing object: %p: %s", (void*)obj, rb_type_str(RB_BUILTIN_TYPE(obj)));
rb_mmtk_obj_free(obj);
// The object may contain dangling pointers after `obj_free`.
// Clear its flags field to ensure the GC does not attempt to scan it.
// TODO: We can instead clear the VO bit (a.k.a. alloc-bit) when mmtk-core supports that.
RBASIC(obj)->flags = 0;
*(VALUE*)(&RBASIC(obj)->klass) = 0;
}
static inline void
rb_mmtk_call_obj_free(MMTk_ObjectReference object)
{
rb_mmtk_assert_mmtk_worker();
VALUE obj = (VALUE)object;
rb_mmtk_call_obj_free_inner(obj, false);
}
static void
rb_mmtk_call_obj_free_for_each_on_exit(VALUE *objects, size_t len)
{
for (size_t i = 0; i < len; i++) {
VALUE obj = objects[i];
rb_mmtk_call_obj_free_inner(obj, true);
}
}
void
rb_mmtk_call_obj_free_on_exit(void)
{
struct MMTk_RawVecOfObjRef registered_candidates = mmtk_get_all_obj_free_candidates();
rb_mmtk_call_obj_free_for_each_on_exit((VALUE*)registered_candidates.ptr, registered_candidates.len);
mmtk_free_raw_vec_of_obj_ref(registered_candidates);
rb_ractor_t *main_ractor = GET_VM()->ractor.main_ractor;
rb_thread_t *th = NULL;
ccan_list_for_each(&main_ractor->threads.set, th, lt_node) {
// Ruby caches native threads on some platforms,
// and the rb_thread_t structs can be reused while a thread is cached.
// Currently we destroy the mutator and the mutator_local structs when a thread exits.
if (th->mutator != NULL) {
struct rb_mmtk_mutator_local *local = (struct rb_mmtk_mutator_local*)th->mutator_local;
struct rb_mmtk_values_buffer *buffer = &local->obj_free_candidates;
rb_mmtk_call_obj_free_for_each_on_exit(buffer->objects, buffer->len);
} else {
RUBY_ASSERT(th->mutator_local == NULL);
}
}
}
bool
rb_gc_obj_free_on_exit_started(void)
{
return rb_mmtk_obj_free_on_exit_started;
}
void
rb_gc_set_obj_free_on_exit_started(void)
{
rb_mmtk_obj_free_on_exit_started = true;
}
////////////////////////////////////////////////////////////////////////////////
// Weak table processing
////////////////////////////////////////////////////////////////////////////////
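// Weak tables are processed by rebuilding: each live entry is copied into a
// new table with its references forwarded, while entries whose key (or, for
// weak-value tables, value) is dead are dropped and on_delete is invoked.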
struct rb_mmtk_weak_table_rebuilding_context {
st_table *old_table;
st_table *new_table;
enum RbMmtkWeakTableValueKind values_kind;
rb_mmtk_hash_on_delete_func on_delete;
void *on_delete_arg;
};
static int
rb_mmtk_update_weak_table_migrate_each(st_data_t key, st_data_t value, st_data_t arg)
{
struct rb_mmtk_weak_table_rebuilding_context *ctx =
(struct rb_mmtk_weak_table_rebuilding_context*)arg;
// Preconditions:
// The key must be an object reference,
RUBY_ASSERT(!SPECIAL_CONST_P((VALUE)key));
// and the key must point to a valid object (may be dead, but must be allocated).
RUBY_ASSERT(mmtk_is_mmtk_object((MMTk_ObjectReference)key));
bool key_live = mmtk_is_reachable((MMTk_ObjectReference)key);
bool keep = key_live;
bool value_live = true;
if (ctx->values_kind == RB_MMTK_VALUES_WEAK_REF) {
RUBY_ASSERT(
// The value is either a primitive value (e.g. Fixnum that represents an ID)
SPECIAL_CONST_P((VALUE)value) ||
// or a valid object reference (e.g. to a Bignum that represents an ID).
// It may be dead, but must be allocated.
mmtk_is_mmtk_object((MMTk_ObjectReference)value));
if (!SPECIAL_CONST_P((VALUE)value)) {
value_live = mmtk_is_reachable((MMTk_ObjectReference)value);
keep = keep && value_live;
}
}
if (keep) {
st_data_t new_key = (st_data_t)rb_mmtk_call_object_closure((MMTk_ObjectReference)key, false);
st_data_t new_value = ctx->values_kind == RB_MMTK_VALUES_NON_REF ?
value :
(st_data_t)rb_mmtk_maybe_forward((VALUE)value); // Note that value may be primitive value or objref.
st_insert(ctx->new_table, new_key, new_value);
RUBY_DEBUG_LOG("Forwarding key-value pair: (%p, %p) -> (%p, %p)",
(void*)key, (void*)value, (void*)new_key, (void*)new_value);
} else {
// The key or the value is dead. Discard the entry.
RUBY_DEBUG_LOG("Discarding key-value pair: (%p, %p). Key is %s, value is %s",
(void*)key, (void*)value, key_live ? "live" : "dead", value_live ? "live" : "dead");
if (ctx->on_delete != NULL) {
ctx->on_delete(key, value, ctx->on_delete_arg);
}
}
return ST_CONTINUE;
}