Ruby 3.3.7p123 (2025-01-15 revision be31f993d7fa0219d85f7b3c694d454da4ecc10b)
vm_core.h
#ifndef RUBY_VM_CORE_H
#define RUBY_VM_CORE_H
/**********************************************************************

  vm_core.h -

  $Author$
  created at: 04/01/01 19:41:38 JST

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
 * Enable check mode.
 *   1: enable local assertions.
 */
#ifndef VM_CHECK_MODE

// respect RUBY_DEBUG: if given n is 0, then use RUBY_DEBUG
#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)

#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
#endif
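
/* Illustrative note (an assumption about the build, not part of the header
 * itself): VM_CHECK_MODE is normally injected when compiling, e.g.
 *
 *   make CPPFLAGS="-DVM_CHECK_MODE=1"
 *
 * which turns the VM_ASSERT()/VM_UNREACHABLE() macros defined below into
 * real assertions instead of no-ops.
 */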

#ifndef VMDEBUG
#define VMDEBUG 0
#endif

#if 0
#undef  VMDEBUG
#define VMDEBUG 3
#endif

#include "ruby/internal/config.h"

#include <stddef.h>
#include <signal.h>
#include <stdarg.h>

#include "ruby_assert.h"

#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))

#if VM_CHECK_MODE > 0
#define VM_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr)
#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
#define RUBY_ASSERT_CRITICAL_SECTION
#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
#else
#define VM_ASSERT(expr) ((void)0)
#define VM_UNREACHABLE(func) UNREACHABLE
#define RUBY_DEBUG_THREAD_SCHEDULE()
#endif

#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))

#if defined(RUBY_ASSERT_CRITICAL_SECTION)
// TODO add documentation
extern int ruby_assert_critical_section_entered;
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
#else
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
#endif

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
#else
# include <setjmp.h>
#endif

#if defined(__linux__) || defined(__FreeBSD__)
# define RB_THREAD_T_HAS_NATIVE_ID
#endif

#include "ccan/list/list.h"
#include "id.h"
#include "internal.h"
#include "internal/array.h"
#include "internal/basic_operators.h"
#include "internal/serial.h"
#include "internal/vm.h"
#include "method.h"
#include "node.h"
#include "ruby/ruby.h"
#include "ruby/st.h"
#include "ruby_atomic.h"
#include "vm_opts.h"

#include "ruby/thread_native.h"

/*
 * implementation selector of get_insn_info algorithm
 *   0: linear search
 *   1: binary search
 *   2: succinct bitvector
 */
#ifndef VM_INSN_INFO_TABLE_IMPL
# define VM_INSN_INFO_TABLE_IMPL 2
#endif

#if defined(NSIG_MAX)           /* POSIX issue 8 */
# undef  NSIG
# define NSIG NSIG_MAX
#elif defined(_SIG_MAXSIG)      /* FreeBSD */
# undef  NSIG
# define NSIG _SIG_MAXSIG
#elif defined(_SIGMAX)          /* QNX */
# define NSIG (_SIGMAX + 1)
#elif defined(NSIG)             /* 99% of everything else */
# /* take it */
#else                           /* Last resort */
# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
#endif

#define RUBY_NSIG NSIG

#if defined(SIGCLD)
# define RUBY_SIGCHLD (SIGCLD)
#elif defined(SIGCHLD)
# define RUBY_SIGCHLD (SIGCHLD)
#endif

#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
# define USE_SIGALTSTACK
void *rb_allocate_sigaltstack(void);
void *rb_register_sigaltstack(void *);
# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
# define RB_ALTSTACK_FREE(var) free(var)
# define RB_ALTSTACK(var)  var
#else /* noop */
# define RB_ALTSTACK_INIT(var, altstack)
# define RB_ALTSTACK_FREE(var)
# define RB_ALTSTACK(var) (0)
#endif

#include THREAD_IMPL_H
#define RUBY_VM_THREAD_MODEL 2

/*****************/
/* configuration */
/*****************/

/* gcc ver. check */
#if defined(__GNUC__) && __GNUC__ >= 2

#if OPT_TOKEN_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif
#endif

#else /* defined(__GNUC__) && __GNUC__ >= 2 */

/* disable threaded code options */
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif
#if OPT_TOKEN_THREADED_CODE
#undef OPT_TOKEN_THREADED_CODE
#endif
#endif

/* call threaded code */
#if OPT_CALL_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif /* OPT_DIRECT_THREADED_CODE */
#endif /* OPT_CALL_THREADED_CODE */

void rb_vm_encoded_insn_data_table_init(void);
typedef unsigned long rb_num_t;
typedef   signed long rb_snum_t;

enum ruby_tag_type {
    RUBY_TAG_NONE   = 0x0,
    RUBY_TAG_RETURN = 0x1,
    RUBY_TAG_BREAK  = 0x2,
    RUBY_TAG_NEXT   = 0x3,
    RUBY_TAG_RETRY  = 0x4,
    RUBY_TAG_REDO   = 0x5,
    RUBY_TAG_RAISE  = 0x6,
    RUBY_TAG_THROW  = 0x7,
    RUBY_TAG_FATAL  = 0x8,
    RUBY_TAG_MASK   = 0xf
};

#define TAG_NONE   RUBY_TAG_NONE
#define TAG_RETURN RUBY_TAG_RETURN
#define TAG_BREAK  RUBY_TAG_BREAK
#define TAG_NEXT   RUBY_TAG_NEXT
#define TAG_RETRY  RUBY_TAG_RETRY
#define TAG_REDO   RUBY_TAG_REDO
#define TAG_RAISE  RUBY_TAG_RAISE
#define TAG_THROW  RUBY_TAG_THROW
#define TAG_FATAL  RUBY_TAG_FATAL
#define TAG_MASK   RUBY_TAG_MASK

enum ruby_vm_throw_flags {
    VM_THROW_NO_ESCAPE_FLAG = 0x8000,
    VM_THROW_STATE_MASK = 0xff
};

/* forward declarations */
struct rb_thread_struct;
struct rb_control_frame_struct;

/* iseq data type */
typedef struct rb_compile_option_struct rb_compile_option_t;

union ic_serial_entry {
    rb_serial_t raw;
    VALUE data[2];
};

// imemo_constcache
struct iseq_inline_constant_cache_entry {
    VALUE flags;

    VALUE value;              // v0
    VALUE _unused1;           // v1
    VALUE _unused2;           // v2
    const rb_cref_t *ic_cref; // v3
};
STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
              (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
               sizeof(const rb_cref_t *)) <= RVALUE_SIZE);

struct iseq_inline_constant_cache {
    struct iseq_inline_constant_cache_entry *entry;

    /* null-terminated list of IDs naming the constant's path; idNULL stands
     * for a leading "::" prefix and 0 terminates the list */
    const ID *segments;
};

struct iseq_inline_iv_cache_entry {
    uintptr_t value; // attr_index in lower bits, dest_shape_id in upper bits
    ID iv_set_name;
};

struct iseq_inline_cvar_cache_entry {
    struct rb_cvar_class_tbl_entry *entry;
};

union iseq_inline_storage_entry {
    struct {
        struct rb_thread_struct *running_thread;
        VALUE value;
    } once;
    struct iseq_inline_constant_cache ic_cache;
    struct iseq_inline_iv_cache_entry iv_cache;
};

struct rb_calling_info {
    const struct rb_call_data *cd;
    const struct rb_callcache *cc;
    VALUE block_handler;
    VALUE recv;
    int argc;
    bool kw_splat;
    VALUE heap_argv;
};

#ifndef VM_ARGC_STACK_MAX
#define VM_ARGC_STACK_MAX 128
#endif

# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)

#if 1
#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
#else
#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
#endif
#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))

typedef struct rb_iseq_location_struct {
    VALUE pathobj;      /* String (path) or Array [path, realpath]. Frozen. */
    VALUE base_label;   /* String */
    VALUE label;        /* String */
    int first_lineno;
    int node_id;
    rb_code_location_t code_location;
} rb_iseq_location_t;

#define PATHOBJ_PATH     0
#define PATHOBJ_REALPATH 1

static inline VALUE
pathobj_path(VALUE pathobj)
{
    if (RB_TYPE_P(pathobj, T_STRING)) {
        return pathobj;
    }
    else {
        VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
        return RARRAY_AREF(pathobj, PATHOBJ_PATH);
    }
}

static inline VALUE
pathobj_realpath(VALUE pathobj)
{
    if (RB_TYPE_P(pathobj, T_STRING)) {
        return pathobj;
    }
    else {
        VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
        return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
    }
}
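
/* Illustrative sketch (hedged): a pathobj is either a frozen String (when
 * path == realpath) or a frozen 2-element Array [path, realpath]:
 *
 *   VALUE po = rb_iseq_pathobj_new(path, realpath); // declared later in this file
 *   pathobj_path(po);     // => path
 *   pathobj_realpath(po); // => realpath
 *
 * Both accessors simply return the String itself when given a plain String.
 */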

/* Forward declarations */
struct rb_rjit_unit;

typedef uintptr_t iseq_bits_t;

#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)

/* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);

/* instruction sequence type */
enum rb_iseq_type {
    ISEQ_TYPE_TOP,
    ISEQ_TYPE_METHOD,
    ISEQ_TYPE_BLOCK,
    ISEQ_TYPE_CLASS,
    ISEQ_TYPE_RESCUE,
    ISEQ_TYPE_ENSURE,
    ISEQ_TYPE_EVAL,
    ISEQ_TYPE_MAIN,
    ISEQ_TYPE_PLAIN
};

// Attributes specified by Primitive.attr!
enum rb_builtin_attr {
    // The iseq does not call methods.
    BUILTIN_ATTR_LEAF = 0x01,
    // The iseq does not allocate objects.
    BUILTIN_ATTR_NO_GC = 0x02,
    // This iseq only contains a single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
    BUILTIN_ATTR_SINGLE_NOARG_INLINE = 0x04,
};

typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);

struct rb_iseq_constant_body {
    enum rb_iseq_type type;

    unsigned int iseq_size;
    VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */

    struct {
        struct {
            unsigned int has_lead   : 1;
            unsigned int has_opt    : 1;
            unsigned int has_rest   : 1;
            unsigned int has_post   : 1;
            unsigned int has_kw     : 1;
            unsigned int has_kwrest : 1;
            unsigned int has_block  : 1;

            unsigned int ambiguous_param0 : 1; /* {|a|} */
            unsigned int accepts_no_kwarg : 1;
            unsigned int ruby2_keywords   : 1;
        } flags;

        unsigned int size;

        int lead_num;
        int opt_num;
        int rest_start;
        int post_start;
        int post_num;
        int block_start;

        const VALUE *opt_table; /* (opt_num + 1) entries. */
        /* opt_num and opt_table:
         *
         * def foo o1=e1, o2=e2, ..., oN=eN
         * #=>
         *   # prologue code
         *   A1: e1
         *   A2: e2
         *   ...
         *   AN: eN
         *   AL: body
         * opt_num = N
         * opt_table = [A1, A2, ..., AN, AL]
         */

        const struct rb_iseq_param_keyword {
            int num;
            int required_num;
            int bits_start;
            int rest_start;
            const ID *table;
            VALUE *default_values;
        } *keyword;
    } param;

    rb_iseq_location_t location;

    /* insn info, must be freed */
    struct iseq_insn_info {
        const struct iseq_insn_info_entry *body;
        unsigned int *positions;
        unsigned int size;
#if VM_INSN_INFO_TABLE_IMPL == 2
        struct succ_index_table *succ_index_table;
#endif
    } insns_info;

    const ID *local_table; /* must free */

    /* catch table */
    struct iseq_catch_table *catch_table;

    /* for child iseq */
    const struct rb_iseq_struct *parent_iseq;
    struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */

    union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
    struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];

    struct {
        rb_snum_t flip_count;
        VALUE script_lines;
        VALUE coverage;
        VALUE pc2branchindex;
        VALUE *original_iseq;
    } variable;

    unsigned int local_table_size;
    unsigned int ic_size;     // Number of IC caches
    unsigned int ise_size;    // Number of ISE caches
    unsigned int ivc_size;    // Number of IVC caches
    unsigned int icvarc_size; // Number of ICVARC caches
    unsigned int ci_size;
    unsigned int stack_max; /* for stack overflow check */

    unsigned int builtin_attrs; // Union of rb_builtin_attr

    union {
        iseq_bits_t * list; /* Find references for GC */
        iseq_bits_t single;
    } mark_bits;

    struct rb_id_table *outer_variables;

    const rb_iseq_t *mandatory_only_iseq;

#if USE_RJIT || USE_YJIT
    // Function pointer for JIT code on jit_exec()
    rb_jit_func_t jit_entry;
    // Number of calls on jit_exec()
    long unsigned jit_entry_calls;
#endif

#if USE_YJIT
    // Function pointer for JIT code on jit_exec_exception()
    rb_jit_func_t jit_exception;
    // Number of calls on jit_exec_exception()
    long unsigned jit_exception_calls;
#endif

#if USE_RJIT
    // RJIT stores some data on each iseq.
    VALUE rjit_blocks;
#endif

#if USE_YJIT
    // YJIT stores some data on each iseq.
    void *yjit_payload;
    // Used to estimate how frequently this ISEQ gets called
    uint64_t yjit_calls_at_interv;
#endif
};

/* T_IMEMO/iseq */
/* typedef rb_iseq_t is in method.h */
struct rb_iseq_struct {
    VALUE flags;   /* 1 */
    VALUE wrapper; /* 2 */

    struct rb_iseq_constant_body *body; /* 3 */

    union { /* 4, 5 words */
        struct iseq_compile_data *compile_data; /* used at compile time */

        struct {
            VALUE obj;
            int index;
        } loader;

        struct {
            struct rb_hook_list_struct *local_hooks;
            rb_event_flag_t global_trace_events;
        } exec;
    } aux;
};

#define ISEQ_BODY(iseq) ((iseq)->body)

#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
#define USE_LAZY_LOAD 0
#endif

#if USE_LAZY_LOAD
const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
#else
/* stub so the call below still compiles; it is unreachable when lazy
 * loading is disabled */
static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return NULL;}
#endif

static inline const rb_iseq_t *
rb_iseq_check(const rb_iseq_t *iseq)
{
    if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
        rb_iseq_complete((rb_iseq_t *)iseq);
    }
    return iseq;
}

static inline const rb_iseq_t *
def_iseq_ptr(rb_method_definition_t *def)
{
    //TODO: re-visit. to check the bug, enable this assertion.
#if VM_CHECK_MODE > 0
    if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
#endif
    return rb_iseq_check(def->body.iseq.iseqptr);
}

enum ruby_special_exceptions {
    ruby_error_reenter,
    ruby_error_nomemory,
    ruby_error_sysstack,
    ruby_error_stackfatal,
    ruby_error_stream_closed,
    ruby_special_error_count
};

#define GetVMPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_vm_t, (ptr))

struct rb_vm_struct;
typedef void rb_vm_at_exit_func(struct rb_vm_struct*);

typedef struct rb_at_exit_list {
    rb_vm_at_exit_func *func;
    struct rb_at_exit_list *next;
} rb_at_exit_list;

struct rb_objspace;
struct rb_objspace *rb_objspace_alloc(void);
void rb_objspace_free(struct rb_objspace *);
void rb_objspace_call_finalizer(struct rb_objspace *);

typedef struct rb_hook_list_struct {
    struct rb_event_hook_struct *hooks;
    rb_event_flag_t events;
    unsigned int running;
    bool need_clean;
    bool is_local;
} rb_hook_list_t;


// see builtin.h for definition
typedef const struct rb_builtin_function *RB_BUILTIN;

typedef struct rb_vm_struct {
    VALUE self;

    struct {
        struct ccan_list_head set;
        unsigned int cnt;
        unsigned int blocking_cnt;

        struct rb_ractor_struct *main_ractor;
        struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main

        struct {
            // monitor
            rb_nativethread_lock_t lock;
            struct rb_ractor_struct *lock_owner;
            unsigned int lock_rec;

            // join at exit
            rb_nativethread_cond_t terminate_cond;
            bool terminate_waiting;

#ifndef RUBY_THREAD_PTHREAD_H
            bool barrier_waiting;
            unsigned int barrier_cnt;
            rb_nativethread_cond_t barrier_cond;
#endif
        } sync;

        // ractor scheduling
        struct {
            rb_nativethread_lock_t lock;
            struct rb_ractor_struct *lock_owner;
            bool locked;

            rb_nativethread_cond_t cond; // GRQ
            unsigned int snt_cnt; // count of shared NTs
            unsigned int dnt_cnt; // count of dedicated NTs

            unsigned int running_cnt;

            unsigned int max_cpu;
            struct ccan_list_head grq; // Global Ready Queue
            unsigned int grq_cnt;

            // running threads
            struct ccan_list_head running_threads;

            // threads which switch context by timeslice
            struct ccan_list_head timeslice_threads;

            struct ccan_list_head zombie_threads;

            // true if the timeslice timer is not enabled
            bool timeslice_wait_inf;

            // barrier
            rb_nativethread_cond_t barrier_complete_cond;
            rb_nativethread_cond_t barrier_release_cond;
            bool barrier_waiting;
            unsigned int barrier_waiting_cnt;
            unsigned int barrier_serial;
        } sched;
    } ractor;

#ifdef USE_SIGALTSTACK
    void *main_altstack;
#endif

    rb_serial_t fork_gen;
    struct ccan_list_head waiting_fds; /* <=> struct waiting_fd */

    /* set in single-threaded processes only: */
    volatile int ubf_async_safe;

    unsigned int running: 1;
    unsigned int thread_abort_on_exception: 1;
    unsigned int thread_report_on_exception: 1;
    unsigned int thread_ignore_deadlock: 1;

    /* object management */
    VALUE mark_object_ary;
    const VALUE special_exceptions[ruby_special_error_count];

    /* load */
    VALUE top_self;
    VALUE load_path;
    VALUE load_path_snapshot;
    VALUE load_path_check_cache;
    VALUE expanded_load_path;
    VALUE loaded_features;
    VALUE loaded_features_snapshot;
    VALUE loaded_features_realpaths;
    VALUE loaded_features_realpath_map;
    struct st_table *loaded_features_index;
    struct st_table *loading_table;
    // For running the init function of statically linked
    // extensions when they are loaded
    struct st_table *static_ext_inits;

    /* signal */
    struct {
        VALUE cmd[RUBY_NSIG];
    } trap_list;

    /* relation table of ensure - rollback for callcc */
    struct st_table *ensure_rollback_table;

    /* postponed_job (async-signal-safe, and thread-safe) */
    struct rb_postponed_job_queue *postponed_job_queue;

    int src_encoding_index;

    /* workqueue (thread-safe, NOT async-signal-safe) */
    struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
    rb_nativethread_lock_t workqueue_lock;

    VALUE orig_progname, progname;
    VALUE coverages, me2counter;
    int coverage_mode;

    st_table * defined_module_hash;

    struct rb_objspace *objspace;

    rb_at_exit_list *at_exit;

    st_table *frozen_strings;

    const struct rb_builtin_function *builtin_function_table;
    int builtin_inline_index;

    st_table *ci_table;
    struct rb_id_table *negative_cme_table;
    st_table *overloaded_cme_table; // cme -> overloaded_cme

    // This id table contains a mapping from ID to ICs. It does this with ID
    // keys and nested st_tables as values. The nested tables have ICs as keys
    // and Qtrue as values. It is used when inline constant caches need to be
    // invalidated or ISEQs are being freed.
    struct rb_id_table *constant_cache;
    ID inserting_constant_cache_id;

#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
#endif
    const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c

#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t clock;
#endif

    /* params */
    struct { /* size in byte */
        size_t thread_vm_stack_size;
        size_t thread_machine_stack_size;
        size_t fiber_vm_stack_size;
        size_t fiber_machine_stack_size;
    } default_params;

} rb_vm_t;

/* default values */

#define RUBY_VM_SIZE_ALIGN 4096

#define RUBY_VM_THREAD_VM_STACK_SIZE          ( 128 * 1024 * sizeof(VALUE)) /*  512 KB or 1024 KB */
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN      (   2 * 1024 * sizeof(VALUE)) /*    8 KB or   16 KB */
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     ( 128 * 1024 * sizeof(VALUE)) /*  512 KB or 1024 KB */
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */

#define RUBY_VM_FIBER_VM_STACK_SIZE           (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN       (   2 * 1024 * sizeof(VALUE)) /*    8 KB or   16 KB */
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      (  64 * 1024 * sizeof(VALUE)) /*  256 KB or  512 KB */
#if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  32 * 1024 * sizeof(VALUE)) /*  128 KB or  256 KB */
#else
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
#endif

#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
/* It seems sanitizers consume A LOT of machine stacks */
#undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     (1024 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      ( 256 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  ( 128 * 1024 * sizeof(VALUE))
#endif

#ifndef VM_DEBUG_BP_CHECK
#define VM_DEBUG_BP_CHECK 0
#endif

#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
#endif

struct rb_captured_block {
    VALUE self;
    const VALUE *ep;
    union {
        const rb_iseq_t *iseq;
        const struct vm_ifunc *ifunc;
        VALUE val;
    } code;
};

enum rb_block_handler_type {
    block_handler_type_iseq,
    block_handler_type_ifunc,
    block_handler_type_symbol,
    block_handler_type_proc
};

enum rb_block_type {
    block_type_iseq,
    block_type_ifunc,
    block_type_symbol,
    block_type_proc
};

struct rb_block {
    union {
        struct rb_captured_block captured;
        VALUE symbol;
        VALUE proc;
    } as;
    enum rb_block_type type;
};

typedef struct rb_control_frame_struct {
    const VALUE *pc;        // cfp[0]
    VALUE *sp;              // cfp[1]
    const rb_iseq_t *iseq;  // cfp[2]
    VALUE self;             // cfp[3] / block[0]
    const VALUE *ep;        // cfp[4] / block[1]
    const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
    void *jit_return;       // cfp[6] -- return address for JIT code
#if VM_DEBUG_BP_CHECK
    VALUE *bp_check;        // cfp[7]
#endif
} rb_control_frame_t;

extern const rb_data_type_t ruby_threadptr_data_type;

static inline struct rb_thread_struct *
rb_thread_ptr(VALUE thval)
{
    return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
}

enum rb_thread_status {
    THREAD_RUNNABLE,
    THREAD_STOPPED,
    THREAD_STOPPED_FOREVER,
    THREAD_KILLED
};

#ifdef RUBY_JMP_BUF
typedef RUBY_JMP_BUF rb_jmpbuf_t;
#else
typedef void *rb_jmpbuf_t[5];
#endif

/*
  `rb_vm_tag_jmpbuf_t` type represents a buffer used to
  long jump to a C frame associated with `rb_vm_tag`.

  The use-site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
  following functions:
  - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
  - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.

  `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
  `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
*/
#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
/*
  The WebAssembly target with Asyncify-based SJLJ needs
  to capture the execution context by unwind/rewind-ing
  call frames into a jump buffer. The buffer space tends
  to be considerably large, unlike other architectures'
  register-based buffers.
  Therefore, we allocate the buffer on the heap in such
  environments.
*/
typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;

#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)

static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
}

static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    ruby_xfree(*jmpbuf);
}
#else
typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;

#define RB_VM_TAG_JMPBUF_GET(buf) (buf)

static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    // no-op
}

static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    // no-op
}
#endif
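
/* Illustrative sketch (hedged; mirrors how EC_PUSH_TAG()/EC_POP_TAG() in
 * eval_intern.h pair these calls -- not a new API):
 *
 *   struct rb_vm_tag tag;
 *   rb_vm_tag_jmpbuf_init(&tag.buf);
 *   if (ruby_setjmp(RB_VM_TAG_JMPBUF_GET(tag.buf)) == 0) {
 *       ... // protected code; a matching ruby_longjmp() lands back here
 *   }
 *   rb_vm_tag_jmpbuf_deinit(&tag.buf);
 */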

/*
  the members which are written in EC_PUSH_TAG() should be placed at
  the beginning and the end, so that the entire region is accessible.
*/
struct rb_vm_tag {
    VALUE tag;
    VALUE retval;
    rb_vm_tag_jmpbuf_t buf;
    struct rb_vm_tag *prev;
    enum ruby_tag_type state;
    unsigned int lock_rec;
};

STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
STATIC_ASSERT(rb_vm_tag_buf_end,
              offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
              sizeof(struct rb_vm_tag));

struct rb_unblock_callback {
    rb_unblock_function_t *func;
    void *arg;
};

struct rb_mutex_struct;

typedef struct rb_ensure_entry {
    VALUE marker;
    VALUE (*e_proc)(VALUE);
    VALUE data2;
} rb_ensure_entry_t;

typedef struct rb_ensure_list {
    struct rb_ensure_list *next;
    struct rb_ensure_entry entry;
} rb_ensure_list_t;

typedef struct rb_fiber_struct rb_fiber_t;

struct rb_waiting_list {
    struct rb_waiting_list *next;
    struct rb_thread_struct *thread;
    struct rb_fiber_struct *fiber;
};

struct rb_execution_context_struct {
    /* execution information */
    VALUE *vm_stack;      /* must free, must mark */
    size_t vm_stack_size; /* size in word (byte size / sizeof(VALUE)) */
    rb_control_frame_t *cfp;

    struct rb_vm_tag *tag;

    /* interrupt flags */
    rb_atomic_t interrupt_flag;
    rb_atomic_t interrupt_mask; /* size should match flag */
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t checked_clock;
#endif

    rb_fiber_t *fiber_ptr;
    struct rb_thread_struct *thread_ptr;

    /* storage (ec (fiber) local) */
    struct rb_id_table *local_storage;
    VALUE local_storage_recursive_hash;
    VALUE local_storage_recursive_hash_for_trace;

    /* Inheritable fiber storage. */
    VALUE storage;

    /* eval env */
    const VALUE *root_lep;
    VALUE root_svar;

    /* ensure & callcc */
    rb_ensure_list_t *ensure_list;

    /* trace information */
    struct rb_trace_arg_struct *trace_arg;

    /* temporary places */
    VALUE errinfo;
    VALUE passed_block_handler; /* for rb_iterate */

    uint8_t raised_flag; /* only 3 bits needed */

    /* n.b. only 7 bits needed, really: */
    BITFIELD(enum method_missing_reason, method_missing_reason, 8);

    VALUE private_const_reference;

    /* for GC */
    struct {
        VALUE *stack_start;
        VALUE *stack_end;
        size_t stack_maxsize;
        RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;
    } machine;
};

#ifndef rb_execution_context_t
typedef struct rb_execution_context_struct rb_execution_context_t;
#define rb_execution_context_t rb_execution_context_t
#endif

// for builtin.h
#define VM_CORE_H_EC_DEFINED 1

// Set the vm_stack pointer in the execution context.
void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
// @param ec the execution context to update.
// @param stack a pointer to the stack to use.
// @param size the size of the stack, as in `VALUE stack[size]`.
void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Clear (set to `NULL`) the vm_stack pointer.
// @param ec the execution context to update.
void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
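
// Illustrative sketch (hedged; cf. the fiber/thread setup code in cont.c and
// vm.c -- `my_ec` and `stack` are hypothetical names):
//
//     size_t size = RUBY_VM_FIBER_VM_STACK_SIZE / sizeof(VALUE);
//     VALUE *stack = ALLOC_N(VALUE, size);
//     rb_ec_initialize_vm_stack(my_ec, stack, size); // pushes the initial frame
//     ...                                            // run code on this context
//     rb_ec_clear_vm_stack(my_ec);                   // detach before freeing
//     ruby_xfree(stack);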

struct rb_ext_config {
    bool ractor_safe;
};

typedef struct rb_ractor_struct rb_ractor_t;

struct rb_native_thread;

typedef struct rb_thread_struct {
    struct ccan_list_node lt_node; // managed by a ractor
    VALUE self;
    rb_ractor_t *ractor;
    rb_vm_t *vm;
    struct rb_native_thread *nt;
    rb_execution_context_t *ec;

    struct rb_thread_sched_item sched;
    rb_atomic_t serial; // only for RUBY_DEBUG_LOG()

    VALUE last_status; /* $? */

    /* for cfunc */
    struct rb_calling_info *calling;

    /* for load(true) */
    VALUE top_self;
    VALUE top_wrapper;

    /* thread control */

    BITFIELD(enum rb_thread_status, status, 2);
    /* bit flags */
    unsigned int has_dedicated_nt : 1;
    unsigned int to_kill : 1;
    unsigned int abort_on_exception: 1;
    unsigned int report_on_exception: 1;
    unsigned int pending_interrupt_queue_checked: 1;
    int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
    uint32_t running_time_us; /* 12500..800000 */

    void *blocking_region_buffer;

    VALUE thgroup;
    VALUE value;

    /* temporary place of retval on OPT_CALL_THREADED_CODE */
#if OPT_CALL_THREADED_CODE
    VALUE retval;
#endif

    /* async errinfo queue */
    VALUE pending_interrupt_queue;
    VALUE pending_interrupt_mask_stack;

    /* interrupt management */
    rb_nativethread_lock_t interrupt_lock;
    struct rb_unblock_callback unblock;
    VALUE locking_mutex;
    struct rb_mutex_struct *keeping_mutexes;

    struct rb_waiting_list *join_list;

    union {
        struct {
            VALUE proc;
            VALUE args;
            int kw_splat;
        } proc;
        struct {
            VALUE (*func)(void *);
            void *arg;
        } func;
    } invoke_arg;

    enum thread_invoke_type {
        thread_invoke_type_none = 0,
        thread_invoke_type_proc,
        thread_invoke_type_ractor_proc,
        thread_invoke_type_func
    } invoke_type;

    /* statistics data for profiler */
    VALUE stat_insn_usage;

    /* fiber */
    rb_fiber_t *root_fiber;

    VALUE scheduler;
    unsigned int blocking;

    /* misc */
    VALUE name;
    void **specific_storage;

    struct rb_ext_config ext_config;
} rb_thread_t;

static inline unsigned int
rb_th_serial(const rb_thread_t *th)
{
    return th ? (unsigned int)th->serial : 0;
}

typedef enum {
    VM_DEFINECLASS_TYPE_CLASS           = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE          = 0x02,
    /* 0x03..0x06 is reserved */
    VM_DEFINECLASS_TYPE_MASK            = 0x07
} rb_vm_defineclass_type_t;

#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
#define VM_DEFINECLASS_FLAG_SCOPED         0x08
#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)

/* iseq.c */
RUBY_SYMBOL_EXPORT_BEGIN

/* node -> iseq */
rb_iseq_t *rb_iseq_new         (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
rb_iseq_t *rb_iseq_new_top     (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
rb_iseq_t *rb_iseq_new_main    (const rb_ast_body_t *ast, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
rb_iseq_t *rb_iseq_new_eval    (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
rb_iseq_t *rb_iseq_new_with_opt(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
                                enum rb_iseq_type, const rb_compile_option_t*);

struct iseq_link_anchor;
struct rb_iseq_new_with_callback_callback_func {
    VALUE flags;
    VALUE reserved;
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
    const void *data;
};
static inline struct rb_iseq_new_with_callback_callback_func *
rb_iseq_new_with_callback_new_callback(
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
{
    VALUE memo = rb_imemo_new(imemo_ifunc, (VALUE)func, (VALUE)ptr, Qundef, Qfalse);
    return (struct rb_iseq_new_with_callback_callback_func *)memo;
}
rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
    VALUE name, VALUE path, VALUE realpath, int first_lineno,
    const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);

VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);

VALUE rb_iseq_coverage(const rb_iseq_t *iseq);

RUBY_EXTERN VALUE rb_cISeq;
RUBY_EXTERN VALUE rb_cRubyVM;
RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
RUBY_EXTERN VALUE rb_block_param_proxy;
RUBY_SYMBOL_EXPORT_END

#define GetProcPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_proc_t, (ptr))

typedef struct {
    const struct rb_block block;
    unsigned int is_from_method: 1; /* bool */
    unsigned int is_lambda: 1;      /* bool */
    unsigned int is_isolated: 1;    /* bool */
} rb_proc_t;

RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_proc_isolate(VALUE self);
VALUE rb_proc_isolate_bang(VALUE self);
VALUE rb_proc_ractor_make_shareable(VALUE self);
RUBY_SYMBOL_EXPORT_END

typedef struct {
    VALUE flags; /* imemo header */
    rb_iseq_t *iseq;
    const VALUE *ep;
    const VALUE *env;
    unsigned int env_size;
} rb_env_t;

extern const rb_data_type_t ruby_binding_data_type;

#define GetBindingPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_binding_t, (ptr))

typedef struct {
    const struct rb_block block;
    const VALUE pathobj;
    int first_lineno;
} rb_binding_t;

/* used by compile time and send insn */

enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,
    VM_CHECKMATCH_TYPE_CASE = 2,
    VM_CHECKMATCH_TYPE_RESCUE = 3
};

#define VM_CHECKMATCH_TYPE_MASK 0x03
#define VM_CHECKMATCH_ARRAY     0x04

enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1,
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE
};

enum vm_svar_index {
    VM_SVAR_LASTLINE = 0, /* $_ */
    VM_SVAR_BACKREF = 1,  /* $~ */

    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
};

/* inline cache */
typedef struct iseq_inline_constant_cache *IC;
typedef struct iseq_inline_iv_cache_entry *IVC;
typedef struct iseq_inline_cvar_cache_entry *ICVARC;
typedef union iseq_inline_storage_entry *ISE;
typedef const struct rb_callinfo *CALL_INFO;
typedef const struct rb_callcache *CALL_CACHE;
typedef struct rb_call_data *CALL_DATA;

typedef VALUE CDHASH;

#ifndef FUNC_FASTCALL
#define FUNC_FASTCALL(x) x
#endif

typedef rb_control_frame_t *
  (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);

#define VM_TAGGED_PTR_SET(p, tag)  ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))

#define GC_GUARDED_PTR(p)     VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p)   (((VALUE)(p)) & 0x01)
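
/* Illustrative sketch (hedged): tagging relies on VALUE-aligned pointers
 * whose low two bits are zero, so a tag can be ORed in and masked back out:
 *
 *   VALUE guarded = GC_GUARDED_PTR(ep);  // ep | 0x01; low bit set, so GC treats it like a Fixnum
 *   GC_GUARDED_PTR_P(guarded);           // => 1
 *   const VALUE *ep2 = GC_GUARDED_PTR_REF(guarded); // masks off 0x03, restoring ep
 */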

enum vm_frame_env_flags {
    /* Frame/Environment flag bits:
     *   MMMM MMMM MMMM MMMM ____ FFFF FFFE EEEX (LSB)
     *
     * X   : tag for GC marking (it looks like a Fixnum)
     * EEE : 4 bits Env flags
     * FF..: 7 bits Frame flags
     * MM..: 15 bits frame magic (to check frame corruption)
     */

    /* frame types */
    VM_FRAME_MAGIC_METHOD = 0x11110001,
    VM_FRAME_MAGIC_BLOCK  = 0x22220001,
    VM_FRAME_MAGIC_CLASS  = 0x33330001,
    VM_FRAME_MAGIC_TOP    = 0x44440001,
    VM_FRAME_MAGIC_CFUNC  = 0x55550001,
    VM_FRAME_MAGIC_IFUNC  = 0x66660001,
    VM_FRAME_MAGIC_EVAL   = 0x77770001,
    VM_FRAME_MAGIC_RESCUE = 0x78880001,
    VM_FRAME_MAGIC_DUMMY  = 0x79990001,

    VM_FRAME_MAGIC_MASK   = 0x7fff0001,

    /* frame flag */
    VM_FRAME_FLAG_FINISH    = 0x0020,
    VM_FRAME_FLAG_BMETHOD   = 0x0040,
    VM_FRAME_FLAG_CFRAME    = 0x0080,
    VM_FRAME_FLAG_LAMBDA    = 0x0100,
    VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
    VM_FRAME_FLAG_CFRAME_KW = 0x0400,
    VM_FRAME_FLAG_PASSED    = 0x0800,

    /* env flag */
    VM_ENV_FLAG_LOCAL       = 0x0002,
    VM_ENV_FLAG_ESCAPED     = 0x0004,
    VM_ENV_FLAG_WB_REQUIRED = 0x0008,
    VM_ENV_FLAG_ISOLATED    = 0x0010,
};

#define VM_ENV_DATA_SIZE          ( 3)

#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
#define VM_ENV_DATA_INDEX_FLAGS   ( 0) /* ep[ 0] */
#define VM_ENV_DATA_INDEX_ENV     ( 1) /* ep[ 1] */

#define VM_ENV_INDEX_LAST_LVAR    (-VM_ENV_DATA_SIZE)
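
/* Illustrative sketch (hedged): an environment is addressed through its ep,
 * with the VM_ENV_DATA_SIZE slots at fixed offsets:
 *
 *   ep[VM_ENV_DATA_INDEX_ME_CREF]  // method entry / cref
 *   ep[VM_ENV_DATA_INDEX_SPECVAL]  // block handler, or guarded prev-ep pointer
 *   ep[VM_ENV_DATA_INDEX_FLAGS]    // Fixnum-tagged vm_frame_env_flags word
 *
 * Locals sit below that, the last one at ep[VM_ENV_INDEX_LAST_LVAR].
 */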

static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);

static inline void
VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
}

static inline void
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
}

static inline unsigned long
VM_ENV_FLAGS(const VALUE *ep, long flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    return flags & flag;
}

static inline unsigned long
VM_FRAME_TYPE(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
}

static inline int
VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
}

static inline int
VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
}

static inline int
VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
}

static inline int
VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
}

static inline int
rb_obj_is_iseq(VALUE iseq)
{
    return imemo_type_p(iseq, imemo_iseq);
}

#if VM_CHECK_MODE > 0
#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
#endif

static inline int
VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
{
    int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
    VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
              (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
    return cframe_p;
}

static inline int
VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P(cfp);
}

#define RUBYVM_CFUNC_FRAME_P(cfp) \
  (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)

#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
#define VM_BLOCK_HANDLER_NONE 0

static inline int
VM_ENV_LOCAL_P(const VALUE *ep)
{
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}

static inline const VALUE *
VM_ENV_PREV_EP(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

static inline VALUE
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return ep[VM_ENV_DATA_INDEX_SPECVAL];
}

#if VM_CHECK_MODE > 0
int rb_vm_ep_in_heap_p(const VALUE *ep);
#endif

static inline int
VM_ENV_ESCAPED_P(const VALUE *ep)
{
    VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
}

#if VM_CHECK_MODE > 0
static inline int
vm_assert_env(VALUE obj)
{
    VM_ASSERT(imemo_type_p(obj, imemo_env));
    return 1;
}
#endif

static inline VALUE
VM_ENV_ENVVAL(const VALUE *ep)
{
    VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
    VM_ASSERT(vm_assert_env(envval));
    return envval;
}

static inline const rb_env_t *
VM_ENV_ENVVAL_PTR(const VALUE *ep)
{
    return (const rb_env_t *)VM_ENV_ENVVAL(ep);
}

static inline const rb_env_t *
vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
{
    rb_env_t *env = (rb_env_t *)rb_imemo_new(imemo_env, (VALUE)env_ep, (VALUE)env_body, 0, (VALUE)iseq);
    env->env_size = env_size;
    env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
    return env;
}

static inline void
VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
{
    *((VALUE *)ptr) = v;
}

static inline void
VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
{
    VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
    VM_FORCE_WRITE(ptr, special_const_value);
}

static inline void
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
{
    VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
    VM_FORCE_WRITE(&ep[index], v);
}

const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
const VALUE *rb_vm_proc_local_ep(VALUE proc);
void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);

VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);

#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp)     ((cfp)-1)

#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
  ((void *)(ecfp) > (void *)(cfp))

static inline const rb_control_frame_t *
RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
{
    return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
}

static inline int
RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
    return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
}
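
/* Illustrative sketch (hedged): control frames grow downward from the end of
 * ec->vm_stack, so walking from the newest frame toward the oldest is a
 * pointer increment (cf. the frame walkers in vm_backtrace.c):
 *
 *   const rb_control_frame_t *cfp = ec->cfp;
 *   while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
 *       ... // inspect cfp->iseq, cfp->pc, cfp->self, ...
 *       cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
 *   }
 */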

static inline int
VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x01) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
#endif
        return 1;
    }
    else {
        return 0;
    }
}

static inline VALUE
VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}

static inline int
VM_BH_IFUNC_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x03) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
#endif
        return 1;
    }
    else {
        return 0;
    }
}

static inline VALUE
VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return captured;
}

static inline const struct rb_captured_block *
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}

static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)
{
    if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
        return block_handler_type_iseq;
    }
    else if (VM_BH_IFUNC_P(block_handler)) {
        return block_handler_type_ifunc;
    }
    else if (SYMBOL_P(block_handler)) {
        return block_handler_type_symbol;
    }
    else {
        VM_ASSERT(rb_obj_is_proc(block_handler));
        return block_handler_type_proc;
    }
}
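
/* Summary of the tagging decoded above (a hedged restatement, not new rules):
 *
 *   (bh & 0x03) == 0x01  -> captured block whose code is an iseq
 *   (bh & 0x03) == 0x03  -> captured block whose code is an ifunc
 *   SYMBOL_P(bh)         -> symbol handler, e.g. from &:sym
 *   otherwise            -> a Proc object (VM_BLOCK_HANDLER_NONE means no block)
 */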

static inline void
vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
{
    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
              (vm_block_handler_type(block_handler), 1));
}

static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
{
#if VM_CHECK_MODE > 0
    switch (block->type) {
      case block_type_iseq:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
        break;
      case block_type_ifunc:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
        break;
      case block_type_symbol:
        VM_ASSERT(SYMBOL_P(block->as.symbol));
        break;
      case block_type_proc:
        VM_ASSERT(rb_obj_is_proc(block->as.proc));
        break;
    }
#endif
    return block->type;
}

static inline void
vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
{
    struct rb_block *mb = (struct rb_block *)block;
    mb->type = type;
}

static inline const struct rb_block *
vm_proc_block(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
}

static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
static inline const VALUE *vm_block_ep(const struct rb_block *block);

static inline const rb_iseq_t *
vm_proc_iseq(VALUE procval)
{
    return vm_block_iseq(vm_proc_block(procval));
}

static inline const VALUE *
vm_proc_ep(VALUE procval)
{
    return vm_block_ep(vm_proc_block(procval));
}

static inline const rb_iseq_t *
vm_block_iseq(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
      case block_type_proc: return vm_proc_iseq(block->as.proc);
      case block_type_ifunc:
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_iseq);
    return NULL;
}

static inline const VALUE *
vm_block_ep(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:  return block->as.captured.ep;
      case block_type_proc:   return vm_proc_ep(block->as.proc);
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_ep);
    return NULL;
}

static inline VALUE
vm_block_self(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:
        return block->as.captured.self;
      case block_type_proc:
        return vm_block_self(vm_proc_block(block->as.proc));
      case block_type_symbol:
        return Qundef;
    }
    VM_UNREACHABLE(vm_block_self);
    return Qundef;
}

static inline VALUE
VM_BH_TO_SYMBOL(VALUE block_handler)
{
    VM_ASSERT(SYMBOL_P(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_SYMBOL(VALUE symbol)
{
    VM_ASSERT(SYMBOL_P(symbol));
    return symbol;
}

static inline VALUE
VM_BH_TO_PROC(VALUE block_handler)
{
    VM_ASSERT(rb_obj_is_proc(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_PROC(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return procval;
}

/* VM related object allocate functions */
VALUE rb_thread_alloc(VALUE klass);
VALUE rb_binding_alloc(VALUE klass);
VALUE rb_proc_alloc(VALUE klass);
VALUE rb_proc_dup(VALUE self);

/* for debug */
extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);

#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
bool rb_vm_bugreport(const void *, FILE *);
typedef void (*ruby_sighandler_t)(int);
RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));

/* functions about thread/vm execution */
RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_iseq_eval(const rb_iseq_t *iseq);
VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
VALUE rb_iseq_path(const rb_iseq_t *iseq);
VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
RUBY_SYMBOL_EXPORT_END

VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);

int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);

VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);

VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
static inline VALUE
rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 0);
}

static inline VALUE
rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 1);
}

VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
VALUE rb_vm_env_local_variables(const rb_env_t *env);
const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
void rb_vm_inc_const_missing_count(void);
VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
                    const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
void rb_vm_pop_frame(rb_execution_context_t *ec);

void rb_thread_start_timer_thread(void);
void rb_thread_stop_timer_thread(void);
void rb_thread_reset_timer_thread(void);
void rb_thread_wakeup_timer_thread(int);

static inline void
rb_vm_living_threads_init(rb_vm_t *vm)
{
    ccan_list_head_init(&vm->waiting_fds);
    ccan_list_head_init(&vm->workqueue);
    ccan_list_head_init(&vm->ractor.set);
    ccan_list_head_init(&vm->ractor.sched.zombie_threads);
}

typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
int rb_vm_get_sourceline(const rb_control_frame_t *);
void rb_vm_stack_to_heap(rb_execution_context_t *ec);
void ruby_thread_init_stack(rb_thread_t *th);
rb_thread_t * ruby_thread_from_native(void);
int ruby_thread_set_native(rb_thread_t *th);
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);

void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);

#define rb_vm_register_special_exception(sp, e, m) \
    rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))

void rb_gc_mark_machine_stack(const rb_execution_context_t *ec);

void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);

const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);

#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]

#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
    STATIC_ASSERT(sizeof_sp,  sizeof(*(sp))  == sizeof(VALUE)); \
    STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
    const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
    if (UNLIKELY((cfp) <= &bound[1])) { \
        vm_stackoverflow(); \
    } \
} while (0)

#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
    CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
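
/* Illustrative sketch (hedged): a caller about to push `n` values plus a new
 * frame reserves room first, e.g.
 *
 *   CHECK_VM_STACK_OVERFLOW0(ec->cfp, ec->cfp->sp, n + VM_ENV_DATA_SIZE);
 *
 * which calls vm_stackoverflow() before sp could run into the cfp array.
 */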

VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);

rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c

/* for thread */

#if RUBY_VM_THREAD_MODEL == 2

RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
RUBY_EXTERN unsigned int    ruby_vm_event_local_num;

#define GET_VM()     rb_current_vm()
#define GET_RACTOR() rb_current_ractor()
#define GET_THREAD() rb_current_thread()
#define GET_EC()     rb_current_execution_context(true)

static inline rb_thread_t *
rb_ec_thread_ptr(const rb_execution_context_t *ec)
{
    return ec->thread_ptr;
}

static inline rb_ractor_t *
rb_ec_ractor_ptr(const rb_execution_context_t *ec)
{
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
    if (th) {
        VM_ASSERT(th->ractor != NULL);
        return th->ractor;
    }
    else {
        return NULL;
    }
}

static inline rb_vm_t *
rb_ec_vm_ptr(const rb_execution_context_t *ec)
{
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
    if (th) {
        return th->vm;
    }
    else {
        return NULL;
    }
}

static inline rb_execution_context_t *
rb_current_execution_context(bool expect_ec)
{
#ifdef RB_THREAD_LOCAL_SPECIFIER
  #ifdef __APPLE__
    rb_execution_context_t *ec = rb_current_ec();
  #else
    rb_execution_context_t *ec = ruby_current_ec;
  #endif

    /* On shared objects, `__tls_get_addr()` is used to access the TLS
     * and the address of `ruby_current_ec` can be stored on a function
     * frame. However, this address can be mis-used after native thread
     * migration of a coroutine.
     *   1) Get `ptr = &ruby_current_ec` on NT1 and store it on the frame.
     *   2) Context switch and resume it on NT2.
     *   3) `ptr` is used on NT2 but it accesses the TLS of NT1.
     * This assertion checks for such misusage.
     *
     * To avoid accidents, `GET_EC()` should be called once per frame.
     * Note that inlining can produce the same problem.
     */
    VM_ASSERT(ec == rb_current_ec_noinline());
#else
    rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
#endif
    VM_ASSERT(!expect_ec || ec != NULL);
    return ec;
}
1958
1959static inline rb_thread_t *
1960rb_current_thread(void)
1961{
1962 const rb_execution_context_t *ec = GET_EC();
1963 return rb_ec_thread_ptr(ec);
1964}
1965
1966static inline rb_ractor_t *
1967rb_current_ractor_raw(bool expect)
1968{
1969 if (ruby_single_main_ractor) {
1970 return ruby_single_main_ractor;
1971 }
1972 else {
1973 const rb_execution_context_t *ec = rb_current_execution_context(expect);
1974 return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
1975 }
1976}
1977
1978static inline rb_ractor_t *
1979rb_current_ractor(void)
1980{
1981 return rb_current_ractor_raw(true);
1982}
1983
1984static inline rb_vm_t *
1985rb_current_vm(void)
1986{
1987#if 0 // TODO: reconsider the assertions
1988 VM_ASSERT(ruby_current_vm_ptr == NULL ||
1989 ruby_current_execution_context_ptr == NULL ||
1990 rb_ec_thread_ptr(GET_EC()) == NULL ||
1991 rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
1992 rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
1993#endif
1994
1995 return ruby_current_vm_ptr;
1996}
1997
1998void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
1999 unsigned int recorded_lock_rec,
2000 unsigned int current_lock_rec);
2001
2002static inline unsigned int
2003rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
2004{
2005 rb_vm_t *vm = rb_ec_vm_ptr(ec);
2006
2007 if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
2008 return 0;
2009 }
2010 else {
2011 return vm->ractor.sync.lock_rec;
2012 }
2013}
2014
2015#else
2016#error "unsupported thread model"
2017#endif
2018
2019enum {
2020 TIMER_INTERRUPT_MASK = 0x01,
2021 PENDING_INTERRUPT_MASK = 0x02,
2022 POSTPONED_JOB_INTERRUPT_MASK = 0x04,
2023 TRAP_INTERRUPT_MASK = 0x08,
2024 TERMINATE_INTERRUPT_MASK = 0x10,
2025 VM_BARRIER_INTERRUPT_MASK = 0x20,
2026};
2027
2028#define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
2029#define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
2030#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
2031#define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
2032#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
2033#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
2034#define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
2035 (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
2036
2037static inline bool
2038RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
2039{
2040#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
2041 uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
2042
2043 if (current_clock != ec->checked_clock) {
2044 ec->checked_clock = current_clock;
2045 RUBY_VM_SET_TIMER_INTERRUPT(ec);
2046 }
2047#endif
2048 return ec->interrupt_flag & ~(ec)->interrupt_mask;
2049}
2050
2051VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
2052int rb_signal_buff_size(void);
2053int rb_signal_exec(rb_thread_t *th, int sig);
2054void rb_threadptr_check_signal(rb_thread_t *mth);
2055void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
2056void rb_threadptr_signal_exit(rb_thread_t *th);
2057int rb_threadptr_execute_interrupts(rb_thread_t *, int);
2058void rb_threadptr_interrupt(rb_thread_t *th);
2059void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
2060void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
2061void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
2062VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
2063void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
2064void rb_execution_context_update(rb_execution_context_t *ec);
2065void rb_execution_context_mark(const rb_execution_context_t *ec);
2066void rb_fiber_close(rb_fiber_t *fib);
2067void Init_native_thread(rb_thread_t *th);
2068int rb_vm_check_ints_blocking(rb_execution_context_t *ec);
2069
2070// vm_sync.h
2071void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
2072void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
2073
2074#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
2075static inline void
2076rb_vm_check_ints(rb_execution_context_t *ec)
2077{
2078#ifdef RUBY_ASSERT_CRITICAL_SECTION
2079 VM_ASSERT(ruby_assert_critical_section_entered == 0);
2080#endif
2081
2082 VM_ASSERT(ec == GET_EC());
2083
2084 if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
2085 rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
2086 }
2087}
2088
2089/* tracer */
2090
2091 struct rb_trace_arg_struct {
2092 rb_event_flag_t event;
2093 rb_execution_context_t *ec;
2094 const rb_control_frame_t *cfp;
2095 VALUE self;
2096 ID id;
2097 ID called_id;
2098 VALUE klass;
2099 VALUE data;
2100
2101 int klass_solved;
2102
2103 /* calc from cfp */
2104 int lineno;
2105 VALUE path;
2106};
2107
2108void rb_hook_list_mark(rb_hook_list_t *hooks);
2109void rb_hook_list_mark_and_update(rb_hook_list_t *hooks);
2110void rb_hook_list_free(rb_hook_list_t *hooks);
2111void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
2112void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);
2113
2114void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
2115
2116#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
2117 const rb_event_flag_t flag_arg_ = (flag_); \
2118 rb_hook_list_t *hooks_arg_ = (hooks_); \
2119 if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
2120 /* defer evaluating the other arguments */ \
2121 rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
2122 } \
2123} while (0)
2124
2125static inline void
2126rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
2127 VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
2128{
2129 struct rb_trace_arg_struct trace_arg;
2130
2131 VM_ASSERT((hooks->events & flag) != 0);
2132
2133 trace_arg.event = flag;
2134 trace_arg.ec = ec;
2135 trace_arg.cfp = ec->cfp;
2136 trace_arg.self = self;
2137 trace_arg.id = id;
2138 trace_arg.called_id = called_id;
2139 trace_arg.klass = klass;
2140 trace_arg.data = data;
2141 trace_arg.path = Qundef;
2142 trace_arg.klass_solved = 0;
2143
2144 rb_exec_event_hooks(&trace_arg, hooks, pop_p);
2145}
2146
2147 struct rb_ractor_pub {
2148 VALUE self;
2149 uint32_t id;
2150 rb_hook_list_t hooks;
2151};
2152
2153static inline rb_hook_list_t *
2154rb_ec_ractor_hooks(const rb_execution_context_t *ec)
2155{
2156 struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
2157 return &cr_pub->hooks;
2158}
2159
2160#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2161 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
2162
2163#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2164 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
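
/* Illustrative call (a sketch): fire a :line event for the current frame.
 * Qundef marks "no payload" in the data slot, and the macro's early
 * `events & flag` test means the remaining arguments are only evaluated
 * when a matching hook is actually registered.
 */
#if 0
EXEC_EVENT_HOOK(ec, RUBY_EVENT_LINE, ec->cfp->self, 0, 0, 0, Qundef);
#endif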
2165
2166static inline void
2167rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
2168{
2169 EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
2170 NIL_P(eval_script) ? (VALUE)iseq :
2171 rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
2172}
2173
2174void rb_vm_trap_exit(rb_vm_t *vm);
2175void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
2176void rb_vm_postponed_job_free(void); /* vm_trace.c */
2177size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
2178void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */
2179
2180RUBY_SYMBOL_EXPORT_BEGIN
2181
2182int rb_thread_check_trap_pending(void);
2183
2184/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
2185#define RUBY_EVENT_COVERAGE_LINE 0x010000
2186#define RUBY_EVENT_COVERAGE_BRANCH 0x020000
2187
2188extern VALUE rb_get_coverages(void);
2189extern void rb_set_coverages(VALUE, int, VALUE);
2190extern void rb_clear_coverages(void);
2191extern void rb_reset_coverages(void);
2192extern void rb_resume_coverages(void);
2193extern void rb_suspend_coverages(void);
2194
2195void rb_postponed_job_flush(rb_vm_t *vm);
2196
2197// ractor.c
2198RUBY_EXTERN VALUE rb_eRactorUnsafeError;
2199RUBY_EXTERN VALUE rb_eRactorIsolationError;
2200
2201RUBY_SYMBOL_EXPORT_END
2202
2203#endif /* RUBY_VM_CORE_H */