22#include "insns_info.inc"
/* Sentinel stored in compile_status.stack_size_for_pos[] for instruction
 * positions that have not been compiled yet (real stack sizes are >= 0). */
#define NOT_COMPILED_STACK_SIZE -1
/* True if the instruction at byte position `pos` was already compiled, i.e.
 * its stack_size_for_pos slot has been filled in by compile_insns.
 * `status` is parenthesized for macro hygiene so that argument expressions
 * such as `&st` expand correctly; `pos` already sits inside `[]`. */
#define ALREADY_COMPILED_P(status, pos) ((status)->stack_size_for_pos[pos] != NOT_COMPILED_STACK_SIZE)
/* FRAGMENT (extraction-garbled: original line numbers are fused into the
 * text and interior lines are missing).  Appears to map a keyword call_data
 * pointer into the combined call-cache index space by offsetting past
 * body->ci_size — TODO(review): confirm against the full source. */
36 if (kw_cd < kw_calls) {
40 return kw_cd - kw_calls + body->
ci_size;
/* FRAGMENTS of the compiler's bookkeeping structs; most members are missing
 * from this view. */
/* Context captured for a call site that is being inlined into its caller. */
45struct inlined_call_context {
/* Per-compilation state threaded through all compile_* helpers. */
55struct compile_status {
/* stack size at each iseq position, or NOT_COMPILED_STACK_SIZE if unvisited */
57 int *stack_size_for_pos;
68 struct inlined_call_context inline_context;
/* State local to one straight-line branch of compilation. */
74struct compile_branch {
75 unsigned int stack_size;
/* Accumulator used while emitting a case/when dispatch table. */
79struct case_dispatch_var {
/* iseq position the emitted `goto label_N` offsets are relative to */
81 unsigned int base_pos;
/* FRAGMENT: tail of an optimizability check (presumably fastpath_applied_iseq_p)
 * followed by a piece of the case-dispatch emitter — interior lines missing. */
103 && vm_call_iseq_optimizable_p(
ci,
cc);
/* Case-dispatch callback: `arg` is threaded through as an opaque pointer. */
109 struct case_dispatch_var *var = (
struct case_dispatch_var *)
arg;
/* Skip duplicate hash values so each `case` label is emitted only once. */
112 if (var->last_value != value) {
114 var->last_value = value;
115 fprintf(var->f,
" case %d:\n", offset);
116 fprintf(var->f,
" goto label_%d;\n", var->base_pos + offset);
/* FRAGMENT: guarded by MJIT_COMMENT_ID — looks like a sanitizer that copies
 * text into a generated C block comment, breaking up "* /" and "/ *" pairs
 * and escaping backslash/quote so the emitted C stays well-formed.
 * TODO(review): confirm against the full function; interior lines missing. */
126#ifdef MJIT_COMMENT_ID
137 case '*':
case '/':
/* Only dangerous when the previous char forms a comment open/close pair;
 * `c ^ ('/' ^ '*')` maps '*' <-> '/' to check for the partner char. */
if (prev != (c ^ (
'/' ^
'*')))
break;
138 case '\\':
case '"':
/* Emit a backslash escape before the problematic character. */
fputc(
'\\',
f);
/* FRAGMENT: forward declaration of compile_insns followed by part of
 * compile_insn — the function header and much of the body are missing. */
148 unsigned int pos,
struct compile_status *status);
158 const unsigned int pos,
struct compile_status *status,
struct compile_branch *b)
160 unsigned int next_pos = pos + insn_len(insn);
/* Per-instruction code generation lives in the generated include below. */
163 #include "mjit_compile.inc"
/* If this branch falls through into already-compiled code, jump to its
 * label instead of compiling it again. */
168 if (!b->finish_p && next_pos < body->iseq_size && ALREADY_COMPILED_P(status, next_pos)) {
169 fprintf(
f,
"goto label_%d;\n", next_pos);
/* Sanity check: every branch reaching next_pos must agree on stack depth;
 * a mismatch invalidates the whole compilation. */
172 if ((
unsigned int)status->stack_size_for_pos[next_pos] != b->stack_size) {
174 fprintf(
stderr,
"MJIT warning: JIT stack assumption is not the same between branches (%d != %u)\n",
175 status->stack_size_for_pos[next_pos], b->stack_size);
176 status->success =
false;
/* FRAGMENT of compile_insns: compiles one straight-line run of instructions
 * starting at `pos`, recording stack depth per position and stopping when it
 * reaches already-compiled code or a branch-terminating insn.  Header and
 * several interior lines are missing from this view. */
187 unsigned int pos,
struct compile_status *status)
190 struct compile_branch branch;
192 branch.stack_size = stack_size;
193 branch.finish_p =
false;
195 while (pos < body->iseq_size && !ALREADY_COMPILED_P(status, pos) && !branch.finish_p) {
196#if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
/* Record the stack depth on entry; this also marks `pos` as compiled. */
201 status->stack_size_for_pos[pos] = (
int)branch.stack_size;
/* Every instruction gets a C label so branches can `goto` into it. */
203 fprintf(
f,
"\nlabel_%d: /* %s */\n", pos, insn_name(insn));
204 pos = compile_insn(
f, body, insn, body->
iseq_encoded + (pos+1), pos, status, &branch);
/* The tracked depth must never exceed the iseq's declared stack_max. */
205 if (status->success && branch.stack_size > body->
stack_max) {
207 fprintf(
stderr,
"MJIT warning: JIT stack size (%d) exceeded its max size (%d)\n", branch.stack_size, body->
stack_max);
208 status->success =
false;
210 if (!status->success)
/* FRAGMENT of compile_inlined_cancel_handler: emits the bail-out path for an
 * inlined method body.  On cancel it disables inlining, schedules a
 * recompile, rebuilds the control frame the interpreter expects (undoing the
 * frame-skip optimization), then re-enters vm_exec.  Interior lines missing. */
220 fprintf(
f,
" RB_DEBUG_COUNTER_INC(mjit_cancel);\n");
/* Inlining proved unsafe for this iseq: turn it off and recompile. */
221 fprintf(
f,
" rb_mjit_iseq_compile_info(original_iseq->body)->disable_inlining = true;\n");
222 fprintf(
f,
" rb_mjit_recompile_iseq(original_iseq);\n");
/* Save the caller's pc/sp, then restore the values from before the inlined
 * call so the pushed frame looks like a normal method entry. */
225 fprintf(
f,
" const VALUE current_pc = reg_cfp->pc;\n");
226 fprintf(
f,
" const VALUE current_sp = reg_cfp->sp;\n");
227 fprintf(
f,
" reg_cfp->pc = orig_pc;\n");
228 fprintf(
f,
" reg_cfp->sp = orig_sp;\n\n");
/* Reconstruct the rb_calling_info that the skipped call would have built. */
231 fprintf(
f,
" struct rb_calling_info calling;\n");
232 fprintf(
f,
" calling.block_handler = VM_BLOCK_HANDLER_NONE;\n");
233 fprintf(
f,
" calling.argc = %d;\n", inline_context->orig_argc);
234 fprintf(
f,
" calling.recv = reg_cfp->self;\n");
235 fprintf(
f,
" reg_cfp->self = orig_self;\n");
/* Push the callee frame exactly as the interpreter's fastpath would have. */
236 fprintf(
f,
" vm_call_iseq_setup_normal(ec, reg_cfp, &calling, (const rb_callable_method_entry_t *)0x%"PRIxVALUE", 0, %d, %d);\n\n",
237 inline_context->me, inline_context->param_size, inline_context->local_size);
/* Point at the newly pushed frame and restore the saved pc/sp into it. */
240 fprintf(
f,
" reg_cfp = ec->cfp;\n");
241 fprintf(
f,
" reg_cfp->pc = current_pc;\n");
242 fprintf(
f,
" reg_cfp->sp = current_sp;\n");
/* Flush the JIT's local `stack[]` copy back to the VM stack (loop over i). */
244 fprintf(
f,
" *(vm_base_ptr(reg_cfp) + %d) = stack[%d];\n",
i,
i);
/* Hand control back to the interpreter for the rebuilt frame. */
248 fprintf(
f,
" return vm_exec(ec, ec->cfp);\n");
/* FRAGMENT of compile_cancel_handler: emits the shared cancel labels at the
 * end of a JIT-ed function.  An inlined body (inlined_iseqs == NULL marks a
 * non-root/inlined compilation — TODO confirm) delegates to the inlined
 * cancel handler; otherwise per-cause labels disable the offending
 * optimization and schedule a recompile.  Interior lines missing. */
255 if (status->inlined_iseqs ==
NULL) {
256 compile_inlined_cancel_handler(
f, body, &status->inline_context);
/* Cancel due to a send-cache miss: disable the send cache and recompile. */
261 fprintf(
f,
" RB_DEBUG_COUNTER_INC(mjit_cancel_send_inline);\n");
262 fprintf(
f,
" rb_mjit_iseq_compile_info(original_iseq->body)->disable_send_cache = true;\n");
263 fprintf(
f,
" rb_mjit_recompile_iseq(original_iseq);\n");
/* Cancel due to an ivar-cache miss: disable the ivar cache and recompile. */
267 fprintf(
f,
" RB_DEBUG_COUNTER_INC(mjit_cancel_ivar_inline);\n");
268 fprintf(
f,
" rb_mjit_iseq_compile_info(original_iseq->body)->disable_ivar_cache = true;\n");
269 fprintf(
f,
" rb_mjit_recompile_iseq(original_iseq);\n");
/* Generic cancel: count it and, when using a function-local stack copy,
 * write it back to the VM stack before returning to the interpreter. */
273 fprintf(
f,
" RB_DEBUG_COUNTER_INC(mjit_cancel);\n");
274 if (status->local_stack_p) {
276 fprintf(
f,
" *(vm_base_ptr(reg_cfp) + %d) = stack[%d];\n",
i,
i);
/* FRAGMENT of mjit_compile_body: emits the function prologue (local stack
 * copy, pinned iseq pointer, PC-based dispatch switch), compiles all
 * instructions from position 0, then appends the cancel handler.  Interior
 * lines missing. */
288 status->success =
true;
/* When safe, values live in a C array `stack` instead of the VM stack. */
291 if (status->local_stack_p) {
295 fprintf(
f,
" VALUE *stack = reg_cfp->sp;\n");
297 if (status->inlined_iseqs !=
NULL)
/* Pin the original iseq_encoded address so goto labels can be resolved
 * from reg_cfp->pc (root compilation only — see the guard above). */
299 fprintf(
f,
" static const VALUE *const original_body_iseq = (VALUE *)0x%"PRIxVALUE";\n",
/* Dispatch on the resume position so execution can enter mid-function. */
307 fprintf(
f,
" switch (reg_cfp->pc - reg_cfp->iseq->body->iseq_encoded) {\n");
316 compile_insns(
f, body, 0, 0, status);
317 compile_cancel_handler(
f, body, status);
318 return status->success;
/* FRAGMENT of an inlinability scan (presumably inlinable_iseq_p): walks the
 * iseq rejecting instructions that depend on sp/pc or touch locals/block
 * params, which would break frame-skipping inlining.  Interior lines
 * missing; the case labels below appear to be the reject list. */
336 unsigned int pos = 0;
338#if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
/* `leave` is exempt even though it touches sp/pc. */
348 if (insn !=
BIN(leave) && insn_may_depend_on_sp_or_pc(insn, body->
iseq_encoded + (pos + 1)))
/* Local-variable and block-parameter access needs a real frame: reject. */
353 case
BIN(getlocal_WC_0):
354 case
BIN(getlocal_WC_1):
356 case
BIN(setlocal_WC_0):
357 case
BIN(setlocal_WC_1):
358 case
BIN(getblockparam):
359 case
BIN(getblockparamproxy):
360 case
BIN(setblockparam):
363 pos += insn_len(insn);
/* FRAGMENT of the INIT_COMPILE_STATUS macro (some continuation lines are
 * missing from this view, e.g. original lines 380 and 384): initializes a
 * struct compile_status with alloca-backed scratch arrays sized from `body`,
 * then zero-fills them.  stack_size_for_pos is memset with the
 * NOT_COMPILED_STACK_SIZE sentinel — this works only because -1 is
 * all-one-bits per byte.  compile_root_p selects the shared per-iseq
 * compile_info vs. a private scratch copy for inlined children.
 * NOTE(review): alloca here means the buffers live in the CALLER's frame —
 * callers must not return before compilation finishes.  No interior comments
 * added because every macro line must stay backslash-continued. */
369#define INIT_COMPILE_STATUS(status, body, compile_root_p) do { \
370 status = (struct compile_status){ \
371 .stack_size_for_pos = (int *)alloca(sizeof(int) * body->iseq_size), \
372 .inlined_iseqs = compile_root_p ? \
373 alloca(sizeof(const struct rb_iseq_constant_body *) * body->iseq_size) : NULL, \
374 .cc_entries = (body->ci_size + body->ci_kw_size) > 0 ? \
375 alloca(sizeof(struct rb_call_cache) * (body->ci_size + body->ci_kw_size)) : NULL, \
376 .is_entries = (body->is_size > 0) ? \
377 alloca(sizeof(union iseq_inline_storage_entry) * body->is_size) : NULL, \
378 .compile_info = compile_root_p ? \
379 rb_mjit_iseq_compile_info(body) : alloca(sizeof(struct rb_mjit_compile_info)) \
381 memset(status.stack_size_for_pos, NOT_COMPILED_STACK_SIZE, sizeof(int) * body->iseq_size); \
382 if (compile_root_p) \
383 memset((void *)status.inlined_iseqs, 0, sizeof(const struct rb_iseq_constant_body *) * body->iseq_size); \
385 memset(status.compile_info, 0, sizeof(struct rb_mjit_compile_info)); \
/* FRAGMENT of precompile_inlinable_iseqs: scans the root iseq for
 * opt_send_without_block sites whose cached callee is an inlinable ISEQ
 * method, records them in status->inlined_iseqs, and emits an
 * ALWAYS_INLINE helper function `_mjit_inlined_<pos>` for each.  Interior
 * lines are missing from this view. */
390precompile_inlinable_iseqs(
FILE *
f,
const rb_iseq_t *
iseq,
struct compile_status *status)
393 unsigned int pos = 0;
395#if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
401 if (insn ==
BIN(opt_send_without_block)) {
/* cc_entries is a snapshot copied from the main thread; index it the same
 * way the VM indexes call caches. */
404 CALL_CACHE cc_copy = status->cc_entries + call_data_index(cd, body);
/* Inline only when the cached method is an ISEQ method, the call fits the
 * fastpath, and the callee body passes the inlinability scan. */
407 if (has_valid_method_type(cc_copy) &&
409 cc_copy->me->def->type ==
VM_METHOD_TYPE_ISEQ && fastpath_applied_iseq_p(
ci, cc_copy, child_iseq = def_iseq_ptr(cc_copy->me->def)) &&
410 inlinable_iseq_p(child_iseq->
body)) {
411 status->inlined_iseqs[pos] = child_iseq->
body;
/* Compile the callee with its own (non-root) status, carrying the call
 * context needed to rebuild a frame on cancel. */
420 struct compile_status child_status;
421 INIT_COMPILE_STATUS(child_status, child_iseq->
body,
false);
422 child_status.inline_context = (
struct inlined_call_context){
423 .orig_argc =
ci->orig_argc,
424 .me = (
VALUE)cc_copy->me,
428 if ((child_status.cc_entries !=
NULL || child_status.is_entries !=
NULL)
/* Emit the inlined body as a forced-inline static function so the root
 * function can call it with no frame push. */
432 fprintf(
f,
"ALWAYS_INLINE(static VALUE _mjit_inlined_%d(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE orig_self, const rb_iseq_t *original_iseq));\n", pos);
433 fprintf(
f,
"static inline VALUE\n_mjit_inlined_%d(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE orig_self, const rb_iseq_t *original_iseq)\n{\n", pos);
/* Preserve caller pc/sp; the cancel handler restores them (see
 * compile_inlined_cancel_handler). */
434 fprintf(
f,
" const VALUE *orig_pc = reg_cfp->pc;\n");
435 fprintf(
f,
" const VALUE *orig_sp = reg_cfp->sp;\n");
436 bool success = mjit_compile_body(
f, child_iseq, &child_status);
437 fprintf(
f,
"\n} /* end of _mjit_inlined_%d */\n\n", pos);
443 pos += insn_len(insn);
/* FRAGMENT of mjit_compile (the public entry point): emits file-level
 * prelude, initializes the root compile_status, precompiles inlinable
 * callees, then emits the exported function `funcname` wrapping
 * mjit_compile_body.  Interior lines missing. */
/* JIT-ed code skips the interpreter's checked-run machinery. */
454 fprintf(
f,
"#undef OPT_CHECKED_RUN\n");
455 fprintf(
f,
"#define OPT_CHECKED_RUN 0\n\n");
458 struct compile_status status;
459 INIT_COMPILE_STATUS(status,
iseq->
body,
true);
460 if ((status.cc_entries !=
NULL || status.is_entries !=
NULL)
/* Inlining pass runs only while both send-cache use and inlining are
 * still enabled for this iseq. */
464 if (!status.compile_info->disable_send_cache && !status.compile_info->disable_inlining) {
465 if (!precompile_inlinable_iseqs(
f,
iseq, &status))
/* Windows needs an explicit export for the generated symbol. */
470 fprintf(
f,
"__declspec(dllexport)\n");
472 fprintf(
f,
"VALUE\n%s(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)\n{\n", funcname);
473 bool success = mjit_compile_body(
f,
iseq, &status);
474 fprintf(
f,
"\n} // end of %s\n", funcname);
bool mjit_valid_class_serial_p(rb_serial_t class_serial)
bool mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries)
const struct rb_callable_method_entry_struct * me
rb_serial_t class_serial[(64 - sizeof(rb_serial_t) - sizeof(struct rb_callable_method_entry_struct *) - sizeof(uintptr_t) - sizeof(enum method_missing_reason) - sizeof(VALUE(*)(struct rb_execution_context_struct *e, struct rb_control_frame_struct *, struct rb_calling_info *, const struct rb_call_data *)))/sizeof(rb_serial_t)]
struct rb_iseq_constant_body::@45 param
unsigned int local_table_size
rb_iseq_location_t location
struct rb_iseq_constant_body::@45::@47 flags
struct rb_call_data * call_data
struct rb_iseq_constant_body * body
MJIT_STATIC bool rb_simple_iseq_p(const rb_iseq_t *iseq)