Ruby 2.7.6p219 (2022-04-12 revision c9c2245c0a25176072e02db9254f0e0c84c805cd)
mjit.c
Go to the documentation of this file.
1/**********************************************************************
2
3 mjit.c - MRI method JIT compiler functions for Ruby's main thread
4
5 Copyright (C) 2017 Vladimir Makarov <vmakarov@redhat.com>.
6
7**********************************************************************/
8
9// Functions in this file are never executed on MJIT worker thread.
10// So you can safely use Ruby methods and GC in this file.
11
12// To share variables privately, include mjit_worker.c instead of linking.
13
14#include "internal.h"
15
16#if USE_MJIT
17
18#include "mjit_worker.c"
19
20#include "constant.h"
21#include "id_table.h"
22
// Copy ISeq's states so that race condition does not happen on compilation.
// Runs on Ruby's main thread as a postponed job; the MJIT worker blocks on
// `mjit_worker_wakeup` until `job->finish_p` becomes true.
static void
mjit_copy_job_handler(void *data)
{
    mjit_copy_job_t *job = data;
    if (stop_worker_p) { // check if mutex is still alive, before calling CRITICAL_SECTION_START.
        return;
    }

    CRITICAL_SECTION_START(3, "in mjit_copy_job_handler");
    // Make sure that this job is never executed when:
    // 1. job is being modified
    // 2. alloca memory inside job is expired
    // 3. ISeq is GC-ed
    if (job->finish_p) {
        CRITICAL_SECTION_FINISH(3, "in mjit_copy_job_handler");
        return;
    }
    else if (job->iseq == NULL) { // ISeq GC notified in mjit_mark_iseq
        job->finish_p = true;
        CRITICAL_SECTION_FINISH(3, "in mjit_copy_job_handler");
        return;
    }

    const struct rb_iseq_constant_body *body = job->iseq->body;
    if (job->cc_entries) {
        // Snapshot call caches: plain call caches first, then kwarg call
        // caches, which live right after the plain ones in `call_data`.
        unsigned int i;
        struct rb_call_cache *sink = job->cc_entries;
        const struct rb_call_data *calls = body->call_data;
        const struct rb_kwarg_call_data *kw_calls = (struct rb_kwarg_call_data *)&body->call_data[body->ci_size];
        for (i = 0; i < body->ci_size; i++) {
            *sink++ = calls[i].cc;
        }
        for (i = 0; i < body->ci_kw_size; i++) {
            *sink++ = kw_calls[i].cc;
        }
    }
    if (job->is_entries) {
        // Snapshot inline storage entries wholesale.
        memcpy(job->is_entries, body->is_entries, sizeof(union iseq_inline_storage_entry) * body->is_size);
    }

    job->finish_p = true;
    rb_native_cond_broadcast(&mjit_worker_wakeup); // wake the worker waiting for this copy
    CRITICAL_SECTION_FINISH(3, "in mjit_copy_job_handler");
}
68
69extern int rb_thread_create_mjit_thread(void (*worker_func)(void));
70
// Return a unique file name in /tmp built from PREFIX, SUFFIX and
// number ID (getpid is used when ID == 0). The caller owns the
// returned heap buffer.
static char *
get_uniq_filename(unsigned long id, const char *prefix, const char *suffix)
{
    char stack_buf[70];
    int len = sprint_uniq_filename(stack_buf, sizeof(stack_buf), id, prefix, suffix);
    char *result;

    ++len; // include the terminating NUL
    result = xmalloc(len);
    if (len <= (int)sizeof(stack_buf)) {
        // The stack copy already holds the whole name; duplicate it.
        memcpy(result, stack_buf, len);
    }
    else {
        // Name was truncated; format again into the right-sized heap buffer.
        sprint_uniq_filename(result, len, id, prefix, suffix);
    }
    return result;
}
90
91// Wait until workers don't compile any iseq. It is called at the
92// start of GC.
93void
95{
96 if (!mjit_enabled)
97 return;
98 CRITICAL_SECTION_START(4, "mjit_gc_start_hook");
99 while (in_jit) {
100 verbose(4, "Waiting wakeup from a worker for GC");
101 rb_native_cond_wait(&mjit_client_wakeup, &mjit_engine_mutex);
102 verbose(4, "Getting wakeup from a worker for GC");
103 }
104 in_gc = true;
105 CRITICAL_SECTION_FINISH(4, "mjit_gc_start_hook");
106}
107
108// Send a signal to workers to continue iseq compilations. It is
109// called at the end of GC.
110void
112{
113 if (!mjit_enabled)
114 return;
115 CRITICAL_SECTION_START(4, "mjit_gc_exit_hook");
116 in_gc = false;
117 verbose(4, "Sending wakeup signal to workers after GC");
118 rb_native_cond_broadcast(&mjit_gc_wakeup);
119 CRITICAL_SECTION_FINISH(4, "mjit_gc_exit_hook");
120}
121
122// Deal with ISeq movement from compactor
123void
125{
126 if (!mjit_enabled)
127 return;
128
129 CRITICAL_SECTION_START(4, "mjit_update_references");
130 if (iseq->body->jit_unit) {
132 // We need to invalidate JIT-ed code for the ISeq because it embeds pointer addresses.
133 // To efficiently do that, we use the same thing as TracePoint and thus everything is cancelled for now.
134 // See mjit.h and tool/ruby_vm/views/_mjit_compile_insn.erb for how `mjit_call_p` is used.
135 mjit_call_p = false; // TODO: instead of cancelling all, invalidate only this one and recompile it with some threshold.
136 }
137
138 // Units in stale_units (list of over-speculated and invalidated code) are not referenced from
139 // `iseq->body->jit_unit` anymore (because new one replaces that). So we need to check them too.
140 // TODO: we should be able to reduce the number of units checked here.
141 struct rb_mjit_unit *unit = NULL;
142 list_for_each(&stale_units.head, unit, unode) {
143 if (unit->iseq == iseq) {
144 unit->iseq = (rb_iseq_t *)rb_gc_location((VALUE)unit->iseq);
145 }
146 }
147 CRITICAL_SECTION_FINISH(4, "mjit_update_references");
148}
149
150// Iseqs can be garbage collected. This function should call when it
151// happens. It removes iseq from the unit.
152void
154{
155 if (!mjit_enabled)
156 return;
157
158 CRITICAL_SECTION_START(4, "mjit_free_iseq");
159 if (mjit_copy_job.iseq == iseq) {
160 mjit_copy_job.iseq = NULL;
161 }
162 if (iseq->body->jit_unit) {
163 // jit_unit is not freed here because it may be referred by multiple
164 // lists of units. `get_from_list` and `mjit_finish` do the job.
166 }
167 // Units in stale_units (list of over-speculated and invalidated code) are not referenced from
168 // `iseq->body->jit_unit` anymore (because new one replaces that). So we need to check them too.
169 // TODO: we should be able to reduce the number of units checked here.
170 struct rb_mjit_unit *unit = NULL;
171 list_for_each(&stale_units.head, unit, unode) {
172 if (unit->iseq == iseq) {
173 unit->iseq = NULL;
174 }
175 }
176 CRITICAL_SECTION_FINISH(4, "mjit_free_iseq");
177}
178
// Free unit list. This should be called only when worker is finished
// because node of unit_queue and one of active_units may have the same unit
// during proceeding unit.
// `close_handle_p`: when false, the dlopen handle is deliberately leaked
// (JIT-ed code may still be on stack).
static void
free_list(struct rb_mjit_unit_list *list, bool close_handle_p)
{
    struct rb_mjit_unit *unit = 0, *next;

    list_for_each_safe(&list->head, unit, next, unode) {
        list_del(&unit->unode);
        if (!close_handle_p) unit->handle = NULL; /* Skip dlclose in free_unit() */

        if (list == &stale_units) { // `free_unit(unit)` crashes after GC.compact on `stale_units`
            /*
             * TODO: REVERT THIS BRANCH
             * Debug the crash on stale_units w/ GC.compact and just use `free_unit(unit)`!!
             */
            // Inline the parts of free_unit() that are known safe here:
            // close the shared object, delete temp files, release the node.
            if (unit->handle && dlclose(unit->handle)) {
                mjit_warning("failed to close handle for u%d: %s", unit->id, dlerror());
            }
            clean_object_files(unit);
            free(unit);
        }
        else {
            free_unit(unit);
        }
    }
    list->length = 0;
}
208
// MJIT info related to an existing continuation.
struct mjit_cont {
    rb_execution_context_t *ec; // continuation ec
    struct mjit_cont *prev, *next; // used to form lists
};

// Double linked list of registered continuations. This is used to detect
// units which are in use in unload_units.
static struct mjit_cont *first_cont;
218
219// Register a new continuation with execution context `ec`. Return MJIT info about
220// the continuation.
221struct mjit_cont *
223{
224 struct mjit_cont *cont;
225
226 cont = ZALLOC(struct mjit_cont);
227 cont->ec = ec;
228
229 CRITICAL_SECTION_START(3, "in mjit_cont_new");
230 if (first_cont == NULL) {
231 cont->next = cont->prev = NULL;
232 }
233 else {
234 cont->prev = NULL;
235 cont->next = first_cont;
236 first_cont->prev = cont;
237 }
238 first_cont = cont;
239 CRITICAL_SECTION_FINISH(3, "in mjit_cont_new");
240
241 return cont;
242}
243
244// Unregister continuation `cont`.
245void
246mjit_cont_free(struct mjit_cont *cont)
247{
248 CRITICAL_SECTION_START(3, "in mjit_cont_new");
249 if (cont == first_cont) {
250 first_cont = cont->next;
251 if (first_cont != NULL)
252 first_cont->prev = NULL;
253 }
254 else {
255 cont->prev->next = cont->next;
256 if (cont->next != NULL)
257 cont->next->prev = cont->prev;
258 }
259 CRITICAL_SECTION_FINISH(3, "in mjit_cont_new");
260
261 xfree(cont);
262}
263
264// Finish work with continuation info.
265static void
266finish_conts(void)
267{
268 struct mjit_cont *cont, *next;
269
270 for (cont = first_cont; cont != NULL; cont = next) {
271 next = cont->next;
272 xfree(cont);
273 }
274}
275
276// Create unit for `iseq`.
277static void
278create_unit(const rb_iseq_t *iseq)
279{
280 struct rb_mjit_unit *unit;
281
282 unit = ZALLOC(struct rb_mjit_unit);
283 if (unit == NULL)
284 return;
285
286 unit->id = current_unit_num++;
287 unit->iseq = (rb_iseq_t *)iseq;
288 iseq->body->jit_unit = unit;
289}
290
291// Set up field `used_code_p` for unit iseqs whose iseq on the stack of ec.
292static void
293mark_ec_units(rb_execution_context_t *ec)
294{
295 const rb_control_frame_t *cfp;
296
297 if (ec->vm_stack == NULL)
298 return;
299 for (cfp = RUBY_VM_END_CONTROL_FRAME(ec) - 1; ; cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp)) {
300 const rb_iseq_t *iseq;
301 if (cfp->pc && (iseq = cfp->iseq) != NULL
303 && (iseq->body->jit_unit) != NULL) {
305 }
306
307 if (cfp == ec->cfp)
308 break; // reached the most recent cfp
309 }
310}
311
// Unload JIT code of some units to satisfy the maximum permitted
// number of units with a loaded code.
// NOTE(review): appears to rely on the caller holding mjit_engine_mutex
// (called inside the critical section of mjit_add_iseq_to_process) — confirm.
static void
unload_units(void)
{
    rb_vm_t *vm = GET_THREAD()->vm;
    rb_thread_t *th = NULL;
    struct rb_mjit_unit *unit = 0, *next, *worst;
    struct mjit_cont *cont;
    int delete_num, units_num = active_units.length;

    // For now, we don't unload units when ISeq is GCed. We should
    // unload such ISeqs first here.
    list_for_each_safe(&active_units.head, unit, next, unode) {
        if (unit->iseq == NULL) { // ISeq is GCed.
            remove_from_list(unit, &active_units);
            free_unit(unit);
        }
    }

    // Detect units which are in use and can't be unloaded.
    list_for_each(&active_units.head, unit, unode) {
        assert(unit->iseq != NULL && unit->handle != NULL);
        unit->used_code_p = FALSE;
    }
    // Mark units whose code is on any live thread's stack...
    list_for_each(&vm->living_threads, th, vmlt_node) {
        mark_ec_units(th->ec);
    }
    // ...or on any registered continuation's stack.
    for (cont = first_cont; cont != NULL; cont = cont->next) {
        mark_ec_units(cont->ec);
    }
    // TODO: check stale_units and unload unused ones! (note that the unit is not associated to ISeq anymore)

    // Remove 1/10 units more to decrease unloading calls.
    // TODO: Calculate max total_calls in unit_queue and don't unload units
    // whose total_calls are larger than the max.
    delete_num = active_units.length / 10;
    for (; active_units.length > mjit_opts.max_cache_size - delete_num;) {
        // Find one unit that has the minimum total_calls.
        worst = NULL;
        list_for_each(&active_units.head, unit, unode) {
            if (unit->used_code_p) // We can't unload code on stack.
                continue;

            if (worst == NULL || worst->iseq->body->total_calls > unit->iseq->body->total_calls) {
                worst = unit;
            }
        }
        if (worst == NULL)
            break;

        // Unload the worst node.
        verbose(2, "Unloading unit %d (calls=%lu)", worst->id, worst->iseq->body->total_calls);
        assert(worst->handle != NULL);
        remove_from_list(worst, &active_units);
        free_unit(worst);
    }

    if (units_num == active_units.length && mjit_opts.wait) {
        mjit_opts.max_cache_size++; // avoid infinite loop on `rb_mjit_wait_call`. Note that --jit-wait is just for testing.
        verbose(1, "No units can be unloaded -- incremented max-cache-size to %d for --jit-wait", mjit_opts.max_cache_size);
    }
    else {
        verbose(1, "Too many JIT code -- %d units unloaded", units_num - active_units.length);
    }
}
378
379static void
380mjit_add_iseq_to_process(const rb_iseq_t *iseq, const struct rb_mjit_compile_info *compile_info)
381{
382 if (!mjit_enabled || pch_status == PCH_FAILED)
383 return;
384
386 create_unit(iseq);
387 if (iseq->body->jit_unit == NULL)
388 // Failure in creating the unit.
389 return;
390 if (compile_info != NULL)
391 iseq->body->jit_unit->compile_info = *compile_info;
392
393 CRITICAL_SECTION_START(3, "in add_iseq_to_process");
394 add_to_list(iseq->body->jit_unit, &unit_queue);
395 if (active_units.length >= mjit_opts.max_cache_size) {
396 unload_units();
397 }
398 verbose(3, "Sending wakeup signal to workers in mjit_add_iseq_to_process");
399 rb_native_cond_broadcast(&mjit_worker_wakeup);
400 CRITICAL_SECTION_FINISH(3, "in add_iseq_to_process");
401}
402
403// Add ISEQ to be JITed in parallel with the current thread.
404// Unload some JIT codes if there are too many of them.
405void
407{
408 mjit_add_iseq_to_process(iseq, NULL);
409}
410
411// For this timeout seconds, --jit-wait will wait for JIT compilation finish.
412#define MJIT_WAIT_TIMEOUT_SECONDS 60
413
414static void
415mjit_wait(struct rb_iseq_constant_body *body)
416{
417 struct timeval tv;
418 int tries = 0;
419 tv.tv_sec = 0;
420 tv.tv_usec = 1000;
422 tries++;
423 if (tries / 1000 > MJIT_WAIT_TIMEOUT_SECONDS || pch_status == PCH_FAILED) {
424 CRITICAL_SECTION_START(3, "in rb_mjit_wait_call to set jit_func");
425 body->jit_func = (mjit_func_t)NOT_COMPILED_JIT_ISEQ_FUNC; // JIT worker seems dead. Give up.
426 CRITICAL_SECTION_FINISH(3, "in rb_mjit_wait_call to set jit_func");
427 mjit_warning("timed out to wait for JIT finish");
428 break;
429 }
430
431 CRITICAL_SECTION_START(3, "in rb_mjit_wait_call for a client wakeup");
432 rb_native_cond_broadcast(&mjit_worker_wakeup);
433 CRITICAL_SECTION_FINISH(3, "in rb_mjit_wait_call for a client wakeup");
435 }
436}
437
438// Wait for JIT compilation finish for --jit-wait, and call the function pointer
439// if the compiled result is not NOT_COMPILED_JIT_ISEQ_FUNC.
440VALUE
442{
443 mjit_wait(body);
445 return Qundef;
446 }
447 return body->jit_func(ec, ec->cfp);
448}
449
452{
453 assert(body->jit_unit != NULL);
454 return &body->jit_unit->compile_info;
455}
456
457void
459{
461 return;
462
463 verbose(1, "JIT recompile: %s@%s:%d", RSTRING_PTR(iseq->body->location.label),
465
466 CRITICAL_SECTION_START(3, "in rb_mjit_recompile_iseq");
467 remove_from_list(iseq->body->jit_unit, &active_units);
469 add_to_list(iseq->body->jit_unit, &stale_units);
470 CRITICAL_SECTION_FINISH(3, "in rb_mjit_recompile_iseq");
471
472 mjit_add_iseq_to_process(iseq, &iseq->body->jit_unit->compile_info);
473 if (UNLIKELY(mjit_opts.wait)) {
474 mjit_wait(iseq->body);
475 }
476}
477
479
480// Initialize header_file, pch_file, libruby_pathflag. Return true on success.
481static bool
482init_header_filename(void)
483{
484 int fd;
485#ifdef LOAD_RELATIVE
486 // Root path of the running ruby process. Equal to RbConfig::TOPDIR.
487 VALUE basedir_val;
488#endif
489 const char *basedir = "";
490 size_t baselen = 0;
491 char *p;
492#ifdef _WIN32
493 static const char libpathflag[] =
494# ifdef _MSC_VER
495 "-LIBPATH:"
496# else
497 "-L"
498# endif
499 ;
500 const size_t libpathflag_len = sizeof(libpathflag) - 1;
501#endif
502
503#ifdef LOAD_RELATIVE
504 basedir_val = ruby_prefix_path;
505 basedir = StringValuePtr(basedir_val);
506 baselen = RSTRING_LEN(basedir_val);
507#else
508 if (getenv("MJIT_SEARCH_BUILD_DIR")) {
509 // This path is not intended to be used on production, but using build directory's
510 // header file here because people want to run `make test-all` without running
511 // `make install`. Don't use $MJIT_SEARCH_BUILD_DIR except for test-all.
512
513 struct stat st;
514 const char *hdr = dlsym(RTLD_DEFAULT, "MJIT_HEADER");
515 if (!hdr) {
516 verbose(1, "No MJIT_HEADER");
517 }
518 else if (hdr[0] != '/') {
519 verbose(1, "Non-absolute header file path: %s", hdr);
520 }
521 else if (stat(hdr, &st) || !S_ISREG(st.st_mode)) {
522 verbose(1, "Non-file header file path: %s", hdr);
523 }
524 else if ((st.st_uid != getuid()) || (st.st_mode & 022) ||
525 !rb_path_check(hdr)) {
526 verbose(1, "Unsafe header file: uid=%ld mode=%#o %s",
527 (long)st.st_uid, (unsigned)st.st_mode, hdr);
528 return FALSE;
529 }
530 else {
531 // Do not pass PRELOADENV to child processes, on
532 // multi-arch environment
533 verbose(3, "PRELOADENV("PRELOADENV")=%s", getenv(PRELOADENV));
534 // assume no other PRELOADENV in test-all
536 verbose(3, "MJIT_HEADER: %s", hdr);
537 header_file = ruby_strdup(hdr);
538 if (!header_file) return false;
539 }
540 }
541 else
542#endif
543#ifndef _MSC_VER
544 {
545 // A name of the header file included in any C file generated by MJIT for iseqs.
546 static const char header_name[] = MJIT_HEADER_INSTALL_DIR "/" MJIT_MIN_HEADER_NAME;
547 const size_t header_name_len = sizeof(header_name) - 1;
548
549 header_file = xmalloc(baselen + header_name_len + 1);
550 p = append_str2(header_file, basedir, baselen);
551 p = append_str2(p, header_name, header_name_len + 1);
552
553 if ((fd = rb_cloexec_open(header_file, O_RDONLY, 0)) < 0) {
554 verbose(1, "Cannot access header file: %s", header_file);
555 xfree(header_file);
556 header_file = NULL;
557 return false;
558 }
559 (void)close(fd);
560 }
561
562 pch_file = get_uniq_filename(0, MJIT_TMP_PREFIX "h", ".h.gch");
563#else
564 {
565 static const char pch_name[] = MJIT_HEADER_INSTALL_DIR "/" MJIT_PRECOMPILED_HEADER_NAME;
566 const size_t pch_name_len = sizeof(pch_name) - 1;
567
568 pch_file = xmalloc(baselen + pch_name_len + 1);
569 p = append_str2(pch_file, basedir, baselen);
570 p = append_str2(p, pch_name, pch_name_len + 1);
571 if ((fd = rb_cloexec_open(pch_file, O_RDONLY, 0)) < 0) {
572 verbose(1, "Cannot access precompiled header file: %s", pch_file);
573 xfree(pch_file);
574 pch_file = NULL;
575 return false;
576 }
577 (void)close(fd);
578 }
579#endif
580
581#ifdef _WIN32
582 basedir_val = ruby_archlibdir_path;
583 basedir = StringValuePtr(basedir_val);
584 baselen = RSTRING_LEN(basedir_val);
585 libruby_pathflag = p = xmalloc(libpathflag_len + baselen + 1);
586 p = append_str(p, libpathflag);
587 p = append_str2(p, basedir, baselen);
588 *p = '\0';
589#endif
590
591 return true;
592}
593
595valid_class_serials_add_i(ID key, VALUE v, void *unused)
596{
598 VALUE value = ce->value;
599
601 if (RB_TYPE_P(value, T_MODULE) || RB_TYPE_P(value, T_CLASS)) {
603 }
604 return ID_TABLE_CONTINUE;
605}
606
607#ifdef _WIN32
608UINT rb_w32_system_tmpdir(WCHAR *path, UINT len);
609#endif
610
// Return the platform's default temporary directory as a freshly
// allocated string, or 0 when no platform-specific default exists
// (caller then falls back — see system_tmpdir).
static char *
system_default_tmpdir(void)
{
    // c.f. ext/etc/etc.c:etc_systmpdir()
#ifdef _WIN32
    WCHAR tmppath[_MAX_PATH];
    UINT len = rb_w32_system_tmpdir(tmppath, numberof(tmppath));
    if (len) {
        // Convert the wide-char path to UTF-8 for the rest of MJIT.
        int blen = WideCharToMultiByte(CP_UTF8, 0, tmppath, len, NULL, 0, NULL, NULL);
        char *tmpdir = xmalloc(blen + 1);
        WideCharToMultiByte(CP_UTF8, 0, tmppath, len, tmpdir, blen, NULL, NULL);
        tmpdir[blen] = '\0';
        return tmpdir;
    }
#elif defined _CS_DARWIN_USER_TEMP_DIR
    char path[MAXPATHLEN];
    // confstr returns the required buffer size including the NUL.
    size_t len = confstr(_CS_DARWIN_USER_TEMP_DIR, path, sizeof(path));
    if (len > 0) {
        char *tmpdir = xmalloc(len);
        if (len > sizeof(path)) {
            // First call truncated; re-query into the right-sized buffer.
            confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdir, len);
        }
        else {
            memcpy(tmpdir, path, len);
        }
        return tmpdir;
    }
#endif
    return 0;
}
641
// Return TRUE if `dir` is a usable temporary directory: it exists, is a
// directory, and (on POSIX) is writable by us without being world-writable
// unless the sticky bit is set.
static int
check_tmpdir(const char *dir)
{
    struct stat st;

    if (!dir) return FALSE;
    if (stat(dir, &st)) return FALSE;
#ifndef S_ISDIR
# define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
#endif
    if (!S_ISDIR(st.st_mode)) return FALSE;
#ifndef _WIN32
# ifndef S_IWOTH
#  define S_IWOTH 002
# endif
    if (st.st_mode & S_IWOTH) {
# ifdef S_ISVTX
        // World-writable is only acceptable with the sticky bit (like /tmp).
        if (!(st.st_mode & S_ISVTX)) return FALSE;
# else
        return FALSE;
# endif
    }
    if (access(dir, W_OK)) return FALSE;
#endif
    return TRUE;
}
668
// Return a newly allocated path to a usable temporary directory,
// consulting $TMPDIR and $TMP before the platform default, with /tmp
// as the last resort. The caller owns the returned string.
static char *
system_tmpdir(void)
{
    char *candidate;

    if (check_tmpdir(candidate = getenv("TMPDIR")))
        return ruby_strdup(candidate);
    if (check_tmpdir(candidate = getenv("TMP")))
        return ruby_strdup(candidate);
    candidate = system_default_tmpdir();
    if (check_tmpdir(candidate))
        return candidate;
    return ruby_strdup("/tmp");
}
682
683// Minimum value for JIT cache size.
684#define MIN_CACHE_SIZE 10
685// Default permitted number of units with a JIT code kept in memory.
686#define DEFAULT_MAX_CACHE_SIZE 100
687// A default threshold used to add iseq to JIT.
688#define DEFAULT_MIN_CALLS_TO_ADD 10000
689
// Start MJIT worker. Return TRUE if worker is successfully started.
// On failure, MJIT is disabled and all synchronization primitives are torn
// down so no later code tries to use them.
static bool
start_worker(void)
{
    stop_worker_p = false;
    worker_stopped = false;

    if (!rb_thread_create_mjit_thread(mjit_worker)) {
        mjit_enabled = false;
        rb_native_mutex_destroy(&mjit_engine_mutex);
        rb_native_cond_destroy(&mjit_pch_wakeup);
        rb_native_cond_destroy(&mjit_client_wakeup);
        rb_native_cond_destroy(&mjit_worker_wakeup);
        rb_native_cond_destroy(&mjit_gc_wakeup);
        verbose(1, "Failure in MJIT thread initialization\n");
        return false;
    }
    return true;
}
709
// There's no strndup on Windows: copy the first `n` bytes of `str` into
// a fresh NUL-terminated buffer. The caller owns the result.
static char*
ruby_strndup(const char *str, size_t n)
{
    char *copy = xmalloc(n + 1);
    memcpy(copy, str, n);
    copy[n] = '\0';
    return copy;
}
719
720// Convert "foo bar" to {"foo", "bar", NULL} array. Caller is responsible for
721// freeing a returned buffer and its elements.
722static char **
723split_flags(const char *flags)
724{
725 char *buf[MAXPATHLEN];
726 int i = 0;
727 char *next;
728 for (; flags != NULL; flags = next) {
729 next = strchr(flags, ' ');
730 if (next == NULL) {
731 if (strlen(flags) > 0)
732 buf[i++] = strdup(flags);
733 }
734 else {
735 if (next > flags)
736 buf[i++] = ruby_strndup(flags, next - flags);
737 next++; // skip space
738 }
739 }
740
741 char **ret = xmalloc(sizeof(char *) * (i + 1));
742 memcpy(ret, buf, sizeof(char *) * i);
743 ret[i] = NULL;
744 return ret;
745}
746
747// Initialize MJIT. Start a thread creating the precompiled header and
748// processing ISeqs. The function should be called first for using MJIT.
749// If everything is successful, MJIT_INIT_P will be TRUE.
750void
751mjit_init(const struct mjit_options *opts)
752{
753 mjit_opts = *opts;
754 mjit_enabled = true;
755 mjit_call_p = true;
756
757 // Normalize options
758 if (mjit_opts.min_calls == 0)
759 mjit_opts.min_calls = DEFAULT_MIN_CALLS_TO_ADD;
760 if (mjit_opts.max_cache_size <= 0)
761 mjit_opts.max_cache_size = DEFAULT_MAX_CACHE_SIZE;
762 if (mjit_opts.max_cache_size < MIN_CACHE_SIZE)
763 mjit_opts.max_cache_size = MIN_CACHE_SIZE;
764
765 // Initialize variables for compilation
766#ifdef _MSC_VER
767 pch_status = PCH_SUCCESS; // has prebuilt precompiled header
768#else
769 pch_status = PCH_NOT_READY;
770#endif
771 cc_path = CC_COMMON_ARGS[0];
772 verbose(2, "MJIT: CC defaults to %s", cc_path);
773 cc_common_args = xmalloc(sizeof(CC_COMMON_ARGS));
774 memcpy((void *)cc_common_args, CC_COMMON_ARGS, sizeof(CC_COMMON_ARGS));
775 cc_added_args = split_flags(opts->debug_flags);
776 xfree(opts->debug_flags);
777#if MJIT_CFLAGS_PIPE
778 // eliminate a flag incompatible with `-pipe`
779 for (size_t i = 0, j = 0; i < sizeof(CC_COMMON_ARGS) / sizeof(char *); i++) {
780 if (CC_COMMON_ARGS[i] && strncmp("-save-temps", CC_COMMON_ARGS[i], strlen("-save-temps")) == 0)
781 continue; // skip -save-temps flag
782 cc_common_args[j] = CC_COMMON_ARGS[i];
783 j++;
784 }
785#endif
786
787 tmp_dir = system_tmpdir();
788 verbose(2, "MJIT: tmp_dir is %s", tmp_dir);
789
790 if (!init_header_filename()) {
791 mjit_enabled = false;
792 verbose(1, "Failure in MJIT header file name initialization\n");
793 return;
794 }
795 pch_owner_pid = getpid();
796
797 // Initialize mutex
798 rb_native_mutex_initialize(&mjit_engine_mutex);
799 rb_native_cond_initialize(&mjit_pch_wakeup);
800 rb_native_cond_initialize(&mjit_client_wakeup);
801 rb_native_cond_initialize(&mjit_worker_wakeup);
802 rb_native_cond_initialize(&mjit_gc_wakeup);
803
804 // Make sure root_fiber's saved_ec is scanned by mark_ec_units
805 rb_fiber_init_mjit_cont(GET_EC()->fiber_ptr);
806
807 // Initialize class_serials cache for compilation
808 valid_class_serials = rb_hash_new();
809 rb_obj_hide(valid_class_serials);
810 rb_gc_register_mark_object(valid_class_serials);
814 rb_id_table_foreach(RCLASS_CONST_TBL(rb_cObject), valid_class_serials_add_i, NULL);
815 }
816
817 // Initialize worker thread
818 start_worker();
819}
820
821static void
822stop_worker(void)
823{
825
826 while (!worker_stopped) {
827 verbose(3, "Sending cancel signal to worker");
828 CRITICAL_SECTION_START(3, "in stop_worker");
829 stop_worker_p = true; // Setting this inside loop because RUBY_VM_CHECK_INTS may make this false.
830 rb_native_cond_broadcast(&mjit_worker_wakeup);
831 CRITICAL_SECTION_FINISH(3, "in stop_worker");
833 }
834}
835
836// Stop JIT-compiling methods but compiled code is kept available.
837VALUE
838mjit_pause(bool wait_p)
839{
840 if (!mjit_enabled) {
841 rb_raise(rb_eRuntimeError, "MJIT is not enabled");
842 }
843 if (worker_stopped) {
844 return Qfalse;
845 }
846
847 // Flush all queued units with no option or `wait: true`
848 if (wait_p) {
849 struct timeval tv;
850 tv.tv_sec = 0;
851 tv.tv_usec = 1000;
852
853 while (unit_queue.length > 0 && active_units.length < mjit_opts.max_cache_size) { // inverse of condition that waits for mjit_worker_wakeup
854 CRITICAL_SECTION_START(3, "in mjit_pause for a worker wakeup");
855 rb_native_cond_broadcast(&mjit_worker_wakeup);
856 CRITICAL_SECTION_FINISH(3, "in mjit_pause for a worker wakeup");
858 }
859 }
860
861 stop_worker();
862 return Qtrue;
863}
864
865// Restart JIT-compiling methods after mjit_pause.
866VALUE
867mjit_resume(void)
868{
869 if (!mjit_enabled) {
870 rb_raise(rb_eRuntimeError, "MJIT is not enabled");
871 }
872 if (!worker_stopped) {
873 return Qfalse;
874 }
875
876 if (!start_worker()) {
877 rb_raise(rb_eRuntimeError, "Failed to resume MJIT worker");
878 }
879 return Qtrue;
880}
881
// Skip calling `clean_object_files` for units which currently exist in the list.
// Called right after fork so the child does not delete temp files still owned
// by the parent process.
static void
skip_cleaning_object_files(struct rb_mjit_unit_list *list)
{
    struct rb_mjit_unit *unit = NULL, *next;

    // No mutex for list, assuming MJIT worker does not exist yet since it's immediately after fork.
    list_for_each_safe(&list->head, unit, next, unode) {
#ifndef _MSC_VER // Actually mswin does not reach here since it doesn't have fork
        if (unit->o_file) unit->o_file_inherited_p = true;
#endif

#if defined(_WIN32) // mswin doesn't reach here either. This is for MinGW.
        if (unit->so_file) unit->so_file = NULL;
#endif
    }
}
899
900// This is called after fork initiated by Ruby's method to launch MJIT worker thread
901// for child Ruby process.
902//
903// In multi-process Ruby applications, child Ruby processes do most of the jobs.
904// Thus we want child Ruby processes to enqueue ISeqs to MJIT worker's queue and
905// call the JIT-ed code.
906//
907// But unfortunately current MJIT-generated code is process-specific. After the fork,
908// JIT-ed code created by parent Ruby process cannot be used in child Ruby process
909// because the code could rely on inline cache values (ivar's IC, send's CC) which
910// may vary between processes after fork or embed some process-specific addresses.
911//
912// So child Ruby process can't request parent process to JIT an ISeq and use the code.
913// Instead of that, MJIT worker thread is created for all child Ruby processes, even
914// while child processes would end up with compiling the same ISeqs.
915void
917{
918 if (!mjit_enabled)
919 return;
920
921 /* Let parent process delete the already-compiled object files.
922 This must be done before starting MJIT worker on child process. */
923 skip_cleaning_object_files(&active_units);
924
925 /* MJIT worker thread is not inherited on fork. Start it for this child process. */
926 start_worker();
927}
928
// Finish the threads processing units and creating PCH, finalize
// and free MJIT data. It should be called last during MJIT
// life.
//
// If close_handle_p is true, it calls dlclose() for JIT-ed code. So it should be false
// if the code can still be on stack. ...But it means to leak JIT-ed handle forever (FIXME).
void
mjit_finish(bool close_handle_p)
{
    if (!mjit_enabled)
        return;

    // Wait for pch finish
    verbose(2, "Stopping worker thread");
    CRITICAL_SECTION_START(3, "in mjit_finish to wakeup from pch");
    // As our threads are detached, we could just cancel them. But it
    // is a bad idea because OS processes (C compiler) started by
    // threads can produce temp files. And even if the temp files are
    // removed, the used C compiler still complaint about their
    // absence. So wait for a clean finish of the threads.
    while (pch_status == PCH_NOT_READY) {
        verbose(3, "Waiting wakeup from make_pch");
        rb_native_cond_wait(&mjit_pch_wakeup, &mjit_engine_mutex);
    }
    CRITICAL_SECTION_FINISH(3, "in mjit_finish to wakeup from pch");

    // Stop worker
    stop_worker();

    // Tear down synchronization primitives now that no worker can use them.
    rb_native_mutex_destroy(&mjit_engine_mutex);
    rb_native_cond_destroy(&mjit_pch_wakeup);
    rb_native_cond_destroy(&mjit_client_wakeup);
    rb_native_cond_destroy(&mjit_worker_wakeup);
    rb_native_cond_destroy(&mjit_gc_wakeup);

#ifndef _MSC_VER // mswin has prebuilt precompiled header
    // Only the process that created the PCH removes it (fork safety).
    if (!mjit_opts.save_temps && getpid() == pch_owner_pid)
        remove_file(pch_file);

    xfree(header_file); header_file = NULL;
#endif
    xfree((void *)cc_common_args); cc_common_args = NULL;
    for (char **flag = cc_added_args; *flag != NULL; flag++)
        xfree(*flag);
    xfree((void *)cc_added_args); cc_added_args = NULL;
    xfree(tmp_dir); tmp_dir = NULL;
    xfree(pch_file); pch_file = NULL;

    mjit_call_p = false;
    // Free every unit list; close_handle_p controls whether dlclose runs.
    free_list(&unit_queue, close_handle_p);
    free_list(&active_units, close_handle_p);
    free_list(&compact_units, close_handle_p);
    free_list(&stale_units, close_handle_p);
    finish_conts();

    mjit_enabled = false;
    verbose(1, "Successful MJIT finish");
}
987
988void
989mjit_mark(void)
990{
991 if (!mjit_enabled)
992 return;
993 RUBY_MARK_ENTER("mjit");
994
995 CRITICAL_SECTION_START(4, "mjit_mark");
996 VALUE iseq = (VALUE)mjit_copy_job.iseq;
997 CRITICAL_SECTION_FINISH(4, "mjit_mark");
998
999 // Don't wrap critical section with this. This may trigger GC,
1000 // and in that case mjit_gc_start_hook causes deadlock.
1001 if (iseq) rb_gc_mark(iseq);
1002
1003 struct rb_mjit_unit *unit = NULL;
1004 CRITICAL_SECTION_START(4, "mjit_mark");
1005 list_for_each(&unit_queue.head, unit, unode) {
1006 if (unit->iseq) { // ISeq is still not GCed
1007 iseq = (VALUE)unit->iseq;
1008 CRITICAL_SECTION_FINISH(4, "mjit_mark rb_gc_mark");
1009
1010 // Don't wrap critical section with this. This may trigger GC,
1011 // and in that case mjit_gc_start_hook causes deadlock.
1013
1014 CRITICAL_SECTION_START(4, "mjit_mark rb_gc_mark");
1015 }
1016 }
1017 CRITICAL_SECTION_FINISH(4, "mjit_mark");
1018
1019 RUBY_MARK_LEAVE("mjit");
1020}
1021
1022// A hook to update valid_class_serials.
1023void
1025{
1026 if (!mjit_enabled)
1027 return;
1028
1029 // Do not wrap CRITICAL_SECTION here. This function is only called in main thread
1030 // and guarded by GVL, and `rb_hash_aset` may cause GC and deadlock in it.
1031 rb_hash_aset(valid_class_serials, LONG2FIX(class_serial), Qtrue);
1032}
1033
1034// A hook to update valid_class_serials.
1035void
1037{
1038 if (!mjit_enabled)
1039 return;
1040
1041 CRITICAL_SECTION_START(3, "in mjit_remove_class_serial");
1042 rb_hash_delete_entry(valid_class_serials, LONG2FIX(class_serial));
1043 CRITICAL_SECTION_FINISH(3, "in mjit_remove_class_serial");
1044}
1045
1046#endif
#define free(x)
Definition: dln.c:52
struct rb_encoding_entry * list
Definition: encoding.c:56
char str[HTML_ESCAPE_MAX_LEN+1]
Definition: escape.c:18
VALUE rb_cObject
Object class.
Definition: ruby.h:2012
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2671
VALUE rb_eRuntimeError
Definition: error.c:922
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition: object.c:78
#define RTLD_DEFAULT
Definition: handle.c:291
void rb_id_table_foreach(struct rb_id_table *tbl, rb_id_table_foreach_func_t *func, void *data)
Definition: id_table.c:292
rb_id_table_iterator_result
Definition: id_table.h:8
@ ID_TABLE_CONTINUE
Definition: id_table.h:9
#define mjit_enabled
Definition: internal.h:1766
#define MJIT_HEADER_INSTALL_DIR
Definition: mjit_config.h:7
#define MJIT_MIN_HEADER_NAME
Definition: mjit_config.h:9
#define PRELOADENV
Definition: mjit_config.h:17
@ PCH_SUCCESS
Definition: mjit_worker.c:232
@ PCH_FAILED
Definition: mjit_worker.c:232
@ PCH_NOT_READY
Definition: mjit_worker.c:232
void mjit_worker(void)
Definition: mjit_worker.c:1195
#define append_str(p, str)
Definition: mjit_worker.c:671
verbose(int level, const char *format,...)
Definition: mjit_worker.c:303
void rb_native_cond_initialize(rb_nativethread_cond_t *cond)
#define append_str2(p, str, len)
Definition: mjit_worker.c:670
void rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
mjit_warning(const char *format,...)
Definition: mjit_worker.c:322
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
#define MJIT_TMP_PREFIX
Definition: mjit_worker.c:123
void rb_native_cond_destroy(rb_nativethread_cond_t *cond)
void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
void rb_thread_wait_for(struct timeval)
Definition: thread.c:1346
#define RUBY_MARK_LEAVE(msg)
#define list_del(n)
#define NULL
VALUE rb_iseq_path(const rb_iseq_t *iseq)
Definition: iseq.c:1027
#define RSTRING_LEN(str)
void mjit_finish(_Bool close_handle_p)
enum ruby_tag_type st
size_t strlen(const char *)
_Bool mjit_call_p
Definition: mjit_worker.c:180
void mjit_gc_exit_hook(void)
#define StringValuePtr(v)
rb_control_frame_t * cfp
#define RUBY_MARK_ENTER(msg)
int close(int __fildes)
#define xfree
#define LONG2FIX(i)
#define Qundef
uid_t getuid(void)
Definition: win32.c:2795
void mjit_update_references(const rb_iseq_t *iseq)
#define RSTRING_PTR(str)
void rb_gc_register_mark_object(VALUE)
Definition: gc.c:7079
#define RCLASS_SERIAL(c)
VALUE(* mjit_func_t)(rb_execution_context_t *, rb_control_frame_t *)
#define GET_EC()
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp)
#define numberof(array)
int unsetenv(const char *)
#define S_IWOTH
const char size_t n
int rb_path_check(const char *)
Definition: file.c:6189
unsigned long VALUE
#define xmalloc
#define T_MODULE
uint32_t i
#define S_ISDIR(m)
int strncmp(const char *, const char *, size_t)
__inline__ const void *__restrict__ size_t len
int rb_cloexec_open(const char *pathname, int flags, mode_t mode)
Definition: io.c:292
#define ZALLOC(type)
#define RUBY_VM_CHECK_INTS(ec)
void mjit_child_after_fork(void)
VALUE rb_gc_location(VALUE)
Definition: gc.c:8127
void mjit_init(const struct mjit_options *opts)
size_t confstr(int __name, char *__buf, size_t __len)
void rb_fiber_init_mjit_cont(struct rb_fiber_struct *fiber)
Definition: cont.c:1134
#define GET_THREAD()
unsigned long long rb_serial_t
struct mjit_options mjit_opts
Definition: mjit_worker.c:174
#define FIX2INT(x)
int VALUE v
@ NOT_COMPILED_JIT_ISEQ_FUNC
@ NOT_READY_JIT_ISEQ_FUNC
@ NOT_ADDED_JIT_ISEQ_FUNC
#define list_for_each_safe(h, i, nxt, member)
#define S_ISVTX
void rb_gc_mark(VALUE)
Definition: gc.c:5228
#define RCLASS_CONST_TBL(c)
const rb_iseq_t * iseq
char * strchr(const char *, int)
Definition: strchr.c:8
#define TRUE
#define FALSE
unsigned int size
#define Qtrue
char * strdup(const char *) __attribute__((__malloc__)) __attribute__((__warn_unused_result__))
#define UNLIKELY(x)
struct rb_call_cache buf
VALUE rb_hash_delete_entry(VALUE hash, VALUE key)
Definition: hash.c:2326
__uintptr_t uintptr_t
void mjit_free_iseq(const rb_iseq_t *iseq)
#define Qfalse
int stat(const char *__restrict__ __path, struct stat *__restrict__ __sbuf)
void * memcpy(void *__restrict__, const void *__restrict__, size_t)
void rb_mjit_add_iseq_to_process(const rb_iseq_t *iseq)
#define S_ISREG(m)
#define list_for_each(h, i, member)
int access(const char *__path, int __amode)
void rb_mjit_recompile_iseq(const rb_iseq_t *iseq)
#define RB_TYPE_P(obj, type)
pid_t getpid(void)
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
#define T_CLASS
#define CLASS_OF(v)
VALUE rb_hash_aset(VALUE, VALUE, VALUE)
Definition: hash.c:2852
void mjit_remove_class_serial(rb_serial_t class_serial)
#define assert
VALUE rb_mjit_wait_call(rb_execution_context_t *ec, struct rb_iseq_constant_body *body)
#define MAXPATHLEN
void mjit_cont_free(struct mjit_cont *cont)
unsigned long ID
void mjit_add_class_serial(rb_serial_t class_serial)
#define W_OK
void mjit_mark(void)
VALUE rb_hash_new(void)
Definition: hash.c:1523
struct rb_mjit_compile_info * rb_mjit_iseq_compile_info(const struct rb_iseq_constant_body *body)
struct mjit_cont * mjit_cont_new(rb_execution_context_t *ec)
void mjit_gc_start_hook(void)
VALUE mjit_resume(void)
VALUE mjit_pause(_Bool wait_p)
VALUE ruby_prefix_path
Definition: ruby.c:579
VALUE ruby_archlibdir_path
Definition: ruby.c:579
struct rb_call_cache * cc_entries
Definition: mjit_worker.c:1121
union iseq_inline_storage_entry * is_entries
Definition: mjit_worker.c:1122
const rb_iseq_t * iseq
Definition: mjit_worker.c:1120
struct rb_call_cache cc
VALUE value
union iseq_inline_storage_entry * is_entries
VALUE(* jit_func)(struct rb_execution_context_struct *, struct rb_control_frame_struct *)
struct rb_call_data * call_data
struct rb_mjit_unit * jit_unit
struct rb_iseq_constant_body * body
void * handle
Definition: mjit_worker.c:130
rb_iseq_t * iseq
Definition: mjit_worker.c:131
bool o_file_inherited_p
Definition: mjit_worker.c:138
struct rb_mjit_compile_info compile_info
Definition: mjit_worker.c:148
char used_code_p
Definition: mjit_worker.c:145
char * o_file
Definition: mjit_worker.c:134
struct list_node unode
Definition: mjit_worker.c:146
rb_execution_context_t * ec
struct list_head living_threads
int rb_is_const_id(ID id)
Definition: symbol.c:854
char * ruby_strdup(const char *)
Definition: util.c:527
VALUE rb_vm_top_self(void)
Definition: vm.c:3349
#define getenv(name)
Definition: win32.c:73
UINT rb_w32_system_tmpdir(WCHAR *path, UINT len)
Definition: win32.c:515