Ruby 2.7.6p219 (2022-04-12 revision c9c2245c0a25176072e02db9254f0e0c84c805cd)
vm_trace.c
Go to the documentation of this file.
1/**********************************************************************
2
3 vm_trace.c -
4
5 $Author: ko1 $
6 created at: Tue Aug 14 19:37:09 2012
7
8 Copyright (C) 1993-2012 Yukihiro Matsumoto
9
10**********************************************************************/
11
12/*
13 * This file includes two parts:
14 *
15 * (1) set_trace_func internal mechanisms
16 * and C level API
17 *
18 * (2) Ruby level API
19 * (2-1) set_trace_func API
20 * (2-2) TracePoint API (not yet)
21 *
22 */
23
24#include "internal.h"
25#include "ruby/debug.h"
26
27#include "vm_core.h"
28#include "mjit.h"
29#include "iseq.h"
30#include "eval_intern.h"
31#include "builtin.h"
32
33/* (1) trace mechanisms */
34
35typedef struct rb_event_hook_struct {
41
42 struct {
44 unsigned int target_line;
47
49
50#define MAX_EVENT_NUM 32
51
52void
54{
55 rb_event_hook_t *hook = hooks->hooks;
56
57 while (hook) {
58 rb_gc_mark(hook->data);
59 hook = hook->next;
60 }
61}
62
63static void clean_hooks(const rb_execution_context_t *ec, rb_hook_list_t *list);
64
65void
67{
68 clean_hooks(GET_EC(), hooks);
69}
70
71/* ruby_vm_event_flags management */
72
73static void
74update_global_event_hook(rb_event_flag_t vm_events)
75{
76 rb_event_flag_t new_iseq_events = vm_events & ISEQ_TRACE_EVENTS;
78
79 if (new_iseq_events & ~enabled_iseq_events) {
80 /* Stop calling all JIT-ed code. Compiling trace insns is not supported for now. */
81#if USE_MJIT
83#endif
84
85 /* write all ISeqs iff new events are added */
86 rb_iseq_trace_set_all(new_iseq_events | enabled_iseq_events);
87 }
88
89 ruby_vm_event_flags = vm_events;
92}
93
94/* add/remove hooks */
95
96static rb_event_hook_t *
97alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
98{
99 rb_event_hook_t *hook;
100
101 if ((events & RUBY_INTERNAL_EVENT_MASK) && (events & ~RUBY_INTERNAL_EVENT_MASK)) {
102 rb_raise(rb_eTypeError, "Can not specify normal event and internal event simultaneously.");
103 }
104
105 hook = ALLOC(rb_event_hook_t);
106 hook->hook_flags = hook_flags;
107 hook->events = events;
108 hook->func = func;
109 hook->data = data;
110
111 /* no filters */
112 hook->filter.th = NULL;
113 hook->filter.target_line = 0;
114
115 return hook;
116}
117
118static void
119hook_list_connect(VALUE list_owner, rb_hook_list_t *list, rb_event_hook_t *hook, int global_p)
120{
121 hook->next = list->hooks;
122 list->hooks = hook;
123 list->events |= hook->events;
124
125 if (global_p) {
126 /* global hooks are root objects at GC mark. */
127 update_global_event_hook(list->events);
128 }
129 else {
130 RB_OBJ_WRITTEN(list_owner, Qundef, hook->data);
131 }
132}
133
134static void
135connect_event_hook(const rb_execution_context_t *ec, rb_event_hook_t *hook)
136{
137 rb_hook_list_t *list = rb_vm_global_hooks(ec);
138 hook_list_connect(Qundef, list, hook, TRUE);
139}
140
141static void
142rb_threadptr_add_event_hook(const rb_execution_context_t *ec, rb_thread_t *th,
143 rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
144{
145 rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
146 hook->filter.th = th;
147 connect_event_hook(ec, hook);
148}
149
150void
152{
153 rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
154}
155
156void
158{
159 rb_event_hook_t *hook = alloc_event_hook(func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
160 connect_event_hook(GET_EC(), hook);
161}
162
163void
165{
166 rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, hook_flags);
167}
168
169void
171{
172 rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
173 connect_event_hook(GET_EC(), hook);
174}
175
176static void
177clean_hooks(const rb_execution_context_t *ec, rb_hook_list_t *list)
178{
179 rb_event_hook_t *hook, **nextp = &list->hooks;
180 VM_ASSERT(list->need_clean == TRUE);
181
182 list->events = 0;
183 list->need_clean = FALSE;
184
185 while ((hook = *nextp) != 0) {
187 *nextp = hook->next;
188 xfree(hook);
189 }
190 else {
191 list->events |= hook->events; /* update active events */
192 nextp = &hook->next;
193 }
194 }
195
196 if (list == rb_vm_global_hooks(ec)) {
197 /* global events */
198 update_global_event_hook(list->events);
199 }
200 else {
201 /* local events */
202 }
203}
204
205static void
206clean_hooks_check(const rb_execution_context_t *ec, rb_hook_list_t *list)
207{
208 if (UNLIKELY(list->need_clean != FALSE)) {
209 if (list->running == 0) {
210 clean_hooks(ec, list);
211 }
212 }
213}
214
215#define MATCH_ANY_FILTER_TH ((rb_thread_t *)1)
216
217/* if func is 0, then clear all funcs */
218static int
219remove_event_hook(const rb_execution_context_t *ec, const rb_thread_t *filter_th, rb_event_hook_func_t func, VALUE data)
220{
221 rb_vm_t *vm = rb_ec_vm_ptr(ec);
223 int ret = 0;
224 rb_event_hook_t *hook = list->hooks;
225
226 while (hook) {
227 if (func == 0 || hook->func == func) {
228 if (hook->filter.th == filter_th || filter_th == MATCH_ANY_FILTER_TH) {
229 if (data == Qundef || hook->data == data) {
231 ret+=1;
232 list->need_clean = TRUE;
233 }
234 }
235 }
236 hook = hook->next;
237 }
238
239 clean_hooks_check(ec, list);
240 return ret;
241}
242
243static int
244rb_threadptr_remove_event_hook(const rb_execution_context_t *ec, const rb_thread_t *filter_th, rb_event_hook_func_t func, VALUE data)
245{
246 return remove_event_hook(ec, filter_th, func, data);
247}
248
249int
251{
252 return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func, Qundef);
253}
254
255int
257{
258 return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func, data);
259}
260
261int
263{
264 return remove_event_hook(GET_EC(), NULL, func, Qundef);
265}
266
267int
269{
270 return remove_event_hook(GET_EC(), NULL, func, data);
271}
272
273void
275{
276 rb_threadptr_remove_event_hook(ec, rb_ec_thread_ptr(ec), 0, Qundef);
277}
278
279void
281{
282 rb_threadptr_remove_event_hook(ec, MATCH_ANY_FILTER_TH, 0, Qundef);
283}
284
285/* invoke hooks */
286
287static void
288exec_hooks_body(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
289{
290 rb_event_hook_t *hook;
291
292 for (hook = list->hooks; hook; hook = hook->next) {
294 (trace_arg->event & hook->events) &&
295 (LIKELY(hook->filter.th == 0) || hook->filter.th == rb_ec_thread_ptr(ec)) &&
296 (LIKELY(hook->filter.target_line == 0) || (hook->filter.target_line == (unsigned int)rb_vm_get_sourceline(ec->cfp)))) {
298 (*hook->func)(trace_arg->event, hook->data, trace_arg->self, trace_arg->id, trace_arg->klass);
299 }
300 else {
301 (*((rb_event_hook_raw_arg_func_t)hook->func))(hook->data, trace_arg);
302 }
303 }
304 }
305}
306
307static int
308exec_hooks_precheck(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
309{
310 if (list->events & trace_arg->event) {
311 list->running++;
312 return TRUE;
313 }
314 else {
315 return FALSE;
316 }
317}
318
319static void
320exec_hooks_postcheck(const rb_execution_context_t *ec, rb_hook_list_t *list)
321{
322 list->running--;
323 clean_hooks_check(ec, list);
324}
325
326static void
327exec_hooks_unprotected(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
328{
329 if (exec_hooks_precheck(ec, list, trace_arg) == 0) return;
330 exec_hooks_body(ec, list, trace_arg);
331 exec_hooks_postcheck(ec, list);
332}
333
334static int
335exec_hooks_protected(rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
336{
337 enum ruby_tag_type state;
338 volatile int raised;
339
340 if (exec_hooks_precheck(ec, list, trace_arg) == 0) return 0;
341
342 raised = rb_ec_reset_raised(ec);
343
344 /* TODO: Support !RUBY_EVENT_HOOK_FLAG_SAFE hooks */
345
346 EC_PUSH_TAG(ec);
347 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
348 exec_hooks_body(ec, list, trace_arg);
349 }
350 EC_POP_TAG();
351
352 exec_hooks_postcheck(ec, list);
353
354 if (raised) {
356 }
357
358 return state;
359}
360
363{
364 rb_execution_context_t *ec = trace_arg->ec;
365
366 if (UNLIKELY(trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
368 /* skip hooks because this thread doing INTERNAL_EVENT */
369 }
370 else {
371 rb_trace_arg_t *prev_trace_arg = ec->trace_arg;
372
373 ec->trace_arg = trace_arg;
374 /* only global hooks */
375 exec_hooks_unprotected(ec, rb_vm_global_hooks(ec), trace_arg);
376 ec->trace_arg = prev_trace_arg;
377 }
378 }
379 else {
380 if (ec->trace_arg == NULL && /* check reentrant */
381 trace_arg->self != rb_mRubyVMFrozenCore /* skip special methods. TODO: remove it. */) {
382 const VALUE errinfo = ec->errinfo;
383 const VALUE old_recursive = ec->local_storage_recursive_hash;
384 int state = 0;
385
386 /* setup */
388 ec->errinfo = Qnil;
389 ec->trace_arg = trace_arg;
390
391 /* kick hooks */
392 if ((state = exec_hooks_protected(ec, hooks, trace_arg)) == TAG_NONE) {
393 ec->errinfo = errinfo;
394 }
395
396 /* cleanup */
397 ec->trace_arg = NULL;
399 ec->local_storage_recursive_hash = old_recursive;
400
401 if (state) {
402 if (pop_p) {
403 if (VM_FRAME_FINISHED_P(ec->cfp)) {
404 ec->tag = ec->tag->prev;
405 }
406 rb_vm_pop_frame(ec);
407 }
408 EC_JUMP_TAG(ec, state);
409 }
410 }
411 }
412}
413
414VALUE
416{
417 volatile int raised;
418 volatile VALUE result = Qnil;
419 rb_execution_context_t *const ec = GET_EC();
420 rb_vm_t *const vm = rb_ec_vm_ptr(ec);
421 enum ruby_tag_type state;
422 rb_trace_arg_t dummy_trace_arg;
423 dummy_trace_arg.event = 0;
424
425 if (!ec->trace_arg) {
426 ec->trace_arg = &dummy_trace_arg;
427 }
428
429 raised = rb_ec_reset_raised(ec);
430
431 EC_PUSH_TAG(ec);
432 if (LIKELY((state = EC_EXEC_TAG()) == TAG_NONE)) {
433 result = (*func)(arg);
434 }
435 else {
436 (void)*&vm; /* suppress "clobbered" warning */
437 }
438 EC_POP_TAG();
439
440 if (raised) {
442 }
443
444 if (ec->trace_arg == &dummy_trace_arg) {
445 ec->trace_arg = NULL;
446 }
447
448 if (state) {
449#if defined RUBY_USE_SETJMPEX && RUBY_USE_SETJMPEX
450 RB_GC_GUARD(result);
451#endif
452 EC_JUMP_TAG(ec, state);
453 }
454
455 return result;
456}
457
458static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
459
460/* (2-1) set_trace_func (old API) */
461
462/*
463 * call-seq:
464 * set_trace_func(proc) -> proc
465 * set_trace_func(nil) -> nil
466 *
467 * Establishes _proc_ as the handler for tracing, or disables
468 * tracing if the parameter is +nil+.
469 *
470 * *Note:* this method is obsolete, please use TracePoint instead.
471 *
472 * _proc_ takes up to six parameters:
473 *
474 * * an event name
475 * * a filename
476 * * a line number
477 * * an object id
478 * * a binding
479 * * the name of a class
480 *
481 * _proc_ is invoked whenever an event occurs.
482 *
483 * Events are:
484 *
485 * +c-call+:: call a C-language routine
486 * +c-return+:: return from a C-language routine
487 * +call+:: call a Ruby method
488 * +class+:: start a class or module definition
489 * +end+:: finish a class or module definition
490 * +line+:: execute code on a new line
491 * +raise+:: raise an exception
492 * +return+:: return from a Ruby method
493 *
494 * Tracing is disabled within the context of _proc_.
495 *
496 * class Test
497 * def test
498 * a = 1
499 * b = 2
500 * end
501 * end
502 *
503 * set_trace_func proc { |event, file, line, id, binding, classname|
504 * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
505 * }
506 * t = Test.new
507 * t.test
508 *
509 * line prog.rb:11 false
510 * c-call prog.rb:11 new Class
511 * c-call prog.rb:11 initialize Object
512 * c-return prog.rb:11 initialize Object
513 * c-return prog.rb:11 new Class
514 * line prog.rb:12 false
515 * call prog.rb:2 test Test
516 * line prog.rb:3 test Test
517 * line prog.rb:4 test Test
518 * return prog.rb:4 test Test
519 */
520
521static VALUE
522set_trace_func(VALUE obj, VALUE trace)
523{
524 rb_remove_event_hook(call_trace_func);
525
526 if (NIL_P(trace)) {
527 return Qnil;
528 }
529
530 if (!rb_obj_is_proc(trace)) {
531 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
532 }
533
534 rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
535 return trace;
536}
537
538static void
539thread_add_trace_func(rb_execution_context_t *ec, rb_thread_t *filter_th, VALUE trace)
540{
541 if (!rb_obj_is_proc(trace)) {
542 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
543 }
544
545 rb_threadptr_add_event_hook(ec, filter_th, call_trace_func, RUBY_EVENT_ALL, trace, RUBY_EVENT_HOOK_FLAG_SAFE);
546}
547
548/*
549 * call-seq:
550 * thr.add_trace_func(proc) -> proc
551 *
552 * Adds _proc_ as a handler for tracing.
553 *
554 * See Thread#set_trace_func and Kernel#set_trace_func.
555 */
556
557static VALUE
558thread_add_trace_func_m(VALUE obj, VALUE trace)
559{
560 thread_add_trace_func(GET_EC(), rb_thread_ptr(obj), trace);
561 return trace;
562}
563
564/*
565 * call-seq:
566 * thr.set_trace_func(proc) -> proc
567 * thr.set_trace_func(nil) -> nil
568 *
569 * Establishes _proc_ on _thr_ as the handler for tracing, or
570 * disables tracing if the parameter is +nil+.
571 *
572 * See Kernel#set_trace_func.
573 */
574
575static VALUE
576thread_set_trace_func_m(VALUE target_thread, VALUE trace)
577{
579 rb_thread_t *target_th = rb_thread_ptr(target_thread);
580
581 rb_threadptr_remove_event_hook(ec, target_th, call_trace_func, Qundef);
582
583 if (NIL_P(trace)) {
584 return Qnil;
585 }
586 else {
587 thread_add_trace_func(ec, target_th, trace);
588 return trace;
589 }
590}
591
592static const char *
593get_event_name(rb_event_flag_t event)
594{
595 switch (event) {
596 case RUBY_EVENT_LINE: return "line";
597 case RUBY_EVENT_CLASS: return "class";
598 case RUBY_EVENT_END: return "end";
599 case RUBY_EVENT_CALL: return "call";
600 case RUBY_EVENT_RETURN: return "return";
601 case RUBY_EVENT_C_CALL: return "c-call";
602 case RUBY_EVENT_C_RETURN: return "c-return";
603 case RUBY_EVENT_RAISE: return "raise";
604 default:
605 return "unknown";
606 }
607}
608
609static ID
610get_event_id(rb_event_flag_t event)
611{
612 ID id;
613
614 switch (event) {
615#define C(name, NAME) case RUBY_EVENT_##NAME: CONST_ID(id, #name); return id;
616 C(line, LINE);
617 C(class, CLASS);
618 C(end, END);
619 C(call, CALL);
620 C(return, RETURN);
621 C(c_call, C_CALL);
622 C(c_return, C_RETURN);
623 C(raise, RAISE);
624 C(b_call, B_CALL);
625 C(b_return, B_RETURN);
626 C(thread_begin, THREAD_BEGIN);
627 C(thread_end, THREAD_END);
628 C(fiber_switch, FIBER_SWITCH);
629 C(script_compiled, SCRIPT_COMPILED);
630#undef C
631 default:
632 return 0;
633 }
634}
635
636static void
637get_path_and_lineno(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, rb_event_flag_t event, VALUE *pathp, int *linep)
638{
640
641 if (cfp) {
642 const rb_iseq_t *iseq = cfp->iseq;
643 *pathp = rb_iseq_path(iseq);
644
645 if (event & (RUBY_EVENT_CLASS |
649 }
650 else {
651 *linep = rb_vm_get_sourceline(cfp);
652 }
653 }
654 else {
655 *pathp = Qnil;
656 *linep = 0;
657 }
658}
659
660static void
661call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
662{
663 int line;
664 VALUE filename;
665 VALUE eventname = rb_str_new2(get_event_name(event));
666 VALUE argv[6];
667 const rb_execution_context_t *ec = GET_EC();
668
669 get_path_and_lineno(ec, ec->cfp, event, &filename, &line);
670
671 if (!klass) {
673 }
674
675 if (klass) {
676 if (RB_TYPE_P(klass, T_ICLASS)) {
677 klass = RBASIC(klass)->klass;
678 }
679 else if (FL_TEST(klass, FL_SINGLETON)) {
681 }
682 }
683
684 argv[0] = eventname;
685 argv[1] = filename;
686 argv[2] = INT2FIX(line);
687 argv[3] = id ? ID2SYM(id) : Qnil;
688 argv[4] = (self && (filename != Qnil)) ? rb_binding_new() : Qnil;
689 argv[5] = klass ? klass : Qnil;
690
692}
693
694/* (2-2) TracePoint API */
695
696static VALUE rb_cTracePoint;
697
698typedef struct rb_tp_struct {
700 int tracing; /* bool */
702 VALUE local_target_set; /* Hash: target ->
703 * Qtrue (if target is iseq) or
704 * Qfalse (if target is bmethod)
705 */
706 void (*func)(VALUE tpval, void *data);
707 void *data;
709 VALUE self;
711
712static void
713tp_mark(void *ptr)
714{
715 rb_tp_t *tp = ptr;
716 rb_gc_mark(tp->proc);
718 if (tp->target_th) rb_gc_mark(tp->target_th->self);
719}
720
721static size_t
722tp_memsize(const void *ptr)
723{
724 return sizeof(rb_tp_t);
725}
726
727static const rb_data_type_t tp_data_type = {
728 "tracepoint",
729 {tp_mark, RUBY_TYPED_DEFAULT_FREE, tp_memsize,},
731};
732
733static VALUE
734tp_alloc(VALUE klass)
735{
736 rb_tp_t *tp;
737 return TypedData_Make_Struct(klass, rb_tp_t, &tp_data_type, tp);
738}
739
740static rb_event_flag_t
741symbol2event_flag(VALUE v)
742{
743 ID id;
745 const rb_event_flag_t RUBY_EVENT_A_CALL =
747 const rb_event_flag_t RUBY_EVENT_A_RETURN =
749
750#define C(name, NAME) CONST_ID(id, #name); if (sym == ID2SYM(id)) return RUBY_EVENT_##NAME
751 C(line, LINE);
752 C(class, CLASS);
753 C(end, END);
754 C(call, CALL);
755 C(return, RETURN);
756 C(c_call, C_CALL);
757 C(c_return, C_RETURN);
758 C(raise, RAISE);
759 C(b_call, B_CALL);
760 C(b_return, B_RETURN);
761 C(thread_begin, THREAD_BEGIN);
762 C(thread_end, THREAD_END);
763 C(fiber_switch, FIBER_SWITCH);
764 C(script_compiled, SCRIPT_COMPILED);
765
766 /* joke */
767 C(a_call, A_CALL);
768 C(a_return, A_RETURN);
769#undef C
770 rb_raise(rb_eArgError, "unknown event: %"PRIsVALUE, rb_sym2str(sym));
771}
772
773static rb_tp_t *
774tpptr(VALUE tpval)
775{
776 rb_tp_t *tp;
777 TypedData_Get_Struct(tpval, rb_tp_t, &tp_data_type, tp);
778 return tp;
779}
780
781static rb_trace_arg_t *
782get_trace_arg(void)
783{
784 rb_trace_arg_t *trace_arg = GET_EC()->trace_arg;
785 if (trace_arg == 0) {
786 rb_raise(rb_eRuntimeError, "access from outside");
787 }
788 return trace_arg;
789}
790
791struct rb_trace_arg_struct *
793{
794 return get_trace_arg();
795}
796
799{
800 return trace_arg->event;
801}
802
803VALUE
805{
806 return ID2SYM(get_event_id(trace_arg->event));
807}
808
809static void
810fill_path_and_lineno(rb_trace_arg_t *trace_arg)
811{
812 if (trace_arg->path == Qundef) {
813 get_path_and_lineno(trace_arg->ec, trace_arg->cfp, trace_arg->event, &trace_arg->path, &trace_arg->lineno);
814 }
815}
816
817VALUE
819{
820 fill_path_and_lineno(trace_arg);
821 return INT2FIX(trace_arg->lineno);
822}
823VALUE
825{
826 fill_path_and_lineno(trace_arg);
827 return trace_arg->path;
828}
829
830static void
831fill_id_and_klass(rb_trace_arg_t *trace_arg)
832{
833 if (!trace_arg->klass_solved) {
834 if (!trace_arg->klass) {
835 rb_vm_control_frame_id_and_class(trace_arg->cfp, &trace_arg->id, &trace_arg->called_id, &trace_arg->klass);
836 }
837
838 if (trace_arg->klass) {
839 if (RB_TYPE_P(trace_arg->klass, T_ICLASS)) {
840 trace_arg->klass = RBASIC(trace_arg->klass)->klass;
841 }
842 }
843 else {
844 trace_arg->klass = Qnil;
845 }
846
847 trace_arg->klass_solved = 1;
848 }
849}
850
851VALUE
853{
854 switch(trace_arg->event) {
855 case RUBY_EVENT_CALL:
858 case RUBY_EVENT_B_RETURN: {
859 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(trace_arg->ec, trace_arg->cfp);
860 if (cfp) {
861 int is_proc = 0;
862 if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_BLOCK && !VM_FRAME_LAMBDA_P(cfp)) {
863 is_proc = 1;
864 }
865 return rb_iseq_parameters(cfp->iseq, is_proc);
866 }
867 break;
868 }
870 case RUBY_EVENT_C_RETURN: {
871 fill_id_and_klass(trace_arg);
872 if (trace_arg->klass && trace_arg->id) {
873 const rb_method_entry_t *me;
874 VALUE iclass = Qnil;
875 me = rb_method_entry_without_refinements(trace_arg->klass, trace_arg->id, &iclass);
877 }
878 break;
879 }
880 case RUBY_EVENT_RAISE:
881 case RUBY_EVENT_LINE:
882 case RUBY_EVENT_CLASS:
883 case RUBY_EVENT_END:
885 rb_raise(rb_eRuntimeError, "not supported by this event");
886 break;
887 }
888 return Qnil;
889}
890
891VALUE
893{
894 fill_id_and_klass(trace_arg);
895 return trace_arg->id ? ID2SYM(trace_arg->id) : Qnil;
896}
897
898VALUE
900{
901 fill_id_and_klass(trace_arg);
902 return trace_arg->called_id ? ID2SYM(trace_arg->called_id) : Qnil;
903}
904
905VALUE
907{
908 fill_id_and_klass(trace_arg);
909 return trace_arg->klass;
910}
911
912VALUE
914{
916 cfp = rb_vm_get_binding_creatable_next_cfp(trace_arg->ec, trace_arg->cfp);
917
918 if (cfp) {
919 return rb_vm_make_binding(trace_arg->ec, cfp);
920 }
921 else {
922 return Qnil;
923 }
924}
925
926VALUE
928{
929 return trace_arg->self;
930}
931
932VALUE
934{
936 /* ok */
937 }
938 else {
939 rb_raise(rb_eRuntimeError, "not supported by this event");
940 }
941 if (trace_arg->data == Qundef) {
942 rb_bug("rb_tracearg_return_value: unreachable");
943 }
944 return trace_arg->data;
945}
946
947VALUE
949{
950 if (trace_arg->event & (RUBY_EVENT_RAISE)) {
951 /* ok */
952 }
953 else {
954 rb_raise(rb_eRuntimeError, "not supported by this event");
955 }
956 if (trace_arg->data == Qundef) {
957 rb_bug("rb_tracearg_raised_exception: unreachable");
958 }
959 return trace_arg->data;
960}
961
962VALUE
964{
965 VALUE data = trace_arg->data;
966
967 if (trace_arg->event & (RUBY_EVENT_SCRIPT_COMPILED)) {
968 /* ok */
969 }
970 else {
971 rb_raise(rb_eRuntimeError, "not supported by this event");
972 }
973 if (data == Qundef) {
974 rb_bug("rb_tracearg_raised_exception: unreachable");
975 }
976 if (rb_obj_is_iseq(data)) {
977 return Qnil;
978 }
979 else {
981 /* [src, iseq] */
982 return RARRAY_AREF(data, 0);
983 }
984}
985
986VALUE
988{
989 VALUE data = trace_arg->data;
990
991 if (trace_arg->event & (RUBY_EVENT_SCRIPT_COMPILED)) {
992 /* ok */
993 }
994 else {
995 rb_raise(rb_eRuntimeError, "not supported by this event");
996 }
997 if (data == Qundef) {
998 rb_bug("rb_tracearg_raised_exception: unreachable");
999 }
1000
1001 if (rb_obj_is_iseq(data)) {
1002 return rb_iseqw_new((const rb_iseq_t *)data);
1003 }
1004 else {
1006 VM_ASSERT(rb_obj_is_iseq(RARRAY_AREF(data, 1)));
1007
1008 /* [src, iseq] */
1009 return rb_iseqw_new((const rb_iseq_t *)RARRAY_AREF(data, 1));
1010 }
1011}
1012
1013VALUE
1015{
1017 /* ok */
1018 }
1019 else {
1020 rb_raise(rb_eRuntimeError, "not supported by this event");
1021 }
1022 if (trace_arg->data == Qundef) {
1023 rb_bug("rb_tracearg_object: unreachable");
1024 }
1025 return trace_arg->data;
1026}
1027
1028static VALUE
1029tracepoint_attr_event(rb_execution_context_t *ec, VALUE tpval)
1030{
1031 return rb_tracearg_event(get_trace_arg());
1032}
1033
1034static VALUE
1035tracepoint_attr_lineno(rb_execution_context_t *ec, VALUE tpval)
1036{
1037 return rb_tracearg_lineno(get_trace_arg());
1038}
1039static VALUE
1040tracepoint_attr_path(rb_execution_context_t *ec, VALUE tpval)
1041{
1042 return rb_tracearg_path(get_trace_arg());
1043}
1044
1045static VALUE
1046tracepoint_attr_parameters(rb_execution_context_t *ec, VALUE tpval)
1047{
1048 return rb_tracearg_parameters(get_trace_arg());
1049}
1050
1051static VALUE
1052tracepoint_attr_method_id(rb_execution_context_t *ec, VALUE tpval)
1053{
1054 return rb_tracearg_method_id(get_trace_arg());
1055}
1056
1057static VALUE
1058tracepoint_attr_callee_id(rb_execution_context_t *ec, VALUE tpval)
1059{
1060 return rb_tracearg_callee_id(get_trace_arg());
1061}
1062
1063static VALUE
1064tracepoint_attr_defined_class(rb_execution_context_t *ec, VALUE tpval)
1065{
1066 return rb_tracearg_defined_class(get_trace_arg());
1067}
1068
1069static VALUE
1070tracepoint_attr_binding(rb_execution_context_t *ec, VALUE tpval)
1071{
1072 return rb_tracearg_binding(get_trace_arg());
1073}
1074
1075static VALUE
1076tracepoint_attr_self(rb_execution_context_t *ec, VALUE tpval)
1077{
1078 return rb_tracearg_self(get_trace_arg());
1079}
1080
1081static VALUE
1082tracepoint_attr_return_value(rb_execution_context_t *ec, VALUE tpval)
1083{
1084 return rb_tracearg_return_value(get_trace_arg());
1085}
1086
1087static VALUE
1088tracepoint_attr_raised_exception(rb_execution_context_t *ec, VALUE tpval)
1089{
1090 return rb_tracearg_raised_exception(get_trace_arg());
1091}
1092
1093static VALUE
1094tracepoint_attr_eval_script(rb_execution_context_t *ec, VALUE tpval)
1095{
1096 return rb_tracearg_eval_script(get_trace_arg());
1097}
1098
1099static VALUE
1100tracepoint_attr_instruction_sequence(rb_execution_context_t *ec, VALUE tpval)
1101{
1102 return rb_tracearg_instruction_sequence(get_trace_arg());
1103}
1104
1105static void
1106tp_call_trace(VALUE tpval, rb_trace_arg_t *trace_arg)
1107{
1108 rb_tp_t *tp = tpptr(tpval);
1109
1110 if (tp->func) {
1111 (*tp->func)(tpval, tp->data);
1112 }
1113 else {
1114 rb_proc_call_with_block((VALUE)tp->proc, 1, &tpval, Qnil);
1115 }
1116}
1117
1118VALUE
1120{
1121 rb_tp_t *tp;
1122 tp = tpptr(tpval);
1123
1124 if (tp->local_target_set != Qfalse) {
1125 rb_raise(rb_eArgError, "can't nest-enable a targeting TracePoint");
1126 }
1127
1128 if (tp->target_th) {
1129 rb_thread_add_event_hook2(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
1131 }
1132 else {
1133 rb_add_event_hook2((rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
1135 }
1136 tp->tracing = 1;
1137 return Qundef;
1138}
1139
1140static const rb_iseq_t *
1141iseq_of(VALUE target)
1142{
1143 VALUE iseqv = rb_funcall(rb_cISeq, rb_intern("of"), 1, target);
1144 if (NIL_P(iseqv)) {
1145 rb_raise(rb_eArgError, "specified target is not supported");
1146 }
1147 else {
1148 return rb_iseqw_to_iseq(iseqv);
1149 }
1150}
1151
1152const rb_method_definition_t *rb_method_def(VALUE method); /* proc.c */
1153
1154static VALUE
1155rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
1156{
1157 rb_tp_t *tp = tpptr(tpval);
1158 const rb_iseq_t *iseq = iseq_of(target);
1159 int n;
1160 unsigned int line = 0;
1161
1162 if (tp->tracing > 0) {
1163 rb_raise(rb_eArgError, "can't nest-enable a targeting TracePoint");
1164 }
1165
1166 if (!NIL_P(target_line)) {
1167 if ((tp->events & RUBY_EVENT_LINE) == 0) {
1168 rb_raise(rb_eArgError, "target_line is specified, but line event is not specified");
1169 }
1170 else {
1171 line = NUM2UINT(target_line);
1172 }
1173 }
1174
1177
1178 /* iseq */
1181
1182 /* bmethod */
1183 if (rb_obj_is_method(target)) {
1185 if (def->type == VM_METHOD_TYPE_BMETHOD &&
1188 rb_hook_list_connect_tracepoint(target, def->body.bmethod.hooks, tpval, 0);
1189 rb_hash_aset(tp->local_target_set, target, Qfalse);
1190
1191 n++;
1192 }
1193 }
1194
1195 if (n == 0) {
1196 rb_raise(rb_eArgError, "can not enable any hooks");
1197 }
1198
1200
1201 tp->tracing = 1;
1202
1203 return Qnil;
1204}
1205
1206static int
1207disable_local_event_iseq_i(VALUE target, VALUE iseq_p, VALUE tpval)
1208{
1209 if (iseq_p) {
1211 }
1212 else {
1213 /* bmethod */
1215 rb_hook_list_t *hooks = def->body.bmethod.hooks;
1216 VM_ASSERT(hooks != NULL);
1217 rb_hook_list_remove_tracepoint(hooks, tpval);
1218 if (hooks->running == 0) {
1220 }
1221 def->body.bmethod.hooks = NULL;
1222 }
1223 return ST_CONTINUE;
1224}
1225
1226VALUE
1228{
1229 rb_tp_t *tp;
1230
1231 tp = tpptr(tpval);
1232
1233 if (tp->local_target_set) {
1234 rb_hash_foreach(tp->local_target_set, disable_local_event_iseq_i, tpval);
1237 }
1238 else {
1239 if (tp->target_th) {
1241 }
1242 else {
1244 }
1245 }
1246 tp->tracing = 0;
1247 tp->target_th = NULL;
1248 return Qundef;
1249}
1250
1251void
1252rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line)
1253{
1254 rb_tp_t *tp = tpptr(tpval);
1255 rb_event_hook_t *hook = alloc_event_hook((rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
1257 hook->filter.target_line = target_line;
1258 hook_list_connect(target, list, hook, FALSE);
1259}
1260
1261void
1263{
1264 rb_event_hook_t *hook = list->hooks;
1265 rb_event_flag_t events = 0;
1266
1267 while (hook) {
1268 if (hook->data == tpval) {
1270 list->need_clean = TRUE;
1271 }
1272 else {
1273 events |= hook->events;
1274 }
1275 hook = hook->next;
1276 }
1277
1278 list->events = events;
1279}
1280
1281static VALUE
1282tracepoint_enable_m(rb_execution_context_t *ec, VALUE tpval, VALUE target, VALUE target_line, VALUE target_thread)
1283{
1284 rb_tp_t *tp = tpptr(tpval);
1285 int previous_tracing = tp->tracing;
1286
1287 /* check target_thread */
1288 if (RTEST(target_thread)) {
1289 if (tp->target_th) {
1290 rb_raise(rb_eArgError, "can not override target_thread filter");
1291 }
1292 tp->target_th = rb_thread_ptr(target_thread);
1293 }
1294 else {
1295 tp->target_th = NULL;
1296 }
1297
1298 if (NIL_P(target)) {
1299 if (!NIL_P(target_line)) {
1300 rb_raise(rb_eArgError, "only target_line is specified");
1301 }
1302 rb_tracepoint_enable(tpval);
1303 }
1304 else {
1305 rb_tracepoint_enable_for_target(tpval, target, target_line);
1306 }
1307
1308 if (rb_block_given_p()) {
1309 return rb_ensure(rb_yield, Qundef,
1310 previous_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
1311 tpval);
1312 }
1313 else {
1314 return previous_tracing ? Qtrue : Qfalse;
1315 }
1316}
1317
1318static VALUE
1319tracepoint_disable_m(rb_execution_context_t *ec, VALUE tpval)
1320{
1321 rb_tp_t *tp = tpptr(tpval);
1322 int previous_tracing = tp->tracing;
1323
1324 if (rb_block_given_p()) {
1325 if (tp->local_target_set != Qfalse) {
1326 rb_raise(rb_eArgError, "can't disable a targeting TracePoint in a block");
1327 }
1328
1329 rb_tracepoint_disable(tpval);
1330 return rb_ensure(rb_yield, Qundef,
1331 previous_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
1332 tpval);
1333 }
1334 else {
1335 rb_tracepoint_disable(tpval);
1336 return previous_tracing ? Qtrue : Qfalse;
1337 }
1338}
1339
1340VALUE
1342{
1343 rb_tp_t *tp = tpptr(tpval);
1344 return tp->tracing ? Qtrue : Qfalse;
1345}
1346
1347static VALUE
1348tracepoint_enabled_p(rb_execution_context_t *ec, VALUE tpval)
1349{
1350 return rb_tracepoint_enabled_p(tpval);
1351}
1352
1353static VALUE
1354tracepoint_new(VALUE klass, rb_thread_t *target_th, rb_event_flag_t events, void (func)(VALUE, void*), void *data, VALUE proc)
1355{
1356 VALUE tpval = tp_alloc(klass);
1357 rb_tp_t *tp;
1358 TypedData_Get_Struct(tpval, rb_tp_t, &tp_data_type, tp);
1359
1360 tp->proc = proc;
1361 tp->func = func;
1362 tp->data = data;
1363 tp->events = events;
1364 tp->self = tpval;
1365
1366 return tpval;
1367}
1368
1369/*
1370 * Creates a tracepoint by registering a callback function for one or more
1371 * tracepoint events. Once the tracepoint is created, you can use
1372 * rb_tracepoint_enable to enable the tracepoint.
1373 *
1374 * Parameters:
1375 * 1. VALUE target_thval - Meant for picking the thread in which the tracepoint
1376 * is to be created. However, current implementation ignore this parameter,
1377 * tracepoint is created for all threads. Simply specify Qnil.
1378 * 2. rb_event_flag_t events - Event(s) to listen to.
1379 * 3. void (*func)(VALUE, void *) - A callback function.
1380 * 4. void *data - Void pointer that will be passed to the callback function.
1381 *
1382 * When the callback function is called, it will be passed 2 parameters:
1383 * 1)VALUE tpval - the TracePoint object from which trace args can be extracted.
1384 * 2)void *data - A void pointer which helps to share scope with the callback function.
1385 *
1386 * It is important to note that you cannot register callbacks for normal events and internal events
 1387 * simultaneously because they serve different purposes.
1388 * You can use any Ruby APIs (calling methods and so on) on normal event hooks.
1389 * However, in internal events, you can not use any Ruby APIs (even object creations).
1390 * This is why we can't specify internal events by TracePoint directly.
1391 * Limitations are MRI version specific.
1392 *
1393 * Example:
1394 * rb_tracepoint_new(Qnil, RUBY_INTERNAL_EVENT_NEWOBJ | RUBY_INTERNAL_EVENT_FREEOBJ, obj_event_i, data);
1395 *
1396 * In this example, a callback function obj_event_i will be registered for
1397 * internal events RUBY_INTERNAL_EVENT_NEWOBJ and RUBY_INTERNAL_EVENT_FREEOBJ.
1398 */
1399VALUE
1400rb_tracepoint_new(VALUE target_thval, rb_event_flag_t events, void (*func)(VALUE, void *), void *data)
1401{
1402 rb_thread_t *target_th = NULL;
1403
1404 if (RTEST(target_thval)) {
1405 target_th = rb_thread_ptr(target_thval);
1406 /* TODO: Test it!
1407 * Warning: This function is not tested.
1408 */
1409 }
1410 return tracepoint_new(rb_cTracePoint, target_th, events, func, data, Qundef);
1411}
1412
1413static VALUE
1414tracepoint_new_s(rb_execution_context_t *ec, VALUE self, VALUE args)
1415{
1416 rb_event_flag_t events = 0;
1417 long i;
1418 long argc = RARRAY_LEN(args);
1419
1420 if (argc > 0) {
1421 for (i=0; i<argc; i++) {
1422 events |= symbol2event_flag(RARRAY_AREF(args, i));
1423 }
1424 }
1425 else {
1427 }
1428
1429 if (!rb_block_given_p()) {
1430 rb_raise(rb_eArgError, "must be called with a block");
1431 }
1432
1433 return tracepoint_new(self, 0, events, 0, 0, rb_block_proc());
1434}
1435
1436static VALUE
1437tracepoint_trace_s(rb_execution_context_t *ec, VALUE self, VALUE args)
1438{
1439 VALUE trace = tracepoint_new_s(ec, self, args);
1440 rb_tracepoint_enable(trace);
1441 return trace;
1442}
1443
1444static VALUE
1445tracepoint_inspect(rb_execution_context_t *ec, VALUE self)
1446{
1447 rb_tp_t *tp = tpptr(self);
1448 rb_trace_arg_t *trace_arg = GET_EC()->trace_arg;
1449
1450 if (trace_arg) {
1451 switch (trace_arg->event) {
1452 case RUBY_EVENT_LINE:
1453 {
1454 VALUE sym = rb_tracearg_method_id(trace_arg);
1455 if (NIL_P(sym))
1456 goto default_inspect;
1457 return rb_sprintf("#<TracePoint:%"PRIsVALUE"@%"PRIsVALUE":%d in `%"PRIsVALUE"'>",
1458 rb_tracearg_event(trace_arg),
1459 rb_tracearg_path(trace_arg),
1460 FIX2INT(rb_tracearg_lineno(trace_arg)),
1461 sym);
1462 }
1463 case RUBY_EVENT_CALL:
1464 case RUBY_EVENT_C_CALL:
1465 case RUBY_EVENT_RETURN:
1467 return rb_sprintf("#<TracePoint:%"PRIsVALUE" `%"PRIsVALUE"'@%"PRIsVALUE":%d>",
1468 rb_tracearg_event(trace_arg),
1469 rb_tracearg_method_id(trace_arg),
1470 rb_tracearg_path(trace_arg),
1471 FIX2INT(rb_tracearg_lineno(trace_arg)));
1474 return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE">",
1475 rb_tracearg_event(trace_arg),
1476 rb_tracearg_self(trace_arg));
1477 default:
1479 return rb_sprintf("#<TracePoint:%"PRIsVALUE"@%"PRIsVALUE":%d>",
1480 rb_tracearg_event(trace_arg),
1481 rb_tracearg_path(trace_arg),
1482 FIX2INT(rb_tracearg_lineno(trace_arg)));
1483 }
1484 }
1485 else {
1486 return rb_sprintf("#<TracePoint:%s>", tp->tracing ? "enabled" : "disabled");
1487 }
1488}
1489
1490static void
1491tracepoint_stat_event_hooks(VALUE hash, VALUE key, rb_event_hook_t *hook)
1492{
1493 int active = 0, deleted = 0;
1494
1495 while (hook) {
1497 deleted++;
1498 }
1499 else {
1500 active++;
1501 }
1502 hook = hook->next;
1503 }
1504
1505 rb_hash_aset(hash, key, rb_ary_new3(2, INT2FIX(active), INT2FIX(deleted)));
1506}
1507
1508static VALUE
1509tracepoint_stat_s(rb_execution_context_t *ec, VALUE self)
1510{
1511 rb_vm_t *vm = GET_VM();
1513
1514 tracepoint_stat_event_hooks(stat, vm->self, vm->global_hooks.hooks);
1515 /* TODO: thread local hooks */
1516
1517 return stat;
1518}
1519
1520#include "trace_point.rbinc"
1521
1522/* This function is called from inits.c */
1523void
1525{
1526 /* trace_func */
1527 rb_define_global_function("set_trace_func", set_trace_func, 1);
1528 rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
1529 rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);
1530
1531 rb_cTracePoint = rb_define_class("TracePoint", rb_cObject);
1532 rb_undef_alloc_func(rb_cTracePoint);
1533
1534 load_trace_point();
1535}
1536
1539 void *data;
1541
1542#define MAX_POSTPONED_JOB 1000
1543#define MAX_POSTPONED_JOB_SPECIAL_ADDITION 24
1544
1546 struct list_node jnode; /* <=> vm->workqueue */
1548};
1549
1550void
1552{
1553 rb_vm_t *vm = GET_VM();
1555 vm->postponed_job_index = 0;
1556 /* workqueue is initialized when VM locks are initialized */
1557}
1558
1564
1565/* Async-signal-safe */
1567postponed_job_register(rb_execution_context_t *ec, rb_vm_t *vm,
1568 unsigned int flags, rb_postponed_job_func_t func, void *data, int max, int expected_index)
1569{
1570 rb_postponed_job_t *pjob;
1571
1572 if (expected_index >= max) return PJRR_FULL; /* failed */
1573
1574 if (ATOMIC_CAS(vm->postponed_job_index, expected_index, expected_index+1) == expected_index) {
1575 pjob = &vm->postponed_job_buffer[expected_index];
1576 }
1577 else {
1578 return PJRR_INTERRUPTED;
1579 }
1580
1581 /* unused: pjob->flags = flags; */
1582 pjob->func = func;
1583 pjob->data = data;
1584
1586
1587 return PJRR_SUCCESS;
1588}
1589
1590/*
1591 * return 0 if job buffer is full
1592 * Async-signal-safe
1593 */
1594int
1595rb_postponed_job_register(unsigned int flags, rb_postponed_job_func_t func, void *data)
1596{
1598 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1599
1600 begin:
1601 switch (postponed_job_register(ec, vm, flags, func, data, MAX_POSTPONED_JOB, vm->postponed_job_index)) {
1602 case PJRR_SUCCESS : return 1;
1603 case PJRR_FULL : return 0;
1604 case PJRR_INTERRUPTED: goto begin;
1605 default: rb_bug("unreachable\n");
1606 }
1607}
1608
1609/*
1610 * return 0 if job buffer is full
1611 * Async-signal-safe
1612 */
1613int
1614rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
1615{
1617 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1618 rb_postponed_job_t *pjob;
1619 int i, index;
1620
1621 begin:
1623 for (i=0; i<index; i++) {
1624 pjob = &vm->postponed_job_buffer[i];
1625 if (pjob->func == func) {
1627 return 2;
1628 }
1629 }
1630 switch (postponed_job_register(ec, vm, flags, func, data, MAX_POSTPONED_JOB + MAX_POSTPONED_JOB_SPECIAL_ADDITION, index)) {
1631 case PJRR_SUCCESS : return 1;
1632 case PJRR_FULL : return 0;
1633 case PJRR_INTERRUPTED: goto begin;
1634 default: rb_bug("unreachable\n");
1635 }
1636}
1637
1638/*
1639 * thread-safe and called from non-Ruby thread
1640 * returns FALSE on failure (ENOMEM), TRUE otherwise
1641 */
1642int
1643rb_workqueue_register(unsigned flags, rb_postponed_job_func_t func, void *data)
1644{
1645 struct rb_workqueue_job *wq_job = malloc(sizeof(*wq_job));
1646 rb_vm_t *vm = GET_VM();
1647
1648 if (!wq_job) return FALSE;
1649 wq_job->job.func = func;
1650 wq_job->job.data = data;
1651
1653 list_add_tail(&vm->workqueue, &wq_job->jnode);
1655
1657
1658 return TRUE;
1659}
1660
1661void
1663{
1666 volatile rb_atomic_t saved_mask = ec->interrupt_mask & block_mask;
1667 VALUE volatile saved_errno = ec->errinfo;
1668 struct list_head tmp;
1669
1670 list_head_init(&tmp);
1671
1673 list_append_list(&tmp, &vm->workqueue);
1675
1676 ec->errinfo = Qnil;
1677 /* mask POSTPONED_JOB dispatch */
1678 ec->interrupt_mask |= block_mask;
1679 {
1680 EC_PUSH_TAG(ec);
1681 if (EC_EXEC_TAG() == TAG_NONE) {
1682 int index;
1683 struct rb_workqueue_job *wq_job;
1684
1685 while ((index = vm->postponed_job_index) > 0) {
1688 (*pjob->func)(pjob->data);
1689 }
1690 }
1691 while ((wq_job = list_pop(&tmp, struct rb_workqueue_job, jnode))) {
1692 rb_postponed_job_t pjob = wq_job->job;
1693
1694 free(wq_job);
1695 (pjob.func)(pjob.data);
1696 }
1697 }
1698 EC_POP_TAG();
1699 }
1700 /* restore POSTPONED_JOB mask */
1701 ec->interrupt_mask &= ~(saved_mask ^ block_mask);
1702 ec->errinfo = saved_errno;
1703
1704 /* don't leak memory if a job threw an exception */
1705 if (!list_empty(&tmp)) {
1707 list_prepend_list(&vm->workqueue, &tmp);
1709
1711 }
1712}
#define END(name)
Definition: asm.h:115
#define sym(x)
Definition: date_core.c:3717
struct RIMemo * ptr
Definition: debug.c:65
rb_event_hook_flag_t
Definition: debug.h:97
@ RUBY_EVENT_HOOK_FLAG_DELETED
Definition: debug.h:99
@ RUBY_EVENT_HOOK_FLAG_SAFE
Definition: debug.h:98
@ RUBY_EVENT_HOOK_FLAG_RAW_ARG
Definition: debug.h:100
void(* rb_postponed_job_func_t)(void *arg)
Definition: debug.h:91
#define RETURN(val)
Definition: dir.c:295
#define free(x)
Definition: dln.c:52
struct rb_encoding_entry * list
Definition: encoding.c:56
VALUE rb_define_class(const char *, VALUE)
Defines a top-level class.
Definition: class.c:662
int rb_block_given_p(void)
Determines if the current method is given a block.
Definition: eval.c:898
VALUE rb_cThread
Definition: ruby.h:2049
void rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
Definition: vm_trace.c:157
VALUE rb_cObject
Object class.
Definition: ruby.h:2012
int rb_remove_event_hook(rb_event_hook_func_t func)
Definition: vm_trace.c:262
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2671
void rb_bug(const char *fmt,...)
Definition: error.c:636
VALUE rb_eTypeError
Definition: error.c:924
VALUE rb_eRuntimeError
Definition: error.c:922
VALUE rb_eArgError
Definition: error.c:925
VALUE rb_ensure(VALUE(*)(VALUE), VALUE, VALUE(*)(VALUE), VALUE)
An equivalent to ensure clause.
Definition: eval.c:1115
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition: object.c:78
#define CALL(n)
Definition: inits.c:16
#define RARRAY_LEN(a)
#define RUBY_EVENT_END
int rb_ec_set_raised(rb_execution_context_t *ec)
Definition: thread.c:2343
#define rb_str_new2
void rb_hash_foreach(VALUE, int(*)(VALUE, VALUE, VALUE), VALUE)
#define NULL
VALUE rb_iseq_path(const rb_iseq_t *iseq)
Definition: iseq.c:1027
#define FL_SINGLETON
#define RUBY_EVENT_C_CALL
#define RUBY_EVENT_TRACEPOINT_ALL
#define RTEST(v)
#define RUBY_EVENT_RAISE
VALUE rb_mRubyVMFrozenCore
Definition: vm.c:367
#define TAG_NONE
#define FL_TEST(x, f)
int rb_method_entry_arity(const rb_method_entry_t *me)
Definition: proc.c:2555
#define RBASIC(obj)
#define RUBY_EVENT_B_RETURN
void * malloc(size_t) __attribute__((__malloc__)) __attribute__((__warn_unused_result__)) __attribute__((__alloc_size__(1)))
_Bool mjit_call_p
Definition: mjit_worker.c:180
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Definition: thread.c:440
rb_control_frame_t * cfp
#define xfree
void rb_define_global_function(const char *, VALUE(*)(), int)
#define Qundef
VALUE rb_cISeq
Definition: iseq.c:32
#define RUBY_EVENT_SCRIPT_COMPILED
VALUE rb_ivar_get(VALUE, ID)
Definition: variable.c:1070
const VALUE VALUE obj
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec)
VALUE rb_obj_is_method(VALUE)
Definition: proc.c:1459
#define list_prepend_list(t, f)
#define GET_EC()
VALUE rb_ident_hash_new(void)
Definition: hash.c:4278
VALUE rb_obj_is_proc(VALUE)
Definition: proc.c:152
VALUE rb_iseq_parameters(const rb_iseq_t *iseq, int is_proc)
Definition: iseq.c:2939
#define NIL_P(v)
const rb_callable_method_entry_t * me
#define VM_ASSERT(expr)
#define ID2SYM(x)
#define RUBY_INTERNAL_EVENT_MASK
#define EC_EXEC_TAG()
#define list_add_tail(h, n)
#define RUBY_TYPED_DEFAULT_FREE
const char size_t n
VALUE rb_proc_call_with_block(VALUE, int argc, const VALUE *argv, VALUE)
Definition: proc.c:1000
VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp)
Definition: vm.c:953
unsigned long VALUE
#define EC_PUSH_TAG(ec)
@ POSTPONED_JOB_INTERRUPT_MASK
VALUE rb_sym2str(VALUE)
Definition: symbol.c:784
#define EC_JUMP_TAG(ec, st)
int rb_vm_get_sourceline(const rb_control_frame_t *)
Definition: vm_backtrace.c:68
#define GET_VM()
uint32_t i
#define list_pop(h, type, member)
#define NUM2UINT(x)
#define RUBY_EVENT_ALL
#define ALLOC_N(type, n)
VALUE rb_iseqw_new(const rb_iseq_t *iseq)
Definition: iseq.c:1157
VALUE rb_block_proc(void)
Definition: proc.c:837
#define RUBY_EVENT_THREAD_BEGIN
#define ZALLOC(type)
VALUE rb_to_symbol_type(VALUE obj)
Definition: symbol.c:1044
unsigned int rb_atomic_t
#define T_ICLASS
#define RUBY_EVENT_CLASS
@ VM_METHOD_TYPE_BMETHOD
return cc call
void rb_objspace_set_event_hook(const rb_event_flag_t event)
Definition: gc.c:2095
#define RB_GC_GUARD(v)
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
#define RUBY_TYPED_FREE_IMMEDIATELY
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp)
Definition: vm.c:2184
#define TypedData_Get_Struct(obj, type, data_type, sval)
#define PRIsVALUE
#define rb_ary_new3
const rb_iseq_t * rb_iseqw_to_iseq(VALUE iseqw)
Definition: iseq.c:1350
#define rb_funcall(recv, mid, argc,...)
#define FIX2INT(x)
int VALUE v
#define list_empty(h)
VALUE rb_unnamed_parameters(int arity)
Definition: proc.c:1262
rb_control_frame_t * rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
Definition: vm.c:553
#define EC_POP_TAG()
void rb_gc_mark(VALUE)
Definition: gc.c:5228
#define rb_intern(str)
#define RUBY_EVENT_LINE
void rb_iseq_trace_set_all(rb_event_flag_t turnon_events)
Definition: iseq.c:3319
#define ISEQ_TRACE_EVENTS
const rb_iseq_t * iseq
rb_control_frame_t * rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
Definition: vm.c:541
#define RUBY_EVENT_RETURN
#define TRUE
#define RUBY_EVENT_C_RETURN
#define FALSE
#define Qtrue
#define RUBY_EVENT_B_CALL
#define RUBY_INTERNAL_EVENT_FREEOBJ
#define UNLIKELY(x)
VALUE rb_iseq_first_lineno(const rb_iseq_t *iseq)
Definition: iseq.c:1057
rb_event_flag_t ruby_vm_event_flags
Definition: vm.c:375
#define Qnil
#define Qfalse
int stat(const char *__restrict__ __path, struct stat *__restrict__ __sbuf)
const rb_method_entry_t * rb_method_entry_without_refinements(VALUE klass, ID id, VALUE *defined_class)
Definition: vm_method.c:942
#define T_ARRAY
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Definition: thread.c:446
unsigned int ruby_vm_event_local_num
Definition: vm.c:377
#define RB_TYPE_P(obj, type)
#define INT2FIX(i)
#define ALLOC(type)
#define TypedData_Make_Struct(klass, type, data_type, sval)
#define MJIT_FUNC_EXPORTED
const VALUE * argv
int rb_ec_reset_raised(rb_execution_context_t *ec)
Definition: thread.c:2353
uint32_t rb_event_flag_t
void rb_undef_alloc_func(VALUE)
Definition: vm_method.c:729
VALUE rb_hash_aset(VALUE, VALUE, VALUE)
Definition: hash.c:2852
int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp)
Definition: vm.c:2200
#define list_append_list(t, f)
#define RUBY_EVENT_CALL
#define RUBY_INTERNAL_EVENT_NEWOBJ
VALUE rb_sprintf(const char *,...) __attribute__((format(printf
VALUE rb_binding_new(void)
Definition: proc.c:364
unsigned long ID
VALUE rb_yield(VALUE)
Definition: vm_eval.c:1237
rb_event_flag_t ruby_vm_event_enabled_global_flags
Definition: vm.c:376
VALUE ID id
#define ATOMIC_CAS(var, oldval, newval)
#define RUBY_EVENT_THREAD_END
void rb_define_method(VALUE, const char *, VALUE(*)(), int)
#define RARRAY_AREF(a, i)
int rb_iseq_remove_local_tracepoint_recursively(const rb_iseq_t *iseq, VALUE tpval)
Definition: iseq.c:3262
VALUE rb_hash_new(void)
Definition: hash.c:1523
#define RB_OBJ_WRITTEN(a, oldv, b)
int rb_iseq_add_local_tracepoint_recursively(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval, unsigned int target_line)
Definition: iseq.c:3206
#define LIKELY(x)
rb_event_flag_t events
Definition: vm_trace.c:37
rb_thread_t * th
Definition: vm_trace.c:43
unsigned int target_line
Definition: vm_trace.c:44
struct rb_event_hook_struct::@255 filter
rb_event_hook_func_t func
Definition: vm_trace.c:38
rb_event_hook_flag_t hook_flags
Definition: vm_trace.c:36
struct rb_event_hook_struct * next
Definition: vm_trace.c:40
struct rb_trace_arg_struct * trace_arg
struct rb_event_hook_struct * hooks
struct rb_hook_list_struct * hooks
union rb_method_definition_struct::@41 body
rb_postponed_job_func_t func
Definition: vm_trace.c:1538
rb_thread_t * target_th
Definition: vm_trace.c:701
VALUE proc
Definition: vm_trace.c:708
rb_event_flag_t events
Definition: vm_trace.c:699
VALUE self
Definition: vm_trace.c:709
void * data
Definition: vm_trace.c:707
void(* func)(VALUE tpval, void *data)
Definition: vm_trace.c:706
VALUE local_target_set
Definition: vm_trace.c:702
const rb_control_frame_t * cfp
rb_execution_context_t * ec
rb_hook_list_t global_hooks
struct list_head workqueue
struct rb_postponed_job_struct * postponed_job_buffer
rb_nativethread_lock_t workqueue_lock
struct rb_vm_tag * prev
struct list_node jnode
Definition: vm_trace.c:1546
rb_postponed_job_t job
Definition: vm_trace.c:1547
MJIT_STATIC void rb_vm_pop_frame(rb_execution_context_t *ec)
VALUE rb_tracearg_binding(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:913
VALUE rb_tracearg_parameters(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:852
struct rb_postponed_job_struct rb_postponed_job_t
void Init_vm_trace(void)
Definition: vm_trace.c:1524
VALUE rb_tracearg_instruction_sequence(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:987
void(* rb_event_hook_raw_arg_func_t)(VALUE data, const rb_trace_arg_t *arg)
Definition: vm_trace.c:48
VALUE rb_tracepoint_enabled_p(VALUE tpval)
Definition: vm_trace.c:1341
const rb_method_definition_t * rb_method_def(VALUE method)
Definition: proc.c:2658
VALUE rb_tracearg_object(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:1014
VALUE rb_tracearg_callee_id(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:899
VALUE rb_tracearg_defined_class(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:906
#define MATCH_ANY_FILTER_TH
Definition: vm_trace.c:215
MJIT_FUNC_EXPORTED void rb_exec_event_hooks(rb_trace_arg_t *trace_arg, rb_hook_list_t *hooks, int pop_p)
Definition: vm_trace.c:362
struct rb_trace_arg_struct * rb_tracearg_from_tracepoint(VALUE tpval)
Definition: vm_trace.c:792
VALUE rb_suppress_tracing(VALUE(*func)(VALUE), VALUE arg)
Definition: vm_trace.c:415
VALUE rb_tracearg_raised_exception(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:948
postponed_job_register_result
Definition: vm_trace.c:1559
@ PJRR_INTERRUPTED
Definition: vm_trace.c:1562
@ PJRR_SUCCESS
Definition: vm_trace.c:1560
@ PJRR_FULL
Definition: vm_trace.c:1561
void rb_thread_add_event_hook(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
Definition: vm_trace.c:151
#define MAX_POSTPONED_JOB_SPECIAL_ADDITION
Definition: vm_trace.c:1543
VALUE rb_tracepoint_disable(VALUE tpval)
Definition: vm_trace.c:1227
VALUE rb_tracearg_self(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:927
VALUE rb_tracepoint_new(VALUE target_thval, rb_event_flag_t events, void(*func)(VALUE, void *), void *data)
Definition: vm_trace.c:1400
void rb_hook_list_free(rb_hook_list_t *hooks)
Definition: vm_trace.c:66
int rb_thread_remove_event_hook(VALUE thval, rb_event_hook_func_t func)
Definition: vm_trace.c:250
int rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
Definition: vm_trace.c:1614
VALUE rb_tracearg_return_value(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:933
void Init_vm_postponed_job(void)
Definition: vm_trace.c:1551
struct rb_tp_struct rb_tp_t
rb_event_flag_t rb_tracearg_event_flag(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:798
void rb_ec_clear_all_trace_func(const rb_execution_context_t *ec)
Definition: vm_trace.c:280
struct rb_event_hook_struct rb_event_hook_t
#define C(name, NAME)
VALUE rb_tracearg_path(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:824
VALUE rb_tracearg_eval_script(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:963
int rb_thread_remove_event_hook_with_data(VALUE thval, rb_event_hook_func_t func, VALUE data)
Definition: vm_trace.c:256
VALUE rb_tracepoint_enable(VALUE tpval)
Definition: vm_trace.c:1119
#define MAX_POSTPONED_JOB
Definition: vm_trace.c:1542
void rb_hook_list_mark(rb_hook_list_t *hooks)
Definition: vm_trace.c:53
int rb_postponed_job_register(unsigned int flags, rb_postponed_job_func_t func, void *data)
Definition: vm_trace.c:1595
VALUE rb_tracearg_method_id(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:892
int rb_remove_event_hook_with_data(rb_event_hook_func_t func, VALUE data)
Definition: vm_trace.c:268
void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec)
Definition: vm_trace.c:274
void rb_thread_add_event_hook2(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
Definition: vm_trace.c:164
void rb_add_event_hook2(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
Definition: vm_trace.c:170
void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval)
Definition: vm_trace.c:1262
VALUE rb_tracearg_lineno(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:818
int rb_workqueue_register(unsigned flags, rb_postponed_job_func_t func, void *data)
Definition: vm_trace.c:1643
void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line)
Definition: vm_trace.c:1252
void rb_postponed_job_flush(rb_vm_t *vm)
Definition: vm_trace.c:1662
VALUE rb_tracearg_event(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:804
VALUE default_inspect(VALUE self, const char *class_name)
Definition: win32ole.c:1345