Ruby 2.7.6p219 (2022-04-12 revision c9c2245c0a25176072e02db9254f0e0c84c805cd)
thread_pthread.c
1/* -*-c-*- */
2/**********************************************************************
3
4 thread_pthread.c -
5
6 $Author$
7
8 Copyright (C) 2004-2007 Koichi Sasada
9
10**********************************************************************/
11
12#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
13
14#include "gc.h"
15#include "mjit.h"
16
17#ifdef HAVE_SYS_RESOURCE_H
18#include <sys/resource.h>
19#endif
20#ifdef HAVE_THR_STKSEGMENT
21#include <thread.h>
22#endif
23#if HAVE_FCNTL_H
24#include <fcntl.h>
25#elif HAVE_SYS_FCNTL_H
26#include <sys/fcntl.h>
27#endif
28#ifdef HAVE_SYS_PRCTL_H
29#include <sys/prctl.h>
30#endif
31#if defined(HAVE_SYS_TIME_H)
32#include <sys/time.h>
33#endif
34#if defined(__HAIKU__)
35#include <kernel/OS.h>
36#endif
37#include <time.h>
38#include <signal.h>
39
40#if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
41# define USE_EVENTFD (1)
42# include <sys/eventfd.h>
43#else
44# define USE_EVENTFD (0)
45#endif
46
47#if defined(SIGVTALRM) && !defined(__CYGWIN__)
48# define USE_UBF_LIST 1
49#endif
50
51/*
52 * UBF_TIMER and ubf_list both use SIGVTALRM.
53 *
54 * UBF_TIMER has NOTHING to do with thread timeslices (TIMER_INTERRUPT_MASK)
55 *
56 * UBF_TIMER is to close TOCTTOU signal race on programs where we
57 * cannot rely on GVL contention (vm->gvl.timer) to perform wakeups
58 * while a thread is doing blocking I/O on sockets or pipes. With
59 * rb_thread_call_without_gvl and similar functions:
60 *
61 * (1) Check interrupts.
62 * (2) release GVL.
63 * (2a) signal received
64 * (3) call func with data1 (blocks for a long time without ubf_timer)
65 * (4) acquire GVL.
66 * Other Ruby threads can not run in parallel any more.
67 * (5) Check interrupts.
68 *
69 * We need UBF_TIMER to break out of (3) if (2a) happens.
70 *
71 * ubf_list wakeups may be triggered on gvl_yield.
72 *
73 * If we have vm->gvl.timer (on GVL contention), we don't need UBF_TIMER
74 * as it can perform the same tasks while doing timeslices.
75 */
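/*
 * Illustrative sketch (not part of the original source; blocking_read() and
 * its argument struct are hypothetical): the race above is what a C
 * extension hits when it releases the GVL around blocking I/O.
 *
 *   static void *blocking_read(void *arg) { ... read(2) on a socket ... }
 *
 *   // Steps (1)-(5) above happen inside this call. RUBY_UBF_IO routes to
 *   // ubf_select()/SIGVTALRM, so UBF_TIMER must re-send the wakeup when
 *   // the signal of (2a) arrived before the read started.
 *   rb_thread_call_without_gvl(blocking_read, &args, RUBY_UBF_IO, 0);
 */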
76#define UBF_TIMER_NONE 0
77#define UBF_TIMER_POSIX 1
78#define UBF_TIMER_PTHREAD 2
79
80#ifndef UBF_TIMER
81# if defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_CREATE) && \
82 defined(CLOCK_MONOTONIC) && defined(USE_UBF_LIST)
83 /* preferred */
84# define UBF_TIMER UBF_TIMER_POSIX
85# elif defined(USE_UBF_LIST)
86 /* safe, but inefficient */
87# define UBF_TIMER UBF_TIMER_PTHREAD
88# else
89 /* we'll be racy without SIGVTALRM for ubf_list */
90# define UBF_TIMER UBF_TIMER_NONE
91# endif
92#endif
93
94enum rtimer_state {
95 /* alive, after timer_create: */
96 RTIMER_DISARM,
97 RTIMER_ARMING,
98 RTIMER_ARMED,
99
100 RTIMER_DEAD
101};
102
103#if UBF_TIMER == UBF_TIMER_POSIX
104static const struct itimerspec zero;
105static struct {
106 rb_atomic_t state; /* rtimer_state */
107 rb_pid_t owner;
108 timer_t timerid;
109} timer_posix = {
110 /* .state = */ RTIMER_DEAD,
111};
112
113#elif UBF_TIMER == UBF_TIMER_PTHREAD
114static void *timer_pthread_fn(void *);
115static struct {
116 int low[2];
117 rb_atomic_t armed; /* boolean */
118 rb_pid_t owner;
119 pthread_t thid;
120} timer_pthread = {
121 { -1, -1 },
122};
123#endif
124
127static int native_mutex_trylock(rb_nativethread_lock_t *lock);
135static void clear_thread_cache_altstack(void);
136static void ubf_wakeup_all_threads(void);
137static int ubf_threads_empty(void);
138static int native_cond_timedwait(rb_nativethread_cond_t *, pthread_mutex_t *,
139 const rb_hrtime_t *abs);
140static const rb_hrtime_t *sigwait_timeout(rb_thread_t *, int sigwait_fd,
141 const rb_hrtime_t *,
142 int *drained_p);
143static void ubf_timer_disarm(void);
144static void threadptr_trap_interrupt(rb_thread_t *);
145
146#define TIMER_THREAD_CREATED_P() (signal_self_pipe.owner_process == getpid())
147
148/* for testing, and in case we come across a platform w/o pipes: */
149#define BUSY_WAIT_SIGNALS (0)
150
151/*
152 * sigwait_th is the thread which owns sigwait_fd and sleeps on it
153 * (using ppoll). The MJIT worker can be sigwait_th with th == 0, so we
154 * initialize it to THREAD_INVALID at startup and fork time. It is the ONLY
155 * thread allowed to read from sigwait_fd, otherwise starvation can occur.
156 */
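/*
 * Sketch of the ownership protocol (inferred from native_sleep() below, not
 * part of the original source):
 *
 *   int fd = rb_sigwait_fd_get(th);    // try to become sigwait_th
 *   if (fd >= 0) {
 *       rb_sigwait_sleep(th, fd, rel); // only the owner may read fd
 *       rb_sigwait_fd_put(th, fd);     // hand ownership back
 *   }
 *   else {
 *       // somebody else owns sigwait_fd; sleep on a condvar instead
 *   }
 */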
157#define THREAD_INVALID ((const rb_thread_t *)-1)
158static const rb_thread_t *sigwait_th;
159
160#ifdef HAVE_SCHED_YIELD
161#define native_thread_yield() (void)sched_yield()
162#else
163#define native_thread_yield() ((void)0)
164#endif
165
166#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
167 defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
168 defined(HAVE_CLOCK_GETTIME)
169static pthread_condattr_t condattr_mono;
170static pthread_condattr_t *condattr_monotonic = &condattr_mono;
171#else
172static const void *const condattr_monotonic = NULL;
173#endif
174
175/* 100ms. 10ms is too small for user level thread scheduling
176 * on recent Linux (tested on 2.6.35)
177 */
178#define TIME_QUANTUM_MSEC (100)
179#define TIME_QUANTUM_USEC (TIME_QUANTUM_MSEC * 1000)
180#define TIME_QUANTUM_NSEC (TIME_QUANTUM_USEC * 1000)
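/*
 * Descriptive note (not in the original source): with TIME_QUANTUM_MSEC at
 * 100 these expand to 100,000 us and 100,000,000 ns. The same 100ms period
 * is used by do_gvl_timer() for GVL timeslices and by the POSIX/pthread UBF
 * timers below.
 */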
181
182static rb_hrtime_t native_cond_timeout(rb_nativethread_cond_t *, rb_hrtime_t);
183
184/*
185 * Designate the next gvl.timer thread, favor the last thread in
186 * the waitq since it will be in waitq longest
187 */
188static int
189designate_timer_thread(rb_vm_t *vm)
190{
191 native_thread_data_t *last;
192
193 last = list_tail(&vm->gvl.waitq, native_thread_data_t, node.gvl);
194 if (last) {
195 rb_native_cond_signal(&last->cond.gvlq);
196 return TRUE;
197 }
198 return FALSE;
199}
200
201/*
202 * We become designated timer thread to kick vm->gvl.owner
203 * periodically. Continue on old timeout if it expired.
204 */
205static void
206do_gvl_timer(rb_vm_t *vm, rb_thread_t *th)
207{
208 static rb_hrtime_t abs;
209 native_thread_data_t *nd = &th->native_thread_data;
210
211 vm->gvl.timer = th;
212
213 /* take over wakeups from UBF_TIMER */
214 ubf_timer_disarm();
215
216 if (vm->gvl.timer_err == ETIMEDOUT) {
217 abs = native_cond_timeout(&nd->cond.gvlq, TIME_QUANTUM_NSEC);
218 }
219 vm->gvl.timer_err = native_cond_timedwait(&nd->cond.gvlq, &vm->gvl.lock, &abs);
220
221 ubf_wakeup_all_threads();
222 ruby_sigchld_handler(vm);
223 if (UNLIKELY(rb_signal_buff_size())) {
224 if (th == vm->main_thread) {
225 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
226 }
227 else {
228 threadptr_trap_interrupt(vm->main_thread);
229 }
230 }
231
232 /*
233 * Timeslice. Warning: the process may fork while this
234 * thread is contending for GVL:
235 */
236 if (vm->gvl.owner) timer_thread_function();
237 vm->gvl.timer = 0;
238}
239
240static void
241gvl_acquire_common(rb_vm_t *vm, rb_thread_t *th)
242{
243 if (vm->gvl.owner) {
244 native_thread_data_t *nd = &th->native_thread_data;
245
246 VM_ASSERT(th->unblock.func == 0 &&
247 "we must not be in ubf_list and GVL waitq at the same time");
248
249 list_add_tail(&vm->gvl.waitq, &nd->node.gvl);
250
251 do {
252 if (!vm->gvl.timer) {
253 do_gvl_timer(vm, th);
254 }
255 else {
256 rb_native_cond_wait(&nd->cond.gvlq, &vm->gvl.lock);
257 }
258 } while (vm->gvl.owner);
259
260 list_del_init(&nd->node.gvl);
261
262 if (vm->gvl.need_yield) {
263 vm->gvl.need_yield = 0;
264 rb_native_cond_signal(&vm->gvl.switch_cond);
265 }
266 }
267 else { /* reset timer if uncontended */
268 vm->gvl.timer_err = ETIMEDOUT;
269 }
270 vm->gvl.owner = th;
271 if (!vm->gvl.timer) {
272 if (!designate_timer_thread(vm) && !ubf_threads_empty()) {
273 rb_thread_wakeup_timer_thread(-1);
274 }
275 }
276}
277
278static void
279gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
280{
281 rb_native_mutex_lock(&vm->gvl.lock);
282 gvl_acquire_common(vm, th);
283 rb_native_mutex_unlock(&vm->gvl.lock);
284}
285
286static const native_thread_data_t *
287gvl_release_common(rb_vm_t *vm)
288{
289 const native_thread_data_t *next;
290 vm->gvl.owner = 0;
291 next = list_top(&vm->gvl.waitq, native_thread_data_t, node.ubf);
292 if (next) rb_native_cond_signal(&next->cond.gvlq);
293
294 return next;
295}
296
297static void
298gvl_release(rb_vm_t *vm)
299{
300 rb_native_mutex_lock(&vm->gvl.lock);
301 gvl_release_common(vm);
302 rb_native_mutex_unlock(&vm->gvl.lock);
303}
304
305static void
306gvl_yield(rb_vm_t *vm, rb_thread_t *th)
307{
308 const native_thread_data_t *next;
309
310 /*
311 * Perhaps other threads are stuck in blocking region w/o GVL, too,
312 * (perhaps looping in io_close_fptr) so we kick them:
313 */
314 ubf_wakeup_all_threads();
315 rb_native_mutex_lock(&vm->gvl.lock);
316 next = gvl_release_common(vm);
317
318 /* Another thread is already processing a GVL yield. */
319 if (UNLIKELY(vm->gvl.wait_yield)) {
320 while (vm->gvl.wait_yield)
321 rb_native_cond_wait(&vm->gvl.switch_wait_cond, &vm->gvl.lock);
322 }
323 else if (next) {
324 /* Wait until another thread takes the GVL. */
325 vm->gvl.need_yield = 1;
326 vm->gvl.wait_yield = 1;
327 while (vm->gvl.need_yield)
328 rb_native_cond_wait(&vm->gvl.switch_cond, &vm->gvl.lock);
329 vm->gvl.wait_yield = 0;
330 rb_native_cond_broadcast(&vm->gvl.switch_wait_cond);
331 }
332 else {
333 rb_native_mutex_unlock(&vm->gvl.lock);
334 native_thread_yield();
335 rb_native_mutex_lock(&vm->gvl.lock);
336 rb_native_cond_broadcast(&vm->gvl.switch_wait_cond);
337 }
338 gvl_acquire_common(vm, th);
339 rb_native_mutex_unlock(&vm->gvl.lock);
340}
341
342static void
343gvl_init(rb_vm_t *vm)
344{
345 rb_native_mutex_initialize(&vm->gvl.lock);
346 rb_native_cond_initialize(&vm->gvl.switch_cond);
347 rb_native_cond_initialize(&vm->gvl.switch_wait_cond);
348 list_head_init(&vm->gvl.waitq);
349 vm->gvl.owner = 0;
350 vm->gvl.timer = 0;
351 vm->gvl.timer_err = ETIMEDOUT;
352 vm->gvl.need_yield = 0;
353 vm->gvl.wait_yield = 0;
354}
355
356static void
357gvl_destroy(rb_vm_t *vm)
358{
359 /*
360 * only called once at VM shutdown (not atfork), another thread
361 * may still grab vm->gvl.lock when calling gvl_release at
362 * the end of thread_start_func_2
363 */
364 if (0) {
365 rb_native_cond_destroy(&vm->gvl.switch_wait_cond);
366 rb_native_cond_destroy(&vm->gvl.switch_cond);
367 rb_native_mutex_destroy(&vm->gvl.lock);
368 }
369 clear_thread_cache_altstack();
370}
371
372#if defined(HAVE_WORKING_FORK)
373static void thread_cache_reset(void);
374static void
375gvl_atfork(rb_vm_t *vm)
376{
377 thread_cache_reset();
378 gvl_init(vm);
379 gvl_acquire(vm, GET_THREAD());
380}
381#endif
382
383#define NATIVE_MUTEX_LOCK_DEBUG 0
384
385static void
386mutex_debug(const char *msg, void *lock)
387{
388 if (NATIVE_MUTEX_LOCK_DEBUG) {
389 int r;
390 static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;
391
392 if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
393 fprintf(stdout, "%s: %p\n", msg, lock);
394 if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
395 }
396}
397
398void
399rb_native_mutex_lock(rb_nativethread_lock_t *lock)
400{
401 int r;
402 mutex_debug("lock", lock);
403 if ((r = pthread_mutex_lock(lock)) != 0) {
404 rb_bug_errno("pthread_mutex_lock", r);
405 }
406}
407
408void
409rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
410{
411 int r;
412 mutex_debug("unlock", lock);
413 if ((r = pthread_mutex_unlock(lock)) != 0) {
414 rb_bug_errno("pthread_mutex_unlock", r);
415 }
416}
417
418static inline int
419native_mutex_trylock(pthread_mutex_t *lock)
420{
421 int r;
422 mutex_debug("trylock", lock);
423 if ((r = pthread_mutex_trylock(lock)) != 0) {
424 if (r == EBUSY) {
425 return EBUSY;
426 }
427 else {
428 rb_bug_errno("pthread_mutex_trylock", r);
429 }
430 }
431 return 0;
432}
433
434void
435rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
436{
437 int r = pthread_mutex_init(lock, 0);
438 mutex_debug("init", lock);
439 if (r != 0) {
440 rb_bug_errno("pthread_mutex_init", r);
441 }
442}
443
444void
445rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
446{
447 int r = pthread_mutex_destroy(lock);
448 mutex_debug("destroy", lock);
449 if (r != 0) {
450 rb_bug_errno("pthread_mutex_destroy", r);
451 }
452}
453
454void
455rb_native_cond_initialize(rb_nativethread_cond_t *cond)
456{
457 int r = pthread_cond_init(cond, condattr_monotonic);
458 if (r != 0) {
459 rb_bug_errno("pthread_cond_init", r);
460 }
461}
462
463void
464rb_native_cond_destroy(rb_nativethread_cond_t *cond)
465{
466 int r = pthread_cond_destroy(cond);
467 if (r != 0) {
468 rb_bug_errno("pthread_cond_destroy", r);
469 }
470}
471
472/*
473 * In OS X 10.7 (Lion), pthread_cond_signal and pthread_cond_broadcast return
474 * EAGAIN after retrying 8192 times. You can see them in the following page:
475 *
476 * http://www.opensource.apple.com/source/Libc/Libc-763.11/pthreads/pthread_cond.c
477 *
478 * The following rb_native_cond_signal and rb_native_cond_broadcast functions
479 * need to retry until the pthread functions stop returning EAGAIN.
480 */
481
482void
483rb_native_cond_signal(rb_nativethread_cond_t *cond)
484{
485 int r;
486 do {
487 r = pthread_cond_signal(cond);
488 } while (r == EAGAIN);
489 if (r != 0) {
490 rb_bug_errno("pthread_cond_signal", r);
491 }
492}
493
494void
495rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
496{
497 int r;
498 do {
499 r = pthread_cond_broadcast(cond);
500 } while (r == EAGAIN);
501 if (r != 0) {
502 rb_bug_errno("rb_native_cond_broadcast", r);
503 }
504}
505
506void
507rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
508{
509 int r = pthread_cond_wait(cond, mutex);
510 if (r != 0) {
511 rb_bug_errno("pthread_cond_wait", r);
512 }
513}
514
515static int
516native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex,
517 const rb_hrtime_t *abs)
518{
519 int r;
520 struct timespec ts;
521
522 /*
523 * An old Linux may return EINTR. Even though POSIX says
524 * "These functions shall not return an error code of [EINTR]".
525 * http://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cond_timedwait.html
526 * Let's hide it from arch generic code.
527 */
528 do {
529 r = pthread_cond_timedwait(cond, mutex, rb_hrtime2timespec(&ts, abs));
530 } while (r == EINTR);
531
532 if (r != 0 && r != ETIMEDOUT) {
533 rb_bug_errno("pthread_cond_timedwait", r);
534 }
535
536 return r;
537}
538
539static rb_hrtime_t
540native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
541{
542 if (condattr_monotonic) {
543 return rb_hrtime_add(rb_hrtime_now(), rel);
544 }
545 else {
546 struct timespec ts;
547
548 rb_timespec_now(&ts);
549 return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);
550 }
551}
552
553#define native_cleanup_push pthread_cleanup_push
554#define native_cleanup_pop pthread_cleanup_pop
555
556static pthread_key_t ruby_native_thread_key;
557
558static void
559null_func(int i)
560{
561 /* null */
562}
563
564static rb_thread_t *
565ruby_thread_from_native(void)
566{
567 return pthread_getspecific(ruby_native_thread_key);
568}
569
570static int
571ruby_thread_set_native(rb_thread_t *th)
572{
573 return pthread_setspecific(ruby_native_thread_key, th) == 0;
574}
575
576static void native_thread_init(rb_thread_t *th);
577
578void
579Init_native_thread(rb_thread_t *th)
580{
581#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
582 if (condattr_monotonic) {
583 int r = pthread_condattr_init(condattr_monotonic);
584 if (r == 0) {
585 r = pthread_condattr_setclock(condattr_monotonic, CLOCK_MONOTONIC);
586 }
587 if (r) condattr_monotonic = NULL;
588 }
589#endif
590 pthread_key_create(&ruby_native_thread_key, NULL);
591 th->thread_id = pthread_self();
593 native_thread_init(th);
594 posix_signal(SIGVTALRM, null_func);
595}
596
597static void
598native_thread_init(rb_thread_t *th)
599{
600 native_thread_data_t *nd = &th->native_thread_data;
601
602#ifdef USE_UBF_LIST
603 list_node_init(&nd->node.ubf);
604#endif
605 rb_native_cond_initialize(&nd->cond.gvlq);
606 if (&nd->cond.gvlq != &nd->cond.intr)
607 rb_native_cond_initialize(&nd->cond.intr);
608 ruby_thread_set_native(th);
609}
610
611#ifndef USE_THREAD_CACHE
612#define USE_THREAD_CACHE 1
613#endif
614
615static void
616native_thread_destroy(rb_thread_t *th)
617{
618 native_thread_data_t *nd = &th->native_thread_data;
619
620 rb_native_cond_destroy(&nd->cond.gvlq);
621 if (&nd->cond.gvlq != &nd->cond.intr)
622 rb_native_cond_destroy(&nd->cond.intr);
623
624 /*
625 * prevent false positive from ruby_thread_has_gvl_p if that
626 * gets called from an interposing function wrapper
627 */
628 if (USE_THREAD_CACHE)
629 ruby_thread_set_native(0);
630}
631
632#if USE_THREAD_CACHE
633static rb_thread_t *register_cached_thread_and_wait(void *);
634#endif
635
636#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
637#define STACKADDR_AVAILABLE 1
638#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
639#define STACKADDR_AVAILABLE 1
640#undef MAINSTACKADDR_AVAILABLE
641#define MAINSTACKADDR_AVAILABLE 1
642void *pthread_get_stackaddr_np(pthread_t);
643size_t pthread_get_stacksize_np(pthread_t);
644#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
645#define STACKADDR_AVAILABLE 1
646#elif defined HAVE_PTHREAD_GETTHRDS_NP
647#define STACKADDR_AVAILABLE 1
648#elif defined __HAIKU__
649#define STACKADDR_AVAILABLE 1
650#endif
651
652#ifndef MAINSTACKADDR_AVAILABLE
653# ifdef STACKADDR_AVAILABLE
654# define MAINSTACKADDR_AVAILABLE 1
655# else
656# define MAINSTACKADDR_AVAILABLE 0
657# endif
658#endif
659#if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
660# define get_main_stack(addr, size) get_stack(addr, size)
661#endif
662
663#ifdef STACKADDR_AVAILABLE
664/*
665 * Get the initial address and size of current thread's stack
666 */
667static int
668get_stack(void **addr, size_t *size)
669{
670#define CHECK_ERR(expr) \
671 {int err = (expr); if (err) return err;}
672#ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
673 pthread_attr_t attr;
674 size_t guard = 0;
676 CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
677# ifdef HAVE_PTHREAD_ATTR_GETSTACK
678 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
679 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
680# else
681 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
682 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
683# endif
684# ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
685 CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
686 *size -= guard;
687# else
688 *size -= getpagesize();
689# endif
691#elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
692 pthread_attr_t attr;
693 CHECK_ERR(pthread_attr_init(&attr));
694 CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
695# ifdef HAVE_PTHREAD_ATTR_GETSTACK
696 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
697# else
698 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
699 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
700# endif
701 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
703#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* MacOS X */
704 pthread_t th = pthread_self();
705 *addr = pthread_get_stackaddr_np(th);
706 *size = pthread_get_stacksize_np(th);
707#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
708 stack_t stk;
709# if defined HAVE_THR_STKSEGMENT /* Solaris */
710 CHECK_ERR(thr_stksegment(&stk));
711# else /* OpenBSD */
712 CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
713# endif
714 *addr = stk.ss_sp;
715 *size = stk.ss_size;
716#elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
717 pthread_t th = pthread_self();
718 struct __pthrdsinfo thinfo;
719 char reg[256];
720 int regsiz=sizeof(reg);
721 CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
722 &thinfo, sizeof(thinfo),
723 &reg, &regsiz));
724 *addr = thinfo.__pi_stackaddr;
725 /* Must not use thinfo.__pi_stacksize for size.
726 It is around 3KB smaller than the correct size
727 calculated by thinfo.__pi_stackend - thinfo.__pi_stackaddr. */
728 *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
729 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
730#elif defined __HAIKU__
731 thread_info info;
733 CHECK_ERR(get_thread_info(find_thread(NULL), &info));
734 *addr = info.stack_base;
735 *size = (uintptr_t)info.stack_end - (uintptr_t)info.stack_base;
736 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
737#else
738#error STACKADDR_AVAILABLE is defined but not implemented.
739#endif
740 return 0;
741#undef CHECK_ERR
742}
743#endif
744
745static struct {
746 rb_nativethread_id_t id;
747 size_t stack_maxsize;
748 VALUE *stack_start;
749} native_main_thread;
750
751#ifdef STACK_END_ADDRESS
752extern void *STACK_END_ADDRESS;
753#endif
754
755enum {
756 RUBY_STACK_SPACE_LIMIT = 1024 * 1024, /* 1024KB */
757 RUBY_STACK_SPACE_RATIO = 5
758};
759
760static size_t
761space_size(size_t stack_size)
762{
763 size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
764 if (space_size > RUBY_STACK_SPACE_LIMIT) {
765 return RUBY_STACK_SPACE_LIMIT;
766 }
767 else {
768 return space_size;
769 }
770}
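/*
 * Worked example (not in the original source): with a typical 8 MiB thread
 * stack, stack_size / RUBY_STACK_SPACE_RATIO is about 1.6 MiB, which is then
 * capped to RUBY_STACK_SPACE_LIMIT, so roughly 1 MiB is held back as a safety
 * margin when native_thread_create() computes machine.stack_maxsize.
 */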
771
772#ifdef __linux__
773static __attribute__((noinline)) void
774reserve_stack(volatile char *limit, size_t size)
775{
776# ifdef C_ALLOCA
777# error needs alloca()
778# endif
779 struct rlimit rl;
780 volatile char buf[0x100];
781 enum {stack_check_margin = 0x1000}; /* for -fstack-check */
782
784
785 if (!getrlimit(RLIMIT_STACK, &rl) && rl.rlim_cur == RLIM_INFINITY)
786 return;
787
788 if (size < stack_check_margin) return;
789 size -= stack_check_margin;
790
791 size -= sizeof(buf); /* margin */
792 if (IS_STACK_DIR_UPPER()) {
793 const volatile char *end = buf + sizeof(buf);
794 limit += size;
795 if (limit > end) {
796 /* |<-bottom (=limit(a)) top->|
797 * | .. |<-buf 256B |<-end | stack check |
798 * | 256B | =size= | margin (4KB)|
799 * | =size= limit(b)->| 256B | |
800 * | | alloca(sz) | | |
801 * | .. |<-buf |<-limit(c) [sz-1]->0> | |
802 */
803 size_t sz = limit - end;
804 limit = alloca(sz);
805 limit[sz-1] = 0;
806 }
807 }
808 else {
809 limit -= size;
810 if (buf > limit) {
811 /* |<-top (=limit(a)) bottom->|
812 * | .. | 256B buf->| | stack check |
813 * | 256B | =size= | margin (4KB)|
814 * | =size= limit(b)->| 256B | |
815 * | | alloca(sz) | | |
816 * | .. | buf->| limit(c)-><0> | |
817 */
818 size_t sz = buf - limit;
819 limit = alloca(sz);
820 limit[0] = 0;
821 }
822 }
823}
824#else
825# define reserve_stack(limit, size) ((void)(limit), (void)(size))
826#endif
827
828#undef ruby_init_stack
829/* Set stack bottom of Ruby implementation.
830 *
831 * You must call this function before any heap allocation by Ruby implementation.
832 * Or GC will break living objects */
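/*
 * A minimal embedding sketch (illustrative, not part of this file): the
 * public RUBY_INIT_STACK macro expands to a ruby_init_stack() call on a
 * local variable, so the VM learns the main thread's stack bottom before
 * ruby_init() allocates anything:
 *
 *   int
 *   main(int argc, char **argv)
 *   {
 *       ruby_sysinit(&argc, &argv);
 *       {
 *           RUBY_INIT_STACK;
 *           ruby_init();
 *           return ruby_run_node(ruby_options(argc, argv));
 *       }
 *   }
 */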
833void
834ruby_init_stack(volatile VALUE *addr)
835{
836 native_main_thread.id = pthread_self();
837
838#if MAINSTACKADDR_AVAILABLE
839 if (native_main_thread.stack_maxsize) return;
840 {
841 void* stackaddr;
842 size_t size;
843 if (get_main_stack(&stackaddr, &size) == 0) {
844 native_main_thread.stack_maxsize = size;
845 native_main_thread.stack_start = stackaddr;
846 reserve_stack(stackaddr, size);
847 goto bound_check;
848 }
849 }
850#endif
851#ifdef STACK_END_ADDRESS
852 native_main_thread.stack_start = STACK_END_ADDRESS;
853#else
854 if (!native_main_thread.stack_start ||
855 STACK_UPPER((VALUE *)(void *)&addr,
856 native_main_thread.stack_start > addr,
857 native_main_thread.stack_start < addr)) {
858 native_main_thread.stack_start = (VALUE *)addr;
859 }
860#endif
861 {
862#if defined(HAVE_GETRLIMIT)
863#if defined(PTHREAD_STACK_DEFAULT)
864# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
865# error "PTHREAD_STACK_DEFAULT is too small"
866# endif
867 size_t size = PTHREAD_STACK_DEFAULT;
868#else
870#endif
871 size_t space;
872 int pagesize = getpagesize();
873 struct rlimit rlim;
875 if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
876 size = (size_t)rlim.rlim_cur;
877 }
878 addr = native_main_thread.stack_start;
879 if (IS_STACK_DIR_UPPER()) {
880 space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
881 }
882 else {
883 space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
884 }
885 native_main_thread.stack_maxsize = space;
886#endif
887 }
888
889#if MAINSTACKADDR_AVAILABLE
890 bound_check:
891#endif
892 /* If addr is out of range of main-thread stack range estimation, */
893 /* it should be on co-routine (alternative stack). [Feature #2294] */
894 {
895 void *start, *end;
897
898 if (IS_STACK_DIR_UPPER()) {
899 start = native_main_thread.stack_start;
900 end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
901 }
902 else {
903 start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
904 end = native_main_thread.stack_start;
905 }
906
907 if ((void *)addr < start || (void *)addr > end) {
908 /* out of range */
909 native_main_thread.stack_start = (VALUE *)addr;
910 native_main_thread.stack_maxsize = 0; /* unknown */
911 }
912 }
913}
914
915#define CHECK_ERR(expr) \
916 {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
917
918static int
919native_thread_init_stack(rb_thread_t *th)
920{
920{
921 rb_nativethread_id_t curr = pthread_self();
922
923 if (pthread_equal(curr, native_main_thread.id)) {
924 th->ec->machine.stack_start = native_main_thread.stack_start;
925 th->ec->machine.stack_maxsize = native_main_thread.stack_maxsize;
926 }
927 else {
928#ifdef STACKADDR_AVAILABLE
929 void *start;
930 size_t size;
931
932 if (get_stack(&start, &size) == 0) {
933 uintptr_t diff = (uintptr_t)start - (uintptr_t)&curr;
934 th->ec->machine.stack_start = (VALUE *)&curr;
935 th->ec->machine.stack_maxsize = size - diff;
936 }
937#else
938 rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
939#endif
940 }
941
942 return 0;
943}
944
945#ifndef __CYGWIN__
946#define USE_NATIVE_THREAD_INIT 1
947#endif
948
949static void *
950thread_start_func_1(void *th_ptr)
951{
952 rb_thread_t *th = th_ptr;
953 RB_ALTSTACK_INIT(void *altstack);
954#if USE_THREAD_CACHE
955 thread_start:
956#endif
957 {
958#if !defined USE_NATIVE_THREAD_INIT
959 VALUE stack_start;
960#endif
961
963#if defined USE_NATIVE_THREAD_INIT
964 native_thread_init_stack(th);
965#endif
966 native_thread_init(th);
967 /* run */
968#if defined USE_NATIVE_THREAD_INIT
969 thread_start_func_2(th, th->ec->machine.stack_start);
970#else
971 thread_start_func_2(th, &stack_start);
972#endif
973 }
974#if USE_THREAD_CACHE
975 /* cache thread */
976 if ((th = register_cached_thread_and_wait(RB_ALTSTACK(altstack))) != 0) {
977 goto thread_start;
978 }
979#else
980 RB_ALTSTACK_FREE(altstack);
981#endif
982 return 0;
983}
984
985struct cached_thread_entry {
986 rb_nativethread_cond_t cond;
987 rb_nativethread_id_t thread_id;
988 rb_thread_t *th;
989 void *altstack;
990 struct list_node node;
991};
992
993#if USE_THREAD_CACHE
994static rb_nativethread_lock_t thread_cache_lock = RB_NATIVETHREAD_LOCK_INIT;
995static LIST_HEAD(cached_thread_head);
996
997# if defined(HAVE_WORKING_FORK)
998static void
999thread_cache_reset(void)
1000{
1001 rb_native_mutex_initialize(&thread_cache_lock);
1002 list_head_init(&cached_thread_head);
1003}
1004# endif
1005
1006/*
1007 * number of seconds to cache for, I think 1-5s is sufficient to obviate
1008 * the need for thread pool in many network programs (taking into account
1009 * worst case network latency across the globe) without wasting memory
1010 */
1011#ifndef THREAD_CACHE_TIME
1012# define THREAD_CACHE_TIME ((rb_hrtime_t)3 * RB_HRTIME_PER_SEC)
1013#endif
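/*
 * Rough flow of the cache (descriptive note, not in the original source):
 * an exiting thread parks in register_cached_thread_and_wait() for up to
 * THREAD_CACHE_TIME; if use_cached_thread() hands it a new rb_thread_t
 * within that window, thread_start_func_1() loops via "goto thread_start"
 * instead of returning, so Thread.new can skip pthread_create() entirely.
 */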
1014
1015static rb_thread_t *
1016register_cached_thread_and_wait(void *altstack)
1017{
1018 rb_hrtime_t end = THREAD_CACHE_TIME;
1019 struct cached_thread_entry entry;
1020
1021 rb_native_cond_initialize(&entry.cond);
1022 entry.altstack = altstack;
1023 entry.th = NULL;
1024 entry.thread_id = pthread_self();
1025 end = native_cond_timeout(&entry.cond, end);
1026
1027 rb_native_mutex_lock(&thread_cache_lock);
1028 {
1029 list_add(&cached_thread_head, &entry.node);
1030
1031 native_cond_timedwait(&entry.cond, &thread_cache_lock, &end);
1032
1033 if (entry.th == NULL) { /* unused */
1034 list_del(&entry.node);
1035 }
1036 }
1037 rb_native_mutex_unlock(&thread_cache_lock);
1038
1039 rb_native_cond_destroy(&entry.cond);
1040 if (!entry.th) {
1041 RB_ALTSTACK_FREE(entry.altstack);
1042 }
1043
1044 return entry.th;
1045}
1046#else
1047# if defined(HAVE_WORKING_FORK)
1048static void thread_cache_reset(void) { }
1049# endif
1050#endif
1051
1052static int
1053use_cached_thread(rb_thread_t *th)
1054{
1055#if USE_THREAD_CACHE
1056 struct cached_thread_entry *entry;
1057
1058 rb_native_mutex_lock(&thread_cache_lock);
1059 entry = list_pop(&cached_thread_head, struct cached_thread_entry, node);
1060 if (entry) {
1061 entry->th = th;
1062 /* th->thread_id must be set before signal for Thread#name= */
1063 th->thread_id = entry->thread_id;
1065 rb_native_cond_signal(&entry->cond);
1066 }
1067 rb_native_mutex_unlock(&thread_cache_lock);
1068 return !!entry;
1069#endif
1070 return 0;
1071}
1072
1073static void
1074clear_thread_cache_altstack(void)
1075{
1076#if USE_THREAD_CACHE
1077 struct cached_thread_entry *entry;
1078
1079 rb_native_mutex_lock(&thread_cache_lock);
1080 list_for_each(&cached_thread_head, entry, node) {
1081 void MAYBE_UNUSED(*altstack) = entry->altstack;
1082 entry->altstack = 0;
1083 RB_ALTSTACK_FREE(altstack);
1084 }
1085 rb_native_mutex_unlock(&thread_cache_lock);
1086#endif
1087}
1088
1089static int
1090native_thread_create(rb_thread_t *th)
1091{
1092 int err = 0;
1093
1094 if (use_cached_thread(th)) {
1095 thread_debug("create (use cached thread): %p\n", (void *)th);
1096 }
1097 else {
1098 pthread_attr_t attr;
1099 const size_t stack_size = th->vm->default_params.thread_machine_stack_size + th->vm->default_params.thread_vm_stack_size;
1100 const size_t space = space_size(stack_size);
1101
1102 th->ec->machine.stack_maxsize = stack_size - space;
1103
1104 CHECK_ERR(pthread_attr_init(&attr));
1105
1106# ifdef PTHREAD_STACK_MIN
1107 thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
1108 CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
1109# endif
1110
1111# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
1112 CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
1113# endif
1115
1116 err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
1117 thread_debug("create: %p (%d)\n", (void *)th, err);
1118 /* should be done in the created thread */
1120 CHECK_ERR(pthread_attr_destroy(&attr));
1121 }
1122 return err;
1123}
1124
1125#if USE_NATIVE_THREAD_PRIORITY
1126
1127static void
1128native_thread_apply_priority(rb_thread_t *th)
1129{
1130#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
1131 struct sched_param sp;
1132 int policy;
1133 int priority = 0 - th->priority;
1134 int max, min;
1135 pthread_getschedparam(th->thread_id, &policy, &sp);
1136 max = sched_get_priority_max(policy);
1137 min = sched_get_priority_min(policy);
1138
1139 if (min > priority) {
1140 priority = min;
1141 }
1142 else if (max < priority) {
1143 priority = max;
1144 }
1145
1146 sp.sched_priority = priority;
1147 pthread_setschedparam(th->thread_id, policy, &sp);
1148#else
1149 /* not touched */
1150#endif
1151}
1152
1153#endif /* USE_NATIVE_THREAD_PRIORITY */
1154
1155static int
1156native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
1157{
1158 return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
1159}
1160
1161static void
1162ubf_pthread_cond_signal(void *ptr)
1163{
1164 rb_thread_t *th = (rb_thread_t *)ptr;
1165 thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);
1166 rb_native_cond_signal(&th->native_thread_data.cond.intr);
1167}
1168
1169static void
1170native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
1171{
1172 rb_nativethread_lock_t *lock = &th->interrupt_lock;
1173 rb_nativethread_cond_t *cond = &th->native_thread_data.cond.intr;
1174
1175 /* Solaris cond_timedwait() returns EINVAL if an argument is greater than
1176 * current_time + 100,000,000, so cap the relative timeout at 100,000,000
1177 * seconds. The early return is treated as a kind of spurious wakeup; the
1178 * callers of native_sleep should handle spurious wakeups.
1179 *
1180 * See also [Bug #1341] [ruby-core:29702]
1181 * http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
1182 */
1183 const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;
1184
1185 GVL_UNLOCK_BEGIN(th);
1186 {
1187 rb_native_mutex_lock(lock);
1188 th->unblock.func = ubf_pthread_cond_signal;
1189 th->unblock.arg = th;
1190
1191 if (RUBY_VM_INTERRUPTED(th->ec)) {
1192 /* interrupted. return immediate */
1193 thread_debug("native_sleep: interrupted before sleep\n");
1194 }
1195 else {
1196 if (!rel) {
1197 rb_native_cond_wait(cond, lock);
1198 }
1199 else {
1200 rb_hrtime_t end;
1201
1202 if (*rel > max) {
1203 *rel = max;
1204 }
1205
1206 end = native_cond_timeout(cond, *rel);
1207 native_cond_timedwait(cond, lock, &end);
1208 }
1209 }
1210 th->unblock.func = 0;
1211
1212 rb_native_mutex_unlock(lock);
1213 }
1214 GVL_UNLOCK_END(th);
1215
1216 thread_debug("native_sleep done\n");
1217}
1218
1219#ifdef USE_UBF_LIST
1220static LIST_HEAD(ubf_list_head);
1221 static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT;
1222
1223static void
1224ubf_list_atfork(void)
1225{
1226 list_head_init(&ubf_list_head);
1227 rb_native_mutex_initialize(&ubf_list_lock);
1228}
1229
1230/* The thread 'th' is registered as trying to unblock. */
1231static void
1232register_ubf_list(rb_thread_t *th)
1233{
1234 struct list_node *node = &th->native_thread_data.node.ubf;
1235
1236 if (list_empty((struct list_head*)node)) {
1237 rb_native_mutex_lock(&ubf_list_lock);
1238 list_add(&ubf_list_head, node);
1239 rb_native_mutex_unlock(&ubf_list_lock);
1240 }
1241}
1242
1243/* The thread 'th' is unblocked. It no longer needs to be registered. */
1244static void
1245unregister_ubf_list(rb_thread_t *th)
1246{
1247 struct list_node *node = &th->native_thread_data.node.ubf;
1248
1249 /* we can't allow re-entry into ubf_list_head */
1250 VM_ASSERT(th->unblock.func == 0);
1251
1252 if (!list_empty((struct list_head*)node)) {
1253 rb_native_mutex_lock(&ubf_list_lock);
1254 list_del_init(node);
1255 if (list_empty(&ubf_list_head) && !rb_signal_buff_size()) {
1256 ubf_timer_disarm();
1257 }
1258 rb_native_mutex_unlock(&ubf_list_lock);
1259 }
1260}
1261
1262/*
1263 * Send a signal so that the target thread returns from its blocking syscall.
1264 * Almost any signal would do, but we chose SIGVTALRM.
1265 */
1266static void
1267ubf_wakeup_thread(rb_thread_t *th)
1268{
1269 thread_debug("thread_wait_queue_wakeup (%"PRI_THREAD_ID")\n", thread_id_str(th));
1270 pthread_kill(th->thread_id, SIGVTALRM);
1271}
1272
1273static void
1274ubf_select(void *ptr)
1275{
1276 rb_thread_t *th = (rb_thread_t *)ptr;
1277 rb_vm_t *vm = th->vm;
1278 const rb_thread_t *cur = ruby_thread_from_native(); /* may be 0 */
1279
1280 register_ubf_list(th);
1281
1282 /*
1283 * ubf_wakeup_thread() doesn't guarantee that the target thread wakes up.
1284 * Therefore, we repeatedly call ubf_wakeup_thread() until the target thread
1285 * exits from the ubf function. We need a timer to perform this operation.
1286 * We use double-checked locking here because this function may be called
1287 * while vm->gvl.lock is held in do_gvl_timer.
1288 * There is also no need to start a timer if we're the designated
1289 * sigwait_th thread, otherwise we can deadlock with a thread
1290 * in unblock_function_clear.
1291 */
1292 if (cur != vm->gvl.timer && cur != sigwait_th) {
1293 /*
1294 * Double-checked locking above was to prevent nested locking
1295 * by the SAME thread. We use trylock here to prevent deadlocks
1296 * between DIFFERENT threads
1297 */
1298 if (native_mutex_trylock(&vm->gvl.lock) == 0) {
1299 if (!vm->gvl.timer) {
1300 rb_thread_wakeup_timer_thread(-1);
1301 }
1302 rb_native_mutex_unlock(&vm->gvl.lock);
1303 }
1304 }
1305
1306 ubf_wakeup_thread(th);
1307}
1308
1309static int
1310ubf_threads_empty(void)
1311{
1312 return list_empty(&ubf_list_head);
1313}
1314
1315static void
1316ubf_wakeup_all_threads(void)
1317{
1318 rb_thread_t *th;
1319 native_thread_data_t *dat;
1320
1321 if (!ubf_threads_empty()) {
1322 rb_native_mutex_lock(&ubf_list_lock);
1323 list_for_each(&ubf_list_head, dat, node.ubf) {
1324 th = container_of(dat, rb_thread_t, native_thread_data);
1325 ubf_wakeup_thread(th);
1326 }
1327 rb_native_mutex_unlock(&ubf_list_lock);
1328 }
1329}
1330
1331#else /* USE_UBF_LIST */
1332#define register_ubf_list(th) (void)(th)
1333#define unregister_ubf_list(th) (void)(th)
1334#define ubf_select 0
1335static void ubf_wakeup_all_threads(void) { return; }
1336static int ubf_threads_empty(void) { return 1; }
1337#define ubf_list_atfork() do {} while (0)
1338#endif /* USE_UBF_LIST */
1339
1340#define TT_DEBUG 0
1341#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
1342
1343static struct {
1344 /* pipes are closed in forked children when owner_process does not match */
1345 int normal[2]; /* [0] == sigwait_fd */
1346 int ub_main[2]; /* unblock main thread from native_ppoll_sleep */
1347
1348 /* volatile for signal handler use: */
1349 volatile rb_pid_t owner_process;
1350} signal_self_pipe = {
1351 {-1, -1},
1352 {-1, -1},
1353};
1354
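/*
 * Descriptive note (self-pipe/eventfd semantics, not in the original
 * source): the signal handler may only call async-signal-safe functions,
 * so it wakes the sleeping thread with a plain write(2) to this fd while
 * the sleeper blocks in ppoll(). With USE_EVENTFD the token is a 64-bit
 * counter increment and one read() drains every pending wakeup; with a
 * pipe each wakeup is a single byte that has to be drained individually.
 */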
1355/* only use signal-safe system calls here */
1356static void
1357rb_thread_wakeup_timer_thread_fd(int fd)
1358{
1359#if USE_EVENTFD
1360 const uint64_t buff = 1;
1361#else
1362 const char buff = '!';
1363#endif
1364 ssize_t result;
1365
1366 /* already opened */
1367 if (fd >= 0) {
1368 retry:
1369 if ((result = write(fd, &buff, sizeof(buff))) <= 0) {
1370 int e = errno;
1371 switch (e) {
1372 case EINTR: goto retry;
1373 case EAGAIN:
1374#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
1375 case EWOULDBLOCK:
1376#endif
1377 break;
1378 default:
1379 async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);
1380 }
1381 }
1382 if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
1383 }
1384 else {
1385 /* ignore wakeup */
1386 }
1387}
1388
1389/*
1390 * This ensures we get a SIGVTALRM in TIME_QUANTUM_MSEC if our
1391 * process could not react to the original signal in time.
1392 */
1393static void
1394ubf_timer_arm(rb_pid_t current) /* async signal safe */
1395{
1396#if UBF_TIMER == UBF_TIMER_POSIX
1397 if ((!current || timer_posix.owner == current) &&
1398 !ATOMIC_CAS(timer_posix.state, RTIMER_DISARM, RTIMER_ARMING)) {
1399 struct itimerspec it;
1400
1401 it.it_interval.tv_sec = it.it_value.tv_sec = 0;
1402 it.it_interval.tv_nsec = it.it_value.tv_nsec = TIME_QUANTUM_NSEC;
1403
1404 if (timer_settime(timer_posix.timerid, 0, &it, 0))
1405 rb_async_bug_errno("timer_settime (arm)", errno);
1406
1407 switch (ATOMIC_CAS(timer_posix.state, RTIMER_ARMING, RTIMER_ARMED)) {
1408 case RTIMER_DISARM:
1409 /* somebody requested a disarm while we were arming */
1410 /* may race harmlessly with ubf_timer_destroy */
1411 (void)timer_settime(timer_posix.timerid, 0, &zero, 0);
1412
1413 case RTIMER_ARMING: return; /* success */
1414 case RTIMER_ARMED:
1415 /*
1416 * it is possible to have another thread disarm, and
1417 * a third thread arm finish re-arming before we get
1418 * here, so we wasted a syscall with timer_settime but
1419 * probably unavoidable in a signal handler.
1420 */
1421 return;
1422 case RTIMER_DEAD:
1423 /* may race harmlessly with ubf_timer_destroy */
1424 (void)timer_settime(timer_posix.timerid, 0, &zero, 0);
1425 return;
1426 default:
1427 rb_async_bug_errno("UBF_TIMER_POSIX unknown state", ERANGE);
1428 }
1429 }
1430#elif UBF_TIMER == UBF_TIMER_PTHREAD
1431 if (!current || current == timer_pthread.owner) {
1432 if (ATOMIC_EXCHANGE(timer_pthread.armed, 1) == 0)
1433 rb_thread_wakeup_timer_thread_fd(timer_pthread.low[1]);
1434 }
1435#endif
1436}
1437
1438void
1439rb_thread_wakeup_timer_thread(int sig)
1440{
1441 rb_pid_t current;
1442
1443 /* non-sighandler path */
1444 if (sig <= 0) {
1445 rb_thread_wakeup_timer_thread_fd(signal_self_pipe.normal[1]);
1446 if (sig < 0) {
1447 ubf_timer_arm(0);
1448 }
1449 return;
1450 }
1451
1452 /* must be safe inside sighandler, so no mutex */
1453 current = getpid();
1454 if (signal_self_pipe.owner_process == current) {
1455 rb_thread_wakeup_timer_thread_fd(signal_self_pipe.normal[1]);
1456
1457 /*
1458 * system_working check is required because vm and main_thread are
1459 * freed during shutdown
1460 */
1461 if (system_working > 0) {
1462 volatile rb_execution_context_t *ec;
1463 rb_vm_t *vm = GET_VM();
1464 rb_thread_t *mth;
1465
1466 /*
1467 * FIXME: root VM and main_thread should be static and not
1468 * on heap for maximum safety (and startup/shutdown speed)
1469 */
1470 if (!vm) return;
1471 mth = vm->main_thread;
1472 if (!mth || system_working <= 0) return;
1473
1474 /* this relies on GC for grace period before cont_free */
1475 ec = ACCESS_ONCE(rb_execution_context_t *, mth->ec);
1476
1477 if (ec) {
1478 RUBY_VM_SET_TRAP_INTERRUPT(ec);
1479 ubf_timer_arm(current);
1480
1481 /* some ubfs can interrupt single-threaded process directly */
1482 if (vm->ubf_async_safe && mth->unblock.func) {
1483 (mth->unblock.func)(mth->unblock.arg);
1484 }
1485 }
1486 }
1487 }
1488}
1489
1490#define CLOSE_INVALIDATE_PAIR(expr) \
1491 close_invalidate_pair(expr,"close_invalidate: "#expr)
1492static void
1493close_invalidate(int *fdp, const char *msg)
1494{
1495 int fd = *fdp;
1496
1497 *fdp = -1;
1498 if (close(fd) < 0) {
1499 async_bug_fd(msg, errno, fd);
1500 }
1501}
1502
1503static void
1504close_invalidate_pair(int fds[2], const char *msg)
1505{
1506 if (USE_EVENTFD && fds[0] == fds[1]) {
1507 close_invalidate(&fds[0], msg);
1508 fds[1] = -1;
1509 }
1510 else {
1511 close_invalidate(&fds[0], msg);
1512 close_invalidate(&fds[1], msg);
1513 }
1514}
1515
1516static void
1517set_nonblock(int fd)
1518{
1519 int oflags;
1520 int err;
1521
1522 oflags = fcntl(fd, F_GETFL);
1523 if (oflags == -1)
1524 rb_sys_fail(0);
1525 oflags |= O_NONBLOCK;
1526 err = fcntl(fd, F_SETFL, oflags);
1527 if (err == -1)
1528 rb_sys_fail(0);
1529}
1530
1531/* communication pipe with timer thread and signal handler */
1532static int
1533setup_communication_pipe_internal(int pipes[2])
1534{
1535 int err;
1536
1537 if (pipes[0] >= 0 || pipes[1] >= 0) {
1538 VM_ASSERT(pipes[0] >= 0);
1539 VM_ASSERT(pipes[1] >= 0);
1540 return 0;
1541 }
1542
1543 /*
1544 * Don't bother with eventfd on ancient Linux 2.6.22..2.6.26 which were
1545 * missing EFD_* flags, they can fall back to pipe
1546 */
1547#if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
1548 pipes[0] = pipes[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC);
1549 if (pipes[0] >= 0) {
1550 rb_update_max_fd(pipes[0]);
1551 return 0;
1552 }
1553#endif
1554
1555 err = rb_cloexec_pipe(pipes);
1556 if (err != 0) {
1557 rb_warn("pipe creation failed for timer: %s, scheduling broken",
1558 strerror(errno));
1559 return -1;
1560 }
1561 rb_update_max_fd(pipes[0]);
1562 rb_update_max_fd(pipes[1]);
1563 set_nonblock(pipes[0]);
1564 set_nonblock(pipes[1]);
1565 return 0;
1566}
1567
1568#if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
1569# define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
1570#endif
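/*
 * Note on Linux prctl(2) behavior (not stated in the original source):
 * PR_SET_NAME keeps at most 15 bytes plus a terminating NUL, which is why
 * native_set_thread_name() below builds the name in a char buf[16].
 */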
1571
1572static VALUE threadptr_invoke_proc_location(rb_thread_t *th);
1573
1574static void
1575native_set_thread_name(rb_thread_t *th)
1576{
1577#ifdef SET_CURRENT_THREAD_NAME
1578 VALUE loc;
1579 if (!NIL_P(loc = th->name)) {
1580 SET_CURRENT_THREAD_NAME(RSTRING_PTR(loc));
1581 }
1582 else if ((loc = threadptr_invoke_proc_location(th)) != Qnil) {
1583 char *name, *p;
1584 char buf[16];
1585 size_t len;
1586 int n;
1587
1588 name = RSTRING_PTR(RARRAY_AREF(loc, 0));
1589 p = strrchr(name, '/'); /* show only the basename of the path. */
1590 if (p && p[1])
1591 name = p + 1;
1592
1593 n = snprintf(buf, sizeof(buf), "%s:%d", name, NUM2INT(RARRAY_AREF(loc, 1)));
1594 rb_gc_force_recycle(loc); /* acts as a GC guard, too */
1595
1596 len = (size_t)n;
1597 if (len >= sizeof(buf)) {
1598 buf[sizeof(buf)-2] = '*';
1599 buf[sizeof(buf)-1] = '\0';
1600 }
1601 SET_CURRENT_THREAD_NAME(buf);
1602 }
1603#endif
1604}
1605
1606static VALUE
1607native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
1608{
1609#ifdef SET_ANOTHER_THREAD_NAME
1610 const char *s = "";
1611 if (!NIL_P(name)) s = RSTRING_PTR(name);
1612 SET_ANOTHER_THREAD_NAME(thread_id, s);
1613#endif
1614 return name;
1615}
1616
1617static void
1618ubf_timer_invalidate(void)
1619{
1620#if UBF_TIMER == UBF_TIMER_PTHREAD
1621 CLOSE_INVALIDATE_PAIR(timer_pthread.low);
1622#endif
1623}
1624
1625static void
1626ubf_timer_pthread_create(rb_pid_t current)
1627{
1628#if UBF_TIMER == UBF_TIMER_PTHREAD
1629 int err;
1630 if (timer_pthread.owner == current)
1631 return;
1632
1633 if (setup_communication_pipe_internal(timer_pthread.low) < 0)
1634 return;
1635
1636 err = pthread_create(&timer_pthread.thid, 0, timer_pthread_fn, GET_VM());
1637 if (!err)
1638 timer_pthread.owner = current;
1639 else
1640 rb_warn("pthread_create failed for timer: %s, signals racy",
1641 strerror(err));
1642#endif
1643}
1644
1645static void
1646ubf_timer_create(rb_pid_t current)
1647{
1648#if UBF_TIMER == UBF_TIMER_POSIX
1649# if defined(__sun)
1650# define UBF_TIMER_CLOCK CLOCK_REALTIME
1651# else /* Tested Linux and FreeBSD: */
1652# define UBF_TIMER_CLOCK CLOCK_MONOTONIC
1653# endif
1654
1655 struct sigevent sev;
1656
1657 sev.sigev_notify = SIGEV_SIGNAL;
1658 sev.sigev_signo = SIGVTALRM;
1659 sev.sigev_value.sival_ptr = &timer_posix;
1660
1661 if (!timer_create(UBF_TIMER_CLOCK, &sev, &timer_posix.timerid)) {
1662 rb_atomic_t prev = ATOMIC_EXCHANGE(timer_posix.state, RTIMER_DISARM);
1663
1664 if (prev != RTIMER_DEAD) {
1665 rb_bug("timer_posix was not dead: %u\n", (unsigned)prev);
1666 }
1667 timer_posix.owner = current;
1668 }
1669 else {
1670 rb_warn("timer_create failed: %s, signals racy", strerror(errno));
1671 }
1672#endif
1673 if (UBF_TIMER == UBF_TIMER_PTHREAD)
1674 ubf_timer_pthread_create(current);
1675}
1676
1677static void
1678rb_thread_create_timer_thread(void)
1679{
1680 /* we only create the pipe, and lazy-spawn */
1681 rb_pid_t current = getpid();
1682 rb_pid_t owner = signal_self_pipe.owner_process;
1683
1684 if (owner && owner != current) {
1685 CLOSE_INVALIDATE_PAIR(signal_self_pipe.normal);
1686 CLOSE_INVALIDATE_PAIR(signal_self_pipe.ub_main);
1687 ubf_timer_invalidate();
1688 }
1689
1690 if (setup_communication_pipe_internal(signal_self_pipe.normal) < 0) return;
1691 if (setup_communication_pipe_internal(signal_self_pipe.ub_main) < 0) return;
1692
1693 ubf_timer_create(current);
1694 if (owner != current) {
1695 /* validate pipe on this process */
1696 sigwait_th = THREAD_INVALID;
1697 signal_self_pipe.owner_process = current;
1698 }
1699}
1700
1701static void
1702ubf_timer_disarm(void)
1703{
1704#if UBF_TIMER == UBF_TIMER_POSIX
1705 rb_atomic_t prev;
1706
1707 prev = ATOMIC_CAS(timer_posix.state, RTIMER_ARMED, RTIMER_DISARM);
1708 switch (prev) {
1709 case RTIMER_DISARM: return; /* likely */
1710 case RTIMER_ARMING: return; /* ubf_timer_arm will disarm itself */
1711 case RTIMER_ARMED:
1712 if (timer_settime(timer_posix.timerid, 0, &zero, 0)) {
1713 int err = errno;
1714
1715 if (err == EINVAL) {
1716 prev = ATOMIC_CAS(timer_posix.state, RTIMER_DISARM, RTIMER_DISARM);
1717
1718 /* main thread may have killed the timer */
1719 if (prev == RTIMER_DEAD) return;
1720
1721 rb_bug_errno("timer_settime (disarm)", err);
1722 }
1723 }
1724 return;
1725 case RTIMER_DEAD: return; /* stay dead */
1726 default:
1727 rb_bug("UBF_TIMER_POSIX bad state: %u\n", (unsigned)prev);
1728 }
1729
1730#elif UBF_TIMER == UBF_TIMER_PTHREAD
1731 ATOMIC_SET(timer_pthread.armed, 0);
1732#endif
1733}
1734
1735static void
1736ubf_timer_destroy(void)
1737{
1738#if UBF_TIMER == UBF_TIMER_POSIX
1739 if (timer_posix.owner == getpid()) {
1740 rb_atomic_t expect = RTIMER_DISARM;
1741 size_t i, max = 10000000;
1742
1743 /* prevent signal handler from arming: */
1744 for (i = 0; i < max; i++) {
1745 switch (ATOMIC_CAS(timer_posix.state, expect, RTIMER_DEAD)) {
1746 case RTIMER_DISARM:
1747 if (expect == RTIMER_DISARM) goto done;
1748 expect = RTIMER_DISARM;
1749 break;
1750 case RTIMER_ARMING:
1751 native_thread_yield(); /* let another thread finish arming */
1752 expect = RTIMER_ARMED;
1753 break;
1754 case RTIMER_ARMED:
1755 if (expect == RTIMER_ARMED) {
1756 if (timer_settime(timer_posix.timerid, 0, &zero, 0))
1757 rb_bug_errno("timer_settime (destroy)", errno);
1758 goto done;
1759 }
1760 expect = RTIMER_ARMED;
1761 break;
1762 case RTIMER_DEAD:
1763 rb_bug("RTIMER_DEAD unexpected");
1764 }
1765 }
1766 rb_bug("timed out waiting for timer to arm");
1767done:
1768 if (timer_delete(timer_posix.timerid) < 0)
1769 rb_sys_fail("timer_delete");
1770
1771 VM_ASSERT(ATOMIC_EXCHANGE(timer_posix.state, RTIMER_DEAD) == RTIMER_DEAD);
1772 }
1773#elif UBF_TIMER == UBF_TIMER_PTHREAD
1774 int err;
1775
1776 timer_pthread.owner = 0;
1777 ubf_timer_disarm();
1778 rb_thread_wakeup_timer_thread_fd(timer_pthread.low[1]);
1779 err = pthread_join(timer_pthread.thid, 0);
1780 if (err) {
1781 rb_raise(rb_eThreadError, "native_thread_join() failed (%d)", err);
1782 }
1783#endif
1784}
1785
1786static int
1787native_stop_timer_thread(void)
1788{
1789 int stopped;
1790 stopped = --system_working <= 0;
1791 if (stopped)
1792 ubf_timer_destroy();
1793
1794 if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
1795 return stopped;
1796}
1797
1798static void
1799native_reset_timer_thread(void)
1800{
1801 if (TT_DEBUG) fprintf(stderr, "reset timer thread\n");
1802}
1803
1804#ifdef HAVE_SIGALTSTACK
1805int
1806ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
1807{
1808 void *base;
1809 size_t size;
1810 const size_t water_mark = 1024 * 1024;
1812
1813#ifdef STACKADDR_AVAILABLE
1814 if (get_stack(&base, &size) == 0) {
1815# ifdef __APPLE__
1816 if (pthread_equal(th->thread_id, native_main_thread.id)) {
1817 struct rlimit rlim;
1818 if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
1819 size = (size_t)rlim.rlim_cur;
1820 }
1821 }
1822# endif
1823 base = (char *)base + STACK_DIR_UPPER(+size, -size);
1824 }
1825 else
1826#endif
1827 if (th) {
1828 size = th->ec->machine.stack_maxsize;
1829 base = (char *)th->ec->machine.stack_start - STACK_DIR_UPPER(0, size);
1830 }
1831 else {
1832 return 0;
1833 }
1834 size /= RUBY_STACK_SPACE_RATIO;
1835 if (size > water_mark) size = water_mark;
1836 if (IS_STACK_DIR_UPPER()) {
1837 if (size > ~(size_t)base+1) size = ~(size_t)base+1;
1838 if (addr > base && addr <= (void *)((char *)base + size)) return 1;
1839 }
1840 else {
1841 if (size > (size_t)base) size = (size_t)base;
1842 if (addr > (void *)((char *)base - size) && addr <= base) return 1;
1843 }
1844 return 0;
1845}
1846#endif
1847
1848int
1849rb_reserved_fd_p(int fd)
1850{
1851 /* no false-positive if out-of-FD at startup */
1852 if (fd < 0)
1853 return 0;
1854
1855#if UBF_TIMER == UBF_TIMER_PTHREAD
1856 if (fd == timer_pthread.low[0] || fd == timer_pthread.low[1])
1857 goto check_pid;
1858#endif
1859 if (fd == signal_self_pipe.normal[0] || fd == signal_self_pipe.normal[1])
1860 goto check_pid;
1861 if (fd == signal_self_pipe.ub_main[0] || fd == signal_self_pipe.ub_main[1])
1862 goto check_pid;
1863 return 0;
1864check_pid:
1865 if (signal_self_pipe.owner_process == getpid()) /* async-signal-safe */
1866 return 1;
1867 return 0;
1868}
1869
1870rb_nativethread_id_t
1871rb_nativethread_self(void)
1872{
1873 return pthread_self();
1874}
1875
1876#if USE_MJIT
1877/* A function that wraps actual worker function, for pthread abstraction. */
1878static void *
1879mjit_worker(void *arg)
1880{
1881 void (*worker_func)(void) = (void(*)(void))arg;
1882
1883#ifdef SET_CURRENT_THREAD_NAME
1884 SET_CURRENT_THREAD_NAME("ruby-mjitworker"); /* 16 byte including NUL */
1885#endif
1886 worker_func();
1887 return NULL;
1888}
1889
1890/* Launch MJIT thread. Returns FALSE if it fails to create thread. */
1891int
1892rb_thread_create_mjit_thread(void (*worker_func)(void))
1893{
1894 pthread_attr_t attr;
1895 pthread_t worker_pid;
1896 int ret = FALSE;
1897
1898 if (pthread_attr_init(&attr) != 0) return ret;
1899
1900 /* jit_worker thread is not to be joined */
1901 if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0
1902 && pthread_create(&worker_pid, &attr, mjit_worker, (void *)worker_func) == 0) {
1903 ret = TRUE;
1904 }
1905 pthread_attr_destroy(&attr);
1906 return ret;
1907}
1908#endif
1909
1910int
1911rb_sigwait_fd_get(const rb_thread_t *th)
1912{
1913 if (signal_self_pipe.normal[0] >= 0) {
1914 VM_ASSERT(signal_self_pipe.owner_process == getpid());
1915 /*
1916 * no need to keep firing the timer if any thread is sleeping
1917 * on the signal self-pipe
1918 */
1919 ubf_timer_disarm();
1920
1921 if (ATOMIC_PTR_CAS(sigwait_th, THREAD_INVALID, th) == THREAD_INVALID) {
1922 return signal_self_pipe.normal[0];
1923 }
1924 }
1925 return -1; /* avoid thundering herd and work stealing/starvation */
1926}
1927
1928void
1929rb_sigwait_fd_put(const rb_thread_t *th, int fd)
1930{
1931 const rb_thread_t *old;
1932
1933 VM_ASSERT(signal_self_pipe.normal[0] == fd);
1934 old = ATOMIC_PTR_EXCHANGE(sigwait_th, THREAD_INVALID);
1935 if (old != th) assert(old == th);
1936}
1937
1938#ifndef HAVE_PPOLL
1939/* TODO: don't ignore sigmask */
1940static int
1941ruby_ppoll(struct pollfd *fds, nfds_t nfds,
1942 const struct timespec *ts, const sigset_t *sigmask)
1943{
1944 int timeout_ms;
1945
1946 if (ts) {
1947 int tmp, tmp2;
1948
1949 if (ts->tv_sec > INT_MAX/1000)
1950 timeout_ms = INT_MAX;
1951 else {
1952 tmp = (int)(ts->tv_sec * 1000);
1953 /* round up 1ns to 1ms to avoid excessive wakeups for <1ms sleep */
1954 tmp2 = (int)((ts->tv_nsec + 999999L) / (1000L * 1000L));
1955 if (INT_MAX - tmp < tmp2)
1956 timeout_ms = INT_MAX;
1957 else
1958 timeout_ms = (int)(tmp + tmp2);
1959 }
1960 }
1961 else
1962 timeout_ms = -1;
1963
1964 return poll(fds, nfds, timeout_ms);
1965}
1966# define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))
1967#endif
1968
1969void
1970rb_sigwait_sleep(rb_thread_t *th, int sigwait_fd, const rb_hrtime_t *rel)
1971{
1972 struct pollfd pfd;
1973 struct timespec ts;
1974
1975 pfd.fd = sigwait_fd;
1976 pfd.events = POLLIN;
1977
1978 if (!BUSY_WAIT_SIGNALS && ubf_threads_empty()) {
1979 (void)ppoll(&pfd, 1, rb_hrtime2timespec(&ts, rel), 0);
1980 check_signals_nogvl(th, sigwait_fd);
1981 }
1982 else {
1983 rb_hrtime_t to = RB_HRTIME_MAX, end;
1984 int n = 0;
1985
1986 if (rel) {
1987 to = *rel;
1988 end = rb_hrtime_add(rb_hrtime_now(), to);
1989 }
1990 /*
1991 * tricky: this needs to return on spurious wakeup (no auto-retry).
1992 * But we also need to distinguish between periodic quantum
1993 * wakeups, so we care about the result of consume_communication_pipe
1994 *
1995 * We want to avoid spurious wakeup for Mutex#sleep compatibility
1996 * [ruby-core:88102]
1997 */
1998 for (;;) {
1999 const rb_hrtime_t *sto = sigwait_timeout(th, sigwait_fd, &to, &n);
2000
2001 if (n) return;
2002 n = ppoll(&pfd, 1, rb_hrtime2timespec(&ts, sto), 0);
2003 if (check_signals_nogvl(th, sigwait_fd))
2004 return;
2005 if (n || (th && RUBY_VM_INTERRUPTED(th->ec)))
2006 return;
2007 if (rel && hrtime_update_expire(&to, end))
2008 return;
2009 }
2010 }
2011}
2012
2013/*
2014 * we need to guarantee wakeups from native_ppoll_sleep because
2015 * ubf_select may not be going through ubf_list if other threads
2016 * are all sleeping.
2017 */
2018static void
2019ubf_ppoll_sleep(void *ignore)
2020{
2021 rb_thread_wakeup_timer_thread_fd(signal_self_pipe.ub_main[1]);
2022}
2023
2024/*
2025 * Single CPU setups benefit from explicit sched_yield() before ppoll(),
2026 * since threads may be too starved to enter the GVL waitqueue for
2027 * us to detect contention. Instead, we want to kick other threads
2028 * so they can run and possibly prevent us from entering slow paths
2029 * in ppoll() or similar syscalls.
2030 *
2031 * Confirmed on FreeBSD 11.2 and Linux 4.19.
2032 * [ruby-core:90417] [Bug #15398]
2033 */
2034#define GVL_UNLOCK_BEGIN_YIELD(th) do { \
2035 const native_thread_data_t *next; \
2036 rb_vm_t *vm = th->vm; \
2037 RB_GC_SAVE_MACHINE_CONTEXT(th); \
2038 rb_native_mutex_lock(&vm->gvl.lock); \
2039 next = gvl_release_common(vm); \
2040 rb_native_mutex_unlock(&vm->gvl.lock); \
2041 if (!next && vm_living_thread_num(vm) > 1) { \
2042 native_thread_yield(); \
2043 }
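/*
 * Descriptive note (not in the original source): this macro intentionally
 * leaves the block opened by "do {" unterminated; it must always be paired
 * with GVL_UNLOCK_END(th) (defined in thread.c), which re-acquires the GVL
 * and supplies the closing "} while (0)".
 */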
2044
2045/*
2046 * This function does not exclusively acquire sigwait_fd, so it
2047 * cannot safely read from it. However, it can be woken up in
2048 * 4 ways:
2049 *
2050 * 1) ubf_ppoll_sleep (from another thread)
2051 * 2) rb_thread_wakeup_timer_thread (from signal handler)
2052 * 3) any unmasked signal hitting the process
2053 * 4) periodic ubf timer wakeups (after 3)
2054 */
2055static void
2056native_ppoll_sleep(rb_thread_t *th, rb_hrtime_t *rel)
2057{
2058 rb_native_mutex_lock(&th->interrupt_lock);
2059 th->unblock.func = ubf_ppoll_sleep;
2060 rb_native_mutex_unlock(&th->interrupt_lock);
2061
2062 GVL_UNLOCK_BEGIN_YIELD(th);
2063
2064 if (!RUBY_VM_INTERRUPTED(th->ec)) {
2065 struct pollfd pfd[2];
2066 struct timespec ts;
2067
2068 pfd[0].fd = signal_self_pipe.normal[0]; /* sigwait_fd */
2069 pfd[1].fd = signal_self_pipe.ub_main[0];
2070 pfd[0].events = pfd[1].events = POLLIN;
2071 if (ppoll(pfd, 2, rb_hrtime2timespec(&ts, rel), 0) > 0) {
2072 if (pfd[1].revents & POLLIN) {
2073 (void)consume_communication_pipe(pfd[1].fd);
2074 }
2075 }
2076 /*
2077 * do not read from sigwait_fd here; let uplevel callers or other
2078 * threads do that, otherwise we may steal wakeups from and starve
2079 * other threads
2080 */
2081 }
2082 unblock_function_clear(th);
2083 GVL_UNLOCK_END(th);
2084}
2085
2086static void
2087native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
2088{
2089 int sigwait_fd = rb_sigwait_fd_get(th);
2090
2091 if (sigwait_fd >= 0) {
2092 rb_native_mutex_lock(&th->interrupt_lock);
2093 th->unblock.func = ubf_sigwait;
2094 rb_native_mutex_unlock(&th->interrupt_lock);
2095
2096 GVL_UNLOCK_BEGIN_YIELD(th);
2097
2098 if (!RUBY_VM_INTERRUPTED(th->ec)) {
2099 rb_sigwait_sleep(th, sigwait_fd, rel);
2100 }
2101 else {
2102 check_signals_nogvl(th, sigwait_fd);
2103 }
2104 unblock_function_clear(th);
2105 GVL_UNLOCK_END(th);
2106 rb_sigwait_fd_put(th, sigwait_fd);
2107 rb_sigwait_fd_migrate(th->vm);
2108 }
2109 else if (th == th->vm->main_thread) { /* always able to handle signals */
2110 native_ppoll_sleep(th, rel);
2111 }
2112 else {
2113 native_cond_sleep(th, rel);
2114 }
2115}
2116
2117#if UBF_TIMER == UBF_TIMER_PTHREAD
2118static void *
2119timer_pthread_fn(void *p)
2120{
2121 rb_vm_t *vm = p;
2122 pthread_t main_thread_id = vm->main_thread->thread_id;
2123 struct pollfd pfd;
2124 int timeout = -1;
2125 int ccp;
2126
2127 pfd.fd = timer_pthread.low[0];
2128 pfd.events = POLLIN;
2129
2130 while (system_working > 0) {
2131 (void)poll(&pfd, 1, timeout);
2132 ccp = consume_communication_pipe(pfd.fd);
2133
2134 if (system_working > 0) {
2135 if (ATOMIC_CAS(timer_pthread.armed, 1, 1)) {
2136 pthread_kill(main_thread_id, SIGVTALRM);
2137
2138 if (rb_signal_buff_size() || !ubf_threads_empty()) {
2139 timeout = TIME_QUANTUM_MSEC;
2140 }
2141 else {
2142 ATOMIC_SET(timer_pthread.armed, 0);
2143 timeout = -1;
2144 }
2145 }
2146 else if (ccp) {
2147 pthread_kill(main_thread_id, SIGVTALRM);
2148 ATOMIC_SET(timer_pthread.armed, 0);
2149 timeout = -1;
2150 }
2151 }
2152 }
2153
2154 return 0;
2155}
2156#endif /* UBF_TIMER_PTHREAD */
2157
2158static VALUE
2159ubf_caller(void *ignore)
2160{
2161 rb_thread_sleep_forever();
2162
2163 return Qfalse;
2164}
2165
2166/*
2167 * Called if and only if one thread is running, and
2168 * the unblock function is NOT async-signal-safe
2169 * This assumes USE_THREAD_CACHE is true for performance reasons
2170 */
2171static VALUE
2172rb_thread_start_unblock_thread(void)
2173{
2174 return rb_thread_create(ubf_caller, 0);
2175}
2176#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */
#define NUM2INT(x)
#define RUBY_VM_THREAD_VM_STACK_SIZE
int pthread_setspecific(pthread_key_t, const void *)
int pthread_attr_setinheritsched(pthread_attr_t *, int)
#define GET_THREAD()
int pthread_attr_setdetachstate(pthread_attr_t *, int)
#define list_empty(h)
int rb_reserved_fd_p(int fd)
int pthread_join(pthread_t, void **)
#define INT_MAX
VALUE ID VALUE old
#define ERANGE
#define rb_pid_t
int pthread_mutex_lock(pthread_mutex_t *)
#define list_tail(h, type, member)
#define TRUE
#define FALSE
unsigned int size
#define SET_CURRENT_THREAD_NAME(name)
long unsigned int size_t
int pthread_kill(pthread_t, int)
#define UNLIKELY(x)
int pthread_getattr_np(pthread_t, pthread_attr_t *)
struct rb_call_cache buf
#define ACCESS_ONCE(type, x)
__uintptr_t uintptr_t
void ruby_init_stack(volatile VALUE *)
void exit(int __status) __attribute__((__noreturn__))
#define Qnil
#define Qfalse
int pthread_condattr_init(pthread_condattr_t *)
#define list_for_each(h, i, member)
int pthread_attr_destroy(pthread_attr_t *)
__timer_t timer_t
int pthread_cond_destroy(pthread_cond_t *)
#define container_of(member_ptr, containing_type, member)
void rb_thread_wakeup_timer_thread(int)
pid_t getpid(void)
int pthread_cond_broadcast(pthread_cond_t *)
void rb_gc_force_recycle(VALUE)
Definition: gc.c:7027
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
#define ATOMIC_PTR_CAS(var, oldval, newval)
_ssize_t ssize_t
__inline__ int
int timer_delete(timer_t timerid)
int sched_get_priority_max(int __policy)
#define STACK_UPPER(x, a, b)
#define assert
int timer_create(clockid_t clock_id, struct sigevent *__restrict__ evp, timer_t *__restrict__ timerid)
int pthread_getschedparam(pthread_t, int *, struct sched_param *)
#define RB_ALTSTACK(var)
#define STACK_GROW_DIR_DETECTION
#define IS_STACK_DIR_UPPER()
rb_control_frame_t * __attribute__((__fastcall__)) *rb_insn_func_t)(rb_execution_context_t *
int rb_fd_select(int, rb_fdset_t *, rb_fdset_t *, rb_fdset_t *, struct timeval *)
int rb_signal_buff_size(void)
Definition: signal.c:726
#define list_add(h, n)
int sched_get_priority_min(int __policy)
int pthread_attr_getstacksize(const pthread_attr_t *, size_t *)
VALUE ID id
#define ATOMIC_CAS(var, oldval, newval)
int pthread_equal(pthread_t, pthread_t)
#define RARRAY_AREF(a, i)
#define SET_ANOTHER_THREAD_NAME(thid, name)
#define EAGAIN
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)
int pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *)
void rb_timespec_now(struct timespec *)
Definition: time.c:1873
_ssize_t write(int __fd, const void *__buf, size_t __nbyte)
void ruby_sigchld_handler(rb_vm_t *vm)
Definition: signal.c:1073
struct timespec it_interval
struct native_thread_data_struct::@43 cond
union native_thread_data_struct::@42 node
struct rb_execution_context_struct::@55 machine
rb_nativethread_cond_t switch_wait_cond
const struct rb_thread_struct * owner
rb_nativethread_cond_t switch_cond
const struct rb_thread_struct * timer
rb_execution_context_t * ec
struct rb_unblock_callback unblock
native_thread_data_t native_thread_data
rb_nativethread_id_t thread_id
rb_nativethread_lock_t interrupt_lock
rb_unblock_function_t * func
rb_global_vm_lock_t gvl
struct rb_thread_struct * main_thread
struct rb_vm_struct::@52 default_params
#define PRI_THREAD_ID
Definition: thread.c:337
#define GVL_UNLOCK_BEGIN(th)
Definition: thread.c:170
#define USE_EVENTFD
Definition: thread.c:383
#define thread_id_str(th)
Definition: thread.c:336
#define GVL_UNLOCK_END(th)
Definition: thread.c:174
#define fill_thread_id_str(th)
Definition: thread.c:335
#define thread_debug
Definition: thread.c:330
#define BUSY_WAIT_SIGNALS
Definition: thread.c:379
#define O_NONBLOCK
Definition: win32.h:611
int fcntl(int, int,...)
Definition: win32.c:4312
#define F_SETFL
Definition: win32.h:608