Ruby 2.7.6p219 (2022-04-12 revision c9c2245c0a25176072e02db9254f0e0c84c805cd)
thread_win32.c
Go to the documentation of this file.
1/* -*-c-*- */
2/**********************************************************************
3
4 thread_win32.c -
5
6 $Author$
7
8 Copyright (C) 2004-2007 Koichi Sasada
9
10**********************************************************************/
11
12#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
13
14#include <process.h>
15
16#define TIME_QUANTUM_USEC (10 * 1000)
17#define RB_CONDATTR_CLOCK_MONOTONIC 1 /* no effect */
18
19#undef Sleep
20
21#define native_thread_yield() Sleep(0)
22#define unregister_ubf_list(th)
23#define ubf_wakeup_all_threads() do {} while (0)
24#define ubf_threads_empty() (1)
25#define ubf_timer_disarm() do {} while (0)
26#define ubf_list_atfork() do {} while (0)
27
28static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;
29
30static int w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th);
33
34static void
35w32_error(const char *func)
36{
37 LPVOID lpMsgBuf;
38 DWORD err = GetLastError();
39 if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
40 FORMAT_MESSAGE_FROM_SYSTEM |
41 FORMAT_MESSAGE_IGNORE_INSERTS,
42 NULL,
43 err,
44 MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
45 (LPTSTR) & lpMsgBuf, 0, NULL) == 0)
46 FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
47 FORMAT_MESSAGE_FROM_SYSTEM |
48 FORMAT_MESSAGE_IGNORE_INSERTS,
49 NULL,
50 err,
51 MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
52 (LPTSTR) & lpMsgBuf, 0, NULL);
53 rb_bug("%s: %s", func, (char*)lpMsgBuf);
54}
55
/*
 * Block until `lock` (a Win32 mutex HANDLE) is acquired via
 * w32_wait_events() with no owning Ruby thread.  An "interrupt"
 * result (WAIT_OBJECT_0 + 1) still counts as acquired but sets
 * errno = EINTR.  Returns 0; aborts via rb_bug() on WAIT_ABANDONED
 * or an unexpected wait result.
 */
static int
w32_mutex_lock(HANDLE lock)
{
    DWORD result;
    while (1) {
        thread_debug("rb_native_mutex_lock: %p\n", lock);
        result = w32_wait_events(&lock, 1, INFINITE, 0);
        switch (result) {
          case WAIT_OBJECT_0:
            /* get mutex object */
            thread_debug("acquire mutex: %p\n", lock);
            return 0;
          case WAIT_OBJECT_0 + 1:
            /* interrupt */
            errno = EINTR;
            thread_debug("acquire mutex interrupted: %p\n", lock);
            return 0;
          case WAIT_TIMEOUT:
            /* cannot happen with INFINITE, but loop again defensively */
            thread_debug("timeout mutex: %p\n", lock);
            break;
          case WAIT_ABANDONED:
            rb_bug("win32_mutex_lock: WAIT_ABANDONED");
            break;
          default:
            rb_bug("win32_mutex_lock: unknown result (%ld)", result);
            break;
        }
    }
    return 0;
}
86
87static HANDLE
88w32_mutex_create(void)
89{
90 HANDLE lock = CreateMutex(NULL, FALSE, NULL);
91 if (lock == NULL) {
92 w32_error("rb_native_mutex_initialize");
93 }
94 return lock;
95}
96
97#define GVL_DEBUG 0
98
99static void
100gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
101{
102 w32_mutex_lock(vm->gvl.lock);
103 if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
104}
105
/* Drop the Giant VM Lock (plain mutex release, no bookkeeping). */
static void
gvl_release(rb_vm_t *vm)
{
    ReleaseMutex(vm->gvl.lock);
}
111
/*
 * Let other Ruby threads run: release the GVL, yield the remainder of
 * the time slice (Sleep(0)), then re-acquire the GVL.
 * NOTE(review): releases th->vm->gvl but re-acquires vm->gvl; the two
 * are presumably the same VM -- confirm at call sites.
 */
static void
gvl_yield(rb_vm_t *vm, rb_thread_t *th)
{
    gvl_release(th->vm);
    native_thread_yield();
    gvl_acquire(vm, th);
}
119
120static void
121gvl_init(rb_vm_t *vm)
122{
123 if (GVL_DEBUG) fprintf(stderr, "gvl init\n");
124 vm->gvl.lock = w32_mutex_create();
125}
126
127static void
128gvl_destroy(rb_vm_t *vm)
129{
130 if (GVL_DEBUG) fprintf(stderr, "gvl destroy\n");
131 CloseHandle(vm->gvl.lock);
132}
133
/* Look up the rb_thread_t recorded for the calling OS thread, or NULL
 * if this thread was never registered. */
static rb_thread_t *
ruby_thread_from_native(void)
{
    return TlsGetValue(ruby_native_thread_key);
}
139
/* Bind `th` to the calling OS thread via the TLS slot; nonzero on success. */
static int
ruby_thread_set_native(rb_thread_t *th)
{
    return TlsSetValue(ruby_native_thread_key, th);
}
145
146void
148{
149 ruby_native_thread_key = TlsAlloc();
150 ruby_thread_set_native(th);
151 DuplicateHandle(GetCurrentProcess(),
152 GetCurrentThread(),
153 GetCurrentProcess(),
154 &th->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);
155
156 th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
157
158 thread_debug("initial thread (th: %p, thid: %p, event: %p)\n",
159 th, GET_THREAD()->thread_id,
161}
162
/*
 * Core wait primitive: wait on up to `count` HANDLEs for `timeout`
 * milliseconds.  If `th` is given, its interrupt event is appended to
 * the wait set so ubf_handle() can wake the thread; when the wait
 * completes via that extra handle, errno is set to EINTR.
 * Returns the raw WaitForMultipleObjects() result.
 */
static int
w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
{
    HANDLE *targets = events;
    HANDLE intr;
    const int initcount = count;
    DWORD ret;

    thread_debug(" w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
                 events, count, timeout, th);
    if (th && (intr = th->native_thread_data.interrupt_event)) {
        /* reset the event, then re-signal it if an interrupt is already
         * pending so the wait below returns immediately */
        if (ResetEvent(intr) && (!RUBY_VM_INTERRUPTED(th->ec) || SetEvent(intr))) {
            targets = ALLOCA_N(HANDLE, count + 1);
            memcpy(targets, events, sizeof(HANDLE) * count);

            targets[count++] = intr;
            thread_debug(" * handle: %p (count: %d, intr)\n", intr, count);
        }
        else if (intr == th->native_thread_data.interrupt_event) {
            /* ResetEvent/SetEvent failed on a still-valid event handle */
            w32_error("w32_wait_events");
        }
    }

    thread_debug(" WaitForMultipleObjects start (count: %d)\n", count);
    ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
    thread_debug(" WaitForMultipleObjects end (ret: %lu)\n", ret);

    /* woken by the appended interrupt event, not by a caller handle */
    if (ret == (DWORD)(WAIT_OBJECT_0 + initcount) && th) {
        errno = EINTR;
    }
    if (ret == WAIT_FAILED && THREAD_DEBUG) {
        int i;
        DWORD dmy;
        for (i = 0; i < count; i++) {
            thread_debug(" * error handle %d - %s\n", i,
                         GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
        }
    }
    return ret;
}
203
204static void ubf_handle(void *ptr);
205#define ubf_select ubf_handle
206
/* Wait on `events` for the calling Ruby thread; intended to run inside
 * a blocking region (GVL released by the caller). */
int
rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
{
    return w32_wait_events(events, num, timeout, ruby_thread_from_native());
}
212
/* Public wait entry point: perform the blocking wait inside a Ruby
 * blocking region so the GVL is released and ubf_handle() can wake the
 * thread. */
int
rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
{
    int ret;
    rb_thread_t *th = GET_THREAD();

    BLOCKING_REGION(th, ret = rb_w32_wait_events_blocking(events, num, timeout),
                    ubf_handle, ruby_thread_from_native(), FALSE);
    return ret;
}
223
224static void
225w32_close_handle(HANDLE handle)
226{
227 if (CloseHandle(handle) == 0) {
228 w32_error("w32_close_handle");
229 }
230}
231
232static void
233w32_resume_thread(HANDLE handle)
234{
235 if (ResumeThread(handle) == (DWORD)-1) {
236 w32_error("w32_resume_thread");
237 }
238}
239
240#ifdef _MSC_VER
241#define HAVE__BEGINTHREADEX 1
242#else
243#undef HAVE__BEGINTHREADEX
244#endif
245
246#ifdef HAVE__BEGINTHREADEX
247#define start_thread (HANDLE)_beginthreadex
248#define thread_errno errno
249typedef unsigned long (__stdcall *w32_thread_start_func)(void*);
250#else
251#define start_thread CreateThread
252#define thread_errno rb_w32_map_errno(GetLastError())
253typedef LPTHREAD_START_ROUTINE w32_thread_start_func;
254#endif
255
/* Create a native thread in the suspended state; `stack_size` is a
 * reservation, not an immediate commit. */
static HANDLE
w32_create_thread(DWORD stack_size, w32_thread_start_func func, void *val)
{
    return start_thread(0, stack_size, func, val, CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 0);
}
261
/* Interruptible sleep for the calling Ruby thread (no handles waited;
 * only the interrupt event can end it early, with EINTR). */
int
rb_w32_sleep(unsigned long msec)
{
    return w32_wait_events(0, 0, msec, ruby_thread_from_native());
}
267
/* Sleep() replacement that releases the GVL for the duration and stays
 * interruptible through ubf_handle(). */
int WINAPI
rb_w32_Sleep(unsigned long msec)
{
    int ret;
    rb_thread_t *th = GET_THREAD();

    BLOCKING_REGION(th, ret = rb_w32_sleep(msec),
                    ubf_handle, ruby_thread_from_native(), FALSE);
    return ret;
}
278
279static DWORD
280hrtime2msec(rb_hrtime_t hrt)
281{
282 return (DWORD)hrt / (DWORD)RB_HRTIME_PER_MSEC;
283}
284
285static void
286native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
287{
288 const volatile DWORD msec = rel ? hrtime2msec(*rel) : INFINITE;
289
291 {
292 DWORD ret;
293
295 th->unblock.func = ubf_handle;
296 th->unblock.arg = th;
298
299 if (RUBY_VM_INTERRUPTED(th->ec)) {
300 /* interrupted. return immediate */
301 }
302 else {
303 thread_debug("native_sleep start (%lu)\n", msec);
304 ret = w32_wait_events(0, 0, msec, th);
305 thread_debug("native_sleep done (%lu)\n", ret);
306 }
307
309 th->unblock.func = 0;
310 th->unblock.arg = 0;
312 }
313 GVL_UNLOCK_END(th);
314}
315
316void
318{
319#if USE_WIN32_MUTEX
320 w32_mutex_lock(lock->mutex);
321#else
322 EnterCriticalSection(&lock->crit);
323#endif
324}
325
326void
328{
329#if USE_WIN32_MUTEX
330 thread_debug("release mutex: %p\n", lock->mutex);
331 ReleaseMutex(lock->mutex);
332#else
333 LeaveCriticalSection(&lock->crit);
334#endif
335}
336
/*
 * Try to take the lock without blocking indefinitely.  Returns 0 on
 * success, EBUSY when already held, EINVAL on an unexpected wait
 * result.  The 1 ms timeout is the shortest non-zero wait used here
 * for the Win32 mutex flavor.
 */
static int
native_mutex_trylock(rb_nativethread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    int result;
    thread_debug("native_mutex_trylock: %p\n", lock->mutex);
    result = w32_wait_events(&lock->mutex, 1, 1, 0);
    thread_debug("native_mutex_trylock result: %d\n", result);
    switch (result) {
      case WAIT_OBJECT_0:
        return 0;
      case WAIT_TIMEOUT:
        return EBUSY;
    }
    return EINVAL;
#else
    return TryEnterCriticalSection(&lock->crit) == 0;
#endif
}
356
357void
359{
360#if USE_WIN32_MUTEX
361 lock->mutex = w32_mutex_create();
362 /* thread_debug("initialize mutex: %p\n", lock->mutex); */
363#else
364 InitializeCriticalSection(&lock->crit);
365#endif
366}
367
368void
370{
371#if USE_WIN32_MUTEX
372 w32_close_handle(lock->mutex);
373#else
374 DeleteCriticalSection(&lock->crit);
375#endif
376}
377
/*
 * Node in the doubly-linked circular waiter list that implements
 * condition variables; the rb_nativethread_cond_t itself serves as the
 * list head.  Each waiter owns a per-call `event` that signallers set.
 */
struct cond_event_entry {
    struct cond_event_entry* next;
    struct cond_event_entry* prev;
    HANDLE event;
};
383
384void
386{
387 /* cond is guarded by mutex */
388 struct cond_event_entry *e = cond->next;
389 struct cond_event_entry *head = (struct cond_event_entry*)cond;
390
391 if (e != head) {
392 struct cond_event_entry *next = e->next;
393 struct cond_event_entry *prev = e->prev;
394
395 prev->next = next;
396 next->prev = prev;
397 e->next = e->prev = e;
398
399 SetEvent(e->event);
400 }
401}
402
403void
405{
406 /* cond is guarded by mutex */
407 struct cond_event_entry *e = cond->next;
408 struct cond_event_entry *head = (struct cond_event_entry*)cond;
409
410 while (e != head) {
411 struct cond_event_entry *next = e->next;
412 struct cond_event_entry *prev = e->prev;
413
414 SetEvent(e->event);
415
416 prev->next = next;
417 next->prev = prev;
418 e->next = e->prev = e;
419
420 e = next;
421 }
422}
423
424static int
425native_cond_timedwait_ms(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
426{
427 DWORD r;
428 struct cond_event_entry entry;
429 struct cond_event_entry *head = (struct cond_event_entry*)cond;
430
431 entry.event = CreateEvent(0, FALSE, FALSE, 0);
432
433 /* cond is guarded by mutex */
434 entry.next = head;
435 entry.prev = head->prev;
436 head->prev->next = &entry;
437 head->prev = &entry;
438
440 {
441 r = WaitForSingleObject(entry.event, msec);
442 if ((r != WAIT_OBJECT_0) && (r != WAIT_TIMEOUT)) {
443 rb_bug("rb_native_cond_wait: WaitForSingleObject returns %lu", r);
444 }
445 }
447
448 entry.prev->next = entry.next;
449 entry.next->prev = entry.prev;
450
451 w32_close_handle(entry.event);
452 return (r == WAIT_OBJECT_0) ? 0 : ETIMEDOUT;
453}
454
455void
457{
458 native_cond_timedwait_ms(cond, mutex, INFINITE);
459}
460
#if 0
/* Dead code kept for reference: absolute-deadline condvar helpers.
 * The live implementation uses native_cond_timedwait_ms() with
 * relative millisecond timeouts instead. */

/* Convert an absolute timespec deadline into a relative timeout in
 * milliseconds from now (0 when the deadline has already passed). */
static unsigned long
abs_timespec_to_timeout_ms(const struct timespec *ts)
{
    struct timeval tv;
    struct timeval now;

    gettimeofday(&now, NULL);
    tv.tv_sec = ts->tv_sec;
    tv.tv_usec = ts->tv_nsec / 1000;

    if (!rb_w32_time_subtract(&tv, &now))
        return 0;

    return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
}

/* Deadline-based wait built on the relative-timeout primitive. */
static int
native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, const struct timespec *ts)
{
    unsigned long timeout_ms;

    timeout_ms = abs_timespec_to_timeout_ms(ts);
    if (!timeout_ms)
        return ETIMEDOUT;

    return native_cond_timedwait_ms(cond, mutex, timeout_ms);
}

/* Compute now + timeout_rel as an absolute timespec, clamping to
 * TIMET_MAX when the seconds field overflows. */
static struct timespec
native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
{
    int ret;
    struct timeval tv;
    struct timespec timeout;
    struct timespec now;

    ret = gettimeofday(&tv, 0);
    if (ret != 0)
        rb_sys_fail(0);
    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    /* normalize the nanosecond field */
    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;

    return timeout;
}
#endif
520
521void
523{
524 cond->next = (struct cond_event_entry *)cond;
525 cond->prev = (struct cond_event_entry *)cond;
526}
527
528void
530{
531 /* */
532}
533
/* No-op on Windows: stack bounds come from VirtualQuery() in
 * native_thread_init_stack(), not from a user-supplied address. */
void
ruby_init_stack(volatile VALUE *addr)
{
}
538
539#define CHECK_ERR(expr) \
540 {if (!(expr)) {rb_bug("err: %lu - %s", GetLastError(), #expr);}}
541
/*
 * Discover the current thread's stack region by VirtualQuery()ing a
 * local variable, and record its top and usable size in
 * th->ec->machine.  A margin of 1/5 of the region (capped at 1 MiB) is
 * subtracted from the usable size as headroom.
 */
static void
native_thread_init_stack(rb_thread_t *th)
{
    MEMORY_BASIC_INFORMATION mi;
    char *base, *end;
    DWORD size, space;

    CHECK_ERR(VirtualQuery(&mi, &mi, sizeof(mi)));
    base = mi.AllocationBase;
    end = mi.BaseAddress;
    end += mi.RegionSize;
    size = end - base;
    space = size / 5;
    if (space > 1024*1024) space = 1024*1024; /* cap the margin at 1 MiB */
    th->ec->machine.stack_start = (VALUE *)end - 1;
    th->ec->machine.stack_maxsize = size - space;
}
559
560#ifndef InterlockedExchangePointer
561#define InterlockedExchangePointer(t, v) \
562 (void *)InterlockedExchange((long *)(t), (long)(v))
563#endif
/*
 * Tear down per-thread native state: atomically swap the interrupt
 * event handle out of native_thread_data (so no stale pointer remains)
 * and close it.
 */
static void
native_thread_destroy(rb_thread_t *th)
{
    HANDLE intr = InterlockedExchangePointer(&th->native_thread_data.interrupt_event, 0);
    thread_debug("close handle - intr: %p, thid: %p\n", intr, th->thread_id);
    w32_close_handle(intr);
}
571
572static unsigned long __stdcall
573thread_start_func_1(void *th_ptr)
574{
575 rb_thread_t *th = th_ptr;
576 volatile HANDLE thread_id = th->thread_id;
577
578 native_thread_init_stack(th);
579 th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
580
581 /* run */
582 thread_debug("thread created (th: %p, thid: %p, event: %p)\n", th,
584
585 thread_start_func_2(th, th->ec->machine.stack_start);
586
587 w32_close_handle(thread_id);
588 thread_debug("thread deleted (th: %p)\n", th);
589 return 0;
590}
591
592static int
593native_thread_create(rb_thread_t *th)
594{
596 th->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);
597
598 if ((th->thread_id) == 0) {
599 return thread_errno;
600 }
601
602 w32_resume_thread(th->thread_id);
603
604 if (THREAD_DEBUG) {
605 Sleep(0);
606 thread_debug("create: (th: %p, thid: %p, intr: %p), stack size: %"PRIuSIZE"\n",
607 th, th->thread_id,
608 th->native_thread_data.interrupt_event, stack_size);
609 }
610 return 0;
611}
612
/* Block (uninterruptibly: no rb_thread_t passed) until the native
 * thread handle is signalled, i.e. the thread has exited. */
static void
native_thread_join(HANDLE th)
{
    w32_wait_events(&th, 1, INFINITE, 0);
}
618
619#if USE_NATIVE_THREAD_PRIORITY
620
621static void
622native_thread_apply_priority(rb_thread_t *th)
623{
624 int priority = th->priority;
625 if (th->priority > 0) {
626 priority = THREAD_PRIORITY_ABOVE_NORMAL;
627 }
628 else if (th->priority < 0) {
629 priority = THREAD_PRIORITY_BELOW_NORMAL;
630 }
631 else {
632 priority = THREAD_PRIORITY_NORMAL;
633 }
634
635 SetThreadPriority(th->thread_id, priority);
636}
637
638#endif /* USE_NATIVE_THREAD_PRIORITY */
639
640int rb_w32_select_with_thread(int, fd_set *, fd_set *, fd_set *, struct timeval *, void *); /* @internal */
641
642static int
643native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
644{
645 fd_set *r = NULL, *w = NULL, *e = NULL;
646 if (readfds) {
647 rb_fd_resize(n - 1, readfds);
648 r = rb_fd_ptr(readfds);
649 }
650 if (writefds) {
651 rb_fd_resize(n - 1, writefds);
652 w = rb_fd_ptr(writefds);
653 }
654 if (exceptfds) {
655 rb_fd_resize(n - 1, exceptfds);
656 e = rb_fd_ptr(exceptfds);
657 }
658 return rb_w32_select_with_thread(n, r, w, e, timeout, th);
659}
660
/* @internal */
/*
 * Zero-timeout poll for a pending interrupt on `th`.
 * NOTE(review): signature restored from this file's reference index
 * (int rb_w32_check_interrupt(void *)); the scrape dropped it.
 */
int
rb_w32_check_interrupt(void *th)
{
    return w32_wait_events(0, 0, 0, th);
}
667
/* Unblock function: signal the target thread's interrupt event so any
 * w32_wait_events() it is blocked in returns (with EINTR). */
static void
ubf_handle(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    thread_debug("ubf_handle: %p\n", th);

    if (!SetEvent(th->native_thread_data.interrupt_event)) {
        w32_error("ubf_handle");
    }
}
678
679int rb_w32_set_thread_description(HANDLE th, const WCHAR *name);
681#define native_set_another_thread_name rb_w32_set_thread_description_str
682
/* Timer-thread singleton: `id` is the thread handle (0 when absent),
 * `lock` is an event used to request shutdown. */
static struct {
    HANDLE id;
    HANDLE lock;
} timer_thread;
#define TIMER_THREAD_CREATED_P() (timer_thread.id != 0)
688
689static unsigned long __stdcall
690timer_thread_func(void *dummy)
691{
692 rb_vm_t *vm = GET_VM();
693 thread_debug("timer_thread\n");
694 rb_w32_set_thread_description(GetCurrentThread(), L"ruby-timer-thread");
695 while (WaitForSingleObject(timer_thread.lock, TIME_QUANTUM_USEC/1000) ==
696 WAIT_TIMEOUT) {
697 timer_thread_function();
698 ruby_sigchld_handler(vm); /* probably no-op */
700 }
701 thread_debug("timer killed\n");
702 return 0;
703}
704
/*
 * No-op on Windows: the timer thread wakes itself by timeout and needs
 * no explicit kick.
 * NOTE(review): signature restored from this file's reference index
 * (void rb_thread_wakeup_timer_thread(int)); the scrape dropped it.
 */
void
rb_thread_wakeup_timer_thread(int sig)
{
    /* do nothing */
}
710
/* Windows uses no separate unblock thread; report "not started". */
static VALUE
rb_thread_start_unblock_thread(void)
{
    return Qfalse; /* no-op */
}
716
/* Lazily create the shutdown event and start the timer thread (tiny
 * stack; slightly larger when debug output is compiled in). */
static void
rb_thread_create_timer_thread(void)
{
    if (timer_thread.id == 0) {
        if (!timer_thread.lock) {
            timer_thread.lock = CreateEvent(0, TRUE, FALSE, 0);
        }
        timer_thread.id = w32_create_thread(1024 + (THREAD_DEBUG ? BUFSIZ : 0),
                                            timer_thread_func, 0);
        w32_resume_thread(timer_thread.id);
    }
}
729
/* Decrement system_working; when it reaches zero, signal the timer
 * thread to exit, join it, and release the shutdown event.
 * Returns nonzero iff the timer thread was actually stopped. */
static int
native_stop_timer_thread(void)
{
    int stopped = --system_working <= 0;
    if (stopped) {
        SetEvent(timer_thread.lock);
        native_thread_join(timer_thread.id);
        CloseHandle(timer_thread.lock);
        timer_thread.lock = 0;
    }
    return stopped;
}
742
743static void
744native_reset_timer_thread(void)
745{
746 if (timer_thread.id) {
747 CloseHandle(timer_thread.id);
748 timer_thread.id = 0;
749 }
750}
751
752int
753ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
754{
756}
757
758#if defined(__MINGW32__)
759LONG WINAPI
760rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *exception)
761{
762 if (exception->ExceptionRecord->ExceptionCode == EXCEPTION_STACK_OVERFLOW) {
764 raise(SIGSEGV);
765 }
766 return EXCEPTION_CONTINUE_SEARCH;
767}
768#endif
769
770#ifdef RUBY_ALLOCA_CHKSTK
771void
772ruby_alloca_chkstk(size_t len, void *sp)
773{
774 if (ruby_stack_length(NULL) * sizeof(VALUE) >= len) {
779 }
780 }
781}
782#endif
/* Ruby reserves no file descriptors for internal use on Windows, so
 * this predicate is constantly false. */
int
rb_reserved_fd_p(int fd)
{
    (void)fd; /* unused */
    return 0;
}
788
789int
791{
792 return -1; /* TODO */
793}
794
796void
798{
799 rb_bug("not implemented, should not be called");
800}
801
NORETURN(void rb_sigwait_sleep(const rb_thread_t *, int, const rb_hrtime_t *));
/* Sigwait-based sleeping is not supported on Windows; reaching this is
 * a bug (rb_bug() never returns, matching the NORETURN declaration). */
void
rb_sigwait_sleep(const rb_thread_t *th, int fd, const rb_hrtime_t *rel)
{
    rb_bug("not implemented, should not be called");
}
808
811{
812 return GetCurrentThread();
813}
814
/* No-op here: thread naming on Windows goes through
 * rb_w32_set_thread_description() at the call sites that need it. */
static void
native_set_thread_name(rb_thread_t *th)
{
}
819
820#if USE_MJIT
/* Thread body for the MJIT worker: name the thread, then run the
 * worker loop passed via `arg` until it returns. */
static unsigned long __stdcall
mjit_worker(void *arg)
{
    void (*worker_func)(void) = arg;
    rb_w32_set_thread_description(GetCurrentThread(), L"ruby-mjitworker");
    worker_func();
    return 0;
}
829
830/* Launch MJIT thread. Returns FALSE if it fails to create thread. */
831int
832rb_thread_create_mjit_thread(void (*worker_func)(void))
833{
834 size_t stack_size = 4 * 1024; /* 4KB is the minimum commit size */
835 HANDLE thread_id = w32_create_thread(stack_size, mjit_worker, worker_func);
836 if (thread_id == 0) {
837 return FALSE;
838 }
839
840 w32_resume_thread(thread_id);
841 return TRUE;
842}
843#endif
844
845#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */
int errno
#define L(x)
Definition: asm.h:125
struct RIMemo * ptr
Definition: debug.c:65
int count
Definition: encoding.c:57
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition: eval.c:668
void rb_bug(const char *fmt,...)
Definition: error.c:636
void rb_sys_fail(const char *mesg)
Definition: error.c:2795
#define RB_HRTIME_PER_MSEC
Definition: hrtime.h:36
uint64_t rb_hrtime_t
Definition: hrtime.h:47
#define rb_fd_resize(n, f)
Definition: intern.h:410
void mjit_worker(void)
Definition: mjit_worker.c:1195
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
void rb_native_cond_initialize(rb_nativethread_cond_t *cond)
void rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
void rb_native_cond_destroy(rb_nativethread_cond_t *cond)
void rb_native_cond_signal(rb_nativethread_cond_t *cond)
void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
const char * name
Definition: nkf.c:208
void rb_sigwait_fd_put(const rb_thread_t *, int fd)
int rb_sigwait_fd_get(const rb_thread_t *)
void rb_sigwait_sleep(const rb_thread_t *, int fd, const rb_hrtime_t *)
#define NULL
rb_nativethread_id_t rb_nativethread_self()
#define rb_ec_raised_p(ec, f)
#define ALLOCA_N(type, n)
#define RUBY_VM_INTERRUPTED(ec)
void rb_threadptr_check_signal(rb_thread_t *mth)
Definition: thread.c:4317
void Init_native_thread(rb_thread_t *th)
#define PRIuSIZE
#define EINVAL
#define rb_ec_raised_set(ec, f)
#define GET_EC()
#define EINTR
int fprintf(FILE *__restrict__, const char *__restrict__,...) __attribute__((__format__(__printf__
const char size_t n
#define ETIMEDOUT
unsigned long VALUE
#define stderr
#define GET_VM()
uint32_t i
#define BUFSIZ
__inline__ const void *__restrict__ size_t len
#define NORETURN(x)
#define sysstack_error
#define EBUSY
#define long
#define GET_THREAD()
int rb_reserved_fd_p(int fd)
#define TRUE
#define FALSE
unsigned int size
void ruby_init_stack(volatile VALUE *)
#define TIMET_MAX
#define Qfalse
void * memcpy(void *__restrict__, const void *__restrict__, size_t)
void rb_thread_wakeup_timer_thread(int)
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
#define SIGSEGV
VALUE ID id
#define rb_fd_ptr(f)
size_t ruby_stack_length(VALUE **)
Definition: gc.c:4647
void ruby_sigchld_handler(rb_vm_t *vm)
Definition: signal.c:1073
struct rb_execution_context_struct::@55 machine
rb_execution_context_t * ec
struct rb_unblock_callback unblock
native_thread_data_t native_thread_data
rb_nativethread_id_t thread_id
rb_nativethread_lock_t interrupt_lock
rb_unblock_function_t * func
rb_global_vm_lock_t gvl
struct rb_thread_struct * main_thread
struct rb_vm_struct::@52 default_params
#define GVL_UNLOCK_BEGIN(th)
Definition: thread.c:170
#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted)
Definition: thread.c:188
#define THREAD_DEBUG
Definition: thread.c:92
#define GVL_UNLOCK_END(th)
Definition: thread.c:174
#define thread_debug
Definition: thread.c:330
WINBASEAPI BOOL WINAPI TryEnterCriticalSection(IN OUT LPCRITICAL_SECTION lpCriticalSection)
int rb_w32_select_with_thread(int nfds, fd_set *rd, fd_set *wr, fd_set *ex, struct timeval *timeout, void *th)
Definition: win32.c:3143
int rb_w32_check_interrupt(void *)
int rb_w32_set_thread_description(HANDLE th, const WCHAR *name)
Definition: win32.c:8099
int rb_w32_set_thread_description_str(HANDLE th, VALUE name)
Definition: win32.c:8116
int rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
int gettimeofday(struct timeval *, struct timezone *)
Definition: win32.c:4628
int rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
int rb_w32_sleep(unsigned long msec)
int rb_w32_time_subtract(struct timeval *rest, const struct timeval *wait)
Definition: win32.c:3104
int WINAPI rb_w32_Sleep(unsigned long msec)
IUnknown DWORD
Definition: win32ole.c:33