Ruby 2.7.6p219 (2022-04-12 revision c9c2245c0a25176072e02db9254f0e0c84c805cd)
thread_sync.c
Go to the documentation of this file.
1/* included by thread.c */
2#include "ccan/list/list.h"
3
4static VALUE rb_cMutex, rb_cQueue, rb_cSizedQueue, rb_cConditionVariable;
5static VALUE rb_eClosedQueueError;
6
7/* sync_waiter is always on-stack */
/* One blocked thread's entry on a mutex/queue/condvar wait list. */
struct sync_waiter {
    /* NOTE(review): member lines are missing from this rendering — upstream
     * carries the waiting rb_thread_t pointer and an embedded list node;
     * confirm against the original thread_sync.c. */
};
12
13#define MUTEX_ALLOW_TRAP FL_USER1
14
/* Wake up to +max+ live threads parked on +head+; LONG_MAX means "all". */
static void
sync_wakeup(struct list_head *head, long max)
{
    struct sync_waiter *cur = 0, *next;

    list_for_each_safe(head, cur, next, node) {
        /* unlink first so a re-sleeping thread can re-add itself safely */
        list_del_init(&cur->node);
        if (cur->th->status != THREAD_KILLED) {
            /* NOTE(review): the statement that actually wakes the thread
             * (rb_threadptr_interrupt and status update) is missing from
             * this rendering — confirm against upstream thread_sync.c. */
            if (--max == 0) return;
        }
    }
}
29
/* Wake at most a single waiter queued on +head+. */
static void
wakeup_one(struct list_head *head)
{
    /* delegate to the generic helper with a wake budget of one */
    sync_wakeup(head, 1);
}
35
/* Wake every waiter queued on +head+. */
static void
wakeup_all(struct list_head *head)
{
    /* LONG_MAX as the budget effectively means "no limit" */
    sync_wakeup(head, LONG_MAX);
}
41
42/* Mutex */
43
/* Internal Mutex representation wrapped by the Mutex TypedData object. */
typedef struct rb_mutex_struct {
    struct list_head waitq; /* protected by GVL */
    /* NOTE(review): remaining members (owning thread pointer, next_mutex
     * link used by th->keeping_mutexes) and the closing "} rb_mutex_t;" are
     * missing from this rendering — confirm against upstream. */

50#if defined(HAVE_WORKING_FORK)
51static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
52static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th);
53static void rb_mutex_abandon_locking_mutex(rb_thread_t *th);
54#endif
55static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th);
56
57/*
58 * Document-class: Mutex
59 *
60 * Mutex implements a simple semaphore that can be used to coordinate access to
61 * shared data from multiple concurrent threads.
62 *
63 * Example:
64 *
65 * semaphore = Mutex.new
66 *
67 * a = Thread.new {
68 * semaphore.synchronize {
69 * # access shared resource
70 * }
71 * }
72 *
73 * b = Thread.new {
74 * semaphore.synchronize {
75 * # access shared resource
76 * }
77 * }
78 *
79 */
80
81#define mutex_mark NULL
82
83static size_t
84rb_mutex_num_waiting(rb_mutex_t *mutex)
85{
86 struct sync_waiter *w = 0;
87 size_t n = 0;
88
89 list_for_each(&mutex->waitq, w, node) {
90 n++;
91 }
92
93 return n;
94}
95
/* TypedData free callback: force-unlock a still-held mutex before freeing. */
static void
mutex_free(void *ptr)
{
    rb_mutex_t *mutex = ptr;
    if (mutex->th) {
        /* rb_warn("free locked mutex"); */
        const char *err = rb_mutex_unlock_th(mutex, mutex->th);
        if (err) rb_bug("%s", err); /* should be impossible for a valid mutex */
    }
    /* NOTE(review): the ruby_xfree(ptr) call is missing from this
     * rendering — confirm against upstream thread_sync.c. */
}
107
108static size_t
109mutex_memsize(const void *ptr)
110{
111 return sizeof(rb_mutex_t);
112}
113
/* TypedData descriptor for Mutex (mutex_mark is NULL: nothing to mark). */
static const rb_data_type_t mutex_data_type = {
    "mutex",
    {mutex_mark, mutex_free, mutex_memsize,},
    /* NOTE(review): trailing aggregate members (parent/data/flags) are
     * missing from this rendering — confirm against upstream. */
};
119
120static rb_mutex_t *
121mutex_ptr(VALUE obj)
122{
123 rb_mutex_t *mutex;
124
125 TypedData_Get_Struct(obj, rb_mutex_t, &mutex_data_type, mutex);
126
127 return mutex;
128}
129
/* Public predicate: is +obj+ a Mutex instance (by TypedData kind)?
 * NOTE(review): the declarator line, presumably
 * "rb_obj_is_mutex(VALUE obj)", is missing from this rendering. */
VALUE
{
    if (rb_typeddata_is_kind_of(obj, &mutex_data_type)) {
        return Qtrue;
    }
    else {
        return Qfalse;
    }
}
140
141static VALUE
142mutex_alloc(VALUE klass)
143{
144 VALUE obj;
145 rb_mutex_t *mutex;
146
147 obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
148 list_head_init(&mutex->waitq);
149 return obj;
150}
151
152/*
153 * call-seq:
154 * Mutex.new -> mutex
155 *
156 * Creates a new Mutex
157 */
158static VALUE
159mutex_initialize(VALUE self)
160{
161 return self;
162}
163
/* C API: allocate a new Mutex instance.
 * NOTE(review): the declarator line, presumably "rb_mutex_new(void)",
 * is missing from this rendering. */
VALUE
{
    return mutex_alloc(rb_cMutex);
}
169
170/*
171 * call-seq:
172 * mutex.locked? -> true or false
173 *
174 * Returns +true+ if this lock is currently held by some thread.
175 */
/* NOTE(review): the declarator line, presumably
 * "rb_mutex_locked_p(VALUE self)", is missing from this rendering. */
VALUE
{
    rb_mutex_t *mutex = mutex_ptr(self);

    /* a non-NULL owner thread means the lock is held */
    return mutex->th ? Qtrue : Qfalse;
}
183
184static void
185mutex_locked(rb_thread_t *th, VALUE self)
186{
187 rb_mutex_t *mutex = mutex_ptr(self);
188
189 if (th->keeping_mutexes) {
190 mutex->next_mutex = th->keeping_mutexes;
191 }
192 th->keeping_mutexes = mutex;
193}
194
195/*
196 * call-seq:
197 * mutex.try_lock -> true or false
198 *
199 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
200 * lock was granted.
201 */
/* NOTE(review): the declarator line, presumably
 * "rb_mutex_trylock(VALUE self)", is missing from this rendering. */
VALUE
{
    rb_mutex_t *mutex = mutex_ptr(self);
    VALUE locked = Qfalse;

    if (mutex->th == 0) {
        /* NOTE(review): the line fetching the current thread, presumably
         * "rb_thread_t *th = GET_THREAD();", is missing here. */
        mutex->th = th;
        locked = Qtrue;

        /* bookkeeping: add to th->keeping_mutexes */
        mutex_locked(th, self);
    }

    return locked;
}
218
219/*
220 * At maximum, only one thread can use cond_timedwait and watch deadlock
221 * periodically. Multiple polling thread (i.e. concurrent deadlock check)
222 * introduces new race conditions. [Bug #6278] [ruby-core:44275]
223 */
224static const rb_thread_t *patrol_thread = NULL;
225
226static VALUE
227mutex_owned_p(rb_thread_t *th, rb_mutex_t *mutex)
228{
229 if (mutex->th == th) {
230 return Qtrue;
231 }
232 else {
233 return Qfalse;
234 }
235}
236
/* Core blocking-lock implementation shared by Mutex#lock and the
 * uninterruptible variant used by Mutex#sleep's re-acquire path.
 * interruptible_p selects whether pending interrupts are serviced after
 * the lock is (re)acquired. */
static VALUE
do_mutex_lock(VALUE self, int interruptible_p)
{
    /* NOTE(review): the line declaring the current thread, presumably
     * "rb_thread_t *th = GET_THREAD();", is missing from this rendering. */
    rb_mutex_t *mutex = mutex_ptr(self);

    /* When running trap handler */
    if (!FL_TEST_RAW(self, MUTEX_ALLOW_TRAP) &&
        /* NOTE(review): the second half of this condition (the trap-context
         * check) is missing from this rendering. */
        rb_raise(rb_eThreadError, "can't be called from trap context");
    }

    if (rb_mutex_trylock(self) == Qfalse) {
        struct sync_waiter w;

        if (mutex->th == th) {
            rb_raise(rb_eThreadError, "deadlock; recursive locking");
        }

        w.th = th;

        while (mutex->th != th) {
            enum rb_thread_status prev_status = th->status;
            rb_hrtime_t *timeout = 0;
            rb_hrtime_t rel = rb_msec2hrtime(100);

            /* NOTE(review): a line is missing here (status change to
             * THREAD_STOPPED_FOREVER, presumably) — confirm upstream. */
            th->locking_mutex = self;
            th->vm->sleeper++;
            /*
             * Carefully! while some contended threads are in native_sleep(),
             * vm->sleeper is unstable value. we have to avoid both deadlock
             * and busy loop.
             */
            if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
                !patrol_thread) {
                /* become the single deadlock-patrol thread: poll every 100ms */
                timeout = &rel;
                patrol_thread = th;
            }

            list_add_tail(&mutex->waitq, &w.node);
            native_sleep(th, timeout); /* release GVL */
            list_del(&w.node);

            /* mutex may have been released while we slept: grab it */
            if (!mutex->th) {
                mutex->th = th;
            }

            if (patrol_thread == th)
                patrol_thread = NULL;

            /* NOTE(review): a line resetting th->locking_mutex is missing
             * from this rendering — confirm upstream. */
            if (mutex->th && timeout && !RUBY_VM_INTERRUPTED(th->ec)) {
                rb_check_deadlock(th->vm);
            }
            /* NOTE(review): a line is missing here as well. */
            th->status = prev_status;
        }
        th->vm->sleeper--;

        if (interruptible_p) {
            /* release mutex before checking for interrupts...as interrupt checking
             * code might call rb_raise() */
            if (mutex->th == th) mutex->th = 0;
            RUBY_VM_CHECK_INTS_BLOCKING(th->ec); /* may release mutex */
            if (!mutex->th) {
                mutex->th = th;
                mutex_locked(th, self);
            }
        }
        else {
            if (mutex->th == th) mutex_locked(th, self);
        }
    }

    // assertion
    if (mutex_owned_p(th, mutex) == Qfalse) rb_bug("do_mutex_lock: mutex is not owned.");

    return self;
}
318
319static VALUE
320mutex_lock_uninterruptible(VALUE self)
321{
322 return do_mutex_lock(self, 0);
323}
324
325/*
326 * call-seq:
327 * mutex.lock -> self
328 *
329 * Attempts to grab the lock and waits if it isn't available.
330 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
331 */
/* NOTE(review): the declarator line, presumably
 * "rb_mutex_lock(VALUE self)", is missing from this rendering. */
VALUE
{
    /* interruptible variant: pending interrupts are honored */
    return do_mutex_lock(self, 1);
}
337
338/*
339 * call-seq:
340 * mutex.owned? -> true or false
341 *
342 * Returns +true+ if this lock is currently held by current thread.
343 */
/* NOTE(review): the declarator line ("rb_mutex_owned_p(VALUE self)") and
 * the current-thread lookup line are missing from this rendering. */
VALUE
{
    rb_mutex_t *mutex = mutex_ptr(self);

    return mutex_owned_p(th, mutex);
}
352
/* Release +mutex+ on behalf of +th+ and hand it to the next eligible
 * waiter. Returns NULL on success, or an error message string (the caller
 * decides whether to raise or rb_bug). */
static const char *
rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th)
{
    const char *err = NULL;

    if (mutex->th == 0) {
        err = "Attempt to unlock a mutex which is not locked";
    }
    else if (mutex->th != th) {
        err = "Attempt to unlock a mutex which is locked by another thread";
    }
    else {
        struct sync_waiter *cur = 0, *next;
        rb_mutex_t **th_mutex = &th->keeping_mutexes;

        mutex->th = 0;
        /* wake the first waiter that is still in a runnable/sleeping state */
        list_for_each_safe(&mutex->waitq, cur, next, node) {
            list_del_init(&cur->node);
            switch (cur->th->status) {
              case THREAD_RUNNABLE: /* from someone else calling Thread#run */
              case THREAD_STOPPED_FOREVER: /* likely (rb_mutex_lock) */
                /* NOTE(review): the wakeup call (rb_threadptr_interrupt)
                 * is missing from this rendering — confirm upstream. */
                goto found;
              case THREAD_STOPPED: /* probably impossible */
                rb_bug("unexpected THREAD_STOPPED");
              case THREAD_KILLED:
                /* not sure about this, possible in exit GC? */
                rb_bug("unexpected THREAD_KILLED");
                continue;
            }
        }
      found:
        /* unlink this mutex from th->keeping_mutexes */
        while (*th_mutex != mutex) {
            th_mutex = &(*th_mutex)->next_mutex;
        }
        *th_mutex = mutex->next_mutex;
        mutex->next_mutex = NULL;
    }

    return err;
}
394
395/*
396 * call-seq:
397 * mutex.unlock -> self
398 *
399 * Releases the lock.
400 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
401 */
/* NOTE(review): the declarator line, presumably
 * "rb_mutex_unlock(VALUE self)", is missing from this rendering. */
VALUE
{
    const char *err;
    rb_mutex_t *mutex = mutex_ptr(self);

    /* helper returns an error message instead of raising itself */
    err = rb_mutex_unlock_th(mutex, GET_THREAD());
    if (err) rb_raise(rb_eThreadError, "%s", err);

    return self;
}
413
414#if defined(HAVE_WORKING_FORK)
/* Post-fork cleanup: drop every mutex the (dead) parent thread held. */
static void
rb_mutex_abandon_keeping_mutexes(rb_thread_t *th)
{
    rb_mutex_abandon_all(th->keeping_mutexes);
    /* NOTE(review): the line resetting th->keeping_mutexes to NULL is
     * missing from this rendering — confirm upstream. */
}
421
/* Post-fork cleanup: forget the mutex this thread was blocked acquiring.
 * Waiter nodes live on parent-thread stacks, so the wait list is reset. */
static void
rb_mutex_abandon_locking_mutex(rb_thread_t *th)
{
    if (th->locking_mutex) {
        rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);

        list_head_init(&mutex->waitq);
        /* NOTE(review): the line clearing th->locking_mutex is missing
         * from this rendering — confirm upstream. */
    }
}
432
433static void
434rb_mutex_abandon_all(rb_mutex_t *mutexes)
435{
436 rb_mutex_t *mutex;
437
438 while (mutexes) {
439 mutex = mutexes;
440 mutexes = mutex->next_mutex;
441 mutex->th = 0;
442 mutex->next_mutex = 0;
443 list_head_init(&mutex->waitq);
444 }
445}
446#endif
447
448static VALUE
449rb_mutex_sleep_forever(VALUE time)
450{
451 rb_thread_sleep_deadly_allow_spurious_wakeup();
452 return Qnil;
453}
454
455static VALUE
456rb_mutex_wait_for(VALUE time)
457{
458 rb_hrtime_t *rel = (rb_hrtime_t *)time;
459 /* permit spurious check */
460 sleep_hrtime(GET_THREAD(), *rel, 0);
461 return Qnil;
462}
463
/* C implementation behind Mutex#sleep: unlock, sleep (optionally bounded),
 * re-lock uninterruptibly, and return the elapsed whole seconds.
 * NOTE(review): the declarator line, presumably
 * "rb_mutex_sleep(VALUE self, VALUE timeout)", is missing. */
VALUE
{
    time_t beg, end;
    struct timeval t;

    /* validate the interval up front, before releasing the lock */
    if (!NIL_P(timeout)) {
        t = rb_time_interval(timeout);
    }

    rb_mutex_unlock(self);
    beg = time(0);
    if (NIL_P(timeout)) {
        rb_ensure(rb_mutex_sleep_forever, Qnil, mutex_lock_uninterruptible, self);
    }
    else {
        rb_hrtime_t rel = rb_timeval2hrtime(&t);

        rb_ensure(rb_mutex_wait_for, (VALUE)&rel,
                  mutex_lock_uninterruptible, self);
    }
    /* NOTE(review): a line is missing here (interrupt check, presumably
     * RUBY_VM_CHECK_INTS_BLOCKING) — confirm upstream. */
    end = time(0) - beg;
    return TIMET2NUM(end);
}
489
490/*
491 * call-seq:
492 * mutex.sleep(timeout = nil) -> number
493 *
494 * Releases the lock and sleeps +timeout+ seconds if it is given and
495 * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
496 * the current thread.
497 *
498 * When the thread is next woken up, it will attempt to reacquire
499 * the lock.
500 *
501 * Note that this method can wakeup without explicit Thread#wakeup call.
502 * For example, receiving signal and so on.
503 */
504static VALUE
505mutex_sleep(int argc, VALUE *argv, VALUE self)
506{
507 VALUE timeout;
508
509 timeout = rb_check_arity(argc, 0, 1) ? argv[0] : Qnil;
510 return rb_mutex_sleep(self, timeout);
511}
512
513/*
514 * call-seq:
515 * mutex.synchronize { ... } -> result of the block
516 *
517 * Obtains a lock, runs the block, and releases the lock when the block
518 * completes. See the example under +Mutex+.
519 */
520
/* C API: lock +mutex+, invoke func(arg), and guarantee unlock via rb_ensure.
 * NOTE(review): the declarator line, presumably
 * "rb_mutex_synchronize(VALUE mutex, VALUE (*func)(VALUE), VALUE arg)",
 * is missing from this rendering. */
VALUE
{
    rb_mutex_lock(mutex);
    return rb_ensure(func, arg, rb_mutex_unlock, mutex);
}
527
528/*
529 * call-seq:
530 * mutex.synchronize { ... } -> result of the block
531 *
532 * Obtains a lock, runs the block, and releases the lock when the block
533 * completes. See the example under +Mutex+.
534 */
535static VALUE
536rb_mutex_synchronize_m(VALUE self)
537{
538 if (!rb_block_given_p()) {
539 rb_raise(rb_eThreadError, "must be called with a block");
540 }
541
542 return rb_mutex_synchronize(self, rb_yield, Qundef);
543}
544
/* C API: toggle whether this mutex may be locked from a trap handler
 * (sets/clears the MUTEX_ALLOW_TRAP flag checked in do_mutex_lock). */
void rb_mutex_allow_trap(VALUE self, int val)
{
    Check_TypedStruct(self, &mutex_data_type);

    if (val)
        /* NOTE(review): FL_SET(self, MUTEX_ALLOW_TRAP); missing from
         * this rendering — confirm upstream. */
    else
        /* NOTE(review): FL_UNSET(self, MUTEX_ALLOW_TRAP); missing from
         * this rendering — confirm upstream. */
}
554
555/* Queue */
556
#define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq)
/* Internal state of a Queue instance. */
PACKED_STRUCT_UNALIGNED(struct rb_queue {
    struct list_head waitq;  /* consumers blocked in pop; nodes live on their stacks */
    rb_serial_t fork_gen;    /* process generation; see queue_fork_check() */
    const VALUE que;         /* backing Array; written only via RB_OBJ_WRITE */
    int num_waiting;         /* number of threads currently on waitq */
});
564
#define szqueue_waitq(sq) UNALIGNED_MEMBER_PTR(sq, q.waitq)
#define szqueue_pushq(sq) UNALIGNED_MEMBER_PTR(sq, pushq)
/* Internal state of a SizedQueue: a plain queue plus producer-side waiting. */
PACKED_STRUCT_UNALIGNED(struct rb_szqueue {
    struct rb_queue q;       /* embedded queue (consumer side) */
    int num_waiting_push;    /* producers blocked in push */
    struct list_head pushq;  /* producers waiting for free capacity */
    long max;                /* capacity limit (always > 0) */
});
573
574static void
575queue_mark(void *ptr)
576{
577 struct rb_queue *q = ptr;
578
579 /* no need to mark threads in waitq, they are on stack */
580 rb_gc_mark(q->que);
581}
582
583static size_t
584queue_memsize(const void *ptr)
585{
586 return sizeof(struct rb_queue);
587}
588
/* TypedData descriptor for Queue. */
static const rb_data_type_t queue_data_type = {
    "queue",
    {queue_mark, RUBY_TYPED_DEFAULT_FREE, queue_memsize,},
    /* NOTE(review): trailing aggregate members (parent/data/flags) are
     * missing from this rendering — confirm against upstream. */
};
594
595static VALUE
596queue_alloc(VALUE klass)
597{
598 VALUE obj;
599 struct rb_queue *q;
600
601 obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q);
602 list_head_init(queue_waitq(q));
603 return obj;
604}
605
606static int
607queue_fork_check(struct rb_queue *q)
608{
609 rb_serial_t fork_gen = GET_VM()->fork_gen;
610
611 if (q->fork_gen == fork_gen) {
612 return 0;
613 }
614 /* forked children can't reach into parent thread stacks */
615 q->fork_gen = fork_gen;
616 list_head_init(queue_waitq(q));
617 q->num_waiting = 0;
618 return 1;
619}
620
621static struct rb_queue *
622queue_ptr(VALUE obj)
623{
624 struct rb_queue *q;
625
626 TypedData_Get_Struct(obj, struct rb_queue, &queue_data_type, q);
627 queue_fork_check(q);
628
629 return q;
630}
631
632#define QUEUE_CLOSED FL_USER5
633
634static void
635szqueue_mark(void *ptr)
636{
637 struct rb_szqueue *sq = ptr;
638
639 queue_mark(&sq->q);
640}
641
642static size_t
643szqueue_memsize(const void *ptr)
644{
645 return sizeof(struct rb_szqueue);
646}
647
/* TypedData descriptor for SizedQueue. */
static const rb_data_type_t szqueue_data_type = {
    "sized_queue",
    {szqueue_mark, RUBY_TYPED_DEFAULT_FREE, szqueue_memsize,},
    /* NOTE(review): trailing aggregate members (parent/data/flags) are
     * missing from this rendering — confirm against upstream. */
};
653
654static VALUE
655szqueue_alloc(VALUE klass)
656{
657 struct rb_szqueue *sq;
658 VALUE obj = TypedData_Make_Struct(klass, struct rb_szqueue,
659 &szqueue_data_type, sq);
660 list_head_init(szqueue_waitq(sq));
661 list_head_init(szqueue_pushq(sq));
662 return obj;
663}
664
665static struct rb_szqueue *
666szqueue_ptr(VALUE obj)
667{
668 struct rb_szqueue *sq;
669
670 TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq);
671 if (queue_fork_check(&sq->q)) {
672 list_head_init(szqueue_pushq(sq));
673 sq->num_waiting_push = 0;
674 }
675
676 return sq;
677}
678
679static VALUE
680ary_buf_new(void)
681{
682 return rb_ary_tmp_new(1);
683}
684
685static VALUE
686check_array(VALUE obj, VALUE ary)
687{
688 if (!RB_TYPE_P(ary, T_ARRAY)) {
689 rb_raise(rb_eTypeError, "%+"PRIsVALUE" not initialized", obj);
690 }
691 return ary;
692}
693
694static long
695queue_length(VALUE self, struct rb_queue *q)
696{
697 return RARRAY_LEN(check_array(self, q->que));
698}
699
700static int
701queue_closed_p(VALUE self)
702{
703 return FL_TEST_RAW(self, QUEUE_CLOSED) != 0;
704}
705
706/*
707 * Document-class: ClosedQueueError
708 *
709 * The exception class which will be raised when pushing into a closed
710 * Queue. See Queue#close and SizedQueue#close.
711 */
712
713NORETURN(static void raise_closed_queue_error(VALUE self));
714
/* Raise the standard error for operations on a closed queue; never returns. */
static void
raise_closed_queue_error(VALUE self)
{
    rb_raise(rb_eClosedQueueError, "queue closed");
}
720
721static VALUE
722queue_closed_result(VALUE self, struct rb_queue *q)
723{
724 assert(queue_length(self, q) == 0);
725 return Qnil;
726}
727
728/*
729 * Document-class: Queue
730 *
731 * The Queue class implements multi-producer, multi-consumer queues.
732 * It is especially useful in threaded programming when information
733 * must be exchanged safely between multiple threads. The Queue class
734 * implements all the required locking semantics.
735 *
736 * The class implements FIFO type of queue. In a FIFO queue, the first
737 * tasks added are the first retrieved.
738 *
739 * Example:
740 *
741 * queue = Queue.new
742 *
743 * producer = Thread.new do
744 * 5.times do |i|
745 * sleep rand(i) # simulate expense
746 * queue << i
747 * puts "#{i} produced"
748 * end
749 * end
750 *
751 * consumer = Thread.new do
752 * 5.times do |i|
753 * value = queue.pop
754 * sleep rand(i/2) # simulate expense
755 * puts "consumed #{value}"
756 * end
757 * end
758 *
759 * consumer.join
760 *
761 */
762
763/*
764 * Document-method: Queue::new
765 *
766 * Creates a new queue instance.
767 */
768
769static VALUE
770rb_queue_initialize(VALUE self)
771{
772 struct rb_queue *q = queue_ptr(self);
773 RB_OBJ_WRITE(self, &q->que, ary_buf_new());
774 list_head_init(queue_waitq(q));
775 return self;
776}
777
778static VALUE
779queue_do_push(VALUE self, struct rb_queue *q, VALUE obj)
780{
781 if (queue_closed_p(self)) {
782 raise_closed_queue_error(self);
783 }
784 rb_ary_push(check_array(self, q->que), obj);
785 wakeup_one(queue_waitq(q));
786 return self;
787}
788
789/*
790 * Document-method: Queue#close
791 * call-seq:
792 * close
793 *
794 * Closes the queue. A closed queue cannot be re-opened.
795 *
796 * After the call to close completes, the following are true:
797 *
798 * - +closed?+ will return true
799 *
800 * - +close+ will be ignored.
801 *
802 * - calling enq/push/<< will raise a +ClosedQueueError+.
803 *
804 * - when +empty?+ is false, calling deq/pop/shift will return an object
805 * from the queue as usual.
806 * - when +empty?+ is true, deq(false) will not suspend the thread and will return nil.
807 * deq(true) will raise a +ThreadError+.
808 *
809 * ClosedQueueError is inherited from StopIteration, so that you can break loop block.
810 *
811 * Example:
812 *
813 * q = Queue.new
814 * Thread.new{
815 * while e = q.deq # wait for nil to break loop
816 * # ...
817 * end
818 * }
819 * q.close
820 */
821
822static VALUE
823rb_queue_close(VALUE self)
824{
825 struct rb_queue *q = queue_ptr(self);
826
827 if (!queue_closed_p(self)) {
828 FL_SET(self, QUEUE_CLOSED);
829
830 wakeup_all(queue_waitq(q));
831 }
832
833 return self;
834}
835
836/*
837 * Document-method: Queue#closed?
838 * call-seq: closed?
839 *
840 * Returns +true+ if the queue is closed.
841 */
842
843static VALUE
844rb_queue_closed_p(VALUE self)
845{
846 return queue_closed_p(self) ? Qtrue : Qfalse;
847}
848
849/*
850 * Document-method: Queue#push
851 * call-seq:
852 * push(object)
853 * enq(object)
854 * <<(object)
855 *
856 * Pushes the given +object+ to the queue.
857 */
858
859static VALUE
860rb_queue_push(VALUE self, VALUE obj)
861{
862 return queue_do_push(self, queue_ptr(self), obj);
863}
864
865static VALUE
866queue_sleep(VALUE arg)
867{
868 rb_thread_sleep_deadly_allow_spurious_wakeup();
869 return Qnil;
870}
871
    /* NOTE(review): the opening of struct queue_waiter (and its embedded
     * struct sync_waiter member) is missing from this rendering — confirm
     * against upstream thread_sync.c. */
    union {
        struct rb_queue *q;    /* set by consumer-side waits (queue_do_pop) */
        struct rb_szqueue *sq; /* set by producer-side waits (rb_szqueue_push) */
    } as;
};
879
880static VALUE
881queue_sleep_done(VALUE p)
882{
883 struct queue_waiter *qw = (struct queue_waiter *)p;
884
885 list_del(&qw->w.node);
886 qw->as.q->num_waiting--;
887
888 return Qfalse;
889}
890
891static VALUE
892szqueue_sleep_done(VALUE p)
893{
894 struct queue_waiter *qw = (struct queue_waiter *)p;
895
896 list_del(&qw->w.node);
897 qw->as.sq->num_waiting_push--;
898
899 return Qfalse;
900}
901
/* Remove and return the head of the queue. When empty: raise if
 * +should_block+ is false, return nil if closed, otherwise enqueue
 * ourselves as a waiter and sleep until an item arrives. The while loop
 * re-checks after every wakeup since wakeups may be spurious or the item
 * may have been taken by another consumer. */
static VALUE
queue_do_pop(VALUE self, struct rb_queue *q, int should_block)
{
    check_array(self, q->que);

    while (RARRAY_LEN(q->que) == 0) {
        if (!should_block) {
            rb_raise(rb_eThreadError, "queue empty");
        }
        else if (queue_closed_p(self)) {
            return queue_closed_result(self, q);
        }
        else {
            /* on-stack waiter node; unlinked in queue_sleep_done */
            struct queue_waiter qw;

            assert(RARRAY_LEN(q->que) == 0);
            assert(queue_closed_p(self) == 0);

            qw.w.th = GET_THREAD();
            qw.as.q = q;
            list_add_tail(queue_waitq(qw.as.q), &qw.w.node);
            qw.as.q->num_waiting++;

            /* ensure-cleanup runs even if the sleep is interrupted */
            rb_ensure(queue_sleep, self, queue_sleep_done, (VALUE)&qw);
        }
    }

    return rb_ary_shift(q->que);
}
931
932static int
933queue_pop_should_block(int argc, const VALUE *argv)
934{
935 int should_block = 1;
936 rb_check_arity(argc, 0, 1);
937 if (argc > 0) {
938 should_block = !RTEST(argv[0]);
939 }
940 return should_block;
941}
942
943/*
944 * Document-method: Queue#pop
945 * call-seq:
946 * pop(non_block=false)
947 * deq(non_block=false)
948 * shift(non_block=false)
949 *
950 * Retrieves data from the queue.
951 *
952 * If the queue is empty, the calling thread is suspended until data is pushed
953 * onto the queue. If +non_block+ is true, the thread isn't suspended, and
954 * +ThreadError+ is raised.
955 */
956
957static VALUE
958rb_queue_pop(int argc, VALUE *argv, VALUE self)
959{
960 int should_block = queue_pop_should_block(argc, argv);
961 return queue_do_pop(self, queue_ptr(self), should_block);
962}
963
964/*
965 * Document-method: Queue#empty?
966 * call-seq: empty?
967 *
968 * Returns +true+ if the queue is empty.
969 */
970
971static VALUE
972rb_queue_empty_p(VALUE self)
973{
974 return queue_length(self, queue_ptr(self)) == 0 ? Qtrue : Qfalse;
975}
976
977/*
978 * Document-method: Queue#clear
979 *
980 * Removes all objects from the queue.
981 */
982
983static VALUE
984rb_queue_clear(VALUE self)
985{
986 struct rb_queue *q = queue_ptr(self);
987
988 rb_ary_clear(check_array(self, q->que));
989 return self;
990}
991
992/*
993 * Document-method: Queue#length
994 * call-seq:
995 * length
996 * size
997 *
998 * Returns the length of the queue.
999 */
1000
1001static VALUE
1002rb_queue_length(VALUE self)
1003{
1004 return LONG2NUM(queue_length(self, queue_ptr(self)));
1005}
1006
1007/*
1008 * Document-method: Queue#num_waiting
1009 *
1010 * Returns the number of threads waiting on the queue.
1011 */
1012
1013static VALUE
1014rb_queue_num_waiting(VALUE self)
1015{
1016 struct rb_queue *q = queue_ptr(self);
1017
1018 return INT2NUM(q->num_waiting);
1019}
1020
1021/*
1022 * Document-class: SizedQueue
1023 *
1024 * This class represents queues of specified size capacity. The push operation
1025 * may be blocked if the capacity is full.
1026 *
1027 * See Queue for an example of how a SizedQueue works.
1028 */
1029
1030/*
1031 * Document-method: SizedQueue::new
1032 * call-seq: new(max)
1033 *
1034 * Creates a fixed-length queue with a maximum size of +max+.
1035 */
1036
1037static VALUE
1038rb_szqueue_initialize(VALUE self, VALUE vmax)
1039{
1040 long max;
1041 struct rb_szqueue *sq = szqueue_ptr(self);
1042
1043 max = NUM2LONG(vmax);
1044 if (max <= 0) {
1045 rb_raise(rb_eArgError, "queue size must be positive");
1046 }
1047
1048 RB_OBJ_WRITE(self, &sq->q.que, ary_buf_new());
1049 list_head_init(szqueue_waitq(sq));
1050 list_head_init(szqueue_pushq(sq));
1051 sq->max = max;
1052
1053 return self;
1054}
1055
1056/*
1057 * Document-method: SizedQueue#close
1058 * call-seq:
1059 * close
1060 *
1061 * Similar to Queue#close.
1062 *
1063 * The difference is behavior with waiting enqueuing threads.
1064 *
1065 * If there are waiting enqueuing threads, they are interrupted by
1066 * raising ClosedQueueError('queue closed').
1067 */
1068static VALUE
1069rb_szqueue_close(VALUE self)
1070{
1071 if (!queue_closed_p(self)) {
1072 struct rb_szqueue *sq = szqueue_ptr(self);
1073
1074 FL_SET(self, QUEUE_CLOSED);
1075 wakeup_all(szqueue_waitq(sq));
1076 wakeup_all(szqueue_pushq(sq));
1077 }
1078 return self;
1079}
1080
1081/*
1082 * Document-method: SizedQueue#max
1083 *
1084 * Returns the maximum size of the queue.
1085 */
1086
1087static VALUE
1088rb_szqueue_max_get(VALUE self)
1089{
1090 return LONG2NUM(szqueue_ptr(self)->max);
1091}
1092
1093/*
1094 * Document-method: SizedQueue#max=
1095 * call-seq: max=(number)
1096 *
1097 * Sets the maximum size of the queue to the given +number+.
1098 */
1099
1100static VALUE
1101rb_szqueue_max_set(VALUE self, VALUE vmax)
1102{
1103 long max = NUM2LONG(vmax);
1104 long diff = 0;
1105 struct rb_szqueue *sq = szqueue_ptr(self);
1106
1107 if (max <= 0) {
1108 rb_raise(rb_eArgError, "queue size must be positive");
1109 }
1110 if (max > sq->max) {
1111 diff = max - sq->max;
1112 }
1113 sq->max = max;
1114 sync_wakeup(szqueue_pushq(sq), diff);
1115 return vmax;
1116}
1117
1118static int
1119szqueue_push_should_block(int argc, const VALUE *argv)
1120{
1121 int should_block = 1;
1122 rb_check_arity(argc, 1, 2);
1123 if (argc > 1) {
1124 should_block = !RTEST(argv[1]);
1125 }
1126 return should_block;
1127}
1128
1129/*
1130 * Document-method: SizedQueue#push
1131 * call-seq:
1132 * push(object, non_block=false)
1133 * enq(object, non_block=false)
1134 * <<(object)
1135 *
1136 * Pushes +object+ to the queue.
1137 *
1138 * If there is no space left in the queue, waits until space becomes
1139 * available, unless +non_block+ is true. If +non_block+ is true, the
1140 * thread isn't suspended, and +ThreadError+ is raised.
1141 */
1142
/* SizedQueue#push / #enq / #<<: block (unless non_block) while the queue
 * is at capacity; raise ClosedQueueError if closed before or during the
 * wait. The while loop re-checks capacity after every wakeup. */
static VALUE
rb_szqueue_push(int argc, VALUE *argv, VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);
    int should_block = szqueue_push_should_block(argc, argv);

    while (queue_length(self, &sq->q) >= sq->max) {
        if (!should_block) {
            rb_raise(rb_eThreadError, "queue full");
        }
        else if (queue_closed_p(self)) {
            goto closed;
        }
        else {
            /* on-stack waiter node; unlinked in szqueue_sleep_done */
            struct queue_waiter qw;
            struct list_head *pushq = szqueue_pushq(sq);

            qw.w.th = GET_THREAD();
            qw.as.sq = sq;
            list_add_tail(pushq, &qw.w.node);
            sq->num_waiting_push++;

            /* ensure-cleanup runs even if the sleep is interrupted */
            rb_ensure(queue_sleep, self, szqueue_sleep_done, (VALUE)&qw);
        }
    }

    /* the queue may have been closed while we were waiting */
    if (queue_closed_p(self)) {
      closed:
        raise_closed_queue_error(self);
    }

    return queue_do_push(self, &sq->q, argv[0]);
}
1176
1177static VALUE
1178szqueue_do_pop(VALUE self, int should_block)
1179{
1180 struct rb_szqueue *sq = szqueue_ptr(self);
1181 VALUE retval = queue_do_pop(self, &sq->q, should_block);
1182
1183 if (queue_length(self, &sq->q) < sq->max) {
1184 wakeup_one(szqueue_pushq(sq));
1185 }
1186
1187 return retval;
1188}
1189
1190/*
1191 * Document-method: SizedQueue#pop
1192 * call-seq:
1193 * pop(non_block=false)
1194 * deq(non_block=false)
1195 * shift(non_block=false)
1196 *
1197 * Retrieves data from the queue.
1198 *
1199 * If the queue is empty, the calling thread is suspended until data is pushed
1200 * onto the queue. If +non_block+ is true, the thread isn't suspended, and
1201 * +ThreadError+ is raised.
1202 */
1203
1204static VALUE
1205rb_szqueue_pop(int argc, VALUE *argv, VALUE self)
1206{
1207 int should_block = queue_pop_should_block(argc, argv);
1208 return szqueue_do_pop(self, should_block);
1209}
1210
1211/*
1212 * Document-method: SizedQueue#clear
1213 *
1214 * Removes all objects from the queue.
1215 */
1216
1217static VALUE
1218rb_szqueue_clear(VALUE self)
1219{
1220 struct rb_szqueue *sq = szqueue_ptr(self);
1221
1222 rb_ary_clear(check_array(self, sq->q.que));
1223 wakeup_all(szqueue_pushq(sq));
1224 return self;
1225}
1226
1227/*
1228 * Document-method: SizedQueue#length
1229 * call-seq:
1230 * length
1231 * size
1232 *
1233 * Returns the length of the queue.
1234 */
1235
1236static VALUE
1237rb_szqueue_length(VALUE self)
1238{
1239 struct rb_szqueue *sq = szqueue_ptr(self);
1240
1241 return LONG2NUM(queue_length(self, &sq->q));
1242}
1243
1244/*
1245 * Document-method: SizedQueue#num_waiting
1246 *
1247 * Returns the number of threads waiting on the queue.
1248 */
1249
1250static VALUE
1251rb_szqueue_num_waiting(VALUE self)
1252{
1253 struct rb_szqueue *sq = szqueue_ptr(self);
1254
1255 return INT2NUM(sq->q.num_waiting + sq->num_waiting_push);
1256}
1257
1258/*
1259 * Document-method: SizedQueue#empty?
1260 * call-seq: empty?
1261 *
1262 * Returns +true+ if the queue is empty.
1263 */
1264
1265static VALUE
1266rb_szqueue_empty_p(VALUE self)
1267{
1268 struct rb_szqueue *sq = szqueue_ptr(self);
1269
1270 return queue_length(self, &sq->q) == 0 ? Qtrue : Qfalse;
1271}
1272
1273
1274/* ConditionalVariable */
1278};
1279
1280/*
1281 * Document-class: ConditionVariable
1282 *
1283 * ConditionVariable objects augment class Mutex. Using condition variables,
1284 * it is possible to suspend while in the middle of a critical section until a
1285 * resource becomes available.
1286 *
1287 * Example:
1288 *
1289 * mutex = Mutex.new
1290 * resource = ConditionVariable.new
1291 *
1292 * a = Thread.new {
1293 * mutex.synchronize {
1294 * # Thread 'a' now needs the resource
1295 * resource.wait(mutex)
1296 * # 'a' can now have the resource
1297 * }
1298 * }
1299 *
1300 * b = Thread.new {
1301 * mutex.synchronize {
1302 * # Thread 'b' has finished using the resource
1303 * resource.signal
1304 * }
1305 * }
1306 */
1307
1308static size_t
1309condvar_memsize(const void *ptr)
1310{
1311 return sizeof(struct rb_condvar);
1312}
1313
/* TypedData descriptor for ConditionVariable (no mark: holds no VALUEs). */
static const rb_data_type_t cv_data_type = {
    "condvar",
    {0, RUBY_TYPED_DEFAULT_FREE, condvar_memsize,},
    /* NOTE(review): trailing aggregate members (parent/data/flags) are
     * missing from this rendering — confirm against upstream. */
};
1319
1320static struct rb_condvar *
1321condvar_ptr(VALUE self)
1322{
1323 struct rb_condvar *cv;
1324 rb_serial_t fork_gen = GET_VM()->fork_gen;
1325
1326 TypedData_Get_Struct(self, struct rb_condvar, &cv_data_type, cv);
1327
1328 /* forked children can't reach into parent thread stacks */
1329 if (cv->fork_gen != fork_gen) {
1330 cv->fork_gen = fork_gen;
1331 list_head_init(&cv->waitq);
1332 }
1333
1334 return cv;
1335}
1336
1337static VALUE
1338condvar_alloc(VALUE klass)
1339{
1340 struct rb_condvar *cv;
1341 VALUE obj;
1342
1343 obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv);
1344 list_head_init(&cv->waitq);
1345
1346 return obj;
1347}
1348
1349/*
1350 * Document-method: ConditionVariable::new
1351 *
1352 * Creates a new condition variable instance.
1353 */
1354
1355static VALUE
1356rb_condvar_initialize(VALUE self)
1357{
1358 struct rb_condvar *cv = condvar_ptr(self);
1359 list_head_init(&cv->waitq);
1360 return self;
1361}
1362
1366};
1367
1368static ID id_sleep;
1369
1370static VALUE
1371do_sleep(VALUE args)
1372{
1373 struct sleep_call *p = (struct sleep_call *)args;
1374 return rb_funcallv(p->mutex, id_sleep, 1, &p->timeout);
1375}
1376
1377static VALUE
1378delete_from_waitq(VALUE v)
1379{
1380 struct sync_waiter *w = (void *)v;
1381 list_del(&w->node);
1382
1383 return Qnil;
1384}
1385
1386/*
1387 * Document-method: ConditionVariable#wait
1388 * call-seq: wait(mutex, timeout=nil)
1389 *
1390 * Releases the lock held in +mutex+ and waits; reacquires the lock on wakeup.
1391 *
1392 * If +timeout+ is given, this method returns after +timeout+ seconds passed,
 * even if no other thread signals.
1394 */
1395
1396static VALUE
1397rb_condvar_wait(int argc, VALUE *argv, VALUE self)
1398{
1399 struct rb_condvar *cv = condvar_ptr(self);
1400 struct sleep_call args;
1401 struct sync_waiter w;
1402
1403 rb_scan_args(argc, argv, "11", &args.mutex, &args.timeout);
1404
1405 w.th = GET_THREAD();
1406 list_add_tail(&cv->waitq, &w.node);
1407 rb_ensure(do_sleep, (VALUE)&args, delete_from_waitq, (VALUE)&w);
1408
1409 return self;
1410}
1411
1412/*
1413 * Document-method: ConditionVariable#signal
1414 *
1415 * Wakes up the first thread in line waiting for this lock.
1416 */
1417
1418static VALUE
1419rb_condvar_signal(VALUE self)
1420{
1421 struct rb_condvar *cv = condvar_ptr(self);
1422 wakeup_one(&cv->waitq);
1423 return self;
1424}
1425
1426/*
1427 * Document-method: ConditionVariable#broadcast
1428 *
1429 * Wakes up all threads waiting for this lock.
1430 */
1431
1432static VALUE
1433rb_condvar_broadcast(VALUE self)
1434{
1435 struct rb_condvar *cv = condvar_ptr(self);
1436 wakeup_all(&cv->waitq);
1437 return self;
1438}
1439
1440/* :nodoc: */
/* Shared marshal_dump stub for Mutex/Queue/ConditionVariable: these hold
 * live thread state and cannot be serialized. */
static VALUE
undumpable(VALUE obj)
{
    /* NOTE(review): the body (raising TypeError "can't dump ...") is
     * missing from this rendering — confirm against upstream. */
}
1447
/* Define a class under Thread (e.g. Thread::Mutex). */
static VALUE
define_thread_class(VALUE outer, const char *name, VALUE super)
{
    VALUE klass = rb_define_class_under(outer, name, super);
    /* NOTE(review): a line is missing here (registering the top-level
     * alias constant, presumably rb_define_const on rb_cObject) —
     * confirm against upstream. */
    return klass;
}
1455
/* Register Mutex, Queue, SizedQueue, ConditionVariable (as Thread::* with
 * top-level aliases via define_thread_class) and all their methods. */
static void
Init_thread_sync(void)
{
#undef rb_intern
#if 0
    /* dead code kept so rdoc can discover the top-level class names */
    rb_cMutex = rb_define_class("Mutex", rb_cObject); /* teach rdoc Mutex */
    rb_cConditionVariable = rb_define_class("ConditionVariable", rb_cObject); /* teach rdoc ConditionVariable */
    rb_cQueue = rb_define_class("Queue", rb_cObject); /* teach rdoc Queue */
    rb_cSizedQueue = rb_define_class("SizedQueue", rb_cObject); /* teach rdoc SizedQueue */
#endif

#define DEFINE_CLASS(name, super) \
    rb_c##name = define_thread_class(rb_cThread, #name, rb_c##super)

    /* Mutex */
    DEFINE_CLASS(Mutex, Object);
    rb_define_alloc_func(rb_cMutex, mutex_alloc);
    rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
    rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
    rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
    rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
    rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
    rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
    rb_define_method(rb_cMutex, "synchronize", rb_mutex_synchronize_m, 0);
    rb_define_method(rb_cMutex, "owned?", rb_mutex_owned_p, 0);

    /* Queue */
    DEFINE_CLASS(Queue, Object);
    rb_define_alloc_func(rb_cQueue, queue_alloc);

    rb_eClosedQueueError = rb_define_class("ClosedQueueError", rb_eStopIteration);

    rb_define_method(rb_cQueue, "initialize", rb_queue_initialize, 0);
    rb_undef_method(rb_cQueue, "initialize_copy");
    rb_define_method(rb_cQueue, "marshal_dump", undumpable, 0);
    rb_define_method(rb_cQueue, "close", rb_queue_close, 0);
    rb_define_method(rb_cQueue, "closed?", rb_queue_closed_p, 0);
    rb_define_method(rb_cQueue, "push", rb_queue_push, 1);
    rb_define_method(rb_cQueue, "pop", rb_queue_pop, -1);
    rb_define_method(rb_cQueue, "empty?", rb_queue_empty_p, 0);
    rb_define_method(rb_cQueue, "clear", rb_queue_clear, 0);
    rb_define_method(rb_cQueue, "length", rb_queue_length, 0);
    rb_define_method(rb_cQueue, "num_waiting", rb_queue_num_waiting, 0);

    rb_define_alias(rb_cQueue, "enq", "push");
    rb_define_alias(rb_cQueue, "<<", "push");
    rb_define_alias(rb_cQueue, "deq", "pop");
    rb_define_alias(rb_cQueue, "shift", "pop");
    rb_define_alias(rb_cQueue, "size", "length");

    DEFINE_CLASS(SizedQueue, Queue);
    rb_define_alloc_func(rb_cSizedQueue, szqueue_alloc);

    rb_define_method(rb_cSizedQueue, "initialize", rb_szqueue_initialize, 1);
    rb_define_method(rb_cSizedQueue, "close", rb_szqueue_close, 0);
    rb_define_method(rb_cSizedQueue, "max", rb_szqueue_max_get, 0);
    rb_define_method(rb_cSizedQueue, "max=", rb_szqueue_max_set, 1);
    rb_define_method(rb_cSizedQueue, "push", rb_szqueue_push, -1);
    rb_define_method(rb_cSizedQueue, "pop", rb_szqueue_pop, -1);
    rb_define_method(rb_cSizedQueue, "empty?", rb_szqueue_empty_p, 0);
    rb_define_method(rb_cSizedQueue, "clear", rb_szqueue_clear, 0);
    rb_define_method(rb_cSizedQueue, "length", rb_szqueue_length, 0);
    rb_define_method(rb_cSizedQueue, "num_waiting", rb_szqueue_num_waiting, 0);

    rb_define_alias(rb_cSizedQueue, "enq", "push");
    rb_define_alias(rb_cSizedQueue, "<<", "push");
    rb_define_alias(rb_cSizedQueue, "deq", "pop");
    rb_define_alias(rb_cSizedQueue, "shift", "pop");
    rb_define_alias(rb_cSizedQueue, "size", "length");

    /* CVar */
    DEFINE_CLASS(ConditionVariable, Object);
    rb_define_alloc_func(rb_cConditionVariable, condvar_alloc);

    id_sleep = rb_intern("sleep");

    rb_define_method(rb_cConditionVariable, "initialize", rb_condvar_initialize, 0);
    rb_undef_method(rb_cConditionVariable, "initialize_copy");
    rb_define_method(rb_cConditionVariable, "marshal_dump", undumpable, 0);
    rb_define_method(rb_cConditionVariable, "wait", rb_condvar_wait, -1);
    rb_define_method(rb_cConditionVariable, "signal", rb_condvar_signal, 0);
    rb_define_method(rb_cConditionVariable, "broadcast", rb_condvar_broadcast, 0);

    rb_provide("thread.rb");
}
struct RIMemo * ptr
Definition: debug.c:65
VALUE rb_define_class(const char *, VALUE)
Defines a top-level class.
Definition: class.c:662
VALUE rb_define_class_under(VALUE, const char *, VALUE)
Defines a class under the namespace of outer.
Definition: class.c:711
void rb_undef_method(VALUE, const char *)
Definition: class.c:1593
void rb_define_alias(VALUE, const char *, const char *)
Defines an alias of a method.
Definition: class.c:1818
int rb_block_given_p(void)
Determines if the current method is given a block.
Definition: eval.c:898
VALUE rb_eStopIteration
Definition: enumerator.c:124
VALUE rb_cObject
Object class.
Definition: ruby.h:2012
VALUE rb_eThreadError
Definition: eval.c:924
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2671
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
Definition: error.c:874
void rb_bug(const char *fmt,...)
Definition: error.c:636
VALUE rb_eTypeError
Definition: error.c:924
VALUE rb_eArgError
Definition: error.c:925
VALUE rb_ensure(VALUE(*)(VALUE), VALUE, VALUE(*)(VALUE), VALUE)
An equivalent to ensure clause.
Definition: eval.c:1115
VALUE rb_obj_class(VALUE)
Equivalent to Object#class in Ruby.
Definition: object.c:217
uint64_t rb_hrtime_t
Definition: hrtime.h:47
const char * name
Definition: nkf.c:208
#define RARRAY_LEN(a)
@ THREAD_STOPPED_FOREVER
#define list_del(n)
#define NULL
#define rb_funcallv(recv, mid, argc, argv)
void rb_provide(const char *)
Definition: load.c:607
#define list_del_init(n)
#define RTEST(v)
#define RUBY_VM_INTERRUPTED(ec)
time_t time(time_t *_timer)
#define RUBY_TYPED_WB_PROTECTED
#define Qundef
const VALUE VALUE obj
#define FL_SET(x, f)
#define GET_EC()
#define NIL_P(v)
#define list_add_tail(h, n)
#define LONG_MAX
#define RUBY_TYPED_DEFAULT_FREE
const char size_t n
unsigned long VALUE
VALUE rb_ary_push(VALUE, VALUE)
Definition: array.c:1195
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
#define GET_VM()
#define FL_TEST_RAW(x, f)
#define INT2NUM(x)
#define RB_OBJ_WRITE(a, slot, b)
#define LONG2NUM(x)
void rb_define_const(VALUE, const char *, VALUE)
Definition: variable.c:2891
#define RUBY_TYPED_FREE_IMMEDIATELY
#define TypedData_Get_Struct(obj, type, data_type, sval)
#define GET_THREAD()
#define PRIsVALUE
unsigned long long rb_serial_t
VALUE rb_ary_clear(VALUE)
Definition: array.c:3862
VALUE rb_ary_tmp_new(long)
Definition: array.c:768
int VALUE v
#define rb_scan_args(argc, argvp, fmt,...)
#define list_for_each_safe(h, i, nxt, member)
#define TIMET2NUM(v)
void rb_gc_mark(VALUE)
Definition: gc.c:5228
#define rb_intern(str)
#define UNREACHABLE_RETURN(val)
struct timeval rb_time_interval(VALUE num)
Definition: time.c:2683
#define Qtrue
#define FL_UNSET_RAW(x, f)
#define Qnil
#define Qfalse
#define T_ARRAY
#define list_for_each(h, i, member)
void rb_threadptr_interrupt(rb_thread_t *th)
Definition: thread.c:505
#define RB_TYPE_P(obj, type)
#define TypedData_Make_Struct(klass, type, data_type, sval)
const VALUE * argv
void void ruby_xfree(void *)
Definition: gc.c:10183
VALUE rb_ary_shift(VALUE)
Definition: array.c:1294
#define Check_TypedStruct(v, t)
#define assert
#define rb_check_arity
unsigned long ID
VALUE rb_yield(VALUE)
Definition: vm_eval.c:1237
#define NUM2LONG(x)
void rb_define_method(VALUE, const char *, VALUE(*)(), int)
#define FL_SET_RAW(x, f)
struct sync_waiter w
Definition: thread_sync.c:873
union queue_waiter::@225 as
struct rb_queue * q
Definition: thread_sync.c:875
struct rb_szqueue * sq
Definition: thread_sync.c:876
struct list_head waitq
Definition: thread_sync.c:1276
rb_serial_t fork_gen
Definition: thread_sync.c:1277
struct list_head waitq
Definition: thread_sync.c:47
struct rb_mutex_struct * next_mutex
Definition: thread_sync.c:46
rb_thread_t * th
Definition: thread_sync.c:45
rb_execution_context_t * ec
enum rb_thread_status status
struct rb_mutex_struct * keeping_mutexes
VALUE timeout
Definition: thread_sync.c:1365
struct list_node node
Definition: thread_sync.c:10
rb_thread_t * th
Definition: thread_sync.c:9
#define RUBY_VM_CHECK_INTS_BLOCKING(ec)
Definition: thread.c:202
#define DEFINE_CLASS(name, super)
void rb_mutex_allow_trap(VALUE self, int val)
Definition: thread_sync.c:545
VALUE rb_mutex_new(void)
Definition: thread_sync.c:165
#define QUEUE_CLOSED
Definition: thread_sync.c:632
#define szqueue_waitq(sq)
Definition: thread_sync.c:565
VALUE rb_mutex_owned_p(VALUE self)
Definition: thread_sync.c:345
struct rb_mutex_struct rb_mutex_t
#define szqueue_pushq(sq)
Definition: thread_sync.c:566
#define MUTEX_ALLOW_TRAP
Definition: thread_sync.c:13
VALUE rb_mutex_synchronize(VALUE mutex, VALUE(*func)(VALUE arg), VALUE arg)
Definition: thread_sync.c:522
VALUE rb_mutex_unlock(VALUE self)
Definition: thread_sync.c:403
VALUE rb_mutex_sleep(VALUE self, VALUE timeout)
Definition: thread_sync.c:465
VALUE rb_mutex_lock(VALUE self)
Definition: thread_sync.c:333
VALUE rb_mutex_trylock(VALUE self)
Definition: thread_sync.c:203
#define queue_waitq(q)
Definition: thread_sync.c:557
VALUE rb_mutex_locked_p(VALUE self)
Definition: thread_sync.c:177
VALUE rb_obj_is_mutex(VALUE obj)
Definition: thread_sync.c:131
PACKED_STRUCT_UNALIGNED(struct rb_queue { struct list_head waitq;rb_serial_t fork_gen;const VALUE que;int num_waiting;})
NORETURN(static void raise_closed_queue_error(VALUE self))
#define mutex_mark
Definition: thread_sync.c:81