Ruby 3.2.1p31 (2023-02-08 revision 31819e82c88c6f8ecfaeb162519bfa26a14b21fd)
thread_sync.c
/* included by thread.c */
#include "ccan/list/list.h"
#include "builtin.h"

static VALUE rb_cMutex, rb_cQueue, rb_cSizedQueue, rb_cConditionVariable;
static VALUE rb_eClosedQueueError;

/* Mutex */
typedef struct rb_mutex_struct {
    rb_fiber_t *fiber;
    struct rb_mutex_struct *next_mutex;
    struct ccan_list_head waitq; /* protected by GVL */
} rb_mutex_t;

/* sync_waiter is always on-stack */
struct sync_waiter {
    VALUE self;
    rb_thread_t *th;
    rb_fiber_t *fiber;
    struct ccan_list_node node;
};

static inline rb_fiber_t*
nonblocking_fiber(rb_fiber_t *fiber)
{
    if (rb_fiberptr_blocking(fiber)) {
        return NULL;
    }

    return fiber;
}
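
/*
 * Waiters record a fiber here only when it runs under a nonblocking fiber
 * scheduler; blocking fibers get NULL, so wakeup falls back to
 * rb_threadptr_interrupt() instead of rb_fiber_scheduler_unblock().
 */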

struct queue_sleep_arg {
    VALUE self;
    VALUE timeout;
    rb_hrtime_t end;
};

#define MUTEX_ALLOW_TRAP FL_USER1

static void
sync_wakeup(struct ccan_list_head *head, long max)
{
    struct sync_waiter *cur = 0, *next;

    ccan_list_for_each_safe(head, cur, next, node) {
        ccan_list_del_init(&cur->node);

        if (cur->th->status != THREAD_KILLED) {
            if (cur->th->scheduler != Qnil && cur->fiber) {
                rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
            }
            else {
                rb_threadptr_interrupt(cur->th);
                cur->th->status = THREAD_RUNNABLE;
            }

            if (--max == 0) return;
        }
    }
}

static void
wakeup_one(struct ccan_list_head *head)
{
    sync_wakeup(head, 1);
}

static void
wakeup_all(struct ccan_list_head *head)
{
    sync_wakeup(head, LONG_MAX);
}

#if defined(HAVE_WORKING_FORK)
static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th);
static void rb_mutex_abandon_locking_mutex(rb_thread_t *th);
#endif
static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber);

/*
 * Document-class: Thread::Mutex
 *
 * Thread::Mutex implements a simple semaphore that can be used to
 * coordinate access to shared data from multiple concurrent threads.
 *
 * Example:
 *
 *   semaphore = Thread::Mutex.new
 *
 *   a = Thread.new {
 *     semaphore.synchronize {
 *       # access shared resource
 *     }
 *   }
 *
 *   b = Thread.new {
 *     semaphore.synchronize {
 *       # access shared resource
 *     }
 *   }
 *
 */

#define mutex_mark ((void(*)(void*))0)

static size_t
rb_mutex_num_waiting(rb_mutex_t *mutex)
{
    struct sync_waiter *w = 0;
    size_t n = 0;

    ccan_list_for_each(&mutex->waitq, w, node) {
        n++;
    }

    return n;
}

rb_thread_t* rb_fiber_threadptr(const rb_fiber_t *fiber);

static void
mutex_free(void *ptr)
{
    rb_mutex_t *mutex = ptr;
    if (mutex->fiber) {
        /* rb_warn("free locked mutex"); */
        const char *err = rb_mutex_unlock_th(mutex, rb_fiber_threadptr(mutex->fiber), mutex->fiber);
        if (err) rb_bug("%s", err);
    }
    ruby_xfree(ptr);
}

static size_t
mutex_memsize(const void *ptr)
{
    return sizeof(rb_mutex_t);
}

static const rb_data_type_t mutex_data_type = {
    "mutex",
    {mutex_mark, mutex_free, mutex_memsize,},
    0, 0, RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_FREE_IMMEDIATELY
};

static rb_mutex_t *
mutex_ptr(VALUE obj)
{
    rb_mutex_t *mutex;

    TypedData_Get_Struct(obj, rb_mutex_t, &mutex_data_type, mutex);

    return mutex;
}

VALUE
rb_obj_is_mutex(VALUE obj)
{
    return RBOOL(rb_typeddata_is_kind_of(obj, &mutex_data_type));
}

static VALUE
mutex_alloc(VALUE klass)
{
    VALUE obj;
    rb_mutex_t *mutex;

    obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);

    ccan_list_head_init(&mutex->waitq);
    return obj;
}

/*
 * call-seq:
 *    Thread::Mutex.new -> mutex
 *
 * Creates a new Mutex.
 */
static VALUE
mutex_initialize(VALUE self)
{
    return self;
}

VALUE
rb_mutex_new(void)
{
    return mutex_alloc(rb_cMutex);
}

/*
 * call-seq:
 *    mutex.locked? -> true or false
 *
 * Returns +true+ if this lock is currently held by some thread.
 */
VALUE
rb_mutex_locked_p(VALUE self)
{
    rb_mutex_t *mutex = mutex_ptr(self);

    return RBOOL(mutex->fiber);
}

static void
thread_mutex_insert(rb_thread_t *thread, rb_mutex_t *mutex)
{
    if (thread->keeping_mutexes) {
        mutex->next_mutex = thread->keeping_mutexes;
    }

    thread->keeping_mutexes = mutex;
}

static void
thread_mutex_remove(rb_thread_t *thread, rb_mutex_t *mutex)
{
    rb_mutex_t **keeping_mutexes = &thread->keeping_mutexes;

    while (*keeping_mutexes && *keeping_mutexes != mutex) {
        // Move to the next mutex in the list:
        keeping_mutexes = &(*keeping_mutexes)->next_mutex;
    }

    if (*keeping_mutexes) {
        *keeping_mutexes = mutex->next_mutex;
        mutex->next_mutex = NULL;
    }
}
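
/*
 * thread_mutex_remove() walks the singly linked keeping_mutexes list with a
 * pointer-to-pointer, so unlinking the head and unlinking an interior node
 * are the same assignment and no separate "previous node" bookkeeping is
 * needed.
 */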

static void
mutex_locked(rb_thread_t *th, VALUE self)
{
    rb_mutex_t *mutex = mutex_ptr(self);

    thread_mutex_insert(th, mutex);
}

/*
 * call-seq:
 *    mutex.try_lock -> true or false
 *
 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
 * lock was granted.
 */
VALUE
rb_mutex_trylock(VALUE self)
{
    rb_mutex_t *mutex = mutex_ptr(self);

    if (mutex->fiber == 0) {
        rb_fiber_t *fiber = GET_EC()->fiber_ptr;
        rb_thread_t *th = GET_THREAD();
        mutex->fiber = fiber;

        mutex_locked(th, self);
        return Qtrue;
    }

    return Qfalse;
}
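
/*
 * For instance, a minimal Ruby-level sketch of the semantics above:
 *
 *   m = Thread::Mutex.new
 *   m.try_lock          #=> true (lock acquired without blocking)
 *   m.try_lock          #=> false (already held; no ThreadError is raised)
 *   m.unlock
 */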

/*
 * At maximum, only one thread can use cond_timedwait and watch deadlock
 * periodically. Multiple polling threads (i.e. concurrent deadlock checks)
 * introduce new race conditions. [Bug #6278] [ruby-core:44275]
 */
static const rb_thread_t *patrol_thread = NULL;

static VALUE
mutex_owned_p(rb_fiber_t *fiber, rb_mutex_t *mutex)
{
    return RBOOL(mutex->fiber == fiber);
}

static VALUE
call_rb_fiber_scheduler_block(VALUE mutex)
{
    return rb_fiber_scheduler_block(rb_fiber_scheduler_current(), mutex, Qnil);
}

static VALUE
delete_from_waitq(VALUE value)
{
    struct sync_waiter *sync_waiter = (void *)value;
    ccan_list_del(&sync_waiter->node);

    return Qnil;
}

static VALUE
do_mutex_lock(VALUE self, int interruptible_p)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = ec->thread_ptr;
    rb_fiber_t *fiber = ec->fiber_ptr;
    rb_mutex_t *mutex = mutex_ptr(self);

    /* When running trap handler */
    if (!FL_TEST_RAW(self, MUTEX_ALLOW_TRAP) &&
        th->ec->interrupt_mask & TRAP_INTERRUPT_MASK) {
        rb_raise(rb_eThreadError, "can't be called from trap context");
    }

    if (rb_mutex_trylock(self) == Qfalse) {
        if (mutex->fiber == fiber) {
            rb_raise(rb_eThreadError, "deadlock; recursive locking");
        }

        while (mutex->fiber != fiber) {
            VALUE scheduler = rb_fiber_scheduler_current();
            if (scheduler != Qnil) {
                struct sync_waiter sync_waiter = {
                    .self = self,
                    .th = th,
                    .fiber = nonblocking_fiber(fiber)
                };

                ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);

                rb_ensure(call_rb_fiber_scheduler_block, self, delete_from_waitq, (VALUE)&sync_waiter);

                if (!mutex->fiber) {
                    mutex->fiber = fiber;
                }
            }
            else {
                if (!th->vm->thread_ignore_deadlock && rb_fiber_threadptr(mutex->fiber) == th) {
                    rb_raise(rb_eThreadError, "deadlock; lock already owned by another fiber belonging to the same thread");
                }

                enum rb_thread_status prev_status = th->status;
                rb_hrtime_t *timeout = 0;
                rb_hrtime_t rel = rb_msec2hrtime(100);

                th->status = THREAD_STOPPED_FOREVER;
                th->locking_mutex = self;
                rb_ractor_sleeper_threads_inc(th->ractor);
                /*
                 * Careful: while some contended threads are in native_sleep(),
                 * ractor->sleeper is an unstable value. We have to avoid both
                 * deadlock and busy looping.
                 */
                if ((rb_ractor_living_thread_num(th->ractor) == rb_ractor_sleeper_thread_num(th->ractor)) &&
                    !patrol_thread) {
                    timeout = &rel;
                    patrol_thread = th;
                }

                struct sync_waiter sync_waiter = {
                    .self = self,
                    .th = th,
                    .fiber = nonblocking_fiber(fiber)
                };

                ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);

                native_sleep(th, timeout); /* release GVL */

                ccan_list_del(&sync_waiter.node);

                if (!mutex->fiber) {
                    mutex->fiber = fiber;
                }

                if (patrol_thread == th)
                    patrol_thread = NULL;

                th->locking_mutex = Qfalse;
                if (mutex->fiber && timeout && !RUBY_VM_INTERRUPTED(th->ec)) {
                    rb_check_deadlock(th->ractor);
                }
                if (th->status == THREAD_STOPPED_FOREVER) {
                    th->status = prev_status;
                }
                rb_ractor_sleeper_threads_dec(th->ractor);
            }

            if (interruptible_p) {
                /* release mutex before checking for interrupts...as interrupt checking
                 * code might call rb_raise() */
                if (mutex->fiber == fiber) mutex->fiber = 0;
                RUBY_VM_CHECK_INTS_BLOCKING(th->ec); /* may release mutex */
                if (!mutex->fiber) {
                    mutex->fiber = fiber;
                }
            }
        }

        if (mutex->fiber == fiber) mutex_locked(th, self);
    }

    // assertion
    if (mutex_owned_p(fiber, mutex) == Qfalse) rb_bug("do_mutex_lock: mutex is not owned.");

    return self;
}
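
/*
 * do_mutex_lock() has two contended-wait paths: under a fiber scheduler the
 * waiter parks in rb_fiber_scheduler_block() and is resumed by unlock via
 * rb_fiber_scheduler_unblock(); otherwise the thread sleeps in
 * native_sleep(), and at most one "patrol" thread sleeps with a 100ms
 * timeout so the deadlock detector still gets to run.
 */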

static VALUE
mutex_lock_uninterruptible(VALUE self)
{
    return do_mutex_lock(self, 0);
}

/*
 * call-seq:
 *    mutex.lock -> self
 *
 * Attempts to grab the lock and waits if it isn't available.
 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
 */
VALUE
rb_mutex_lock(VALUE self)
{
    return do_mutex_lock(self, 1);
}

/*
 * call-seq:
 *    mutex.owned? -> true or false
 *
 * Returns +true+ if this lock is currently held by the current thread.
 */
VALUE
rb_mutex_owned_p(VALUE self)
{
    rb_fiber_t *fiber = GET_EC()->fiber_ptr;
    rb_mutex_t *mutex = mutex_ptr(self);

    return mutex_owned_p(fiber, mutex);
}
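
/*
 * For instance, a minimal Ruby-level sketch:
 *
 *   m = Thread::Mutex.new
 *   m.owned?                        #=> false
 *   m.lock
 *   m.owned?                        #=> true
 *   Thread.new { m.owned? }.value   #=> false (held, but not by that thread)
 */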

static const char *
rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber)
{
    const char *err = NULL;

    if (mutex->fiber == 0) {
        err = "Attempt to unlock a mutex which is not locked";
    }
    else if (mutex->fiber != fiber) {
        err = "Attempt to unlock a mutex which is locked by another thread/fiber";
    }
    else {
        struct sync_waiter *cur = 0, *next;

        mutex->fiber = 0;
        ccan_list_for_each_safe(&mutex->waitq, cur, next, node) {
            ccan_list_del_init(&cur->node);

            if (cur->th->scheduler != Qnil && cur->fiber) {
                rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
                goto found;
            }
            else {
                switch (cur->th->status) {
                  case THREAD_RUNNABLE: /* from someone else calling Thread#run */
                  case THREAD_STOPPED_FOREVER: /* likely (rb_mutex_lock) */
                    rb_threadptr_interrupt(cur->th);
                    goto found;
                  case THREAD_STOPPED: /* probably impossible */
                    rb_bug("unexpected THREAD_STOPPED");
                  case THREAD_KILLED:
                    /* not sure about this, possible in exit GC? */
                    rb_bug("unexpected THREAD_KILLED");
                    continue;
                }
            }
        }

      found:
        thread_mutex_remove(th, mutex);
    }

    return err;
}

/*
 * call-seq:
 *    mutex.unlock -> self
 *
 * Releases the lock.
 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
 */
VALUE
rb_mutex_unlock(VALUE self)
{
    const char *err;
    rb_mutex_t *mutex = mutex_ptr(self);
    rb_thread_t *th = GET_THREAD();

    err = rb_mutex_unlock_th(mutex, th, GET_EC()->fiber_ptr);
    if (err) rb_raise(rb_eThreadError, "%s", err);

    return self;
}

#if defined(HAVE_WORKING_FORK)
static void
rb_mutex_abandon_keeping_mutexes(rb_thread_t *th)
{
    rb_mutex_abandon_all(th->keeping_mutexes);
    th->keeping_mutexes = NULL;
}

static void
rb_mutex_abandon_locking_mutex(rb_thread_t *th)
{
    if (th->locking_mutex) {
        rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);

        ccan_list_head_init(&mutex->waitq);
        th->locking_mutex = Qfalse;
    }
}

static void
rb_mutex_abandon_all(rb_mutex_t *mutexes)
{
    rb_mutex_t *mutex;

    while (mutexes) {
        mutex = mutexes;
        mutexes = mutex->next_mutex;
        mutex->fiber = 0;
        mutex->next_mutex = 0;
        ccan_list_head_init(&mutex->waitq);
    }
}
#endif

static VALUE
rb_mutex_sleep_forever(VALUE self)
{
    rb_thread_sleep_deadly_allow_spurious_wakeup(self, Qnil, 0);
    return Qnil;
}

static VALUE
rb_mutex_wait_for(VALUE time)
{
    rb_hrtime_t *rel = (rb_hrtime_t *)time;
    /* permit spurious check */
    return RBOOL(sleep_hrtime(GET_THREAD(), *rel, 0));
}
VALUE
rb_mutex_sleep(VALUE self, VALUE timeout)
{
    struct timeval t;
    VALUE woken = Qtrue;

    if (!NIL_P(timeout)) {
        t = rb_time_interval(timeout);
    }

    rb_mutex_unlock(self);
    time_t beg = time(0);

    VALUE scheduler = rb_fiber_scheduler_current();
    if (scheduler != Qnil) {
        rb_fiber_scheduler_kernel_sleep(scheduler, timeout);
        mutex_lock_uninterruptible(self);
    }
    else {
        if (NIL_P(timeout)) {
            rb_ensure(rb_mutex_sleep_forever, self, mutex_lock_uninterruptible, self);
        }
        else {
            rb_hrtime_t rel = rb_timeval2hrtime(&t);
            woken = rb_ensure(rb_mutex_wait_for, (VALUE)&rel, mutex_lock_uninterruptible, self);
        }
    }

    RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
    if (!woken) return Qnil;
    time_t end = time(0) - beg;
    return TIMET2NUM(end);
}

/*
 * call-seq:
 *    mutex.sleep(timeout = nil) -> number or nil
 *
 * Releases the lock and sleeps +timeout+ seconds if it is given and
 * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
 * the current thread.
 *
 * When the thread is next woken up, it will attempt to reacquire
 * the lock.
 *
 * Note that this method can wake up without an explicit Thread#wakeup
 * call; for example, on receiving a signal.
 *
 * Returns the slept time in seconds if woken up, or +nil+ if timed out.
 */
static VALUE
mutex_sleep(int argc, VALUE *argv, VALUE self)
{
    VALUE timeout;

    timeout = rb_check_arity(argc, 0, 1) ? argv[0] : Qnil;
    return rb_mutex_sleep(self, timeout);
}
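
/*
 * For instance, a minimal Ruby-level sketch (per the semantics above):
 *
 *   m = Thread::Mutex.new
 *   m.lock
 *   m.sleep(0.1)   #=> nil, since the timeout elapsed with no wakeup;
 *                  #   the lock was released while sleeping and is
 *                  #   reacquired before returning
 */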

/*
 * call-seq:
 *    mutex.synchronize { ... } -> result of the block
 *
 * Obtains a lock, runs the block, and releases the lock when the block
 * completes. See the example under Thread::Mutex.
 */

VALUE
rb_mutex_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
{
    rb_mutex_lock(mutex);
    return rb_ensure(func, arg, rb_mutex_unlock, mutex);
}

/*
 * call-seq:
 *    mutex.synchronize { ... } -> result of the block
 *
 * Obtains a lock, runs the block, and releases the lock when the block
 * completes. See the example under Thread::Mutex.
 */
static VALUE
rb_mutex_synchronize_m(VALUE self)
{
    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }

    return rb_mutex_synchronize(self, rb_yield, Qundef);
}
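
/*
 * For instance, a minimal Ruby-level sketch:
 *
 *   m = Thread::Mutex.new
 *   value = m.synchronize { 21 * 2 }   # unlock runs via rb_ensure(),
 *   value                              #=> 42, even if the block raises
 */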

void
rb_mutex_allow_trap(VALUE self, int val)
{
    Check_TypedStruct(self, &mutex_data_type);

    if (val)
        FL_SET_RAW(self, MUTEX_ALLOW_TRAP);
    else
        FL_UNSET_RAW(self, MUTEX_ALLOW_TRAP);
}

/* Queue */

#define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq)
PACKED_STRUCT_UNALIGNED(struct rb_queue {
    struct ccan_list_head waitq;
    rb_serial_t fork_gen;
    const VALUE que;
    int num_waiting;
});

#define szqueue_waitq(sq) UNALIGNED_MEMBER_PTR(sq, q.waitq)
#define szqueue_pushq(sq) UNALIGNED_MEMBER_PTR(sq, pushq)
PACKED_STRUCT_UNALIGNED(struct rb_szqueue {
    struct rb_queue q;
    int num_waiting_push;
    struct ccan_list_head pushq;
    long max;
});

static void
queue_mark(void *ptr)
{
    struct rb_queue *q = ptr;

    /* no need to mark threads in waitq, they are on stack */
    rb_gc_mark(q->que);
}

static size_t
queue_memsize(const void *ptr)
{
    return sizeof(struct rb_queue);
}

static const rb_data_type_t queue_data_type = {
    "queue",
    {queue_mark, RUBY_TYPED_DEFAULT_FREE, queue_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
};

static VALUE
queue_alloc(VALUE klass)
{
    VALUE obj;
    struct rb_queue *q;

    obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q);
    ccan_list_head_init(queue_waitq(q));
    return obj;
}

static int
queue_fork_check(struct rb_queue *q)
{
    rb_serial_t fork_gen = GET_VM()->fork_gen;

    if (q->fork_gen == fork_gen) {
        return 0;
    }
    /* forked children can't reach into parent thread stacks */
    q->fork_gen = fork_gen;
    ccan_list_head_init(queue_waitq(q));
    q->num_waiting = 0;
    return 1;
}

static struct rb_queue *
queue_ptr(VALUE obj)
{
    struct rb_queue *q;

    TypedData_Get_Struct(obj, struct rb_queue, &queue_data_type, q);
    queue_fork_check(q);

    return q;
}

#define QUEUE_CLOSED FL_USER5

static rb_hrtime_t
queue_timeout2hrtime(VALUE timeout)
{
    if (NIL_P(timeout)) {
        return (rb_hrtime_t)0;
    }
    rb_hrtime_t rel = 0;
    if (FIXNUM_P(timeout)) {
        rel = rb_sec2hrtime(NUM2TIMET(timeout));
    }
    else {
        double2hrtime(&rel, rb_num2dbl(timeout));
    }
    return rb_hrtime_add(rel, rb_hrtime_now());
}
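
/*
 * A nil timeout maps to 0, which the pop/push loops treat as "no deadline";
 * a numeric timeout becomes an absolute rb_hrtime_t deadline, so spurious
 * wakeups inside the wait loops cannot extend the total wait.
 */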

static void
szqueue_mark(void *ptr)
{
    struct rb_szqueue *sq = ptr;

    queue_mark(&sq->q);
}

static size_t
szqueue_memsize(const void *ptr)
{
    return sizeof(struct rb_szqueue);
}

static const rb_data_type_t szqueue_data_type = {
    "sized_queue",
    {szqueue_mark, RUBY_TYPED_DEFAULT_FREE, szqueue_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
};

static VALUE
szqueue_alloc(VALUE klass)
{
    struct rb_szqueue *sq;
    VALUE obj = TypedData_Make_Struct(klass, struct rb_szqueue,
                                      &szqueue_data_type, sq);
    ccan_list_head_init(szqueue_waitq(sq));
    ccan_list_head_init(szqueue_pushq(sq));
    return obj;
}

static struct rb_szqueue *
szqueue_ptr(VALUE obj)
{
    struct rb_szqueue *sq;

    TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq);
    if (queue_fork_check(&sq->q)) {
        ccan_list_head_init(szqueue_pushq(sq));
        sq->num_waiting_push = 0;
    }

    return sq;
}

static VALUE
ary_buf_new(void)
{
    return rb_ary_hidden_new(1);
}

static VALUE
check_array(VALUE obj, VALUE ary)
{
    if (!RB_TYPE_P(ary, T_ARRAY)) {
        rb_raise(rb_eTypeError, "%+"PRIsVALUE" not initialized", obj);
    }
    return ary;
}

static long
queue_length(VALUE self, struct rb_queue *q)
{
    return RARRAY_LEN(check_array(self, q->que));
}

static int
queue_closed_p(VALUE self)
{
    return FL_TEST_RAW(self, QUEUE_CLOSED) != 0;
}

/*
 * Document-class: ClosedQueueError
 *
 * The exception class which will be raised when pushing into a closed
 * Queue. See Thread::Queue#close and Thread::SizedQueue#close.
 */

NORETURN(static void raise_closed_queue_error(VALUE self));

static void
raise_closed_queue_error(VALUE self)
{
    rb_raise(rb_eClosedQueueError, "queue closed");
}

static VALUE
queue_closed_result(VALUE self, struct rb_queue *q)
{
    assert(queue_length(self, q) == 0);
    return Qnil;
}

/*
 * Document-class: Thread::Queue
 *
 * The Thread::Queue class implements multi-producer, multi-consumer
 * queues. It is especially useful in threaded programming when
 * information must be exchanged safely between multiple threads. The
 * Thread::Queue class implements all the required locking semantics.
 *
 * The class implements a FIFO (first in, first out) type of queue:
 * the tasks added first are the first to be retrieved.
 *
 * Example:
 *
 *   queue = Thread::Queue.new
 *
 *   producer = Thread.new do
 *     5.times do |i|
 *       sleep rand(i) # simulate expense
 *       queue << i
 *       puts "#{i} produced"
 *     end
 *   end
 *
 *   consumer = Thread.new do
 *     5.times do |i|
 *       value = queue.pop
 *       sleep rand(i/2) # simulate expense
 *       puts "consumed #{value}"
 *     end
 *   end
 *
 *   consumer.join
 *
 */

/*
 * Document-method: Queue::new
 *
 * call-seq:
 *   Thread::Queue.new -> empty_queue
 *   Thread::Queue.new(enumerable) -> queue
 *
 * Creates a new queue instance, optionally using the contents of an +enumerable+
 * for its initial state.
 *
 * Example:
 *
 *    q = Thread::Queue.new
 *    #=> #<Thread::Queue:0x00007ff7501110d0>
 *    q.empty?
 *    #=> true
 *
 *    q = Thread::Queue.new([1, 2, 3])
 *    #=> #<Thread::Queue:0x00007ff7500ec500>
 *    q.empty?
 *    #=> false
 *    q.pop
 *    #=> 1
 */

static VALUE
rb_queue_initialize(int argc, VALUE *argv, VALUE self)
{
    VALUE initial;
    struct rb_queue *q = queue_ptr(self);
    if ((argc = rb_scan_args(argc, argv, "01", &initial)) == 1) {
        initial = rb_to_array(initial);
    }
    RB_OBJ_WRITE(self, &q->que, ary_buf_new());
    ccan_list_head_init(queue_waitq(q));
    if (argc == 1) {
        rb_ary_concat(q->que, initial);
    }
    return self;
}

static VALUE
queue_do_push(VALUE self, struct rb_queue *q, VALUE obj)
{
    if (queue_closed_p(self)) {
        raise_closed_queue_error(self);
    }
    rb_ary_push(check_array(self, q->que), obj);
    wakeup_one(queue_waitq(q));
    return self;
}

/*
 * Document-method: Thread::Queue#close
 * call-seq:
 *   close
 *
 * Closes the queue. A closed queue cannot be re-opened.
 *
 * After the call to close completes, the following are true:
 *
 * - +closed?+ will return true
 *
 * - +close+ will be ignored.
 *
 * - calling enq/push/<< will raise a +ClosedQueueError+.
 *
 * - when +empty?+ is false, calling deq/pop/shift will return an object
 *   from the queue as usual.
 * - when +empty?+ is true, deq(false) will not suspend the thread and will return nil.
 *   deq(true) will raise a +ThreadError+.
 *
 * ClosedQueueError inherits from StopIteration, so closing a queue can be
 * used to break out of a loop block.
 *
 * Example:
 *
 *    q = Thread::Queue.new
 *    Thread.new{
 *      while e = q.deq # wait for nil to break loop
 *        # ...
 *      end
 *    }
 *    q.close
 */

static VALUE
rb_queue_close(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);

    if (!queue_closed_p(self)) {
        FL_SET(self, QUEUE_CLOSED);

        wakeup_all(queue_waitq(q));
    }

    return self;
}

/*
 * Document-method: Thread::Queue#closed?
 * call-seq: closed?
 *
 * Returns +true+ if the queue is closed.
 */

static VALUE
rb_queue_closed_p(VALUE self)
{
    return RBOOL(queue_closed_p(self));
}

/*
 * Document-method: Thread::Queue#push
 * call-seq:
 *   push(object)
 *   enq(object)
 *   <<(object)
 *
 * Pushes the given +object+ to the queue.
 */

static VALUE
rb_queue_push(VALUE self, VALUE obj)
{
    return queue_do_push(self, queue_ptr(self), obj);
}

static VALUE
queue_sleep(VALUE _args)
{
    struct queue_sleep_arg *args = (struct queue_sleep_arg *)_args;
    rb_thread_sleep_deadly_allow_spurious_wakeup(args->self, args->timeout, args->end);
    return Qnil;
}

struct queue_waiter {
    struct sync_waiter w;
    union {
        struct rb_queue *q;
        struct rb_szqueue *sq;
    } as;
};

static VALUE
queue_sleep_done(VALUE p)
{
    struct queue_waiter *qw = (struct queue_waiter *)p;

    ccan_list_del(&qw->w.node);
    qw->as.q->num_waiting--;

    return Qfalse;
}

static VALUE
szqueue_sleep_done(VALUE p)
{
    struct queue_waiter *qw = (struct queue_waiter *)p;

    ccan_list_del(&qw->w.node);
    qw->as.sq->num_waiting_push--;

    return Qfalse;
}

static VALUE
queue_do_pop(VALUE self, struct rb_queue *q, int should_block, VALUE timeout)
{
    check_array(self, q->que);
    if (RARRAY_LEN(q->que) == 0) {
        if (!should_block) {
            rb_raise(rb_eThreadError, "queue empty");
        }

        if (RTEST(rb_equal(INT2FIX(0), timeout))) {
            return Qnil;
        }
    }

    rb_hrtime_t end = queue_timeout2hrtime(timeout);
    while (RARRAY_LEN(q->que) == 0) {
        if (queue_closed_p(self)) {
            return queue_closed_result(self, q);
        }
        else {
            rb_execution_context_t *ec = GET_EC();

            assert(RARRAY_LEN(q->que) == 0);
            assert(queue_closed_p(self) == 0);

            struct queue_waiter queue_waiter = {
                .w = {.self = self, .th = ec->thread_ptr, .fiber = nonblocking_fiber(ec->fiber_ptr)},
                .as = {.q = q}
            };

            struct ccan_list_head *waitq = queue_waitq(q);

            ccan_list_add_tail(waitq, &queue_waiter.w.node);
            queue_waiter.as.q->num_waiting++;

            struct queue_sleep_arg queue_sleep_arg = {
                .self = self,
                .timeout = timeout,
                .end = end
            };

            rb_ensure(queue_sleep, (VALUE)&queue_sleep_arg, queue_sleep_done, (VALUE)&queue_waiter);
            if (!NIL_P(timeout) && (rb_hrtime_now() >= end))
                break;
        }
    }

    return rb_ary_shift(q->que);
}
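
/*
 * For instance, a minimal Ruby-level sketch of the pop paths above
 * (the timeout: keyword is the Ruby 3.2 interface to this function):
 *
 *   q = Thread::Queue.new
 *   q << 1
 *   q.pop                  #=> 1
 *   q.pop(timeout: 0.1)    #=> nil once the deadline passes
 *   q.pop(true)            # raises ThreadError ("queue empty")
 */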

static VALUE
rb_queue_pop(rb_execution_context_t *ec, VALUE self, VALUE non_block, VALUE timeout)
{
    return queue_do_pop(self, queue_ptr(self), !RTEST(non_block), timeout);
}

/*
 * Document-method: Thread::Queue#empty?
 * call-seq: empty?
 *
 * Returns +true+ if the queue is empty.
 */

static VALUE
rb_queue_empty_p(VALUE self)
{
    return RBOOL(queue_length(self, queue_ptr(self)) == 0);
}

/*
 * Document-method: Thread::Queue#clear
 *
 * Removes all objects from the queue.
 */

static VALUE
rb_queue_clear(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);

    rb_ary_clear(check_array(self, q->que));
    return self;
}

/*
 * Document-method: Thread::Queue#length
 * call-seq:
 *   length
 *   size
 *
 * Returns the length of the queue.
 */

static VALUE
rb_queue_length(VALUE self)
{
    return LONG2NUM(queue_length(self, queue_ptr(self)));
}

/*
 * Document-method: Thread::Queue#num_waiting
 *
 * Returns the number of threads waiting on the queue.
 */

static VALUE
rb_queue_num_waiting(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);

    return INT2NUM(q->num_waiting);
}

/*
 * Document-class: Thread::SizedQueue
 *
 * This class represents queues with a specified maximum capacity. The push
 * operation may block if the queue is at full capacity.
 *
 * See Thread::Queue for an example of how a Thread::SizedQueue works.
 */

/*
 * Document-method: SizedQueue::new
 * call-seq: new(max)
 *
 * Creates a fixed-length queue with a maximum size of +max+.
 */

static VALUE
rb_szqueue_initialize(VALUE self, VALUE vmax)
{
    long max;
    struct rb_szqueue *sq = szqueue_ptr(self);

    max = NUM2LONG(vmax);
    if (max <= 0) {
        rb_raise(rb_eArgError, "queue size must be positive");
    }

    RB_OBJ_WRITE(self, &sq->q.que, ary_buf_new());
    ccan_list_head_init(szqueue_waitq(sq));
    ccan_list_head_init(szqueue_pushq(sq));
    sq->max = max;

    return self;
}
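
/*
 * For instance, a minimal Ruby-level sketch:
 *
 *   q = Thread::SizedQueue.new(2)
 *   q << 1
 *   q << 2
 *   t = Thread.new { q << 3 }   # blocks: the queue is at capacity
 *   q.pop                       #=> 1; frees a slot and wakes the producer
 *   t.join
 *   q.size                      #=> 2
 */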

/*
 * Document-method: Thread::SizedQueue#close
 * call-seq:
 *   close
 *
 * Similar to Thread::Queue#close.
 *
 * The difference is the behavior with respect to waiting enqueuing threads:
 * if there are threads waiting to enqueue, they are interrupted by raising
 * ClosedQueueError('queue closed').
 */
static VALUE
rb_szqueue_close(VALUE self)
{
    if (!queue_closed_p(self)) {
        struct rb_szqueue *sq = szqueue_ptr(self);

        FL_SET(self, QUEUE_CLOSED);
        wakeup_all(szqueue_waitq(sq));
        wakeup_all(szqueue_pushq(sq));
    }
    return self;
}

/*
 * Document-method: Thread::SizedQueue#max
 *
 * Returns the maximum size of the queue.
 */

static VALUE
rb_szqueue_max_get(VALUE self)
{
    return LONG2NUM(szqueue_ptr(self)->max);
}

/*
 * Document-method: Thread::SizedQueue#max=
 * call-seq: max=(number)
 *
 * Sets the maximum size of the queue to the given +number+.
 */

static VALUE
rb_szqueue_max_set(VALUE self, VALUE vmax)
{
    long max = NUM2LONG(vmax);
    long diff = 0;
    struct rb_szqueue *sq = szqueue_ptr(self);

    if (max <= 0) {
        rb_raise(rb_eArgError, "queue size must be positive");
    }
    if (max > sq->max) {
        diff = max - sq->max;
    }
    sq->max = max;
    sync_wakeup(szqueue_pushq(sq), diff);
    return vmax;
}
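
/*
 * When the capacity grows, up to (new max - old max) blocked producers are
 * woken, one per newly available slot; each re-checks the queue length
 * before pushing, so a spurious wakeup is harmless.
 */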

static VALUE
rb_szqueue_push(rb_execution_context_t *ec, VALUE self, VALUE object, VALUE non_block, VALUE timeout)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    if (queue_length(self, &sq->q) >= sq->max) {
        if (RTEST(non_block)) {
            rb_raise(rb_eThreadError, "queue full");
        }

        if (RTEST(rb_equal(INT2FIX(0), timeout))) {
            return Qnil;
        }
    }

    rb_hrtime_t end = queue_timeout2hrtime(timeout);
    while (queue_length(self, &sq->q) >= sq->max) {
        if (queue_closed_p(self)) {
            raise_closed_queue_error(self);
        }
        else {
            rb_execution_context_t *ec = GET_EC();
            struct queue_waiter queue_waiter = {
                .w = {.self = self, .th = ec->thread_ptr, .fiber = nonblocking_fiber(ec->fiber_ptr)},
                .as = {.sq = sq}
            };

            struct ccan_list_head *pushq = szqueue_pushq(sq);

            ccan_list_add_tail(pushq, &queue_waiter.w.node);
            sq->num_waiting_push++;

            struct queue_sleep_arg queue_sleep_arg = {
                .self = self,
                .timeout = timeout,
                .end = end
            };
            rb_ensure(queue_sleep, (VALUE)&queue_sleep_arg, szqueue_sleep_done, (VALUE)&queue_waiter);
            if (!NIL_P(timeout) && rb_hrtime_now() >= end) {
                return Qnil;
            }
        }
    }

    return queue_do_push(self, &sq->q, object);
}

static VALUE
szqueue_do_pop(VALUE self, int should_block, VALUE timeout)
{
    struct rb_szqueue *sq = szqueue_ptr(self);
    VALUE retval = queue_do_pop(self, &sq->q, should_block, timeout);

    if (queue_length(self, &sq->q) < sq->max) {
        wakeup_one(szqueue_pushq(sq));
    }

    return retval;
}

static VALUE
rb_szqueue_pop(rb_execution_context_t *ec, VALUE self, VALUE non_block, VALUE timeout)
{
    return szqueue_do_pop(self, !RTEST(non_block), timeout);
}

/*
 * Document-method: Thread::SizedQueue#clear
 *
 * Removes all objects from the queue.
 */

static VALUE
rb_szqueue_clear(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    rb_ary_clear(check_array(self, sq->q.que));
    wakeup_all(szqueue_pushq(sq));
    return self;
}

/*
 * Document-method: Thread::SizedQueue#length
 * call-seq:
 *   length
 *   size
 *
 * Returns the length of the queue.
 */

static VALUE
rb_szqueue_length(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    return LONG2NUM(queue_length(self, &sq->q));
}

/*
 * Document-method: Thread::SizedQueue#num_waiting
 *
 * Returns the number of threads waiting on the queue.
 */

static VALUE
rb_szqueue_num_waiting(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    return INT2NUM(sq->q.num_waiting + sq->num_waiting_push);
}

/*
 * Document-method: Thread::SizedQueue#empty?
 * call-seq: empty?
 *
 * Returns +true+ if the queue is empty.
 */

static VALUE
rb_szqueue_empty_p(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    return RBOOL(queue_length(self, &sq->q) == 0);
}


/* ConditionVariable */
struct rb_condvar {
    struct ccan_list_head waitq;
    rb_serial_t fork_gen;
};

/*
 * Document-class: Thread::ConditionVariable
 *
 * ConditionVariable objects augment class Mutex. Using condition variables,
 * it is possible to suspend while in the middle of a critical section until a
 * resource becomes available.
 *
 * Example:
 *
 *   mutex = Thread::Mutex.new
 *   resource = Thread::ConditionVariable.new
 *
 *   a = Thread.new {
 *     mutex.synchronize {
 *       # Thread 'a' now needs the resource
 *       resource.wait(mutex)
 *       # 'a' can now have the resource
 *     }
 *   }
 *
 *   b = Thread.new {
 *     mutex.synchronize {
 *       # Thread 'b' has finished using the resource
 *       resource.signal
 *     }
 *   }
 */

static size_t
condvar_memsize(const void *ptr)
{
    return sizeof(struct rb_condvar);
}

static const rb_data_type_t cv_data_type = {
    "condvar",
    {0, RUBY_TYPED_DEFAULT_FREE, condvar_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
};

static struct rb_condvar *
condvar_ptr(VALUE self)
{
    struct rb_condvar *cv;
    rb_serial_t fork_gen = GET_VM()->fork_gen;

    TypedData_Get_Struct(self, struct rb_condvar, &cv_data_type, cv);

    /* forked children can't reach into parent thread stacks */
    if (cv->fork_gen != fork_gen) {
        cv->fork_gen = fork_gen;
        ccan_list_head_init(&cv->waitq);
    }

    return cv;
}

static VALUE
condvar_alloc(VALUE klass)
{
    struct rb_condvar *cv;
    VALUE obj;

    obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv);
    ccan_list_head_init(&cv->waitq);

    return obj;
}

/*
 * Document-method: ConditionVariable::new
 *
 * Creates a new condition variable instance.
 */

static VALUE
rb_condvar_initialize(VALUE self)
{
    struct rb_condvar *cv = condvar_ptr(self);
    ccan_list_head_init(&cv->waitq);
    return self;
}

struct sleep_call {
    VALUE mutex;
    VALUE timeout;
};

static ID id_sleep;

static VALUE
do_sleep(VALUE args)
{
    struct sleep_call *p = (struct sleep_call *)args;
    return rb_funcallv(p->mutex, id_sleep, 1, &p->timeout);
}

/*
 * Document-method: Thread::ConditionVariable#wait
 * call-seq: wait(mutex, timeout=nil)
 *
 * Releases the lock held in +mutex+ and waits; reacquires the lock on wakeup.
 *
 * If +timeout+ is given, this method returns after +timeout+ seconds have
 * passed, even if no other thread signals.
 *
 * Returns the slept result on +mutex+.
 */

static VALUE
rb_condvar_wait(int argc, VALUE *argv, VALUE self)
{
    rb_execution_context_t *ec = GET_EC();

    struct rb_condvar *cv = condvar_ptr(self);
    struct sleep_call args;

    rb_scan_args(argc, argv, "11", &args.mutex, &args.timeout);

    struct sync_waiter sync_waiter = {
        .self = args.mutex,
        .th = ec->thread_ptr,
        .fiber = nonblocking_fiber(ec->fiber_ptr)
    };

    ccan_list_add_tail(&cv->waitq, &sync_waiter.node);
    return rb_ensure(do_sleep, (VALUE)&args, delete_from_waitq, (VALUE)&sync_waiter);
}
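
/*
 * For instance, a minimal Ruby-level sketch:
 *
 *   m  = Thread::Mutex.new
 *   cv = Thread::ConditionVariable.new
 *   m.synchronize do
 *     cv.wait(m, 1)   # returns after ~1 second if nobody signals;
 *   end               # the mutex is reacquired either way
 */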

/*
 * Document-method: Thread::ConditionVariable#signal
 *
 * Wakes up the first thread in line waiting for this lock.
 */

static VALUE
rb_condvar_signal(VALUE self)
{
    struct rb_condvar *cv = condvar_ptr(self);
    wakeup_one(&cv->waitq);
    return self;
}

/*
 * Document-method: Thread::ConditionVariable#broadcast
 *
 * Wakes up all threads waiting for this lock.
 */

static VALUE
rb_condvar_broadcast(VALUE self)
{
    struct rb_condvar *cv = condvar_ptr(self);
    wakeup_all(&cv->waitq);
    return self;
}

NORETURN(static VALUE undumpable(VALUE obj));
/* :nodoc: */
static VALUE
undumpable(VALUE obj)
{
    rb_raise(rb_eTypeError, "can't dump %"PRIsVALUE, rb_obj_class(obj));
    UNREACHABLE_RETURN(Qnil);
}

static VALUE
define_thread_class(VALUE outer, const ID name, VALUE super)
{
    VALUE klass = rb_define_class_id_under(outer, name, super);
    rb_const_set(rb_cObject, name, klass);
    return klass;
}

static void
Init_thread_sync(void)
{
#undef rb_intern
#if defined(TEACH_RDOC) && TEACH_RDOC == 42
    rb_cMutex = rb_define_class_under(rb_cThread, "Mutex", rb_cObject);
    rb_cConditionVariable = rb_define_class_under(rb_cThread, "ConditionVariable", rb_cObject);
    rb_cQueue = rb_define_class_under(rb_cThread, "Queue", rb_cObject);
    rb_cSizedQueue = rb_define_class_under(rb_cThread, "SizedQueue", rb_cObject);
#endif

#define DEFINE_CLASS(name, super) \
    rb_c##name = define_thread_class(rb_cThread, rb_intern(#name), rb_c##super)

    /* Mutex */
    DEFINE_CLASS(Mutex, Object);
    rb_define_alloc_func(rb_cMutex, mutex_alloc);
    rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
    rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
    rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
    rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
    rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
    rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
    rb_define_method(rb_cMutex, "synchronize", rb_mutex_synchronize_m, 0);
    rb_define_method(rb_cMutex, "owned?", rb_mutex_owned_p, 0);

    /* Queue */
    DEFINE_CLASS(Queue, Object);
    rb_define_alloc_func(rb_cQueue, queue_alloc);

    rb_eClosedQueueError = rb_define_class("ClosedQueueError", rb_eStopIteration);

    rb_define_method(rb_cQueue, "initialize", rb_queue_initialize, -1);
    rb_undef_method(rb_cQueue, "initialize_copy");
    rb_define_method(rb_cQueue, "marshal_dump", undumpable, 0);
    rb_define_method(rb_cQueue, "close", rb_queue_close, 0);
    rb_define_method(rb_cQueue, "closed?", rb_queue_closed_p, 0);
    rb_define_method(rb_cQueue, "push", rb_queue_push, 1);
    rb_define_method(rb_cQueue, "empty?", rb_queue_empty_p, 0);
    rb_define_method(rb_cQueue, "clear", rb_queue_clear, 0);
    rb_define_method(rb_cQueue, "length", rb_queue_length, 0);
    rb_define_method(rb_cQueue, "num_waiting", rb_queue_num_waiting, 0);

    rb_define_alias(rb_cQueue, "enq", "push");
    rb_define_alias(rb_cQueue, "<<", "push");
    rb_define_alias(rb_cQueue, "size", "length");

    DEFINE_CLASS(SizedQueue, Queue);
    rb_define_alloc_func(rb_cSizedQueue, szqueue_alloc);

    rb_define_method(rb_cSizedQueue, "initialize", rb_szqueue_initialize, 1);
    rb_define_method(rb_cSizedQueue, "close", rb_szqueue_close, 0);
    rb_define_method(rb_cSizedQueue, "max", rb_szqueue_max_get, 0);
    rb_define_method(rb_cSizedQueue, "max=", rb_szqueue_max_set, 1);
    rb_define_method(rb_cSizedQueue, "empty?", rb_szqueue_empty_p, 0);
    rb_define_method(rb_cSizedQueue, "clear", rb_szqueue_clear, 0);
    rb_define_method(rb_cSizedQueue, "length", rb_szqueue_length, 0);
    rb_define_method(rb_cSizedQueue, "num_waiting", rb_szqueue_num_waiting, 0);
    rb_define_alias(rb_cSizedQueue, "size", "length");

    /* CVar */
    DEFINE_CLASS(ConditionVariable, Object);
    rb_define_alloc_func(rb_cConditionVariable, condvar_alloc);

    id_sleep = rb_intern("sleep");

    rb_define_method(rb_cConditionVariable, "initialize", rb_condvar_initialize, 0);
    rb_undef_method(rb_cConditionVariable, "initialize_copy");
    rb_define_method(rb_cConditionVariable, "marshal_dump", undumpable, 0);
    rb_define_method(rb_cConditionVariable, "wait", rb_condvar_wait, -1);
    rb_define_method(rb_cConditionVariable, "signal", rb_condvar_signal, 0);
    rb_define_method(rb_cConditionVariable, "broadcast", rb_condvar_broadcast, 0);

    rb_provide("thread.rb");
}

#include "thread_sync.rbinc"