/*
 * Copyright (c) 2008-2009 Travis Geiselbrecht
 *
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * @file
 * @brief Kernel threading
 *
 * This file is the core kernel threading interface.
 *
 * @defgroup thread Threads
 * @{
 */
#include <debug.h>
#include <list.h>
#include <malloc.h>
#include <string.h>
#include <err.h>
#include <kernel/thread.h>
#include <kernel/timer.h>
#include <kernel/dpc.h>
#include <platform.h>

#if DEBUGLEVEL > 1
#define THREAD_CHECKS 1
#endif

#if THREAD_STATS
struct thread_stats thread_stats;
#endif

/* global thread list */
static struct list_node thread_list;

/* the current thread */
thread_t *current_thread;

/* the global critical section count */
int critical_section_count = 1;

/* the run queue */
static struct list_node run_queue[NUM_PRIORITIES];
static uint32_t run_queue_bitmap;

/* the bootstrap thread (statically allocated) */
static thread_t bootstrap_thread;

/* the idle thread */
thread_t *idle_thread;

/* local routines */
static void thread_resched(void);
static void idle_thread_routine(void) __NO_RETURN;

#if PLATFORM_HAS_DYNAMIC_TIMER
/* preemption timer */
static timer_t preempt_timer;
#endif

/* run queue manipulation */
static void insert_in_run_queue_head(thread_t *t)
{
#if THREAD_CHECKS
    ASSERT(t->magic == THREAD_MAGIC);
    ASSERT(t->state == THREAD_READY);
    ASSERT(!list_in_list(&t->queue_node));
    ASSERT(in_critical_section());
#endif

    list_add_head(&run_queue[t->priority], &t->queue_node);
    run_queue_bitmap |= (1<<t->priority);
}

static void insert_in_run_queue_tail(thread_t *t)
{
#if THREAD_CHECKS
    ASSERT(t->magic == THREAD_MAGIC);
    ASSERT(t->state == THREAD_READY);
    ASSERT(!list_in_list(&t->queue_node));
    ASSERT(in_critical_section());
#endif

    list_add_tail(&run_queue[t->priority], &t->queue_node);
    run_queue_bitmap |= (1<<t->priority);
}

static void init_thread_struct(thread_t *t, const char *name)
{
    memset(t, 0, sizeof(thread_t));
    t->magic = THREAD_MAGIC;
    strlcpy(t->name, name, sizeof(t->name));
}

/**
 * @brief Create a new thread
 *
 * This function creates a new thread. The thread is initially suspended, so you
 * need to call thread_resume() to execute it.
 *
 * @param name Name of thread
 * @param entry Entry point of thread
 * @param arg Arbitrary argument passed to entry()
 * @param priority Execution priority for the thread.
 * @param stack_size Stack size for the thread.
 *
 * Thread priority is an integer from 0 (lowest) to 31 (highest). Some standard
 * priorities are defined in <kernel/thread.h>:
 *
 *	HIGHEST_PRIORITY
 *	DPC_PRIORITY
 *	HIGH_PRIORITY
 *	DEFAULT_PRIORITY
 *	LOW_PRIORITY
 *	IDLE_PRIORITY
 *	LOWEST_PRIORITY
 *
 * Stack size is typically set to DEFAULT_STACK_SIZE.
 *
 * @return Pointer to thread object, or NULL on failure.
 */
thread_t *thread_create(const char *name, thread_start_routine entry, void *arg, int priority, size_t stack_size)
{
    thread_t *t;

    t = malloc(sizeof(thread_t));
    if (!t)
        return NULL;

    init_thread_struct(t, name);

    t->entry = entry;
    t->arg = arg;
    t->priority = priority;
    t->saved_critical_section_count = 1; /* we always start inside a critical section */
    t->state = THREAD_SUSPENDED;
    t->blocking_wait_queue = NULL;
    t->wait_queue_block_ret = NO_ERROR;

    /* create the stack */
    t->stack = malloc(stack_size);
    if (!t->stack) {
        free(t);
        return NULL;
    }

    t->stack_size = stack_size;

    /* inherit thread local storage from the parent */
    int i;
    for (i=0; i < MAX_TLS_ENTRY; i++)
        t->tls[i] = current_thread->tls[i];

    /* set up the initial stack frame */
    arch_thread_initialize(t);

    /* add it to the global thread list */
    enter_critical_section();
    list_add_head(&thread_list, &t->thread_list_node);
    exit_critical_section();

    return t;
}

/**
 * @brief Make a suspended thread executable.
 *
 * This function is typically called to start a thread which has just been
 * created with thread_create()
 *
 * @param t Thread to resume
 *
 * @return NO_ERROR on success, ERR_NOT_SUSPENDED if thread was not suspended.
 */
status_t thread_resume(thread_t *t)
{
#if THREAD_CHECKS
    ASSERT(t->magic == THREAD_MAGIC);
    ASSERT(t->state != THREAD_DEATH);
#endif

    if (t->state == THREAD_READY || t->state == THREAD_RUNNING)
        return ERR_NOT_SUSPENDED;

    enter_critical_section();
    t->state = THREAD_READY;
    insert_in_run_queue_head(t);
    thread_yield();
    exit_critical_section();

    return NO_ERROR;
}

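/*
 * Illustrative usage sketch (not part of the original file): a worker thread
 * is created suspended and then started with thread_resume(). The entry
 * routine, its argument, and the wrapper function here are hypothetical.
 *
 *	static int worker_entry(void *arg)
 *	{
 *		dprintf(INFO, "worker running, arg %p\n", arg);
 *		return 0;
 *	}
 *
 *	void example_start_worker(void)
 *	{
 *		thread_t *t = thread_create("worker", &worker_entry, NULL,
 *			DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
 *		if (t)
 *			thread_resume(t);
 *	}
 */
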
static void thread_cleanup_dpc(void *thread)
{
    thread_t *t = (thread_t *)thread;

//  dprintf(SPEW, "thread_cleanup_dpc: thread %p (%s)\n", t, t->name);

#if THREAD_CHECKS
    ASSERT(t->state == THREAD_DEATH);
    ASSERT(t->blocking_wait_queue == NULL);
    ASSERT(!list_in_list(&t->queue_node));
#endif

    /* remove it from the master thread list */
    enter_critical_section();
    list_delete(&t->thread_list_node);
    exit_critical_section();

    /* free its stack and the thread structure itself */
    if (t->stack)
        free(t->stack);

    free(t);
}

/**
 * @brief Terminate the current thread
 *
 * Current thread exits with the specified return code.
 *
 * This function does not return.
 */
void thread_exit(int retcode)
{
#if THREAD_CHECKS
    ASSERT(current_thread->magic == THREAD_MAGIC);
    ASSERT(current_thread->state == THREAD_RUNNING);
#endif

//  dprintf("thread_exit: current %p\n", current_thread);

    enter_critical_section();

    /* enter the dead state */
    current_thread->state = THREAD_DEATH;
    current_thread->retcode = retcode;

    /* schedule a dpc to clean ourselves up */
    dpc_queue(thread_cleanup_dpc, (void *)current_thread, DPC_FLAG_NORESCHED);

    /* reschedule */
    thread_resched();

    panic("somehow fell through thread_exit()\n");
    for(;;);
}

static void idle_thread_routine(void)
{
    for(;;)
        arch_idle();
}

/**
 * @brief Cause another thread to be executed.
 *
 * Internal reschedule routine. The current thread needs to already be in whatever
 * state and queues it needs to be in. This routine simply picks the next thread and
 * switches to it.
 *
 * This is probably not the function you're looking for. See
 * thread_yield() instead.
 */
void thread_resched(void)
{
    thread_t *oldthread;
    thread_t *newthread;

//  dprintf("thread_resched: current %p: ", current_thread);
//  dump_thread(current_thread);

#if THREAD_CHECKS
    ASSERT(in_critical_section());
#endif

#if THREAD_STATS
    thread_stats.reschedules++;
#endif

    oldthread = current_thread;

    // at the moment, can't deal with more than 32 priority levels
    ASSERT(NUM_PRIORITIES <= 32);

    // should at least find the idle thread
#if THREAD_CHECKS
    ASSERT(run_queue_bitmap != 0);
#endif

    int next_queue = HIGHEST_PRIORITY - __builtin_clz(run_queue_bitmap) - (32 - NUM_PRIORITIES);
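    /* worked example (assuming NUM_PRIORITIES == 32, so HIGHEST_PRIORITY == 31):
     * if the highest set bit of run_queue_bitmap is bit 4, __builtin_clz()
     * returns 27 and next_queue = 31 - 27 - 0 = 4, i.e. the highest-priority
     * non-empty run queue.
     */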
    //dprintf(SPEW, "bitmap 0x%x, next %d\n", run_queue_bitmap, next_queue);

    newthread = list_remove_head_type(&run_queue[next_queue], thread_t, queue_node);

#if THREAD_CHECKS
    ASSERT(newthread);
#endif

    if (list_is_empty(&run_queue[next_queue]))
        run_queue_bitmap &= ~(1<<next_queue);

#if 0
    // XXX make this more efficient
    newthread = NULL;
    for (i=HIGHEST_PRIORITY; i >= LOWEST_PRIORITY; i--) {
        newthread = list_remove_head_type(&run_queue[i], thread_t, queue_node);
        if (newthread)
            break;
    }
#endif

//  dprintf("newthread: ");
//  dump_thread(newthread);

    newthread->state = THREAD_RUNNING;

    if (newthread == oldthread)
        return;

    /* set up quantum for the new thread if it was consumed */
    if (newthread->remaining_quantum <= 0) {
        newthread->remaining_quantum = 5; // XXX make this smarter
    }

#if THREAD_STATS
    thread_stats.context_switches++;

    if (oldthread == idle_thread) {
        bigtime_t now = current_time_hires();
        thread_stats.idle_time += now - thread_stats.last_idle_timestamp;
    }
    if (newthread == idle_thread) {
        thread_stats.last_idle_timestamp = current_time_hires();
    }
#endif

#if THREAD_CHECKS
    ASSERT(critical_section_count > 0);
    ASSERT(newthread->saved_critical_section_count > 0);
#endif

#if PLATFORM_HAS_DYNAMIC_TIMER
    /* if we're switching from idle to a real thread, set up a periodic
     * timer to run our preemption tick.
     */
    if (oldthread == idle_thread) {
        timer_set_periodic(&preempt_timer, 10, (timer_callback)thread_timer_tick, NULL);
    } else if (newthread == idle_thread) {
        timer_cancel(&preempt_timer);
    }
#endif

    /* do the switch */
    oldthread->saved_critical_section_count = critical_section_count;
    current_thread = newthread;
    critical_section_count = newthread->saved_critical_section_count;
    arch_context_switch(oldthread, newthread);
}

/**
 * @brief Yield the cpu to another thread
 *
 * This function places the current thread at the end of the run queue
 * and yields the cpu to another waiting thread (if any).
 *
 * This function will return at some later time, possibly immediately if
 * no other threads are waiting to execute.
 */
void thread_yield(void)
{
#if THREAD_CHECKS
    ASSERT(current_thread->magic == THREAD_MAGIC);
    ASSERT(current_thread->state == THREAD_RUNNING);
#endif

    enter_critical_section();

#if THREAD_STATS
    thread_stats.yields++;
#endif

    /* we are yielding the cpu, so stick ourselves into the tail of the run queue and reschedule */
    current_thread->state = THREAD_READY;
    current_thread->remaining_quantum = 0;
    insert_in_run_queue_tail(current_thread);
    thread_resched();

    exit_critical_section();
}

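/*
 * Illustrative sketch (not part of the original file): thread_yield() is the
 * natural building block for a cooperative polling loop. The polling
 * condition below is hypothetical.
 *
 *	while (!device_ready())
 *		thread_yield();
 */
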
/**
 * @brief Briefly yield cpu to another thread
 *
 * This function is similar to thread_yield(), except that it will
 * restart more quickly.
 *
 * This function places the current thread at the head of the run
 * queue and then yields the cpu to another thread.
 *
 * Exception: If the time slice for this thread has expired, then
 * the thread goes to the end of the run queue.
 *
 * This function will return at some later time, possibly immediately if
 * no other threads are waiting to execute.
 */
void thread_preempt(void)
{
#if THREAD_CHECKS
    ASSERT(current_thread->magic == THREAD_MAGIC);
    ASSERT(current_thread->state == THREAD_RUNNING);
#endif

    enter_critical_section();

#if THREAD_STATS
    if (current_thread != idle_thread)
        thread_stats.preempts++; /* only track when a meaningful preempt happens */
#endif

    /* we are being preempted, so we get to go back into the front of the run queue if we have quantum left */
    current_thread->state = THREAD_READY;
    if (current_thread->remaining_quantum > 0)
        insert_in_run_queue_head(current_thread);
    else
        insert_in_run_queue_tail(current_thread); /* if we're out of quantum, go to the tail of the queue */
    thread_resched();

    exit_critical_section();
}

/**
 * @brief Suspend thread until woken.
 *
 * This function schedules another thread to execute. This function does not
 * return until the thread is made runnable again by some other module.
 *
 * You probably don't want to call this function directly; it's meant to be called
 * from other modules, such as mutex, which will presumably set the thread's
 * state to blocked and add it to some queue or another.
 */
void thread_block(void)
{
#if THREAD_CHECKS
    ASSERT(current_thread->magic == THREAD_MAGIC);
    ASSERT(current_thread->state == THREAD_BLOCKED);
#endif

    enter_critical_section();

    /* we are blocking on something. the blocking code should have already stuck us on a queue */
    thread_resched();

    exit_critical_section();
}

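/*
 * Sketch of the expected calling pattern (not code from this file): the
 * blocking primitive queues the current thread itself before calling
 * thread_block(). wait_queue_block() below is the in-tree example of this
 * pattern; the queue used here is hypothetical.
 *
 *	enter_critical_section();
 *	list_add_tail(&some_queue, &current_thread->queue_node);
 *	current_thread->state = THREAD_BLOCKED;
 *	thread_block();
 *	exit_critical_section();
 */
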
enum handler_return thread_timer_tick(void)
{
    if (current_thread == idle_thread)
        return INT_NO_RESCHEDULE;

    current_thread->remaining_quantum--;
    if (current_thread->remaining_quantum <= 0)
        return INT_RESCHEDULE;
    else
        return INT_NO_RESCHEDULE;
}

/* timer callback to wake up a sleeping thread */
static enum handler_return thread_sleep_handler(timer_t *timer, time_t now, void *arg)
{
    thread_t *t = (thread_t *)arg;

#if THREAD_CHECKS
    ASSERT(t->magic == THREAD_MAGIC);
    ASSERT(t->state == THREAD_SLEEPING);
#endif

    t->state = THREAD_READY;
    insert_in_run_queue_head(t);

    return INT_RESCHEDULE;
}

/**
 * @brief Put thread to sleep; delay specified in ms
 *
 * This function puts the current thread to sleep until the specified
 * delay in ms has expired.
 *
 * Note that this function could sleep for longer than the specified delay if
 * other threads are running. When the timer expires, this thread will
 * be placed at the head of the run queue.
 */
void thread_sleep(time_t delay)
{
    timer_t timer;

#if THREAD_CHECKS
    ASSERT(current_thread->magic == THREAD_MAGIC);
    ASSERT(current_thread->state == THREAD_RUNNING);
#endif

    timer_initialize(&timer);

    enter_critical_section();
    timer_set_oneshot(&timer, delay, thread_sleep_handler, (void *)current_thread);
    current_thread->state = THREAD_SLEEPING;
    thread_resched();
    exit_critical_section();
}

/**
 * @brief Initialize threading system
 *
 * This function is called once, from kmain()
 */
void thread_init_early(void)
{
    int i;

    /* initialize the run queues */
    for (i=0; i < NUM_PRIORITIES; i++)
        list_initialize(&run_queue[i]);

    /* initialize the thread list */
    list_initialize(&thread_list);

    /* create a thread to cover the current running state */
    thread_t *t = &bootstrap_thread;
    init_thread_struct(t, "bootstrap");

    /* half construct this thread, since we're already running */
    t->priority = HIGHEST_PRIORITY;
    t->state = THREAD_RUNNING;
    t->saved_critical_section_count = 1;
    list_add_head(&thread_list, &t->thread_list_node);
    current_thread = t;
}

/**
 * @brief Complete thread initialization
 *
 * This function is called once at boot time
 */
void thread_init(void)
{
#if PLATFORM_HAS_DYNAMIC_TIMER
    timer_initialize(&preempt_timer);
#endif
}

/**
 * @brief Change name of current thread
 */
void thread_set_name(const char *name)
{
    strlcpy(current_thread->name, name, sizeof(current_thread->name));
}

/**
 * @brief Change priority of current thread
 *
 * See thread_create() for a discussion of priority values.
 */
void thread_set_priority(int priority)
{
    if (priority < LOWEST_PRIORITY)
        priority = LOWEST_PRIORITY;
    if (priority > HIGHEST_PRIORITY)
        priority = HIGHEST_PRIORITY;
    current_thread->priority = priority;
}

/**
 * @brief Become an idle thread
 *
 * This function marks the current thread as the idle thread -- the one which
 * executes when there is nothing else to do. This function does not return.
 * This function is called once at boot time.
 */
void thread_become_idle(void)
{
    thread_set_name("idle");
    thread_set_priority(IDLE_PRIORITY);
    idle_thread = current_thread;
    idle_thread_routine();
}

/**
 * @brief Dump debugging info about the specified thread.
 */
void dump_thread(thread_t *t)
{
    dprintf(INFO, "dump_thread: t %p (%s)\n", t, t->name);
    dprintf(INFO, "\tstate %d, priority %d, remaining quantum %d, critical section %d\n", t->state, t->priority, t->remaining_quantum, t->saved_critical_section_count);
    dprintf(INFO, "\tstack %p, stack_size %zd\n", t->stack, t->stack_size);
    dprintf(INFO, "\tentry %p, arg %p\n", t->entry, t->arg);
    dprintf(INFO, "\twait queue %p, wait queue ret %d\n", t->blocking_wait_queue, t->wait_queue_block_ret);
    dprintf(INFO, "\ttls:");
    int i;
    for (i=0; i < MAX_TLS_ENTRY; i++) {
        dprintf(INFO, " 0x%x", t->tls[i]);
    }
    dprintf(INFO, "\n");
}

/**
 * @brief Dump debugging info about all threads
 */
void dump_all_threads(void)
{
    thread_t *t;

    enter_critical_section();
    list_for_every_entry(&thread_list, t, thread_t, thread_list_node) {
        dump_thread(t);
    }
    exit_critical_section();
}

/** @} */

/**
 * @defgroup wait Wait Queue
 * @{
 */

/**
 * @brief Initialize a wait queue
 */
void wait_queue_init(wait_queue_t *wait)
{
    wait->magic = WAIT_QUEUE_MAGIC;
    list_initialize(&wait->list);
    wait->count = 0;
}

static enum handler_return wait_queue_timeout_handler(timer_t *timer, time_t now, void *arg)
{
    thread_t *thread = (thread_t *)arg;

#if THREAD_CHECKS
    ASSERT(thread->magic == THREAD_MAGIC);
#endif

    if (thread_unblock_from_wait_queue(thread, false, ERR_TIMED_OUT) >= NO_ERROR)
        return INT_RESCHEDULE;

    return INT_NO_RESCHEDULE;
}

/**
 * @brief Block until a wait queue is notified.
 *
 * This function puts the current thread at the end of a wait
 * queue and then blocks until some other thread wakes the queue
 * up again.
 *
 * @param wait The wait queue to enter
 * @param timeout The maximum time, in ms, to wait
 *
 * If the timeout is zero, this function returns immediately with
 * ERR_TIMED_OUT. If the timeout is INFINITE_TIME, this function
 * waits indefinitely. Otherwise, this function returns with
 * ERR_TIMED_OUT at the end of the timeout period.
 *
 * @return ERR_TIMED_OUT on timeout, else returns the return
 * value specified when the queue was woken by wait_queue_wake_one().
 */
status_t wait_queue_block(wait_queue_t *wait, time_t timeout)
{
    timer_t timer;

#if THREAD_CHECKS
    ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    ASSERT(current_thread->state == THREAD_RUNNING);
    ASSERT(in_critical_section());
#endif

    if (timeout == 0)
        return ERR_TIMED_OUT;

    list_add_tail(&wait->list, &current_thread->queue_node);
    wait->count++;
    current_thread->state = THREAD_BLOCKED;
    current_thread->blocking_wait_queue = wait;
    current_thread->wait_queue_block_ret = NO_ERROR;

    /* the timeout is known to be nonzero here; if it is also not infinite,
     * set a callback to yank us out of the queue when it expires */
    if (timeout != INFINITE_TIME) {
        timer_initialize(&timer);
        timer_set_oneshot(&timer, timeout, wait_queue_timeout_handler, (void *)current_thread);
    }

    thread_block();

    /* we don't really know whether the timer fired or not, so it's safer to try to cancel it regardless */
    if (timeout != INFINITE_TIME) {
        timer_cancel(&timer);
    }

    return current_thread->wait_queue_block_ret;
}

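/*
 * Illustrative wait queue usage (a sketch, not code from this file): one
 * thread blocks on a queue with a timeout while another wakes it, so the
 * waiter sees either the waker's status or ERR_TIMED_OUT. The queue object,
 * the 500 ms timeout and the surrounding functions are hypothetical.
 *
 *	static wait_queue_t event_queue;
 *
 *	void example_init(void)
 *	{
 *		wait_queue_init(&event_queue);
 *	}
 *
 *	status_t example_wait(void)
 *	{
 *		status_t ret;
 *
 *		enter_critical_section();
 *		ret = wait_queue_block(&event_queue, 500);
 *		exit_critical_section();
 *		return ret;
 *	}
 *
 *	void example_signal(void)
 *	{
 *		enter_critical_section();
 *		wait_queue_wake_one(&event_queue, true, NO_ERROR);
 *		exit_critical_section();
 *	}
 */
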
/**
 * @brief Wake up one thread sleeping on a wait queue
 *
 * This function removes one thread (if any) from the head of the wait queue and
 * makes it executable. The new thread will be placed at the head of the
 * run queue.
 *
 * @param wait The wait queue to wake
 * @param reschedule If true, the newly-woken thread will run immediately.
 * @param wait_queue_error The return value which the new thread will receive
 * from wait_queue_block().
 *
 * @return The number of threads woken (zero or one)
 */
int wait_queue_wake_one(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
    thread_t *t;
    int ret = 0;

#if THREAD_CHECKS
    ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    ASSERT(in_critical_section());
#endif

    t = list_remove_head_type(&wait->list, thread_t, queue_node);
    if (t) {
        wait->count--;
#if THREAD_CHECKS
        ASSERT(t->state == THREAD_BLOCKED);
#endif
        t->state = THREAD_READY;
        t->wait_queue_block_ret = wait_queue_error;
        t->blocking_wait_queue = NULL;

        /* if we're instructed to reschedule, stick the current thread on the head
         * of the run queue first, so that the newly awakened thread gets a chance to run
         * before the current one, but the current one doesn't get unnecessarily punished.
         */
        if (reschedule) {
            current_thread->state = THREAD_READY;
            insert_in_run_queue_head(current_thread);
        }
        insert_in_run_queue_head(t);
        if (reschedule)
            thread_resched();
        ret = 1;
    }

    return ret;
}

/**
 * @brief Wake all threads sleeping on a wait queue
 *
 * This function removes all threads (if any) from the wait queue and
 * makes them executable. The new threads will be placed at the head of the
 * run queue.
 *
 * @param wait The wait queue to wake
 * @param reschedule If true, the newly-woken threads will run immediately.
 * @param wait_queue_error The return value which the new threads will receive
 * from wait_queue_block().
 *
 * @return The number of threads woken
 */
int wait_queue_wake_all(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
    thread_t *t;
    int ret = 0;

#if THREAD_CHECKS
    ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    ASSERT(in_critical_section());
#endif

    if (reschedule && wait->count > 0) {
        /* if we're instructed to reschedule, stick the current thread on the head
         * of the run queue first, so that the newly awakened threads get a chance to run
         * before the current one, but the current one doesn't get unnecessarily punished.
         */
        current_thread->state = THREAD_READY;
        insert_in_run_queue_head(current_thread);
    }

    /* pop all the threads off the wait queue into the run queue */
    while ((t = list_remove_head_type(&wait->list, thread_t, queue_node))) {
        wait->count--;
#if THREAD_CHECKS
        ASSERT(t->state == THREAD_BLOCKED);
#endif
        t->state = THREAD_READY;
        t->wait_queue_block_ret = wait_queue_error;
        t->blocking_wait_queue = NULL;

        insert_in_run_queue_head(t);
        ret++;
    }

#if THREAD_CHECKS
    ASSERT(wait->count == 0);
#endif

    if (reschedule && ret > 0)
        thread_resched();

    return ret;
}

/**
 * @brief Free all resources allocated in wait_queue_init()
 *
 * If any threads were waiting on this queue, they are all woken.
 */
void wait_queue_destroy(wait_queue_t *wait, bool reschedule)
{
#if THREAD_CHECKS
    ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    ASSERT(in_critical_section());
#endif
    wait_queue_wake_all(wait, reschedule, ERR_OBJECT_DESTROYED);
    wait->magic = 0;
}

/**
 * @brief Wake a specific thread in a wait queue
 *
 * This function extracts a specific thread from a wait queue, wakes it, and
 * puts it at the head of the run queue.
 *
 * @param t The thread to wake
 * @param reschedule If true, the newly-woken thread will run immediately.
 * @param wait_queue_error The return value which the new thread will receive
 * from wait_queue_block().
 *
 * @return NO_ERROR on success, ERR_NOT_BLOCKED if thread was not in any wait queue.
 */
status_t thread_unblock_from_wait_queue(thread_t *t, bool reschedule, status_t wait_queue_error)
{
    enter_critical_section();

#if THREAD_CHECKS
    ASSERT(t->magic == THREAD_MAGIC);
#endif

    if (t->state != THREAD_BLOCKED) {
        /* don't leak the critical section we entered above */
        exit_critical_section();
        return ERR_NOT_BLOCKED;
    }

#if THREAD_CHECKS
    ASSERT(t->blocking_wait_queue != NULL);
    ASSERT(t->blocking_wait_queue->magic == WAIT_QUEUE_MAGIC);
    ASSERT(list_in_list(&t->queue_node));
#endif

    list_delete(&t->queue_node);
    t->blocking_wait_queue->count--;
    t->blocking_wait_queue = NULL;
    t->state = THREAD_READY;
    t->wait_queue_block_ret = wait_queue_error;
    insert_in_run_queue_head(t);

    if (reschedule)
        thread_resched();

    exit_critical_section();

    return NO_ERROR;
}