/*
 * Copyright (c) 2008-2009 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <debug.h>
#include <list.h>
#include <malloc.h>
#include <string.h>
#include <err.h>
#include <kernel/thread.h>
#include <kernel/timer.h>
#include <kernel/dpc.h>
#include <platform.h>

#if DEBUGLEVEL > 1
#define THREAD_CHECKS 1
#endif

#if THREAD_STATS
struct thread_stats thread_stats;
#endif

/* global thread list */
static struct list_node thread_list;

/* the current thread */
thread_t *current_thread;

/* the global critical section count */
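/* note: starts at 1, since the kernel boots inside an implicit critical
 * section that is exited explicitly once early initialization is done
 */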
int critical_section_count = 1;

/* the run queue */
static struct list_node run_queue[NUM_PRIORITIES];
static uint32_t run_queue_bitmap;
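/* one bit per priority level: bit N is set iff run_queue[N] is non-empty,
 * letting the scheduler find the highest ready priority with a single
 * count-leading-zeros operation (see thread_resched())
 */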

/* the bootstrap thread (statically allocated) */
static thread_t bootstrap_thread;

/* the idle thread */
thread_t *idle_thread;

/* local routines */
static void thread_resched(void);
static void idle_thread_routine(void) __NO_RETURN;

#if PLATFORM_HAS_DYNAMIC_TIMER
/* preemption timer */
static timer_t preempt_timer;
#endif

/* run queue manipulation */
static void insert_in_run_queue_head(thread_t *t)
{
#if THREAD_CHECKS
    ASSERT(t->magic == THREAD_MAGIC);
    ASSERT(t->state == THREAD_READY);
    ASSERT(!list_in_list(&t->queue_node));
    ASSERT(in_critical_section());
#endif

    list_add_head(&run_queue[t->priority], &t->queue_node);
    run_queue_bitmap |= (1<<t->priority);
}

static void insert_in_run_queue_tail(thread_t *t)
{
#if THREAD_CHECKS
    ASSERT(t->magic == THREAD_MAGIC);
    ASSERT(t->state == THREAD_READY);
    ASSERT(!list_in_list(&t->queue_node));
    ASSERT(in_critical_section());
#endif

    list_add_tail(&run_queue[t->priority], &t->queue_node);
    run_queue_bitmap |= (1<<t->priority);
}

static void init_thread_struct(thread_t *t, const char *name)
{
    memset(t, 0, sizeof(thread_t));
    t->magic = THREAD_MAGIC;
    strlcpy(t->name, name, sizeof(t->name));
}

thread_t *thread_create(const char *name, thread_start_routine entry, void *arg, int priority, size_t stack_size)
{
    thread_t *t;

    t = malloc(sizeof(thread_t));
    if (!t)
        return NULL;

    init_thread_struct(t, name);

    t->entry = entry;
    t->arg = arg;
    t->priority = priority;
    t->saved_critical_section_count = 1; /* we always start inside a critical section */
    t->state = THREAD_SUSPENDED;
    t->blocking_wait_queue = NULL;
    t->wait_queue_block_ret = NO_ERROR;

    /* create the stack */
    t->stack = malloc(stack_size);
    if (!t->stack) {
        free(t);
        return NULL;
    }

    t->stack_size = stack_size;

    /* inherit thread local storage from the parent */
    int i;
    for (i=0; i < MAX_TLS_ENTRY; i++)
        t->tls[i] = current_thread->tls[i];

    /* set up the initial stack frame */
    arch_thread_initialize(t);

    /* add it to the global thread list */
    enter_critical_section();
    list_add_head(&thread_list, &t->thread_list_node);
    exit_critical_section();

    return t;
}
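
/*
 * Example usage (a sketch; DEFAULT_PRIORITY and DEFAULT_STACK_SIZE are
 * assumed to come from kernel/thread.h):
 *
 *    thread_t *t = thread_create("worker", &worker_entry, NULL,
 *            DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
 *    if (t)
 *        thread_resume(t);
 *
 * threads are created suspended and don't run until thread_resume() puts
 * them on the run queue.
 */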

status_t thread_resume(thread_t *t)
{
#if THREAD_CHECKS
    ASSERT(t->magic == THREAD_MAGIC);
    ASSERT(t->state != THREAD_DEATH);
#endif

    if (t->state == THREAD_READY || t->state == THREAD_RUNNING)
        return ERR_NOT_SUSPENDED;

    enter_critical_section();
    t->state = THREAD_READY;
    insert_in_run_queue_head(t);
    thread_yield();
    exit_critical_section();

    return NO_ERROR;
}

static void thread_cleanup_dpc(void *thread)
{
    thread_t *t = (thread_t *)thread;

//  dprintf(SPEW, "thread_cleanup_dpc: thread %p (%s)\n", t, t->name);

#if THREAD_CHECKS
    ASSERT(t->state == THREAD_DEATH);
    ASSERT(t->blocking_wait_queue == NULL);
    ASSERT(!list_in_list(&t->queue_node));
#endif

    /* remove it from the master thread list */
    enter_critical_section();
    list_delete(&t->thread_list_node);
    exit_critical_section();

    /* free its stack and the thread structure itself */
    if (t->stack)
        free(t->stack);

    free(t);
}

void thread_exit(int retcode)
{
#if THREAD_CHECKS
    ASSERT(current_thread->magic == THREAD_MAGIC);
    ASSERT(current_thread->state == THREAD_RUNNING);
#endif

//  dprintf("thread_exit: current %p\n", current_thread);

    enter_critical_section();

    /* enter the dead state */
    current_thread->state = THREAD_DEATH;
    current_thread->retcode = retcode;

    /* schedule a dpc to clean ourselves up */
    dpc_queue(thread_cleanup_dpc, (void *)current_thread, DPC_FLAG_NORESCHED);

    /* reschedule */
    thread_resched();

    panic("somehow fell through thread_exit()\n");
}

static void idle_thread_routine(void)
{
    for(;;)
        arch_idle();
}

/*
 * Internal reschedule routine. The caller must have already placed the
 * current thread in whatever state and queue it belongs in; this routine
 * simply picks the next thread and switches to it.
 */
void thread_resched(void)
{
    thread_t *oldthread;
    thread_t *newthread;

//  dprintf("thread_resched: current %p: ", current_thread);
//  dump_thread(current_thread);

#if THREAD_CHECKS
    ASSERT(in_critical_section());
#endif

#if THREAD_STATS
    thread_stats.reschedules++;
#endif

    oldthread = current_thread;

    // at the moment, can't deal with more than 32 priority levels
    ASSERT(NUM_PRIORITIES <= 32);

    // should at least find the idle thread
#if THREAD_CHECKS
    ASSERT(run_queue_bitmap != 0);
#endif

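    /* find the highest priority queue with a ready thread: __builtin_clz
     * counts the leading zero bits of the 32-bit bitmap, so with
     * NUM_PRIORITIES == 32 (HIGHEST_PRIORITY == 31) the expression below
     * reduces to 31 - clz, the index of the highest set bit.
     * e.g. bitmap 0x00000011 -> clz = 27 -> next_queue = 4
     */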
    int next_queue = HIGHEST_PRIORITY - __builtin_clz(run_queue_bitmap) - (32 - NUM_PRIORITIES);
    //dprintf(SPEW, "bitmap 0x%x, next %d\n", run_queue_bitmap, next_queue);

    newthread = list_remove_head_type(&run_queue[next_queue], thread_t, queue_node);

#if THREAD_CHECKS
    ASSERT(newthread);
#endif

    if (list_is_empty(&run_queue[next_queue]))
        run_queue_bitmap &= ~(1<<next_queue);

#if 0
    // XXX make this more efficient
    newthread = NULL;
    for (i=HIGHEST_PRIORITY; i >= LOWEST_PRIORITY; i--) {
        newthread = list_remove_head_type(&run_queue[i], thread_t, queue_node);
        if (newthread)
            break;
    }
#endif

//  dprintf("newthread: ");
//  dump_thread(newthread);

    newthread->state = THREAD_RUNNING;

    if (newthread == oldthread)
        return;

    /* set up quantum for the new thread if it was consumed */
    if (newthread->remaining_quantum <= 0) {
        newthread->remaining_quantum = 5; // XXX make this smarter
    }

#if THREAD_STATS
    thread_stats.context_switches++;

    if (oldthread == idle_thread) {
        bigtime_t now = current_time_hires();
        thread_stats.idle_time += now - thread_stats.last_idle_timestamp;
    }
    if (newthread == idle_thread) {
        thread_stats.last_idle_timestamp = current_time_hires();
    }
#endif

#if THREAD_CHECKS
    ASSERT(critical_section_count > 0);
    ASSERT(newthread->saved_critical_section_count > 0);
#endif

#if PLATFORM_HAS_DYNAMIC_TIMER
    /* if we're switching from idle to a real thread, set up a periodic
     * timer to run our preemption tick.
     */
    if (oldthread == idle_thread) {
        timer_set_periodic(&preempt_timer, 10, (timer_callback)thread_timer_tick, NULL);
    } else if (newthread == idle_thread) {
        timer_cancel(&preempt_timer);
    }
#endif

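    /* each thread carries its own critical section depth across the switch:
     * save the outgoing thread's count and restore the incoming thread's
     * count before handing over the cpu
     */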
    /* do the switch */
    oldthread->saved_critical_section_count = critical_section_count;
    current_thread = newthread;
    critical_section_count = newthread->saved_critical_section_count;
    arch_context_switch(oldthread, newthread);
}

void thread_yield(void)
{
#if THREAD_CHECKS
    ASSERT(current_thread->magic == THREAD_MAGIC);
    ASSERT(current_thread->state == THREAD_RUNNING);
#endif

    enter_critical_section();

#if THREAD_STATS
    thread_stats.yields++;
#endif

    /* we are yielding the cpu, so stick ourselves into the tail of the run queue and reschedule */
    current_thread->state = THREAD_READY;
    current_thread->remaining_quantum = 0;
    insert_in_run_queue_tail(current_thread);
    thread_resched();

    exit_critical_section();
}

void thread_preempt(void)
{
#if THREAD_CHECKS
    ASSERT(current_thread->magic == THREAD_MAGIC);
    ASSERT(current_thread->state == THREAD_RUNNING);
#endif

    enter_critical_section();

#if THREAD_STATS
    if (current_thread != idle_thread)
        thread_stats.preempts++; /* only track when a meaningful preempt happens */
#endif

    /* we are being preempted, so we get to go back into the front of the run queue if we have quantum left */
    current_thread->state = THREAD_READY;
    if (current_thread->remaining_quantum > 0)
        insert_in_run_queue_head(current_thread);
    else
        insert_in_run_queue_tail(current_thread); /* if we're out of quantum, go to the tail of the queue */
    thread_resched();

    exit_critical_section();
}

void thread_block(void)
{
#if THREAD_CHECKS
    ASSERT(current_thread->magic == THREAD_MAGIC);
    ASSERT(current_thread->state == THREAD_BLOCKED);
#endif

    enter_critical_section();

    /* we are blocking on something. the blocking code should have already stuck us on a queue */
    thread_resched();

    exit_critical_section();
}

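/* preemption tick, run from timer interrupt context: decrements the current
 * thread's quantum and requests a reschedule once it is used up
 */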
enum handler_return thread_timer_tick(void)
{
    if (current_thread == idle_thread)
        return INT_NO_RESCHEDULE;

    current_thread->remaining_quantum--;
    if (current_thread->remaining_quantum <= 0)
        return INT_RESCHEDULE;
    else
        return INT_NO_RESCHEDULE;
}

/* timer callback to wake up a sleeping thread */
static enum handler_return thread_sleep_handler(timer_t *timer, time_t now, void *arg)
{
    thread_t *t = (thread_t *)arg;

#if THREAD_CHECKS
    ASSERT(t->magic == THREAD_MAGIC);
    ASSERT(t->state == THREAD_SLEEPING);
#endif

    t->state = THREAD_READY;
    insert_in_run_queue_head(t);

    return INT_RESCHEDULE;
}

/* Put thread to sleep; delay specified in ms */
void thread_sleep(time_t delay)
{
    timer_t timer;

#if THREAD_CHECKS
    ASSERT(current_thread->magic == THREAD_MAGIC);
    ASSERT(current_thread->state == THREAD_RUNNING);
#endif

    timer_initialize(&timer);

    enter_critical_section();
    timer_set_oneshot(&timer, delay, thread_sleep_handler, (void *)current_thread);
    current_thread->state = THREAD_SLEEPING;
    thread_resched();
    exit_critical_section();
}

void thread_init_early(void)
{
    int i;

    /* initialize the run queues */
    for (i=0; i < NUM_PRIORITIES; i++)
        list_initialize(&run_queue[i]);

    /* initialize the thread list */
    list_initialize(&thread_list);

    /* create a thread to cover the current running state */
    thread_t *t = &bootstrap_thread;
    init_thread_struct(t, "bootstrap");

    /* half construct this thread, since we're already running */
    t->priority = HIGHEST_PRIORITY;
    t->state = THREAD_RUNNING;
    t->saved_critical_section_count = 1;
    list_add_head(&thread_list, &t->thread_list_node);
    current_thread = t;
}

void thread_init(void)
{
#if PLATFORM_HAS_DYNAMIC_TIMER
    timer_initialize(&preempt_timer);
#endif
}

void thread_set_name(const char *name)
{
    strlcpy(current_thread->name, name, sizeof(current_thread->name));
}

void thread_set_priority(int priority)
{
    if (priority < LOWEST_PRIORITY)
        priority = LOWEST_PRIORITY;
    if (priority > HIGHEST_PRIORITY)
        priority = HIGHEST_PRIORITY;
    current_thread->priority = priority;
}

void thread_become_idle(void)
{
    thread_set_name("idle");
    thread_set_priority(IDLE_PRIORITY);
    idle_thread = current_thread;
    idle_thread_routine();
}

void dump_thread(thread_t *t)
{
    dprintf(INFO, "dump_thread: t %p (%s)\n", t, t->name);
    dprintf(INFO, "\tstate %d, priority %d, remaining quantum %d, critical section %d\n", t->state, t->priority, t->remaining_quantum, t->saved_critical_section_count);
    dprintf(INFO, "\tstack %p, stack_size %zd\n", t->stack, t->stack_size);
    dprintf(INFO, "\tentry %p, arg %p\n", t->entry, t->arg);
    dprintf(INFO, "\twait queue %p, wait queue ret %d\n", t->blocking_wait_queue, t->wait_queue_block_ret);
    dprintf(INFO, "\ttls:");
    int i;
    for (i=0; i < MAX_TLS_ENTRY; i++) {
        dprintf(INFO, " 0x%x", t->tls[i]);
    }
    dprintf(INFO, "\n");
}

void dump_all_threads(void)
{
    thread_t *t;

    enter_critical_section();
    list_for_every_entry(&thread_list, t, thread_t, thread_list_node) {
        dump_thread(t);
    }
    exit_critical_section();
}

/* wait queue */
void wait_queue_init(wait_queue_t *wait)
{
    wait->magic = WAIT_QUEUE_MAGIC;
    list_initialize(&wait->list);
    wait->count = 0;
}

static enum handler_return wait_queue_timeout_handler(timer_t *timer, time_t now, void *arg)
{
    thread_t *thread = (thread_t *)arg;

#if THREAD_CHECKS
    ASSERT(thread->magic == THREAD_MAGIC);
#endif

    if (thread_unblock_from_wait_queue(thread, false, ERR_TIMED_OUT) >= NO_ERROR)
        return INT_RESCHEDULE;

    return INT_NO_RESCHEDULE;
}

status_t wait_queue_block(wait_queue_t *wait, time_t timeout)
{
    timer_t timer;

#if THREAD_CHECKS
    ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    ASSERT(current_thread->state == THREAD_RUNNING);
    ASSERT(in_critical_section());
#endif

    if (timeout == 0)
        return ERR_TIMED_OUT;

    list_add_tail(&wait->list, &current_thread->queue_node);
    wait->count++;
    current_thread->state = THREAD_BLOCKED;
    current_thread->blocking_wait_queue = wait;
    current_thread->wait_queue_block_ret = NO_ERROR;

    /* if the timeout is finite, set a callback to yank us out of the queue */
    if (timeout != INFINITE_TIME) {
        timer_initialize(&timer);
        timer_set_oneshot(&timer, timeout, wait_queue_timeout_handler, (void *)current_thread);
    }

    thread_block();

    /* we don't know whether the timer fired or not, so cancel it to be safe */
    if (timeout != INFINITE_TIME) {
        timer_cancel(&timer);
    }

    return current_thread->wait_queue_block_ret;
}
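
/*
 * Typical usage (a sketch; event_signaled and event_queue are hypothetical
 * names used only for illustration):
 *
 *    enter_critical_section();
 *    while (!event_signaled)
 *        wait_queue_block(&event_queue, INFINITE_TIME);
 *    exit_critical_section();
 *
 * and on the waking side:
 *
 *    enter_critical_section();
 *    event_signaled = true;
 *    wait_queue_wake_all(&event_queue, true, NO_ERROR);
 *    exit_critical_section();
 */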

int wait_queue_wake_one(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
    thread_t *t;
    int ret = 0;

#if THREAD_CHECKS
    ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    ASSERT(in_critical_section());
#endif

    t = list_remove_head_type(&wait->list, thread_t, queue_node);
    if (t) {
        wait->count--;
#if THREAD_CHECKS
        ASSERT(t->state == THREAD_BLOCKED);
#endif
        t->state = THREAD_READY;
        t->wait_queue_block_ret = wait_queue_error;
        t->blocking_wait_queue = NULL;

        /* if we're instructed to reschedule, stick the current thread on the head
         * of the run queue first, so that the newly awakened thread gets a chance to run
         * before the current one, but the current one doesn't get unnecessarily punished.
         */
        if (reschedule) {
            current_thread->state = THREAD_READY;
            insert_in_run_queue_head(current_thread);
        }
        insert_in_run_queue_head(t);
        if (reschedule)
            thread_resched();
        ret = 1;
    }

    return ret;
}

int wait_queue_wake_all(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
    thread_t *t;
    int ret = 0;

#if THREAD_CHECKS
    ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    ASSERT(in_critical_section());
#endif

    if (reschedule && wait->count > 0) {
        /* if we're instructed to reschedule, stick the current thread on the head
         * of the run queue first, so that the newly awakened threads get a chance to run
         * before the current one, but the current one doesn't get unnecessarily punished.
         */
        current_thread->state = THREAD_READY;
        insert_in_run_queue_head(current_thread);
    }

    /* pop all the threads off the wait queue into the run queue */
    while ((t = list_remove_head_type(&wait->list, thread_t, queue_node))) {
        wait->count--;
#if THREAD_CHECKS
        ASSERT(t->state == THREAD_BLOCKED);
#endif
        t->state = THREAD_READY;
        t->wait_queue_block_ret = wait_queue_error;
        t->blocking_wait_queue = NULL;

        insert_in_run_queue_head(t);
        ret++;
    }

#if THREAD_CHECKS
    ASSERT(wait->count == 0);
#endif

    if (reschedule && ret > 0)
        thread_resched();

    return ret;
}

void wait_queue_destroy(wait_queue_t *wait, bool reschedule)
{
#if THREAD_CHECKS
    ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    ASSERT(in_critical_section());
#endif
    wait_queue_wake_all(wait, reschedule, ERR_OBJECT_DESTROYED);
    wait->magic = 0;
}

status_t thread_unblock_from_wait_queue(thread_t *t, bool reschedule, status_t wait_queue_error)
{
    enter_critical_section();

#if THREAD_CHECKS
    ASSERT(t->magic == THREAD_MAGIC);
#endif

    if (t->state != THREAD_BLOCKED) {
        exit_critical_section();
        return ERR_NOT_BLOCKED;
    }

#if THREAD_CHECKS
    ASSERT(t->blocking_wait_queue != NULL);
    ASSERT(t->blocking_wait_queue->magic == WAIT_QUEUE_MAGIC);
    ASSERT(list_in_list(&t->queue_node));
#endif

    list_delete(&t->queue_node);
    t->blocking_wait_queue->count--;
    t->blocking_wait_queue = NULL;
    t->state = THREAD_READY;
    t->wait_queue_block_ret = wait_queue_error;
    insert_in_run_queue_head(t);

    if (reschedule)
        thread_resched();

    exit_critical_section();

    return NO_ERROR;
}