/*
 * Copyright (c) 2008 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <debug.h>
#include <list.h>
#include <malloc.h>
#include <string.h>
#include <err.h>
#include <kernel/thread.h>
#include <kernel/timer.h>
#include <kernel/dpc.h>
#include <platform.h>

#if DEBUGLEVEL > 1
#define THREAD_CHECKS 1
#endif

#if THREAD_STATS
struct thread_stats thread_stats;
#endif

/* global thread list */
static struct list_node thread_list;

/* the current thread */
thread_t *current_thread;

/* the global critical section count */
int critical_section_count = 1;

/* the run queue */
static struct list_node run_queue[NUM_PRIORITIES];
static uint32_t run_queue_bitmap;

/* the bootstrap thread (statically allocated) */
static thread_t bootstrap_thread;

/* the idle thread */
thread_t *idle_thread;

/* local routines */
static void thread_resched(void);
static void idle_thread_routine(void) __NO_RETURN;

/* run queue manipulation */
static void insert_in_run_queue_head(thread_t *t)
{
#if THREAD_CHECKS
	ASSERT(t->magic == THREAD_MAGIC);
	ASSERT(t->state == THREAD_READY);
	ASSERT(!list_in_list(&t->queue_node));
	ASSERT(in_critical_section());
#endif

	list_add_head(&run_queue[t->priority], &t->queue_node);
	run_queue_bitmap |= (1<<t->priority);
}

static void insert_in_run_queue_tail(thread_t *t)
{
#if THREAD_CHECKS
	ASSERT(t->magic == THREAD_MAGIC);
	ASSERT(t->state == THREAD_READY);
	ASSERT(!list_in_list(&t->queue_node));
	ASSERT(in_critical_section());
#endif

	list_add_tail(&run_queue[t->priority], &t->queue_node);
	run_queue_bitmap |= (1<<t->priority);
}

static void init_thread_struct(thread_t *t, const char *name)
{
	memset(t, 0, sizeof(thread_t));
	t->magic = THREAD_MAGIC;
	strlcpy(t->name, name, sizeof(t->name));
}

thread_t *thread_create(const char *name, thread_start_routine entry, void *arg, int priority, size_t stack_size)
{
	thread_t *t;

	t = malloc(sizeof(thread_t));
	if (!t)
		return NULL;

	init_thread_struct(t, name);

	t->entry = entry;
	t->arg = arg;
	t->priority = priority;
	t->saved_critical_section_count = 1; /* we always start inside a critical section */
	t->state = THREAD_SUSPENDED;
	t->blocking_wait_queue = NULL;
	t->wait_queue_block_ret = NO_ERROR;

	/* create the stack */
	t->stack = malloc(stack_size);
	if (!t->stack) {
		free(t);
		return NULL;
	}

	t->stack_size = stack_size;

	/* inherit thread local storage from the parent */
	int i;
	for (i=0; i < MAX_TLS_ENTRY; i++)
		t->tls[i] = current_thread->tls[i];

	/* set up the initial stack frame */
	arch_thread_initialize(t);

	/* add it to the global thread list */
	enter_critical_section();
	list_add_head(&thread_list, &t->thread_list_node);
	exit_critical_section();

	return t;
}
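
/*
 * Usage sketch (not part of the original file): threads come back from
 * thread_create() suspended, so callers typically pair it with
 * thread_resume(). 'worker_entry' is hypothetical; DEFAULT_PRIORITY and
 * DEFAULT_STACK_SIZE are assumed to be provided by kernel/thread.h.
 *
 *	static int worker_entry(void *arg)
 *	{
 *		dprintf(INFO, "worker started, arg %p\n", arg);
 *		return 0;
 *	}
 *
 *	thread_t *t = thread_create("worker", &worker_entry, NULL,
 *			DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
 *	if (t)
 *		thread_resume(t);
 */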

status_t thread_resume(thread_t *t)
{
#if THREAD_CHECKS
	ASSERT(t->magic == THREAD_MAGIC);
	ASSERT(t->state != THREAD_DEATH);
#endif

	if (t->state == THREAD_READY || t->state == THREAD_RUNNING)
		return ERR_NOT_SUSPENDED;

	enter_critical_section();
	t->state = THREAD_READY;
	insert_in_run_queue_head(t);
	thread_yield();
	exit_critical_section();

	return NO_ERROR;
}

static void thread_cleanup_dpc(void *thread)
{
	thread_t *t = (thread_t *)thread;

//	dprintf(SPEW, "thread_cleanup_dpc: thread %p (%s)\n", t, t->name);

#if THREAD_CHECKS
	ASSERT(t->state == THREAD_DEATH);
	ASSERT(t->blocking_wait_queue == NULL);
	ASSERT(!list_in_list(&t->queue_node));
#endif

	/* remove it from the master thread list */
	enter_critical_section();
	list_delete(&t->thread_list_node);
	exit_critical_section();

	/* free its stack and the thread structure itself */
	if (t->stack)
		free(t->stack);

	free(t);
}

void thread_exit(int retcode)
{
#if THREAD_CHECKS
	ASSERT(current_thread->magic == THREAD_MAGIC);
	ASSERT(current_thread->state == THREAD_RUNNING);
#endif

//	dprintf("thread_exit: current %p\n", current_thread);

	enter_critical_section();

	/* enter the dead state */
	current_thread->state = THREAD_DEATH;
	current_thread->retcode = retcode;

	/* schedule a dpc to clean ourselves up */
	dpc_queue(thread_cleanup_dpc, (void *)current_thread, DPC_FLAG_NORESCHED);

	/* reschedule */
	thread_resched();

	panic("somehow fell through thread_exit()\n");
}

static void idle_thread_routine(void)
{
	for(;;)
		arch_idle();
}

/*
 * Internal reschedule routine. The current thread must already have been
 * put in whatever state and queue it belongs in; this routine simply
 * picks the next thread and switches to it.
 */
void thread_resched(void)
{
	thread_t *oldthread;
	thread_t *newthread;

//	dprintf("thread_resched: current %p: ", current_thread);
//	dump_thread(current_thread);

#if THREAD_CHECKS
	ASSERT(in_critical_section());
#endif

#if THREAD_STATS
	thread_stats.reschedules++;
#endif

	oldthread = current_thread;

	// at the moment, can't deal with more than 32 priority levels
	ASSERT(NUM_PRIORITIES <= 32);

	// should at least find the idle thread
#if THREAD_CHECKS
	ASSERT(run_queue_bitmap != 0);
#endif

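	/*
	 * Pick the highest-priority nonempty queue: __builtin_clz() counts
	 * leading zero bits, so (31 - clz) is the index of the highest set
	 * bit of the 32-bit bitmap. Assuming the usual header values of
	 * NUM_PRIORITIES == 32 and HIGHEST_PRIORITY == 31 (an assumption,
	 * not restated in this file), the expression below reduces to
	 * 31 - clz(bitmap); e.g. a bitmap of 0x101 (queues 0 and 8
	 * nonempty) gives clz == 23 and selects queue 8.
	 */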
	int next_queue = HIGHEST_PRIORITY - __builtin_clz(run_queue_bitmap) - (32 - NUM_PRIORITIES);
	//dprintf(SPEW, "bitmap 0x%x, next %d\n", run_queue_bitmap, next_queue);

	newthread = list_remove_head_type(&run_queue[next_queue], thread_t, queue_node);

#if THREAD_CHECKS
	ASSERT(newthread);
#endif

	if (list_is_empty(&run_queue[next_queue]))
		run_queue_bitmap &= ~(1<<next_queue);

#if 0
	// XXX make this more efficient
	newthread = NULL;
	for (i=HIGHEST_PRIORITY; i >= LOWEST_PRIORITY; i--) {
		newthread = list_remove_head_type(&run_queue[i], thread_t, queue_node);
		if (newthread)
			break;
	}
#endif

//	dprintf("newthread: ");
//	dump_thread(newthread);

	newthread->state = THREAD_RUNNING;

	if (newthread == oldthread)
		return;

	/* set up quantum for the new thread if it was consumed */
	if (newthread->remaining_quantum <= 0) {
		newthread->remaining_quantum = 5; // XXX make this smarter
	}

#if THREAD_STATS
	thread_stats.context_switches++;

	if (oldthread == idle_thread) {
		bigtime_t now = current_time_hires();
		thread_stats.idle_time += now - thread_stats.last_idle_timestamp;
	}
	if (newthread == idle_thread) {
		thread_stats.last_idle_timestamp = current_time_hires();
	}
#endif

#if THREAD_CHECKS
	ASSERT(critical_section_count > 0);
	ASSERT(newthread->saved_critical_section_count > 0);
#endif

	/* do the switch */
	oldthread->saved_critical_section_count = critical_section_count;
	current_thread = newthread;
	critical_section_count = newthread->saved_critical_section_count;
	arch_context_switch(oldthread, newthread);
}

void thread_yield(void)
{
#if THREAD_CHECKS
	ASSERT(current_thread->magic == THREAD_MAGIC);
	ASSERT(current_thread->state == THREAD_RUNNING);
#endif

	enter_critical_section();

#if THREAD_STATS
	thread_stats.yields++;
#endif

	/* we are yielding the cpu, so stick ourselves into the tail of the run queue and reschedule */
	current_thread->state = THREAD_READY;
	current_thread->remaining_quantum = 0;
	insert_in_run_queue_tail(current_thread);
	thread_resched();

	exit_critical_section();
}

void thread_preempt(void)
{
#if THREAD_CHECKS
	ASSERT(current_thread->magic == THREAD_MAGIC);
	ASSERT(current_thread->state == THREAD_RUNNING);
#endif

	enter_critical_section();

#if THREAD_STATS
	if (current_thread != idle_thread)
		thread_stats.preempts++; /* only track when a meaningful preempt happens */
#endif

	/* we are being preempted, so we get to go back into the front of the run queue if we have quantum left */
	current_thread->state = THREAD_READY;
	if (current_thread->remaining_quantum > 0)
		insert_in_run_queue_head(current_thread);
	else
		insert_in_run_queue_tail(current_thread); /* if we're out of quantum, go to the tail of the queue */
	thread_resched();

	exit_critical_section();
}

void thread_block(void)
{
#if THREAD_CHECKS
	ASSERT(current_thread->magic == THREAD_MAGIC);
	ASSERT(current_thread->state == THREAD_BLOCKED);
#endif

	enter_critical_section();

	/* we are blocking on something. the blocking code should have already stuck us on a queue */
	thread_resched();

	exit_critical_section();
}

enum handler_return thread_timer_tick(void)
{
	if (current_thread == idle_thread)
		return INT_NO_RESCHEDULE;

	current_thread->remaining_quantum--;
	if (current_thread->remaining_quantum <= 0)
		return INT_RESCHEDULE;
	else
		return INT_NO_RESCHEDULE;
}
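
/*
 * A sketch of the expected wiring (an assumption; the caller lives in the
 * platform's interrupt code, not in this file): the periodic timer
 * interrupt calls thread_timer_tick() once per tick, and a return value
 * of INT_RESCHEDULE tells the interrupt glue to call thread_preempt() on
 * the way out, round-robining equal-priority threads once their quantum
 * is used up.
 */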

/* timer callback to wake up a sleeping thread */
static enum handler_return thread_sleep_handler(timer_t *timer, time_t now, void *arg)
{
	thread_t *t = (thread_t *)arg;

#if THREAD_CHECKS
	ASSERT(t->magic == THREAD_MAGIC);
	ASSERT(t->state == THREAD_SLEEPING);
#endif

	t->state = THREAD_READY;
	insert_in_run_queue_head(t);

	return INT_RESCHEDULE;
}

void thread_sleep(time_t delay)
{
	timer_t timer;

#if THREAD_CHECKS
	ASSERT(current_thread->magic == THREAD_MAGIC);
	ASSERT(current_thread->state == THREAD_RUNNING);
#endif

	timer_initialize(&timer);

	enter_critical_section();
	timer_set_oneshot(&timer, delay, thread_sleep_handler, (void *)current_thread);
	current_thread->state = THREAD_SLEEPING;
	thread_resched();
	exit_critical_section();
}
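
/*
 * Usage note (an assumption about units, which this file does not state):
 * delay appears to be in milliseconds, so thread_sleep(250) blocks the
 * caller for roughly a quarter second; actual wakeup granularity depends
 * on the platform timer's resolution.
 */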

void thread_init_early(void)
{
	int i;

	/* initialize the run queues */
	for (i=0; i < NUM_PRIORITIES; i++)
		list_initialize(&run_queue[i]);

	/* initialize the thread list */
	list_initialize(&thread_list);

	/* create a thread to cover the current running state */
	thread_t *t = &bootstrap_thread;
	init_thread_struct(t, "bootstrap");

	/* half construct this thread, since we're already running */
	t->priority = HIGHEST_PRIORITY;
	t->state = THREAD_RUNNING;
	t->saved_critical_section_count = 1;
	list_add_head(&thread_list, &t->thread_list_node);
	current_thread = t;
}

void thread_init(void)
{
}

void thread_set_name(const char *name)
{
	strlcpy(current_thread->name, name, sizeof(current_thread->name));
}

void thread_set_priority(int priority)
{
	if (priority < LOWEST_PRIORITY)
		priority = LOWEST_PRIORITY;
	if (priority > HIGHEST_PRIORITY)
		priority = HIGHEST_PRIORITY;
	current_thread->priority = priority;
}

void thread_become_idle(void)
{
	thread_set_name("idle");
	thread_set_priority(IDLE_PRIORITY);
	idle_thread = current_thread;
	idle_thread_routine();
}

void dump_thread(thread_t *t)
{
	dprintf(INFO, "dump_thread: t %p (%s)\n", t, t->name);
	dprintf(INFO, "\tstate %d, priority %d, remaining quantum %d, critical section %d\n", t->state, t->priority, t->remaining_quantum, t->saved_critical_section_count);
	dprintf(INFO, "\tstack %p, stack_size %zd\n", t->stack, t->stack_size);
	dprintf(INFO, "\tentry %p, arg %p\n", t->entry, t->arg);
	dprintf(INFO, "\twait queue %p, wait queue ret %d\n", t->blocking_wait_queue, t->wait_queue_block_ret);
	dprintf(INFO, "\ttls:");
	int i;
	for (i=0; i < MAX_TLS_ENTRY; i++) {
		dprintf(INFO, " 0x%x", t->tls[i]);
	}
	dprintf(INFO, "\n");
}

void dump_all_threads(void)
{
	thread_t *t;

	enter_critical_section();
	list_for_every_entry(&thread_list, t, thread_t, thread_list_node) {
		dump_thread(t);
	}
	exit_critical_section();
}

/* wait queue */
void wait_queue_init(wait_queue_t *wait)
{
	wait->magic = WAIT_QUEUE_MAGIC;
	list_initialize(&wait->list);
	wait->count = 0;
}

static enum handler_return wait_queue_timeout_handler(timer_t *timer, time_t now, void *arg)
{
	thread_t *thread = (thread_t *)arg;

#if THREAD_CHECKS
	ASSERT(thread->magic == THREAD_MAGIC);
#endif

	if (thread_unblock_from_wait_queue(thread, false, ERR_TIMED_OUT) >= NO_ERROR)
		return INT_RESCHEDULE;

	return INT_NO_RESCHEDULE;
}

status_t wait_queue_block(wait_queue_t *wait, time_t timeout)
{
	timer_t timer;

#if THREAD_CHECKS
	ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
	ASSERT(current_thread->state == THREAD_RUNNING);
	ASSERT(in_critical_section());
#endif

	if (timeout == 0)
		return ERR_TIMED_OUT;

	list_add_tail(&wait->list, &current_thread->queue_node);
	wait->count++;
	current_thread->state = THREAD_BLOCKED;
	current_thread->blocking_wait_queue = wait;
	current_thread->wait_queue_block_ret = NO_ERROR;

	/* if the timeout is finite, set a callback to yank us out of the queue */
	if (timeout != INFINITE_TIME) {
		timer_initialize(&timer);
		timer_set_oneshot(&timer, timeout, wait_queue_timeout_handler, (void *)current_thread);
	}

	thread_block();

	/* we can't tell from here whether the timer fired, so cancel it unconditionally to be safe */
	if (timeout != INFINITE_TIME) {
		timer_cancel(&timer);
	}

	return current_thread->wait_queue_block_ret;
}

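/*
 * Usage sketch (not part of the original file): 'event_queue' is a
 * hypothetical wait_queue_t, and the timeout units are assumed to be
 * milliseconds. Per the in_critical_section() assert above, the caller
 * must hold the critical section around the blocking call.
 *
 *	enter_critical_section();
 *	status_t err = wait_queue_block(&event_queue, 100);
 *	exit_critical_section();
 *	if (err == ERR_TIMED_OUT)
 *		dprintf(INFO, "no event within the timeout\n");
 */
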
int wait_queue_wake_one(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
	thread_t *t;
	int ret = 0;

#if THREAD_CHECKS
	ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
	ASSERT(in_critical_section());
#endif

	t = list_remove_head_type(&wait->list, thread_t, queue_node);
	if (t) {
		wait->count--;
#if THREAD_CHECKS
		ASSERT(t->state == THREAD_BLOCKED);
#endif
		t->state = THREAD_READY;
		t->wait_queue_block_ret = wait_queue_error;
		t->blocking_wait_queue = NULL;

		/* if we're instructed to reschedule, stick the current thread on the head
		 * of the run queue first, so that the newly awakened thread gets a chance to run
		 * before the current one, but the current one doesn't get unnecessarily punished.
		 */
		if (reschedule) {
			current_thread->state = THREAD_READY;
			insert_in_run_queue_head(current_thread);
		}
		insert_in_run_queue_head(t);
		if (reschedule)
			thread_resched();
		ret = 1;
	}

	return ret;
}

int wait_queue_wake_all(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
	thread_t *t;
	int ret = 0;

#if THREAD_CHECKS
	ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
	ASSERT(in_critical_section());
#endif

	if (reschedule && wait->count > 0) {
		/* if we're instructed to reschedule, stick the current thread on the head
		 * of the run queue first, so that the newly awakened threads get a chance to run
		 * before the current one, but the current one doesn't get unnecessarily punished.
		 */
		current_thread->state = THREAD_READY;
		insert_in_run_queue_head(current_thread);
	}

	/* pop all the threads off the wait queue into the run queue */
	while ((t = list_remove_head_type(&wait->list, thread_t, queue_node))) {
		wait->count--;
#if THREAD_CHECKS
		ASSERT(t->state == THREAD_BLOCKED);
#endif
		t->state = THREAD_READY;
		t->wait_queue_block_ret = wait_queue_error;
		t->blocking_wait_queue = NULL;

		insert_in_run_queue_head(t);
		ret++;
	}

#if THREAD_CHECKS
	ASSERT(wait->count == 0);
#endif

	if (reschedule && ret > 0)
		thread_resched();

	return ret;
}

void wait_queue_destroy(wait_queue_t *wait, bool reschedule)
{
#if THREAD_CHECKS
	ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
	ASSERT(in_critical_section());
#endif
	wait_queue_wake_all(wait, reschedule, ERR_OBJECT_DESTROYED);
	wait->magic = 0;
}

status_t thread_unblock_from_wait_queue(thread_t *t, bool reschedule, status_t wait_queue_error)
{
	enter_critical_section();

#if THREAD_CHECKS
	ASSERT(t->magic == THREAD_MAGIC);
#endif

	if (t->state != THREAD_BLOCKED) {
		/* don't leak the critical section on the early-out path */
		exit_critical_section();
		return ERR_NOT_BLOCKED;
	}

#if THREAD_CHECKS
	ASSERT(t->blocking_wait_queue != NULL);
	ASSERT(t->blocking_wait_queue->magic == WAIT_QUEUE_MAGIC);
	ASSERT(list_in_list(&t->queue_node));
#endif

	list_delete(&t->queue_node);
	t->blocking_wait_queue->count--;
	t->blocking_wait_queue = NULL;
	t->state = THREAD_READY;
	t->wait_queue_block_ret = wait_queue_error;
	insert_in_run_queue_head(t);

	if (reschedule)
		thread_resched();

	exit_critical_section();

	return NO_ERROR;
}