/*
 * Copyright (c) 2008 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <debug.h>
#include <list.h>
#include <malloc.h>
#include <string.h>
#include <err.h>
#include <kernel/thread.h>
#include <kernel/timer.h>
#include <kernel/dpc.h>
#include <platform.h>

#if DEBUGLEVEL > 1
#define THREAD_CHECKS 1
#endif

#if THREAD_STATS
struct thread_stats thread_stats;
#endif

/* global thread list */
static struct list_node thread_list;

/* the current thread */
thread_t *current_thread;

/* the global critical section count */
int critical_section_count = 1;

/* the run queue */
static struct list_node run_queue[NUM_PRIORITIES];
static uint32_t run_queue_bitmap;
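/* note: bit N of run_queue_bitmap is set exactly when run_queue[N] is
 * non-empty; the insert routines below set it and thread_resched() clears it
 * when a queue drains */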

/* the bootstrap thread (statically allocated) */
static thread_t bootstrap_thread;

/* the idle thread */
thread_t *idle_thread;

/* local routines */
static void thread_resched(void);
static void idle_thread_routine(void) __NO_RETURN;

/* run queue manipulation */
static void insert_in_run_queue_head(thread_t *t)
{
#if THREAD_CHECKS
	ASSERT(t->magic == THREAD_MAGIC);
	ASSERT(t->state == THREAD_READY);
	ASSERT(!list_in_list(&t->queue_node));
	ASSERT(in_critical_section());
#endif

	list_add_head(&run_queue[t->priority], &t->queue_node);
	run_queue_bitmap |= (1<<t->priority);
}

static void insert_in_run_queue_tail(thread_t *t)
{
#if THREAD_CHECKS
	ASSERT(t->magic == THREAD_MAGIC);
	ASSERT(t->state == THREAD_READY);
	ASSERT(!list_in_list(&t->queue_node));
	ASSERT(in_critical_section());
#endif

	list_add_tail(&run_queue[t->priority], &t->queue_node);
	run_queue_bitmap |= (1<<t->priority);
}

static void init_thread_struct(thread_t *t, const char *name)
{
	memset(t, 0, sizeof(thread_t));
	t->magic = THREAD_MAGIC;
	strlcpy(t->name, name, sizeof(t->name));
}

thread_t *thread_create(const char *name, thread_start_routine entry, void *arg, int priority, size_t stack_size)
{
	thread_t *t;

	t = malloc(sizeof(thread_t));
	if (!t)
		return NULL;

	init_thread_struct(t, name);

	t->entry = entry;
	t->arg = arg;
	t->priority = priority;
	t->saved_critical_section_count = 1; /* we always start inside a critical section */
	t->state = THREAD_SUSPENDED;
	t->blocking_wait_queue = NULL;
	t->wait_queue_block_ret = NO_ERROR;

	/* create the stack */
	t->stack = malloc(stack_size);
	if (!t->stack) {
		free(t);
		return NULL;
	}

	t->stack_size = stack_size;

	/* set up the initial stack frame */
	arch_thread_initialize(t);

	/* add it to the global thread list */
	enter_critical_section();
	list_add_head(&thread_list, &t->thread_list_node);
	exit_critical_section();

	return t;
}
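
/*
 * A usage sketch (not from this file; the priority and stack-size constants
 * are illustrative assumptions):
 *
 *   thread_t *t = thread_create("worker", &worker_entry, NULL,
 *                               DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
 *   if (t)
 *       thread_resume(t); // new threads start suspended; resume to run them
 */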

status_t thread_resume(thread_t *t)
{
#if THREAD_CHECKS
	ASSERT(t->magic == THREAD_MAGIC);
	ASSERT(t->state != THREAD_DEATH);
#endif

	if (t->state == THREAD_READY || t->state == THREAD_RUNNING)
		return ERR_NOT_SUSPENDED;

	enter_critical_section();
	t->state = THREAD_READY;
	insert_in_run_queue_head(t);
	thread_yield();
	exit_critical_section();

	return NO_ERROR;
}

static void thread_cleanup_dpc(void *thread)
{
	thread_t *t = (thread_t *)thread;

//	dprintf("thread_cleanup_dpc: thread %p (%s)\n", t, t->name);

#if THREAD_CHECKS
	ASSERT(t->state == THREAD_DEATH);
	ASSERT(t->blocking_wait_queue == NULL);
	ASSERT(!list_in_list(&t->queue_node));
#endif

	/* remove it from the master thread list */
	enter_critical_section();
	list_delete(&t->thread_list_node);
	exit_critical_section();

	/* free its stack and the thread structure itself */
	if (t->stack)
		free(t->stack);

	free(t);
}

void thread_exit(int retcode)
{
#if THREAD_CHECKS
	ASSERT(current_thread->magic == THREAD_MAGIC);
	ASSERT(current_thread->state == THREAD_RUNNING);
#endif

//	dprintf("thread_exit: current %p\n", current_thread);

	enter_critical_section();

	/* enter the dead state */
	current_thread->state = THREAD_DEATH;
	current_thread->retcode = retcode;
	/* schedule a dpc to clean ourselves up; we can't free our own stack
	 * while we're still running on it, so the teardown is deferred */
	dpc_queue(thread_cleanup_dpc, (void *)current_thread, DPC_FLAG_NORESCHED);

	/* reschedule */
	thread_resched();

	panic("somehow fell through thread_exit()\n");
}

static void idle_thread_routine(void)
{
	for(;;)
		arch_idle();
}

/*
 * Internal reschedule routine. The current thread must already be in whatever
 * state and on whatever queues it needs to be in; this routine simply picks
 * the next thread and switches to it.
 */
static void thread_resched(void)
{
	thread_t *oldthread;
	thread_t *newthread;

//	dprintf("thread_resched: current %p: ", current_thread);
//	dump_thread(current_thread);

#if THREAD_CHECKS
	ASSERT(in_critical_section());
#endif

#if THREAD_STATS
	thread_stats.reschedules++;
#endif

	oldthread = current_thread;

	// at the moment, can't deal with more than 32 priority levels
	ASSERT(NUM_PRIORITIES <= 32);

	// should at least find the idle thread
#if THREAD_CHECKS
	ASSERT(run_queue_bitmap != 0);
#endif

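	/* pick the highest non-empty queue: __builtin_clz counts leading zero
	 * bits, so for example with NUM_PRIORITIES == 32 (and, assuming the usual
	 * definition, HIGHEST_PRIORITY == 31), a bitmap with only bit 5 set gives
	 * clz == 26 and next_queue == 31 - 26 - 0 == 5 */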
	int next_queue = HIGHEST_PRIORITY - __builtin_clz(run_queue_bitmap) - (32 - NUM_PRIORITIES);
//	dprintf("bitmap 0x%x, next %d\n", run_queue_bitmap, next_queue);

	newthread = list_remove_head_type(&run_queue[next_queue], thread_t, queue_node);

#if THREAD_CHECKS
	ASSERT(newthread);
#endif

	if (list_is_empty(&run_queue[next_queue]))
		run_queue_bitmap &= ~(1<<next_queue);

#if 0
	// XXX make this more efficient
	newthread = NULL;
	for (i=HIGHEST_PRIORITY; i >= LOWEST_PRIORITY; i--) {
		newthread = list_remove_head_type(&run_queue[i], thread_t, queue_node);
		if (newthread)
			break;
	}
#endif

//	dprintf("newthread: ");
//	dump_thread(newthread);

	newthread->state = THREAD_RUNNING;

	if (newthread == oldthread)
		return;

	/* give the new thread a fresh quantum if it has used up its old one */
	if (newthread->remaining_quantum <= 0) {
		newthread->remaining_quantum = 5; // XXX make this smarter
	}

#if THREAD_STATS
	thread_stats.context_switches++;

	if (oldthread == idle_thread) {
		bigtime_t now = current_time_hires();
		thread_stats.idle_time += now - thread_stats.last_idle_timestamp;
	}
	if (newthread == idle_thread) {
		thread_stats.last_idle_timestamp = current_time_hires();
	}
#endif

#if THREAD_CHECKS
	ASSERT(critical_section_count > 0);
	ASSERT(newthread->saved_critical_section_count > 0);
#endif

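	/* the critical section depth travels with each thread: stash the current
	 * count in the outgoing thread and adopt the incoming thread's saved
	 * count, so each thread resumes at the nesting level it switched out at */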
	/* do the switch */
	oldthread->saved_critical_section_count = critical_section_count;
	current_thread = newthread;
	critical_section_count = newthread->saved_critical_section_count;
	arch_context_switch(oldthread, newthread);
}

void thread_yield(void)
{
#if THREAD_CHECKS
	ASSERT(current_thread->magic == THREAD_MAGIC);
	ASSERT(current_thread->state == THREAD_RUNNING);
#endif

	enter_critical_section();

#if THREAD_STATS
	thread_stats.yields++;
#endif

	/* we are yielding the cpu, so stick ourselves into the tail of the run queue and reschedule */
	current_thread->state = THREAD_READY;
	current_thread->remaining_quantum = 0;
	insert_in_run_queue_tail(current_thread);
	thread_resched();

	exit_critical_section();
}

void thread_preempt(void)
{
#if THREAD_CHECKS
	ASSERT(current_thread->magic == THREAD_MAGIC);
	ASSERT(current_thread->state == THREAD_RUNNING);
#endif

	enter_critical_section();

#if THREAD_STATS
	if (current_thread != idle_thread)
		thread_stats.preempts++; /* only track when a meaningful preempt happens */
#endif

	/* we are being preempted, so we get to go back into the front of the run queue if we have quantum left */
	current_thread->state = THREAD_READY;
	if (current_thread->remaining_quantum > 0)
		insert_in_run_queue_head(current_thread);
	else
		insert_in_run_queue_tail(current_thread); /* if we're out of quantum, go to the tail of the queue */
	thread_resched();

	exit_critical_section();
}

void thread_block(void)
{
#if THREAD_CHECKS
	ASSERT(current_thread->magic == THREAD_MAGIC);
	ASSERT(current_thread->state == THREAD_BLOCKED);
#endif

	enter_critical_section();

	/* we are blocking on something. the blocking code should have already stuck us on a queue */
	thread_resched();

	exit_critical_section();
}

enum handler_return thread_timer_tick(void)
{
	if (current_thread == idle_thread)
		return INT_NO_RESCHEDULE;

	current_thread->remaining_quantum--;
	if (current_thread->remaining_quantum <= 0)
		return INT_RESCHEDULE;
	else
		return INT_NO_RESCHEDULE;
}
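
/*
 * A sketch of how a platform's periodic tick might consume that return value
 * (platform_tick below is hypothetical; the real wiring lives in the platform
 * and interrupt-exit code):
 *
 *   static enum handler_return platform_tick(timer_t *t, time_t now, void *arg)
 *   {
 *       return thread_timer_tick();
 *   }
 *
 * when a handler returns INT_RESCHEDULE, the interrupt exit path is expected
 * to call thread_preempt() before returning to the interrupted context.
 */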

/* timer callback to wake up a sleeping thread */
static enum handler_return thread_sleep_handler(timer_t *timer, time_t now, void *arg)
{
	thread_t *t = (thread_t *)arg;

#if THREAD_CHECKS
	ASSERT(t->magic == THREAD_MAGIC);
	ASSERT(t->state == THREAD_SLEEPING);
#endif

	t->state = THREAD_READY;
	insert_in_run_queue_head(t);

	return INT_RESCHEDULE;
}

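/* block the current thread for at least 'delay'; the one-shot timer can live
 * on our stack because this frame stays alive until the handler has fired and
 * woken us back up */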
void thread_sleep(time_t delay)
{
	timer_t timer;

#if THREAD_CHECKS
	ASSERT(current_thread->magic == THREAD_MAGIC);
	ASSERT(current_thread->state == THREAD_RUNNING);
#endif

	timer_initialize(&timer);

	enter_critical_section();
	timer_set_oneshot(&timer, delay, thread_sleep_handler, (void *)current_thread);
	current_thread->state = THREAD_SLEEPING;
	thread_resched();
	exit_critical_section();
}

void thread_init(void)
{
	int i;

	/* initialize the run queues */
	for (i=0; i < NUM_PRIORITIES; i++)
		list_initialize(&run_queue[i]);

	/* initialize the thread list */
	list_initialize(&thread_list);

	/* create a thread to cover the current running state */
	thread_t *t = &bootstrap_thread;
	init_thread_struct(t, "bootstrap");

	/* half construct this thread, since we're already running */
	t->priority = HIGHEST_PRIORITY;
	t->state = THREAD_RUNNING;
	t->saved_critical_section_count = 1;
	list_add_head(&thread_list, &t->thread_list_node);
	current_thread = t;
}

void thread_set_name(const char *name)
{
	strlcpy(current_thread->name, name, sizeof(current_thread->name));
}

void thread_set_priority(int priority)
{
	if (priority < LOWEST_PRIORITY)
		priority = LOWEST_PRIORITY;
	if (priority > HIGHEST_PRIORITY)
		priority = HIGHEST_PRIORITY;
	current_thread->priority = priority;
}

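/* convert the current thread into the idle thread; this never returns, since
 * idle_thread_routine() spins in arch_idle() forever */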
void thread_become_idle(void)
{
	thread_set_name("idle");
	thread_set_priority(IDLE_PRIORITY);
	idle_thread = current_thread;
	idle_thread_routine();
}

void dump_thread(thread_t *t)
{
	dprintf("dump_thread: t %p (%s)\n", t, t->name);
	dprintf("\tstate %d, priority %d, remaining quantum %d, critical section %d\n", t->state, t->priority, t->remaining_quantum, t->saved_critical_section_count);
	dprintf("\tstack %p, stack_size %zd\n", t->stack, t->stack_size);
	dprintf("\tentry %p, arg %p\n", t->entry, t->arg);
	dprintf("\twait queue %p, wait queue ret %d\n", t->blocking_wait_queue, t->wait_queue_block_ret);
}

void dump_all_threads(void)
{
	thread_t *t;

	enter_critical_section();
	list_for_every_entry(&thread_list, t, thread_t, thread_list_node) {
		dump_thread(t);
	}
	exit_critical_section();
}

/* wait queue */
void wait_queue_init(wait_queue_t *wait)
{
	wait->magic = WAIT_QUEUE_MAGIC;
	list_initialize(&wait->list);
	wait->count = 0;
}
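
/*
 * A usage sketch (not from this file): both blocking and waking must happen
 * inside the critical section, per the asserts below.
 *
 *   // waiter
 *   enter_critical_section();
 *   status_t err = wait_queue_block(&queue, INFINITE_TIME);
 *   exit_critical_section();
 *
 *   // waker
 *   enter_critical_section();
 *   wait_queue_wake_one(&queue, true, NO_ERROR);
 *   exit_critical_section();
 */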

static enum handler_return wait_queue_timeout_handler(timer_t *timer, time_t now, void *arg)
{
	thread_t *thread = (thread_t *)arg;

#if THREAD_CHECKS
	ASSERT(thread->magic == THREAD_MAGIC);
#endif

	if (thread_unblock_from_wait_queue(thread, false, ERR_TIMED_OUT) >= NO_ERROR)
		return INT_RESCHEDULE;

	return INT_NO_RESCHEDULE;
}

status_t wait_queue_block(wait_queue_t *wait, time_t timeout)
{
	timer_t timer;

#if THREAD_CHECKS
	ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
	ASSERT(current_thread->state == THREAD_RUNNING);
	ASSERT(in_critical_section());
#endif

	if (timeout == 0)
		return ERR_TIMED_OUT;

	list_add_tail(&wait->list, &current_thread->queue_node);
	wait->count++;
	current_thread->state = THREAD_BLOCKED;
	current_thread->blocking_wait_queue = wait;
	current_thread->wait_queue_block_ret = NO_ERROR;

	/* if the timeout is not infinite, set a callback to yank us out of the queue */
	if (timeout != INFINITE_TIME) {
		timer_initialize(&timer);
		timer_set_oneshot(&timer, timeout, wait_queue_timeout_handler, (void *)current_thread);
	}

	thread_block();

	/* we don't really know whether the timer fired, so to be safe, try to cancel it */
	if (timeout != INFINITE_TIME) {
		timer_cancel(&timer);
	}

	return current_thread->wait_queue_block_ret;
}

int wait_queue_wake_one(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
	thread_t *t;
	int ret = 0;

#if THREAD_CHECKS
	ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
	ASSERT(in_critical_section());
#endif

	t = list_remove_head_type(&wait->list, thread_t, queue_node);
	if (t) {
		wait->count--;
#if THREAD_CHECKS
		ASSERT(t->state == THREAD_BLOCKED);
#endif
		t->state = THREAD_READY;
		t->wait_queue_block_ret = wait_queue_error;
		t->blocking_wait_queue = NULL;

		/* if we're instructed to reschedule, stick the current thread on the head
		 * of the run queue first, so that the newly awakened thread gets a chance to run
		 * before the current one, but the current one doesn't get unnecessarily punished.
		 */
		if (reschedule) {
			current_thread->state = THREAD_READY;
			insert_in_run_queue_head(current_thread);
		}
		insert_in_run_queue_head(t);
		if (reschedule)
			thread_resched();
		ret = 1;
	}

	return ret;
}

int wait_queue_wake_all(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
	thread_t *t;
	int ret = 0;

#if THREAD_CHECKS
	ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
	ASSERT(in_critical_section());
#endif

	if (reschedule && wait->count > 0) {
		/* if we're instructed to reschedule, stick the current thread on the head
		 * of the run queue first, so that the newly awakened threads get a chance to run
		 * before the current one, but the current one doesn't get unnecessarily punished.
		 */
		current_thread->state = THREAD_READY;
		insert_in_run_queue_head(current_thread);
	}

	/* pop all the threads off the wait queue into the run queue */
	while ((t = list_remove_head_type(&wait->list, thread_t, queue_node))) {
		wait->count--;
#if THREAD_CHECKS
		ASSERT(t->state == THREAD_BLOCKED);
#endif
		t->state = THREAD_READY;
		t->wait_queue_block_ret = wait_queue_error;
		t->blocking_wait_queue = NULL;

		insert_in_run_queue_head(t);
		ret++;
	}

#if THREAD_CHECKS
	ASSERT(wait->count == 0);
#endif

	if (reschedule && ret > 0)
		thread_resched();

	return ret;
}

void wait_queue_destroy(wait_queue_t *wait, bool reschedule)
{
#if THREAD_CHECKS
	ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
	ASSERT(in_critical_section());
#endif
	wait_queue_wake_all(wait, reschedule, ERR_OBJECT_DESTROYED);
	wait->magic = 0;
}

status_t thread_unblock_from_wait_queue(thread_t *t, bool reschedule, status_t wait_queue_error)
{
	enter_critical_section();

#if THREAD_CHECKS
	ASSERT(t->magic == THREAD_MAGIC);
#endif

	if (t->state != THREAD_BLOCKED) {
		exit_critical_section();
		return ERR_NOT_BLOCKED;
	}

#if THREAD_CHECKS
	ASSERT(t->blocking_wait_queue != NULL);
	ASSERT(t->blocking_wait_queue->magic == WAIT_QUEUE_MAGIC);
	ASSERT(list_in_list(&t->queue_node));
#endif

	list_delete(&t->queue_node);
	t->blocking_wait_queue->count--;
	t->blocking_wait_queue = NULL;
	t->state = THREAD_READY;
	t->wait_queue_block_ret = wait_queue_error;
	insert_in_run_queue_head(t);

	if (reschedule)
		thread_resched();

	exit_critical_section();

	return NO_ERROR;
}