1
2/*--------------------------------------------------------------------*/
3/*--- A user-space pthreads implementation. vg_scheduler.c ---*/
4/*--------------------------------------------------------------------*/
5
6/*
7 This file is part of Valgrind, an x86 protected-mode emulator
8 designed for debugging and profiling binaries on x86-Unixes.
9
10 Copyright (C) 2000-2002 Julian Seward
11 jseward@acm.org
12 Julian_Seward@muraroa.demon.co.uk
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file LICENSE.
30*/
31
32#include "vg_include.h"
33#include "vg_constants.h"
34
35#include "valgrind.h" /* for VG_USERREQ__MAKE_NOACCESS and
36 VG_USERREQ__DO_LEAK_CHECK */
37
38/* BORKAGE as of 11 Apr 02
39
40Note! This implementation is so poor as to not be suitable for use by
41anyone at all!
42
43- properly save scheduler private state in signal delivery frames.
44
45- fd-poll optimisation (don't select with empty sets)
46
47- signals interrupting read/write and nanosleep, and take notice
48 of SA_RESTART or not
49
50- when a thread is done mark its stack as noaccess
51
52- make signal return and .fini call be detected via request mechanism
53
54 */
55
56
57/* ---------------------------------------------------------------------
58 Types and globals for the scheduler.
59 ------------------------------------------------------------------ */
60
61/* type ThreadId is defined in vg_include.h. */
62
63/* struct ThreadState is defined in vg_include.h. */
64
65/* Private globals. A statically allocated array of threads. */
66static ThreadState vg_threads[VG_N_THREADS];
67
68
69/* vg_oursignalhandler() might longjmp(). Here's the jmp_buf. */
70jmp_buf VG_(scheduler_jmpbuf);
71/* ... and if so, here's the signal which caused it to do so. */
72Int VG_(longjmpd_on_signal);
73
74
75/* Machinery to keep track of which threads are waiting on which
76 fds. */
77typedef
78 struct {
79 /* The thread which made the request. */
80 ThreadId tid;
81
82 /* The next two fields describe the request. */
83 /* File descriptor waited for. -1 means this slot is not in use */
84 Int fd;
85 /* The syscall number the fd is used in. */
86 Int syscall_no;
87
88 /* False => still waiting for select to tell us the fd is ready
89 to go. True => the fd is ready, but the results have not yet
90 been delivered back to the calling thread. Once the latter
91 happens, this entire record is marked as no longer in use, by
92 making the fd field be -1. */
93 Bool ready;
94 }
95 VgWaitedOnFd;
96
97static VgWaitedOnFd vg_waiting_fds[VG_N_WAITING_FDS];
98
99
100
101typedef
102 struct {
103 /* Is this slot in use, or free? */
104 Bool in_use;
105 /* If in_use, is this mutex held by some thread, or not? */
106 Bool held;
107 /* if held==True, owner indicates who by. */
108 ThreadId owner;
109 }
110 VgMutex;
111
112static VgMutex vg_mutexes[VG_N_MUTEXES];
113
114/* Forwards */
115static void do_nontrivial_clientreq ( ThreadId tid );
116
117
118/* ---------------------------------------------------------------------
119 Helper functions for the scheduler.
120 ------------------------------------------------------------------ */
121
122static
123void pp_sched_status ( void )
124{
125 Int i;
126 VG_(printf)("\nsched status:\n");
127 for (i = 0; i < VG_N_THREADS; i++) {
128 if (vg_threads[i].status == VgTs_Empty) continue;
129 VG_(printf)("tid %d: ", i);
130 switch (vg_threads[i].status) {
131 case VgTs_Runnable: VG_(printf)("Runnable\n"); break;
132 case VgTs_WaitFD: VG_(printf)("WaitFD\n"); break;
133 case VgTs_WaitJoiner: VG_(printf)("WaitJoiner(%d)\n",
134 vg_threads[i].joiner); break;
135 case VgTs_WaitJoinee: VG_(printf)("WaitJoinee\n"); break;
136 default: VG_(printf)("???"); break;
137 }
138 }
139 VG_(printf)("\n");
140}
141
142static
143void add_waiting_fd ( ThreadId tid, Int fd, Int syscall_no )
144{
145 Int i;
146
147 vg_assert(fd != -1); /* avoid total chaos */
148
149 for (i = 0; i < VG_N_WAITING_FDS; i++)
150 if (vg_waiting_fds[i].fd == -1)
151 break;
152
153 if (i == VG_N_WAITING_FDS)
154 VG_(panic)("add_waiting_fd: VG_N_WAITING_FDS is too low");
155 /*
156 VG_(printf)("add_waiting_fd: add (tid %d, fd %d) at slot %d\n",
157 tid, fd, i);
158 */
159 vg_waiting_fds[i].fd = fd;
160 vg_waiting_fds[i].tid = tid;
161 vg_waiting_fds[i].ready = False;
162 vg_waiting_fds[i].syscall_no = syscall_no;
163}
164
165
166
167static
168void print_sched_event ( ThreadId tid, Char* what )
169{
170 VG_(message)(Vg_DebugMsg, "SCHED[%d]: %s", tid, what );
171}
172
173
174static
175void print_pthread_event ( ThreadId tid, Char* what )
176{
177 VG_(message)(Vg_DebugMsg, "PTHREAD[%d]: %s", tid, what );
178}
179
180
181static
182Char* name_of_sched_event ( UInt event )
183{
184 switch (event) {
185 case VG_TRC_EBP_JMP_SYSCALL: return "SYSCALL";
186 case VG_TRC_EBP_JMP_CLIENTREQ: return "CLIENTREQ";
187 case VG_TRC_INNER_COUNTERZERO: return "COUNTERZERO";
188 case VG_TRC_INNER_FASTMISS: return "FASTMISS";
189 case VG_TRC_UNRESUMABLE_SIGNAL: return "FATALSIGNAL";
190 default: return "??UNKNOWN??";
191 }
192}
193
194
195/* Create a translation of the client basic block beginning at
196 orig_addr, and add it to the translation cache & translation table.
197 This probably doesn't really belong here, but, hey ...
198*/
199void VG_(create_translation_for) ( Addr orig_addr )
200{
201 Addr trans_addr;
202 TTEntry tte;
203 Int orig_size, trans_size;
204 /* Ensure there is space to hold a translation. */
205 VG_(maybe_do_lru_pass)();
206 VG_(translate)( orig_addr, &orig_size, &trans_addr, &trans_size );
207 /* Copy data at trans_addr into the translation cache.
208 Returned pointer is to the code, not to the 4-byte
209 header. */
210 /* Since the .orig_size and .trans_size fields are
211 UShort, be paranoid. */
212 vg_assert(orig_size > 0 && orig_size < 65536);
213 vg_assert(trans_size > 0 && trans_size < 65536);
214 tte.orig_size = orig_size;
215 tte.orig_addr = orig_addr;
216 tte.trans_size = trans_size;
217 tte.trans_addr = VG_(copy_to_transcache)
218 ( trans_addr, trans_size );
219 tte.mru_epoch = VG_(current_epoch);
220 /* Free the intermediary -- was allocated by VG_(emit_code). */
221 VG_(jitfree)( (void*)trans_addr );
222 /* Add to trans tab and set back pointer. */
223 VG_(add_to_trans_tab) ( &tte );
224 /* Update stats. */
225 VG_(this_epoch_in_count) ++;
226 VG_(this_epoch_in_osize) += orig_size;
227 VG_(this_epoch_in_tsize) += trans_size;
228 VG_(overall_in_count) ++;
229 VG_(overall_in_osize) += orig_size;
230 VG_(overall_in_tsize) += trans_size;
231 /* Record translated area for SMC detection. */
232 VG_(smc_mark_original) ( orig_addr, orig_size );
233}
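
/* Illustrative sketch (not part of the original source): how the
   scheduler uses the function above.  On a fast-cache miss it looks
   the address up in the translation table and only translates on a
   genuine miss; the helper name here is invented for the example. */
#if 0
static Addr example_get_translation ( Addr orig_addr )
{
   Addr trans = VG_(search_transtab)( orig_addr );
   if (trans == (Addr)0) {
      VG_(create_translation_for)( orig_addr );    /* fill the cache */
      trans = VG_(search_transtab)( orig_addr );   /* must hit now   */
   }
   return trans;
}
#endif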
234
235
236/* Allocate a completely empty ThreadState record. */
237static
238ThreadId vg_alloc_ThreadState ( void )
239{
240 Int i;
241 for (i = 0; i < VG_N_THREADS; i++) {
242 if (vg_threads[i].status == VgTs_Empty)
243 return i;
244 }
245 VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
246 VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
247 VG_(panic)("VG_N_THREADS is too low");
248 /*NOTREACHED*/
249}
250
251
252ThreadState* VG_(get_thread_state) ( ThreadId tid )
253{
254 vg_assert(tid >= 0 && tid < VG_N_THREADS);
255 vg_assert(vg_threads[tid].status != VgTs_Empty);
256 return & vg_threads[tid];
257}
258
259
260/* Find an unused VgMutex record. */
261static
262MutexId vg_alloc_VgMutex ( void )
263{
264 Int i;
265 for (i = 0; i < VG_N_MUTEXES; i++) {
266 if (!vg_mutexes[i].in_use)
267 return i;
268 }
269 VG_(printf)("vg_alloc_VgMutex: no free slots available\n");
270 VG_(printf)("Increase VG_N_MUTEXES, rebuild and try again.\n");
271 VG_(panic)("VG_N_MUTEXES is too low");
272 /*NOTREACHED*/
273}
274
275
276/* Copy the saved state of a thread into VG_(baseBlock), ready for it
277 to be run. */
278__inline__
279void VG_(load_thread_state) ( ThreadId tid )
280{
281 Int i;
282 VG_(baseBlock)[VGOFF_(m_eax)] = vg_threads[tid].m_eax;
283 VG_(baseBlock)[VGOFF_(m_ebx)] = vg_threads[tid].m_ebx;
284 VG_(baseBlock)[VGOFF_(m_ecx)] = vg_threads[tid].m_ecx;
285 VG_(baseBlock)[VGOFF_(m_edx)] = vg_threads[tid].m_edx;
286 VG_(baseBlock)[VGOFF_(m_esi)] = vg_threads[tid].m_esi;
287 VG_(baseBlock)[VGOFF_(m_edi)] = vg_threads[tid].m_edi;
288 VG_(baseBlock)[VGOFF_(m_ebp)] = vg_threads[tid].m_ebp;
289 VG_(baseBlock)[VGOFF_(m_esp)] = vg_threads[tid].m_esp;
290 VG_(baseBlock)[VGOFF_(m_eflags)] = vg_threads[tid].m_eflags;
291 VG_(baseBlock)[VGOFF_(m_eip)] = vg_threads[tid].m_eip;
292
293 for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
294 VG_(baseBlock)[VGOFF_(m_fpustate) + i] = vg_threads[tid].m_fpu[i];
295
296 VG_(baseBlock)[VGOFF_(sh_eax)] = vg_threads[tid].sh_eax;
297 VG_(baseBlock)[VGOFF_(sh_ebx)] = vg_threads[tid].sh_ebx;
298 VG_(baseBlock)[VGOFF_(sh_ecx)] = vg_threads[tid].sh_ecx;
299 VG_(baseBlock)[VGOFF_(sh_edx)] = vg_threads[tid].sh_edx;
300 VG_(baseBlock)[VGOFF_(sh_esi)] = vg_threads[tid].sh_esi;
301 VG_(baseBlock)[VGOFF_(sh_edi)] = vg_threads[tid].sh_edi;
302 VG_(baseBlock)[VGOFF_(sh_ebp)] = vg_threads[tid].sh_ebp;
303 VG_(baseBlock)[VGOFF_(sh_esp)] = vg_threads[tid].sh_esp;
304 VG_(baseBlock)[VGOFF_(sh_eflags)] = vg_threads[tid].sh_eflags;
305}
306
307
308/* Copy the state of a thread from VG_(baseBlock), presumably after it
309 has been descheduled. For sanity-check purposes, fill the vacated
310 VG_(baseBlock) with garbage so as to make the system more likely to
311 fail quickly if we erroneously continue to poke around inside
312 VG_(baseBlock) without first doing a load_thread_state().
313*/
314__inline__
315void VG_(save_thread_state) ( ThreadId tid )
316{
317 Int i;
318 const UInt junk = 0xDEADBEEF;
319
320 vg_threads[tid].m_eax = VG_(baseBlock)[VGOFF_(m_eax)];
321 vg_threads[tid].m_ebx = VG_(baseBlock)[VGOFF_(m_ebx)];
322 vg_threads[tid].m_ecx = VG_(baseBlock)[VGOFF_(m_ecx)];
323 vg_threads[tid].m_edx = VG_(baseBlock)[VGOFF_(m_edx)];
324 vg_threads[tid].m_esi = VG_(baseBlock)[VGOFF_(m_esi)];
325 vg_threads[tid].m_edi = VG_(baseBlock)[VGOFF_(m_edi)];
326 vg_threads[tid].m_ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
327 vg_threads[tid].m_esp = VG_(baseBlock)[VGOFF_(m_esp)];
328 vg_threads[tid].m_eflags = VG_(baseBlock)[VGOFF_(m_eflags)];
329 vg_threads[tid].m_eip = VG_(baseBlock)[VGOFF_(m_eip)];
330
331 for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
332 vg_threads[tid].m_fpu[i] = VG_(baseBlock)[VGOFF_(m_fpustate) + i];
333
334 vg_threads[tid].sh_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
335 vg_threads[tid].sh_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
336 vg_threads[tid].sh_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
337 vg_threads[tid].sh_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
338 vg_threads[tid].sh_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
339 vg_threads[tid].sh_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
340 vg_threads[tid].sh_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
341 vg_threads[tid].sh_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
342 vg_threads[tid].sh_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
343
344 /* Fill it up with junk. */
345 VG_(baseBlock)[VGOFF_(m_eax)] = junk;
346 VG_(baseBlock)[VGOFF_(m_ebx)] = junk;
347 VG_(baseBlock)[VGOFF_(m_ecx)] = junk;
348 VG_(baseBlock)[VGOFF_(m_edx)] = junk;
349 VG_(baseBlock)[VGOFF_(m_esi)] = junk;
350 VG_(baseBlock)[VGOFF_(m_edi)] = junk;
351 VG_(baseBlock)[VGOFF_(m_ebp)] = junk;
352 VG_(baseBlock)[VGOFF_(m_esp)] = junk;
353 VG_(baseBlock)[VGOFF_(m_eflags)] = junk;
354 VG_(baseBlock)[VGOFF_(m_eip)] = junk;
355
356 for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
357 VG_(baseBlock)[VGOFF_(m_fpustate) + i] = junk;
358}
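
/* Illustrative sketch (not part of the original source): the intended
   use of the save/load pair above is a simple context switch through
   VG_(baseBlock).  The helper below is hypothetical and only shows the
   ordering; the real scheduler does this inline. */
#if 0
static void example_context_switch ( ThreadId from, ThreadId to )
{
   /* Park the outgoing thread's registers back in its slot ... */
   VG_(save_thread_state) ( from );
   /* ... then install the incoming thread's registers, after which
      VG_(run_innerloop)() can be called to execute it. */
   VG_(load_thread_state) ( to );
}
#endif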
359
360
361/* Run the thread tid for a while, and return a VG_TRC_* value to the
362 scheduler indicating what happened. */
363static
364UInt run_thread_for_a_while ( ThreadId tid )
365{
366 UInt trc = 0;
367 vg_assert(tid >= 0 && tid < VG_N_THREADS);
368 vg_assert(vg_threads[tid].status != VgTs_Empty);
369 vg_assert(VG_(bbs_to_go) > 0);
370
371 VG_(load_thread_state) ( tid );
372 if (__builtin_setjmp(VG_(scheduler_jmpbuf)) == 0) {
373 /* try this ... */
374 trc = VG_(run_innerloop)();
375 /* We get here if the client didn't take a fault. */
376 } else {
377 /* We get here if the client took a fault, which caused our
378 signal handler to longjmp. */
379 vg_assert(trc == 0);
380 trc = VG_TRC_UNRESUMABLE_SIGNAL;
381 }
382 VG_(save_thread_state) ( tid );
383 return trc;
384}
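
/* Illustrative sketch (not part of the original source): the
   setjmp/longjmp trick used above, reduced to plain POSIX.  A signal
   handler longjmps back to the saved context, so the caller can treat
   a fatal fault as an ordinary return value.  All names here are
   invented for the example. */
#if 0
#include <setjmp.h>
#include <signal.h>

static sigjmp_buf example_jbuf;

static void example_sigsegv_handler ( int sig )
{
   siglongjmp(example_jbuf, sig);   /* bail out of the faulting code */
}

static int example_run_guarded ( void (*fn)(void) )
{
   signal(SIGSEGV, example_sigsegv_handler);
   if (sigsetjmp(example_jbuf, 1) == 0) {
      fn();          /* normal path: fn ran to completion */
      return 0;
   } else {
      return -1;     /* fn faulted; we got here via siglongjmp */
   }
}
#endif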
385
386
387/* Increment the LRU epoch counter. */
388static
389void increment_epoch ( void )
390{
391 VG_(current_epoch)++;
392 if (VG_(clo_verbosity) > 2) {
393 UInt tt_used, tc_used;
394 VG_(get_tt_tc_used) ( &tt_used, &tc_used );
395 VG_(message)(Vg_UserMsg,
396 "%lu bbs, in: %d (%d -> %d), out %d (%d -> %d), TT %d, TC %d",
397 VG_(bbs_done),
398 VG_(this_epoch_in_count),
399 VG_(this_epoch_in_osize),
400 VG_(this_epoch_in_tsize),
401 VG_(this_epoch_out_count),
402 VG_(this_epoch_out_osize),
403 VG_(this_epoch_out_tsize),
404 tt_used, tc_used
405 );
406 }
407 VG_(this_epoch_in_count) = 0;
408 VG_(this_epoch_in_osize) = 0;
409 VG_(this_epoch_in_tsize) = 0;
410 VG_(this_epoch_out_count) = 0;
411 VG_(this_epoch_out_osize) = 0;
412 VG_(this_epoch_out_tsize) = 0;
413}
414
415
416/* Initialise the scheduler. Create a single "main" thread ready to
417 run, with special ThreadId of zero. This is called at startup; the
418 caller takes care to park the client's state in
419 VG_(baseBlock).
420*/
421void VG_(scheduler_init) ( void )
422{
423 Int i;
424 Addr startup_esp;
425 ThreadId tid_main;
426
427 startup_esp = VG_(baseBlock)[VGOFF_(m_esp)];
428 if ((startup_esp & VG_STARTUP_STACK_MASK) != VG_STARTUP_STACK_MASK) {
429 VG_(printf)("%%esp at startup = %p is not near %p; aborting\n",
430 startup_esp, VG_STARTUP_STACK_MASK);
431 VG_(panic)("unexpected %esp at startup");
432 }
433
434 for (i = 0; i < VG_N_THREADS; i++) {
435 vg_threads[i].stack_size = 0;
436 vg_threads[i].stack_base = (Addr)NULL;
437 }
438
439 for (i = 0; i < VG_N_WAITING_FDS; i++)
440 vg_waiting_fds[i].fd = -1; /* not in use */
441
442 for (i = 0; i < VG_N_MUTEXES; i++)
443 vg_mutexes[i].in_use = False;
444
445 /* Assert this is thread zero, which has certain magic
446 properties. */
447 tid_main = vg_alloc_ThreadState();
448 vg_assert(tid_main == 0);
449
450 vg_threads[tid_main].status = VgTs_Runnable;
451 vg_threads[tid_main].joiner = VG_INVALID_THREADID;
452 vg_threads[tid_main].retval = NULL; /* not important */
453
454 /* Copy VG_(baseBlock) state to tid_main's slot. */
455 VG_(save_thread_state) ( tid_main );
456}
457
458
459/* What if fd isn't a valid fd? */
460static
461void set_fd_nonblocking ( Int fd )
462{
463 Int res = VG_(fcntl)( fd, VKI_F_GETFL, 0 );
464 vg_assert(!VG_(is_kerror)(res));
465 res |= VKI_O_NONBLOCK;
466 res = VG_(fcntl)( fd, VKI_F_SETFL, res );
467 vg_assert(!VG_(is_kerror)(res));
468}
469
470static
471void set_fd_blocking ( Int fd )
472{
473 Int res = VG_(fcntl)( fd, VKI_F_GETFL, 0 );
474 vg_assert(!VG_(is_kerror)(res));
475 res &= ~VKI_O_NONBLOCK;
476 res = VG_(fcntl)( fd, VKI_F_SETFL, res );
477 vg_assert(!VG_(is_kerror)(res));
478}
479
480static
481Bool fd_is_blockful ( Int fd )
482{
483 Int res = VG_(fcntl)( fd, VKI_F_GETFL, 0 );
484 vg_assert(!VG_(is_kerror)(res));
485 return (res & VKI_O_NONBLOCK) ? False : True;
486}
487
488
489
490/* If the request for tid is purely thread-local, do it, put the result
491 in its %EDX without changing its scheduling state in any way, nor that
492 of any other thread, and return True.
493
494 If the request is non-trivial, return False; a more capable but
495 slower mechanism will deal with it.
496*/
497static
498Bool maybe_do_trivial_clientreq ( ThreadId tid )
499{
500# define SIMPLE_RETURN(vvv) \
501 { vg_threads[tid].m_edx = (vvv); \
502 return True; \
503 }
504
505 UInt* arg = (UInt*)(vg_threads[tid].m_eax);
506 UInt req_no = arg[0];
507 switch (req_no) {
508 case VG_USERREQ__MALLOC:
509 SIMPLE_RETURN(
510 (UInt)VG_(client_malloc) ( arg[1], Vg_AllocMalloc )
511 );
512 case VG_USERREQ__BUILTIN_NEW:
513 SIMPLE_RETURN(
514 (UInt)VG_(client_malloc) ( arg[1], Vg_AllocNew )
515 );
516 case VG_USERREQ__BUILTIN_VEC_NEW:
517 SIMPLE_RETURN(
518 (UInt)VG_(client_malloc) ( arg[1], Vg_AllocNewVec )
519 );
520 case VG_USERREQ__FREE:
521 VG_(client_free) ( (void*)arg[1], Vg_AllocMalloc );
522 SIMPLE_RETURN(0); /* irrelevant */
523 case VG_USERREQ__BUILTIN_DELETE:
524 VG_(client_free) ( (void*)arg[1], Vg_AllocNew );
525 SIMPLE_RETURN(0); /* irrelevant */
526 case VG_USERREQ__BUILTIN_VEC_DELETE:
527 VG_(client_free) ( (void*)arg[1], Vg_AllocNewVec );
528 SIMPLE_RETURN(0); /* irrelevant */
529 case VG_USERREQ__CALLOC:
530 SIMPLE_RETURN(
531 (UInt)VG_(client_calloc) ( arg[1], arg[2] )
532 );
533 case VG_USERREQ__REALLOC:
534 SIMPLE_RETURN(
535 (UInt)VG_(client_realloc) ( (void*)arg[1], arg[2] )
536 );
537 case VG_USERREQ__MEMALIGN:
538 SIMPLE_RETURN(
539 (UInt)VG_(client_memalign) ( arg[1], arg[2] )
540 );
541 default:
542 /* Too hard; wimp out. */
543 return False;
544 }
545# undef SIMPLE_RETURN
546}
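
/* Illustrative sketch (not part of the original source): the argument
   block consumed above is, on the client side, just an array of words
   with the request code first.  A simplified, hypothetical malloc
   wrapper might marshal it as below; the real macros live in
   valgrind.h and also arrange for %EAX/%EDX to carry the block address
   and the result. */
#if 0
static void* example_client_malloc ( unsigned int nbytes )
{
   unsigned int arg[4];
   unsigned int result = 0;
   arg[0] = VG_USERREQ__MALLOC;   /* request code, as tested above */
   arg[1] = nbytes;               /* single parameter              */
   arg[2] = arg[3] = 0;
   /* A magic instruction sequence (not shown) hands &arg[0] to the
      scheduler in %EAX; the value placed in %EDX by SIMPLE_RETURN()
      comes back in 'result'. */
   return (void*)result;
}
#endif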
547
548
549static
550void sched_do_syscall ( ThreadId tid )
551{
552 UInt saved_eax;
553 UInt res, syscall_no;
554 UInt fd;
555 Bool might_block, assumed_nonblocking;
556 Bool orig_fd_blockness;
557 Char msg_buf[100];
558
559 vg_assert(tid >= 0 && tid < VG_N_THREADS);
560 vg_assert(vg_threads[tid].status == VgTs_Runnable);
561
562 syscall_no = vg_threads[tid].m_eax; /* syscall number */
563
564 if (syscall_no == __NR_nanosleep) {
565 ULong t_now, t_awaken;
566 struct vki_timespec* req;
567 req = (struct vki_timespec*)vg_threads[tid].m_ebx; /* arg1 */
568 t_now = VG_(read_microsecond_timer)();
569 t_awaken
570 = t_now
571 + (ULong)1000000ULL * (ULong)(req->tv_sec)
572 + (ULong)( (UInt)(req->tv_nsec) / 1000 );
573 vg_threads[tid].status = VgTs_Sleeping;
574 vg_threads[tid].awaken_at = t_awaken;
575 if (VG_(clo_trace_sched)) {
576 VG_(sprintf)(msg_buf, "at %lu: nanosleep for %lu",
577 t_now, t_awaken-t_now);
578 print_sched_event(tid, msg_buf);
579 }
580 /* Force the scheduler to run something else for a while. */
581 return;
582 }
583
584 switch (syscall_no) {
585 case __NR_read:
586 case __NR_write:
587 assumed_nonblocking
588 = False;
589 might_block
590 = fd_is_blockful(vg_threads[tid].m_ebx /* arg1 */);
591 break;
592 default:
593 might_block = False;
594 assumed_nonblocking = True;
595 }
596
597 if (assumed_nonblocking) {
598 /* We think it's non-blocking. Just do it in the normal way. */
599 VG_(perform_assumed_nonblocking_syscall)(tid);
600 /* The thread is still runnable. */
601 return;
602 }
603
604 /* It might block. Take evasive action. */
605 switch (syscall_no) {
606 case __NR_read:
607 case __NR_write:
608 fd = vg_threads[tid].m_ebx; break;
609 default:
610 vg_assert(3+3 == 7);
611 }
612
613 /* Set the fd to nonblocking, and do the syscall, which will return
614 immediately, in order to lodge a request with the Linux kernel.
615 We later poll for I/O completion using select(). */
616
617 orig_fd_blockness = fd_is_blockful(fd);
618 set_fd_nonblocking(fd);
619 vg_assert(!fd_is_blockful(fd));
620 VG_(check_known_blocking_syscall)(tid, syscall_no, NULL /* PRE */);
621
622 /* This trashes the thread's %eax; we have to preserve it. */
623 saved_eax = vg_threads[tid].m_eax;
624 KERNEL_DO_SYSCALL(tid,res);
625
626 /* Restore original blockfulness of the fd. */
627 if (orig_fd_blockness)
628 set_fd_blocking(fd);
629 else
630 set_fd_nonblocking(fd);
631
632 if (res != -VKI_EWOULDBLOCK) {
633 /* It didn't block; it went through immediately. So finish off
634 in the normal way. Don't restore %EAX, since that now
635 (correctly) holds the result of the call. */
636 VG_(check_known_blocking_syscall)(tid, syscall_no, &res /* POST */);
637 /* We're still runnable. */
638 vg_assert(vg_threads[tid].status == VgTs_Runnable);
639
640 } else {
641
642 /* It would have blocked. First, restore %EAX to what it was
643 before our speculative call. */
644 vg_threads[tid].m_eax = saved_eax;
645 /* Put this fd in a table of fds on which we are waiting for
646 completion. The arguments for select() later are constructed
647 from this table. */
648 add_waiting_fd(tid, fd, saved_eax /* which holds the syscall # */);
649 /* Deschedule thread until an I/O completion happens. */
650 vg_threads[tid].status = VgTs_WaitFD;
651 if (VG_(clo_trace_sched)) {
652 VG_(sprintf)(msg_buf,"block until I/O ready on fd %d", fd);
653 print_sched_event(tid, msg_buf);
654 }
655
656 }
657}
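
/* Illustrative sketch (not part of the original source): the
   "make the fd non-blocking, try the call, and only wait if the kernel
   says EWOULDBLOCK" idea used above, in plain POSIX terms.  The
   function name is invented for the example. */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static ssize_t example_speculative_read ( int fd, void* buf, size_t n )
{
   ssize_t res;
   int orig_flags = fcntl(fd, F_GETFL, 0);
   fcntl(fd, F_SETFL, orig_flags | O_NONBLOCK);  /* lodge the request */
   res = read(fd, buf, n);
   fcntl(fd, F_SETFL, orig_flags);               /* restore blockness */
   if (res == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
      /* Would have blocked: a real scheduler would now park the caller
         and poll the fd with select() until it becomes ready. */
   }
   return res;
}
#endif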
658
659
660/* Find out which of the fds in vg_waiting_fds are now ready to go, by
661 making enquiries with select(), and mark them as ready. We have to
662 wait for the requesting threads to fall into the WaitFD state
663 before we can actually finally deliver the results, so this
664 procedure doesn't do that; complete_blocked_syscalls() does it.
665
666 It might seem odd that a thread which has done a blocking syscall
667 is not in WaitFD state; the way this can happen is if it initially
668 becomes WaitFD, but then a signal is delivered to it, so it becomes
669 Runnable for a while. In this case we have to wait for the
670 sighandler to return, whereupon the WaitFD state is resumed, and
671 only at that point can the I/O result be delivered to it. However,
672 this point may be long after the fd is actually ready.
673
674 So, poll_for_ready_fds() merely detects fds which are ready.
675 complete_blocked_syscalls() does the second half of the trick,
676 possibly much later: it delivers the results from ready fds to
677 threads in WaitFD state.
678*/
679void poll_for_ready_fds ( void )
680{
681 vki_ksigset_t saved_procmask;
682 vki_fd_set readfds;
683 vki_fd_set writefds;
684 vki_fd_set exceptfds;
685 struct vki_timeval timeout;
686 Int fd, fd_max, i, n_ready, syscall_no, n_ok;
687 ThreadId tid;
688 Bool rd_ok, wr_ok, ex_ok;
689 Char msg_buf[100];
690
691 /* Awaken any sleeping threads whose sleep has expired. */
692 {
693 struct vki_timespec * rem;
694 ULong t_now = VG_(read_microsecond_timer)();
695 for (tid = 0; tid < VG_N_THREADS; tid++) {
696 if (vg_threads[tid].status != VgTs_Sleeping)
697 continue;
698 if (t_now >= vg_threads[tid].awaken_at) {
699 /* Resume this thread. Set to zero the remaining-time (second)
700 arg of nanosleep, since it's used up all its time. */
701 vg_assert(vg_threads[tid].m_eax == __NR_nanosleep);
702 rem = (struct vki_timespec *)vg_threads[tid].m_ecx; /* arg2 */
703 if (rem != NULL) {
704 rem->tv_sec = 0;
705 rem->tv_nsec = 0;
706 }
707 /* Make the syscall return 0 (success). */
708 vg_threads[tid].m_eax = 0;
709 /* Reschedule this thread. */
710 vg_threads[tid].status = VgTs_Runnable;
711 if (VG_(clo_trace_sched)) {
712 VG_(sprintf)(msg_buf, "at %lu: nanosleep done",
713 t_now);
714 print_sched_event(tid, msg_buf);
715 }
716 }
717 }
718 }
719
720 timeout.tv_sec = 0;
721 timeout.tv_usec = 0;
722
723 VKI_FD_ZERO(&readfds);
724 VKI_FD_ZERO(&writefds);
725 VKI_FD_ZERO(&exceptfds);
726 fd_max = -1;
727 for (i = 0; i < VG_N_WAITING_FDS; i++) {
728 if (vg_waiting_fds[i].fd == -1 /* not in use */)
729 continue;
730 if (vg_waiting_fds[i].ready /* already ready? */)
731 continue;
732 fd = vg_waiting_fds[i].fd;
733 /* VG_(printf)("adding QUERY for fd %d\n", fd); */
734 if (fd > fd_max)
735 fd_max = fd;
736 tid = vg_waiting_fds[i].tid;
737 vg_assert(tid >= 0 && tid < VG_N_THREADS);
738 syscall_no = vg_waiting_fds[i].syscall_no;
739 switch (syscall_no) {
740 case __NR_read:
741 VKI_FD_SET(fd, &readfds); break;
742 case __NR_write:
743 VKI_FD_SET(fd, &writefds); break;
744 default:
745 VG_(panic)("poll_for_ready_fds: unexpected syscall");
746 /*NOTREACHED*/
747 break;
748 }
749 }
750
751 /* BLOCK ALL SIGNALS. We don't want the complication of select()
752 getting interrupted. */
753 VG_(block_all_host_signals)( &saved_procmask );
754
755 n_ready = VG_(select)
756 ( fd_max+1, &readfds, &writefds, &exceptfds, &timeout);
757 if (VG_(is_kerror)(n_ready)) {
758 VG_(printf)("poll_for_ready_fds: select returned %d\n", n_ready);
759 VG_(panic)("poll_for_ready_fds: select failed?!");
760 /*NOTREACHED*/
761 }
762
763 /* UNBLOCK ALL SIGNALS */
764 VG_(restore_host_signals)( &saved_procmask );
765
766 /* VG_(printf)("poll_for_io_completions: %d fs ready\n", n_ready); */
767
768 if (n_ready == 0)
769 return;
770
771 /* Inspect all the fds we know about, and handle any completions that
772 have happened. */
773 /*
774 VG_(printf)("\n\n");
775 for (fd = 0; fd < 100; fd++)
776 if (VKI_FD_ISSET(fd, &writefds) || VKI_FD_ISSET(fd, &readfds)) {
777 VG_(printf)("X"); } else { VG_(printf)("."); };
778 VG_(printf)("\n\nfd_max = %d\n", fd_max);
779 */
780
781 for (fd = 0; fd <= fd_max; fd++) {
782 rd_ok = VKI_FD_ISSET(fd, &readfds);
783 wr_ok = VKI_FD_ISSET(fd, &writefds);
784 ex_ok = VKI_FD_ISSET(fd, &exceptfds);
785
786 n_ok = (rd_ok ? 1 : 0) + (wr_ok ? 1 : 0) + (ex_ok ? 1 : 0);
787 if (n_ok == 0)
788 continue;
789 if (n_ok > 1) {
790 VG_(printf)("offending fd = %d\n", fd);
791 VG_(panic)("poll_for_ready_fds: multiple events on fd");
792 }
793
794 /* An I/O event completed for fd. Find the thread which
795 requested this. */
796 for (i = 0; i < VG_N_WAITING_FDS; i++) {
797 if (vg_waiting_fds[i].fd == -1 /* not in use */)
798 continue;
799 if (vg_waiting_fds[i].fd == fd)
800 break;
801 }
802
803 /* And a bit more paranoia ... */
804 vg_assert(i >= 0 && i < VG_N_WAITING_FDS);
805
806 /* Mark the fd as ready. */
807 vg_assert(! vg_waiting_fds[i].ready);
808 vg_waiting_fds[i].ready = True;
809 }
810}
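
/* Illustrative sketch (not part of the original source): the
   zero-timeout select() used above is just a readiness poll.  This is
   the plain POSIX shape of it, for a single fd; the function name is
   invented for the example. */
#if 0
#include <sys/select.h>

static int example_fd_is_readable_now ( int fd )
{
   fd_set rfds;
   struct timeval tv;
   FD_ZERO(&rfds);
   FD_SET(fd, &rfds);
   tv.tv_sec  = 0;    /* zero timeout: select returns immediately ...  */
   tv.tv_usec = 0;    /* ... reporting readiness without blocking.     */
   return select(fd+1, &rfds, NULL, NULL, &tv) > 0;
}
#endif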
811
812
813/* See comment attached to poll_for_ready_fds() for explanation. */
814void complete_blocked_syscalls ( void )
815{
816 Int fd, i, res, syscall_no;
817 ThreadId tid;
818 Char msg_buf[100];
819
820 /* Inspect all the outstanding fds we know about. */
821
822 for (i = 0; i < VG_N_WAITING_FDS; i++) {
823 if (vg_waiting_fds[i].fd == -1 /* not in use */)
824 continue;
825 if (! vg_waiting_fds[i].ready)
826 continue;
827
828 fd = vg_waiting_fds[i].fd;
829 tid = vg_waiting_fds[i].tid;
830 vg_assert(tid >= 0 && tid < VG_N_THREADS);
831
832 /* The thread actually has to be waiting for the I/O event it
833 requested before we can deliver the result! */
834 if (vg_threads[tid].status != VgTs_WaitFD)
835 continue;
836
837 /* Ok, actually do it! We can safely use %EAX as the syscall
838 number, because the speculative call made by
839 sched_do_syscall() doesn't change %EAX in the case where the
840 call would have blocked. */
841
842 syscall_no = vg_waiting_fds[i].syscall_no;
843 vg_assert(syscall_no == vg_threads[tid].m_eax);
844 KERNEL_DO_SYSCALL(tid,res);
845 VG_(check_known_blocking_syscall)(tid, syscall_no, &res /* POST */);
846
847 /* Reschedule. */
848 vg_threads[tid].status = VgTs_Runnable;
849 /* Mark slot as no longer in use. */
850 vg_waiting_fds[i].fd = -1;
851 /* pp_sched_status(); */
852 if (VG_(clo_trace_sched)) {
853 VG_(sprintf)(msg_buf,"resume due to I/O completion on fd %d", fd);
854 print_sched_event(tid, msg_buf);
855 }
856 }
857}
858
859
860static
861void nanosleep_for_a_while ( void )
862{
863 Int res;
864 struct vki_timespec req;
865 struct vki_timespec rem;
866 req.tv_sec = 0;
867 req.tv_nsec = 20 * 1000 * 1000;
868 res = VG_(nanosleep)( &req, &rem );
869 /* VG_(printf)("after ns, unused = %d\n", rem.tv_nsec ); */
870 vg_assert(res == 0);
871}
872
873
874/* ---------------------------------------------------------------------
875 The scheduler proper.
876 ------------------------------------------------------------------ */
877
878/* Run user-space threads until either
879 * Deadlock occurs
880 * One thread asks to shut down Valgrind
881 * The specified number of basic blocks has gone by.
882*/
883VgSchedReturnCode VG_(scheduler) ( void )
884{
885 ThreadId tid, tid_next;
886 UInt trc;
887 UInt dispatch_ctr_SAVED;
888 Int request_code, done_this_time, n_in_fdwait_or_sleep;
889 Char msg_buf[100];
890 Addr trans_addr;
891
892 /* For the LRU structures, records when the epoch began. */
893 ULong lru_epoch_started_at = 0;
894
895 /* Start with the root thread. tid in general indicates the
896 currently runnable/just-finished-running thread. */
897 tid = 0;
898
899 /* This is the top level scheduler loop. It falls into three
900 phases. */
901 while (True) {
902
903 /* ======================= Phase 1 of 3 =======================
904 Handle I/O completions and signals. This may change the
905 status of various threads. Then select a new thread to run,
906 or declare deadlock, or sleep if there are no runnable
907 threads but some are blocked on I/O. */
908
909 /* Age the LRU structures if an epoch has been completed. */
910 if (VG_(bbs_done) - lru_epoch_started_at >= VG_BBS_PER_EPOCH) {
911 lru_epoch_started_at = VG_(bbs_done);
912 increment_epoch();
913 }
914
915 /* Was a debug-stop requested? */
916 if (VG_(bbs_to_go) == 0)
917 goto debug_stop;
918
919 /* Do the following loop until a runnable thread is found, or
920 deadlock is detected. */
921 while (True) {
922
923 /* For stats purposes only. */
924 VG_(num_scheduling_events_MAJOR) ++;
925
926 /* See if any I/O operations which we were waiting for have
927 completed, and, if so, make runnable the relevant waiting
928 threads. */
929 poll_for_ready_fds();
930 complete_blocked_syscalls();
931
932 /* See if there are any signals which need to be delivered. If
933 so, choose thread(s) to deliver them to, and build signal
934 delivery frames on those thread(s) stacks. */
935 VG_(deliver_signals)( 0 /*HACK*/ );
936 VG_(do_sanity_checks)(0 /*HACK*/, False);
937
938 /* Try and find a thread (tid) to run. */
939 tid_next = tid;
940 n_in_fdwait_or_sleep = 0;
941 while (True) {
942 tid_next++;
943 if (tid_next >= VG_N_THREADS) tid_next = 0;
944 if (vg_threads[tid_next].status == VgTs_WaitFD
945 || vg_threads[tid_next].status == VgTs_Sleeping)
946 n_in_fdwait_or_sleep ++;
947 if (vg_threads[tid_next].status == VgTs_Runnable)
948 break; /* We can run this one. */
949 if (tid_next == tid)
950 break; /* been all the way round */
951 }
952 tid = tid_next;
953
954 if (vg_threads[tid].status == VgTs_Runnable) {
955 /* Found a suitable candidate. Fall out of this loop, so
956 we can advance to stage 2 of the scheduler: actually
957 running the thread. */
958 break;
959 }
960
961 /* We didn't find a runnable thread. Now what? */
962 if (n_in_fdwait_or_sleep == 0) {
963 /* No runnable threads and no prospect of any appearing
964 even if we wait for an arbitrary length of time. In
965 short, we have a deadlock. */
966 pp_sched_status();
967 return VgSrc_Deadlock;
968 }
969
970 /* At least one thread is in a fd-wait state. Delay for a
971 while, and go round again, in the hope that eventually a
972 thread becomes runnable. */
973 nanosleep_for_a_while();
974 // pp_sched_status();
975 // VG_(printf)(".\n");
976 }
977
978
979 /* ======================= Phase 2 of 3 =======================
980 Wahey! We've finally decided that thread tid is runnable, so
981 we now do that. Run it for as much of its quantum as possible.
982 Trivial requests are handled and the thread continues. The
983 aim is not to do Phase 1 too often, since it is expensive. */
984
985 if (0)
986 VG_(printf)("SCHED: tid %d, used %d\n", tid, VG_N_THREADS);
987
988 /* Figure out how many bbs to ask vg_run_innerloop to do. Note
989 that it decrements the counter before testing it for zero, so
990 that if VG_(dispatch_ctr) is set to N you get at most N-1
991 iterations. Also this means that VG_(dispatch_ctr) must
992 exceed zero before entering the innerloop. Also also, the
993 decrement is done before the bb is actually run, so you
994 always get at least one decrement even if nothing happens.
995 */
996 if (VG_(bbs_to_go) >= VG_SCHEDULING_QUANTUM)
997 VG_(dispatch_ctr) = VG_SCHEDULING_QUANTUM + 1;
998 else
999 VG_(dispatch_ctr) = (UInt)VG_(bbs_to_go) + 1;
1000
1001 /* ... and remember what we asked for. */
1002 dispatch_ctr_SAVED = VG_(dispatch_ctr);
1003
1004 /* Actually run thread tid. */
1005 while (True) {
1006
1007 /* For stats purposes only. */
1008 VG_(num_scheduling_events_MINOR) ++;
1009
1010 if (0)
1011 VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs",
1012 tid, VG_(dispatch_ctr) - 1 );
1013
1014 trc = run_thread_for_a_while ( tid );
1015
1016 /* Deal quickly with trivial scheduling events, and resume the
1017 thread. */
1018
1019 if (trc == VG_TRC_INNER_FASTMISS) {
1020 vg_assert(VG_(dispatch_ctr) > 0);
1021
1022 /* Trivial event. Miss in the fast-cache. Do a full
1023 lookup for it. */
1024 trans_addr
1025 = VG_(search_transtab) ( vg_threads[tid].m_eip );
1026 if (trans_addr == (Addr)0) {
1027 /* Not found; we need to request a translation. */
1028 VG_(create_translation_for)( vg_threads[tid].m_eip );
1029 trans_addr = VG_(search_transtab) ( vg_threads[tid].m_eip );
1030 if (trans_addr == (Addr)0)
1031 VG_(panic)("VG_TRC_INNER_FASTMISS: missing tt_fast entry");
1032 }
1033 continue; /* with this thread */
1034 }
1035
1036 if (trc == VG_TRC_EBP_JMP_CLIENTREQ) {
1037 Bool is_triv = maybe_do_trivial_clientreq(tid);
1038 if (is_triv) {
1039 /* NOTE: a trivial request is something like a call to
1040 malloc() or free(). It DOES NOT change the
1041 Runnability of this thread nor the status of any
1042 other thread; it is purely thread-local. */
1043 continue; /* with this thread */
1044 }
1045 }
1046
1047 /* It's a non-trivial event. Give up running this thread and
1048 handle things the expensive way. */
1049 break;
1050 }
1051
1052 /* ======================= Phase 3 of 3 =======================
1053 Handle non-trivial thread requests, mostly pthread stuff. */
1054
1055 /* Ok, we've fallen out of the dispatcher for a
1056 non-completely-trivial reason. First, update basic-block
1057 counters. */
1058
1059 done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 1;
1060 vg_assert(done_this_time >= 0);
1061 VG_(bbs_to_go) -= (ULong)done_this_time;
1062 VG_(bbs_done) += (ULong)done_this_time;
1063
1064 if (0 && trc != VG_TRC_INNER_FASTMISS)
1065 VG_(message)(Vg_DebugMsg, "thread %d: completed %d bbs, trc %d",
1066 tid, done_this_time, (Int)trc );
1067
1068 if (0 && trc != VG_TRC_INNER_FASTMISS)
1069 VG_(message)(Vg_DebugMsg, "thread %d: %ld bbs, event %s",
1070 tid, VG_(bbs_done),
1071 name_of_sched_event(trc) );
1072
1073 /* Examine the thread's return code to figure out why it
1074 stopped, and handle requests. */
1075
1076 switch (trc) {
1077
1078 case VG_TRC_INNER_FASTMISS:
1079 VG_(panic)("VG_(scheduler): VG_TRC_INNER_FASTMISS");
1080 /*NOTREACHED*/
1081 break;
1082
1083 case VG_TRC_INNER_COUNTERZERO:
1084 /* Timeslice is out. Let a new thread be scheduled,
1085 simply by doing nothing, causing us to arrive back at
1086 Phase 1. */
1087 if (VG_(bbs_to_go) == 0) {
1088 goto debug_stop;
1089 }
1090 vg_assert(VG_(dispatch_ctr) == 0);
1091 break;
1092
1093 case VG_TRC_UNRESUMABLE_SIGNAL:
1094 /* It got a SIGSEGV/SIGBUS, which we need to deliver right
1095 away. Again, do nothing, so we wind up back at Phase
1096 1, whereupon the signal will be "delivered". */
1097 break;
1098
1099 case VG_TRC_EBP_JMP_SYSCALL:
1100 /* Do a syscall for the vthread tid. This could cause it
1101 to become non-runnable. */
1102 sched_do_syscall(tid);
1103 break;
1104
1105 case VG_TRC_EBP_JMP_CLIENTREQ:
1106 /* Do a client request for the vthread tid. Note that
1107 some requests will have been handled by
1108 maybe_do_trivial_clientreq(), so we don't expect to see
1109 those here.
1110 */
1111 /* The thread's %EAX points at an arg block, the first
1112 word of which is the request code. */
1113 request_code = ((UInt*)(vg_threads[tid].m_eax))[0];
1114 if (0) {
1115 VG_(sprintf)(msg_buf, "request 0x%x", request_code );
1116 print_sched_event(tid, msg_buf);
1117 }
1118 /* Do a non-trivial client request for thread tid. tid's
1119 %EAX points to a short vector of argument words, the
1120 first of which is the request code. The result of the
1121 request is put in tid's %EDX. Alternatively, perhaps
1122 the request causes tid to become non-runnable and/or
1123 other blocked threads become runnable. In general we
1124 can and often do mess with the state of arbitrary
1125 threads at this point. */
1126 if (request_code == VG_USERREQ__SHUTDOWN_VALGRIND) {
1127 return VgSrc_Shutdown;
1128 } else {
1129 do_nontrivial_clientreq(tid);
1130 }
1131 break;
1132
1133 default:
1134 VG_(printf)("\ntrc = %d\n", trc);
1135 VG_(panic)("VG_(scheduler), phase 3: "
1136 "unexpected thread return code");
1137 /* NOTREACHED */
1138 break;
1139
1140 } /* switch (trc) */
1141
1142 /* That completes Phase 3 of 3. Return now to the top of the
1143 main scheduler loop, to Phase 1 of 3. */
1144
1145 } /* top-level scheduler loop */
1146
1147
1148 /* NOTREACHED */
1149 VG_(panic)("scheduler: post-main-loop ?!");
1150 /* NOTREACHED */
1151
1152 debug_stop:
1153 /* If we exited because of a debug stop, print the translation
1154 of the last block executed -- by translating it again, and
1155 throwing away the result. */
1156 VG_(printf)(
1157 "======vvvvvvvv====== LAST TRANSLATION ======vvvvvvvv======\n");
1158 VG_(translate)( vg_threads[tid].m_eip, NULL, NULL, NULL );
1159 VG_(printf)("\n");
1160 VG_(printf)(
1161 "======^^^^^^^^====== LAST TRANSLATION ======^^^^^^^^======\n");
1162
1163 return VgSrc_BbsDone;
1164}
1165
1166
1167/* ---------------------------------------------------------------------
1168 The pthread implementation.
1169 ------------------------------------------------------------------ */
1170
1171#include <pthread.h>
1172#include <errno.h>
1173
1174#if !defined(PTHREAD_STACK_MIN)
1175# define PTHREAD_STACK_MIN (16384 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB)
1176#endif
1177
1178/* /usr/include/bits/pthreadtypes.h:
1179 typedef unsigned long int pthread_t;
1180*/
1181
1182
1183static
1184void do_pthread_cancel ( ThreadId tid_canceller,
1185 pthread_t tid_cancellee )
1186{
1187 Char msg_buf[100];
1188 /* We want to make it appear that this thread has returned to
1189 VG_(pthreadreturn_bogusRA) with PTHREAD_CANCELED as the
1190 return value. So: simple: put PTHREAD_CANCELED into %EAX
1191 and &VG_(pthreadreturn_bogusRA) into %EIP and keep going! */
1192 if (VG_(clo_trace_sched)) {
1193 VG_(sprintf)(msg_buf, "cancelled by %d", tid_canceller);
1194 print_sched_event(tid_cancellee, msg_buf);
1195 }
1196 vg_threads[tid_cancellee].m_eax = (UInt)PTHREAD_CANCELED;
1197 vg_threads[tid_cancellee].m_eip = (UInt)&VG_(pthreadreturn_bogusRA);
1198 vg_threads[tid_cancellee].status = VgTs_Runnable;
1199}
1200
1201
1202
1203/* Thread tid is exiting, by returning from the function it was
1204 created with. Or possibly due to pthread_exit or cancellation.
1205 The main complication here is to resume any thread waiting to join
1206 with this one. */
1207static
1208void handle_pthread_return ( ThreadId tid, void* retval )
1209{
1210 ThreadId jnr; /* joiner, the thread calling pthread_join. */
1211 UInt* jnr_args;
1212 void** jnr_thread_return;
1213 Char msg_buf[100];
1214
1215 /* Mark it as not in use. Leave the stack in place so the next
1216 user of this slot doesn't reallocate it. */
1217 vg_assert(tid >= 0 && tid < VG_N_THREADS);
1218 vg_assert(vg_threads[tid].status != VgTs_Empty);
1219
1220 vg_threads[tid].retval = retval;
1221
1222 if (vg_threads[tid].joiner == VG_INVALID_THREADID) {
1223 /* No one has yet done a join on me */
1224 vg_threads[tid].status = VgTs_WaitJoiner;
1225 if (VG_(clo_trace_sched)) {
1226 VG_(sprintf)(msg_buf,
1227 "root fn returns, waiting for a call pthread_join(%d)",
1228 tid);
1229 print_sched_event(tid, msg_buf);
1230 }
1231 } else {
1232 /* Someone is waiting; make their join call return with success,
1233 putting my exit code in the place specified by the caller's
1234 thread_return param. This is all very horrible, since we
1235 need to consult the joiner's arg block -- pointed to by its
1236 %EAX -- in order to extract the 2nd param of its pthread_join
1237 call. TODO: free the slot properly (also below).
1238 */
1239 jnr = vg_threads[tid].joiner;
1240 vg_assert(jnr >= 0 && jnr < VG_N_THREADS);
1241 vg_assert(vg_threads[jnr].status == VgTs_WaitJoinee);
1242 jnr_args = (UInt*)vg_threads[jnr].m_eax;
1243 jnr_thread_return = (void**)(jnr_args[2]);
1244 if (jnr_thread_return != NULL)
1245 *jnr_thread_return = vg_threads[tid].retval;
1246 vg_threads[jnr].m_edx = 0; /* success */
1247 vg_threads[jnr].status = VgTs_Runnable;
1248 vg_threads[tid].status = VgTs_Empty; /* bye! */
1249 if (VG_(clo_trace_sched)) {
1250 VG_(sprintf)(msg_buf,
1251 "root fn returns, to find a waiting pthread_join(%d)", tid);
1252 print_sched_event(tid, msg_buf);
1253 VG_(sprintf)(msg_buf,
1254 "my pthread_join(%d) returned; resuming", tid);
1255 print_sched_event(jnr, msg_buf);
1256 }
1257 }
1258
1259 /* Return value is irrelevant; this thread will not get
1260 rescheduled. */
1261}
1262
1263
1264static
1265void do_pthread_join ( ThreadId tid, ThreadId jee, void** thread_return )
1266{
1267 Char msg_buf[100];
1268
1269 /* jee, the joinee, is the thread specified as an arg in thread
1270 tid's call to pthread_join. So tid is the join-er. */
1271 vg_assert(tid >= 0 && tid < VG_N_THREADS);
1272 vg_assert(vg_threads[tid].status == VgTs_Runnable);
1273
1274 if (jee == tid) {
1275 vg_threads[tid].m_edx = EDEADLK; /* libc constant, not a kernel one */
1276 vg_threads[tid].status = VgTs_Runnable;
1277 return;
1278 }
1279
1280 if (jee < 0
1281 || jee >= VG_N_THREADS
1282 || vg_threads[jee].status == VgTs_Empty) {
1283 /* Invalid thread to join to. */
1284 vg_threads[tid].m_edx = EINVAL;
1285 vg_threads[tid].status = VgTs_Runnable;
1286 return;
1287 }
1288
1289 if (vg_threads[jee].joiner != VG_INVALID_THREADID) {
1290 /* Someone already did join on this thread */
1291 vg_threads[tid].m_edx = EINVAL;
1292 vg_threads[tid].status = VgTs_Runnable;
1293 return;
1294 }
1295
1296 /* if (vg_threads[jee].detached) ... */
1297
1298 /* Perhaps the joinee has already finished? If so return
1299 immediately with its return code, and free up the slot. TODO:
1300 free it properly (also above). */
1301 if (vg_threads[jee].status == VgTs_WaitJoiner) {
1302 vg_assert(vg_threads[jee].joiner == VG_INVALID_THREADID);
1303 vg_threads[tid].m_edx = 0; /* success */
1304 if (thread_return != NULL)
1305 *thread_return = vg_threads[jee].retval;
1306 vg_threads[tid].status = VgTs_Runnable;
1307 vg_threads[jee].status = VgTs_Empty; /* bye! */
1308 if (VG_(clo_trace_sched)) {
1309 VG_(sprintf)(msg_buf,
1310 "someone called pthread_join() on me; bye!");
1311 print_sched_event(jee, msg_buf);
1312 VG_(sprintf)(msg_buf,
1313 "my pthread_join(%d) returned immediately",
1314 jee );
1315 print_sched_event(tid, msg_buf);
1316 }
1317 return;
1318 }
1319
1320 /* Ok, so we'll have to wait on jee. */
1321 vg_threads[jee].joiner = tid;
1322 vg_threads[tid].status = VgTs_WaitJoinee;
1323 if (VG_(clo_trace_sched)) {
1324 VG_(sprintf)(msg_buf,
1325 "blocking on call of pthread_join(%d)", jee );
1326 print_sched_event(tid, msg_buf);
1327 }
1328 /* So tid's join call does not return just now. */
1329}
1330
1331
1332static
1333void do_pthread_create ( ThreadId parent_tid,
1334 pthread_t* thread,
1335 pthread_attr_t* attr,
1336 void* (*start_routine)(void *),
1337 void* arg )
1338{
1339 Addr new_stack;
1340 UInt new_stk_szb;
1341 ThreadId tid;
1342 Char msg_buf[100];
1343
1344 /* Paranoia ... */
1345 vg_assert(sizeof(pthread_t) == sizeof(UInt));
1346
1347 vg_assert(vg_threads[parent_tid].status != VgTs_Empty);
1348
1349 tid = vg_alloc_ThreadState();
1350
1351 /* If we've created the main thread's tid, we're in deep trouble :) */
1352 vg_assert(tid != 0);
1353
1354 /* Copy the parent's CPU state into the child's, in a roundabout
1355 way (via baseBlock). */
1356 VG_(load_thread_state)(parent_tid);
1357 VG_(save_thread_state)(tid);
1358
1359 /* Consider allocating the child a stack, if the one it already has
1360 is inadequate. */
1361 new_stk_szb = PTHREAD_STACK_MIN;
1362
1363 if (new_stk_szb > vg_threads[tid].stack_size) {
1364 /* Again, for good measure :) We definitely don't want to be
1365 allocating a stack for the main thread. */
1366 vg_assert(tid != 0);
1367 /* for now, we don't handle the case of anything other than
1368 assigning it for the first time. */
1369 vg_assert(vg_threads[tid].stack_size == 0);
1370 vg_assert(vg_threads[tid].stack_base == (Addr)NULL);
1371 new_stack = (Addr)VG_(get_memory_from_mmap)( new_stk_szb );
1372 vg_threads[tid].stack_base = new_stack;
1373 vg_threads[tid].stack_size = new_stk_szb;
1374 vg_threads[tid].m_esp
1375 = new_stack + new_stk_szb
1376 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB;
1377 }
1378 if (VG_(clo_instrument))
1379 VGM_(make_noaccess)( vg_threads[tid].m_esp,
1380 VG_AR_CLIENT_STACKBASE_REDZONE_SZB );
1381
1382 /* push arg */
1383 vg_threads[tid].m_esp -= 4;
1384 * (UInt*)(vg_threads[tid].m_esp) = (UInt)arg;
1385
1386 /* push (magical) return address */
1387 vg_threads[tid].m_esp -= 4;
1388 * (UInt*)(vg_threads[tid].m_esp) = (UInt)VG_(pthreadreturn_bogusRA);
1389
1390 if (VG_(clo_instrument))
1391 VGM_(make_readable)( vg_threads[tid].m_esp, 2 * 4 );
1392
1393 /* this is where we start */
1394 vg_threads[tid].m_eip = (UInt)start_routine;
1395
1396 if (VG_(clo_trace_sched)) {
1397 VG_(sprintf)(msg_buf,
1398 "new thread, created by %d", parent_tid );
1399 print_sched_event(tid, msg_buf);
1400 }
1401
1402 /* store the thread id in *thread. */
1403 // if (VG_(clo_instrument))
1404 // ***** CHECK *thread is writable
1405 *thread = (pthread_t)tid;
1406
1407 /* return zero */
1408 vg_threads[tid].joiner = VG_INVALID_THREADID;
1409 vg_threads[tid].status = VgTs_Runnable;
1410 vg_threads[tid].m_edx = 0; /* success */
1411}
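
/* Illustrative sketch (not part of the original source): what the
   set-up above corresponds to on the client side.  The new thread
   starts at start_routine with one word of argument on its stack, and
   when the routine returns it "returns" to the magic address, which is
   how handle_pthread_return() eventually gets control.  The code below
   is ordinary pthread API usage, shown purely as an example. */
#if 0
#include <pthread.h>
#include <stdio.h>

static void* example_worker ( void* arg )
{
   printf("worker got %d\n", *(int*)arg);
   return arg;                     /* ends up in vg_threads[tid].retval */
}

static void example_create_and_join ( void )
{
   pthread_t th;
   int value = 42;
   void* ret;
   pthread_create(&th, NULL, example_worker, &value); /* -> do_pthread_create */
   pthread_join(th, &ret);                            /* -> do_pthread_join   */
}
#endif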
1412
1413
1414/* Horrible hacks to do with pthread_mutex_t: the real pthread_mutex_t
1415 is a struct with at least 5 words:
1416 typedef struct
1417 {
1418 int __m_reserved; -- Reserved for future use
1419 int __m_count; -- Depth of recursive locking
1420 _pthread_descr __m_owner; -- Owner thread (if recursive or errcheck)
1421 int __m_kind; -- Mutex kind: fast, recursive or errcheck
1422 struct _pthread_fastlock __m_lock; -- Underlying fast lock
1423 } pthread_mutex_t;
1424 Ours is just a single word, an index into vg_mutexes[].
1425 For now I'll park it in the __m_reserved field.
1426
1427 Uninitialised mutexes (PTHREAD_MUTEX_INITIALIZER) all have
1428 a zero __m_count field (see /usr/include/pthread.h). So I'll
1429 use zero to mean non-inited, and 1 to mean inited.
1430
1431 How convenient.
1432*/
1433
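
/* Illustrative sketch (not part of the original source): the encoding
   described above, spelled out.  Only two fields of the real
   pthread_mutex_t are touched: __m_count doubles as an "initialised"
   flag and __m_reserved holds the index into vg_mutexes[].  The helper
   names are invented for the example. */
#if 0
static Bool example_mutex_is_initialised ( pthread_mutex_t* mutex )
{
   /* A static PTHREAD_MUTEX_INITIALIZER leaves __m_count at zero. */
   return mutex->__m_count == 1;
}

static VgMutex* example_mutex_to_vg ( pthread_mutex_t* mutex )
{
   MutexId mid = mutex->__m_reserved;   /* index parked in the struct */
   vg_assert(mid >= 0 && mid < VG_N_MUTEXES && vg_mutexes[mid].in_use);
   return &vg_mutexes[mid];
}
#endif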
1434static
1435void initialise_mutex ( ThreadId tid, pthread_mutex_t *mutex )
1436{
1437 MutexId mid;
1438 Char msg_buf[100];
1439 /* vg_alloc_VgMutex aborts if we can't allocate a mutex, for
1440 whatever reason. */
1441 mid = vg_alloc_VgMutex();
1442 vg_mutexes[mid].in_use = True;
1443 vg_mutexes[mid].held = False;
1444 vg_mutexes[mid].owner = VG_INVALID_THREADID; /* irrelevant */
1445 mutex->__m_reserved = mid;
1446 mutex->__m_count = 1; /* initialised */
1447 if (VG_(clo_trace_pthread)) {
1448 VG_(sprintf)(msg_buf, "(initialise mutex) (%p) -> %d",
1449 mutex, mid );
1450 print_pthread_event(tid, msg_buf);
1451 }
1452}
1453
1454/* Allocate a new MutexId and write it into *mutex. Ideally take
1455 notice of the attributes in *mutexattr. */
1456static
1457void do_pthread_mutex_init ( ThreadId tid,
1458 pthread_mutex_t *mutex,
1459 const pthread_mutexattr_t *mutexattr)
1460{
1461 Char msg_buf[100];
1462 /* Paranoia ... */
1463 vg_assert(sizeof(pthread_mutex_t) >= sizeof(UInt));
1464
1465 initialise_mutex(tid, mutex);
1466
1467 if (VG_(clo_trace_pthread)) {
1468 VG_(sprintf)(msg_buf, "pthread_mutex_init (%p) -> %d",
1469 mutex, mutex->__m_reserved );
1470 print_pthread_event(tid, msg_buf);
1471 }
1472
1473 /*
1474 RETURN VALUE
1475 pthread_mutex_init always returns 0. The other mutex functions
1476 return 0 on success and a non-zero error code on error.
1477 */
1478 /* THIS THREAD returns with 0. */
1479 vg_threads[tid].m_edx = 0;
1480}
1481
1482
1483static
1484void do_pthread_mutex_lock( ThreadId tid, pthread_mutex_t *mutex )
1485{
1486 MutexId mid;
1487 Char msg_buf[100];
1488
1489 /* *mutex contains the MutexId, or one of the magic values
1490 PTHREAD_*MUTEX_INITIALIZER*, indicating we need to initialise it
1491 now. See comment(s) above re use of __m_count to indicated
1492 initialisation status.
1493 */
1494
1495 /* POSIX doesn't mandate this, but for sanity ... */
1496 if (mutex == NULL) {
1497 vg_threads[tid].m_edx = EINVAL;
1498 return;
1499 }
1500
1501 if (mutex->__m_count == 0) {
1502 initialise_mutex(tid, mutex);
1503 }
1504
1505 mid = mutex->__m_reserved;
1506 if (mid < 0 || mid >= VG_N_MUTEXES || !vg_mutexes[mid].in_use) {
1507 vg_threads[tid].m_edx = EINVAL;
1508 return;
1509 }
1510
1511 if (VG_(clo_trace_pthread)) {
1512 VG_(sprintf)(msg_buf, "pthread_mutex_lock %d (%p)",
1513 mid, mutex );
1514 print_pthread_event(tid, msg_buf);
1515 }
1516
1517 /* Assert initialised. */
1518 vg_assert(mutex->__m_count == 1);
1519
1520 /* Assume tid valid. */
1521 vg_assert(vg_threads[tid].status == VgTs_Runnable);
1522
1523 if (vg_mutexes[mid].held) {
1524 if (vg_mutexes[mid].owner == tid) {
1525 vg_threads[tid].m_edx = EDEADLK;
1526 return;
1527 }
1528 /* Someone else has it; we have to wait. */
1529 vg_threads[tid].status = VgTs_WaitMX;
1530 vg_threads[tid].waited_on_mid = mid;
1531 /* No assignment to %EDX, since we're blocking. */
1532 if (VG_(clo_trace_pthread)) {
1533 VG_(sprintf)(msg_buf, "pthread_mutex_lock %d (%p): BLOCK",
1534 mid, mutex );
1535 print_pthread_event(tid, msg_buf);
1536 }
1537 } else {
1538 /* We get it! */
1539 vg_mutexes[mid].held = True;
1540 vg_mutexes[mid].owner = tid;
1541 /* return 0 (success). */
1542 vg_threads[tid].m_edx = 0;
1543 }
1544}
1545
1546
1547static
1548void do_pthread_mutex_unlock ( ThreadId tid,
1549 pthread_mutex_t *mutex )
1550{
1551 MutexId mid;
1552 Int i;
1553 Char msg_buf[100];
1554
1555 if (mutex == NULL
1556 || mutex->__m_count != 1) {
1557 vg_threads[tid].m_edx = EINVAL;
1558 return;
1559 }
1560
1561 mid = mutex->__m_reserved;
1562 if (mid < 0 || mid >= VG_N_MUTEXES || !vg_mutexes[mid].in_use) {
1563 vg_threads[tid].m_edx = EINVAL;
1564 return;
1565 }
1566
1567 if (VG_(clo_trace_pthread)) {
1568 VG_(sprintf)(msg_buf, "pthread_mutex_unlock %d (%p)",
1569 mid, mutex );
1570 print_pthread_event(tid, msg_buf);
1571 }
1572
1573 /* Assume tid valid */
1574 vg_assert(vg_threads[tid].status == VgTs_Runnable);
1575
1576 /* Barf if we don't currently hold the mutex. */
1577 if (!vg_mutexes[mid].held || vg_mutexes[mid].owner != tid) {
1578 vg_threads[tid].m_edx = EPERM;
1579 return;
1580 }
1581
1582 /* Find some arbitrary thread waiting on this mutex, and make it
1583 runnable. If none are waiting, mark the mutex as not held. */
1584 for (i = 0; i < VG_N_THREADS; i++) {
1585 if (vg_threads[i].status == VgTs_Empty)
1586 continue;
1587 if (vg_threads[i].status == VgTs_WaitMX
1588 && vg_threads[i].waited_on_mid == mid)
1589 break;
1590 }
1591
1592 vg_assert(i <= VG_N_THREADS);
1593 if (i == VG_N_THREADS) {
1594 /* Nobody else is waiting on it. */
1595 vg_mutexes[mid].held = False;
1596 } else {
1597 /* Notionally transfer the hold to thread i, whose
1598 pthread_mutex_lock() call now returns with 0 (success). */
1599 vg_mutexes[mid].owner = i;
1600 vg_threads[i].status = VgTs_Runnable;
1601 vg_threads[i].m_edx = 0; /* pth_lock() success */
1602
1603 if (VG_(clo_trace_pthread)) {
1604 VG_(sprintf)(msg_buf, "pthread_mutex_lock %d: RESUME",
1605 mid );
1606 print_pthread_event(tid, msg_buf);
1607 }
1608 }
1609
1610 /* In either case, our (tid's) pth_unlock() returns with 0
1611 (success). */
1612 vg_threads[tid].m_edx = 0; /* Success. */
1613}
1614
1615
1616static void do_pthread_mutex_destroy ( ThreadId tid,
1617 pthread_mutex_t *mutex )
1618{
1619 MutexId mid;
1620 Char msg_buf[100];
1621
1622 if (mutex == NULL
1623 || mutex->__m_count != 1) {
1624 vg_threads[tid].m_edx = EINVAL;
1625 return;
1626 }
1627
1628 mid = mutex->__m_reserved;
1629 if (mid < 0 || mid >= VG_N_MUTEXES || !vg_mutexes[mid].in_use) {
1630 vg_threads[tid].m_edx = EINVAL;
1631 return;
1632 }
1633
1634 if (VG_(clo_trace_pthread)) {
1635 VG_(sprintf)(msg_buf, "pthread_mutex_destroy %d (%p)",
1636 mid, mutex );
1637 print_pthread_event(tid, msg_buf);
1638 }
1639
1640 /* Assume tid valid */
1641 vg_assert(vg_threads[tid].status == VgTs_Runnable);
1642
1643 /* Barf if the mutex is currently held. */
1644 if (vg_mutexes[mid].held) {
1645 vg_threads[tid].m_edx = EBUSY;
1646 return;
1647 }
1648
1649 mutex->__m_count = 0; /* uninitialised */
1650 vg_mutexes[mid].in_use = False;
1651 vg_threads[tid].m_edx = 0;
1652}
1653
1654
1655/* ---------------------------------------------------------------------
1656 Handle non-trivial client requests.
1657 ------------------------------------------------------------------ */
1658
1659static
1660void do_nontrivial_clientreq ( ThreadId tid )
1661{
1662 UInt* arg = (UInt*)(vg_threads[tid].m_eax);
1663 UInt req_no = arg[0];
1664 switch (req_no) {
1665
1666 case VG_USERREQ__PTHREAD_CREATE:
1667 do_pthread_create( tid,
1668 (pthread_t*)arg[1],
1669 (pthread_attr_t*)arg[2],
1670 (void*(*)(void*))arg[3],
1671 (void*)arg[4] );
1672 break;
1673
1674 case VG_USERREQ__PTHREAD_RETURNS:
1675 handle_pthread_return( tid, (void*)arg[1] );
1676 break;
1677
1678 case VG_USERREQ__PTHREAD_JOIN:
1679 do_pthread_join( tid, arg[1], (void**)(arg[2]) );
1680 break;
1681
1682 /* Sigh ... this probably will cause huge numbers of major
1683 (expensive) scheduling events, for no real reason.
1684 Perhaps should be classified as a trivial-request. */
1685 case VG_USERREQ__PTHREAD_GET_THREADID:
1686 vg_threads[tid].m_edx = tid;
1687 break;
1688
1689 case VG_USERREQ__PTHREAD_MUTEX_INIT:
1690 do_pthread_mutex_init( tid,
1691 (pthread_mutex_t *)(arg[1]),
1692 (pthread_mutexattr_t *)(arg[2]) );
1693 break;
1694
1695 case VG_USERREQ__PTHREAD_MUTEX_LOCK:
1696 do_pthread_mutex_lock( tid, (pthread_mutex_t *)(arg[1]) );
1697 break;
1698
1699 case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
1700 do_pthread_mutex_unlock( tid, (pthread_mutex_t *)(arg[1]) );
1701 break;
1702
1703 case VG_USERREQ__PTHREAD_MUTEX_DESTROY:
1704 do_pthread_mutex_destroy( tid, (pthread_mutex_t *)(arg[1]) );
1705 break;
1706
1707 case VG_USERREQ__PTHREAD_CANCEL:
1708 do_pthread_cancel( tid, (pthread_t)(arg[1]) );
1709 break;
1710
1711 case VG_USERREQ__MAKE_NOACCESS:
1712 case VG_USERREQ__MAKE_WRITABLE:
1713 case VG_USERREQ__MAKE_READABLE:
1714 case VG_USERREQ__DISCARD:
1715 case VG_USERREQ__CHECK_WRITABLE:
1716 case VG_USERREQ__CHECK_READABLE:
1717 case VG_USERREQ__MAKE_NOACCESS_STACK:
1718 case VG_USERREQ__RUNNING_ON_VALGRIND:
1719 case VG_USERREQ__DO_LEAK_CHECK:
1720 vg_threads[tid].m_edx = VG_(handle_client_request) ( arg );
1721 break;
1722
1723 case VG_USERREQ__SIGNAL_RETURNS:
1724 /* vthread tid is returning from a signal handler;
1725 modify its stack/regs accordingly. */
1726 VG_(signal_returns)(tid);
1727 break;
1728
1729 default:
1730 VG_(printf)("panic'd on private request = 0x%x\n", arg[0] );
1731 VG_(panic)("handle_private_client_pthread_request: "
1732 "unknown request");
1733 /*NOTREACHED*/
1734 break;
1735 }
1736}
1737
1738
1739/*--------------------------------------------------------------------*/
1740/*--- end vg_scheduler.c ---*/
1741/*--------------------------------------------------------------------*/