1
2/*--------------------------------------------------------------------*/
3/*--- A user-space pthreads implementation. vg_scheduler.c ---*/
4/*--------------------------------------------------------------------*/
5
6/*
7 This file is part of Valgrind, an x86 protected-mode emulator
8 designed for debugging and profiling binaries on x86-Unixes.
9
10 Copyright (C) 2000-2002 Julian Seward
11 jseward@acm.org
12 Julian_Seward@muraroa.demon.co.uk
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file LICENSE.
30*/
31
32#include "vg_include.h"
33#include "vg_constants.h"
34
35#include "valgrind.h" /* for VG_USERREQ__MAKE_NOACCESS and
36 VG_USERREQ__DO_LEAK_CHECK */
37
38/* BORKAGE as of 11 Apr 02
39
40Note! This implementation is so poor as to not be suitable for use by
41anyone at all!
42
43- properly save scheduler private state in signal delivery frames.
44
45- fd-poll optimisation (don't select with empty sets)
46
47- signals interrupting read/write and nanosleep, and take notice
48 of SA_RESTART or not
49
50- return bogus RA: %EAX trashed, so pthread_joiner gets nonsense
51 exit codes
52
53- when a thread is done mark its stack as noaccess
54
55- make signal return and .fini call be detected via request mechanism
56
57 */
58
59
60/* ---------------------------------------------------------------------
61 Types and globals for the scheduler.
62 ------------------------------------------------------------------ */
63
64/* type ThreadId is defined in vg_include.h. */
65
66/* struct ThreadState is defined in vg_include.h. */
67
68/* Private globals. A statically allocated array of threads. */
69static ThreadState vg_threads[VG_N_THREADS];
70
71
72/* vg_oursignalhandler() might longjmp(). Here's the jmp_buf. */
73jmp_buf VG_(scheduler_jmpbuf);
74/* ... and if so, here's the signal which caused it to do so. */
75Int VG_(longjmpd_on_signal);
76
77
78/* Machinery to keep track of which threads are waiting on which
79 fds. */
80typedef
81 struct {
82 /* The thread which made the request. */
83 ThreadId tid;
84
85 /* The next two fields describe the request. */
86 /* File descriptor waited for. -1 means this slot is not in use */
87 Int fd;
88 /* The syscall number the fd is used in. */
89 Int syscall_no;
90
91 /* False => still waiting for select to tell us the fd is ready
92 to go. True => the fd is ready, but the results have not yet
93 been delivered back to the calling thread. Once the latter
94 happens, this entire record is marked as no longer in use, by
95 making the fd field be -1. */
96 Bool ready;
97 }
98 VgWaitedOnFd;
99
100static VgWaitedOnFd vg_waiting_fds[VG_N_WAITING_FDS];
101
102
103
104typedef
105 struct {
106 /* Is this slot in use, or free? */
107 Bool in_use;
108 /* If in_use, is this mutex held by some thread, or not? */
109 Bool held;
110 /* if held==True, owner indicates who by. */
111 ThreadId owner;
112 }
113 VgMutex;
114
115static VgMutex vg_mutexes[VG_N_MUTEXES];
116
117/* Forwards */
118static void do_nontrivial_clientreq ( ThreadId tid );
119
120
121/* ---------------------------------------------------------------------
122 Helper functions for the scheduler.
123 ------------------------------------------------------------------ */
124
125static
126void pp_sched_status ( void )
127{
128 Int i;
129 VG_(printf)("\nsched status:\n");
130 for (i = 0; i < VG_N_THREADS; i++) {
131 if (vg_threads[i].status == VgTs_Empty) continue;
132 VG_(printf)("tid %d: ", i);
133 switch (vg_threads[i].status) {
134 case VgTs_Runnable: VG_(printf)("Runnable\n"); break;
135 case VgTs_WaitFD: VG_(printf)("WaitFD\n"); break;
136 case VgTs_WaitJoiner: VG_(printf)("WaitJoiner(%d)\n",
137 vg_threads[i].joiner); break;
138 case VgTs_WaitJoinee: VG_(printf)("WaitJoinee\n"); break;
139 default: VG_(printf)("???\n"); break;
140 }
141 }
142 VG_(printf)("\n");
143}
144
145static
146void add_waiting_fd ( ThreadId tid, Int fd, Int syscall_no )
147{
148 Int i;
149
150 vg_assert(fd != -1); /* avoid total chaos */
151
152 for (i = 0; i < VG_N_WAITING_FDS; i++)
153 if (vg_waiting_fds[i].fd == -1)
154 break;
155
156 if (i == VG_N_WAITING_FDS)
157 VG_(panic)("add_waiting_fd: VG_N_WAITING_FDS is too low");
158 /*
159 VG_(printf)("add_waiting_fd: add (tid %d, fd %d) at slot %d\n",
160 tid, fd, i);
161 */
162 vg_waiting_fds[i].fd = fd;
163 vg_waiting_fds[i].tid = tid;
164 vg_waiting_fds[i].ready = False;
165 vg_waiting_fds[i].syscall_no = syscall_no;
166}
167
168
169
170static
171void print_sched_event ( ThreadId tid, Char* what )
172{
173 VG_(message)(Vg_DebugMsg, "SCHED[%d]: %s", tid, what );
174}
175
176
177static
178void print_pthread_event ( ThreadId tid, Char* what )
179{
180 VG_(message)(Vg_DebugMsg, "PTHREAD[%d]: %s", tid, what );
181}
182
183
184static
185Char* name_of_sched_event ( UInt event )
186{
187 switch (event) {
188 case VG_TRC_EBP_JMP_SYSCALL: return "SYSCALL";
189 case VG_TRC_EBP_JMP_CLIENTREQ: return "CLIENTREQ";
190 case VG_TRC_INNER_COUNTERZERO: return "COUNTERZERO";
191 case VG_TRC_INNER_FASTMISS: return "FASTMISS";
192 case VG_TRC_UNRESUMABLE_SIGNAL: return "FATALSIGNAL";
193 default: return "??UNKNOWN??";
194 }
195}
196
197
198/* Create a translation of the client basic block beginning at
199 orig_addr, and add it to the translation cache & translation table.
200 This probably doesn't really belong here, but, hey ...
201*/
202void VG_(create_translation_for) ( Addr orig_addr )
203{
204 Addr trans_addr;
205 TTEntry tte;
206 Int orig_size, trans_size;
207 /* Ensure there is space to hold a translation. */
208 VG_(maybe_do_lru_pass)();
209 VG_(translate)( orig_addr, &orig_size, &trans_addr, &trans_size );
210 /* Copy data at trans_addr into the translation cache.
211 Returned pointer is to the code, not to the 4-byte
212 header. */
213 /* Since the .orig_size and .trans_size fields are
214 UShort, be paranoid. */
215 vg_assert(orig_size > 0 && orig_size < 65536);
216 vg_assert(trans_size > 0 && trans_size < 65536);
217 tte.orig_size = orig_size;
218 tte.orig_addr = orig_addr;
219 tte.trans_size = trans_size;
220 tte.trans_addr = VG_(copy_to_transcache)
221 ( trans_addr, trans_size );
222 tte.mru_epoch = VG_(current_epoch);
223 /* Free the intermediary -- was allocated by VG_(emit_code). */
224 VG_(jitfree)( (void*)trans_addr );
225 /* Add to trans tab and set back pointer. */
226 VG_(add_to_trans_tab) ( &tte );
227 /* Update stats. */
228 VG_(this_epoch_in_count) ++;
229 VG_(this_epoch_in_osize) += orig_size;
230 VG_(this_epoch_in_tsize) += trans_size;
231 VG_(overall_in_count) ++;
232 VG_(overall_in_osize) += orig_size;
233 VG_(overall_in_tsize) += trans_size;
234 /* Record translated area for SMC detection. */
235 VG_(smc_mark_original) ( orig_addr, orig_size );
236}
237
238
239/* Allocate a completely empty ThreadState record. */
240static
241ThreadId vg_alloc_ThreadState ( void )
242{
243 Int i;
244 for (i = 0; i < VG_N_THREADS; i++) {
245 if (vg_threads[i].status == VgTs_Empty)
246 return i;
247 }
248 VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
249 VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
250 VG_(panic)("VG_N_THREADS is too low");
251 /*NOTREACHED*/
252}
253
254
255ThreadState* VG_(get_thread_state) ( ThreadId tid )
256{
257 vg_assert(tid >= 0 && tid < VG_N_THREADS);
258 vg_assert(vg_threads[tid].status != VgTs_Empty);
259 return & vg_threads[tid];
260}
261
262
263/* Find an unused VgMutex record. */
264static
265MutexId vg_alloc_VgMutex ( void )
266{
267 Int i;
268 for (i = 0; i < VG_N_MUTEXES; i++) {
269 if (!vg_mutexes[i].in_use)
270 return i;
271 }
272 VG_(printf)("vg_alloc_VgMutex: no free slots available\n");
273 VG_(printf)("Increase VG_N_MUTEXES, rebuild and try again.\n");
274 VG_(panic)("VG_N_MUTEXES is too low");
275 /*NOTREACHED*/
276}
277
278
279/* Copy the saved state of a thread into VG_(baseBlock), ready for it
280 to be run. */
281__inline__
282void VG_(load_thread_state) ( ThreadId tid )
283{
284 Int i;
285 VG_(baseBlock)[VGOFF_(m_eax)] = vg_threads[tid].m_eax;
286 VG_(baseBlock)[VGOFF_(m_ebx)] = vg_threads[tid].m_ebx;
287 VG_(baseBlock)[VGOFF_(m_ecx)] = vg_threads[tid].m_ecx;
288 VG_(baseBlock)[VGOFF_(m_edx)] = vg_threads[tid].m_edx;
289 VG_(baseBlock)[VGOFF_(m_esi)] = vg_threads[tid].m_esi;
290 VG_(baseBlock)[VGOFF_(m_edi)] = vg_threads[tid].m_edi;
291 VG_(baseBlock)[VGOFF_(m_ebp)] = vg_threads[tid].m_ebp;
292 VG_(baseBlock)[VGOFF_(m_esp)] = vg_threads[tid].m_esp;
293 VG_(baseBlock)[VGOFF_(m_eflags)] = vg_threads[tid].m_eflags;
294 VG_(baseBlock)[VGOFF_(m_eip)] = vg_threads[tid].m_eip;
295
296 for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
297 VG_(baseBlock)[VGOFF_(m_fpustate) + i] = vg_threads[tid].m_fpu[i];
298
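 /* The sh_* fields are the shadow registers which the instrumentation
    maintains alongside the real integer registers. */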
299 VG_(baseBlock)[VGOFF_(sh_eax)] = vg_threads[tid].sh_eax;
300 VG_(baseBlock)[VGOFF_(sh_ebx)] = vg_threads[tid].sh_ebx;
301 VG_(baseBlock)[VGOFF_(sh_ecx)] = vg_threads[tid].sh_ecx;
302 VG_(baseBlock)[VGOFF_(sh_edx)] = vg_threads[tid].sh_edx;
303 VG_(baseBlock)[VGOFF_(sh_esi)] = vg_threads[tid].sh_esi;
304 VG_(baseBlock)[VGOFF_(sh_edi)] = vg_threads[tid].sh_edi;
305 VG_(baseBlock)[VGOFF_(sh_ebp)] = vg_threads[tid].sh_ebp;
306 VG_(baseBlock)[VGOFF_(sh_esp)] = vg_threads[tid].sh_esp;
307 VG_(baseBlock)[VGOFF_(sh_eflags)] = vg_threads[tid].sh_eflags;
308}
309
310
311/* Copy the state of a thread from VG_(baseBlock), presumably after it
312 has been descheduled. For sanity-check purposes, fill the vacated
313 VG_(baseBlock) with garbage so as to make the system more likely to
314 fail quickly if we erroneously continue to poke around inside
315 VG_(baseBlock) without first doing a load_thread_state().
316*/
317__inline__
318void VG_(save_thread_state) ( ThreadId tid )
319{
320 Int i;
321 const UInt junk = 0xDEADBEEF;
322
323 vg_threads[tid].m_eax = VG_(baseBlock)[VGOFF_(m_eax)];
324 vg_threads[tid].m_ebx = VG_(baseBlock)[VGOFF_(m_ebx)];
325 vg_threads[tid].m_ecx = VG_(baseBlock)[VGOFF_(m_ecx)];
326 vg_threads[tid].m_edx = VG_(baseBlock)[VGOFF_(m_edx)];
327 vg_threads[tid].m_esi = VG_(baseBlock)[VGOFF_(m_esi)];
328 vg_threads[tid].m_edi = VG_(baseBlock)[VGOFF_(m_edi)];
329 vg_threads[tid].m_ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
330 vg_threads[tid].m_esp = VG_(baseBlock)[VGOFF_(m_esp)];
331 vg_threads[tid].m_eflags = VG_(baseBlock)[VGOFF_(m_eflags)];
332 vg_threads[tid].m_eip = VG_(baseBlock)[VGOFF_(m_eip)];
333
334 for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
335 vg_threads[tid].m_fpu[i] = VG_(baseBlock)[VGOFF_(m_fpustate) + i];
336
337 vg_threads[tid].sh_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
338 vg_threads[tid].sh_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
339 vg_threads[tid].sh_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
340 vg_threads[tid].sh_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
341 vg_threads[tid].sh_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
342 vg_threads[tid].sh_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
343 vg_threads[tid].sh_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
344 vg_threads[tid].sh_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
345 vg_threads[tid].sh_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
346
347 /* Fill it up with junk. */
348 VG_(baseBlock)[VGOFF_(m_eax)] = junk;
349 VG_(baseBlock)[VGOFF_(m_ebx)] = junk;
350 VG_(baseBlock)[VGOFF_(m_ecx)] = junk;
351 VG_(baseBlock)[VGOFF_(m_edx)] = junk;
352 VG_(baseBlock)[VGOFF_(m_esi)] = junk;
353 VG_(baseBlock)[VGOFF_(m_edi)] = junk;
354 VG_(baseBlock)[VGOFF_(m_ebp)] = junk;
355 VG_(baseBlock)[VGOFF_(m_esp)] = junk;
356 VG_(baseBlock)[VGOFF_(m_eflags)] = junk;
357 VG_(baseBlock)[VGOFF_(m_eip)] = junk;
358
359 for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
360 VG_(baseBlock)[VGOFF_(m_fpustate) + i] = junk;
361}
362
363
364/* Run the thread tid for a while, and return a VG_TRC_* value to the
365 scheduler indicating what happened. */
366static
367UInt run_thread_for_a_while ( ThreadId tid )
368{
369 UInt trc = 0;
370 vg_assert(tid >= 0 && tid < VG_N_THREADS);
371 vg_assert(vg_threads[tid].status != VgTs_Empty);
372 vg_assert(VG_(bbs_to_go) > 0);
373
374 VG_(load_thread_state) ( tid );
375 if (__builtin_setjmp(VG_(scheduler_jmpbuf)) == 0) {
376 /* try this ... */
377 trc = VG_(run_innerloop)();
378 /* We get here if the client didn't take a fault. */
379 } else {
380 /* We get here if the client took a fault, which caused our
381 signal handler to longjmp. */
382 vg_assert(trc == 0);
383 trc = VG_TRC_UNRESUMABLE_SIGNAL;
384 }
385 VG_(save_thread_state) ( tid );
386 return trc;
387}
388
389
390/* Increment the LRU epoch counter. */
391static
392void increment_epoch ( void )
393{
394 VG_(current_epoch)++;
395 if (VG_(clo_verbosity) > 2) {
396 UInt tt_used, tc_used;
397 VG_(get_tt_tc_used) ( &tt_used, &tc_used );
398 VG_(message)(Vg_UserMsg,
399 "%lu bbs, in: %d (%d -> %d), out %d (%d -> %d), TT %d, TC %d",
400 VG_(bbs_done),
401 VG_(this_epoch_in_count),
402 VG_(this_epoch_in_osize),
403 VG_(this_epoch_in_tsize),
404 VG_(this_epoch_out_count),
405 VG_(this_epoch_out_osize),
406 VG_(this_epoch_out_tsize),
407 tt_used, tc_used
408 );
409 }
410 VG_(this_epoch_in_count) = 0;
411 VG_(this_epoch_in_osize) = 0;
412 VG_(this_epoch_in_tsize) = 0;
413 VG_(this_epoch_out_count) = 0;
414 VG_(this_epoch_out_osize) = 0;
415 VG_(this_epoch_out_tsize) = 0;
416}
417
418
419/* Initialise the scheduler. Create a single "main" thread ready to
420 run, with special ThreadId of zero. This is called at startup; the
421 caller takes care that the client's state is parked in
422 VG_(baseBlock).
423*/
424void VG_(scheduler_init) ( void )
425{
426 Int i;
427 Addr startup_esp;
428 ThreadId tid_main;
429
430 startup_esp = VG_(baseBlock)[VGOFF_(m_esp)];
431 if ((startup_esp & VG_STARTUP_STACK_MASK) != VG_STARTUP_STACK_MASK) {
432 VG_(printf)("%esp at startup = %p is not near %p; aborting\n",
433 startup_esp, VG_STARTUP_STACK_MASK);
434 VG_(panic)("unexpected %esp at startup");
435 }
436
437 for (i = 0; i < VG_N_THREADS; i++) {
438 vg_threads[i].stack_size = 0;
439 vg_threads[i].stack_base = (Addr)NULL;
440 }
441
442 for (i = 0; i < VG_N_WAITING_FDS; i++)
443 vg_waiting_fds[i].fd = -1; /* not in use */
444
445 for (i = 0; i < VG_N_MUTEXES; i++)
446 vg_mutexes[i].in_use = False;
447
448 /* Assert this is thread zero, which has certain magic
449 properties. */
450 tid_main = vg_alloc_ThreadState();
451 vg_assert(tid_main == 0);
452
453 vg_threads[tid_main].status = VgTs_Runnable;
454 vg_threads[tid_main].joiner = VG_INVALID_THREADID;
455 vg_threads[tid_main].retval = NULL; /* not important */
456
457 /* Copy VG_(baseBlock) state to tid_main's slot. */
458 VG_(save_thread_state) ( tid_main );
459}
460
461
462/* What if fd isn't a valid fd? */
463static
464void set_fd_nonblocking ( Int fd )
465{
466 Int res = VG_(fcntl)( fd, VKI_F_GETFL, 0 );
467 vg_assert(!VG_(is_kerror)(res));
468 res |= VKI_O_NONBLOCK;
469 res = VG_(fcntl)( fd, VKI_F_SETFL, res );
470 vg_assert(!VG_(is_kerror)(res));
471}
472
473static
474void set_fd_blocking ( Int fd )
475{
476 Int res = VG_(fcntl)( fd, VKI_F_GETFL, 0 );
477 vg_assert(!VG_(is_kerror)(res));
478 res &= ~VKI_O_NONBLOCK;
479 res = VG_(fcntl)( fd, VKI_F_SETFL, res );
480 vg_assert(!VG_(is_kerror)(res));
481}
482
483static
484Bool fd_is_blockful ( Int fd )
485{
486 Int res = VG_(fcntl)( fd, VKI_F_GETFL, 0 );
487 vg_assert(!VG_(is_kerror)(res));
488 return (res & VKI_O_NONBLOCK) ? False : True;
489}
490
491
492
493/* Do a purely thread-local request for tid, and put the result in its
494 %EDX, without changing its scheduling state in any way, nor that of
495 any other threads. Return True if so.
496
497 If the request is non-trivial, return False; a more capable but
498 slower mechanism will deal with it.
499*/
500static
501Bool maybe_do_trivial_clientreq ( ThreadId tid )
502{
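   /* Put the request's result in the client's %EDX and report the
      request as handled. */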
503# define SIMPLE_RETURN(vvv) \
504 { vg_threads[tid].m_edx = (vvv); \
505 return True; \
506 }
507
508 UInt* arg = (UInt*)(vg_threads[tid].m_eax);
509 UInt req_no = arg[0];
510 switch (req_no) {
511 case VG_USERREQ__MALLOC:
512 SIMPLE_RETURN(
513 (UInt)VG_(client_malloc) ( arg[1], Vg_AllocMalloc )
514 );
515 case VG_USERREQ__BUILTIN_NEW:
516 SIMPLE_RETURN(
517 (UInt)VG_(client_malloc) ( arg[1], Vg_AllocNew )
518 );
519 case VG_USERREQ__BUILTIN_VEC_NEW:
520 SIMPLE_RETURN(
521 (UInt)VG_(client_malloc) ( arg[1], Vg_AllocNewVec )
522 );
523 case VG_USERREQ__FREE:
524 VG_(client_free) ( (void*)arg[1], Vg_AllocMalloc );
525 SIMPLE_RETURN(0); /* irrelevant */
526 case VG_USERREQ__BUILTIN_DELETE:
527 VG_(client_free) ( (void*)arg[1], Vg_AllocNew );
528 SIMPLE_RETURN(0); /* irrelevant */
529 case VG_USERREQ__BUILTIN_VEC_DELETE:
530 VG_(client_free) ( (void*)arg[1], Vg_AllocNewVec );
531 SIMPLE_RETURN(0); /* irrelevant */
532 case VG_USERREQ__CALLOC:
533 SIMPLE_RETURN(
534 (UInt)VG_(client_calloc) ( arg[1], arg[2] )
535 );
536 case VG_USERREQ__REALLOC:
537 SIMPLE_RETURN(
538 (UInt)VG_(client_realloc) ( (void*)arg[1], arg[2] )
539 );
540 case VG_USERREQ__MEMALIGN:
541 SIMPLE_RETURN(
542 (UInt)VG_(client_memalign) ( arg[1], arg[2] )
543 );
544 default:
545 /* Too hard; wimp out. */
546 return False;
547 }
548# undef SIMPLE_RETURN
549}
550
551
552static
553void sched_do_syscall ( ThreadId tid )
554{
555 UInt saved_eax;
556 UInt res, syscall_no;
557 UInt fd;
558 Bool might_block, assumed_nonblocking;
559 Bool orig_fd_blockness;
560 Char msg_buf[100];
561
562 vg_assert(tid >= 0 && tid < VG_N_THREADS);
563 vg_assert(vg_threads[tid].status == VgTs_Runnable);
564
565 syscall_no = vg_threads[tid].m_eax; /* syscall number */
566
567 if (syscall_no == __NR_nanosleep) {
568 ULong t_now, t_awaken;
569 struct vki_timespec* req;
570 req = (struct vki_timespec*)vg_threads[tid].m_ebx; /* arg1 */
571 t_now = VG_(read_microsecond_timer)();
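      /* Work out the absolute time, in microseconds, at which the
         sleep should end. */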
572 t_awaken
573 = t_now
574 + (ULong)1000000ULL * (ULong)(req->tv_sec)
575 + (ULong)( (UInt)(req->tv_nsec) / 1000 );
576 vg_threads[tid].status = VgTs_Sleeping;
577 vg_threads[tid].awaken_at = t_awaken;
578 if (VG_(clo_trace_sched)) {
579 VG_(sprintf)(msg_buf, "at %lu: nanosleep for %lu",
580 t_now, t_awaken-t_now);
581 print_sched_event(tid, msg_buf);
582 }
583 /* Force the scheduler to run something else for a while. */
584 return;
585 }
586
587 switch (syscall_no) {
588 case __NR_read:
589 case __NR_write:
590 assumed_nonblocking
591 = False;
592 might_block
593 = fd_is_blockful(vg_threads[tid].m_ebx /* arg1 */);
594 break;
595 default:
596 might_block = False;
597 assumed_nonblocking = True;
598 }
599
600 if (assumed_nonblocking) {
601 /* We think it's non-blocking. Just do it in the normal way. */
602 VG_(perform_assumed_nonblocking_syscall)(tid);
603 /* The thread is still runnable. */
604 return;
605 }
606
607 /* It might block. Take evasive action. */
608 switch (syscall_no) {
609 case __NR_read:
610 case __NR_write:
611 fd = vg_threads[tid].m_ebx; break;
612 default:
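         /* Only read and write can be routed here; the deliberately
            false assertion below acts as a panic for anything else. */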
613 vg_assert(3+3 == 7);
614 }
615
616 /* Set the fd to nonblocking, and do the syscall, which will return
617 immediately, in order to lodge a request with the Linux kernel.
618 We later poll for I/O completion using select(). */
619
620 orig_fd_blockness = fd_is_blockful(fd);
621 set_fd_nonblocking(fd);
622 vg_assert(!fd_is_blockful(fd));
623 VG_(check_known_blocking_syscall)(tid, syscall_no, NULL /* PRE */);
624
625 /* This trashes the thread's %eax; we have to preserve it. */
626 saved_eax = vg_threads[tid].m_eax;
627 KERNEL_DO_SYSCALL(tid,res);
628
629 /* Restore original blockfulness of the fd. */
630 if (orig_fd_blockness)
631 set_fd_blocking(fd);
632 else
633 set_fd_nonblocking(fd);
634
635 if (res != -VKI_EWOULDBLOCK) {
636 /* It didn't block; it went through immediately. So finish off
637 in the normal way. Don't restore %EAX, since that now
638 (correctly) holds the result of the call. */
639 VG_(check_known_blocking_syscall)(tid, syscall_no, &res /* POST */);
640 /* We're still runnable. */
641 vg_assert(vg_threads[tid].status == VgTs_Runnable);
642
643 } else {
644
645 /* It would have blocked. First, restore %EAX to what it was
646 before our speculative call. */
647 vg_threads[tid].m_eax = saved_eax;
648 /* Put this fd in a table of fds on which we are waiting for
649 completion. The arguments for select() later are constructed
650 from this table. */
651 add_waiting_fd(tid, fd, saved_eax /* which holds the syscall # */);
652 /* Deschedule thread until an I/O completion happens. */
653 vg_threads[tid].status = VgTs_WaitFD;
654 if (VG_(clo_trace_sched)) {
655 VG_(sprintf)(msg_buf,"block until I/O ready on fd %d", fd);
656 print_sched_event(tid, msg_buf);
657 }
658
659 }
660}
661
662
663/* Find out which of the fds in vg_waiting_fds are now ready to go, by
664 making enquiries with select(), and mark them as ready. We have to
665 wait for the requesting threads to fall into the WaitFD state
666 before we can actually finally deliver the results, so this
667 procedure doesn't do that; complete_blocked_syscalls() does it.
668
669 It might seem odd that a thread which has done a blocking syscall
670 is not in WaitFD state; the way this can happen is if it initially
671 becomes WaitFD, but then a signal is delivered to it, so it becomes
672 Runnable for a while. In this case we have to wait for the
673 sighandler to return, whereupon the WaitFD state is resumed, and
674 only at that point can the I/O result be delivered to it. However,
675 this point may be long after the fd is actually ready.
676
677 So, poll_for_ready_fds() merely detects fds which are ready.
678 complete_blocked_syscalls() does the second half of the trick,
679 possibly much later: it delivers the results from ready fds to
680 threads in WaitFD state.
681*/
682void poll_for_ready_fds ( void )
683{
684 vki_ksigset_t saved_procmask;
685 vki_fd_set readfds;
686 vki_fd_set writefds;
687 vki_fd_set exceptfds;
688 struct vki_timeval timeout;
689 Int fd, fd_max, i, n_ready, syscall_no, n_ok;
690 ThreadId tid;
691 Bool rd_ok, wr_ok, ex_ok;
692 Char msg_buf[100];
693
694 /* Awaken any sleeping threads whose sleep has expired. */
695 {
696 struct vki_timespec * rem;
697 ULong t_now = VG_(read_microsecond_timer)();
698 for (tid = 0; tid < VG_N_THREADS; tid++) {
699 if (vg_threads[tid].status != VgTs_Sleeping)
700 continue;
701 if (t_now >= vg_threads[tid].awaken_at) {
702 /* Resume this thread. Set to zero the remaining-time (second)
703 arg of nanosleep, since it's used up all its time. */
704 vg_assert(vg_threads[tid].m_eax == __NR_nanosleep);
705 rem = (struct vki_timespec *)vg_threads[tid].m_ecx; /* arg2 */
706 if (rem != NULL) {
707 rem->tv_sec = 0;
708 rem->tv_nsec = 0;
709 }
710 /* Make the syscall return 0 (success). */
711 vg_threads[tid].m_eax = 0;
712 /* Reschedule this thread. */
713 vg_threads[tid].status = VgTs_Runnable;
714 if (VG_(clo_trace_sched)) {
715 VG_(sprintf)(msg_buf, "at %lu: nanosleep done",
716 t_now);
717 print_sched_event(tid, msg_buf);
718 }
719 }
720 }
721 }
722
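   /* Zero timeout: select() just polls the fds and returns
      immediately rather than blocking. */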
723 timeout.tv_sec = 0;
724 timeout.tv_usec = 0;
725
726 VKI_FD_ZERO(&readfds);
727 VKI_FD_ZERO(&writefds);
728 VKI_FD_ZERO(&exceptfds);
729 fd_max = -1;
730 for (i = 0; i < VG_N_WAITING_FDS; i++) {
731 if (vg_waiting_fds[i].fd == -1 /* not in use */)
732 continue;
733 if (vg_waiting_fds[i].ready /* already ready? */)
734 continue;
735 fd = vg_waiting_fds[i].fd;
736 /* VG_(printf)("adding QUERY for fd %d\n", fd); */
737 if (fd > fd_max)
738 fd_max = fd;
739 tid = vg_waiting_fds[i].tid;
740 vg_assert(tid >= 0 && tid < VG_N_THREADS);
741 syscall_no = vg_waiting_fds[i].syscall_no;
742 switch (syscall_no) {
743 case __NR_read:
744 VKI_FD_SET(fd, &readfds); break;
745 case __NR_write:
746 VKI_FD_SET(fd, &writefds); break;
747 default:
748 VG_(panic)("poll_for_ready_fds: unexpected syscall");
749 /*NOTREACHED*/
750 break;
751 }
752 }
753
754 /* BLOCK ALL SIGNALS. We don't want the complication of select()
755 getting interrupted. */
756 VG_(block_all_host_signals)( &saved_procmask );
757
758 n_ready = VG_(select)
759 ( fd_max+1, &readfds, &writefds, &exceptfds, &timeout);
760 if (VG_(is_kerror)(n_ready)) {
761 VG_(printf)("poll_for_ready_fds: select returned %d\n", n_ready);
762 VG_(panic)("poll_for_ready_fds: select failed?!");
763 /*NOTREACHED*/
764 }
765
766 /* UNBLOCK ALL SIGNALS */
767 VG_(restore_host_signals)( &saved_procmask );
768
769 /* VG_(printf)("poll_for_io_completions: %d fs ready\n", n_ready); */
770
771 if (n_ready == 0)
772 return;
773
774 /* Inspect all the fds we know about, and handle any completions that
775 have happened. */
776 /*
777 VG_(printf)("\n\n");
778 for (fd = 0; fd < 100; fd++)
779 if (VKI_FD_ISSET(fd, &writefds) || VKI_FD_ISSET(fd, &readfds)) {
780 VG_(printf)("X"); } else { VG_(printf)("."); };
781 VG_(printf)("\n\nfd_max = %d\n", fd_max);
782 */
783
784 for (fd = 0; fd <= fd_max; fd++) {
785 rd_ok = VKI_FD_ISSET(fd, &readfds);
786 wr_ok = VKI_FD_ISSET(fd, &writefds);
787 ex_ok = VKI_FD_ISSET(fd, &exceptfds);
788
789 n_ok = (rd_ok ? 1 : 0) + (wr_ok ? 1 : 0) + (ex_ok ? 1 : 0);
790 if (n_ok == 0)
791 continue;
792 if (n_ok > 1) {
793 VG_(printf)("offending fd = %d\n", fd);
794 VG_(panic)("poll_for_ready_fds: multiple events on fd");
795 }
796
797 /* An I/O event completed for fd. Find the thread which
798 requested this. */
799 for (i = 0; i < VG_N_WAITING_FDS; i++) {
800 if (vg_waiting_fds[i].fd == -1 /* not in use */)
801 continue;
802 if (vg_waiting_fds[i].fd == fd)
803 break;
804 }
805
806 /* And a bit more paranoia ... */
807 vg_assert(i >= 0 && i < VG_N_WAITING_FDS);
808
809 /* Mark the fd as ready. */
810 vg_assert(! vg_waiting_fds[i].ready);
811 vg_waiting_fds[i].ready = True;
812 }
813}
814
815
816/* See comment attached to poll_for_ready_fds() for explanation. */
817void complete_blocked_syscalls ( void )
818{
819 Int fd, i, res, syscall_no;
820 ThreadId tid;
821 Char msg_buf[100];
822
823 /* Inspect all the outstanding fds we know about. */
824
825 for (i = 0; i < VG_N_WAITING_FDS; i++) {
826 if (vg_waiting_fds[i].fd == -1 /* not in use */)
827 continue;
828 if (! vg_waiting_fds[i].ready)
829 continue;
830
831 fd = vg_waiting_fds[i].fd;
832 tid = vg_waiting_fds[i].tid;
833 vg_assert(tid >= 0 && tid < VG_N_THREADS);
834
835 /* The thread actually has to be waiting for the I/O event it
836 requested before we can deliver the result! */
837 if (vg_threads[tid].status != VgTs_WaitFD)
838 continue;
839
840 /* Ok, actually do it! We can safely use %EAX as the syscall
841 number, because the speculative call made by
842 sched_do_syscall() doesn't change %EAX in the case where the
843 call would have blocked. */
844
845 syscall_no = vg_waiting_fds[i].syscall_no;
846 vg_assert(syscall_no == vg_threads[tid].m_eax);
847 KERNEL_DO_SYSCALL(tid,res);
848 VG_(check_known_blocking_syscall)(tid, syscall_no, &res /* POST */);
849
850 /* Reschedule. */
851 vg_threads[tid].status = VgTs_Runnable;
852 /* Mark slot as no longer in use. */
853 vg_waiting_fds[i].fd = -1;
854 /* pp_sched_status(); */
855 if (VG_(clo_trace_sched)) {
856 VG_(sprintf)(msg_buf,"resume due to I/O completion on fd %d", fd);
857 print_sched_event(tid, msg_buf);
858 }
859 }
860}
861
862
863static
864void nanosleep_for_a_while ( void )
865{
866 Int res;
867 struct vki_timespec req;
868 struct vki_timespec rem;
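   /* Sleep for 20 milliseconds, then return so the scheduler can
      poll again. */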
869 req.tv_sec = 0;
870 req.tv_nsec = 20 * 1000 * 1000;
871 res = VG_(nanosleep)( &req, &rem );
872 /* VG_(printf)("after ns, unused = %d\n", rem.tv_nsec ); */
873 vg_assert(res == 0);
874}
875
876
877/* ---------------------------------------------------------------------
878 The scheduler proper.
879 ------------------------------------------------------------------ */
880
881/* Run user-space threads until either
882 * Deadlock occurs
883 * One thread asks to shut down Valgrind
884 * The specified number of basic blocks has gone by.
885*/
886VgSchedReturnCode VG_(scheduler) ( void )
887{
888 ThreadId tid, tid_next;
889 UInt trc;
890 UInt dispatch_ctr_SAVED;
891 Int request_code, done_this_time, n_in_fdwait_or_sleep;
892 Char msg_buf[100];
893 Addr trans_addr;
894
895 /* For the LRU structures, records when the epoch began. */
896 ULong lru_epoch_started_at = 0;
897
898 /* Start with the root thread. tid in general indicates the
899 currently runnable/just-finished-running thread. */
900 tid = 0;
901
902 /* This is the top level scheduler loop. It falls into three
903 phases. */
904 while (True) {
905
906 /* ======================= Phase 1 of 3 =======================
907 Handle I/O completions and signals. This may change the
908 status of various threads. Then select a new thread to run,
909 or declare deadlock, or sleep if there are no runnable
910 threads but some are blocked on I/O. */
911
912 /* Age the LRU structures if an epoch has been completed. */
913 if (VG_(bbs_done) - lru_epoch_started_at >= VG_BBS_PER_EPOCH) {
914 lru_epoch_started_at = VG_(bbs_done);
915 increment_epoch();
916 }
917
918 /* Was a debug-stop requested? */
919 if (VG_(bbs_to_go) == 0)
920 goto debug_stop;
921
922 /* Do the following loop until a runnable thread is found, or
923 deadlock is detected. */
924 while (True) {
925
926 /* For stats purposes only. */
927 VG_(num_scheduling_events_MAJOR) ++;
928
929 /* See if any I/O operations which we were waiting for have
930 completed, and, if so, make runnable the relevant waiting
931 threads. */
932 poll_for_ready_fds();
933 complete_blocked_syscalls();
934
935 /* See if there are any signals which need to be delivered. If
936 so, choose thread(s) to deliver them to, and build signal
937 delivery frames on those thread(s) stacks. */
938 VG_(deliver_signals)( 0 /*HACK*/ );
939 VG_(do_sanity_checks)(0 /*HACK*/, False);
940
941 /* Try and find a thread (tid) to run. */
942 tid_next = tid;
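      /* Round-robin: start the search at the thread after the one
         which just ran, wrapping around at VG_N_THREADS. */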
943 n_in_fdwait_or_sleep = 0;
944 while (True) {
945 tid_next++;
946 if (tid_next >= VG_N_THREADS) tid_next = 0;
947 if (vg_threads[tid_next].status == VgTs_WaitFD
948 || vg_threads[tid_next].status == VgTs_Sleeping)
949 n_in_fdwait_or_sleep ++;
950 if (vg_threads[tid_next].status == VgTs_Runnable)
951 break; /* We can run this one. */
952 if (tid_next == tid)
953 break; /* been all the way round */
954 }
955 tid = tid_next;
956
957 if (vg_threads[tid].status == VgTs_Runnable) {
958 /* Found a suitable candidate. Fall out of this loop, so
959 we can advance to stage 2 of the scheduler: actually
960 running the thread. */
961 break;
962 }
963
964 /* We didn't find a runnable thread. Now what? */
965 if (n_in_fdwait_or_sleep == 0) {
966 /* No runnable threads and no prospect of any appearing
967 even if we wait for an arbitrary length of time. In
968 short, we have a deadlock. */
969 pp_sched_status();
970 return VgSrc_Deadlock;
971 }
972
973 /* At least one thread is in a fd-wait state. Delay for a
974 while, and go round again, in the hope that eventually a
975 thread becomes runnable. */
976 nanosleep_for_a_while();
977 // pp_sched_status();
978 // VG_(printf)(".\n");
979 }
980
981
982 /* ======================= Phase 2 of 3 =======================
983 Wahey! We've finally decided that thread tid is runnable, so
984 we now do that. Run it for as much of a quanta as possible.
985 Trivial requests are handled and the thread continues. The
986 aim is not to do too many of Phase 1 since it is expensive. */
987
988 if (0)
989 VG_(printf)("SCHED: tid %d, used %d\n", tid, VG_N_THREADS);
990
991 /* Figure out how many bbs to ask vg_run_innerloop to do. Note
992 that it decrements the counter before testing it for zero, so
993 that if VG_(dispatch_ctr) is set to N you get at most N-1
994 iterations. Also this means that VG_(dispatch_ctr) must
995 exceed zero before entering the innerloop. Also also, the
996 decrement is done before the bb is actually run, so you
997 always get at least one decrement even if nothing happens.
998 */
999 if (VG_(bbs_to_go) >= VG_SCHEDULING_QUANTUM)
1000 VG_(dispatch_ctr) = VG_SCHEDULING_QUANTUM + 1;
1001 else
1002 VG_(dispatch_ctr) = (UInt)VG_(bbs_to_go) + 1;
1003
1004 /* ... and remember what we asked for. */
1005 dispatch_ctr_SAVED = VG_(dispatch_ctr);
1006
1007 /* Actually run thread tid. */
1008 while (True) {
1009
1010 /* For stats purposes only. */
1011 VG_(num_scheduling_events_MINOR) ++;
1012
1013 if (0)
1014 VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs",
1015 tid, VG_(dispatch_ctr) - 1 );
1016
1017 trc = run_thread_for_a_while ( tid );
1018
1019 /* Deal quickly with trivial scheduling events, and resume the
1020 thread. */
1021
1022 if (trc == VG_TRC_INNER_FASTMISS) {
1023 vg_assert(VG_(dispatch_ctr) > 0);
1024
1025 /* Trivial event. Miss in the fast-cache. Do a full
1026 lookup for it. */
1027 trans_addr
1028 = VG_(search_transtab) ( vg_threads[tid].m_eip );
1029 if (trans_addr == (Addr)0) {
1030 /* Not found; we need to request a translation. */
1031 VG_(create_translation_for)( vg_threads[tid].m_eip );
1032 trans_addr = VG_(search_transtab) ( vg_threads[tid].m_eip );
1033 if (trans_addr == (Addr)0)
1034 VG_(panic)("VG_TRC_INNER_FASTMISS: missing tt_fast entry");
1035 }
1036 continue; /* with this thread */
1037 }
1038
1039 if (trc == VG_TRC_EBP_JMP_CLIENTREQ) {
1040 Bool is_triv = maybe_do_trivial_clientreq(tid);
1041 if (is_triv) {
1042 /* NOTE: a trivial request is something like a call to
1043 malloc() or free(). It DOES NOT change the
1044 Runnability of this thread nor the status of any
1045 other thread; it is purely thread-local. */
1046 continue; /* with this thread */
1047 }
1048 }
1049
1050 /* It's a non-trivial event. Give up running this thread and
1051 handle things the expensive way. */
1052 break;
1053 }
1054
1055 /* ======================= Phase 3 of 3 =======================
1056 Handle non-trivial thread requests, mostly pthread stuff. */
1057
1058 /* Ok, we've fallen out of the dispatcher for a
1059 non-completely-trivial reason. First, update basic-block
1060 counters. */
1061
1062 done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 1;
1063 vg_assert(done_this_time >= 0);
1064 VG_(bbs_to_go) -= (ULong)done_this_time;
1065 VG_(bbs_done) += (ULong)done_this_time;
1066
1067 if (0 && trc != VG_TRC_INNER_FASTMISS)
1068 VG_(message)(Vg_DebugMsg, "thread %d: completed %d bbs, trc %d",
1069 tid, done_this_time, (Int)trc );
1070
1071 if (0 && trc != VG_TRC_INNER_FASTMISS)
1072 VG_(message)(Vg_DebugMsg, "thread %d: %ld bbs, event %s",
1073 tid, VG_(bbs_done),
1074 name_of_sched_event(trc) );
1075
1076 /* Examine the thread's return code to figure out why it
1077 stopped, and handle requests. */
1078
1079 switch (trc) {
1080
1081 case VG_TRC_INNER_FASTMISS:
1082 VG_(panic)("VG_(scheduler): VG_TRC_INNER_FASTMISS");
1083 /*NOTREACHED*/
1084 break;
1085
1086 case VG_TRC_INNER_COUNTERZERO:
1087 /* Timeslice is out. Let a new thread be scheduled,
1088 simply by doing nothing, causing us to arrive back at
1089 Phase 1. */
1090 if (VG_(bbs_to_go) == 0) {
1091 goto debug_stop;
1092 }
1093 vg_assert(VG_(dispatch_ctr) == 0);
1094 break;
1095
1096 case VG_TRC_UNRESUMABLE_SIGNAL:
1097 /* It got a SIGSEGV/SIGBUS, which we need to deliver right
1098 away. Again, do nothing, so we wind up back at Phase
1099 1, whereupon the signal will be "delivered". */
1100 break;
1101
1102 case VG_TRC_EBP_JMP_SYSCALL:
1103 /* Do a syscall for the vthread tid. This could cause it
1104 to become non-runnable. */
1105 sched_do_syscall(tid);
1106 break;
1107
1108 case VG_TRC_EBP_JMP_CLIENTREQ:
1109 /* Do a client request for the vthread tid. Note that
1110 some requests will have been handled by
1111 maybe_do_trivial_clientreq(), so we don't expect to see
1112 those here.
1113 */
1114 /* The thread's %EAX points at an arg block, the first
1115 word of which is the request code. */
1116 request_code = ((UInt*)(vg_threads[tid].m_eax))[0];
1117 if (0) {
1118 VG_(sprintf)(msg_buf, "request 0x%x", request_code );
1119 print_sched_event(tid, msg_buf);
1120 }
1121 /* Do a non-trivial client request for thread tid. tid's
1122 %EAX points to a short vector of argument words, the
1123 first of which is the request code. The result of the
1124 request is put in tid's %EDX. Alternatively, perhaps
1125 the request causes tid to become non-runnable and/or
1126 other blocked threads become runnable. In general we
1127 can and often do mess with the state of arbitrary
1128 threads at this point. */
1129 if (request_code == VG_USERREQ__SHUTDOWN_VALGRIND) {
1130 return VgSrc_Shutdown;
1131 } else {
1132 do_nontrivial_clientreq(tid);
1133 }
1134 break;
1135
1136 default:
1137 VG_(printf)("\ntrc = %d\n", trc);
1138 VG_(panic)("VG_(scheduler), phase 3: "
1139 "unexpected thread return code");
1140 /* NOTREACHED */
1141 break;
1142
1143 } /* switch (trc) */
1144
1145 /* That completes Phase 3 of 3. Return now to the top of the
1146 main scheduler loop, to Phase 1 of 3. */
1147
1148 } /* top-level scheduler loop */
1149
1150
1151 /* NOTREACHED */
1152 VG_(panic)("scheduler: post-main-loop ?!");
1153 /* NOTREACHED */
1154
1155 debug_stop:
1156 /* If we exited because of a debug stop, print the translation
1157 of the last block executed -- by translating it again, and
1158 throwing away the result. */
1159 VG_(printf)(
1160 "======vvvvvvvv====== LAST TRANSLATION ======vvvvvvvv======\n");
1161 VG_(translate)( vg_threads[tid].m_eip, NULL, NULL, NULL );
1162 VG_(printf)("\n");
1163 VG_(printf)(
1164 "======^^^^^^^^====== LAST TRANSLATION ======^^^^^^^^======\n");
1165
1166 return VgSrc_BbsDone;
1167}
1168
1169
1170/* ---------------------------------------------------------------------
1171 The pthread implementation.
1172 ------------------------------------------------------------------ */
1173
1174#include <pthread.h>
1175#include <errno.h>
1176
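/* Fallback in case the pthread headers don't supply a value; keep
   space for the client stack redzone. */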
1177#if !defined(PTHREAD_STACK_MIN)
1178# define PTHREAD_STACK_MIN (16384 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB)
1179#endif
1180
1181/* /usr/include/bits/pthreadtypes.h:
1182 typedef unsigned long int pthread_t;
1183*/
1184
1185/* RUNS ON SIMD CPU!
1186 This is the return address that pthread_create uses.
1187*/
1188static
1189void do_pthread_create_bogusRA ( void )
1190{
1191 /* Tell the scheduler that this thread has returned. */
1192 Int res;
1193 VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
1194 VG_USERREQ__PTHREAD_CREATE_BOGUSRA,
1195 0, 0, 0, 0);
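   /* Control passes to the scheduler, which services
      VG_USERREQ__PTHREAD_CREATE_BOGUSRA via
      do_pthread_create_exit_by_returning() and never reschedules
      this thread. */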
1196 VG_(panic)("do_pthread_create_bogusRA: shouldn't be still alive!");
1197}
1198
1199
1200static
1201void do_pthread_cancel ( ThreadId tid_canceller,
1202 pthread_t tid_cancellee )
1203{
1204 Char msg_buf[100];
1205 /* We want to make it appear that this thread has returned to
1206 do_pthread_create_bogusRA with PTHREAD_CANCELED as the
1207 return value. So: simple: put PTHREAD_CANCELED into %EAX
1208 and &do_pthread_create_bogusRA into %EIP and keep going! */
1209 if (VG_(clo_trace_sched)) {
1210 VG_(sprintf)(msg_buf, "cancelled by %d", tid_canceller);
1211 print_sched_event(tid_cancellee, msg_buf);
1212 }
1213 vg_threads[tid_cancellee].m_eax = (UInt)PTHREAD_CANCELED;
1214 vg_threads[tid_cancellee].m_eip = (UInt)&do_pthread_create_bogusRA;
1215 vg_threads[tid_cancellee].status = VgTs_Runnable;
1216}
1217
1218
1219
1220/* Thread tid is exiting, by returning from the function it was
1221 created with. The main complication here is to resume any thread
1222 waiting to join with this one. */
1223static
1224void do_pthread_create_exit_by_returning ( ThreadId tid )
1225{
1226 ThreadId jnr; /* joiner, the thread calling pthread_join. */
1227 UInt* jnr_args;
1228 void** jnr_thread_return;
1229 Char msg_buf[100];
1230
1231 /* Mark it as not in use. Leave the stack in place so the next
1232 user of this slot doesn't reallocate it. */
1233 vg_assert(tid >= 0 && tid < VG_N_THREADS);
1234 vg_assert(vg_threads[tid].status != VgTs_Empty);
1235
1236 vg_threads[tid].retval = (void*)vg_threads[tid].m_eax;
1237
1238 if (vg_threads[tid].joiner == VG_INVALID_THREADID) {
1239 /* No one has yet done a join on me */
1240 vg_threads[tid].status = VgTs_WaitJoiner;
1241 if (VG_(clo_trace_sched)) {
1242 VG_(sprintf)(msg_buf,
1243 "root fn returns, waiting for a call pthread_join(%d)",
1244 tid);
1245 print_sched_event(tid, msg_buf);
1246 }
1247 } else {
1248 /* Someone is waiting; make their join call return with success,
1249 putting my exit code in the place specified by the caller's
1250 thread_return param. This is all very horrible, since we
1251 need to consult the joiner's arg block -- pointed to by its
1252 %EAX -- in order to extract the 2nd param of its pthread_join
1253 call. TODO: free properly the slot (also below).
1254 */
1255 jnr = vg_threads[tid].joiner;
1256 vg_assert(jnr >= 0 && jnr < VG_N_THREADS);
1257 vg_assert(vg_threads[jnr].status == VgTs_WaitJoinee);
1258 jnr_args = (UInt*)vg_threads[jnr].m_eax;
1259 jnr_thread_return = (void**)(jnr_args[2]);
1260 if (jnr_thread_return != NULL)
1261 *jnr_thread_return = vg_threads[tid].retval;
1262 vg_threads[jnr].m_edx = 0; /* success */
1263 vg_threads[jnr].status = VgTs_Runnable;
1264 vg_threads[tid].status = VgTs_Empty; /* bye! */
1265 if (VG_(clo_trace_sched)) {
1266 VG_(sprintf)(msg_buf,
1267 "root fn returns, to find a waiting pthread_join(%d)", tid);
1268 print_sched_event(tid, msg_buf);
1269 VG_(sprintf)(msg_buf,
1270 "my pthread_join(%d) returned; resuming", tid);
1271 print_sched_event(jnr, msg_buf);
1272 }
1273 }
1274
1275 /* Return value is irrelevant; this thread will not get
1276 rescheduled. */
1277}
1278
1279
1280static
1281void do_pthread_join ( ThreadId tid, ThreadId jee, void** thread_return )
1282{
1283 Char msg_buf[100];
1284
1285 /* jee, the joinee, is the thread specified as an arg in thread
1286 tid's call to pthread_join. So tid is the join-er. */
1287 vg_assert(tid >= 0 && tid < VG_N_THREADS);
1288 vg_assert(vg_threads[tid].status == VgTs_Runnable);
1289
1290 if (jee == tid) {
1291 vg_threads[tid].m_edx = EDEADLK; /* libc constant, not a kernel one */
1292 vg_threads[tid].status = VgTs_Runnable;
1293 return;
1294 }
1295
1296 if (jee < 0
1297 || jee >= VG_N_THREADS
1298 || vg_threads[jee].status == VgTs_Empty) {
1299 /* Invalid thread to join to. */
1300 vg_threads[tid].m_edx = EINVAL;
1301 vg_threads[tid].status = VgTs_Runnable;
1302 return;
1303 }
1304
1305 if (vg_threads[jee].joiner != VG_INVALID_THREADID) {
1306 /* Someone already did join on this thread */
1307 vg_threads[tid].m_edx = EINVAL;
1308 vg_threads[tid].status = VgTs_Runnable;
1309 return;
1310 }
1311
1312 /* if (vg_threads[jee].detached) ... */
1313
1314 /* Perhaps the joinee has already finished? If so return
1315 immediately with its return code, and free up the slot. TODO:
1316 free it properly (also above). */
1317 if (vg_threads[jee].status == VgTs_WaitJoiner) {
1318 vg_assert(vg_threads[jee].joiner == VG_INVALID_THREADID);
1319 vg_threads[tid].m_edx = 0; /* success */
1320 if (thread_return != NULL)
1321 *thread_return = vg_threads[jee].retval;
1322 vg_threads[tid].status = VgTs_Runnable;
1323 vg_threads[jee].status = VgTs_Empty; /* bye! */
1324 if (VG_(clo_trace_sched)) {
1325 VG_(sprintf)(msg_buf,
1326 "someone called pthread_join() on me; bye!");
1327 print_sched_event(jee, msg_buf);
1328 VG_(sprintf)(msg_buf,
1329 "my pthread_join(%d) returned immediately",
1330 jee );
1331 print_sched_event(tid, msg_buf);
1332 }
1333 return;
1334 }
1335
1336 /* Ok, so we'll have to wait on jee. */
1337 vg_threads[jee].joiner = tid;
1338 vg_threads[tid].status = VgTs_WaitJoinee;
1339 if (VG_(clo_trace_sched)) {
1340 VG_(sprintf)(msg_buf,
1341 "blocking on call of pthread_join(%d)", jee );
1342 print_sched_event(tid, msg_buf);
1343 }
1344 /* So tid's join call does not return just now. */
1345}
1346
1347
1348static
1349void do_pthread_create ( ThreadId parent_tid,
1350 pthread_t* thread,
1351 pthread_attr_t* attr,
1352 void* (*start_routine)(void *),
1353 void* arg )
1354{
1355 Addr new_stack;
1356 UInt new_stk_szb;
1357 ThreadId tid;
1358 Char msg_buf[100];
1359
1360 /* Paranoia ... */
1361 vg_assert(sizeof(pthread_t) == sizeof(UInt));
1362
1363 vg_assert(vg_threads[parent_tid].status != VgTs_Empty);
1364
1365 tid = vg_alloc_ThreadState();
1366
1367 /* If we've created the main thread's tid, we're in deep trouble :) */
1368 vg_assert(tid != 0);
1369
1370 /* Copy the parent's CPU state into the child's, in a roundabout
1371 way (via baseBlock). */
1372 VG_(load_thread_state)(parent_tid);
1373 VG_(save_thread_state)(tid);
1374
1375 /* Consider allocating the child a stack, if the one it already has
1376 is inadequate. */
1377 new_stk_szb = PTHREAD_STACK_MIN;
1378
1379 if (new_stk_szb > vg_threads[tid].stack_size) {
1380 /* Again, for good measure :) We definitely don't want to be
1381 allocating a stack for the main thread. */
1382 vg_assert(tid != 0);
1383 /* for now, we don't handle the case of anything other than
1384 assigning it for the first time. */
1385 vg_assert(vg_threads[tid].stack_size == 0);
1386 vg_assert(vg_threads[tid].stack_base == (Addr)NULL);
1387 new_stack = (Addr)VG_(get_memory_from_mmap)( new_stk_szb );
1388 vg_threads[tid].stack_base = new_stack;
1389 vg_threads[tid].stack_size = new_stk_szb;
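      /* Stacks grow downwards: point %esp at the base of a redzone
         occupying the top of the new block; the redzone is made
         inaccessible just below, so the first push lands beneath it. */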
1390 vg_threads[tid].m_esp
1391 = new_stack + new_stk_szb
1392 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB;
1393 }
1394 if (VG_(clo_instrument))
1395 VGM_(make_noaccess)( vg_threads[tid].m_esp,
1396 VG_AR_CLIENT_STACKBASE_REDZONE_SZB );
1397
1398 /* push arg */
1399 vg_threads[tid].m_esp -= 4;
1400 * (UInt*)(vg_threads[tid].m_esp) = (UInt)arg;
1401
1402 /* push (magical) return address */
1403 vg_threads[tid].m_esp -= 4;
1404 * (UInt*)(vg_threads[tid].m_esp) = (UInt)do_pthread_create_bogusRA;
1405
1406 if (VG_(clo_instrument))
1407 VGM_(make_readable)( vg_threads[tid].m_esp, 2 * 4 );
1408
1409 /* this is where we start */
1410 vg_threads[tid].m_eip = (UInt)start_routine;
1411
1412 if (VG_(clo_trace_sched)) {
1413 VG_(sprintf)(msg_buf,
1414 "new thread, created by %d", parent_tid );
1415 print_sched_event(tid, msg_buf);
1416 }
1417
1418 /* store the thread id in *thread. */
1419 // if (VG_(clo_instrument))
1420 // ***** CHECK *thread is writable
1421 *thread = (pthread_t)tid;
1422
1423 /* return zero */
1424 vg_threads[tid].joiner = VG_INVALID_THREADID;
1425 vg_threads[tid].status = VgTs_Runnable;
1426 vg_threads[tid].m_edx = 0; /* success */
1427}
1428
1429
1430/* Horrible hacks to do with pthread_mutex_t: the real pthread_mutex_t
1431 is a struct with at least 5 words:
1432 typedef struct
1433 {
1434 int __m_reserved; -- Reserved for future use
1435 int __m_count; -- Depth of recursive locking
1436 _pthread_descr __m_owner; -- Owner thread (if recursive or errcheck)
1437 int __m_kind; -- Mutex kind: fast, recursive or errcheck
1438 struct _pthread_fastlock __m_lock; -- Underlying fast lock
1439 } pthread_mutex_t;
1440 Ours is just a single word, an index into vg_mutexes[].
1441 For now I'll park it in the __m_reserved field.
1442
1443 Uninitialised mutexes (PTHREAD_MUTEX_INITIALIZER) all have
1444 a zero __m_count field (see /usr/include/pthread.h). So I'll
1445 use zero to mean non-inited, and 1 to mean inited.
1446
1447 How convenient.
1448*/
1449
1450static
1451 void initialise_mutex ( ThreadId tid, pthread_mutex_t *mutex )
1452{
1453 MutexId mid;
1454 Char msg_buf[100];
1455 /* vg_alloc_MutexId aborts if we can't allocate a mutex, for
1456 whatever reason. */
1457 mid = vg_alloc_VgMutex();
1458 vg_mutexes[mid].in_use = True;
1459 vg_mutexes[mid].held = False;
1460 vg_mutexes[mid].owner = VG_INVALID_THREADID; /* irrelevant */
1461 mutex->__m_reserved = mid;
1462 mutex->__m_count = 1; /* initialised */
1463 if (VG_(clo_trace_pthread)) {
1464 VG_(sprintf)(msg_buf, "(initialise mutex) (%p) -> %d",
1465 mutex, mid );
1466 print_pthread_event(tid, msg_buf);
1467 }
1468}
1469
1470/* Allocate a new MutexId and write it into *mutex. Ideally take
1471 notice of the attributes in *mutexattr. */
1472static
1473void do_pthread_mutex_init ( ThreadId tid,
1474 pthread_mutex_t *mutex,
1475 const pthread_mutexattr_t *mutexattr)
1476{
1477 Char msg_buf[100];
1478 /* Paranoia ... */
sewardje663cb92002-04-12 10:26:32 +00001479 vg_assert(sizeof(pthread_mutex_t) >= sizeof(UInt));
1480
1481 initialise_mutex(tid, mutex);
1482
1483 if (VG_(clo_trace_pthread)) {
1484 VG_(sprintf)(msg_buf, "pthread_mutex_init (%p) -> %d",
1485 mutex, mutex->__m_reserved );
1486 print_pthread_event(tid, msg_buf);
1487 }
1488
1489 /*
1490 RETURN VALUE
1491 pthread_mutex_init always returns 0. The other mutex functions
1492 return 0 on success and a non-zero error code on error.
1493 */
1494 /* THIS THREAD returns with 0. */
1495 vg_threads[tid].m_edx = 0;
1496}
1497
1498
1499static
1500void do_pthread_mutex_lock( ThreadId tid, pthread_mutex_t *mutex )
1501{
1502 MutexId mid;
1503 Char msg_buf[100];
1504
1505 /* *mutex contains the MutexId, or one of the magic values
1506 PTHREAD_*MUTEX_INITIALIZER*, indicating we need to initialise it
1507 now. See comment(s) above re use of __m_count to indicated
1508 initialisation status.
1509 */
1510
1511 /* POSIX doesn't mandate this, but for sanity ... */
1512 if (mutex == NULL) {
1513 vg_threads[tid].m_edx = EINVAL;
1514 return;
1515 }
1516
1517 if (mutex->__m_count == 0) {
1518 initialise_mutex(tid, mutex);
1519 }
1520
1521 mid = mutex->__m_reserved;
1522 if (mid < 0 || mid >= VG_N_MUTEXES || !vg_mutexes[mid].in_use) {
1523 vg_threads[tid].m_edx = EINVAL;
1524 return;
1525 }
1526
1527 if (VG_(clo_trace_pthread)) {
1528 VG_(sprintf)(msg_buf, "pthread_mutex_lock %d (%p)",
1529 mid, mutex );
1530 print_pthread_event(tid, msg_buf);
1531 }
1532
1533 /* Assert initialised. */
1534 vg_assert(mutex->__m_count == 1);
1535
1536 /* Assume tid valid. */
1537 vg_assert(vg_threads[tid].status == VgTs_Runnable);
1538
1539 if (vg_mutexes[mid].held) {
1540 if (vg_mutexes[mid].owner == tid) {
1541 vg_threads[tid].m_edx = EDEADLK;
1542 return;
1543 }
1544 /* Someone else has it; we have to wait. */
1545 vg_threads[tid].status = VgTs_WaitMX;
1546 vg_threads[tid].waited_on_mid = mid;
1547 /* No assignment to %EDX, since we're blocking. */
1548 if (VG_(clo_trace_pthread)) {
1549 VG_(sprintf)(msg_buf, "pthread_mutex_lock %d (%p): BLOCK",
1550 mid, mutex );
1551 print_pthread_event(tid, msg_buf);
1552 }
1553 } else {
1554 /* We get it! */
1555 vg_mutexes[mid].held = True;
1556 vg_mutexes[mid].owner = tid;
1557 /* return 0 (success). */
1558 vg_threads[tid].m_edx = 0;
1559 }
1560}
1561
1562
1563static
1564void do_pthread_mutex_unlock ( ThreadId tid,
1565 pthread_mutex_t *mutex )
1566{
1567 MutexId mid;
1568 Int i;
1569 Char msg_buf[100];
1570
1571 if (mutex == NULL
1572 || mutex->__m_count != 1) {
1573 vg_threads[tid].m_edx = EINVAL;
1574 return;
1575 }
1576
1577 mid = mutex->__m_reserved;
1578 if (mid < 0 || mid >= VG_N_MUTEXES || !vg_mutexes[mid].in_use) {
1579 vg_threads[tid].m_edx = EINVAL;
1580 return;
1581 }
1582
1583 if (VG_(clo_trace_pthread)) {
1584 VG_(sprintf)(msg_buf, "pthread_mutex_unlock %d (%p)",
1585 mid, mutex );
1586 print_pthread_event(tid, msg_buf);
1587 }
1588
1589 /* Assume tid valid */
1590 vg_assert(vg_threads[tid].status == VgTs_Runnable);
1591
1592 /* Barf if we don't currently hold the mutex. */
1593 if (!vg_mutexes[mid].held || vg_mutexes[mid].owner != tid) {
1594 vg_threads[tid].m_edx = EPERM;
1595 return;
1596 }
1597
1598 /* Find some arbitrary thread waiting on this mutex, and make it
1599 runnable. If none are waiting, mark the mutex as not held. */
1600 for (i = 0; i < VG_N_THREADS; i++) {
1601 if (vg_threads[i].status == VgTs_Empty)
1602 continue;
1603 if (vg_threads[i].status == VgTs_WaitMX
1604 && vg_threads[i].waited_on_mid == mid)
1605 break;
1606 }
1607
1608 vg_assert(i <= VG_N_THREADS);
1609 if (i == VG_N_THREADS) {
1610 /* Nobody else is waiting on it. */
1611 vg_mutexes[mid].held = False;
1612 } else {
1613 /* Notionally transfer the hold to thread i, whose
1614 pthread_mutex_lock() call now returns with 0 (success). */
1615 vg_mutexes[mid].owner = i;
1616 vg_threads[i].status = VgTs_Runnable;
1617 vg_threads[i].m_edx = 0; /* pth_lock() success */
1618
1619 if (VG_(clo_trace_pthread)) {
1620 VG_(sprintf)(msg_buf, "pthread_mutex_lock %d: RESUME",
1621 mid );
1622 print_pthread_event(tid, msg_buf);
1623 }
1624 }
1625
1626 /* In either case, our (tid's) pth_unlock() returns with 0
1627 (success). */
1628 vg_threads[tid].m_edx = 0; /* Success. */
1629}
1630
1631
1632static void do_pthread_mutex_destroy ( ThreadId tid,
1633 pthread_mutex_t *mutex )
1634{
1635 MutexId mid;
1636 Char msg_buf[100];
1637
1638 if (mutex == NULL
1639 || mutex->__m_count != 1) {
1640 vg_threads[tid].m_edx = EINVAL;
1641 return;
1642 }
1643
1644 mid = mutex->__m_reserved;
1645 if (mid < 0 || mid >= VG_N_MUTEXES || !vg_mutexes[mid].in_use) {
1646 vg_threads[tid].m_edx = EINVAL;
1647 return;
1648 }
1649
1650 if (VG_(clo_trace_pthread)) {
1651 VG_(sprintf)(msg_buf, "pthread_mutex_destroy %d (%p)",
1652 mid, mutex );
1653 print_pthread_event(tid, msg_buf);
1654 }
1655
1656 /* Assume tid valid */
1657 vg_assert(vg_threads[tid].status == VgTs_Runnable);
1658
1659 /* Barf if the mutex is currently held. */
1660 if (vg_mutexes[mid].held) {
1661 vg_threads[tid].m_edx = EBUSY;
1662 return;
1663 }
1664
1665 mutex->__m_count = 0; /* uninitialised */
1666 vg_mutexes[mid].in_use = False;
1667 vg_threads[tid].m_edx = 0;
1668}
1669
1670
1671/* ---------------------------------------------------------------------
1672 Handle non-trivial client requests.
1673 ------------------------------------------------------------------ */
1674
1675static
1676void do_nontrivial_clientreq ( ThreadId tid )
1677{
1678 UInt* arg = (UInt*)(vg_threads[tid].m_eax);
1679 UInt req_no = arg[0];
1680 switch (req_no) {
1681
1682 case VG_USERREQ__PTHREAD_CREATE:
1683 do_pthread_create( tid,
1684 (pthread_t*)arg[1],
1685 (pthread_attr_t*)arg[2],
1686 (void*(*)(void*))arg[3],
1687 (void*)arg[4] );
1688 break;
1689
1690 case VG_USERREQ__PTHREAD_CREATE_BOGUSRA:
1691 do_pthread_create_exit_by_returning( tid );
1692 break;
1693
1694 case VG_USERREQ__PTHREAD_JOIN:
1695 do_pthread_join( tid, arg[1], (void**)(arg[2]) );
1696 break;
1697
1698 /* Sigh ... this probably will cause huge numbers of major
1699 (expensive) scheduling events, for no real reason.
1700 Perhaps should be classified as a trivial-request. */
1701 case VG_USERREQ__PTHREAD_GET_THREADID:
1702 vg_threads[tid].m_edx = tid;
1703 break;
1704
1705 case VG_USERREQ__PTHREAD_MUTEX_INIT:
1706 do_pthread_mutex_init( tid,
1707 (pthread_mutex_t *)(arg[1]),
1708 (pthread_mutexattr_t *)(arg[2]) );
1709 break;
1710
1711 case VG_USERREQ__PTHREAD_MUTEX_LOCK:
1712 do_pthread_mutex_lock( tid, (pthread_mutex_t *)(arg[1]) );
1713 break;
1714
1715 case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
1716 do_pthread_mutex_unlock( tid, (pthread_mutex_t *)(arg[1]) );
1717 break;
1718
1719 case VG_USERREQ__PTHREAD_MUTEX_DESTROY:
1720 do_pthread_mutex_destroy( tid, (pthread_mutex_t *)(arg[1]) );
1721 break;
1722
1723 case VG_USERREQ__PTHREAD_CANCEL:
1724 do_pthread_cancel( tid, (pthread_t)(arg[1]) );
1725 break;
1726
1727 case VG_USERREQ__MAKE_NOACCESS:
1728 case VG_USERREQ__MAKE_WRITABLE:
1729 case VG_USERREQ__MAKE_READABLE:
1730 case VG_USERREQ__DISCARD:
1731 case VG_USERREQ__CHECK_WRITABLE:
1732 case VG_USERREQ__CHECK_READABLE:
1733 case VG_USERREQ__MAKE_NOACCESS_STACK:
1734 case VG_USERREQ__RUNNING_ON_VALGRIND:
1735 case VG_USERREQ__DO_LEAK_CHECK:
1736 vg_threads[tid].m_edx = VG_(handle_client_request) ( arg );
1737 break;
1738
1739 case VG_USERREQ__SIGNAL_RETURNS:
1740 /* vthread tid is returning from a signal handler;
1741 modify its stack/regs accordingly. */
1742 VG_(signal_returns)(tid);
1743 break;
1744
1745 default:
1746 VG_(printf)("panic'd on private request = 0x%x\n", arg[0] );
1747 VG_(panic)("handle_private_client_pthread_request: "
1748 "unknown request");
1749 /*NOTREACHED*/
1750 break;
1751 }
1752}
1753
1754
1755/*--------------------------------------------------------------------*/
1756/*--- end vg_scheduler.c ---*/
1757/*--------------------------------------------------------------------*/