/*--------------------------------------------------------------------*/
/*--- A user-space pthreads implementation.        vg_scheduler.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an x86 protected-mode emulator
   designed for debugging and profiling binaries on x86-Unixes.

   Copyright (C) 2000-2002 Julian Seward
   jseward@acm.org
   Julian_Seward@muraroa.demon.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file LICENSE.
*/

#include "vg_include.h"
#include "vg_constants.h"

#include "valgrind.h"   /* for VG_USERREQ__MAKE_NOACCESS and
                            VG_USERREQ__DO_LEAK_CHECK */
/* BORKAGE as of 11 Apr 02

Note! This implementation is so poor as to not be suitable for use by
anyone at all!

- properly save scheduler private state in signal delivery frames.

- handle signals interrupting read/write and nanosleep, taking notice
  of SA_RESTART or not.

- when a thread is done, mark its stack as noaccess.

- 0xDEADBEEF syscall errors ... fix.

*/


/* ---------------------------------------------------------------------
   Types and globals for the scheduler.
   ------------------------------------------------------------------ */

/* type ThreadId is defined in vg_include.h. */

/* struct ThreadState is defined in vg_include.h. */

/* Private globals.  A statically allocated array of threads. */
static ThreadState vg_threads[VG_N_THREADS];


/* vg_oursignalhandler() might longjmp().  Here's the jmp_buf. */
jmp_buf VG_(scheduler_jmpbuf);
/* ... and if so, here's the signal which caused it to do so. */
Int VG_(longjmpd_on_signal);


/* Machinery to keep track of which threads are waiting on which
   fds. */
typedef
   struct {
      /* The thread which made the request. */
      ThreadId tid;

      /* The next two fields describe the request. */
      /* File descriptor waited for.  -1 means this slot is not in use */
      Int fd;
      /* The syscall number the fd is used in. */
      Int syscall_no;

      /* False => still waiting for select to tell us the fd is ready
         to go.  True => the fd is ready, but the results have not yet
         been delivered back to the calling thread.  Once the latter
         happens, this entire record is marked as no longer in use, by
         making the fd field be -1. */
      Bool ready;
   }
   VgWaitedOnFd;

static VgWaitedOnFd vg_waiting_fds[VG_N_WAITING_FDS];



typedef
   struct {
      /* Is this slot in use, or free? */
      Bool in_use;
      /* If in_use, is this mutex held by some thread, or not? */
      Bool held;
      /* if held==True, owner indicates who by. */
      ThreadId owner;
   }
   VgMutex;

static VgMutex vg_mutexes[VG_N_MUTEXES];

/* Forwards */
static void do_nontrivial_clientreq ( ThreadId tid );


/* ---------------------------------------------------------------------
   Helper functions for the scheduler.
   ------------------------------------------------------------------ */

static
void pp_sched_status ( void )
{
   Int i;
   VG_(printf)("\nsched status:\n");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (vg_threads[i].status == VgTs_Empty) continue;
      VG_(printf)("tid %d:  ", i);
      switch (vg_threads[i].status) {
         case VgTs_Runnable:   VG_(printf)("Runnable\n"); break;
         case VgTs_WaitFD:     VG_(printf)("WaitFD\n"); break;
         case VgTs_WaitJoiner: VG_(printf)("WaitJoiner(%d)\n",
                                           vg_threads[i].joiner); break;
         case VgTs_WaitJoinee: VG_(printf)("WaitJoinee\n"); break;
         case VgTs_Sleeping:   VG_(printf)("Sleeping\n"); break;
         case VgTs_WaitMX:     VG_(printf)("WaitMX\n"); break;
         default:              VG_(printf)("???\n"); break;
      }
   }
   VG_(printf)("\n");
}

static
void add_waiting_fd ( ThreadId tid, Int fd, Int syscall_no )
{
   Int i;

   vg_assert(fd != -1); /* avoid total chaos */

   for (i = 0; i < VG_N_WAITING_FDS; i++)
      if (vg_waiting_fds[i].fd == -1)
         break;

   if (i == VG_N_WAITING_FDS)
      VG_(panic)("add_waiting_fd: VG_N_WAITING_FDS is too low");
   /*
   VG_(printf)("add_waiting_fd: add (tid %d, fd %d) at slot %d\n",
               tid, fd, i);
   */
   vg_waiting_fds[i].fd         = fd;
   vg_waiting_fds[i].tid        = tid;
   vg_waiting_fds[i].ready      = False;
   vg_waiting_fds[i].syscall_no = syscall_no;
}



static
void print_sched_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, "SCHED[%d]: %s", tid, what );
}


static
void print_pthread_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, "PTHREAD[%d]: %s", tid, what );
}


static
Char* name_of_sched_event ( UInt event )
{
   switch (event) {
      case VG_TRC_EBP_JMP_SYSCALL:    return "SYSCALL";
      case VG_TRC_EBP_JMP_CLIENTREQ:  return "CLIENTREQ";
      case VG_TRC_INNER_COUNTERZERO:  return "COUNTERZERO";
      case VG_TRC_INNER_FASTMISS:     return "FASTMISS";
      case VG_TRC_UNRESUMABLE_SIGNAL: return "FATALSIGNAL";
      default:                        return "??UNKNOWN??";
   }
}


/* Create a translation of the client basic block beginning at
   orig_addr, and add it to the translation cache & translation table.
   This probably doesn't really belong here, but, hey ...
*/
void VG_(create_translation_for) ( Addr orig_addr )
{
   Addr    trans_addr;
   TTEntry tte;
   Int     orig_size, trans_size;
   /* Ensure there is space to hold a translation. */
   VG_(maybe_do_lru_pass)();
   VG_(translate)( orig_addr, &orig_size, &trans_addr, &trans_size );
   /* Copy data at trans_addr into the translation cache.
      Returned pointer is to the code, not to the 4-byte
      header. */
   /* Since the .orig_size and .trans_size fields are
      UShort, be paranoid. */
   vg_assert(orig_size > 0 && orig_size < 65536);
   vg_assert(trans_size > 0 && trans_size < 65536);
   tte.orig_size  = orig_size;
   tte.orig_addr  = orig_addr;
   tte.trans_size = trans_size;
   tte.trans_addr = VG_(copy_to_transcache)
                       ( trans_addr, trans_size );
   tte.mru_epoch  = VG_(current_epoch);
   /* Free the intermediary -- was allocated by VG_(emit_code). */
   VG_(jitfree)( (void*)trans_addr );
   /* Add to trans tab and set back pointer. */
   VG_(add_to_trans_tab) ( &tte );
   /* Update stats. */
   VG_(this_epoch_in_count) ++;
   VG_(this_epoch_in_osize) += orig_size;
   VG_(this_epoch_in_tsize) += trans_size;
   VG_(overall_in_count) ++;
   VG_(overall_in_osize) += orig_size;
   VG_(overall_in_tsize) += trans_size;
   /* Record translated area for SMC detection. */
   VG_(smc_mark_original) ( orig_addr, orig_size );
}


/* Allocate a completely empty ThreadState record. */
static
ThreadId vg_alloc_ThreadState ( void )
{
   Int i;
   for (i = 0; i < VG_N_THREADS; i++) {
      if (vg_threads[i].status == VgTs_Empty)
         return i;
   }
   VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
   VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
   VG_(panic)("VG_N_THREADS is too low");
   /*NOTREACHED*/
}


ThreadState* VG_(get_thread_state) ( ThreadId tid )
{
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   vg_assert(vg_threads[tid].status != VgTs_Empty);
   return & vg_threads[tid];
}


/* Find an unused VgMutex record. */
static
MutexId vg_alloc_VgMutex ( void )
{
   Int i;
   for (i = 0; i < VG_N_MUTEXES; i++) {
      if (!vg_mutexes[i].in_use)
         return i;
   }
   VG_(printf)("vg_alloc_VgMutex: no free slots available\n");
   VG_(printf)("Increase VG_N_MUTEXES, rebuild and try again.\n");
   VG_(panic)("VG_N_MUTEXES is too low");
   /*NOTREACHED*/
}


/* Copy the saved state of a thread into VG_(baseBlock), ready for it
   to be run. */
__inline__
void VG_(load_thread_state) ( ThreadId tid )
{
   Int i;
   VG_(baseBlock)[VGOFF_(m_eax)] = vg_threads[tid].m_eax;
   VG_(baseBlock)[VGOFF_(m_ebx)] = vg_threads[tid].m_ebx;
   VG_(baseBlock)[VGOFF_(m_ecx)] = vg_threads[tid].m_ecx;
   VG_(baseBlock)[VGOFF_(m_edx)] = vg_threads[tid].m_edx;
   VG_(baseBlock)[VGOFF_(m_esi)] = vg_threads[tid].m_esi;
   VG_(baseBlock)[VGOFF_(m_edi)] = vg_threads[tid].m_edi;
   VG_(baseBlock)[VGOFF_(m_ebp)] = vg_threads[tid].m_ebp;
   VG_(baseBlock)[VGOFF_(m_esp)] = vg_threads[tid].m_esp;
   VG_(baseBlock)[VGOFF_(m_eflags)] = vg_threads[tid].m_eflags;
   VG_(baseBlock)[VGOFF_(m_eip)] = vg_threads[tid].m_eip;

   for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
      VG_(baseBlock)[VGOFF_(m_fpustate) + i] = vg_threads[tid].m_fpu[i];

   VG_(baseBlock)[VGOFF_(sh_eax)] = vg_threads[tid].sh_eax;
   VG_(baseBlock)[VGOFF_(sh_ebx)] = vg_threads[tid].sh_ebx;
   VG_(baseBlock)[VGOFF_(sh_ecx)] = vg_threads[tid].sh_ecx;
   VG_(baseBlock)[VGOFF_(sh_edx)] = vg_threads[tid].sh_edx;
   VG_(baseBlock)[VGOFF_(sh_esi)] = vg_threads[tid].sh_esi;
   VG_(baseBlock)[VGOFF_(sh_edi)] = vg_threads[tid].sh_edi;
   VG_(baseBlock)[VGOFF_(sh_ebp)] = vg_threads[tid].sh_ebp;
   VG_(baseBlock)[VGOFF_(sh_esp)] = vg_threads[tid].sh_esp;
   VG_(baseBlock)[VGOFF_(sh_eflags)] = vg_threads[tid].sh_eflags;
}


/* Copy the state of a thread from VG_(baseBlock), presumably after it
   has been descheduled.  For sanity-check purposes, fill the vacated
   VG_(baseBlock) with garbage so as to make the system more likely to
   fail quickly if we erroneously continue to poke around inside
   VG_(baseBlock) without first doing a load_thread_state().
*/
__inline__
void VG_(save_thread_state) ( ThreadId tid )
{
   Int i;
   const UInt junk = 0xDEADBEEF;

   vg_threads[tid].m_eax = VG_(baseBlock)[VGOFF_(m_eax)];
   vg_threads[tid].m_ebx = VG_(baseBlock)[VGOFF_(m_ebx)];
   vg_threads[tid].m_ecx = VG_(baseBlock)[VGOFF_(m_ecx)];
   vg_threads[tid].m_edx = VG_(baseBlock)[VGOFF_(m_edx)];
   vg_threads[tid].m_esi = VG_(baseBlock)[VGOFF_(m_esi)];
   vg_threads[tid].m_edi = VG_(baseBlock)[VGOFF_(m_edi)];
   vg_threads[tid].m_ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
   vg_threads[tid].m_esp = VG_(baseBlock)[VGOFF_(m_esp)];
   vg_threads[tid].m_eflags = VG_(baseBlock)[VGOFF_(m_eflags)];
   vg_threads[tid].m_eip = VG_(baseBlock)[VGOFF_(m_eip)];

   for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
      vg_threads[tid].m_fpu[i] = VG_(baseBlock)[VGOFF_(m_fpustate) + i];

   vg_threads[tid].sh_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
   vg_threads[tid].sh_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
   vg_threads[tid].sh_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
   vg_threads[tid].sh_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
   vg_threads[tid].sh_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
   vg_threads[tid].sh_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
   vg_threads[tid].sh_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
   vg_threads[tid].sh_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
   vg_threads[tid].sh_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];

   /* Fill it up with junk. */
   VG_(baseBlock)[VGOFF_(m_eax)] = junk;
   VG_(baseBlock)[VGOFF_(m_ebx)] = junk;
   VG_(baseBlock)[VGOFF_(m_ecx)] = junk;
   VG_(baseBlock)[VGOFF_(m_edx)] = junk;
   VG_(baseBlock)[VGOFF_(m_esi)] = junk;
   VG_(baseBlock)[VGOFF_(m_edi)] = junk;
   VG_(baseBlock)[VGOFF_(m_ebp)] = junk;
   VG_(baseBlock)[VGOFF_(m_esp)] = junk;
   VG_(baseBlock)[VGOFF_(m_eflags)] = junk;
   VG_(baseBlock)[VGOFF_(m_eip)] = junk;

   for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
      VG_(baseBlock)[VGOFF_(m_fpustate) + i] = junk;
}
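
/* Note: taken together, VG_(load_thread_state) and
   VG_(save_thread_state) treat VG_(baseBlock) as the single "live"
   register file: only the currently-running thread has its state
   there; every other thread's state lives in vg_threads[].  The
   0xDEADBEEF fill above means that a stale read of a descheduled
   thread's registers shows up quickly as obvious garbage rather than
   as plausible-looking values. */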


/* Run the thread tid for a while, and return a VG_TRC_* value to the
   scheduler indicating what happened. */
static
UInt run_thread_for_a_while ( ThreadId tid )
{
   UInt trc = 0;
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   vg_assert(vg_threads[tid].status != VgTs_Empty);
   vg_assert(VG_(bbs_to_go) > 0);

   VG_(load_thread_state) ( tid );
   if (__builtin_setjmp(VG_(scheduler_jmpbuf)) == 0) {
      /* try this ... */
      trc = VG_(run_innerloop)();
      /* We get here if the client didn't take a fault. */
   } else {
      /* We get here if the client took a fault, which caused our
         signal handler to longjmp. */
      vg_assert(trc == 0);
      trc = VG_TRC_UNRESUMABLE_SIGNAL;
   }
   VG_(save_thread_state) ( tid );
   return trc;
}


/* Increment the LRU epoch counter. */
static
void increment_epoch ( void )
{
   VG_(current_epoch)++;
   if (VG_(clo_verbosity) > 2) {
      UInt tt_used, tc_used;
      VG_(get_tt_tc_used) ( &tt_used, &tc_used );
      VG_(message)(Vg_UserMsg,
         "%lu bbs, in: %d (%d -> %d), out %d (%d -> %d), TT %d, TC %d",
         VG_(bbs_done),
         VG_(this_epoch_in_count),
         VG_(this_epoch_in_osize),
         VG_(this_epoch_in_tsize),
         VG_(this_epoch_out_count),
         VG_(this_epoch_out_osize),
         VG_(this_epoch_out_tsize),
         tt_used, tc_used
      );
   }
   VG_(this_epoch_in_count) = 0;
   VG_(this_epoch_in_osize) = 0;
   VG_(this_epoch_in_tsize) = 0;
   VG_(this_epoch_out_count) = 0;
   VG_(this_epoch_out_osize) = 0;
   VG_(this_epoch_out_tsize) = 0;
}

/* Initialise the scheduler.  Create a single "main" thread ready to
   run, with special ThreadId of zero.  This is called at startup; the
   caller takes care that the client's state is parked in
   VG_(baseBlock).
*/
void VG_(scheduler_init) ( void )
{
   Int      i;
   Addr     startup_esp;
   ThreadId tid_main;

   startup_esp = VG_(baseBlock)[VGOFF_(m_esp)];
   if ((startup_esp & VG_STARTUP_STACK_MASK) != VG_STARTUP_STACK_MASK) {
      VG_(printf)("%%esp at startup = %p is not near %p; aborting\n",
                  startup_esp, VG_STARTUP_STACK_MASK);
      VG_(panic)("unexpected %esp at startup");
   }

   for (i = 0; i < VG_N_THREADS; i++) {
      vg_threads[i].stack_size = 0;
      vg_threads[i].stack_base = (Addr)NULL;
   }

   for (i = 0; i < VG_N_WAITING_FDS; i++)
      vg_waiting_fds[i].fd = -1; /* not in use */

   for (i = 0; i < VG_N_MUTEXES; i++)
      vg_mutexes[i].in_use = False;

   /* Assert this is thread zero, which has certain magic
      properties. */
   tid_main = vg_alloc_ThreadState();
   vg_assert(tid_main == 0);

   vg_threads[tid_main].status = VgTs_Runnable;
   vg_threads[tid_main].joiner = VG_INVALID_THREADID;
   vg_threads[tid_main].retval = NULL; /* not important */

   /* Copy VG_(baseBlock) state to tid_main's slot. */
   VG_(save_thread_state) ( tid_main );
}


/* What if fd isn't a valid fd? */
static
void set_fd_nonblocking ( Int fd )
{
   Int res = VG_(fcntl)( fd, VKI_F_GETFL, 0 );
   vg_assert(!VG_(is_kerror)(res));
   res |= VKI_O_NONBLOCK;
   res = VG_(fcntl)( fd, VKI_F_SETFL, res );
   vg_assert(!VG_(is_kerror)(res));
}

static
void set_fd_blocking ( Int fd )
{
   Int res = VG_(fcntl)( fd, VKI_F_GETFL, 0 );
   vg_assert(!VG_(is_kerror)(res));
   res &= ~VKI_O_NONBLOCK;
   res = VG_(fcntl)( fd, VKI_F_SETFL, res );
   vg_assert(!VG_(is_kerror)(res));
}

static
Bool fd_is_blockful ( Int fd )
{
   Int res = VG_(fcntl)( fd, VKI_F_GETFL, 0 );
   vg_assert(!VG_(is_kerror)(res));
   return (res & VKI_O_NONBLOCK) ? False : True;
}



/* If the request for tid is purely thread-local, do it, putting the
   result in the thread's %EDX -- without changing its scheduling
   state in any way, nor that of any other threads -- and return True.

   If the request is non-trivial, return False; a more capable but
   slower mechanism will deal with it.
*/
static
Bool maybe_do_trivial_clientreq ( ThreadId tid )
{
#  define SIMPLE_RETURN(vvv)               \
      { vg_threads[tid].m_edx = (vvv);     \
        return True;                       \
      }

   UInt* arg    = (UInt*)(vg_threads[tid].m_eax);
   UInt  req_no = arg[0];
   switch (req_no) {
      case VG_USERREQ__MALLOC:
         SIMPLE_RETURN(
            (UInt)VG_(client_malloc) ( arg[1], Vg_AllocMalloc )
         );
      case VG_USERREQ__BUILTIN_NEW:
         SIMPLE_RETURN(
            (UInt)VG_(client_malloc) ( arg[1], Vg_AllocNew )
         );
      case VG_USERREQ__BUILTIN_VEC_NEW:
         SIMPLE_RETURN(
            (UInt)VG_(client_malloc) ( arg[1], Vg_AllocNewVec )
         );
      case VG_USERREQ__FREE:
         VG_(client_free) ( (void*)arg[1], Vg_AllocMalloc );
         SIMPLE_RETURN(0); /* irrelevant */
      case VG_USERREQ__BUILTIN_DELETE:
         VG_(client_free) ( (void*)arg[1], Vg_AllocNew );
         SIMPLE_RETURN(0); /* irrelevant */
      case VG_USERREQ__BUILTIN_VEC_DELETE:
         VG_(client_free) ( (void*)arg[1], Vg_AllocNewVec );
         SIMPLE_RETURN(0); /* irrelevant */
      case VG_USERREQ__CALLOC:
         SIMPLE_RETURN(
            (UInt)VG_(client_calloc) ( arg[1], arg[2] )
         );
      case VG_USERREQ__REALLOC:
         SIMPLE_RETURN(
            (UInt)VG_(client_realloc) ( (void*)arg[1], arg[2] )
         );
      case VG_USERREQ__MEMALIGN:
         SIMPLE_RETURN(
            (UInt)VG_(client_memalign) ( arg[1], arg[2] )
         );
      default:
         /* Too hard; wimp out. */
         return False;
   }
#  undef SIMPLE_RETURN
}
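
/* Worked example of the protocol this function implements (a sketch
   inferred from the code above; the client-side macro in valgrind.h
   which builds the arg block is not shown, and its exact name is not
   assumed here): a client wanting malloc(100) builds an arg block
   { VG_USERREQ__MALLOC, 100 }, puts its address in %EAX, and traps
   into the scheduler.  The VG_USERREQ__MALLOC case above then does

      vg_threads[tid].m_edx
         = (UInt)VG_(client_malloc) ( 100, Vg_AllocMalloc );

   and the client reads the resulting address back out of %EDX. */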


static
void sched_do_syscall ( ThreadId tid )
{
   UInt saved_eax;
   UInt res, syscall_no;
   UInt fd;
   Bool might_block, assumed_nonblocking;
   Bool orig_fd_blockness;
   Char msg_buf[100];

   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   vg_assert(vg_threads[tid].status == VgTs_Runnable);

   syscall_no = vg_threads[tid].m_eax; /* syscall number */

   if (syscall_no == __NR_nanosleep) {
      ULong t_now, t_awaken;
      struct vki_timespec* req;
      req = (struct vki_timespec*)vg_threads[tid].m_ebx; /* arg1 */
      t_now = VG_(read_microsecond_timer)();
      t_awaken
         = t_now
           + (ULong)1000000ULL * (ULong)(req->tv_sec)
           + (ULong)( (UInt)(req->tv_nsec) / 1000 );
      vg_threads[tid].status    = VgTs_Sleeping;
      vg_threads[tid].awaken_at = t_awaken;
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf, "at %lu: nanosleep for %lu",
                               t_now, t_awaken-t_now);
         print_sched_event(tid, msg_buf);
      }
      /* Force the scheduler to run something else for a while. */
      return;
   }

   switch (syscall_no) {
      case __NR_read:
      case __NR_write:
         assumed_nonblocking = False;
         might_block = fd_is_blockful(vg_threads[tid].m_ebx /* arg1 */);
         break;
      default:
         might_block = False;
         assumed_nonblocking = True;
   }

   if (assumed_nonblocking) {
      /* We think it's non-blocking.  Just do it in the normal way. */
      VG_(perform_assumed_nonblocking_syscall)(tid);
      /* The thread is still runnable. */
      return;
   }

   /* It might block.  Take evasive action. */
   switch (syscall_no) {
      case __NR_read:
      case __NR_write:
         fd = vg_threads[tid].m_ebx; break;
      default:
         vg_assert(3+3 == 7); /* cannot happen; see the switch above */
   }

   /* Set the fd to nonblocking, and do the syscall, which will return
      immediately, in order to lodge a request with the Linux kernel.
      We later poll for I/O completion using select(). */

   orig_fd_blockness = fd_is_blockful(fd);
   set_fd_nonblocking(fd);
   vg_assert(!fd_is_blockful(fd));
   VG_(check_known_blocking_syscall)(tid, syscall_no, NULL /* PRE */);

   /* This trashes the thread's %eax; we have to preserve it. */
   saved_eax = vg_threads[tid].m_eax;
   KERNEL_DO_SYSCALL(tid,res);

   /* Restore original blockfulness of the fd. */
   if (orig_fd_blockness)
      set_fd_blocking(fd);
   else
      set_fd_nonblocking(fd);

   if (res != -VKI_EWOULDBLOCK) {
      /* It didn't block; it went through immediately.  So finish off
         in the normal way.  Don't restore %EAX, since that now
         (correctly) holds the result of the call. */
      VG_(check_known_blocking_syscall)(tid, syscall_no, &res /* POST */);
      /* We're still runnable. */
      vg_assert(vg_threads[tid].status == VgTs_Runnable);

   } else {

      /* It would have blocked.  First, restore %EAX to what it was
         before our speculative call. */
      vg_threads[tid].m_eax = saved_eax;
      /* Put this fd in a table of fds on which we are waiting for
         completion.  The arguments for select() later are constructed
         from this table. */
      add_waiting_fd(tid, fd, saved_eax /* which holds the syscall # */);
      /* Deschedule thread until an I/O completion happens. */
      vg_threads[tid].status = VgTs_WaitFD;
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,"block until I/O ready on fd %d", fd);
         print_sched_event(tid, msg_buf);
      }

   }
}


/* Find out which of the fds in vg_waiting_fds are now ready to go, by
   making enquiries with select(), and mark them as ready.  We have to
   wait for the requesting threads to fall into the WaitFD state
   before we can actually finally deliver the results, so this
   procedure doesn't do that; complete_blocked_syscalls() does it.

   It might seem odd that a thread which has done a blocking syscall
   is not in WaitFD state; the way this can happen is if it initially
   becomes WaitFD, but then a signal is delivered to it, so it becomes
   Runnable for a while.  In this case we have to wait for the
   sighandler to return, whereupon the WaitFD state is resumed, and
   only at that point can the I/O result be delivered to it.  However,
   this point may be long after the fd is actually ready.

   So, poll_for_ready_fds() merely detects fds which are ready.
   complete_blocked_syscalls() does the second half of the trick,
   possibly much later: it delivers the results from ready fds to
   threads in WaitFD state.
*/
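
/* In outline, the two phases look like this (a restatement of the
   code below, using the real field names; no extra machinery):

      poll_for_ready_fds:                          -- phase 1: detect
         select() on all waiting, not-yet-ready fds;
         for each slot whose fd select() reports ready:
            slot.ready = True;

      complete_blocked_syscalls:                   -- phase 2: deliver
         for each slot with ready == True
                       and vg_threads[slot.tid].status == VgTs_WaitFD:
            re-issue the syscall via KERNEL_DO_SYSCALL;
            mark the thread Runnable;
            slot.fd = -1;                          -- free the slot
*/
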
void poll_for_ready_fds ( void )
{
   vki_ksigset_t      saved_procmask;
   vki_fd_set         readfds;
   vki_fd_set         writefds;
   vki_fd_set         exceptfds;
   struct vki_timeval timeout;
   Int                fd, fd_max, i, n_ready, syscall_no, n_ok;
   ThreadId           tid;
   Bool               rd_ok, wr_ok, ex_ok;
   Char               msg_buf[100];

   struct vki_timespec* rem;
   ULong t_now;

   /* Awaken any sleeping threads whose sleep has expired. */
   t_now = VG_(read_microsecond_timer)();
   for (tid = 0; tid < VG_N_THREADS; tid++) {
      if (vg_threads[tid].status != VgTs_Sleeping)
         continue;
      if (t_now >= vg_threads[tid].awaken_at) {
         /* Resume this thread.  Set to zero the remaining-time (second)
            arg of nanosleep, since it's used up all its time. */
         vg_assert(vg_threads[tid].m_eax == __NR_nanosleep);
         rem = (struct vki_timespec *)vg_threads[tid].m_ecx; /* arg2 */
         if (rem != NULL) {
            rem->tv_sec = 0;
            rem->tv_nsec = 0;
         }
         /* Make the syscall return 0 (success). */
         vg_threads[tid].m_eax = 0;
         /* Reschedule this thread. */
         vg_threads[tid].status = VgTs_Runnable;
         if (VG_(clo_trace_sched)) {
            VG_(sprintf)(msg_buf, "at %lu: nanosleep done",
                                  t_now);
            print_sched_event(tid, msg_buf);
         }
      }
   }

   /* And look for threads waiting on file descriptors which are now
      ready for I/O. */
   timeout.tv_sec = 0;
   timeout.tv_usec = 0;

   VKI_FD_ZERO(&readfds);
   VKI_FD_ZERO(&writefds);
   VKI_FD_ZERO(&exceptfds);
   fd_max = -1;
   for (i = 0; i < VG_N_WAITING_FDS; i++) {
      if (vg_waiting_fds[i].fd == -1 /* not in use */)
         continue;
      if (vg_waiting_fds[i].ready /* already ready? */)
         continue;
      fd = vg_waiting_fds[i].fd;
      /* VG_(printf)("adding QUERY for fd %d\n", fd); */
      vg_assert(fd >= 0);
      if (fd > fd_max)
         fd_max = fd;
      tid = vg_waiting_fds[i].tid;
      vg_assert(tid >= 0 && tid < VG_N_THREADS);
      syscall_no = vg_waiting_fds[i].syscall_no;
      switch (syscall_no) {
         case __NR_read:
            VKI_FD_SET(fd, &readfds); break;
         case __NR_write:
            VKI_FD_SET(fd, &writefds); break;
         default:
            VG_(panic)("poll_for_ready_fds: unexpected syscall");
            /*NOTREACHED*/
            break;
      }
   }

   /* Short cut: if no fds are waiting, give up now. */
   if (fd_max == -1)
      return;

   /* BLOCK ALL SIGNALS.  We don't want the complication of select()
      getting interrupted. */
   VG_(block_all_host_signals)( &saved_procmask );

   n_ready = VG_(select)
                ( fd_max+1, &readfds, &writefds, &exceptfds, &timeout);
   if (VG_(is_kerror)(n_ready)) {
      VG_(printf)("poll_for_ready_fds: select returned %d\n", n_ready);
      VG_(panic)("poll_for_ready_fds: select failed?!");
      /*NOTREACHED*/
   }

   /* UNBLOCK ALL SIGNALS */
   VG_(restore_host_signals)( &saved_procmask );

   /* VG_(printf)("poll_for_io_completions: %d fds ready\n", n_ready); */

   if (n_ready == 0)
      return;

   /* Inspect all the fds we know about, and handle any completions that
      have happened. */
   /*
   VG_(printf)("\n\n");
   for (fd = 0; fd < 100; fd++)
      if (VKI_FD_ISSET(fd, &writefds) || VKI_FD_ISSET(fd, &readfds)) {
         VG_(printf)("X"); } else { VG_(printf)("."); };
   VG_(printf)("\n\nfd_max = %d\n", fd_max);
   */

   for (fd = 0; fd <= fd_max; fd++) {
      rd_ok = VKI_FD_ISSET(fd, &readfds);
      wr_ok = VKI_FD_ISSET(fd, &writefds);
      ex_ok = VKI_FD_ISSET(fd, &exceptfds);

      n_ok = (rd_ok ? 1 : 0) + (wr_ok ? 1 : 0) + (ex_ok ? 1 : 0);
      if (n_ok == 0)
         continue;
      if (n_ok > 1) {
         VG_(printf)("offending fd = %d\n", fd);
         VG_(panic)("poll_for_ready_fds: multiple events on fd");
      }

      /* An I/O event completed for fd.  Find the thread which
         requested this. */
      for (i = 0; i < VG_N_WAITING_FDS; i++) {
         if (vg_waiting_fds[i].fd == -1 /* not in use */)
            continue;
         if (vg_waiting_fds[i].fd == fd)
            break;
      }

      /* And a bit more paranoia ... */
      vg_assert(i >= 0 && i < VG_N_WAITING_FDS);

      /* Mark the fd as ready. */
      vg_assert(! vg_waiting_fds[i].ready);
      vg_waiting_fds[i].ready = True;
   }
}


/* See comment attached to poll_for_ready_fds() for explanation. */
void complete_blocked_syscalls ( void )
{
   Int      fd, i, res, syscall_no;
   ThreadId tid;
   Char     msg_buf[100];

   /* Inspect all the outstanding fds we know about. */

   for (i = 0; i < VG_N_WAITING_FDS; i++) {
      if (vg_waiting_fds[i].fd == -1 /* not in use */)
         continue;
      if (! vg_waiting_fds[i].ready)
         continue;

      fd  = vg_waiting_fds[i].fd;
      tid = vg_waiting_fds[i].tid;
      vg_assert(tid >= 0 && tid < VG_N_THREADS);

      /* The thread actually has to be waiting for the I/O event it
         requested before we can deliver the result! */
      if (vg_threads[tid].status != VgTs_WaitFD)
         continue;

      /* Ok, actually do it!  We can safely use %EAX as the syscall
         number, because the speculative call made by
         sched_do_syscall() doesn't change %EAX in the case where the
         call would have blocked. */

      syscall_no = vg_waiting_fds[i].syscall_no;
      vg_assert(syscall_no == vg_threads[tid].m_eax);
      KERNEL_DO_SYSCALL(tid,res);
      VG_(check_known_blocking_syscall)(tid, syscall_no, &res /* POST */);

      /* Reschedule. */
      vg_threads[tid].status = VgTs_Runnable;
      /* Mark slot as no longer in use. */
      vg_waiting_fds[i].fd = -1;
      /* pp_sched_status(); */
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,"resume due to I/O completion on fd %d", fd);
         print_sched_event(tid, msg_buf);
      }
   }
}


static
void nanosleep_for_a_while ( void )
{
   Int res;
   struct vki_timespec req;
   struct vki_timespec rem;
   req.tv_sec  = 0;
   req.tv_nsec = 20 * 1000 * 1000;
   res = VG_(nanosleep)( &req, &rem );
   /* VG_(printf)("after ns, unused = %d\n", rem.tv_nsec ); */
   vg_assert(res == 0);
}


/* ---------------------------------------------------------------------
   The scheduler proper.
   ------------------------------------------------------------------ */

/* Run user-space threads until either
   * Deadlock occurs
   * One thread asks to shutdown Valgrind
   * The specified number of basic blocks has gone by.
*/
VgSchedReturnCode VG_(scheduler) ( void )
{
   ThreadId tid, tid_next;
   UInt     trc;
   UInt     dispatch_ctr_SAVED;
   Int      request_code, done_this_time, n_in_fdwait_or_sleep;
   Char     msg_buf[100];
   Addr     trans_addr;

   /* For the LRU structures, records when the epoch began. */
   ULong lru_epoch_started_at = 0;

   /* Start with the root thread.  tid in general indicates the
      currently runnable/just-finished-running thread. */
   tid = 0;

   /* This is the top level scheduler loop.  It falls into three
      phases. */
   while (True) {

      /* ======================= Phase 1 of 3 =======================
         Handle I/O completions and signals.  This may change the
         status of various threads.  Then select a new thread to run,
         or declare deadlock, or sleep if there are no runnable
         threads but some are blocked on I/O. */

      /* Age the LRU structures if an epoch has been completed. */
      if (VG_(bbs_done) - lru_epoch_started_at >= VG_BBS_PER_EPOCH) {
         lru_epoch_started_at = VG_(bbs_done);
         increment_epoch();
      }

      /* Was a debug-stop requested? */
      if (VG_(bbs_to_go) == 0)
         goto debug_stop;

      /* Do the following loop until a runnable thread is found, or
         deadlock is detected. */
      while (True) {

         /* For stats purposes only. */
         VG_(num_scheduling_events_MAJOR) ++;

         /* See if any I/O operations which we were waiting for have
            completed, and, if so, make runnable the relevant waiting
            threads. */
         poll_for_ready_fds();
         complete_blocked_syscalls();

         /* See if there are any signals which need to be delivered.  If
            so, choose thread(s) to deliver them to, and build signal
            delivery frames on those thread(s) stacks. */
         VG_(deliver_signals)( 0 /*HACK*/ );
         VG_(do_sanity_checks)(0 /*HACK*/, False);

         /* Try and find a thread (tid) to run. */
         tid_next = tid;
         n_in_fdwait_or_sleep = 0;
         while (True) {
            tid_next++;
            if (tid_next >= VG_N_THREADS) tid_next = 0;
            if (vg_threads[tid_next].status == VgTs_WaitFD
                || vg_threads[tid_next].status == VgTs_Sleeping)
               n_in_fdwait_or_sleep ++;
            if (vg_threads[tid_next].status == VgTs_Runnable)
               break; /* We can run this one. */
            if (tid_next == tid)
               break; /* been all the way round */
         }
         tid = tid_next;

         if (vg_threads[tid].status == VgTs_Runnable) {
            /* Found a suitable candidate.  Fall out of this loop, so
               we can advance to Phase 2 of the scheduler: actually
               running the thread. */
            break;
         }

         /* We didn't find a runnable thread.  Now what? */
         if (n_in_fdwait_or_sleep == 0) {
            /* No runnable threads and no prospect of any appearing
               even if we wait for an arbitrary length of time.  In
               short, we have a deadlock. */
            pp_sched_status();
            return VgSrc_Deadlock;
         }

         /* At least one thread is in a fd-wait state.  Delay for a
            while, and go round again, in the hope that eventually a
            thread becomes runnable. */
         nanosleep_for_a_while();
         // pp_sched_status();
         // VG_(printf)(".\n");
      }


      /* ======================= Phase 2 of 3 =======================
         Wahey!  We've finally decided that thread tid is runnable, so
         we now do that.  Run it for as much of a quantum as possible.
         Trivial requests are handled and the thread continues.  The
         aim is to avoid doing Phase 1 too often, since it is
         expensive. */

      if (0)
         VG_(printf)("SCHED: tid %d, used %d\n", tid, VG_N_THREADS);

      /* Figure out how many bbs to ask vg_run_innerloop to do.  Note
         that it decrements the counter before testing it for zero, so
         that if VG_(dispatch_ctr) is set to N you get at most N-1
         iterations.  Also this means that VG_(dispatch_ctr) must
         exceed zero before entering the innerloop.  Also also, the
         decrement is done before the bb is actually run, so you
         always get at least one decrement even if nothing happens.
      */
      if (VG_(bbs_to_go) >= VG_SCHEDULING_QUANTUM)
         VG_(dispatch_ctr) = VG_SCHEDULING_QUANTUM + 1;
      else
         VG_(dispatch_ctr) = (UInt)VG_(bbs_to_go) + 1;

      /* ... and remember what we asked for. */
      dispatch_ctr_SAVED = VG_(dispatch_ctr);

      /* Actually run thread tid. */
      while (True) {

         /* For stats purposes only. */
         VG_(num_scheduling_events_MINOR) ++;

         if (0)
            VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs",
                                      tid, VG_(dispatch_ctr) - 1 );

         trc = run_thread_for_a_while ( tid );

         /* Deal quickly with trivial scheduling events, and resume the
            thread. */

         if (trc == VG_TRC_INNER_FASTMISS) {
            vg_assert(VG_(dispatch_ctr) > 0);

            /* Trivial event.  Miss in the fast-cache.  Do a full
               lookup for it. */
            trans_addr
               = VG_(search_transtab) ( vg_threads[tid].m_eip );
            if (trans_addr == (Addr)0) {
               /* Not found; we need to request a translation. */
               VG_(create_translation_for)( vg_threads[tid].m_eip );
               trans_addr = VG_(search_transtab) ( vg_threads[tid].m_eip );
               if (trans_addr == (Addr)0)
                  VG_(panic)("VG_TRC_INNER_FASTMISS: missing tt_fast entry");
            }
            continue; /* with this thread */
         }

         if (trc == VG_TRC_EBP_JMP_CLIENTREQ) {
            Bool is_triv = maybe_do_trivial_clientreq(tid);
            if (is_triv) {
               /* NOTE: a trivial request is something like a call to
                  malloc() or free().  It DOES NOT change the
                  Runnability of this thread nor the status of any
                  other thread; it is purely thread-local. */
               continue; /* with this thread */
            }
         }

         /* It's a non-trivial event.  Give up running this thread and
            handle things the expensive way. */
         break;
      }

      /* ======================= Phase 3 of 3 =======================
         Handle non-trivial thread requests, mostly pthread stuff. */

      /* Ok, we've fallen out of the dispatcher for a
         non-completely-trivial reason.  First, update basic-block
         counters. */

      done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 1;
      vg_assert(done_this_time >= 0);
      VG_(bbs_to_go) -= (ULong)done_this_time;
      VG_(bbs_done)  += (ULong)done_this_time;

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d: completed %d bbs, trc %d",
                                   tid, done_this_time, (Int)trc );

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d: %ld bbs, event %s",
                                   tid, VG_(bbs_done),
                                   name_of_sched_event(trc) );

      /* Examine the thread's return code to figure out why it
         stopped, and handle requests. */

      switch (trc) {

         case VG_TRC_INNER_FASTMISS:
            VG_(panic)("VG_(scheduler): VG_TRC_INNER_FASTMISS");
            /*NOTREACHED*/
            break;

         case VG_TRC_INNER_COUNTERZERO:
            /* Timeslice is out.  Let a new thread be scheduled,
               simply by doing nothing, causing us to arrive back at
               Phase 1. */
            if (VG_(bbs_to_go) == 0) {
               goto debug_stop;
            }
            vg_assert(VG_(dispatch_ctr) == 0);
            break;

         case VG_TRC_UNRESUMABLE_SIGNAL:
            /* It got a SIGSEGV/SIGBUS, which we need to deliver right
               away.  Again, do nothing, so we wind up back at Phase
               1, whereupon the signal will be "delivered". */
            break;

         case VG_TRC_EBP_JMP_SYSCALL:
            /* Do a syscall for the vthread tid.  This could cause it
               to become non-runnable. */
            sched_do_syscall(tid);
            break;

         case VG_TRC_EBP_JMP_CLIENTREQ:
            /* Do a client request for the vthread tid.  Note that
               some requests will have been handled by
               maybe_do_trivial_clientreq(), so we don't expect to see
               those here.
            */
            /* The thread's %EAX points at an arg block, the first
               word of which is the request code. */
            request_code = ((UInt*)(vg_threads[tid].m_eax))[0];
            if (0) {
               VG_(sprintf)(msg_buf, "request 0x%x", request_code );
               print_sched_event(tid, msg_buf);
            }
            /* Do a non-trivial client request for thread tid.  tid's
               %EAX points to a short vector of argument words, the
               first of which is the request code.  The result of the
               request is put in tid's %EDX.  Alternatively, perhaps
               the request causes tid to become non-runnable and/or
               other blocked threads become runnable.  In general we
               can and often do mess with the state of arbitrary
               threads at this point. */
            if (request_code == VG_USERREQ__SHUTDOWN_VALGRIND) {
               return VgSrc_Shutdown;
            } else {
               do_nontrivial_clientreq(tid);
            }
            break;

         default:
            VG_(printf)("\ntrc = %d\n", trc);
            VG_(panic)("VG_(scheduler), phase 3: "
                       "unexpected thread return code");
            /* NOTREACHED */
            break;

      } /* switch (trc) */

      /* That completes Phase 3 of 3.  Return now to the top of the
         main scheduler loop, to Phase 1 of 3. */

   } /* top-level scheduler loop */


   /* NOTREACHED */
   VG_(panic)("scheduler: post-main-loop ?!");
   /* NOTREACHED */

  debug_stop:
   /* If we exited because of a debug stop, print the translation
      of the last block executed -- by translating it again, and
      throwing away the result. */
   VG_(printf)(
      "======vvvvvvvv====== LAST TRANSLATION ======vvvvvvvv======\n");
   VG_(translate)( vg_threads[tid].m_eip, NULL, NULL, NULL );
   VG_(printf)("\n");
   VG_(printf)(
      "======^^^^^^^^====== LAST TRANSLATION ======^^^^^^^^======\n");

   return VgSrc_BbsDone;
}


/* ---------------------------------------------------------------------
   The pthread implementation.
   ------------------------------------------------------------------ */

#include <pthread.h>
#include <errno.h>

#if !defined(PTHREAD_STACK_MIN)
#  define PTHREAD_STACK_MIN (16384 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB)
#endif

/* /usr/include/bits/pthreadtypes.h:
   typedef unsigned long int pthread_t;
*/


static
void do_pthread_cancel ( ThreadId  tid_canceller,
                         pthread_t tid_cancellee )
{
   Char msg_buf[100];
   /* We want to make it appear that this thread has returned to
      VG_(pthreadreturn_bogusRA) with PTHREAD_CANCELED as the
      return value.  So: simple: put PTHREAD_CANCELED into %EAX
      and &VG_(pthreadreturn_bogusRA) into %EIP and keep going! */
   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf, "cancelled by %d", tid_canceller);
      print_sched_event(tid_cancellee, msg_buf);
   }
   vg_threads[tid_cancellee].m_eax  = (UInt)PTHREAD_CANCELED;
   vg_threads[tid_cancellee].m_eip  = (UInt)&VG_(pthreadreturn_bogusRA);
   vg_threads[tid_cancellee].status = VgTs_Runnable;
}



/* Thread tid is exiting, by returning from the function it was
   created with.  Or possibly due to pthread_exit or cancellation.
   The main complication here is to resume any thread waiting to join
   with this one. */
static
void handle_pthread_return ( ThreadId tid, void* retval )
{
   ThreadId jnr; /* joiner, the thread calling pthread_join. */
   UInt*    jnr_args;
   void**   jnr_thread_return;
   Char     msg_buf[100];

   /* Mark it as not in use.  Leave the stack in place so the next
      user of this slot doesn't reallocate it. */
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   vg_assert(vg_threads[tid].status != VgTs_Empty);

   vg_threads[tid].retval = retval;

   if (vg_threads[tid].joiner == VG_INVALID_THREADID) {
      /* No one has yet done a join on me */
      vg_threads[tid].status = VgTs_WaitJoiner;
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,
            "root fn returns, waiting for a call to pthread_join(%d)",
            tid);
         print_sched_event(tid, msg_buf);
      }
   } else {
      /* Someone is waiting; make their join call return with success,
         putting my exit code in the place specified by the caller's
         thread_return param.  This is all very horrible, since we
         need to consult the joiner's arg block -- pointed to by its
         %EAX -- in order to extract the 2nd param of its pthread_join
         call.  TODO: free properly the slot (also below).
      */
      jnr = vg_threads[tid].joiner;
      vg_assert(jnr >= 0 && jnr < VG_N_THREADS);
      vg_assert(vg_threads[jnr].status == VgTs_WaitJoinee);
      jnr_args = (UInt*)vg_threads[jnr].m_eax;
      jnr_thread_return = (void**)(jnr_args[2]);
      if (jnr_thread_return != NULL)
         *jnr_thread_return = vg_threads[tid].retval;
      vg_threads[jnr].m_edx  = 0; /* success */
      vg_threads[jnr].status = VgTs_Runnable;
      vg_threads[tid].status = VgTs_Empty; /* bye! */
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,
            "root fn returns, to find a waiting pthread_join(%d)", tid);
         print_sched_event(tid, msg_buf);
         VG_(sprintf)(msg_buf,
            "my pthread_join(%d) returned; resuming", tid);
         print_sched_event(jnr, msg_buf);
      }
   }

   /* Return value is irrelevant; this thread will not get
      rescheduled. */
}


static
void do_pthread_join ( ThreadId tid, ThreadId jee, void** thread_return )
{
   Char msg_buf[100];

   /* jee, the joinee, is the thread specified as an arg in thread
      tid's call to pthread_join.  So tid is the join-er. */
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   vg_assert(vg_threads[tid].status == VgTs_Runnable);

   if (jee == tid) {
      vg_threads[tid].m_edx = EDEADLK; /* libc constant, not a kernel one */
      vg_threads[tid].status = VgTs_Runnable;
      return;
   }

   if (jee < 0
       || jee >= VG_N_THREADS
       || vg_threads[jee].status == VgTs_Empty) {
      /* Invalid thread to join to. */
      vg_threads[tid].m_edx = EINVAL;
      vg_threads[tid].status = VgTs_Runnable;
      return;
   }

   if (vg_threads[jee].joiner != VG_INVALID_THREADID) {
      /* Someone already did join on this thread */
      vg_threads[tid].m_edx = EINVAL;
      vg_threads[tid].status = VgTs_Runnable;
      return;
   }

   /* if (vg_threads[jee].detached) ... */

   /* Perhaps the joinee has already finished?  If so return
      immediately with its return code, and free up the slot.  TODO:
      free it properly (also above). */
   if (vg_threads[jee].status == VgTs_WaitJoiner) {
      vg_assert(vg_threads[jee].joiner == VG_INVALID_THREADID);
      vg_threads[tid].m_edx = 0; /* success */
      if (thread_return != NULL)
         *thread_return = vg_threads[jee].retval;
      vg_threads[tid].status = VgTs_Runnable;
      vg_threads[jee].status = VgTs_Empty; /* bye! */
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,
            "someone called pthread_join() on me; bye!");
         print_sched_event(jee, msg_buf);
         VG_(sprintf)(msg_buf,
            "my pthread_join(%d) returned immediately",
            jee );
         print_sched_event(tid, msg_buf);
      }
      return;
   }

   /* Ok, so we'll have to wait on jee. */
   vg_threads[jee].joiner = tid;
   vg_threads[tid].status = VgTs_WaitJoinee;
   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf,
         "blocking on call of pthread_join(%d)", jee );
      print_sched_event(tid, msg_buf);
   }
   /* So tid's join call does not return just now. */
}


static
void do_pthread_create ( ThreadId parent_tid,
                         pthread_t* thread,
                         pthread_attr_t* attr,
                         void* (*start_routine)(void *),
                         void* arg )
{
   Addr     new_stack;
   UInt     new_stk_szb;
   ThreadId tid;
   Char     msg_buf[100];

   /* Paranoia ... */
   vg_assert(sizeof(pthread_t) == sizeof(UInt));

   vg_assert(vg_threads[parent_tid].status != VgTs_Empty);

   tid = vg_alloc_ThreadState();

   /* If we've created the main thread's tid, we're in deep trouble :) */
   vg_assert(tid != 0);

   /* Copy the parent's CPU state into the child's, in a roundabout
      way (via baseBlock). */
   VG_(load_thread_state)(parent_tid);
   VG_(save_thread_state)(tid);

   /* Consider allocating the child a stack, if the one it already has
      is inadequate. */
   new_stk_szb = PTHREAD_STACK_MIN;

   if (new_stk_szb > vg_threads[tid].stack_size) {
      /* Again, for good measure :) We definitely don't want to be
         allocating a stack for the main thread. */
      vg_assert(tid != 0);
      /* for now, we don't handle the case of anything other than
         assigning it for the first time. */
      vg_assert(vg_threads[tid].stack_size == 0);
      vg_assert(vg_threads[tid].stack_base == (Addr)NULL);
      new_stack = (Addr)VG_(get_memory_from_mmap)( new_stk_szb );
      vg_threads[tid].stack_base = new_stack;
      vg_threads[tid].stack_size = new_stk_szb;
      vg_threads[tid].m_esp
         = new_stack + new_stk_szb
                     - VG_AR_CLIENT_STACKBASE_REDZONE_SZB;
   }
   if (VG_(clo_instrument))
      VGM_(make_noaccess)( vg_threads[tid].m_esp,
                           VG_AR_CLIENT_STACKBASE_REDZONE_SZB );

   /* push arg */
   vg_threads[tid].m_esp -= 4;
   * (UInt*)(vg_threads[tid].m_esp) = (UInt)arg;

   /* push (magical) return address */
   vg_threads[tid].m_esp -= 4;
   * (UInt*)(vg_threads[tid].m_esp) = (UInt)VG_(pthreadreturn_bogusRA);

   if (VG_(clo_instrument))
      VGM_(make_readable)( vg_threads[tid].m_esp, 2 * 4 );

   /* this is where we start */
   vg_threads[tid].m_eip = (UInt)start_routine;

   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf,
         "new thread, created by %d", parent_tid );
      print_sched_event(tid, msg_buf);
   }

   /* store the thread id in *thread. */
   // if (VG_(clo_instrument))
   // ***** CHECK *thread is writable
   *thread = (pthread_t)tid;

   /* return zero */
   vg_threads[tid].joiner = VG_INVALID_THREADID;
   vg_threads[tid].status = VgTs_Runnable;
   vg_threads[tid].m_edx  = 0; /* success */
}
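
/* The child's initial stack, as set up above (a restatement of the
   two pushes, not new machinery; the stack grows downwards):

      m_esp+4 : arg                          -- start_routine's argument
      m_esp   : VG_(pthreadreturn_bogusRA)   -- magic return address

   So when start_routine returns, control passes to
   VG_(pthreadreturn_bogusRA), whose job -- presumably in the
   client-side support code, which is not shown in this file -- is to
   hand the return value back via the VG_USERREQ__PTHREAD_RETURNS
   request handled below. */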


/* Horrible hacks to do with pthread_mutex_t: the real pthread_mutex_t
   is a struct with at least 5 words:
      typedef struct
      {
        int __m_reserved;          -- Reserved for future use
        int __m_count;             -- Depth of recursive locking
        _pthread_descr __m_owner;  -- Owner thread (if recursive or errcheck)
        int __m_kind;              -- Mutex kind: fast, recursive or errcheck
        struct _pthread_fastlock __m_lock;  -- Underlying fast lock
      } pthread_mutex_t;
   Ours is just a single word, an index into vg_mutexes[].
   For now I'll park it in the __m_reserved field.

   Uninitialised mutexes (PTHREAD_MUTEX_INITIALIZER) all have
   a zero __m_count field (see /usr/include/pthread.h).  So I'll
   use zero to mean non-inited, and 1 to mean inited.

   How convenient.
*/

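/* In other words (a sketch of the scheme just described, using the
   glibc LinuxThreads field names above; nothing beyond what the code
   below actually does):

      mutex->__m_count == 0  =>  not initialised by us yet
      mutex->__m_count == 1  =>  initialised; then
      mutex->__m_reserved    =>  index (MutexId) into vg_mutexes[]

   so pthread_mutex_lock on an uninitialised mutex first runs
   initialise_mutex(), and thereafter all held/owner state lives in
   vg_mutexes[mutex->__m_reserved]. */
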
static
void initialise_mutex ( ThreadId tid, pthread_mutex_t *mutex )
{
   MutexId mid;
   Char    msg_buf[100];
   /* vg_alloc_VgMutex aborts if we can't allocate a mutex, for
      whatever reason. */
   mid = vg_alloc_VgMutex();
   vg_mutexes[mid].in_use = True;
   vg_mutexes[mid].held   = False;
   vg_mutexes[mid].owner  = VG_INVALID_THREADID; /* irrelevant */
   mutex->__m_reserved = mid;
   mutex->__m_count    = 1; /* initialised */
   if (VG_(clo_trace_pthread)) {
      VG_(sprintf)(msg_buf, "(initialise mutex) (%p) -> %d",
                            mutex, mid );
      print_pthread_event(tid, msg_buf);
   }
}

/* Allocate a new MutexId and write it into *mutex.  Ideally take
   notice of the attributes in *mutexattr. */
static
void do_pthread_mutex_init ( ThreadId tid,
                             pthread_mutex_t *mutex,
                             const pthread_mutexattr_t *mutexattr)
{
   Char msg_buf[100];
   /* Paranoia ... */
   vg_assert(sizeof(pthread_mutex_t) >= sizeof(UInt));

   initialise_mutex(tid, mutex);

   if (VG_(clo_trace_pthread)) {
      VG_(sprintf)(msg_buf, "pthread_mutex_init (%p) -> %d",
                            mutex, mutex->__m_reserved );
      print_pthread_event(tid, msg_buf);
   }

   /*
   RETURN VALUE
      pthread_mutex_init always returns 0.  The other mutex functions
      return 0 on success and a non-zero error code on error.
   */
   /* THIS THREAD returns with 0. */
   vg_threads[tid].m_edx = 0;
}


static
void do_pthread_mutex_lock( ThreadId tid, pthread_mutex_t *mutex )
{
   MutexId mid;
   Char    msg_buf[100];

   /* *mutex contains the MutexId, or one of the magic values
      PTHREAD_*MUTEX_INITIALIZER*, indicating we need to initialise it
      now.  See comment(s) above re use of __m_count to indicate
      initialisation status.
   */

   /* POSIX doesn't mandate this, but for sanity ... */
   if (mutex == NULL) {
      vg_threads[tid].m_edx = EINVAL;
      return;
   }

   if (mutex->__m_count == 0) {
      initialise_mutex(tid, mutex);
   }

   mid = mutex->__m_reserved;
   if (mid < 0 || mid >= VG_N_MUTEXES || !vg_mutexes[mid].in_use) {
      vg_threads[tid].m_edx = EINVAL;
      return;
   }

   if (VG_(clo_trace_pthread)) {
      VG_(sprintf)(msg_buf, "pthread_mutex_lock   %d (%p)",
                            mid, mutex );
      print_pthread_event(tid, msg_buf);
   }

   /* Assert initialised. */
   vg_assert(mutex->__m_count == 1);

   /* Assume tid valid. */
   vg_assert(vg_threads[tid].status == VgTs_Runnable);

   if (vg_mutexes[mid].held) {
      if (vg_mutexes[mid].owner == tid) {
         vg_threads[tid].m_edx = EDEADLK;
         return;
      }
      /* Someone else has it; we have to wait. */
      vg_threads[tid].status = VgTs_WaitMX;
      vg_threads[tid].waited_on_mid = mid;
      /* No assignment to %EDX, since we're blocking. */
      if (VG_(clo_trace_pthread)) {
         VG_(sprintf)(msg_buf, "pthread_mutex_lock   %d (%p): BLOCK",
                               mid, mutex );
         print_pthread_event(tid, msg_buf);
      }
   } else {
      /* We get it! */
      vg_mutexes[mid].held  = True;
      vg_mutexes[mid].owner = tid;
      /* return 0 (success). */
      vg_threads[tid].m_edx = 0;
   }
}


static
void do_pthread_mutex_unlock ( ThreadId tid,
                               pthread_mutex_t *mutex )
{
   MutexId mid;
   Int     i;
   Char    msg_buf[100];

   if (mutex == NULL
       || mutex->__m_count != 1) {
      vg_threads[tid].m_edx = EINVAL;
      return;
   }

   mid = mutex->__m_reserved;
   if (mid < 0 || mid >= VG_N_MUTEXES || !vg_mutexes[mid].in_use) {
      vg_threads[tid].m_edx = EINVAL;
      return;
   }

   if (VG_(clo_trace_pthread)) {
      VG_(sprintf)(msg_buf, "pthread_mutex_unlock %d (%p)",
                            mid, mutex );
      print_pthread_event(tid, msg_buf);
   }

   /* Assume tid valid */
   vg_assert(vg_threads[tid].status == VgTs_Runnable);

   /* Barf if we don't currently hold the mutex. */
   if (!vg_mutexes[mid].held || vg_mutexes[mid].owner != tid) {
      vg_threads[tid].m_edx = EPERM;
      return;
   }

   /* Find some arbitrary thread waiting on this mutex, and make it
      runnable.  If none are waiting, mark the mutex as not held. */
   for (i = 0; i < VG_N_THREADS; i++) {
      if (vg_threads[i].status == VgTs_Empty)
         continue;
      if (vg_threads[i].status == VgTs_WaitMX
          && vg_threads[i].waited_on_mid == mid)
         break;
   }

   vg_assert(i <= VG_N_THREADS);
   if (i == VG_N_THREADS) {
      /* Nobody else is waiting on it. */
      vg_mutexes[mid].held = False;
   } else {
      /* Notionally transfer the hold to thread i, whose
         pthread_mutex_lock() call now returns with 0 (success). */
      vg_mutexes[mid].owner = i;
      vg_threads[i].status = VgTs_Runnable;
      vg_threads[i].m_edx = 0; /* pth_lock() success */

      if (VG_(clo_trace_pthread)) {
         VG_(sprintf)(msg_buf, "pthread_mutex_lock   %d: RESUME",
                               mid );
         print_pthread_event(tid, msg_buf);
      }
   }

   /* In either case, our (tid's) pth_unlock() returns with 0
      (success). */
   vg_threads[tid].m_edx = 0; /* Success. */
}


static void do_pthread_mutex_destroy ( ThreadId tid,
                                       pthread_mutex_t *mutex )
{
   MutexId mid;
   Char    msg_buf[100];

   if (mutex == NULL
       || mutex->__m_count != 1) {
      vg_threads[tid].m_edx = EINVAL;
      return;
   }

   mid = mutex->__m_reserved;
   if (mid < 0 || mid >= VG_N_MUTEXES || !vg_mutexes[mid].in_use) {
      vg_threads[tid].m_edx = EINVAL;
      return;
   }

   if (VG_(clo_trace_pthread)) {
      VG_(sprintf)(msg_buf, "pthread_mutex_destroy %d (%p)",
                            mid, mutex );
      print_pthread_event(tid, msg_buf);
   }

   /* Assume tid valid */
   vg_assert(vg_threads[tid].status == VgTs_Runnable);

   /* Barf if the mutex is currently held. */
   if (vg_mutexes[mid].held) {
      vg_threads[tid].m_edx = EBUSY;
      return;
   }

   mutex->__m_count = 0; /* uninitialised */
   vg_mutexes[mid].in_use = False;
   vg_threads[tid].m_edx = 0;
}


/* ---------------------------------------------------------------------
   Handle non-trivial client requests.
   ------------------------------------------------------------------ */

static
void do_nontrivial_clientreq ( ThreadId tid )
{
   UInt* arg    = (UInt*)(vg_threads[tid].m_eax);
   UInt  req_no = arg[0];
   switch (req_no) {

      case VG_USERREQ__PTHREAD_CREATE:
         do_pthread_create( tid,
                            (pthread_t*)arg[1],
                            (pthread_attr_t*)arg[2],
                            (void*(*)(void*))arg[3],
                            (void*)arg[4] );
         break;

      case VG_USERREQ__PTHREAD_RETURNS:
         handle_pthread_return( tid, (void*)arg[1] );
         break;

      case VG_USERREQ__PTHREAD_JOIN:
         do_pthread_join( tid, arg[1], (void**)(arg[2]) );
         break;

      /* Sigh ... this probably will cause huge numbers of major
         (expensive) scheduling events, for no real reason.
         Perhaps should be classified as a trivial-request. */
      case VG_USERREQ__PTHREAD_GET_THREADID:
         vg_threads[tid].m_edx = tid;
         break;

      case VG_USERREQ__PTHREAD_MUTEX_INIT:
         do_pthread_mutex_init( tid,
                                (pthread_mutex_t *)(arg[1]),
                                (pthread_mutexattr_t *)(arg[2]) );
         break;

      case VG_USERREQ__PTHREAD_MUTEX_LOCK:
         do_pthread_mutex_lock( tid, (pthread_mutex_t *)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
         do_pthread_mutex_unlock( tid, (pthread_mutex_t *)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_MUTEX_DESTROY:
         do_pthread_mutex_destroy( tid, (pthread_mutex_t *)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_CANCEL:
         do_pthread_cancel( tid, (pthread_t)(arg[1]) );
         break;

      case VG_USERREQ__MAKE_NOACCESS:
      case VG_USERREQ__MAKE_WRITABLE:
      case VG_USERREQ__MAKE_READABLE:
      case VG_USERREQ__DISCARD:
      case VG_USERREQ__CHECK_WRITABLE:
      case VG_USERREQ__CHECK_READABLE:
      case VG_USERREQ__MAKE_NOACCESS_STACK:
      case VG_USERREQ__RUNNING_ON_VALGRIND:
      case VG_USERREQ__DO_LEAK_CHECK:
         vg_threads[tid].m_edx = VG_(handle_client_request) ( arg );
         break;

      case VG_USERREQ__SIGNAL_RETURNS:
         /* vthread tid is returning from a signal handler;
            modify its stack/regs accordingly. */
         VG_(signal_returns)(tid);
         break;

      default:
         VG_(printf)("panic'd on private request = 0x%x\n", arg[0] );
         VG_(panic)("do_nontrivial_clientreq: "
                    "unknown request");
         /*NOTREACHED*/
         break;
   }
}

1743
1744/*--------------------------------------------------------------------*/
1745/*--- end vg_scheduler.c ---*/
1746/*--------------------------------------------------------------------*/