
/*--------------------------------------------------------------------*/
/*--- A user-space pthreads implementation.        vg_scheduler.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an extensible x86 protected-mode
   emulator for monitoring program execution on x86-Unixes.

   Copyright (C) 2000-2004 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "valgrind.h"   /* for VG_USERREQ__RUNNING_ON_VALGRIND and
                           VG_USERREQ__DISCARD_TRANSLATIONS, and others */
#include "core.h"


/* ---------------------------------------------------------------------
   Types and globals for the scheduler.
   ------------------------------------------------------------------ */

/* ThreadId and ThreadState are defined in core.h. */

/* Globals.  A statically allocated array of threads.  NOTE: [0] is
   never used, to simplify the simulation of initialisers for
   LinuxThreads. */
ThreadState VG_(threads)[VG_N_THREADS];

/* The process' fork-handler stack. */
static Int              vg_fhstack_used = 0;
static ForkHandlerEntry vg_fhstack[VG_N_FORKHANDLERSTACK];


/* The tid of the thread currently in VG_(baseBlock). */
static ThreadId vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;

/* The tid either currently in baseBlock, or the last tid to have been
   saved out of baseBlock; this is only updated when a new thread is
   loaded into the baseBlock. */
static ThreadId vg_tid_last_in_baseBlock = VG_INVALID_THREADID;

/* vg_oursignalhandler() might longjmp().  Here's the jmp_buf. */
static jmp_buf scheduler_jmpbuf;
/* This says whether scheduler_jmpbuf is actually valid.  Needed so
   that our signal handler doesn't longjmp when the buffer isn't
   actually valid. */
static Bool    scheduler_jmpbuf_valid = False;
/* ... and if so, here's the signal which caused it to do so. */
static Int     longjmpd_on_signal;
/* If the current thread gets a synchronous unresumable signal, then
   its details are placed here by the signal handler, to be passed to
   the application's signal handler later on. */
static vki_ksiginfo_t unresumable_siginfo;

/* If != VG_INVALID_THREADID, this is the preferred tid to schedule */
static ThreadId prefer_sched = VG_INVALID_THREADID;

/* Keeping track of keys. */
typedef
   struct {
      /* Has this key been allocated ? */
      Bool inuse;
      /* If .inuse==True, records the address of the associated
         destructor, or NULL if none. */
      void (*destructor)(void*);
   }
   ThreadKeyState;

/* And our array of thread keys. */
static ThreadKeyState vg_thread_keys[VG_N_THREAD_KEYS];

typedef UInt ThreadKey;
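
/* Illustrative sketch only (the actual allocation logic lives in the
   pthread_key_create request handling further down this file, not
   shown in this excerpt): allocating a key presumably amounts to
   finding the first unused slot and handing its index back to the
   client as the key value, roughly

      for (k = 0; k < VG_N_THREAD_KEYS; k++)
         if (!vg_thread_keys[k].inuse) {
            vg_thread_keys[k].inuse      = True;
            vg_thread_keys[k].destructor = destructor;
            return k;
         }
*/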

/* The scheduler needs to know the address of __libc_freeres_wrapper
   so that it can be called at program exit. */
static Addr __libc_freeres_wrapper;

/* Forwards */
static void do_client_request ( ThreadId tid, UInt* args );
static void scheduler_sanity ( void );
static void do_pthread_mutex_timedlock_TIMEOUT ( ThreadId tid );
static void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid );
static void maybe_rendezvous_joiners_and_joinees ( void );

/* Stats. */
static UInt n_scheduling_events_MINOR = 0;
static UInt n_scheduling_events_MAJOR = 0;

void VG_(print_scheduler_stats)(void)
{
   VG_(message)(Vg_DebugMsg,
      "   %d/%d major/minor sched events.",
      n_scheduling_events_MAJOR, n_scheduling_events_MINOR);
}

/* ---------------------------------------------------------------------
   Helper functions for the scheduler.
   ------------------------------------------------------------------ */

__inline__
Bool VG_(is_valid_tid) ( ThreadId tid )
{
   /* tid is unsigned, hence no < 0 test. */
   if (tid == 0) return False;
   if (tid >= VG_N_THREADS) return False;
   if (VG_(threads)[tid].status == VgTs_Empty) return False;
   return True;
}


__inline__
Bool is_valid_or_empty_tid ( ThreadId tid )
{
   /* tid is unsigned, hence no < 0 test. */
   if (tid == 0) return False;
   if (tid >= VG_N_THREADS) return False;
   return True;
}
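/* Note: unlike VG_(is_valid_tid) above, is_valid_or_empty_tid only
   range-checks the tid, so slots whose status is VgTs_Empty are also
   accepted. */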


/* For constructing error messages only: try and identify a thread
   whose stack satisfies the predicate p, or return VG_INVALID_THREADID
   if none do.  A small complication is dealing with any currently
   VG_(baseBlock)-resident thread.
*/
ThreadId VG_(first_matching_thread_stack)
        ( Bool (*p) ( Addr stack_min, Addr stack_max, void* d ),
          void* d )
{
   ThreadId tid, tid_to_skip;

   tid_to_skip = VG_INVALID_THREADID;

   /* First check to see if there's a currently-loaded thread in
      VG_(baseBlock). */
   if (vg_tid_currently_in_baseBlock != VG_INVALID_THREADID) {
      tid = vg_tid_currently_in_baseBlock;
      if ( p ( VG_(baseBlock)[VGOFF_STACK_PTR],
               VG_(threads)[tid].stack_highest_word, d ) )
         return tid;
      else
         tid_to_skip = tid;
   }

   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (VG_(threads)[tid].status == VgTs_Empty) continue;
      if (tid == tid_to_skip) continue;
      if ( p ( ARCH_STACK_PTR(VG_(threads)[tid].arch),
               VG_(threads)[tid].stack_highest_word, d ) )
         return tid;
   }
   return VG_INVALID_THREADID;
}
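
/* Usage sketch (hypothetical caller, for illustration only -- the real
   callers live in the error-reporting code, as the comment above says):

      static Bool addr_in_stack ( Addr stack_min, Addr stack_max, void* d )
      {
         Addr a = *(Addr*)d;
         return stack_min <= a && a <= stack_max;
      }

      tid = VG_(first_matching_thread_stack)( addr_in_stack, &addr );
*/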


/* Print the scheduler status. */
void VG_(pp_sched_status) ( void )
{
   Int i;
   VG_(printf)("\nsched status:\n");
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty) continue;
      VG_(printf)("\nThread %d: status = ", i);
      switch (VG_(threads)[i].status) {
         case VgTs_Runnable:   VG_(printf)("Runnable"); break;
         case VgTs_WaitJoinee: VG_(printf)("WaitJoinee(%d)",
                                           VG_(threads)[i].joiner_jee_tid);
                               break;
         case VgTs_WaitJoiner: VG_(printf)("WaitJoiner"); break;
         case VgTs_Sleeping:   VG_(printf)("Sleeping"); break;
         case VgTs_WaitMX:     VG_(printf)("WaitMX"); break;
         case VgTs_WaitCV:     VG_(printf)("WaitCV"); break;
         case VgTs_WaitSys:    VG_(printf)("WaitSys"); break;
         default: VG_(printf)("???"); break;
      }
      VG_(printf)(", associated_mx = %p, associated_cv = %p\n",
                  VG_(threads)[i].associated_mx,
                  VG_(threads)[i].associated_cv );
      VG_(pp_ExeContext)(
         VG_(get_ExeContext2)( ARCH_INSTR_PTR(VG_(threads)[i].arch),
                               ARCH_FRAME_PTR(VG_(threads)[i].arch),
                               ARCH_STACK_PTR(VG_(threads)[i].arch),
                               VG_(threads)[i].stack_highest_word)
      );
   }
   VG_(printf)("\n");
}

static
void print_sched_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, "  SCHED[%d]: %s", tid, what );
}

static
void print_pthread_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, "PTHREAD[%d]: %s", tid, what );
}

static
Char* name_of_sched_event ( UInt event )
{
   switch (event) {
      case VG_TRC_EBP_JMP_SYSCALL:    return "SYSCALL";
      case VG_TRC_EBP_JMP_CLIENTREQ:  return "CLIENTREQ";
      case VG_TRC_EBP_JMP_YIELD:      return "YIELD";
      case VG_TRC_INNER_COUNTERZERO:  return "COUNTERZERO";
      case VG_TRC_INNER_FASTMISS:     return "FASTMISS";
      case VG_TRC_UNRESUMABLE_SIGNAL: return "FATALSIGNAL";
      default:                        return "??UNKNOWN??";
   }
}


/* Allocate a completely empty ThreadState record. */
static
ThreadId vg_alloc_ThreadState ( void )
{
   Int i;
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty)
         return i;
   }
   VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
   VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
   VG_(core_panic)("VG_N_THREADS is too low");
   /*NOTREACHED*/
}

ThreadState *VG_(get_ThreadState)(ThreadId tid)
{
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   return &VG_(threads)[tid];
}

Bool VG_(is_running_thread)(ThreadId tid)
{
   ThreadId curr = VG_(get_current_tid)();
   return (curr == tid && VG_INVALID_THREADID != tid);
}

ThreadId VG_(get_current_tid) ( void )
{
   if (!VG_(is_valid_tid)(vg_tid_currently_in_baseBlock))
      return VG_INVALID_THREADID;
   return vg_tid_currently_in_baseBlock;
}

ThreadId VG_(get_current_or_recent_tid) ( void )
{
   vg_assert(vg_tid_currently_in_baseBlock == vg_tid_last_in_baseBlock ||
             vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);
   vg_assert(VG_(is_valid_tid)(vg_tid_last_in_baseBlock));

   return vg_tid_last_in_baseBlock;
}

/* Copy the saved state of a thread into VG_(baseBlock), ready for it
   to be run. */
static void load_thread_state ( ThreadId tid )
{
   vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);

   VGA_(load_state)(&VG_(threads)[tid].arch, tid);

   vg_tid_currently_in_baseBlock = tid;
   vg_tid_last_in_baseBlock = tid;
}


/* Copy the state of a thread from VG_(baseBlock), presumably after it
   has been descheduled.  For sanity-check purposes, fill the vacated
   VG_(baseBlock) with garbage so as to make the system more likely to
   fail quickly if we erroneously continue to poke around inside
   VG_(baseBlock) without first doing a load_thread_state().
*/
static void save_thread_state ( ThreadId tid )
{
   vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);

   VGA_(save_state)(&VG_(threads)[tid].arch, tid);

   vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;
}


void VG_(resume_scheduler)(Int sigNo, vki_ksiginfo_t *info)
{
   if (scheduler_jmpbuf_valid) {
      /* Can't continue; must longjmp back to the scheduler and thus
         enter the sighandler immediately. */
      VG_(memcpy)(&unresumable_siginfo, info, sizeof(vki_ksiginfo_t));

      longjmpd_on_signal = sigNo;
      __builtin_longjmp(scheduler_jmpbuf,1);
   }
}
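
/* Note: VG_(resume_scheduler) pairs with the __builtin_setjmp in
   run_thread_for_a_while() below.  The signal handler stashes the
   siginfo in unresumable_siginfo and longjmps; the nonzero return
   from setjmp is then turned into a VG_TRC_UNRESUMABLE_SIGNAL result
   for the main scheduler loop to act on. */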

/* Run the thread tid for a while, and return a VG_TRC_* value to the
   scheduler indicating what happened. */
static
UInt run_thread_for_a_while ( ThreadId tid )
{
   volatile UInt trc = 0;
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   vg_assert(!scheduler_jmpbuf_valid);

   VGP_PUSHCC(VgpRun);
   load_thread_state ( tid );

   /* there should be no undealt-with signals */
   vg_assert(unresumable_siginfo.si_signo == 0);

   if (__builtin_setjmp(scheduler_jmpbuf) == 0) {
      /* try this ... */
      scheduler_jmpbuf_valid = True;
      trc = VG_(run_innerloop)();
      scheduler_jmpbuf_valid = False;
      /* We get here if the client didn't take a fault. */
   } else {
      /* We get here if the client took a fault, which caused our
         signal handler to longjmp. */
      scheduler_jmpbuf_valid = False;
      vg_assert(trc == 0);
      trc = VG_TRC_UNRESUMABLE_SIGNAL;
   }

   vg_assert(!scheduler_jmpbuf_valid);

   save_thread_state ( tid );
   VGP_POPCC(VgpRun);
   return trc;
}


static
void mostly_clear_thread_record ( ThreadId tid )
{
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   VGA_(clear_thread)(&VG_(threads)[tid].arch);
   VG_(threads)[tid].tid = tid;
   VG_(threads)[tid].status = VgTs_Empty;
   VG_(threads)[tid].associated_mx = NULL;
   VG_(threads)[tid].associated_cv = NULL;
   VG_(threads)[tid].awaken_at = 0;
   VG_(threads)[tid].joinee_retval = NULL;
   VG_(threads)[tid].joiner_thread_return = NULL;
   VG_(threads)[tid].joiner_jee_tid = VG_INVALID_THREADID;
   VG_(threads)[tid].detached = False;
   VG_(threads)[tid].cancel_st = True; /* PTHREAD_CANCEL_ENABLE */
   VG_(threads)[tid].cancel_ty = True; /* PTHREAD_CANCEL_DEFERRED */
   VG_(threads)[tid].cancel_pend = NULL; /* not pending */
   VG_(threads)[tid].custack_used = 0;
   VG_(ksigemptyset)(&VG_(threads)[tid].sig_mask);
   VG_(ksigfillset)(&VG_(threads)[tid].eff_sig_mask);
   VG_(threads)[tid].sigqueue_head = 0;
   VG_(threads)[tid].sigqueue_tail = 0;
   VG_(threads)[tid].specifics_ptr = NULL;

   VG_(threads)[tid].syscallno = -1;
   VG_(threads)[tid].sys_flags = 0;
   VG_(threads)[tid].sys_pre_res = NULL;

   VG_(threads)[tid].proxy = NULL;

   /* start with no altstack */
   VG_(threads)[tid].altstack.ss_sp = (void *)0xdeadbeef;
   VG_(threads)[tid].altstack.ss_size = 0;
   VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
}



/* Initialise the scheduler.  Create a single "main" thread ready to
   run, with special ThreadId of one.  This is called at startup; the
   caller takes care to park the client's state in VG_(baseBlock).
*/
void VG_(scheduler_init) ( void )
{
   Int i;
   ThreadId tid_main;

   for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
      mostly_clear_thread_record(i);
      VG_(threads)[i].stack_size = 0;
      VG_(threads)[i].stack_base = (Addr)NULL;
      VG_(threads)[i].stack_guard_size = 0;
      VG_(threads)[i].stack_highest_word = (Addr)NULL;
   }

   for (i = 0; i < VG_N_THREAD_KEYS; i++) {
      vg_thread_keys[i].inuse = False;
      vg_thread_keys[i].destructor = NULL;
   }

   vg_fhstack_used = 0;

   /* Assert this is thread one, which has certain magic
      properties. */
   tid_main = vg_alloc_ThreadState();
   vg_assert(tid_main == 1);
   VG_(threads)[tid_main].status = VgTs_Runnable;

   /* Copy VG_(baseBlock) state to tid_main's slot. */
   vg_tid_currently_in_baseBlock = tid_main;
   vg_tid_last_in_baseBlock = tid_main;

   VGA_(init_thread)(&VG_(threads)[tid_main].arch);
   save_thread_state ( tid_main );

   VG_(threads)[tid_main].stack_highest_word
      = VG_(clstk_end) - 4;
   VG_(threads)[tid_main].stack_base = VG_(clstk_base);
   VG_(threads)[tid_main].stack_size = VG_(client_rlimit_stack).rlim_cur;

   /* So now ... */
   vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);

   /* Not running client code right now. */
   scheduler_jmpbuf_valid = False;

   /* Proxy for main thread */
   VG_(proxy_create)(tid_main);
}



/* vthread tid is returning from a signal handler; modify its
   stack/regs accordingly. */
static
void handle_signal_return ( ThreadId tid )
{
   Bool restart_blocked_syscalls;
   struct vki_timespec * rem;

   vg_assert(VG_(is_valid_tid)(tid));

   restart_blocked_syscalls = VG_(signal_returns)(tid);

   /* If we were interrupted in the middle of a rendezvous
      then check the rendezvous hasn't completed while we
      were busy handling the signal. */
   if (VG_(threads)[tid].status == VgTs_WaitJoiner ||
       VG_(threads)[tid].status == VgTs_WaitJoinee ) {
      maybe_rendezvous_joiners_and_joinees();
   }

   /* If we were interrupted while waiting on a mutex then check that
      it hasn't been unlocked while we were busy handling the signal. */
   if (VG_(threads)[tid].status == VgTs_WaitMX &&
       VG_(threads)[tid].associated_mx->__vg_m_count == 0) {
      vg_pthread_mutex_t* mutex = VG_(threads)[tid].associated_mx;
      mutex->__vg_m_count = 1;
      mutex->__vg_m_owner = (/*_pthread_descr*/void*)tid;
      VG_(threads)[tid].status = VgTs_Runnable;
      VG_(threads)[tid].associated_mx = NULL;
      /* m_edx already holds pth_mx_lock() success (0) */
   }

   if (restart_blocked_syscalls)
      /* Easy; we don't have to do anything. */
      return;

   if (VG_(threads)[tid].status == VgTs_Sleeping
       && PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_nanosleep) {
      /* We interrupted a nanosleep().  The right thing to do is to
         write the unused time to nanosleep's second param, but that's
         too much effort ... we just say that 1 nanosecond was not
         used, and return EINTR. */
      rem = (struct vki_timespec*)PLATFORM_SYSCALL_ARG2(VG_(threads)[tid].arch);
      if (rem != NULL) {
         rem->tv_sec = 0;
         rem->tv_nsec = 1;
      }
      SET_SYSCALL_RETVAL(tid, -VKI_EINTR);
      VG_(threads)[tid].status = VgTs_Runnable;
      return;
   }

   /* All other cases?  Just return. */
}


struct timeout {
   UInt     time;   /* time we should awaken */
   ThreadId tid;    /* thread which cares about this timeout */
   struct timeout *next;
};

static struct timeout *timeouts;

static void add_timeout(ThreadId tid, UInt time)
{
   struct timeout *t = VG_(arena_malloc)(VG_AR_CORE, sizeof(*t));
   struct timeout **prev, *tp;

   t->time = time;
   t->tid = tid;

   if (VG_(clo_trace_sched)) {
      Char msg_buf[100];
      VG_(sprintf)(msg_buf, "add_timeout: now=%u adding timeout at %u",
                   VG_(read_millisecond_timer)(), time);
      print_sched_event(tid, msg_buf);
   }

   for(tp = timeouts, prev = &timeouts;
       tp != NULL && tp->time < time;
       prev = &tp->next, tp = tp->next)
      ;
   t->next = tp;
   *prev = t;
}
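
/* The timeout list is kept sorted by ascending wakeup time, so idle()
   below only ever has to scan the front of the list.  For example,
   adding a timeout for t=150 to a list holding entries at 100 and 200
   yields 100 -> 150 -> 200.  Stale entries (where the thread's
   awaken_at no longer matches) are simply ignored and freed when they
   come due. */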

static
void sched_do_syscall ( ThreadId tid )
{
   Int  syscall_no;
   Char msg_buf[100];

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   syscall_no = PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch);

   /* Special-case nanosleep because we can.  But should we?

      XXX not doing so for now, because it doesn't seem to work
      properly, and we can use the syscall nanosleep just as easily.
   */
   if (0 && syscall_no == __NR_nanosleep) {
      UInt t_now, t_awaken;
      struct vki_timespec* req;
      req = (struct vki_timespec*)PLATFORM_SYSCALL_ARG1(VG_(threads)[tid].arch);

      if (req->tv_sec < 0 || req->tv_nsec < 0 || req->tv_nsec >= 1000000000) {
         SET_SYSCALL_RETVAL(tid, -VKI_EINVAL);
         return;
      }

      t_now = VG_(read_millisecond_timer)();
      t_awaken
         = t_now
           + (UInt)1000ULL * (UInt)(req->tv_sec)
           + (UInt)(req->tv_nsec) / 1000000;
      VG_(threads)[tid].status = VgTs_Sleeping;
      VG_(threads)[tid].awaken_at = t_awaken;
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf, "at %d: nanosleep for %d",
                      t_now, t_awaken-t_now);
         print_sched_event(tid, msg_buf);
      }
      add_timeout(tid, t_awaken);
      /* Force the scheduler to run something else for a while. */
      return;
   }

   /* If pre_syscall returns true, then we're done immediately */
   if (VG_(pre_syscall)(tid)) {
      VG_(post_syscall(tid, True));
      vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   } else {
      vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
   }
}
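
/* Note: when VG_(pre_syscall)() does not complete the call
   synchronously, the thread is left in VgTs_WaitSys; it becomes
   runnable again once its proxy LWP reports completion via
   VG_(proxy_results)() in the main scheduler loop below. */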



/* Sleep for a while, but be willing to be woken. */
static
void idle ( void )
{
   struct vki_pollfd pollfd[1];
   Int delta = -1;
   Int fd = VG_(proxy_resfd)();

   pollfd[0].fd = fd;
   pollfd[0].events = VKI_POLLIN;

   /* Look through the nearest timeouts, looking for the next future
      one (there may be stale past timeouts).  They'll all be mopped
      up below when the poll() finishes. */
   if (timeouts != NULL) {
      struct timeout *tp;
      Bool wicked = False;
      UInt now = VG_(read_millisecond_timer)();

      for(tp = timeouts; tp != NULL && tp->time < now; tp = tp->next) {
         /* If a thread is still sleeping in the past, make it runnable */
         ThreadState *tst = VG_(get_ThreadState)(tp->tid);
         if (tst->status == VgTs_Sleeping)
            tst->status = VgTs_Runnable;
         wicked = True;   /* no sleep for the wicked */
      }

      if (tp != NULL) {
         vg_assert(tp->time >= now);
         /* limit the signed int delta to INT_MAX */
         if ((tp->time - now) <= 0x7FFFFFFFU) {
            delta = tp->time - now;
         } else {
            delta = 0x7FFFFFFF;
         }
      }
      if (wicked)
         delta = 0;
   }

   /* gotta wake up for something! */
   vg_assert(fd != -1 || delta != -1);

   /* If we need to do signal routing, then poll for pending signals
      every VG_(clo_signal_polltime) mS */
   if (VG_(do_signal_routing) && (delta > VG_(clo_signal_polltime) || delta == -1))
      delta = VG_(clo_signal_polltime);

   if (VG_(clo_trace_sched)) {
      Char msg_buf[100];
      VG_(sprintf)(msg_buf, "idle: waiting for %dms and fd %d",
                   delta, fd);
      print_sched_event(0, msg_buf);
   }

   VG_(poll)(pollfd, fd != -1 ? 1 : 0, delta);

   /* See if there's anything on the timeout list which needs
      waking, and mop up anything in the past. */
   {
      UInt now = VG_(read_millisecond_timer)();
      struct timeout *tp;

      tp = timeouts;

      while(tp && tp->time <= now) {
         struct timeout *dead;
         ThreadState *tst;

         tst = VG_(get_ThreadState)(tp->tid);

         if (VG_(clo_trace_sched)) {
            Char msg_buf[100];
            VG_(sprintf)(msg_buf, "idle: now=%u removing timeout at %u",
                         now, tp->time);
            print_sched_event(tp->tid, msg_buf);
         }

         /* If awaken_at != tp->time then it means the timeout is
            stale and we should just ignore it. */
         if(tst->awaken_at == tp->time) {
            switch(tst->status) {
            case VgTs_Sleeping:
               tst->awaken_at = 0xFFFFFFFF;
               tst->status = VgTs_Runnable;
               break;

            case VgTs_WaitMX:
               do_pthread_mutex_timedlock_TIMEOUT(tst->tid);
               break;

            case VgTs_WaitCV:
               do_pthread_cond_timedwait_TIMEOUT(tst->tid);
               break;

            default:
               /* This is a bit odd but OK; if a thread had a timeout
                  but woke for some other reason (signal, condvar
                  wakeup), then it will still be on the list. */
               if (0)
                  VG_(printf)("idle(): unexpected status tp->tid=%d tst->status = %d\n",
                              tp->tid, tst->status);
               break;
            }
         }

         dead = tp;
         tp = tp->next;

         VG_(arena_free)(VG_AR_CORE, dead);
      }

      timeouts = tp;
   }
}


/* ---------------------------------------------------------------------
   The scheduler proper.
   ------------------------------------------------------------------ */

// For handling of the default action of a fatal signal.
// jmp_buf for fatal signals; fatal_signal_jmpbuf_ptr is NULL until
// it is safe to use it.
static jmp_buf  fatal_signal_jmpbuf;
static jmp_buf* fatal_signal_jmpbuf_ptr;
static Int      fatal_sigNo;   // the fatal signal, if it happens

/* Run user-space threads until either
   * Deadlock occurs
   * One thread asks to shut down Valgrind
   * The specified number of basic blocks has gone by.
*/
VgSchedReturnCode do_scheduler ( Int* exitcode, ThreadId* last_run_tid )
{
   ThreadId tid, tid_next;
   UInt     trc;
   UInt     dispatch_ctr_SAVED;
   Int      done_this_time, n_in_bounded_wait;
   Int      n_exists, n_waiting_for_reaper;
   Addr     trans_addr;

   /* Start with the root thread.  tid in general indicates the
      currently runnable/just-finished-running thread. */
   *last_run_tid = tid = 1;

   /* This is the top level scheduler loop.  It falls into three
      phases. */
   while (True) {

      /* ======================= Phase 0 of 3 =======================
         Be paranoid.  Always a good idea. */
     stage1:
      scheduler_sanity();
      VG_(sanity_check_general)( False );

      /* ======================= Phase 1 of 3 =======================
         Handle I/O completions and signals.  This may change the
         status of various threads.  Then select a new thread to run,
         or declare deadlock, or sleep if there are no runnable
         threads but some are blocked on I/O. */

      /* Do the following loop until a runnable thread is found, or
         deadlock is detected. */
      while (True) {

         /* For stats purposes only. */
         n_scheduling_events_MAJOR++;

         /* Route signals to their proper places */
         VG_(route_signals)();

         /* See if any of the proxy LWPs report any activity: either a
            syscall completing or a signal arriving. */
         VG_(proxy_results)();

         /* Try and find a thread (tid) to run. */
         tid_next = tid;
         if (prefer_sched != VG_INVALID_THREADID) {
            tid_next = prefer_sched-1;
            prefer_sched = VG_INVALID_THREADID;
         }
         n_in_bounded_wait = 0;
         n_exists = 0;
         n_waiting_for_reaper = 0;
         while (True) {
            tid_next++;
            if (tid_next >= VG_N_THREADS) tid_next = 1;
            if (VG_(threads)[tid_next].status == VgTs_Sleeping
                || VG_(threads)[tid_next].status == VgTs_WaitSys
                || (VG_(threads)[tid_next].status == VgTs_WaitMX
                    && VG_(threads)[tid_next].awaken_at != 0xFFFFFFFF)
                || (VG_(threads)[tid_next].status == VgTs_WaitCV
                    && VG_(threads)[tid_next].awaken_at != 0xFFFFFFFF))
               n_in_bounded_wait ++;
            if (VG_(threads)[tid_next].status != VgTs_Empty)
               n_exists++;
            if (VG_(threads)[tid_next].status == VgTs_WaitJoiner)
               n_waiting_for_reaper++;
            if (VG_(threads)[tid_next].status == VgTs_Runnable)
               break; /* We can run this one. */
            if (tid_next == tid)
               break; /* been all the way round */
         }
         tid = tid_next;

         if (VG_(threads)[tid].status == VgTs_Runnable) {
            /* Found a suitable candidate.  Fall out of this loop, so
               we can advance to stage 2 of the scheduler: actually
               running the thread. */
            break;
         }

         /* All threads have exited - pretend someone called exit() */
         if (n_waiting_for_reaper == n_exists) {
            *exitcode = 0; /* ? */
            return VgSrc_ExitSyscall;
         }

         /* We didn't find a runnable thread.  Now what? */
         if (n_in_bounded_wait == 0) {
            /* No runnable threads and no prospect of any appearing
               even if we wait for an arbitrary length of time.  In
               short, we have a deadlock. */
            VG_(pp_sched_status)();
            return VgSrc_Deadlock;
         }

         /* Nothing needs doing, so sit in idle until either a timeout
            happens or a thread's syscall completes. */
         idle();
         /* pp_sched_status(); */
         /* VG_(printf)("."); */
      }


      /* ======================= Phase 2 of 3 =======================
         Wahey!  We've finally decided that thread tid is runnable, so
         we now do that.  Run it for as much of a quantum as possible.
         Trivial requests are handled and the thread continues.  The
         aim is not to do too many of Phase 1 since it is expensive. */

      if (0)
         VG_(printf)("SCHED: tid %d\n", tid);

      VG_TRACK( thread_run, tid );

      /* Figure out how many bbs to ask vg_run_innerloop to do.  Note
         that it decrements the counter before testing it for zero, so
         that if VG_(dispatch_ctr) is set to N you get at most N-1
         iterations.  Also this means that VG_(dispatch_ctr) must
         exceed zero before entering the innerloop.  Also also, the
         decrement is done before the bb is actually run, so you
         always get at least one decrement even if nothing happens.
      */
      VG_(dispatch_ctr) = VG_SCHEDULING_QUANTUM + 1;

      /* ... and remember what we asked for. */
      dispatch_ctr_SAVED = VG_(dispatch_ctr);

      /* paranoia ... */
      vg_assert(VG_(threads)[tid].tid == tid);

sewardje663cb92002-04-12 10:26:32 +0000852 /* Actually run thread tid. */
853 while (True) {
854
nethercote759dda32004-08-07 18:16:56 +0000855 *last_run_tid = tid;
sewardj7e87e382002-05-03 19:09:05 +0000856
sewardje663cb92002-04-12 10:26:32 +0000857 /* For stats purposes only. */
nethercote844e7122004-08-02 15:27:22 +0000858 n_scheduling_events_MINOR++;
sewardje663cb92002-04-12 10:26:32 +0000859
860 if (0)
861 VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs",
862 tid, VG_(dispatch_ctr) - 1 );
sewardjb3eef6b2002-05-01 00:05:27 +0000863# if 0
864 if (VG_(bbs_done) > 31700000 + 0) {
865 dispatch_ctr_SAVED = VG_(dispatch_ctr) = 2;
nethercoteb8ef9d82004-09-05 22:02:33 +0000866 VG_(translate)(&VG_(threads)[tid],
867 ARCH_INSTR_PTR(VG_(threads)[tid].arch),
nethercote59a122d2004-08-03 17:16:51 +0000868 /*debugging*/True);
sewardjb3eef6b2002-05-01 00:05:27 +0000869 }
nethercoteb8ef9d82004-09-05 22:02:33 +0000870 vg_assert(ARCH_INSTR_PTR(VG_(threads)[tid].arch) != 0);
sewardjb3eef6b2002-05-01 00:05:27 +0000871# endif
sewardje663cb92002-04-12 10:26:32 +0000872
873 trc = run_thread_for_a_while ( tid );
874
sewardjb3eef6b2002-05-01 00:05:27 +0000875# if 0
nethercoteb8ef9d82004-09-05 22:02:33 +0000876 if (0 == ARCH_INSTR_PTR(VG_(threads)[tid].arch)) {
sewardjb3eef6b2002-05-01 00:05:27 +0000877 VG_(printf)("tid = %d, dc = %llu\n", tid, VG_(bbs_done));
nethercoteb8ef9d82004-09-05 22:02:33 +0000878 vg_assert(0 != ARCH_INSTR_PTR(VG_(threads)[tid].arch));
sewardjb3eef6b2002-05-01 00:05:27 +0000879 }
880# endif
881
sewardje663cb92002-04-12 10:26:32 +0000882 /* Deal quickly with trivial scheduling events, and resume the
883 thread. */
884
885 if (trc == VG_TRC_INNER_FASTMISS) {
nethercote4d714382004-10-13 09:47:24 +0000886 Addr ip = ARCH_INSTR_PTR(VG_(threads)[tid].arch);
887
sewardje663cb92002-04-12 10:26:32 +0000888 vg_assert(VG_(dispatch_ctr) > 0);
889
890 /* Trivial event. Miss in the fast-cache. Do a full
891 lookup for it. */
nethercote4d714382004-10-13 09:47:24 +0000892 trans_addr = VG_(search_transtab)( ip );
sewardje663cb92002-04-12 10:26:32 +0000893 if (trans_addr == (Addr)0) {
894 /* Not found; we need to request a translation. */
nethercote4d714382004-10-13 09:47:24 +0000895 if (VG_(translate)( tid, ip, /*debug*/False )) {
896 trans_addr = VG_(search_transtab)( ip );
897 if (trans_addr == (Addr)0)
898 VG_(core_panic)("VG_TRC_INNER_FASTMISS: missing tt_fast entry");
899 } else {
900 // If VG_(translate)() fails, it's because it had to throw
901 // a signal because the client jumped to a bad address.
902 // This means VG_(deliver_signal)() will have been called
903 // by now, and the program counter will now be pointing to
904 // the start of the signal handler (if there is no
905 // handler, things would have been aborted by now), so do
906 // nothing, and things will work out next time around the
907 // scheduler loop.
908 }
sewardje663cb92002-04-12 10:26:32 +0000909 }
910 continue; /* with this thread */
911 }
912
913 if (trc == VG_TRC_EBP_JMP_CLIENTREQ) {
nethercote3e901a22004-09-11 13:17:02 +0000914 UInt* args = (UInt*)(ARCH_CLREQ_ARGS(VG_(threads)[tid].arch));
915 UInt reqno = args[0];
sewardj18a62ff2002-07-12 22:30:51 +0000916 /* VG_(printf)("request 0x%x\n", reqno); */
sewardj1fe7b002002-07-16 01:43:15 +0000917
918 /* Are we really absolutely totally quitting? */
919 if (reqno == VG_USERREQ__LIBC_FREERES_DONE) {
920 if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
921 VG_(message)(Vg_DebugMsg,
922 "__libc_freeres() done; really quitting!");
923 }
924 return VgSrc_ExitSyscall;
925 }
926
nethercote3e901a22004-09-11 13:17:02 +0000927 do_client_request(tid,args);
sewardj124ca2a2002-06-20 10:19:38 +0000928 /* Following the request, we try and continue with the
929 same thread if still runnable. If not, go back to
930 Stage 1 to select a new thread to run. */
sewardj18a62ff2002-07-12 22:30:51 +0000931 if (VG_(threads)[tid].status == VgTs_Runnable
932 && reqno != VG_USERREQ__PTHREAD_YIELD)
sewardj124ca2a2002-06-20 10:19:38 +0000933 continue; /* with this thread */
934 else
935 goto stage1;
sewardje663cb92002-04-12 10:26:32 +0000936 }
937
         if (trc == VG_TRC_EBP_JMP_SYSCALL) {
            /* Do a syscall for the vthread tid.  This could cause it
               to become non-runnable.  One special case: spot the
               client doing calls to exit() and take this as the cue
               to exit. */
# if 0
            { UInt* esp; Int i;
              esp=(UInt*)ARCH_STACK_PTR(VG_(threads)[tid].arch);
              VG_(printf)("\nBEFORE\n");
              for (i = 10; i >= -10; i--)
                 VG_(printf)("%2d %p = 0x%x\n", i, &esp[i], esp[i]);
            }
# endif

            /* Deal with calling __libc_freeres() at exit.  When the
               client does __NR_exit, it's exiting for good.  So we
               then run __libc_freeres_wrapper.  That quits by
               doing VG_USERREQ__LIBC_FREERES_DONE, and at that point
               we really exit.  To be safe we nuke all other threads
               currently running.

               If not valgrinding (cachegrinding, etc) don't do this.
               __libc_freeres does some invalid frees which crash
               the unprotected malloc/free system. */

            if (PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit
                || PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit_group
               ) {

               /* If __NR_exit, remember the supplied argument. */
               *exitcode = PLATFORM_SYSCALL_ARG1(VG_(threads)[tid].arch);

               /* Only run __libc_freeres if the tool says it's ok and
                  it hasn't been overridden with --run-libc-freeres=no
                  on the command line. */

               if (VG_(needs).libc_freeres &&
                   VG_(clo_run_libc_freeres) &&
                   __libc_freeres_wrapper != 0) {
                  if (VG_(clo_verbosity) > 2
                      || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                     VG_(message)(Vg_DebugMsg,
                        "Caught __NR_exit; running __libc_freeres()");
                  }
                  VG_(nuke_all_threads_except) ( tid );
                  ARCH_INSTR_PTR(VG_(threads)[tid].arch) =
                     (UInt)__libc_freeres_wrapper;
                  vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
                  goto stage1; /* party on, dudes (but not for much longer :) */

               } else {
                  /* We won't run __libc_freeres; just exit now. */
                  if (VG_(clo_verbosity) > 2
                      || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                     VG_(message)(Vg_DebugMsg,
                        "Caught __NR_exit; quitting");
                  }
                  return VgSrc_ExitSyscall;
               }

            }

            /* We've dealt with __NR_exit at this point. */
            vg_assert(PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) != __NR_exit &&
                      PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) != __NR_exit_group);

            /* Trap syscalls to __NR_sched_yield and just have this
               thread yield instead.  Not essential, just an
               optimisation. */
            if (PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_sched_yield) {
               SET_SYSCALL_RETVAL(tid, 0); /* syscall returns with success */
               goto stage1; /* find a new thread to run */
            }

            sched_do_syscall(tid);

# if 0
            { UInt* esp; Int i;
              esp=(UInt*)ARCH_STACK_PTR(VG_(threads)[tid].arch);
              VG_(printf)("AFTER\n");
              for (i = 10; i >= -10; i--)
                 VG_(printf)("%2d %p = 0x%x\n", i, &esp[i], esp[i]);
            }
# endif

            if (VG_(threads)[tid].status == VgTs_Runnable) {
               continue; /* with this thread */
            } else {
               goto stage1;
            }
         }

         /* It's an event we can't quickly deal with.  Give up running
            this thread and handle things the expensive way. */
         break;
      }

      /* ======================= Phase 3 of 3 =======================
         Handle non-trivial thread requests, mostly pthread stuff. */

      /* Ok, we've fallen out of the dispatcher for a
         non-completely-trivial reason. First, update basic-block
         counters. */

      done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr);
      vg_assert(done_this_time > 0);
      VG_(bbs_done) += (ULong)done_this_time;

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d: completed %d bbs, trc %d",
                                   tid, done_this_time, (Int)trc );

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d: %llu bbs, event %s",
                                   tid, VG_(bbs_done),
                                   name_of_sched_event(trc) );

      /* Examine the thread's return code to figure out why it
         stopped. */

      switch (trc) {

         case VG_TRC_EBP_JMP_YIELD:
            /* Explicit yield.  Let a new thread be scheduled,
               simply by doing nothing, causing us to arrive back at
               Phase 1. */
            break;

         case VG_TRC_INNER_COUNTERZERO:
            /* Timeslice is out.  Let a new thread be scheduled,
               simply by doing nothing, causing us to arrive back at
               Phase 1. */
            vg_assert(VG_(dispatch_ctr) == 0);
            break;

         case VG_TRC_UNRESUMABLE_SIGNAL:
            /* It got a SIGSEGV/SIGBUS/SIGILL/SIGFPE, which we need to
               deliver right away. */
            vg_assert(unresumable_siginfo.si_signo == VKI_SIGSEGV ||
                      unresumable_siginfo.si_signo == VKI_SIGBUS ||
                      unresumable_siginfo.si_signo == VKI_SIGILL ||
                      unresumable_siginfo.si_signo == VKI_SIGFPE);
            vg_assert(longjmpd_on_signal == unresumable_siginfo.si_signo);

            /* make sure we've unblocked the signals which the handler blocked */
            VG_(unblock_host_signal)(longjmpd_on_signal);

            VG_(deliver_signal)(tid, &unresumable_siginfo, False);
            unresumable_siginfo.si_signo = 0; /* done */
            break;

         default:
            VG_(printf)("\ntrc = %d\n", trc);
            VG_(core_panic)("VG_(scheduler), phase 3: "
                            "unexpected thread return code");
            /* NOTREACHED */
            break;

      } /* switch (trc) */

      /* That completes Phase 3 of 3.  Return now to the top of the
         main scheduler loop, to Phase 1 of 3. */

   } /* top-level scheduler loop */


   /* NOTREACHED */
   VG_(core_panic)("scheduler: post-main-loop ?!");
   /* NOTREACHED */
}

VgSchedReturnCode VG_(scheduler) ( Int* exitcode, ThreadId* last_run_tid,
                                   Int* fatal_sigNo_ptr )
{
   VgSchedReturnCode src;

   fatal_signal_jmpbuf_ptr = &fatal_signal_jmpbuf;
   if (__builtin_setjmp( fatal_signal_jmpbuf_ptr ) == 0) {
      src = do_scheduler( exitcode, last_run_tid );
   } else {
      src = VgSrc_FatalSig;
      *fatal_sigNo_ptr = fatal_sigNo;
   }
   return src;
}
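
/* Note: VG_(scheduler_handle_fatal_signal)() below longjmps to
   fatal_signal_jmpbuf, so VG_(scheduler) returns VgSrc_FatalSig with
   the signal number in *fatal_sigNo_ptr instead of unwinding the
   scheduler loop normally. */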

void VG_(need_resched) ( ThreadId prefer )
{
   /* Tell the scheduler now might be a good time to find a new
      runnable thread, because something happened which woke a thread
      up.

      NB: This can be called unsynchronized from either a signal
      handler, or from another LWP (ie, real kernel thread).

      In principle this could simply be a matter of setting
      VG_(dispatch_ctr) to a small value (say, 2), which would make
      any running code come back to the scheduler fairly quickly.

      However, since the scheduler implements a strict round-robin
      policy with only one priority level, there are, by definition,
      no better threads to be running than the current thread anyway,
      so we may as well ignore this hint.  For processes with a
      mixture of compute and I/O bound threads, this means the compute
      threads could introduce longish latencies before the I/O threads
      run.  For programs with only I/O bound threads, need_resched
      won't have any effect anyway.

      OK, so I've added command-line switches to enable low-latency
      syscalls and signals.  The prefer_sched variable is in effect
      the ID of a single thread which has higher priority than all the
      others.  If set, the scheduler will prefer to schedule that
      thread over all others.  Naturally, this could lead to
      starvation or other unfairness.
   */

   if (VG_(dispatch_ctr) > 10)
      VG_(dispatch_ctr) = 2;
   prefer_sched = prefer;
}

void VG_(scheduler_handle_fatal_signal) ( Int sigNo )
{
   if (NULL != fatal_signal_jmpbuf_ptr) {
      fatal_sigNo = sigNo;
      __builtin_longjmp(*fatal_signal_jmpbuf_ptr, 1);
   }
}

/* ---------------------------------------------------------------------
   The pthread implementation.
   ------------------------------------------------------------------ */

#include <pthread.h>
#include <errno.h>

/* /usr/include/bits/pthreadtypes.h:
   typedef unsigned long int pthread_t;
*/


/* -----------------------------------------------------------
   Thread CREATION, JOINAGE and CANCELLATION: HELPER FNS
   -------------------------------------------------------- */

/* We've decided to action a cancellation on tid.  Make it jump to
   thread_exit_wrapper() in vg_libpthread.c, passing PTHREAD_CANCELED
   as the arg. */
static
void make_thread_jump_to_cancelhdlr ( ThreadId tid )
{
   Char msg_buf[100];
   vg_assert(VG_(is_valid_tid)(tid));

   /* Push PTHREAD_CANCELED on the stack and jump to the cancellation
      handler -- which is really thread_exit_wrapper() in
      vg_libpthread.c. */
   vg_assert(VG_(threads)[tid].cancel_pend != NULL);

   /* Set an argument and bogus return address.  The return address will not
      be used, but we still need to have it so that the arg is at the
      correct stack offset. */
   VGA_(set_arg_and_bogus_ret)(tid, (UInt)PTHREAD_CANCELED, 0xBEADDEEF);

   /* .cancel_pend will hold &thread_exit_wrapper */
   ARCH_INSTR_PTR(VG_(threads)[tid].arch) = (UInt)VG_(threads)[tid].cancel_pend;

   VG_(proxy_abort_syscall)(tid);

   /* Make sure we aren't cancelled again whilst handling this
      cancellation. */
   VG_(threads)[tid].cancel_st = False;
   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf,
         "jump to cancellation handler (hdlr = %p)",
         VG_(threads)[tid].cancel_pend);
      print_sched_event(tid, msg_buf);
   }

1217 if(VG_(threads)[tid].status == VgTs_WaitCV) {
 1218 /* POSIX says we must reacquire the mutex before handling cancellation */
1219 vg_pthread_mutex_t* mx;
1220 vg_pthread_cond_t* cond;
1221
1222 mx = VG_(threads)[tid].associated_mx;
1223 cond = VG_(threads)[tid].associated_cv;
1224 VG_TRACK( pre_mutex_lock, tid, mx );
1225
1226 if (mx->__vg_m_owner == VG_INVALID_THREADID) {
1227 /* Currently unheld; hand it out to thread tid. */
1228 vg_assert(mx->__vg_m_count == 0);
1229 VG_(threads)[tid].status = VgTs_Runnable;
1230 VG_(threads)[tid].associated_cv = NULL;
1231 VG_(threads)[tid].associated_mx = NULL;
thughes10236472004-06-13 14:35:43 +00001232 mx->__vg_m_owner = (/*_pthread_descr*/void*)tid;
thughes513197c2004-06-13 12:07:53 +00001233 mx->__vg_m_count = 1;
1234 /* .m_edx already holds pth_cond_wait success value (0) */
1235
1236 VG_TRACK( post_mutex_lock, tid, mx );
1237
1238 if (VG_(clo_trace_pthread_level) >= 1) {
1239 VG_(sprintf)(msg_buf, "%s cv %p: RESUME with mx %p",
1240 "pthread_cancel", cond, mx );
1241 print_pthread_event(tid, msg_buf);
1242 }
1243
1244 } else {
1245 /* Currently held. Make thread tid be blocked on it. */
1246 vg_assert(mx->__vg_m_count > 0);
1247 VG_(threads)[tid].status = VgTs_WaitMX;
1248 VG_(threads)[tid].associated_cv = NULL;
1249 VG_(threads)[tid].associated_mx = mx;
1250 SET_PTHREQ_RETVAL(tid, 0); /* pth_cond_wait success value */
1251
1252 if (VG_(clo_trace_pthread_level) >= 1) {
1253 VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
1254 "pthread_cancel", cond, mx );
1255 print_pthread_event(tid, msg_buf);
1256 }
1257 }
1258 } else {
1259 VG_(threads)[tid].status = VgTs_Runnable;
1260 }
sewardj20917d82002-05-28 01:36:45 +00001261}
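/* For orientation, the client-side behaviour that the machinery above
   implements: a cancelled thread runs its cleanup handlers and its
   joiner sees PTHREAD_CANCELED as the exit value.  This is ordinary
   application code (a sketch, not part of the scheduler). */
#if 0
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void cleanup ( void* arg )
{
   printf("cleanup handler runs for: %s\n", (char*)arg);
}

static void* worker ( void* unused )
{
   pthread_cleanup_push(cleanup, "held resource");
   while (1)
      pause();                  /* a cancellation point */
   pthread_cleanup_pop(0);      /* not reached; keeps push/pop balanced */
   return NULL;
}

int main ( void )
{
   pthread_t th;
   void*     res;
   pthread_create(&th, NULL, worker, NULL);
   sleep(1);
   pthread_cancel(th);
   pthread_join(th, &res);
   printf("joined: %s\n", res == PTHREAD_CANCELED ? "PTHREAD_CANCELED" : "?");
   return 0;
}
#endif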
1262
1263
1264
sewardjb48e5002002-05-13 00:16:03 +00001265/* Release resources and generally clean up once a thread has finally
nethercotef971ab72004-08-02 16:27:40 +00001266 disappeared.
1267
1268 BORKAGE/ISSUES as of 29 May 02 (moved from top of file --njn 2004-Aug-02)
1269
1270 TODO sometime:
 1271 - Mutex scrubbing - cleanup_after_thread_exited: look for threads
1272 blocked on mutexes held by the exiting thread, and release them
1273 appropriately. (??)
1274*/
sewardjb48e5002002-05-13 00:16:03 +00001275static
jsgf855d93d2003-10-13 22:26:55 +00001276void cleanup_after_thread_exited ( ThreadId tid, Bool forcekill )
sewardjb48e5002002-05-13 00:16:03 +00001277{
thughes3a1b8172004-09-12 22:48:59 +00001278 Segment *seg;
1279
nethercote36881a22004-08-04 14:03:16 +00001280 vg_assert(is_valid_or_empty_tid(tid));
sewardj018f7622002-05-15 21:13:39 +00001281 vg_assert(VG_(threads)[tid].status == VgTs_Empty);
njn25e49d8e72002-09-23 09:36:25 +00001282 /* Its stack is now off-limits */
thughes3a1b8172004-09-12 22:48:59 +00001283 seg = VG_(find_segment)( VG_(threads)[tid].stack_base );
1284 VG_TRACK( die_mem_stack, seg->addr, seg->len );
njn25e49d8e72002-09-23 09:36:25 +00001285
nethercotef9b59412004-09-10 15:33:32 +00001286 VGA_(cleanup_thread)( &VG_(threads)[tid].arch );
fitzhardinge47735af2004-01-21 01:27:27 +00001287
jsgf855d93d2003-10-13 22:26:55 +00001288 /* Not interested in the timeout anymore */
1289 VG_(threads)[tid].awaken_at = 0xFFFFFFFF;
1290
1291 /* Delete proxy LWP */
1292 VG_(proxy_delete)(tid, forcekill);
sewardjb48e5002002-05-13 00:16:03 +00001293}
1294
1295
sewardj20917d82002-05-28 01:36:45 +00001296/* Look for matching pairs of threads waiting for joiners and threads
1297 waiting for joinees. For each such pair copy the return value of
1298 the joinee into the joiner, let the joiner resume and discard the
1299 joinee. */
1300static
1301void maybe_rendezvous_joiners_and_joinees ( void )
1302{
1303 Char msg_buf[100];
1304 void** thread_return;
1305 ThreadId jnr, jee;
1306
1307 for (jnr = 1; jnr < VG_N_THREADS; jnr++) {
1308 if (VG_(threads)[jnr].status != VgTs_WaitJoinee)
1309 continue;
1310 jee = VG_(threads)[jnr].joiner_jee_tid;
1311 if (jee == VG_INVALID_THREADID)
1312 continue;
1313 vg_assert(VG_(is_valid_tid)(jee));
jsgf855d93d2003-10-13 22:26:55 +00001314 if (VG_(threads)[jee].status != VgTs_WaitJoiner) {
1315 /* if joinee has become detached, then make join fail with
1316 EINVAL */
1317 if (VG_(threads)[jee].detached) {
1318 VG_(threads)[jnr].status = VgTs_Runnable;
1319 VG_(threads)[jnr].joiner_jee_tid = VG_INVALID_THREADID;
1320 SET_PTHREQ_RETVAL(jnr, VKI_EINVAL);
1321 }
sewardj20917d82002-05-28 01:36:45 +00001322 continue;
jsgf855d93d2003-10-13 22:26:55 +00001323 }
sewardj20917d82002-05-28 01:36:45 +00001324 /* ok! jnr is waiting to join with jee, and jee is waiting to be
1325 joined by ... well, any thread. So let's do it! */
1326
1327 /* Copy return value to where joiner wants it. */
1328 thread_return = VG_(threads)[jnr].joiner_thread_return;
1329 if (thread_return != NULL) {
1330 /* CHECK thread_return writable */
njn72718642003-07-24 08:45:32 +00001331 VG_TRACK( pre_mem_write, Vg_CorePThread, jnr,
njn25e49d8e72002-09-23 09:36:25 +00001332 "pthread_join: thread_return",
1333 (Addr)thread_return, sizeof(void*));
sewardj5a3798b2002-06-04 23:24:22 +00001334
sewardj20917d82002-05-28 01:36:45 +00001335 *thread_return = VG_(threads)[jee].joinee_retval;
1336 /* Not really right, since it makes the thread's return value
1337 appear to be defined even if it isn't. */
njn25e49d8e72002-09-23 09:36:25 +00001338 VG_TRACK( post_mem_write, (Addr)thread_return, sizeof(void*) );
sewardj20917d82002-05-28 01:36:45 +00001339 }
1340
1341 /* Joinee is discarded */
1342 VG_(threads)[jee].status = VgTs_Empty; /* bye! */
jsgf855d93d2003-10-13 22:26:55 +00001343 cleanup_after_thread_exited ( jee, False );
sewardjc4a810d2002-11-13 22:25:51 +00001344 if (VG_(clo_trace_sched)) {
1345 VG_(sprintf)(msg_buf,
1346 "rendezvous with joinee %d. %d resumes, %d exits.",
1347 jee, jnr, jee );
sewardj20917d82002-05-28 01:36:45 +00001348 print_sched_event(jnr, msg_buf);
1349 }
sewardjc4a810d2002-11-13 22:25:51 +00001350
1351 VG_TRACK( post_thread_join, jnr, jee );
sewardj20917d82002-05-28 01:36:45 +00001352
1353 /* joiner returns with success */
1354 VG_(threads)[jnr].status = VgTs_Runnable;
njnd3040452003-05-19 15:04:06 +00001355 SET_PTHREQ_RETVAL(jnr, 0);
sewardj20917d82002-05-28 01:36:45 +00001356 }
1357}
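/* Client-side picture of the rendezvous implemented above: the value
   handed to pthread_exit (or returned from the start routine) is what
   gets copied into the joiner's thread_return slot.  Ordinary
   application code, shown only to make the joiner/joinee pairing
   concrete. */
#if 0
#include <pthread.h>
#include <stdio.h>

static void* joinee_fn ( void* arg )
{
   pthread_exit((void*)42);     /* becomes the joinee's retval */
   /* not reached */
}

int main ( void )
{
   pthread_t th;
   void*     ret = NULL;
   pthread_create(&th, NULL, joinee_fn, NULL);
   pthread_join(th, &ret);      /* ret receives the joinee's value */
   printf("joinee returned %ld\n", (long)ret);
   return 0;
}
#endif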
1358
1359
sewardjccef2e62002-05-29 19:26:32 +00001360/* Nuke all threads other than tid. POSIX requires this to happen
 1361 in __NR_exec, and after a __NR_fork() when I am the child.
jsgf855d93d2003-10-13 22:26:55 +00001362 Also used at process exit time with
 1363 me==VG_INVALID_THREADID. */
sewardjccef2e62002-05-29 19:26:32 +00001364void VG_(nuke_all_threads_except) ( ThreadId me )
1365{
1366 ThreadId tid;
1367 for (tid = 1; tid < VG_N_THREADS; tid++) {
1368 if (tid == me
jsgf855d93d2003-10-13 22:26:55 +00001369 || VG_(threads)[tid].status == VgTs_Empty)
sewardjccef2e62002-05-29 19:26:32 +00001370 continue;
sewardjef037c72002-05-30 00:40:03 +00001371 if (0)
1372 VG_(printf)(
1373 "VG_(nuke_all_threads_except): nuking tid %d\n", tid);
jsgf855d93d2003-10-13 22:26:55 +00001374 VG_(proxy_delete)(tid, True);
sewardjccef2e62002-05-29 19:26:32 +00001375 VG_(threads)[tid].status = VgTs_Empty;
thughes6d41bea2004-10-20 12:25:59 +00001376 VG_(threads)[tid].associated_mx = NULL;
1377 VG_(threads)[tid].associated_cv = NULL;
jsgf855d93d2003-10-13 22:26:55 +00001378 cleanup_after_thread_exited( tid, True );
sewardjccef2e62002-05-29 19:26:32 +00001379 }
1380}
1381
1382
sewardj20917d82002-05-28 01:36:45 +00001383/* -----------------------------------------------------------
1384 Thread CREATION, JOINAGE and CANCELLATION: REQUESTS
1385 -------------------------------------------------------- */
1386
sewardje663cb92002-04-12 10:26:32 +00001387static
sewardj8ad94e12002-05-29 00:10:20 +00001388void do__cleanup_push ( ThreadId tid, CleanupEntry* cu )
1389{
1390 Int sp;
1391 Char msg_buf[100];
1392 vg_assert(VG_(is_valid_tid)(tid));
1393 sp = VG_(threads)[tid].custack_used;
1394 if (VG_(clo_trace_sched)) {
thughes11975ff2004-06-12 12:58:22 +00001395 switch (cu->type) {
1396 case VgCt_Function:
1397 VG_(sprintf)(msg_buf,
1398 "cleanup_push (fn %p, arg %p) -> slot %d",
1399 cu->data.function.fn, cu->data.function.arg, sp);
1400 break;
1401 case VgCt_Longjmp:
1402 VG_(sprintf)(msg_buf,
1403 "cleanup_push (ub %p) -> slot %d",
1404 cu->data.longjmp.ub, sp);
1405 break;
1406 default:
1407 VG_(sprintf)(msg_buf,
1408 "cleanup_push (unknown type) -> slot %d",
1409 sp);
1410 break;
1411 }
sewardj8ad94e12002-05-29 00:10:20 +00001412 print_sched_event(tid, msg_buf);
1413 }
1414 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1415 if (sp == VG_N_CLEANUPSTACK)
njne427a662002-10-02 11:08:25 +00001416 VG_(core_panic)("do__cleanup_push: VG_N_CLEANUPSTACK is too small."
sewardj8ad94e12002-05-29 00:10:20 +00001417 " Increase and recompile.");
1418 VG_(threads)[tid].custack[sp] = *cu;
1419 sp++;
1420 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001421 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001422}
1423
1424
1425static
1426void do__cleanup_pop ( ThreadId tid, CleanupEntry* cu )
1427{
1428 Int sp;
1429 Char msg_buf[100];
1430 vg_assert(VG_(is_valid_tid)(tid));
1431 sp = VG_(threads)[tid].custack_used;
1432 if (VG_(clo_trace_sched)) {
njn36650922002-10-04 09:18:09 +00001433 VG_(sprintf)(msg_buf, "cleanup_pop from slot %d", sp-1);
sewardj8ad94e12002-05-29 00:10:20 +00001434 print_sched_event(tid, msg_buf);
1435 }
1436 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1437 if (sp == 0) {
njnd3040452003-05-19 15:04:06 +00001438 SET_PTHREQ_RETVAL(tid, -1);
sewardj8ad94e12002-05-29 00:10:20 +00001439 return;
1440 }
1441 sp--;
njn72718642003-07-24 08:45:32 +00001442 VG_TRACK( pre_mem_write, Vg_CorePThread, tid,
njn36650922002-10-04 09:18:09 +00001443 "cleanup pop", (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001444 *cu = VG_(threads)[tid].custack[sp];
njn25e49d8e72002-09-23 09:36:25 +00001445 VG_TRACK( post_mem_write, (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001446 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001447 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001448}
1449
1450
1451static
sewardjff42d1d2002-05-22 13:17:31 +00001452void do_pthread_yield ( ThreadId tid )
1453{
1454 Char msg_buf[100];
1455 vg_assert(VG_(is_valid_tid)(tid));
sewardjff42d1d2002-05-22 13:17:31 +00001456 if (VG_(clo_trace_sched)) {
1457 VG_(sprintf)(msg_buf, "yield");
1458 print_sched_event(tid, msg_buf);
1459 }
njnd3040452003-05-19 15:04:06 +00001460 SET_PTHREQ_RETVAL(tid, 0);
sewardjff42d1d2002-05-22 13:17:31 +00001461}
1462
1463
1464static
sewardj20917d82002-05-28 01:36:45 +00001465void do__testcancel ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00001466{
sewardj7989d0c2002-05-28 11:00:01 +00001467 Char msg_buf[100];
sewardjb48e5002002-05-13 00:16:03 +00001468 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001469 if (VG_(clo_trace_sched)) {
1470 VG_(sprintf)(msg_buf, "testcancel");
1471 print_sched_event(tid, msg_buf);
1472 }
sewardj20917d82002-05-28 01:36:45 +00001473 if (/* is there a cancellation pending on this thread? */
1474 VG_(threads)[tid].cancel_pend != NULL
1475 && /* is this thread accepting cancellations? */
1476 VG_(threads)[tid].cancel_st) {
1477 /* Ok, let's do the cancellation. */
1478 make_thread_jump_to_cancelhdlr ( tid );
sewardje663cb92002-04-12 10:26:32 +00001479 } else {
sewardj20917d82002-05-28 01:36:45 +00001480 /* No, we keep going. */
njnd3040452003-05-19 15:04:06 +00001481 SET_PTHREQ_RETVAL(tid, 0);
sewardje663cb92002-04-12 10:26:32 +00001482 }
sewardje663cb92002-04-12 10:26:32 +00001483}
1484
1485
1486static
sewardj20917d82002-05-28 01:36:45 +00001487void do__set_cancelstate ( ThreadId tid, Int state )
1488{
1489 Bool old_st;
sewardj7989d0c2002-05-28 11:00:01 +00001490 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001491 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001492 if (VG_(clo_trace_sched)) {
1493 VG_(sprintf)(msg_buf, "set_cancelstate to %d (%s)", state,
1494 state==PTHREAD_CANCEL_ENABLE
1495 ? "ENABLE"
1496 : (state==PTHREAD_CANCEL_DISABLE ? "DISABLE" : "???"));
1497 print_sched_event(tid, msg_buf);
1498 }
sewardj20917d82002-05-28 01:36:45 +00001499 old_st = VG_(threads)[tid].cancel_st;
1500 if (state == PTHREAD_CANCEL_ENABLE) {
1501 VG_(threads)[tid].cancel_st = True;
1502 } else
1503 if (state == PTHREAD_CANCEL_DISABLE) {
1504 VG_(threads)[tid].cancel_st = False;
1505 } else {
njne427a662002-10-02 11:08:25 +00001506 VG_(core_panic)("do__set_cancelstate");
sewardj20917d82002-05-28 01:36:45 +00001507 }
njnd3040452003-05-19 15:04:06 +00001508 SET_PTHREQ_RETVAL(tid, old_st ? PTHREAD_CANCEL_ENABLE
1509 : PTHREAD_CANCEL_DISABLE);
sewardj20917d82002-05-28 01:36:45 +00001510}
1511
1512
1513static
1514void do__set_canceltype ( ThreadId tid, Int type )
1515{
1516 Bool old_ty;
sewardj7989d0c2002-05-28 11:00:01 +00001517 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001518 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001519 if (VG_(clo_trace_sched)) {
1520 VG_(sprintf)(msg_buf, "set_canceltype to %d (%s)", type,
1521 type==PTHREAD_CANCEL_ASYNCHRONOUS
1522 ? "ASYNCHRONOUS"
1523 : (type==PTHREAD_CANCEL_DEFERRED ? "DEFERRED" : "???"));
1524 print_sched_event(tid, msg_buf);
1525 }
sewardj20917d82002-05-28 01:36:45 +00001526 old_ty = VG_(threads)[tid].cancel_ty;
1527 if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
1528 VG_(threads)[tid].cancel_ty = False;
1529 } else
1530 if (type == PTHREAD_CANCEL_DEFERRED) {
sewardjaf00b6d2002-05-29 23:30:28 +00001531 VG_(threads)[tid].cancel_ty = True;
sewardj20917d82002-05-28 01:36:45 +00001532 } else {
njne427a662002-10-02 11:08:25 +00001533 VG_(core_panic)("do__set_canceltype");
sewardj20917d82002-05-28 01:36:45 +00001534 }
njnd3040452003-05-19 15:04:06 +00001535 SET_PTHREQ_RETVAL(tid, old_ty ? PTHREAD_CANCEL_DEFERRED
sewardj20917d82002-05-28 01:36:45 +00001536 : PTHREAD_CANCEL_ASYNCHRONOUS);
1537}
1538
1539
sewardj7989d0c2002-05-28 11:00:01 +00001540/* Set or get the detach state for thread det. */
sewardj20917d82002-05-28 01:36:45 +00001541static
sewardj7989d0c2002-05-28 11:00:01 +00001542void do__set_or_get_detach ( ThreadId tid,
1543 Int what, ThreadId det )
sewardj20917d82002-05-28 01:36:45 +00001544{
sewardj7989d0c2002-05-28 11:00:01 +00001545 Char msg_buf[100];
1546 /* VG_(printf)("do__set_or_get_detach tid %d what %d det %d\n",
1547 tid, what, det); */
sewardj20917d82002-05-28 01:36:45 +00001548 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001549 if (VG_(clo_trace_sched)) {
1550 VG_(sprintf)(msg_buf, "set_or_get_detach %d (%s) for tid %d", what,
1551 what==0 ? "not-detached" : (
1552 what==1 ? "detached" : (
1553 what==2 ? "fetch old value" : "???")),
1554 det );
1555 print_sched_event(tid, msg_buf);
1556 }
1557
1558 if (!VG_(is_valid_tid)(det)) {
njnd3040452003-05-19 15:04:06 +00001559 SET_PTHREQ_RETVAL(tid, -1);
sewardj7989d0c2002-05-28 11:00:01 +00001560 return;
1561 }
1562
sewardj20917d82002-05-28 01:36:45 +00001563 switch (what) {
1564 case 2: /* get */
njnd3040452003-05-19 15:04:06 +00001565 SET_PTHREQ_RETVAL(tid, VG_(threads)[det].detached ? 1 : 0);
sewardj20917d82002-05-28 01:36:45 +00001566 return;
jsgf855d93d2003-10-13 22:26:55 +00001567 case 1:
sewardj7989d0c2002-05-28 11:00:01 +00001568 VG_(threads)[det].detached = True;
njnd3040452003-05-19 15:04:06 +00001569 SET_PTHREQ_RETVAL(tid, 0);
jsgf855d93d2003-10-13 22:26:55 +00001570 /* wake anyone who was joining on us */
1571 maybe_rendezvous_joiners_and_joinees();
sewardj20917d82002-05-28 01:36:45 +00001572 return;
1573 case 0: /* set not detached */
sewardj7989d0c2002-05-28 11:00:01 +00001574 VG_(threads)[det].detached = False;
njnd3040452003-05-19 15:04:06 +00001575 SET_PTHREQ_RETVAL(tid, 0);
sewardj20917d82002-05-28 01:36:45 +00001576 return;
1577 default:
njne427a662002-10-02 11:08:25 +00001578 VG_(core_panic)("do__set_or_get_detach");
sewardj20917d82002-05-28 01:36:45 +00001579 }
1580}
1581
1582
1583static
1584void do__set_cancelpend ( ThreadId tid,
1585 ThreadId cee,
1586 void (*cancelpend_hdlr)(void*) )
sewardje663cb92002-04-12 10:26:32 +00001587{
1588 Char msg_buf[100];
1589
sewardj20917d82002-05-28 01:36:45 +00001590 vg_assert(VG_(is_valid_tid)(tid));
1591 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1592
thughes97e54d22004-08-15 14:34:02 +00001593 if (!VG_(is_valid_tid)(cee) ||
1594 VG_(threads)[cee].status == VgTs_WaitJoiner) {
sewardj7989d0c2002-05-28 11:00:01 +00001595 if (VG_(clo_trace_sched)) {
1596 VG_(sprintf)(msg_buf,
1597 "set_cancelpend for invalid tid %d", cee);
1598 print_sched_event(tid, msg_buf);
1599 }
njn25e49d8e72002-09-23 09:36:25 +00001600 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001601 "pthread_cancel: target thread does not exist, or invalid");
jsgf855d93d2003-10-13 22:26:55 +00001602 SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
sewardj7989d0c2002-05-28 11:00:01 +00001603 return;
1604 }
sewardj20917d82002-05-28 01:36:45 +00001605
1606 VG_(threads)[cee].cancel_pend = cancelpend_hdlr;
1607
jsgf855d93d2003-10-13 22:26:55 +00001608 /* interrupt a pending syscall */
1609 VG_(proxy_abort_syscall)(cee);
1610
sewardj20917d82002-05-28 01:36:45 +00001611 if (VG_(clo_trace_sched)) {
1612 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001613 "set_cancelpend (hdlr = %p, set by tid %d)",
sewardj20917d82002-05-28 01:36:45 +00001614 cancelpend_hdlr, tid);
1615 print_sched_event(cee, msg_buf);
1616 }
1617
1618 /* Thread doing the cancelling returns with success. */
njnd3040452003-05-19 15:04:06 +00001619 SET_PTHREQ_RETVAL(tid, 0);
sewardj20917d82002-05-28 01:36:45 +00001620
1621 /* Perhaps we can nuke the cancellee right now? */
thughes513197c2004-06-13 12:07:53 +00001622 if (!VG_(threads)[cee].cancel_ty || /* if PTHREAD_CANCEL_ASYNCHRONOUS */
1623 (VG_(threads)[cee].status != VgTs_Runnable &&
1624 VG_(threads)[cee].status != VgTs_WaitMX)) {
jsgf855d93d2003-10-13 22:26:55 +00001625 do__testcancel(cee);
thughes513197c2004-06-13 12:07:53 +00001626 }
sewardj20917d82002-05-28 01:36:45 +00001627}
1628
1629
1630static
1631void do_pthread_join ( ThreadId tid,
1632 ThreadId jee, void** thread_return )
1633{
1634 Char msg_buf[100];
1635 ThreadId i;
sewardje663cb92002-04-12 10:26:32 +00001636 /* jee, the joinee, is the thread specified as an arg in thread
1637 tid's call to pthread_join. So tid is the join-er. */
sewardjb48e5002002-05-13 00:16:03 +00001638 vg_assert(VG_(is_valid_tid)(tid));
sewardj018f7622002-05-15 21:13:39 +00001639 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001640
1641 if (jee == tid) {
njn25e49d8e72002-09-23 09:36:25 +00001642 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001643 "pthread_join: attempt to join to self");
njnd3040452003-05-19 15:04:06 +00001644 SET_PTHREQ_RETVAL(tid, EDEADLK); /* libc constant, not a kernel one */
jsgf855d93d2003-10-13 22:26:55 +00001645 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001646 return;
1647 }
1648
sewardj20917d82002-05-28 01:36:45 +00001649 /* Flush any completed pairs, so as to make sure what we're looking
1650 at is up-to-date. */
1651 maybe_rendezvous_joiners_and_joinees();
1652
1653 /* Is this a sane request? */
jsgf855d93d2003-10-13 22:26:55 +00001654 if ( ! VG_(is_valid_tid)(jee) ||
1655 VG_(threads)[jee].detached) {
sewardje663cb92002-04-12 10:26:32 +00001656 /* Invalid thread to join to. */
njn25e49d8e72002-09-23 09:36:25 +00001657 VG_(record_pthread_error)( tid,
jsgf855d93d2003-10-13 22:26:55 +00001658 "pthread_join: target thread does not exist, invalid, or detached");
1659 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardje663cb92002-04-12 10:26:32 +00001660 return;
1661 }
1662
sewardj20917d82002-05-28 01:36:45 +00001663 /* Is anyone else already in a join-wait for jee? */
1664 for (i = 1; i < VG_N_THREADS; i++) {
1665 if (i == tid) continue;
1666 if (VG_(threads)[i].status == VgTs_WaitJoinee
1667 && VG_(threads)[i].joiner_jee_tid == jee) {
1668 /* Someone already did join on this thread */
njn25e49d8e72002-09-23 09:36:25 +00001669 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001670 "pthread_join: another thread already "
1671 "in join-wait for target thread");
jsgf855d93d2003-10-13 22:26:55 +00001672 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
1673 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardj20917d82002-05-28 01:36:45 +00001674 return;
1675 }
sewardje663cb92002-04-12 10:26:32 +00001676 }
1677
thughes513197c2004-06-13 12:07:53 +00001678 if(VG_(threads)[tid].cancel_pend != NULL &&
1679 VG_(threads)[tid].cancel_st) {
1680 make_thread_jump_to_cancelhdlr ( tid );
1681 } else {
1682 /* Mark this thread as waiting for the joinee. */
1683 VG_(threads)[tid].status = VgTs_WaitJoinee;
1684 VG_(threads)[tid].joiner_thread_return = thread_return;
1685 VG_(threads)[tid].joiner_jee_tid = jee;
1686
1687 /* Look for matching joiners and joinees and do the right thing. */
1688 maybe_rendezvous_joiners_and_joinees();
1689
 1690 /* Return value is irrelevant since this thread becomes
 1691 non-runnable. maybe_rendezvous_joiners_and_joinees() will cause
 1692 it to return the right value when it resumes. */
1693
1694 if (VG_(clo_trace_sched)) {
1695 VG_(sprintf)(msg_buf,
1696 "wait for joinee %d (may already be ready)", jee);
1697 print_sched_event(tid, msg_buf);
1698 }
sewardje663cb92002-04-12 10:26:32 +00001699 }
sewardje663cb92002-04-12 10:26:32 +00001700}
1701
1702
sewardj20917d82002-05-28 01:36:45 +00001703/* ( void* ): calling thread waits for joiner and returns the void* to
1704 it. This is one of two ways in which a thread can finally exit --
1705 the other is do__quit. */
sewardje663cb92002-04-12 10:26:32 +00001706static
sewardj20917d82002-05-28 01:36:45 +00001707void do__wait_joiner ( ThreadId tid, void* retval )
sewardje663cb92002-04-12 10:26:32 +00001708{
sewardj20917d82002-05-28 01:36:45 +00001709 Char msg_buf[100];
1710 vg_assert(VG_(is_valid_tid)(tid));
1711 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1712 if (VG_(clo_trace_sched)) {
1713 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001714 "do__wait_joiner(retval = %p) (non-detached thread exit)", retval);
sewardj20917d82002-05-28 01:36:45 +00001715 print_sched_event(tid, msg_buf);
1716 }
1717 VG_(threads)[tid].status = VgTs_WaitJoiner;
1718 VG_(threads)[tid].joinee_retval = retval;
1719 maybe_rendezvous_joiners_and_joinees();
1720}
1721
1722
1723/* ( no-args ): calling thread disappears from the system forever.
1724 Reclaim resources. */
1725static
1726void do__quit ( ThreadId tid )
1727{
1728 Char msg_buf[100];
1729 vg_assert(VG_(is_valid_tid)(tid));
1730 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1731 VG_(threads)[tid].status = VgTs_Empty; /* bye! */
jsgf855d93d2003-10-13 22:26:55 +00001732 cleanup_after_thread_exited ( tid, False );
sewardj20917d82002-05-28 01:36:45 +00001733 if (VG_(clo_trace_sched)) {
sewardj7989d0c2002-05-28 11:00:01 +00001734 VG_(sprintf)(msg_buf, "do__quit (detached thread exit)");
sewardj20917d82002-05-28 01:36:45 +00001735 print_sched_event(tid, msg_buf);
1736 }
jsgf855d93d2003-10-13 22:26:55 +00001737 maybe_rendezvous_joiners_and_joinees();
sewardj20917d82002-05-28 01:36:45 +00001738 /* Return value is irrelevant; this thread will not get
1739 rescheduled. */
1740}
1741
1742
nethercote6b9c8472004-09-13 13:16:40 +00001743/* Should never be entered. If it is, it will be running on the simulated CPU. */
sewardj20917d82002-05-28 01:36:45 +00001744static
1745void do__apply_in_new_thread_bogusRA ( void )
1746{
njne427a662002-10-02 11:08:25 +00001747 VG_(core_panic)("do__apply_in_new_thread_bogusRA");
sewardj20917d82002-05-28 01:36:45 +00001748}
1749
1750/* (Fn, Arg): Create a new thread and run Fn applied to Arg in it. Fn
1751 MUST NOT return -- ever. Eventually it will do either __QUIT or
1752 __WAIT_JOINER. Return the child tid to the parent. */
1753static
1754void do__apply_in_new_thread ( ThreadId parent_tid,
1755 void* (*fn)(void *),
thughesdaa34562004-06-27 12:48:53 +00001756 void* arg,
1757 StackInfo *si )
sewardj20917d82002-05-28 01:36:45 +00001758{
sewardje663cb92002-04-12 10:26:32 +00001759 Addr new_stack;
1760 UInt new_stk_szb;
1761 ThreadId tid;
1762 Char msg_buf[100];
1763
1764 /* Paranoia ... */
1765 vg_assert(sizeof(pthread_t) == sizeof(UInt));
1766
sewardj018f7622002-05-15 21:13:39 +00001767 vg_assert(VG_(threads)[parent_tid].status != VgTs_Empty);
sewardje663cb92002-04-12 10:26:32 +00001768
sewardj1e8cdc92002-04-18 11:37:52 +00001769 tid = vg_alloc_ThreadState();
sewardje663cb92002-04-12 10:26:32 +00001770
1771 /* If we've created the main thread's tid, we're in deep trouble :) */
sewardj6072c362002-04-19 14:40:57 +00001772 vg_assert(tid != 1);
nethercote36881a22004-08-04 14:03:16 +00001773 vg_assert(is_valid_or_empty_tid(tid));
sewardje663cb92002-04-12 10:26:32 +00001774
sewardjc4a810d2002-11-13 22:25:51 +00001775 /* do this early, before the child gets any memory writes */
1776 VG_TRACK ( post_thread_create, parent_tid, tid );
1777
sewardjf6374322002-11-13 22:35:55 +00001778 /* Create new thread with default attrs:
1779 deferred cancellation, not detached
1780 */
1781 mostly_clear_thread_record(tid);
1782 VG_(threads)[tid].status = VgTs_Runnable;
1783
sewardje663cb92002-04-12 10:26:32 +00001784 /* Copy the parent's CPU state into the child's, in a roundabout
1785 way (via baseBlock). */
nethercotef971ab72004-08-02 16:27:40 +00001786 load_thread_state(parent_tid);
nethercotef9b59412004-09-10 15:33:32 +00001787 VGA_(setup_child)( &VG_(threads)[tid].arch,
1788 &VG_(threads)[parent_tid].arch );
nethercotef971ab72004-08-02 16:27:40 +00001789 save_thread_state(tid);
sewardjf6374322002-11-13 22:35:55 +00001790 vg_tid_last_in_baseBlock = tid;
sewardje663cb92002-04-12 10:26:32 +00001791
1792 /* Consider allocating the child a stack, if the one it already has
1793 is inadequate. */
thughesdaa34562004-06-27 12:48:53 +00001794 new_stk_szb = si->size + VG_AR_CLIENT_STACKBASE_REDZONE_SZB + si->guardsize;
 1795 new_stk_szb = (new_stk_szb + VKI_BYTES_PER_PAGE - 1) & ~(VKI_BYTES_PER_PAGE - 1); /* round up to whole pages */
1796
1797 VG_(threads)[tid].stack_guard_size = si->guardsize;
sewardje663cb92002-04-12 10:26:32 +00001798
sewardj018f7622002-05-15 21:13:39 +00001799 if (new_stk_szb > VG_(threads)[tid].stack_size) {
sewardje663cb92002-04-12 10:26:32 +00001800 /* Again, for good measure :) We definitely don't want to be
1801 allocating a stack for the main thread. */
sewardj6072c362002-04-19 14:40:57 +00001802 vg_assert(tid != 1);
thughesdaa34562004-06-27 12:48:53 +00001803 if (VG_(threads)[tid].stack_size > 0)
1804 VG_(client_free)(VG_(threads)[tid].stack_base);
fitzhardinge98abfc72003-12-16 02:05:15 +00001805 new_stack = VG_(client_alloc)(0, new_stk_szb,
nethercotee567e702004-07-10 17:49:17 +00001806 VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
fitzhardinge98abfc72003-12-16 02:05:15 +00001807 SF_STACK);
nethercote8e9eab02004-07-11 18:01:06 +00001808 // Given the low number of threads Valgrind can handle, stack
1809 // allocation should pretty much always succeed, so having an
1810 // assertion here isn't too bad. However, probably better would be
1811 // this:
1812 //
1813 // if (0 == new_stack)
1814 // SET_PTHREQ_RETVAL(parent_tid, -VKI_EAGAIN);
1815 //
nethercotee567e702004-07-10 17:49:17 +00001816 vg_assert(0 != new_stack);
sewardj018f7622002-05-15 21:13:39 +00001817 VG_(threads)[tid].stack_base = new_stack;
1818 VG_(threads)[tid].stack_size = new_stk_szb;
1819 VG_(threads)[tid].stack_highest_word
sewardje663cb92002-04-12 10:26:32 +00001820 = new_stack + new_stk_szb
sewardj1e8cdc92002-04-18 11:37:52 +00001821 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB; /* -4 ??? */;
sewardje663cb92002-04-12 10:26:32 +00001822 }
sewardj1e8cdc92002-04-18 11:37:52 +00001823
njn25e49d8e72002-09-23 09:36:25 +00001824 /* Having got memory to hold the thread's stack:
1825 - set %esp as base + size
1826 - mark everything below %esp inaccessible
1827 - mark redzone at stack end inaccessible
1828 */
njnd3040452003-05-19 15:04:06 +00001829 SET_PTHREQ_ESP(tid, VG_(threads)[tid].stack_base
1830 + VG_(threads)[tid].stack_size
1831 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
sewardj1e8cdc92002-04-18 11:37:52 +00001832
njn25e49d8e72002-09-23 09:36:25 +00001833 VG_TRACK ( die_mem_stack, VG_(threads)[tid].stack_base,
thughesdaa34562004-06-27 12:48:53 +00001834 VG_(threads)[tid].stack_size
1835 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
nethercote6b9c8472004-09-13 13:16:40 +00001836 VG_TRACK ( ban_mem_stack, ARCH_STACK_PTR(VG_(threads)[tid].arch),
njn25e49d8e72002-09-23 09:36:25 +00001837 VG_AR_CLIENT_STACKBASE_REDZONE_SZB );
sewardje663cb92002-04-12 10:26:32 +00001838
nethercote6b9c8472004-09-13 13:16:40 +00001839 VGA_(thread_initial_stack)(tid, (UWord)arg,
1840 (Addr)&do__apply_in_new_thread_bogusRA);
sewardje663cb92002-04-12 10:26:32 +00001841
1842 /* this is where we start */
nethercoteb8ef9d82004-09-05 22:02:33 +00001843 ARCH_INSTR_PTR(VG_(threads)[tid].arch) = (UInt)fn;
sewardje663cb92002-04-12 10:26:32 +00001844
sewardj8937c812002-04-12 20:12:20 +00001845 if (VG_(clo_trace_sched)) {
njn25e49d8e72002-09-23 09:36:25 +00001846 VG_(sprintf)(msg_buf, "new thread, created by %d", parent_tid );
sewardje663cb92002-04-12 10:26:32 +00001847 print_sched_event(tid, msg_buf);
1848 }
1849
fitzhardingef7866182004-03-16 22:09:12 +00001850 /* Start the thread with all signals blocked; it's up to the client
1851 code to set the right signal mask when it's ready. */
1852 VG_(ksigfillset)(&VG_(threads)[tid].sig_mask);
jsgf855d93d2003-10-13 22:26:55 +00001853
1854 /* Now that the signal mask is set up, create a proxy LWP for this thread */
1855 VG_(proxy_create)(tid);
1856
1857 /* Set the proxy's signal mask */
1858 VG_(proxy_setsigmask)(tid);
sewardjb48e5002002-05-13 00:16:03 +00001859
sewardj20917d82002-05-28 01:36:45 +00001860 /* return child's tid to parent */
njnd3040452003-05-19 15:04:06 +00001861 SET_PTHREQ_RETVAL(parent_tid, tid); /* success */
sewardje663cb92002-04-12 10:26:32 +00001862}
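/* Client-side counterpart of the stack sizing above.  The stack size
   and guard size are chosen by the application through pthread
   attributes; plausibly these are what end up in si->size and
   si->guardsize here, though the plumbing lives in the client-side
   library, not in this file.  Ordinary application code, sketch only. */
#if 0
#include <pthread.h>

static void* fn ( void* arg ) { return arg; }

int main ( void )
{
   pthread_t      th;
   pthread_attr_t at;
   pthread_attr_init(&at);
   pthread_attr_setstacksize(&at, 256 * 1024);
   pthread_attr_setguardsize(&at, 8192);
   pthread_create(&th, &at, fn, NULL);
   pthread_join(th, NULL);
   pthread_attr_destroy(&at);
   return 0;
}
#endif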
1863
1864
sewardj604ec3c2002-04-18 22:38:41 +00001865/* -----------------------------------------------------------
1866 MUTEXes
1867 -------------------------------------------------------- */
1868
rjwalsh7109a8c2004-09-02 00:31:02 +00001869/* vg_pthread_mutex_t is defined in core.h.
sewardj604ec3c2002-04-18 22:38:41 +00001870
nethercote1f0173b2004-02-28 15:40:36 +00001871 The initializers zero everything, except possibly the fourth word,
1872 which in vg_pthread_mutex_t is the __vg_m_kind field. It gets set to one
1873 of PTHREAD_MUTEX_{TIMED,RECURSIVE,ERRORCHECK,ADAPTIVE}_NP
sewardj604ec3c2002-04-18 22:38:41 +00001874
sewardj6072c362002-04-19 14:40:57 +00001875 How we use it:
sewardj604ec3c2002-04-18 22:38:41 +00001876
nethercote1f0173b2004-02-28 15:40:36 +00001877 __vg_m_kind never changes and indicates whether or not it is recursive.
sewardj6072c362002-04-19 14:40:57 +00001878
nethercote1f0173b2004-02-28 15:40:36 +00001879 __vg_m_count indicates the lock count; if 0, the mutex is not owned by
sewardj6072c362002-04-19 14:40:57 +00001880 anybody.
1881
nethercote1f0173b2004-02-28 15:40:36 +00001882 __vg_m_owner has a ThreadId value stuffed into it. We carefully arrange
sewardj6072c362002-04-19 14:40:57 +00001883 that ThreadId == 0 is invalid (VG_INVALID_THREADID), so that
1884 statically initialised mutexes correctly appear
1885 to belong to nobody.
1886
nethercote1f0173b2004-02-28 15:40:36 +00001887 In summary, a not-in-use mutex is distinguished by having __vg_m_owner
1888 == 0 (VG_INVALID_THREADID) and __vg_m_count == 0 too. If one of those
sewardj6072c362002-04-19 14:40:57 +00001889 conditions holds, the other should too.
1890
1891 There is no linked list of threads waiting for this mutex. Instead
 1892 a thread in WaitMX state points at the mutex with its associated_mx
 1893 field. This makes _unlock() inefficient, but makes it simple to
 1894 implement the right semantics vis-a-vis signals.
sewardje663cb92002-04-12 10:26:32 +00001895
sewardj604ec3c2002-04-18 22:38:41 +00001896 We don't have to deal with mutex initialisation; the client side
sewardj6072c362002-04-19 14:40:57 +00001897 deals with that for us.
1898*/
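/* The invariants described above, restated as predicates (a sketch
   only; these helpers are not used by the scheduler and their names
   are invented for the example). */
#if 0
static Bool mutex_is_unheld ( vg_pthread_mutex_t* mx )
{
   /* not-in-use  <=>  no owner AND zero lock count */
   return mx->__vg_m_owner == VG_INVALID_THREADID
          && mx->__vg_m_count == 0;
}

static Bool mutex_is_held_by ( vg_pthread_mutex_t* mx, ThreadId tid )
{
   /* held by tid  <=>  positive lock count AND tid is the owner;
      only recursive mutexes may push __vg_m_count above 1 */
   return mx->__vg_m_count > 0
          && (ThreadId)mx->__vg_m_owner == tid;
}
#endif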
sewardje663cb92002-04-12 10:26:32 +00001899
sewardj3b5d8862002-04-20 13:53:23 +00001900/* Helper fns ... */
thughese321d492004-10-17 15:00:20 +00001901static
1902void do_pthread_mutex_timedlock_TIMEOUT ( ThreadId tid )
1903{
1904 Char msg_buf[100];
1905 vg_pthread_mutex_t* mx;
1906
1907 vg_assert(VG_(is_valid_tid)(tid)
1908 && VG_(threads)[tid].status == VgTs_WaitMX
1909 && VG_(threads)[tid].awaken_at != 0xFFFFFFFF);
1910 mx = VG_(threads)[tid].associated_mx;
1911 vg_assert(mx != NULL);
1912
1913 VG_(threads)[tid].status = VgTs_Runnable;
 1914 SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_mutex_timedlock return value */
1915 VG_(threads)[tid].associated_mx = NULL;
1916
1917 if (VG_(clo_trace_pthread_level) >= 1) {
1918 VG_(sprintf)(msg_buf, "pthread_mutex_timedlock mx %p: TIMEOUT", mx);
1919 print_pthread_event(tid, msg_buf);
1920 }
1921}
1922
1923
sewardj3b5d8862002-04-20 13:53:23 +00001924static
nethercote1f0173b2004-02-28 15:40:36 +00001925void release_one_thread_waiting_on_mutex ( vg_pthread_mutex_t* mutex,
sewardj3b5d8862002-04-20 13:53:23 +00001926 Char* caller )
1927{
1928 Int i;
1929 Char msg_buf[100];
1930
1931 /* Find some arbitrary thread waiting on this mutex, and make it
1932 runnable. If none are waiting, mark the mutex as not held. */
1933 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00001934 if (VG_(threads)[i].status == VgTs_Empty)
sewardj3b5d8862002-04-20 13:53:23 +00001935 continue;
sewardj018f7622002-05-15 21:13:39 +00001936 if (VG_(threads)[i].status == VgTs_WaitMX
1937 && VG_(threads)[i].associated_mx == mutex)
sewardj3b5d8862002-04-20 13:53:23 +00001938 break;
1939 }
1940
nethercote1f0173b2004-02-28 15:40:36 +00001941 VG_TRACK( post_mutex_unlock, (ThreadId)mutex->__vg_m_owner, mutex );
sewardj0af43bc2002-10-22 04:30:35 +00001942
sewardj3b5d8862002-04-20 13:53:23 +00001943 vg_assert(i <= VG_N_THREADS);
1944 if (i == VG_N_THREADS) {
1945 /* Nobody else is waiting on it. */
nethercote1f0173b2004-02-28 15:40:36 +00001946 mutex->__vg_m_count = 0;
1947 mutex->__vg_m_owner = VG_INVALID_THREADID;
sewardj3b5d8862002-04-20 13:53:23 +00001948 } else {
1949 /* Notionally transfer the hold to thread i, whose
1950 pthread_mutex_lock() call now returns with 0 (success). */
1951 /* The .count is already == 1. */
sewardj018f7622002-05-15 21:13:39 +00001952 vg_assert(VG_(threads)[i].associated_mx == mutex);
nethercote1f0173b2004-02-28 15:40:36 +00001953 mutex->__vg_m_owner = (/*_pthread_descr*/void*)i;
sewardj018f7622002-05-15 21:13:39 +00001954 VG_(threads)[i].status = VgTs_Runnable;
1955 VG_(threads)[i].associated_mx = NULL;
sewardj5f07b662002-04-23 16:52:51 +00001956 /* m_edx already holds pth_mx_lock() success (0) */
sewardj3b5d8862002-04-20 13:53:23 +00001957
sewardj0af43bc2002-10-22 04:30:35 +00001958 VG_TRACK( post_mutex_lock, (ThreadId)i, mutex);
1959
sewardj3b5d8862002-04-20 13:53:23 +00001960 if (VG_(clo_trace_pthread_level) >= 1) {
1961 VG_(sprintf)(msg_buf, "%s mx %p: RESUME",
1962 caller, mutex );
1963 print_pthread_event(i, msg_buf);
1964 }
1965 }
1966}
1967
sewardje663cb92002-04-12 10:26:32 +00001968
1969static
sewardj30671ff2002-04-21 00:13:57 +00001970void do_pthread_mutex_lock( ThreadId tid,
1971 Bool is_trylock,
thughese321d492004-10-17 15:00:20 +00001972 vg_pthread_mutex_t* mutex,
1973 UInt ms_end )
sewardje663cb92002-04-12 10:26:32 +00001974{
sewardj30671ff2002-04-21 00:13:57 +00001975 Char msg_buf[100];
1976 Char* caller
sewardj8ccc2be2002-05-10 20:26:37 +00001977 = is_trylock ? "pthread_mutex_trylock"
1978 : "pthread_mutex_lock ";
sewardje663cb92002-04-12 10:26:32 +00001979
thughese321d492004-10-17 15:00:20 +00001980 /* If ms_end == 0xFFFFFFFF, wait forever (no timeout). Otherwise,
1981 ms_end is the ending millisecond. */
1982
sewardj604ec3c2002-04-18 22:38:41 +00001983 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj30671ff2002-04-21 00:13:57 +00001984 VG_(sprintf)(msg_buf, "%s mx %p ...", caller, mutex );
sewardj604ec3c2002-04-18 22:38:41 +00001985 print_pthread_event(tid, msg_buf);
1986 }
1987
1988 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00001989 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00001990 && VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001991
1992 /* POSIX doesn't mandate this, but for sanity ... */
1993 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00001994 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001995 "pthread_mutex_lock/trylock: mutex is NULL");
jsgf855d93d2003-10-13 22:26:55 +00001996 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardje663cb92002-04-12 10:26:32 +00001997 return;
1998 }
1999
sewardj604ec3c2002-04-18 22:38:41 +00002000 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00002001 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002002# ifndef GLIBC_2_1
sewardj604ec3c2002-04-18 22:38:41 +00002003 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002004 case PTHREAD_MUTEX_ADAPTIVE_NP:
2005# endif
sewardja1679dd2002-05-10 22:31:40 +00002006# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002007 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002008# endif
sewardj604ec3c2002-04-18 22:38:41 +00002009 case PTHREAD_MUTEX_RECURSIVE_NP:
2010 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002011 if (mutex->__vg_m_count >= 0) break;
sewardj604ec3c2002-04-18 22:38:41 +00002012 /* else fall thru */
2013 default:
njn25e49d8e72002-09-23 09:36:25 +00002014 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002015 "pthread_mutex_lock/trylock: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002016 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002017 return;
sewardje663cb92002-04-12 10:26:32 +00002018 }
2019
nethercote1f0173b2004-02-28 15:40:36 +00002020 if (mutex->__vg_m_count > 0) {
2021 if (!VG_(is_valid_tid)((ThreadId)mutex->__vg_m_owner)) {
fitzhardinge47735af2004-01-21 01:27:27 +00002022 VG_(record_pthread_error)( tid,
2023 "pthread_mutex_lock/trylock: mutex has invalid owner");
2024 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
2025 return;
2026 }
sewardjf8f819e2002-04-17 23:21:37 +00002027
2028 /* Someone has it already. */
thughese321d492004-10-17 15:00:20 +00002029 if ((ThreadId)mutex->__vg_m_owner == tid && ms_end == 0xFFFFFFFF) {
sewardjf8f819e2002-04-17 23:21:37 +00002030 /* It's locked -- by me! */
nethercote1f0173b2004-02-28 15:40:36 +00002031 if (mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
sewardjf8f819e2002-04-17 23:21:37 +00002032 /* return 0 (success). */
nethercote1f0173b2004-02-28 15:40:36 +00002033 mutex->__vg_m_count++;
njnd3040452003-05-19 15:04:06 +00002034 SET_PTHREQ_RETVAL(tid, 0);
sewardj853f55d2002-04-26 00:27:53 +00002035 if (0)
2036 VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n",
nethercote1f0173b2004-02-28 15:40:36 +00002037 tid, mutex, mutex->__vg_m_count);
sewardjf8f819e2002-04-17 23:21:37 +00002038 return;
2039 } else {
sewardj30671ff2002-04-21 00:13:57 +00002040 if (is_trylock)
njnd3040452003-05-19 15:04:06 +00002041 SET_PTHREQ_RETVAL(tid, EBUSY);
sewardj30671ff2002-04-21 00:13:57 +00002042 else
njnd3040452003-05-19 15:04:06 +00002043 SET_PTHREQ_RETVAL(tid, EDEADLK);
sewardjf8f819e2002-04-17 23:21:37 +00002044 return;
2045 }
2046 } else {
sewardj6072c362002-04-19 14:40:57 +00002047 /* Someone else has it; we have to wait. Mark ourselves
2048 thusly. */
nethercote1f0173b2004-02-28 15:40:36 +00002049 /* GUARD: __vg_m_count > 0 && __vg_m_owner is valid */
sewardj30671ff2002-04-21 00:13:57 +00002050 if (is_trylock) {
2051 /* caller is polling; so return immediately. */
njnd3040452003-05-19 15:04:06 +00002052 SET_PTHREQ_RETVAL(tid, EBUSY);
sewardj30671ff2002-04-21 00:13:57 +00002053 } else {
sewardjdca84112002-11-13 22:29:34 +00002054 VG_TRACK ( pre_mutex_lock, tid, mutex );
2055
sewardj018f7622002-05-15 21:13:39 +00002056 VG_(threads)[tid].status = VgTs_WaitMX;
2057 VG_(threads)[tid].associated_mx = mutex;
thughese321d492004-10-17 15:00:20 +00002058 VG_(threads)[tid].awaken_at = ms_end;
2059 if (ms_end != 0xFFFFFFFF)
2060 add_timeout(tid, ms_end);
njnd3040452003-05-19 15:04:06 +00002061 SET_PTHREQ_RETVAL(tid, 0); /* pth_mx_lock success value */
sewardj30671ff2002-04-21 00:13:57 +00002062 if (VG_(clo_trace_pthread_level) >= 1) {
2063 VG_(sprintf)(msg_buf, "%s mx %p: BLOCK",
2064 caller, mutex );
2065 print_pthread_event(tid, msg_buf);
2066 }
2067 }
sewardje663cb92002-04-12 10:26:32 +00002068 return;
2069 }
sewardjf8f819e2002-04-17 23:21:37 +00002070
sewardje663cb92002-04-12 10:26:32 +00002071 } else {
sewardj6072c362002-04-19 14:40:57 +00002072 /* Nobody owns it. Sanity check ... */
nethercote1f0173b2004-02-28 15:40:36 +00002073 vg_assert(mutex->__vg_m_owner == VG_INVALID_THREADID);
sewardjdca84112002-11-13 22:29:34 +00002074
2075 VG_TRACK ( pre_mutex_lock, tid, mutex );
2076
sewardjf8f819e2002-04-17 23:21:37 +00002077 /* We get it! [for the first time]. */
nethercote1f0173b2004-02-28 15:40:36 +00002078 mutex->__vg_m_count = 1;
2079 mutex->__vg_m_owner = (/*_pthread_descr*/void*)tid;
njn25e49d8e72002-09-23 09:36:25 +00002080
sewardje663cb92002-04-12 10:26:32 +00002081 /* return 0 (success). */
njnd3040452003-05-19 15:04:06 +00002082 SET_PTHREQ_RETVAL(tid, 0);
sewardjf8f819e2002-04-17 23:21:37 +00002083
njnd3040452003-05-19 15:04:06 +00002084 VG_TRACK( post_mutex_lock, tid, mutex);
2085 }
sewardje663cb92002-04-12 10:26:32 +00002086}
2087
2088
2089static
2090void do_pthread_mutex_unlock ( ThreadId tid,
nethercote1f0173b2004-02-28 15:40:36 +00002091 vg_pthread_mutex_t* mutex )
sewardje663cb92002-04-12 10:26:32 +00002092{
sewardj3b5d8862002-04-20 13:53:23 +00002093 Char msg_buf[100];
sewardje663cb92002-04-12 10:26:32 +00002094
sewardj45b4b372002-04-16 22:50:32 +00002095 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj3b5d8862002-04-20 13:53:23 +00002096 VG_(sprintf)(msg_buf, "pthread_mutex_unlock mx %p ...", mutex );
sewardj8937c812002-04-12 20:12:20 +00002097 print_pthread_event(tid, msg_buf);
2098 }
2099
sewardj604ec3c2002-04-18 22:38:41 +00002100 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002101 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002102 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj604ec3c2002-04-18 22:38:41 +00002103
2104 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002105 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002106 "pthread_mutex_unlock: mutex is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002107 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002108 return;
2109 }
2110
2111 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00002112 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002113# ifndef GLIBC_2_1
sewardj604ec3c2002-04-18 22:38:41 +00002114 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002115 case PTHREAD_MUTEX_ADAPTIVE_NP:
2116# endif
sewardja1679dd2002-05-10 22:31:40 +00002117# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002118 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002119# endif
sewardj604ec3c2002-04-18 22:38:41 +00002120 case PTHREAD_MUTEX_RECURSIVE_NP:
2121 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002122 if (mutex->__vg_m_count >= 0) break;
sewardj604ec3c2002-04-18 22:38:41 +00002123 /* else fall thru */
2124 default:
njn25e49d8e72002-09-23 09:36:25 +00002125 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002126 "pthread_mutex_unlock: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002127 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002128 return;
2129 }
sewardje663cb92002-04-12 10:26:32 +00002130
2131 /* Barf if we don't currently hold the mutex. */
nethercote1f0173b2004-02-28 15:40:36 +00002132 if (mutex->__vg_m_count == 0) {
sewardj4dced352002-06-04 22:54:20 +00002133 /* nobody holds it */
njn25e49d8e72002-09-23 09:36:25 +00002134 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002135 "pthread_mutex_unlock: mutex is not locked");
njnd3040452003-05-19 15:04:06 +00002136 SET_PTHREQ_RETVAL(tid, EPERM);
sewardj4dced352002-06-04 22:54:20 +00002137 return;
2138 }
2139
nethercote1f0173b2004-02-28 15:40:36 +00002140 if ((ThreadId)mutex->__vg_m_owner != tid) {
sewardj4dced352002-06-04 22:54:20 +00002141 /* we don't hold it */
njn25e49d8e72002-09-23 09:36:25 +00002142 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002143 "pthread_mutex_unlock: mutex is locked by a different thread");
njnd3040452003-05-19 15:04:06 +00002144 SET_PTHREQ_RETVAL(tid, EPERM);
sewardje663cb92002-04-12 10:26:32 +00002145 return;
2146 }
2147
sewardjf8f819e2002-04-17 23:21:37 +00002148 /* If it's a multiply-locked recursive mutex, just decrement the
2149 lock count and return. */
nethercote1f0173b2004-02-28 15:40:36 +00002150 if (mutex->__vg_m_count > 1) {
2151 vg_assert(mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
2152 mutex->__vg_m_count --;
njnd3040452003-05-19 15:04:06 +00002153 SET_PTHREQ_RETVAL(tid, 0); /* success */
sewardjf8f819e2002-04-17 23:21:37 +00002154 return;
2155 }
2156
sewardj604ec3c2002-04-18 22:38:41 +00002157 /* Now we're sure it is locked exactly once, and by the thread who
sewardjf8f819e2002-04-17 23:21:37 +00002158 is now doing an unlock on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002159 vg_assert(mutex->__vg_m_count == 1);
2160 vg_assert((ThreadId)mutex->__vg_m_owner == tid);
sewardjf8f819e2002-04-17 23:21:37 +00002161
sewardj3b5d8862002-04-20 13:53:23 +00002162 /* Release at max one thread waiting on this mutex. */
2163 release_one_thread_waiting_on_mutex ( mutex, "pthread_mutex_lock" );
sewardje663cb92002-04-12 10:26:32 +00002164
sewardj3b5d8862002-04-20 13:53:23 +00002165 /* Our (tid's) pth_unlock() returns with 0 (success). */
njnd3040452003-05-19 15:04:06 +00002166 SET_PTHREQ_RETVAL(tid, 0); /* Success. */
sewardje663cb92002-04-12 10:26:32 +00002167}
2168
2169
sewardj6072c362002-04-19 14:40:57 +00002170/* -----------------------------------------------------------
2171 CONDITION VARIABLES
2172 -------------------------------------------------------- */
sewardje663cb92002-04-12 10:26:32 +00002173
rjwalsh7109a8c2004-09-02 00:31:02 +00002174/* The relevant type (vg_pthread_cond_t) is in core.h.
sewardj77e466c2002-04-14 02:29:29 +00002175
nethercote1f0173b2004-02-28 15:40:36 +00002176 We don't use any fields of vg_pthread_cond_t for anything at all.
2177 Only the identity of the CVs is important. (Actually, we initialise
2178 __vg_c_waiting in pthread_cond_init() to VG_INVALID_THREADID.)
sewardj6072c362002-04-19 14:40:57 +00002179
2180 Linux pthreads supports no attributes on condition variables, so we
sewardj3b5d8862002-04-20 13:53:23 +00002181 don't need to think too hard there. */
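/* The client-side shape that the CV handling below supports: the
   caller must hold the mutex around pthread_cond_wait, and on return
   the mutex is held again -- the hand-off that do_pthread_cond_wait
   and release_N_threads_waiting_on_cond arrange.  Ordinary application
   code, for orientation only. */
#if 0
#include <pthread.h>

static pthread_mutex_t mu    = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv    = PTHREAD_COND_INITIALIZER;
static int             ready = 0;

static void* waiter ( void* unused )
{
   pthread_mutex_lock(&mu);
   while (!ready)                     /* re-check: spurious wakeups allowed */
      pthread_cond_wait(&cv, &mu);    /* releases mu, sleeps, re-locks mu */
   pthread_mutex_unlock(&mu);
   return NULL;
}

int main ( void )
{
   pthread_t th;
   pthread_create(&th, NULL, waiter, NULL);
   pthread_mutex_lock(&mu);
   ready = 1;
   pthread_cond_signal(&cv);          /* wakes at most one waiter */
   pthread_mutex_unlock(&mu);
   pthread_join(th, NULL);
   return 0;
}
#endif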
sewardj6072c362002-04-19 14:40:57 +00002182
sewardj77e466c2002-04-14 02:29:29 +00002183
sewardj5f07b662002-04-23 16:52:51 +00002184static
2185void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid )
2186{
2187 Char msg_buf[100];
nethercote1f0173b2004-02-28 15:40:36 +00002188 vg_pthread_mutex_t* mx;
2189 vg_pthread_cond_t* cv;
sewardj5f07b662002-04-23 16:52:51 +00002190
sewardjb48e5002002-05-13 00:16:03 +00002191 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002192 && VG_(threads)[tid].status == VgTs_WaitCV
2193 && VG_(threads)[tid].awaken_at != 0xFFFFFFFF);
2194 mx = VG_(threads)[tid].associated_mx;
sewardj5f07b662002-04-23 16:52:51 +00002195 vg_assert(mx != NULL);
sewardj018f7622002-05-15 21:13:39 +00002196 cv = VG_(threads)[tid].associated_cv;
sewardj5f07b662002-04-23 16:52:51 +00002197 vg_assert(cv != NULL);
2198
nethercote1f0173b2004-02-28 15:40:36 +00002199 if (mx->__vg_m_owner == VG_INVALID_THREADID) {
sewardj5f07b662002-04-23 16:52:51 +00002200 /* Currently unheld; hand it out to thread tid. */
nethercote1f0173b2004-02-28 15:40:36 +00002201 vg_assert(mx->__vg_m_count == 0);
sewardj018f7622002-05-15 21:13:39 +00002202 VG_(threads)[tid].status = VgTs_Runnable;
njnd3040452003-05-19 15:04:06 +00002203 SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
sewardj018f7622002-05-15 21:13:39 +00002204 VG_(threads)[tid].associated_cv = NULL;
2205 VG_(threads)[tid].associated_mx = NULL;
nethercote1f0173b2004-02-28 15:40:36 +00002206 mx->__vg_m_owner = (/*_pthread_descr*/void*)tid;
2207 mx->__vg_m_count = 1;
sewardj5f07b662002-04-23 16:52:51 +00002208
sewardj0af43bc2002-10-22 04:30:35 +00002209 VG_TRACK( post_mutex_lock, tid, mx );
2210
sewardj5f07b662002-04-23 16:52:51 +00002211 if (VG_(clo_trace_pthread_level) >= 1) {
sewardjc3bd5f52002-05-01 03:24:23 +00002212 VG_(sprintf)(msg_buf,
nethercote1f0173b2004-02-28 15:40:36 +00002213 "pthread_cond_timedwait cv %p: TIMEOUT with mx %p",
sewardjc3bd5f52002-05-01 03:24:23 +00002214 cv, mx );
sewardj5f07b662002-04-23 16:52:51 +00002215 print_pthread_event(tid, msg_buf);
2216 }
2217 } else {
2218 /* Currently held. Make thread tid be blocked on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002219 vg_assert(mx->__vg_m_count > 0);
sewardjdca84112002-11-13 22:29:34 +00002220 VG_TRACK( pre_mutex_lock, tid, mx );
2221
sewardj018f7622002-05-15 21:13:39 +00002222 VG_(threads)[tid].status = VgTs_WaitMX;
njnd3040452003-05-19 15:04:06 +00002223 SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
sewardj018f7622002-05-15 21:13:39 +00002224 VG_(threads)[tid].associated_cv = NULL;
2225 VG_(threads)[tid].associated_mx = mx;
sewardj5f07b662002-04-23 16:52:51 +00002226 if (VG_(clo_trace_pthread_level) >= 1) {
2227 VG_(sprintf)(msg_buf,
nethercote1f0173b2004-02-28 15:40:36 +00002228 "pthread_cond_timedwait cv %p: TIMEOUT -> BLOCK for mx %p",
sewardj5f07b662002-04-23 16:52:51 +00002229 cv, mx );
2230 print_pthread_event(tid, msg_buf);
2231 }
sewardj5f07b662002-04-23 16:52:51 +00002232 }
2233}
2234
2235
sewardj3b5d8862002-04-20 13:53:23 +00002236static
nethercote1f0173b2004-02-28 15:40:36 +00002237void release_N_threads_waiting_on_cond ( vg_pthread_cond_t* cond,
sewardj3b5d8862002-04-20 13:53:23 +00002238 Int n_to_release,
2239 Char* caller )
2240{
2241 Int i;
2242 Char msg_buf[100];
nethercote1f0173b2004-02-28 15:40:36 +00002243 vg_pthread_mutex_t* mx;
sewardj3b5d8862002-04-20 13:53:23 +00002244
2245 while (True) {
2246 if (n_to_release == 0)
2247 return;
2248
2249 /* Find a thread waiting on this CV. */
2250 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00002251 if (VG_(threads)[i].status == VgTs_Empty)
sewardj3b5d8862002-04-20 13:53:23 +00002252 continue;
sewardj018f7622002-05-15 21:13:39 +00002253 if (VG_(threads)[i].status == VgTs_WaitCV
2254 && VG_(threads)[i].associated_cv == cond)
sewardj3b5d8862002-04-20 13:53:23 +00002255 break;
2256 }
2257 vg_assert(i <= VG_N_THREADS);
2258
2259 if (i == VG_N_THREADS) {
2260 /* Nobody else is waiting on it. */
2261 return;
2262 }
2263
sewardj018f7622002-05-15 21:13:39 +00002264 mx = VG_(threads)[i].associated_mx;
sewardj3b5d8862002-04-20 13:53:23 +00002265 vg_assert(mx != NULL);
2266
sewardjdca84112002-11-13 22:29:34 +00002267 VG_TRACK( pre_mutex_lock, i, mx );
2268
nethercote1f0173b2004-02-28 15:40:36 +00002269 if (mx->__vg_m_owner == VG_INVALID_THREADID) {
sewardj3b5d8862002-04-20 13:53:23 +00002270 /* Currently unheld; hand it out to thread i. */
nethercote1f0173b2004-02-28 15:40:36 +00002271 vg_assert(mx->__vg_m_count == 0);
sewardj018f7622002-05-15 21:13:39 +00002272 VG_(threads)[i].status = VgTs_Runnable;
2273 VG_(threads)[i].associated_cv = NULL;
2274 VG_(threads)[i].associated_mx = NULL;
nethercote1f0173b2004-02-28 15:40:36 +00002275 mx->__vg_m_owner = (/*_pthread_descr*/void*)i;
2276 mx->__vg_m_count = 1;
sewardj5f07b662002-04-23 16:52:51 +00002277 /* .m_edx already holds pth_cond_wait success value (0) */
sewardj3b5d8862002-04-20 13:53:23 +00002278
sewardj0af43bc2002-10-22 04:30:35 +00002279 VG_TRACK( post_mutex_lock, i, mx );
2280
sewardj3b5d8862002-04-20 13:53:23 +00002281 if (VG_(clo_trace_pthread_level) >= 1) {
2282 VG_(sprintf)(msg_buf, "%s cv %p: RESUME with mx %p",
2283 caller, cond, mx );
2284 print_pthread_event(i, msg_buf);
2285 }
2286
2287 } else {
2288 /* Currently held. Make thread i be blocked on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002289 vg_assert(mx->__vg_m_count > 0);
sewardj018f7622002-05-15 21:13:39 +00002290 VG_(threads)[i].status = VgTs_WaitMX;
2291 VG_(threads)[i].associated_cv = NULL;
2292 VG_(threads)[i].associated_mx = mx;
njnd3040452003-05-19 15:04:06 +00002293 SET_PTHREQ_RETVAL(i, 0); /* pth_cond_wait success value */
sewardj3b5d8862002-04-20 13:53:23 +00002294
2295 if (VG_(clo_trace_pthread_level) >= 1) {
2296 VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
2297 caller, cond, mx );
2298 print_pthread_event(i, msg_buf);
2299 }
2300
2301 }
jsgf855d93d2003-10-13 22:26:55 +00002302
sewardj3b5d8862002-04-20 13:53:23 +00002303 n_to_release--;
2304 }
2305}
2306
2307
2308static
2309void do_pthread_cond_wait ( ThreadId tid,
nethercote1f0173b2004-02-28 15:40:36 +00002310 vg_pthread_cond_t *cond,
2311 vg_pthread_mutex_t *mutex,
sewardj5f07b662002-04-23 16:52:51 +00002312 UInt ms_end )
sewardj3b5d8862002-04-20 13:53:23 +00002313{
2314 Char msg_buf[100];
2315
sewardj5f07b662002-04-23 16:52:51 +00002316 /* If ms_end == 0xFFFFFFFF, wait forever (no timeout). Otherwise,
2317 ms_end is the ending millisecond. */
2318
sewardj3b5d8862002-04-20 13:53:23 +00002319 /* pre: mutex should be a valid mutex and owned by tid. */
2320 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj5f07b662002-04-23 16:52:51 +00002321 VG_(sprintf)(msg_buf, "pthread_cond_wait cv %p, mx %p, end %d ...",
2322 cond, mutex, ms_end );
sewardj3b5d8862002-04-20 13:53:23 +00002323 print_pthread_event(tid, msg_buf);
2324 }
2325
2326 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002327 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002328 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj3b5d8862002-04-20 13:53:23 +00002329
nethercoted3693d02004-04-26 08:05:24 +00002330 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002331 VG_(record_pthread_error)( tid,
nethercoted3693d02004-04-26 08:05:24 +00002332 "pthread_cond_wait/timedwait: mutex is NULL");
2333 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
2334 return;
2335 }
2336
2337 if (cond == NULL) {
2338 VG_(record_pthread_error)( tid,
2339 "pthread_cond_wait/timedwait: cond is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002340 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002341 return;
2342 }
2343
2344 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00002345 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002346# ifndef GLIBC_2_1
sewardj3b5d8862002-04-20 13:53:23 +00002347 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002348 case PTHREAD_MUTEX_ADAPTIVE_NP:
2349# endif
sewardja1679dd2002-05-10 22:31:40 +00002350# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002351 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002352# endif
sewardj3b5d8862002-04-20 13:53:23 +00002353 case PTHREAD_MUTEX_RECURSIVE_NP:
2354 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002355 if (mutex->__vg_m_count >= 0) break;
sewardj3b5d8862002-04-20 13:53:23 +00002356 /* else fall thru */
2357 default:
njn25e49d8e72002-09-23 09:36:25 +00002358 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002359 "pthread_cond_wait/timedwait: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002360 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002361 return;
2362 }
2363
2364 /* Barf if we don't currently hold the mutex. */
nethercoted3693d02004-04-26 08:05:24 +00002365 if (mutex->__vg_m_count == 0 /* nobody holds it */) {
njn25e49d8e72002-09-23 09:36:25 +00002366 VG_(record_pthread_error)( tid,
nethercoted3693d02004-04-26 08:05:24 +00002367 "pthread_cond_wait/timedwait: mutex is unlocked");
2368 SET_PTHREQ_RETVAL(tid, VKI_EPERM);
2369 return;
2370 }
2371
2372 if ((ThreadId)mutex->__vg_m_owner != tid /* we don't hold it */) {
2373 VG_(record_pthread_error)( tid,
2374 "pthread_cond_wait/timedwait: mutex is locked by another thread");
2375 SET_PTHREQ_RETVAL(tid, VKI_EPERM);
sewardj3b5d8862002-04-20 13:53:23 +00002376 return;
2377 }
2378
thughes513197c2004-06-13 12:07:53 +00002379 if(VG_(threads)[tid].cancel_pend != NULL &&
2380 VG_(threads)[tid].cancel_st) {
2381 make_thread_jump_to_cancelhdlr ( tid );
2382 } else {
2383 /* Queue ourselves on the condition. */
2384 VG_(threads)[tid].status = VgTs_WaitCV;
2385 VG_(threads)[tid].associated_cv = cond;
2386 VG_(threads)[tid].associated_mx = mutex;
2387 VG_(threads)[tid].awaken_at = ms_end;
2388 if (ms_end != 0xFFFFFFFF)
nethercotef971ab72004-08-02 16:27:40 +00002389 add_timeout(tid, ms_end);
sewardj3b5d8862002-04-20 13:53:23 +00002390
thughes513197c2004-06-13 12:07:53 +00002391 if (VG_(clo_trace_pthread_level) >= 1) {
2392 VG_(sprintf)(msg_buf,
2393 "pthread_cond_wait cv %p, mx %p: BLOCK",
2394 cond, mutex );
2395 print_pthread_event(tid, msg_buf);
2396 }
2397
2398 /* Release the mutex. */
2399 release_one_thread_waiting_on_mutex ( mutex, "pthread_cond_wait " );
sewardj3b5d8862002-04-20 13:53:23 +00002400 }
sewardj3b5d8862002-04-20 13:53:23 +00002401}
2402
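/* For context, a hedged client-level sketch of the contract the handler
   above implements: the caller must hold the mutex, is atomically queued
   on the CV while the mutex is released, and holds the mutex again when
   it resumes.  This is plain POSIX usage, not Valgrind internals. */
#if 0
   pthread_mutex_lock(&mx);
   while (!predicate)                /* re-check: wakeups may be spurious */
      pthread_cond_wait(&cv, &mx);   /* releases mx while blocked         */
   /* predicate now holds, and mx is held again */
   pthread_mutex_unlock(&mx);
#endif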
2403
2404static
2405void do_pthread_cond_signal_or_broadcast ( ThreadId tid,
2406 Bool broadcast,
nethercote1f0173b2004-02-28 15:40:36 +00002407 vg_pthread_cond_t *cond )
sewardj3b5d8862002-04-20 13:53:23 +00002408{
2409 Char msg_buf[100];
2410 Char* caller
2411 = broadcast ? "pthread_cond_broadcast"
2412 : "pthread_cond_signal ";
2413
2414 if (VG_(clo_trace_pthread_level) >= 2) {
2415 VG_(sprintf)(msg_buf, "%s cv %p ...",
2416 caller, cond );
2417 print_pthread_event(tid, msg_buf);
2418 }
2419
2420 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002421 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002422 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj3b5d8862002-04-20 13:53:23 +00002423
2424 if (cond == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002425 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002426 "pthread_cond_signal/broadcast: cond is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002427 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002428 return;
2429 }
2430
2431 release_N_threads_waiting_on_cond (
2432 cond,
2433 broadcast ? VG_N_THREADS : 1,
2434 caller
2435 );
2436
njnd3040452003-05-19 15:04:06 +00002437 SET_PTHREQ_RETVAL(tid, 0); /* success */
sewardj3b5d8862002-04-20 13:53:23 +00002438}
2439
sewardj77e466c2002-04-14 02:29:29 +00002440
sewardj5f07b662002-04-23 16:52:51 +00002441/* -----------------------------------------------------------
2442 THREAD SPECIFIC DATA
2443 -------------------------------------------------------- */
2444
2445static __inline__
2446Bool is_valid_key ( ThreadKey k )
2447{
2448 /* k unsigned; hence no < 0 check */
2449 if (k >= VG_N_THREAD_KEYS) return False;
2450 if (!vg_thread_keys[k].inuse) return False;
2451 return True;
2452}
2453
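/* Illustrative sketch only (the real declarations live elsewhere in the
   core): each key slot records whether it is in use plus its destructor,
   and each thread carries specifics_ptr, a client-space array of void*
   indexed by key.  The type and field names below are assumptions made
   for exposition, not the actual definitions. */
#if 0
typedef struct {
   Bool inuse;                   /* allocated by pthread_key_create?  */
   void (*destructor)(void*);    /* run at thread exit if non-NULL    */
} ThreadKeySlot;                 /* hypothetical name */

static ThreadKeySlot vg_thread_keys[VG_N_THREAD_KEYS];

/* per-thread:  void** specifics_ptr;
   specifics_ptr[key] holds the value from pthread_setspecific(). */
#endif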
sewardj00a66b12002-10-12 16:42:35 +00002454
2455/* Return in %EDX a value of 1 if the key is valid, else 0. */
2456static
2457void do_pthread_key_validate ( ThreadId tid,
2458 pthread_key_t key )
2459{
2460 Char msg_buf[100];
2461
2462 if (VG_(clo_trace_pthread_level) >= 1) {
2463 VG_(sprintf)(msg_buf, "pthread_key_validate key %p",
2464 key );
2465 print_pthread_event(tid, msg_buf);
2466 }
2467
2468 vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
2469 vg_assert(VG_(is_valid_tid)(tid)
2470 && VG_(threads)[tid].status == VgTs_Runnable);
2471
2472 if (is_valid_key((ThreadKey)key)) {
njnd3040452003-05-19 15:04:06 +00002473 SET_PTHREQ_RETVAL(tid, 1);
sewardj00a66b12002-10-12 16:42:35 +00002474 } else {
njnd3040452003-05-19 15:04:06 +00002475 SET_PTHREQ_RETVAL(tid, 0);
sewardj00a66b12002-10-12 16:42:35 +00002476 }
2477}
2478
2479
sewardj5f07b662002-04-23 16:52:51 +00002480static
2481void do_pthread_key_create ( ThreadId tid,
2482 pthread_key_t* key,
2483 void (*destructor)(void*) )
2484{
2485 Int i;
2486 Char msg_buf[100];
2487
2488 if (VG_(clo_trace_pthread_level) >= 1) {
2489 VG_(sprintf)(msg_buf, "pthread_key_create *key %p, destr %p",
2490 key, destructor );
2491 print_pthread_event(tid, msg_buf);
2492 }
2493
2494 vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
sewardjb48e5002002-05-13 00:16:03 +00002495 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002496 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002497
2498 for (i = 0; i < VG_N_THREAD_KEYS; i++)
2499 if (!vg_thread_keys[i].inuse)
2500 break;
2501
2502 if (i == VG_N_THREAD_KEYS) {
jsgf855d93d2003-10-13 22:26:55 +00002503 VG_(message)(Vg_UserMsg, "pthread_key_create() asked for too many keys (more than %d): increase VG_N_THREAD_KEYS and recompile Valgrind.",
2504 VG_N_THREAD_KEYS);
2505 SET_PTHREQ_RETVAL(tid, EAGAIN);
2506 return;
sewardj5f07b662002-04-23 16:52:51 +00002507 }
2508
sewardj870497a2002-05-29 01:06:47 +00002509 vg_thread_keys[i].inuse = True;
2510 vg_thread_keys[i].destructor = destructor;
sewardjc3bd5f52002-05-01 03:24:23 +00002511
sewardj5a3798b2002-06-04 23:24:22 +00002512 /* check key for addressability */
njn72718642003-07-24 08:45:32 +00002513 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_key_create: key",
njn25e49d8e72002-09-23 09:36:25 +00002514 (Addr)key, sizeof(pthread_key_t));
sewardj5f07b662002-04-23 16:52:51 +00002515 *key = i;
njn25e49d8e72002-09-23 09:36:25 +00002516 VG_TRACK( post_mem_write, (Addr)key, sizeof(pthread_key_t) );
sewardjc3bd5f52002-05-01 03:24:23 +00002517
njnd3040452003-05-19 15:04:06 +00002518 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002519}
2520
2521
2522static
2523void do_pthread_key_delete ( ThreadId tid, pthread_key_t key )
2524{
2525 Char msg_buf[100];
2526 if (VG_(clo_trace_pthread_level) >= 1) {
2527 VG_(sprintf)(msg_buf, "pthread_key_delete key %d",
2528 key );
2529 print_pthread_event(tid, msg_buf);
2530 }
2531
sewardjb48e5002002-05-13 00:16:03 +00002532 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002533 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002534
2535 if (!is_valid_key(key)) {
njn25e49d8e72002-09-23 09:36:25 +00002536 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002537 "pthread_key_delete: key is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002538 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj5f07b662002-04-23 16:52:51 +00002539 return;
2540 }
2541
2542 vg_thread_keys[key].inuse = False;
sewardj648b3152002-12-09 19:07:59 +00002543 vg_thread_keys[key].destructor = NULL;
njnd3040452003-05-19 15:04:06 +00002544 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002545}
2546
2547
sewardj00a66b12002-10-12 16:42:35 +00002548/* Get the .specifics_ptr for a thread. Return 1 if the thread-slot
2549 isn't in use, so that client-space can scan all thread slots. 1
2550 cannot be confused with NULL or a legitimately-aligned specifics_ptr
2551 value. */
sewardj5f07b662002-04-23 16:52:51 +00002552static
sewardj00a66b12002-10-12 16:42:35 +00002553void do_pthread_getspecific_ptr ( ThreadId tid )
sewardj5f07b662002-04-23 16:52:51 +00002554{
sewardj00a66b12002-10-12 16:42:35 +00002555 void** specifics_ptr;
2556 Char msg_buf[100];
2557
jsgf855d93d2003-10-13 22:26:55 +00002558 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj00a66b12002-10-12 16:42:35 +00002559 VG_(sprintf)(msg_buf, "pthread_getspecific_ptr" );
sewardj5f07b662002-04-23 16:52:51 +00002560 print_pthread_event(tid, msg_buf);
2561 }
2562
nethercote36881a22004-08-04 14:03:16 +00002563 vg_assert(is_valid_or_empty_tid(tid));
sewardj5f07b662002-04-23 16:52:51 +00002564
sewardj00a66b12002-10-12 16:42:35 +00002565 if (VG_(threads)[tid].status == VgTs_Empty) {
njnd3040452003-05-19 15:04:06 +00002566 SET_PTHREQ_RETVAL(tid, 1);
sewardj5f07b662002-04-23 16:52:51 +00002567 return;
2568 }
2569
sewardj00a66b12002-10-12 16:42:35 +00002570 specifics_ptr = VG_(threads)[tid].specifics_ptr;
2571 vg_assert(specifics_ptr == NULL
2572 || IS_ALIGNED4_ADDR(specifics_ptr));
2573
njnd3040452003-05-19 15:04:06 +00002574 SET_PTHREQ_RETVAL(tid, (UInt)specifics_ptr);
sewardj5f07b662002-04-23 16:52:51 +00002575}
2576
2577
2578static
sewardj00a66b12002-10-12 16:42:35 +00002579void do_pthread_setspecific_ptr ( ThreadId tid, void** ptr )
sewardj5f07b662002-04-23 16:52:51 +00002580{
2581 Char msg_buf[100];
2582 if (VG_(clo_trace_pthread_level) >= 1) {
sewardj00a66b12002-10-12 16:42:35 +00002583 VG_(sprintf)(msg_buf, "pthread_setspecific_ptr ptr %p",
2584 ptr );
sewardj5f07b662002-04-23 16:52:51 +00002585 print_pthread_event(tid, msg_buf);
2586 }
2587
sewardjb48e5002002-05-13 00:16:03 +00002588 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002589 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002590
sewardj00a66b12002-10-12 16:42:35 +00002591 VG_(threads)[tid].specifics_ptr = ptr;
njnd3040452003-05-19 15:04:06 +00002592 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002593}
2594
2595
sewardj870497a2002-05-29 01:06:47 +00002596/* Helper for calling destructors at thread exit. If key is valid,
2597 copy the thread's specific value into cu->data.function.arg and put
2598 the *key*'s destructor fn address in cu->data.function.fn. Then
2599 return 0 to the caller. Otherwise return non-zero to the caller. */
2600static
2601void do__get_key_destr_and_spec ( ThreadId tid,
2602 pthread_key_t key,
2603 CleanupEntry* cu )
2604{
2605 Char msg_buf[100];
jsgf855d93d2003-10-13 22:26:55 +00002606 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj870497a2002-05-29 01:06:47 +00002607 VG_(sprintf)(msg_buf,
2608 "get_key_destr_and_arg (key = %d)", key );
2609 print_pthread_event(tid, msg_buf);
2610 }
2611 vg_assert(VG_(is_valid_tid)(tid));
2612 vg_assert(key >= 0 && key < VG_N_THREAD_KEYS);
njn25e49d8e72002-09-23 09:36:25 +00002613
sewardj870497a2002-05-29 01:06:47 +00002614 if (!vg_thread_keys[key].inuse) {
njnd3040452003-05-19 15:04:06 +00002615 SET_PTHREQ_RETVAL(tid, -1);
sewardj870497a2002-05-29 01:06:47 +00002616 return;
2617 }
njn72718642003-07-24 08:45:32 +00002618 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "get_key_destr_and_spec: cu",
2619 (Addr)cu, sizeof(CleanupEntry) );
sewardj00a66b12002-10-12 16:42:35 +00002620
thughes11975ff2004-06-12 12:58:22 +00002621 cu->type = VgCt_Function;
2622 cu->data.function.fn = vg_thread_keys[key].destructor;
sewardj00a66b12002-10-12 16:42:35 +00002623 if (VG_(threads)[tid].specifics_ptr == NULL) {
thughes11975ff2004-06-12 12:58:22 +00002624 cu->data.function.arg = NULL;
sewardj00a66b12002-10-12 16:42:35 +00002625 } else {
njn72718642003-07-24 08:45:32 +00002626 VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
sewardj00a66b12002-10-12 16:42:35 +00002627 "get_key_destr_and_spec: key",
2628 (Addr)(&VG_(threads)[tid].specifics_ptr[key]),
2629 sizeof(void*) );
thughes11975ff2004-06-12 12:58:22 +00002630 cu->data.function.arg = VG_(threads)[tid].specifics_ptr[key];
sewardj00a66b12002-10-12 16:42:35 +00002631 }
2632
njn25e49d8e72002-09-23 09:36:25 +00002633 VG_TRACK( post_mem_write, (Addr)cu, sizeof(CleanupEntry) );
njnd3040452003-05-19 15:04:06 +00002634 SET_PTHREQ_RETVAL(tid, 0);
sewardj870497a2002-05-29 01:06:47 +00002635}
2636
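/* For context: POSIX says that at thread exit each key's destructor is
   run on the thread's non-NULL specific value.  A hedged sketch of the
   loop the client-side library presumably drives via the
   VG_USERREQ__GET_KEY_D_AND_S request handled above; the request-issuing
   wrapper named here is hypothetical. */
#if 0
   pthread_key_t k;
   CleanupEntry  cu;
   for (k = 0; k < VG_N_THREAD_KEYS; k++) {
      if (GET_KEY_DESTR_AND_SPEC(k, &cu) != 0)   /* hypothetical wrapper */
         continue;                               /* key slot not in use  */
      if (cu.data.function.fn != NULL && cu.data.function.arg != NULL)
         cu.data.function.fn(cu.data.function.arg);
   }
#endif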
2637
sewardjb48e5002002-05-13 00:16:03 +00002638/* ---------------------------------------------------
2639 SIGNALS
2640 ------------------------------------------------ */
2641
2642/* See comment in vg_libpthread.c:pthread_sigmask() regarding
sewardj018f7622002-05-15 21:13:39 +00002643 deliberate confusion of types sigset_t and vki_sigset_t. Return 0
2644 for OK and 1 for some kind of addressing error, which the
2645 vg_libpthread.c routine turns into return values 0 and EFAULT
2646 respectively. */
sewardjb48e5002002-05-13 00:16:03 +00002647static
2648void do_pthread_sigmask ( ThreadId tid,
sewardj018f7622002-05-15 21:13:39 +00002649 Int vki_how,
sewardjb48e5002002-05-13 00:16:03 +00002650 vki_ksigset_t* newmask,
2651 vki_ksigset_t* oldmask )
2652{
2653 Char msg_buf[100];
2654 if (VG_(clo_trace_pthread_level) >= 1) {
2655 VG_(sprintf)(msg_buf,
sewardj018f7622002-05-15 21:13:39 +00002656 "pthread_sigmask vki_how %d, newmask %p, oldmask %p",
2657 vki_how, newmask, oldmask );
sewardjb48e5002002-05-13 00:16:03 +00002658 print_pthread_event(tid, msg_buf);
2659 }
2660
2661 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002662 && VG_(threads)[tid].status == VgTs_Runnable);
sewardjb48e5002002-05-13 00:16:03 +00002663
njn25e49d8e72002-09-23 09:36:25 +00002664 if (newmask)
njn72718642003-07-24 08:45:32 +00002665 VG_TRACK( pre_mem_read, Vg_CorePThread, tid, "pthread_sigmask: newmask",
njn25e49d8e72002-09-23 09:36:25 +00002666 (Addr)newmask, sizeof(vki_ksigset_t));
2667 if (oldmask)
njn72718642003-07-24 08:45:32 +00002668 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_sigmask: oldmask",
njn25e49d8e72002-09-23 09:36:25 +00002669 (Addr)oldmask, sizeof(vki_ksigset_t));
sewardjb48e5002002-05-13 00:16:03 +00002670
sewardj018f7622002-05-15 21:13:39 +00002671 VG_(do_pthread_sigmask_SCSS_upd) ( tid, vki_how, newmask, oldmask );
sewardjb48e5002002-05-13 00:16:03 +00002672
njn25e49d8e72002-09-23 09:36:25 +00002673 if (oldmask)
2674 VG_TRACK( post_mem_write, (Addr)oldmask, sizeof(vki_ksigset_t) );
sewardj3a951cf2002-05-15 22:25:47 +00002675
sewardj018f7622002-05-15 21:13:39 +00002676 /* Success. */
njnd3040452003-05-19 15:04:06 +00002677 SET_PTHREQ_RETVAL(tid, 0);
sewardjb48e5002002-05-13 00:16:03 +00002678}
2679
2680
2681static
sewardj018f7622002-05-15 21:13:39 +00002682void do_pthread_kill ( ThreadId tid, /* me */
2683 ThreadId thread, /* thread to signal */
2684 Int sig )
2685{
nethercote97ccd5e2004-08-02 12:10:01 +00002686 ThreadState* tst;
sewardj018f7622002-05-15 21:13:39 +00002687 Char msg_buf[100];
2688
2689 if (VG_(clo_trace_signals) || VG_(clo_trace_pthread_level) >= 1) {
2690 VG_(sprintf)(msg_buf,
2691 "pthread_kill thread %d, signo %d",
2692 thread, sig );
2693 print_pthread_event(tid, msg_buf);
2694 }
2695
2696 vg_assert(VG_(is_valid_tid)(tid)
2697 && VG_(threads)[tid].status == VgTs_Runnable);
2698
sewardj4dced352002-06-04 22:54:20 +00002699 if (!VG_(is_valid_tid)(thread)) {
njn25e49d8e72002-09-23 09:36:25 +00002700 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002701 "pthread_kill: invalid target thread");
jsgf855d93d2003-10-13 22:26:55 +00002702 SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
2703 return;
2704 }
2705
2706 if (sig == 0) {
2707 /* OK, signal 0 is just for testing */
2708 SET_PTHREQ_RETVAL(tid, 0);
sewardj018f7622002-05-15 21:13:39 +00002709 return;
2710 }
2711
2712 if (sig < 1 || sig > VKI_KNSIG) {
jsgf855d93d2003-10-13 22:26:55 +00002713 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj018f7622002-05-15 21:13:39 +00002714 return;
2715 }
2716
nethercote97ccd5e2004-08-02 12:10:01 +00002717 tst = VG_(get_ThreadState)(thread);
2718 vg_assert(NULL != tst->proxy);
2719 VG_(proxy_sendsig)(thread, sig);
njnd3040452003-05-19 15:04:06 +00002720 SET_PTHREQ_RETVAL(tid, 0);
sewardjb48e5002002-05-13 00:16:03 +00002721}
2722
2723
sewardj2cb00342002-06-28 01:46:26 +00002724/* -----------------------------------------------------------
2725 FORK HANDLERS.
2726 -------------------------------------------------------- */
2727
2728static
2729void do__set_fhstack_used ( ThreadId tid, Int n )
2730{
2731 Char msg_buf[100];
2732 if (VG_(clo_trace_sched)) {
2733 VG_(sprintf)(msg_buf, "set_fhstack_used to %d", n );
2734 print_pthread_event(tid, msg_buf);
2735 }
2736
2737 vg_assert(VG_(is_valid_tid)(tid)
2738 && VG_(threads)[tid].status == VgTs_Runnable);
2739
2740 if (n >= 0 && n < VG_N_FORKHANDLERSTACK) {
2741 vg_fhstack_used = n;
njnd3040452003-05-19 15:04:06 +00002742 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002743 } else {
njnd3040452003-05-19 15:04:06 +00002744 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002745 }
2746}
2747
2748
2749static
2750void do__get_fhstack_used ( ThreadId tid )
2751{
2752 Int n;
2753 Char msg_buf[100];
2754 if (VG_(clo_trace_sched)) {
2755 VG_(sprintf)(msg_buf, "get_fhstack_used" );
2756 print_pthread_event(tid, msg_buf);
2757 }
2758
2759 vg_assert(VG_(is_valid_tid)(tid)
2760 && VG_(threads)[tid].status == VgTs_Runnable);
2761
2762 n = vg_fhstack_used;
2763 vg_assert(n >= 0 && n < VG_N_FORKHANDLERSTACK);
njnd3040452003-05-19 15:04:06 +00002764 SET_PTHREQ_RETVAL(tid, n);
sewardj2cb00342002-06-28 01:46:26 +00002765}
2766
2767static
2768void do__set_fhstack_entry ( ThreadId tid, Int n, ForkHandlerEntry* fh )
2769{
2770 Char msg_buf[100];
2771 if (VG_(clo_trace_sched)) {
2772 VG_(sprintf)(msg_buf, "set_fhstack_entry %d to %p", n, fh );
2773 print_pthread_event(tid, msg_buf);
2774 }
2775
2776 vg_assert(VG_(is_valid_tid)(tid)
2777 && VG_(threads)[tid].status == VgTs_Runnable);
njn72718642003-07-24 08:45:32 +00002778 VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
njn25e49d8e72002-09-23 09:36:25 +00002779 "pthread_atfork: prepare/parent/child",
2780 (Addr)fh, sizeof(ForkHandlerEntry));
sewardj2cb00342002-06-28 01:46:26 +00002781
njn25e49d8e72002-09-23 09:36:25 +00002782 if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
njnd3040452003-05-19 15:04:06 +00002783 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002784 return;
2785 }
2786
2787 vg_fhstack[n] = *fh;
njnd3040452003-05-19 15:04:06 +00002788 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002789}
2790
2791
2792static
2793void do__get_fhstack_entry ( ThreadId tid, Int n, /*OUT*/
2794 ForkHandlerEntry* fh )
2795{
2796 Char msg_buf[100];
2797 if (VG_(clo_trace_sched)) {
2798 VG_(sprintf)(msg_buf, "get_fhstack_entry %d", n );
2799 print_pthread_event(tid, msg_buf);
2800 }
2801
2802 vg_assert(VG_(is_valid_tid)(tid)
2803 && VG_(threads)[tid].status == VgTs_Runnable);
njn72718642003-07-24 08:45:32 +00002804 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "fork: prepare/parent/child",
njn25e49d8e72002-09-23 09:36:25 +00002805 (Addr)fh, sizeof(ForkHandlerEntry));
sewardj2cb00342002-06-28 01:46:26 +00002806
njn25e49d8e72002-09-23 09:36:25 +00002807 if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
njnd3040452003-05-19 15:04:06 +00002808 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002809 return;
2810 }
2811
2812 *fh = vg_fhstack[n];
njnd3040452003-05-19 15:04:06 +00002813 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002814
njn25e49d8e72002-09-23 09:36:25 +00002815 VG_TRACK( post_mem_write, (Addr)fh, sizeof(ForkHandlerEntry) );
sewardj2cb00342002-06-28 01:46:26 +00002816}
2817
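/* Hedged sketch of how pthread_atfork() presumably drives the
   GET_FHSTACK_USED / SET_FHSTACK_ENTRY / SET_FHSTACK_USED requests
   handled above: read the current stack depth, store the new handler
   triple at that index, then bump the depth.  The ForkHandlerEntry field
   names and the request-issuing wrappers are assumptions made for
   exposition. */
#if 0
   Int n = GET_FHSTACK_USED();              /* hypothetical wrapper */
   ForkHandlerEntry fh;
   fh.prepare = my_prepare;                 /* assumed field names  */
   fh.parent  = my_parent;
   fh.child   = my_child;
   if (SET_FHSTACK_ENTRY(n, &fh) == 0)
      SET_FHSTACK_USED(n + 1);
#endif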
thughesdaa34562004-06-27 12:48:53 +00002818
2819static
2820void do__get_stack_info ( ThreadId tid, ThreadId which, StackInfo* si )
2821{
2822 Char msg_buf[100];
2823
2824 vg_assert(VG_(is_valid_tid)(tid)
2825 && VG_(threads)[tid].status == VgTs_Runnable);
2826
2827 if (VG_(clo_trace_sched)) {
2828 VG_(sprintf)(msg_buf, "get_stack_info for tid %d", which );
2829 print_pthread_event(tid, msg_buf);
2830 }
2831
2832 if (!VG_(is_valid_tid)(which)) {
2833 SET_PTHREQ_RETVAL(tid, -1);
2834 return;
2835 }
2836
2837 si->base = VG_(threads)[which].stack_base;
2838 si->size = VG_(threads)[which].stack_size
2839 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
2840 - VG_(threads)[which].stack_guard_size;
2841 si->guardsize = VG_(threads)[which].stack_guard_size;
2842
2843 SET_PTHREQ_RETVAL(tid, 0);
2844}
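/* Worked example for do__get_stack_info above (numbers purely
   illustrative): with a 2 MB stack, a 16 KB client-stack redzone and a
   4 KB guard area, si->size comes back as
   2097152 - 16384 - 4096 = 2076672 bytes, and si->guardsize as 4096. */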
2845
njnd3040452003-05-19 15:04:06 +00002846/* ---------------------------------------------------------------------
2847 Specifying shadow register values
2848 ------------------------------------------------------------------ */
2849
2850void VG_(set_return_from_syscall_shadow) ( ThreadId tid, UInt ret_shadow )
2851{
nethercote15218bd2004-09-11 15:11:47 +00002852 VG_(set_thread_shadow_archreg)(tid, R_SYSCALL_RET, ret_shadow);
njnd3040452003-05-19 15:04:06 +00002853}
2854
2855UInt VG_(get_exit_status_shadow) ( void )
2856{
nethercote15218bd2004-09-11 15:11:47 +00002857 return VG_(get_shadow_archreg)(R_SYSCALL_ARG1);
njnd3040452003-05-19 15:04:06 +00002858}
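/* A minimal, hedged usage sketch of the shadow-register helpers above: a
   definedness-tracking tool might, in its post-syscall handling, mark
   the syscall result register's shadow as fully defined.  The shadow
   encoding (0x0 here) and the hook name are tool-specific assumptions,
   shown only for illustration. */
#if 0
static void post_syscall_hook ( ThreadId tid )    /* hypothetical hook */
{
   VG_(set_return_from_syscall_shadow)( tid, 0x0 /* tool-defined */ );
}
#endif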
2859
rjwalshe4e779d2004-04-16 23:02:29 +00002860void VG_(intercept_libc_freeres_wrapper)(Addr addr)
2861{
nethercotef971ab72004-08-02 16:27:40 +00002862 __libc_freeres_wrapper = addr;
rjwalshe4e779d2004-04-16 23:02:29 +00002863}
sewardj2cb00342002-06-28 01:46:26 +00002864
sewardje663cb92002-04-12 10:26:32 +00002865/* ---------------------------------------------------------------------
sewardj124ca2a2002-06-20 10:19:38 +00002866 Handle client requests.
sewardje663cb92002-04-12 10:26:32 +00002867 ------------------------------------------------------------------ */
2868
sewardj124ca2a2002-06-20 10:19:38 +00002869/* Do a client request for the thread tid. After the request, tid may
2870 or may not still be runnable; if not, the scheduler will have to
2871 choose a new thread to run.
2872*/
sewardje663cb92002-04-12 10:26:32 +00002873static
nethercote3e901a22004-09-11 13:17:02 +00002874void do_client_request ( ThreadId tid, UInt* arg )
sewardje663cb92002-04-12 10:26:32 +00002875{
nethercote3e901a22004-09-11 13:17:02 +00002876 UInt req_no = arg[0];
sewardj124ca2a2002-06-20 10:19:38 +00002877
fitzhardinge98abfc72003-12-16 02:05:15 +00002878 if (0)
nethercote3e901a22004-09-11 13:17:02 +00002879 VG_(printf)("req no = 0x%x, arg = %p\n", req_no, arg);
sewardje663cb92002-04-12 10:26:32 +00002880 switch (req_no) {
2881
njn3e884182003-04-15 13:03:23 +00002882 case VG_USERREQ__CLIENT_CALL0: {
2883 UInt (*f)(void) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002884 if (f == NULL)
nethercote66b3af62004-09-11 13:06:55 +00002885 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL0: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00002886 else
2887 SET_CLCALL_RETVAL(tid, f ( ), (Addr)f);
njn3e884182003-04-15 13:03:23 +00002888 break;
2889 }
2890 case VG_USERREQ__CLIENT_CALL1: {
2891 UInt (*f)(UInt) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002892 if (f == NULL)
nethercote66b3af62004-09-11 13:06:55 +00002893 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL1: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00002894 else
2895 SET_CLCALL_RETVAL(tid, f ( arg[2] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002896 break;
2897 }
2898 case VG_USERREQ__CLIENT_CALL2: {
2899 UInt (*f)(UInt, UInt) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002900 if (f == NULL)
nethercote66b3af62004-09-11 13:06:55 +00002901 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL2: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00002902 else
2903 SET_CLCALL_RETVAL(tid, f ( arg[2], arg[3] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002904 break;
2905 }
2906 case VG_USERREQ__CLIENT_CALL3: {
2907 UInt (*f)(UInt, UInt, UInt) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002908 if (f == NULL)
nethercote66b3af62004-09-11 13:06:55 +00002909 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL3: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00002910 else
2911 SET_CLCALL_RETVAL(tid, f ( arg[2], arg[3], arg[4] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002912 break;
2913 }
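      /* The four CLIENT_CALL cases above run a client-supplied function
         here, inside the scheduler, rather than on the simulated CPU.
         Hedged client-side sketch, assuming the VALGRIND_NON_SIMD_CALL*
         macros in valgrind.h are the usual way such requests are issued. */
#if 0
      static UInt add_two ( UInt a, UInt b ) { return a + b; }
      /* elsewhere, in client code: */
      UInt sum = VALGRIND_NON_SIMD_CALL2(add_two, 10, 32);  /* sum == 42 */
#endif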
2914
nethercote7cc9c232004-01-21 15:08:04 +00002915 /* Note: for tools that replace malloc() et al, we want to call
njn3e884182003-04-15 13:03:23 +00002916 the replacement versions. For those that don't, we want to call
2917 VG_(cli_malloc)() et al. We do this by calling SK_(malloc)(), which
nethercote3ced0e32004-01-26 14:50:45 +00002918 malloc-replacing tools must replace; the default definition of
2919 SK_(malloc)() simply calls VG_(cli_malloc)(). */
njn3e884182003-04-15 13:03:23 +00002920
2921 /* Note: for MALLOC and FREE, must set the appropriate "lock"... see
2922 the comment in vg_defaults.c/SK_(malloc)() for why. */
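      /* Hedged sketch (signature assumed, not copied from vg_defaults.c)
         of what such a default definition amounts to: a tool that does
         not replace malloc simply forwards to the core's client-arena
         allocator. */
#if 0
      void* SK_(malloc) ( Int szB )
      {
         return VG_(cli_malloc) ( VG_(clo_alignment), szB );
      }
#endif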
sewardj124ca2a2002-06-20 10:19:38 +00002923 case VG_USERREQ__MALLOC:
njn3e884182003-04-15 13:03:23 +00002924 VG_(sk_malloc_called_by_scheduler) = True;
njnd3040452003-05-19 15:04:06 +00002925 SET_PTHREQ_RETVAL(
njn72718642003-07-24 08:45:32 +00002926 tid, (UInt)SK_(malloc) ( arg[1] )
sewardj124ca2a2002-06-20 10:19:38 +00002927 );
njn3e884182003-04-15 13:03:23 +00002928 VG_(sk_malloc_called_by_scheduler) = False;
sewardj124ca2a2002-06-20 10:19:38 +00002929 break;
2930
2931 case VG_USERREQ__FREE:
njn3e884182003-04-15 13:03:23 +00002932 VG_(sk_malloc_called_by_scheduler) = True;
njn72718642003-07-24 08:45:32 +00002933 SK_(free) ( (void*)arg[1] );
njn3e884182003-04-15 13:03:23 +00002934 VG_(sk_malloc_called_by_scheduler) = False;
njnd3040452003-05-19 15:04:06 +00002935 SET_PTHREQ_RETVAL(tid, 0); /* irrelevant */
sewardj124ca2a2002-06-20 10:19:38 +00002936 break;
2937
sewardj124ca2a2002-06-20 10:19:38 +00002938 case VG_USERREQ__PTHREAD_GET_THREADID:
njnd3040452003-05-19 15:04:06 +00002939 SET_PTHREQ_RETVAL(tid, tid);
sewardj124ca2a2002-06-20 10:19:38 +00002940 break;
2941
2942 case VG_USERREQ__RUNNING_ON_VALGRIND:
njnd3040452003-05-19 15:04:06 +00002943 SET_CLREQ_RETVAL(tid, 1);
sewardj124ca2a2002-06-20 10:19:38 +00002944 break;
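      /* The 1 set just above is what the RUNNING_ON_VALGRIND macro in
         valgrind.h yields under Valgrind; run natively, the magic request
         sequence is a no-op and the macro evaluates to 0. */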
2945
2946 case VG_USERREQ__GET_PTHREAD_TRACE_LEVEL:
njnd3040452003-05-19 15:04:06 +00002947 SET_PTHREQ_RETVAL(tid, VG_(clo_trace_pthread_level));
sewardj124ca2a2002-06-20 10:19:38 +00002948 break;
2949
2950 case VG_USERREQ__READ_MILLISECOND_TIMER:
njnd3040452003-05-19 15:04:06 +00002951 SET_PTHREQ_RETVAL(tid, VG_(read_millisecond_timer)());
sewardj124ca2a2002-06-20 10:19:38 +00002952 break;
2953
2954 /* Some of these may make thread tid non-runnable, but the
2955 scheduler checks for that on return from this function. */
2956 case VG_USERREQ__PTHREAD_MUTEX_LOCK:
thughese321d492004-10-17 15:00:20 +00002957 do_pthread_mutex_lock( tid, False, (void *)(arg[1]), 0xFFFFFFFF );
2958 break;
2959
2960 case VG_USERREQ__PTHREAD_MUTEX_TIMEDLOCK:
2961 do_pthread_mutex_lock( tid, False, (void *)(arg[1]), arg[2] );
sewardj124ca2a2002-06-20 10:19:38 +00002962 break;
2963
2964 case VG_USERREQ__PTHREAD_MUTEX_TRYLOCK:
thughese321d492004-10-17 15:00:20 +00002965 do_pthread_mutex_lock( tid, True, (void *)(arg[1]), 0xFFFFFFFF );
sewardj124ca2a2002-06-20 10:19:38 +00002966 break;
2967
2968 case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
2969 do_pthread_mutex_unlock( tid, (void *)(arg[1]) );
2970 break;
2971
sewardj00a66b12002-10-12 16:42:35 +00002972 case VG_USERREQ__PTHREAD_GETSPECIFIC_PTR:
2973 do_pthread_getspecific_ptr ( tid );
sewardj124ca2a2002-06-20 10:19:38 +00002974 break;
2975
2976 case VG_USERREQ__SET_CANCELTYPE:
2977 do__set_canceltype ( tid, arg[1] );
2978 break;
2979
2980 case VG_USERREQ__CLEANUP_PUSH:
2981 do__cleanup_push ( tid, (CleanupEntry*)(arg[1]) );
2982 break;
2983
2984 case VG_USERREQ__CLEANUP_POP:
2985 do__cleanup_pop ( tid, (CleanupEntry*)(arg[1]) );
2986 break;
2987
2988 case VG_USERREQ__TESTCANCEL:
2989 do__testcancel ( tid );
2990 break;
2991
sewardje663cb92002-04-12 10:26:32 +00002992 case VG_USERREQ__PTHREAD_JOIN:
2993 do_pthread_join( tid, arg[1], (void**)(arg[2]) );
2994 break;
2995
sewardj3b5d8862002-04-20 13:53:23 +00002996 case VG_USERREQ__PTHREAD_COND_WAIT:
2997 do_pthread_cond_wait( tid,
nethercote1f0173b2004-02-28 15:40:36 +00002998 (vg_pthread_cond_t *)(arg[1]),
2999 (vg_pthread_mutex_t *)(arg[2]),
sewardj5f07b662002-04-23 16:52:51 +00003000 0xFFFFFFFF /* no timeout */ );
3001 break;
3002
3003 case VG_USERREQ__PTHREAD_COND_TIMEDWAIT:
3004 do_pthread_cond_wait( tid,
nethercote1f0173b2004-02-28 15:40:36 +00003005 (vg_pthread_cond_t *)(arg[1]),
3006 (vg_pthread_mutex_t *)(arg[2]),
sewardj5f07b662002-04-23 16:52:51 +00003007 arg[3] /* timeout millisecond point */ );
sewardj3b5d8862002-04-20 13:53:23 +00003008 break;
3009
3010 case VG_USERREQ__PTHREAD_COND_SIGNAL:
3011 do_pthread_cond_signal_or_broadcast(
3012 tid,
3013 False, /* signal, not broadcast */
nethercote1f0173b2004-02-28 15:40:36 +00003014 (vg_pthread_cond_t *)(arg[1]) );
sewardj3b5d8862002-04-20 13:53:23 +00003015 break;
3016
3017 case VG_USERREQ__PTHREAD_COND_BROADCAST:
3018 do_pthread_cond_signal_or_broadcast(
3019 tid,
3020 True, /* broadcast, not signal */
nethercote1f0173b2004-02-28 15:40:36 +00003021 (vg_pthread_cond_t *)(arg[1]) );
sewardj3b5d8862002-04-20 13:53:23 +00003022 break;
3023
sewardj00a66b12002-10-12 16:42:35 +00003024 case VG_USERREQ__PTHREAD_KEY_VALIDATE:
3025 do_pthread_key_validate ( tid,
3026 (pthread_key_t)(arg[1]) );
3027 break;
3028
sewardj5f07b662002-04-23 16:52:51 +00003029 case VG_USERREQ__PTHREAD_KEY_CREATE:
3030 do_pthread_key_create ( tid,
3031 (pthread_key_t*)(arg[1]),
3032 (void(*)(void*))(arg[2]) );
3033 break;
3034
3035 case VG_USERREQ__PTHREAD_KEY_DELETE:
3036 do_pthread_key_delete ( tid,
3037 (pthread_key_t)(arg[1]) );
3038 break;
3039
sewardj00a66b12002-10-12 16:42:35 +00003040 case VG_USERREQ__PTHREAD_SETSPECIFIC_PTR:
3041 do_pthread_setspecific_ptr ( tid,
3042 (void**)(arg[1]) );
sewardj5f07b662002-04-23 16:52:51 +00003043 break;
3044
sewardjb48e5002002-05-13 00:16:03 +00003045 case VG_USERREQ__PTHREAD_SIGMASK:
3046 do_pthread_sigmask ( tid,
3047 arg[1],
3048 (vki_ksigset_t*)(arg[2]),
3049 (vki_ksigset_t*)(arg[3]) );
3050 break;
3051
sewardj018f7622002-05-15 21:13:39 +00003052 case VG_USERREQ__PTHREAD_KILL:
3053 do_pthread_kill ( tid, arg[1], arg[2] );
3054 break;
3055
sewardjff42d1d2002-05-22 13:17:31 +00003056 case VG_USERREQ__PTHREAD_YIELD:
3057 do_pthread_yield ( tid );
sewardj18a62ff2002-07-12 22:30:51 +00003058 /* On return from do_client_request(), the scheduler will
3059 select a new thread to run. */
sewardjff42d1d2002-05-22 13:17:31 +00003060 break;
sewardj018f7622002-05-15 21:13:39 +00003061
sewardj7989d0c2002-05-28 11:00:01 +00003062 case VG_USERREQ__SET_CANCELSTATE:
3063 do__set_cancelstate ( tid, arg[1] );
3064 break;
3065
sewardj7989d0c2002-05-28 11:00:01 +00003066 case VG_USERREQ__SET_OR_GET_DETACH:
3067 do__set_or_get_detach ( tid, arg[1], arg[2] );
3068 break;
3069
3070 case VG_USERREQ__SET_CANCELPEND:
3071 do__set_cancelpend ( tid, arg[1], (void(*)(void*))arg[2] );
3072 break;
3073
3074 case VG_USERREQ__WAIT_JOINER:
3075 do__wait_joiner ( tid, (void*)arg[1] );
3076 break;
3077
3078 case VG_USERREQ__QUIT:
3079 do__quit ( tid );
3080 break;
3081
3082 case VG_USERREQ__APPLY_IN_NEW_THREAD:
3083 do__apply_in_new_thread ( tid, (void*(*)(void*))arg[1],
thughesdaa34562004-06-27 12:48:53 +00003084 (void*)arg[2], (StackInfo*)(arg[3]) );
sewardj7989d0c2002-05-28 11:00:01 +00003085 break;
3086
sewardj870497a2002-05-29 01:06:47 +00003087 case VG_USERREQ__GET_KEY_D_AND_S:
3088 do__get_key_destr_and_spec ( tid,
3089 (pthread_key_t)arg[1],
3090 (CleanupEntry*)arg[2] );
3091 break;
3092
sewardjef037c72002-05-30 00:40:03 +00003093 case VG_USERREQ__NUKE_OTHER_THREADS:
3094 VG_(nuke_all_threads_except) ( tid );
njnd3040452003-05-19 15:04:06 +00003095 SET_PTHREQ_RETVAL(tid, 0);
sewardjef037c72002-05-30 00:40:03 +00003096 break;
3097
sewardj4dced352002-06-04 22:54:20 +00003098 case VG_USERREQ__PTHREAD_ERROR:
njn25e49d8e72002-09-23 09:36:25 +00003099 VG_(record_pthread_error)( tid, (Char*)(arg[1]) );
njnd3040452003-05-19 15:04:06 +00003100 SET_PTHREQ_RETVAL(tid, 0);
sewardj4dced352002-06-04 22:54:20 +00003101 break;
3102
sewardj2cb00342002-06-28 01:46:26 +00003103 case VG_USERREQ__SET_FHSTACK_USED:
3104 do__set_fhstack_used( tid, (Int)(arg[1]) );
3105 break;
3106
3107 case VG_USERREQ__GET_FHSTACK_USED:
3108 do__get_fhstack_used( tid );
3109 break;
3110
3111 case VG_USERREQ__SET_FHSTACK_ENTRY:
3112 do__set_fhstack_entry( tid, (Int)(arg[1]),
3113 (ForkHandlerEntry*)(arg[2]) );
3114 break;
3115
3116 case VG_USERREQ__GET_FHSTACK_ENTRY:
3117 do__get_fhstack_entry( tid, (Int)(arg[1]),
3118 (ForkHandlerEntry*)(arg[2]) );
3119 break;
3120
sewardj77e466c2002-04-14 02:29:29 +00003121 case VG_USERREQ__SIGNAL_RETURNS:
3122 handle_signal_return(tid);
3123 break;
fitzhardinge98abfc72003-12-16 02:05:15 +00003124
thughesdaa34562004-06-27 12:48:53 +00003125 case VG_USERREQ__GET_STACK_INFO:
3126 do__get_stack_info( tid, (Int)(arg[1]), (StackInfo*)(arg[2]) );
3127 break;
3128
fitzhardinge98abfc72003-12-16 02:05:15 +00003129
3130 case VG_USERREQ__GET_SIGRT_MIN:
3131 SET_PTHREQ_RETVAL(tid, VG_(sig_rtmin));
3132 break;
3133
3134 case VG_USERREQ__GET_SIGRT_MAX:
3135 SET_PTHREQ_RETVAL(tid, VG_(sig_rtmax));
3136 break;
3137
3138 case VG_USERREQ__ALLOC_RTSIG:
3139 SET_PTHREQ_RETVAL(tid, VG_(sig_alloc_rtsig)((Int)arg[1]));
3140 break;
3141
fitzhardinge39de4b42003-10-31 07:12:21 +00003142 case VG_USERREQ__PRINTF: {
3143 int count =
nethercote3e901a22004-09-11 13:17:02 +00003144 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] );
fitzhardinge39de4b42003-10-31 07:12:21 +00003145 SET_CLREQ_RETVAL( tid, count );
3146 break; }
3147
fitzhardinge98abfc72003-12-16 02:05:15 +00003148
fitzhardinge39de4b42003-10-31 07:12:21 +00003149 case VG_USERREQ__INTERNAL_PRINTF: {
3150 int count =
nethercote3e901a22004-09-11 13:17:02 +00003151 VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (void*)arg[2] );
fitzhardinge39de4b42003-10-31 07:12:21 +00003152 SET_CLREQ_RETVAL( tid, count );
3153 break; }
3154
3155 case VG_USERREQ__PRINTF_BACKTRACE: {
3156 ExeContext *e = VG_(get_ExeContext)( tid );
3157 int count =
nethercote3e901a22004-09-11 13:17:02 +00003158 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] );
nethercote86c5dcb2004-09-05 21:32:37 +00003159 VG_(mini_stack_dump)(e->ips, VG_(clo_backtrace_size));
fitzhardinge39de4b42003-10-31 07:12:21 +00003160 SET_CLREQ_RETVAL( tid, count );
3161 break; }
3162
3163 case VG_USERREQ__INTERNAL_PRINTF_BACKTRACE: {
3164 ExeContext *e = VG_(get_ExeContext)( tid );
3165 int count =
nethercote3e901a22004-09-11 13:17:02 +00003166 VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (void*)arg[2] );
nethercote86c5dcb2004-09-05 21:32:37 +00003167 VG_(mini_stack_dump)(e->ips, VG_(clo_backtrace_size));
fitzhardinge39de4b42003-10-31 07:12:21 +00003168 SET_CLREQ_RETVAL( tid, count );
3169 break; }
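      /* Client-side, VG_USERREQ__PRINTF and VG_USERREQ__PRINTF_BACKTRACE
         are what the VALGRIND_PRINTF and VALGRIND_PRINTF_BACKTRACE macros
         in valgrind.h issue: arg[1] is the format string, arg[2] the
         va_list.  Hedged usage sketch: */
#if 0
      VALGRIND_PRINTF("reached phase %d, %d items live\n", phase, n_live);
      VALGRIND_PRINTF_BACKTRACE("unexpected state %d\n", state);
#endif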
3170
fitzhardinge98abfc72003-12-16 02:05:15 +00003171 case VG_USERREQ__GET_MALLOCFUNCS: {
3172 struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];
3173
3174 info->sk_malloc = (Addr)SK_(malloc);
3175 info->sk_calloc = (Addr)SK_(calloc);
3176 info->sk_realloc = (Addr)SK_(realloc);
3177 info->sk_memalign = (Addr)SK_(memalign);
3178 info->sk___builtin_new = (Addr)SK_(__builtin_new);
3179 info->sk___builtin_vec_new = (Addr)SK_(__builtin_vec_new);
3180 info->sk_free = (Addr)SK_(free);
3181 info->sk___builtin_delete = (Addr)SK_(__builtin_delete);
3182 info->sk___builtin_vec_delete = (Addr)SK_(__builtin_vec_delete);
3183
3184 info->arena_payload_szB = (Addr)VG_(arena_payload_szB);
3185
3186 info->clo_sloppy_malloc = VG_(clo_sloppy_malloc);
3187 info->clo_trace_malloc = VG_(clo_trace_malloc);
3188
3189 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
3190
3191 break;
3192 }
3193
njn25e49d8e72002-09-23 09:36:25 +00003194 /* Requests from the client program */
3195
3196 case VG_USERREQ__DISCARD_TRANSLATIONS:
3197 if (VG_(clo_verbosity) > 2)
3198 VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
3199 " addr %p, len %d\n",
3200 (void*)arg[1], arg[2] );
3201
sewardj97ad5522003-05-04 12:32:56 +00003202 VG_(invalidate_translations)( arg[1], arg[2], True );
njn25e49d8e72002-09-23 09:36:25 +00003203
njnd3040452003-05-19 15:04:06 +00003204 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
njn25e49d8e72002-09-23 09:36:25 +00003205 break;
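      /* The request above is issued by the VALGRIND_DISCARD_TRANSLATIONS
         macro in valgrind.h; programs that generate or patch code at run
         time use it so that stale translations are dropped.  Hedged
         sketch: */
#if 0
      emit_or_patch_code(buf, buf_len);              /* hypothetical JIT step */
      VALGRIND_DISCARD_TRANSLATIONS(buf, buf_len);   /* then notify Valgrind  */
#endif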
3206
njn47363ab2003-04-21 13:24:40 +00003207 case VG_USERREQ__COUNT_ERRORS:
nethercotef2b11482004-08-02 12:36:01 +00003208 SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() );
njn47363ab2003-04-21 13:24:40 +00003209 break;
3210
sewardje663cb92002-04-12 10:26:32 +00003211 default:
njn25e49d8e72002-09-23 09:36:25 +00003212 if (VG_(needs).client_requests) {
sewardj34042512002-10-22 04:14:35 +00003213 UInt ret;
3214
njn25e49d8e72002-09-23 09:36:25 +00003215 if (VG_(clo_verbosity) > 2)
fitzhardinge98abfc72003-12-16 02:05:15 +00003216 VG_(printf)("client request: code %x, addr %p, len %d\n",
njn25e49d8e72002-09-23 09:36:25 +00003217 arg[0], (void*)arg[1], arg[2] );
3218
njn72718642003-07-24 08:45:32 +00003219 if (SK_(handle_client_request) ( tid, arg, &ret ))
njnd3040452003-05-19 15:04:06 +00003220 SET_CLREQ_RETVAL(tid, ret);
njn25e49d8e72002-09-23 09:36:25 +00003221 } else {
sewardj34042512002-10-22 04:14:35 +00003222 static Bool whined = False;
3223
3224 if (!whined) {
nethercote7cc9c232004-01-21 15:08:04 +00003225 // Allow for requests handled in core but defined by tools,
njnd7994182003-10-02 13:44:04 +00003226 // which have zero in both of their two high bytes.
3227 Char c1 = (arg[0] >> 24) & 0xff;
3228 Char c2 = (arg[0] >> 16) & 0xff;
3229 if (c1 == 0) c1 = '_';
3230 if (c2 == 0) c2 = '_';
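            /* Worked example of the decoding: a tool request such as
               0x4D430001 ('M' = 0x4D, 'C' = 0x43) is reported as
               "MC+0x1"; a code with zero high bytes, e.g. 0x00001203,
               shows up as "__+0x1203". */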
sewardj34042512002-10-22 04:14:35 +00003231 VG_(message)(Vg_UserMsg, "Warning:\n"
njnd7994182003-10-02 13:44:04 +00003232 " unhandled client request: 0x%x (%c%c+0x%x). Perhaps\n"
3233 " VG_(needs).client_requests should be set?\n",
3234 arg[0], c1, c2, arg[0] & 0xffff);
sewardj34042512002-10-22 04:14:35 +00003235 whined = True;
3236 }
njn25e49d8e72002-09-23 09:36:25 +00003237 }
sewardje663cb92002-04-12 10:26:32 +00003238 break;
3239 }
3240}
3241
3242
sewardj6072c362002-04-19 14:40:57 +00003243/* ---------------------------------------------------------------------
3244 Sanity checking.
3245 ------------------------------------------------------------------ */
3246
3247/* Internal consistency checks on the sched/pthread structures. */
3248static
3249void scheduler_sanity ( void )
3250{
nethercote1f0173b2004-02-28 15:40:36 +00003251 vg_pthread_mutex_t* mx;
3252 vg_pthread_cond_t* cv;
sewardj6072c362002-04-19 14:40:57 +00003253 Int i;
jsgf855d93d2003-10-13 22:26:55 +00003254 struct timeout* top;
3255 UInt lasttime = 0;
3256
3257 for(top = timeouts; top != NULL; top = top->next) {
3258 vg_assert(top->time >= lasttime);
nethercote36881a22004-08-04 14:03:16 +00003259 vg_assert(is_valid_or_empty_tid(top->tid));
jsgf855d93d2003-10-13 22:26:55 +00003260
3261#if 0
3262 /* assert timeout entry is either stale, or associated with a
3263 thread in the right state
3264
3265 XXX disable for now - can be stale, but times happen to match
3266 */
3267 vg_assert(VG_(threads)[top->tid].awaken_at != top->time ||
3268 VG_(threads)[top->tid].status == VgTs_Sleeping ||
thughese321d492004-10-17 15:00:20 +00003269 VG_(threads)[top->tid].status == VgTs_WaitMX ||
jsgf855d93d2003-10-13 22:26:55 +00003270 VG_(threads)[top->tid].status == VgTs_WaitCV);
3271#endif
3272
3273 lasttime = top->time;
3274 }
sewardj5f07b662002-04-23 16:52:51 +00003275
sewardj6072c362002-04-19 14:40:57 +00003276 /* VG_(printf)("scheduler_sanity\n"); */
3277 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00003278 mx = VG_(threads)[i].associated_mx;
3279 cv = VG_(threads)[i].associated_cv;
3280 if (VG_(threads)[i].status == VgTs_WaitMX) {
sewardjbf290b92002-05-01 02:28:01 +00003281 /* If we're waiting on a MX: (1) the mx is not null, (2, 3)
3282 it's actually held by someone, since otherwise this thread
3283 is deadlocked, (4) the mutex's owner is not us, since
3284 otherwise this thread is also deadlocked. The logic in
3285 do_pthread_mutex_lock rejects attempts by a thread to lock
3286 a (non-recursive) mutex which it already owns.
sewardj05553872002-04-20 20:53:17 +00003287
sewardjbf290b92002-05-01 02:28:01 +00003288 (2) has been seen to fail sometimes. I don't know why.
3289 Possibly to do with signals. */
sewardj3b5d8862002-04-20 13:53:23 +00003290 vg_assert(cv == NULL);
sewardj05553872002-04-20 20:53:17 +00003291 /* 1 */ vg_assert(mx != NULL);
nethercote1f0173b2004-02-28 15:40:36 +00003292 /* 2 */ vg_assert(mx->__vg_m_count > 0);
3293 /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__vg_m_owner));
thughese321d492004-10-17 15:00:20 +00003294 /* 4 */ vg_assert((UInt)i != (ThreadId)mx->__vg_m_owner ||
3295 VG_(threads)[i].awaken_at != 0xFFFFFFFF);
sewardj3b5d8862002-04-20 13:53:23 +00003296 } else
sewardj018f7622002-05-15 21:13:39 +00003297 if (VG_(threads)[i].status == VgTs_WaitCV) {
sewardj3b5d8862002-04-20 13:53:23 +00003298 vg_assert(cv != NULL);
3299 vg_assert(mx != NULL);
sewardj6072c362002-04-19 14:40:57 +00003300 } else {
thughesf7269232004-10-16 16:17:06 +00003301 vg_assert(cv == NULL);
3302 vg_assert(mx == NULL);
sewardj6072c362002-04-19 14:40:57 +00003303 }
sewardjbf290b92002-05-01 02:28:01 +00003304
sewardj018f7622002-05-15 21:13:39 +00003305 if (VG_(threads)[i].status != VgTs_Empty) {
sewardjbf290b92002-05-01 02:28:01 +00003306 Int
sewardj018f7622002-05-15 21:13:39 +00003307 stack_used = (Addr)VG_(threads)[i].stack_highest_word
nethercoteb8ef9d82004-09-05 22:02:33 +00003308 - (Addr)ARCH_STACK_PTR(VG_(threads)[i].arch);
thughesdaa34562004-06-27 12:48:53 +00003309 Int
3310 stack_avail = VG_(threads)[i].stack_size
3311 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
3312 - VG_(threads)[i].stack_guard_size;
fitzhardinge98c4dc02004-03-16 08:27:29 +00003313 /* This test is a bit bogus - it doesn't take into account
3314 alternate signal stacks, for a start. Also, if a thread
3315 has its stack pointer somewhere strange, killing Valgrind
3316 isn't the right answer. */
3317 if (0 && i > 1 /* not the root thread */
thughesdaa34562004-06-27 12:48:53 +00003318 && stack_used >= stack_avail) {
sewardjbf290b92002-05-01 02:28:01 +00003319 VG_(message)(Vg_UserMsg,
njn25e49d8e72002-09-23 09:36:25 +00003320 "Error: STACK OVERFLOW: "
sewardjbf290b92002-05-01 02:28:01 +00003321 "thread %d: stack used %d, available %d",
thughesdaa34562004-06-27 12:48:53 +00003322 i, stack_used, stack_avail );
sewardjbf290b92002-05-01 02:28:01 +00003323 VG_(message)(Vg_UserMsg,
3324 "Terminating Valgrind. If thread(s) "
3325 "really need more stack, increase");
3326 VG_(message)(Vg_UserMsg,
rjwalsh7109a8c2004-09-02 00:31:02 +00003327 "VG_PTHREAD_STACK_SIZE in core.h and recompile.");
sewardjbf290b92002-05-01 02:28:01 +00003328 VG_(exit)(1);
3329 }
3330 }
sewardj6072c362002-04-19 14:40:57 +00003331 }
sewardj5f07b662002-04-23 16:52:51 +00003332
3333 for (i = 0; i < VG_N_THREAD_KEYS; i++) {
3334 if (!vg_thread_keys[i].inuse)
3335 vg_assert(vg_thread_keys[i].destructor == NULL);
3336 }
sewardj6072c362002-04-19 14:40:57 +00003337}
3338
3339
sewardje663cb92002-04-12 10:26:32 +00003340/*--------------------------------------------------------------------*/
3341/*--- end vg_scheduler.c ---*/
3342/*--------------------------------------------------------------------*/