
/*--------------------------------------------------------------------*/
/*--- A user-space pthreads implementation.        vg_scheduler.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an extensible x86 protected-mode
   emulator for monitoring program execution on x86-Unixes.

   Copyright (C) 2000-2004 Julian Seward
   jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "valgrind.h"   /* for VG_USERREQ__RUNNING_ON_VALGRIND and
                           VG_USERREQ__DISCARD_TRANSLATIONS, and others */
#include "core.h"


/* ---------------------------------------------------------------------
   Types and globals for the scheduler.
   ------------------------------------------------------------------ */

/* ThreadId and ThreadState are defined in core.h. */

/* Globals.  A statically allocated array of threads.  NOTE: [0] is
   never used, to simplify the simulation of initialisers for
   LinuxThreads. */
ThreadState VG_(threads)[VG_N_THREADS];

/* The process' fork-handler stack. */
static Int              vg_fhstack_used = 0;
static ForkHandlerEntry vg_fhstack[VG_N_FORKHANDLERSTACK];


/* The tid of the thread currently in VG_(baseBlock). */
static ThreadId vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;

/* The tid of the thread either currently in baseBlock, or which was in
   baseBlock before it was saved out; this is only updated when a new
   thread is loaded into the baseBlock. */
static ThreadId vg_tid_last_in_baseBlock = VG_INVALID_THREADID;

/* vg_oursignalhandler() might longjmp().  Here's the jmp_buf. */
static jmp_buf scheduler_jmpbuf;
/* This says whether scheduler_jmpbuf is actually valid.  Needed so
   that our signal handler doesn't longjmp when the buffer isn't
   actually valid. */
static Bool    scheduler_jmpbuf_valid = False;
/* ... and if so, here's the signal which caused it to do so. */
static Int     longjmpd_on_signal;
/* If the current thread gets a synchronous unresumable signal, then
   its details are placed here by the signal handler, to be passed to
   the application's signal handler later on. */
static vki_ksiginfo_t unresumable_siginfo;

/* If != VG_INVALID_THREADID, this is the preferred tid to schedule */
static ThreadId prefer_sched = VG_INVALID_THREADID;

/* Keeping track of keys. */
typedef
   struct {
      /* Has this key been allocated ? */
      Bool inuse;
      /* If .inuse==True, records the address of the associated
         destructor, or NULL if none. */
      void (*destructor)(void*);
   }
   ThreadKeyState;

/* And our array of thread keys. */
static ThreadKeyState vg_thread_keys[VG_N_THREAD_KEYS];

typedef UInt ThreadKey;

/* The address of the client's __libc_freeres() wrapper; the scheduler
   needs to know it so the wrapper can be run at program exit. */
static Addr __libc_freeres_wrapper;

/* Forwards */
static void do_client_request ( ThreadId tid, UInt* args );
static void scheduler_sanity ( void );
static void do_pthread_mutex_timedlock_TIMEOUT ( ThreadId tid );
static void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid );
static void maybe_rendezvous_joiners_and_joinees ( void );

/* Stats. */
static UInt n_scheduling_events_MINOR = 0;
static UInt n_scheduling_events_MAJOR = 0;

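/* Report the event counts accumulated above: major events are hunts
   for a new runnable thread, minor events are resumptions of the
   chosen thread. */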
void VG_(print_scheduler_stats)(void)
{
   VG_(message)(Vg_DebugMsg,
      "   %d/%d major/minor sched events.",
      n_scheduling_events_MAJOR, n_scheduling_events_MINOR);
}

/* ---------------------------------------------------------------------
   Helper functions for the scheduler.
   ------------------------------------------------------------------ */

__inline__
Bool VG_(is_valid_tid) ( ThreadId tid )
{
   /* tid is unsigned, hence no < 0 test. */
   if (tid == 0) return False;
   if (tid >= VG_N_THREADS) return False;
   if (VG_(threads)[tid].status == VgTs_Empty) return False;
   return True;
}


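/* Weaker variant of VG_(is_valid_tid): accepts slots which are
   currently empty, but still rejects tid 0 and out-of-range values. */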
__inline__
Bool is_valid_or_empty_tid ( ThreadId tid )
{
   /* tid is unsigned, hence no < 0 test. */
   if (tid == 0) return False;
   if (tid >= VG_N_THREADS) return False;
   return True;
}


/* For constructing error messages only: try and identify a thread
   whose stack satisfies the predicate p, or return VG_INVALID_THREADID
   if none do.  A small complication is dealing with any currently
   VG_(baseBlock)-resident thread.
*/
ThreadId VG_(first_matching_thread_stack)
        ( Bool (*p) ( Addr stack_min, Addr stack_max, void* d ),
          void* d )
{
   ThreadId tid, tid_to_skip;

   tid_to_skip = VG_INVALID_THREADID;

   /* First check to see if there's a currently-loaded thread in
      VG_(baseBlock). */
   if (vg_tid_currently_in_baseBlock != VG_INVALID_THREADID) {
      tid = vg_tid_currently_in_baseBlock;
      if ( p ( VG_(baseBlock)[VGOFF_STACK_PTR],
               VG_(threads)[tid].stack_highest_word, d ) )
         return tid;
      else
         tid_to_skip = tid;
   }

   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (VG_(threads)[tid].status == VgTs_Empty) continue;
      if (tid == tid_to_skip) continue;
      if ( p ( ARCH_STACK_PTR(VG_(threads)[tid].arch),
               VG_(threads)[tid].stack_highest_word, d ) )
         return tid;
   }
   return VG_INVALID_THREADID;
}


/* Print the scheduler status. */
void VG_(pp_sched_status) ( void )
{
   Int i;
   VG_(printf)("\nsched status:\n");
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty) continue;
      VG_(printf)("\nThread %d: status = ", i);
      switch (VG_(threads)[i].status) {
         case VgTs_Runnable:   VG_(printf)("Runnable"); break;
         case VgTs_WaitJoinee: VG_(printf)("WaitJoinee(%d)",
                                           VG_(threads)[i].joiner_jee_tid);
                               break;
         case VgTs_WaitJoiner: VG_(printf)("WaitJoiner"); break;
         case VgTs_Sleeping:   VG_(printf)("Sleeping"); break;
         case VgTs_WaitMX:     VG_(printf)("WaitMX"); break;
         case VgTs_WaitCV:     VG_(printf)("WaitCV"); break;
         case VgTs_WaitSys:    VG_(printf)("WaitSys"); break;
         default: VG_(printf)("???"); break;
      }
      VG_(printf)(", associated_mx = %p, associated_cv = %p\n",
                  VG_(threads)[i].associated_mx,
                  VG_(threads)[i].associated_cv );
      VG_(pp_ExeContext)(
         VG_(get_ExeContext2)( ARCH_INSTR_PTR(VG_(threads)[i].arch),
                               ARCH_FRAME_PTR(VG_(threads)[i].arch),
                               ARCH_STACK_PTR(VG_(threads)[i].arch),
                               VG_(threads)[i].stack_highest_word)
      );
   }
   VG_(printf)("\n");
}

static
void print_sched_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, "  SCHED[%d]: %s", tid, what );
}

static
void print_pthread_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, "PTHREAD[%d]: %s", tid, what );
}

static
Char* name_of_sched_event ( UInt event )
{
   switch (event) {
      case VG_TRC_EBP_JMP_SYSCALL:    return "SYSCALL";
      case VG_TRC_EBP_JMP_CLIENTREQ:  return "CLIENTREQ";
      case VG_TRC_EBP_JMP_YIELD:      return "YIELD";
      case VG_TRC_INNER_COUNTERZERO:  return "COUNTERZERO";
      case VG_TRC_INNER_FASTMISS:     return "FASTMISS";
      case VG_TRC_UNRESUMABLE_SIGNAL: return "FATALSIGNAL";
      default:                        return "??UNKNOWN??";
   }
}


/* Allocate a completely empty ThreadState record. */
static
ThreadId vg_alloc_ThreadState ( void )
{
   Int i;
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty)
         return i;
   }
   VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
   VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
   VG_(core_panic)("VG_N_THREADS is too low");
   /*NOTREACHED*/
}

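/* Return a pointer to tid's ThreadState record.  Unlike the validity
   checks above, this accepts any in-range slot, including empty ones. */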
ThreadState *VG_(get_ThreadState)(ThreadId tid)
{
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   return &VG_(threads)[tid];
}

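/* Is tid the thread currently resident in VG_(baseBlock)? */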
Bool VG_(is_running_thread)(ThreadId tid)
{
   ThreadId curr = VG_(get_current_tid)();
   return (curr == tid && VG_INVALID_THREADID != tid);
}

ThreadId VG_(get_current_tid) ( void )
{
   if (!VG_(is_valid_tid)(vg_tid_currently_in_baseBlock))
      return VG_INVALID_THREADID;
   return vg_tid_currently_in_baseBlock;
}

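/* Return the tid most recently loaded into VG_(baseBlock): the
   currently-resident thread if there is one, else the last thread to
   have been resident.  Never returns VG_INVALID_THREADID. */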
ThreadId VG_(get_current_or_recent_tid) ( void )
{
   vg_assert(vg_tid_currently_in_baseBlock == vg_tid_last_in_baseBlock ||
             vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);
   vg_assert(VG_(is_valid_tid)(vg_tid_last_in_baseBlock));

   return vg_tid_last_in_baseBlock;
}

/* Copy the saved state of a thread into VG_(baseBlock), ready for it
   to be run. */
static void load_thread_state ( ThreadId tid )
{
   vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);

   VGA_(load_state)(&VG_(threads)[tid].arch, tid);

   vg_tid_currently_in_baseBlock = tid;
   vg_tid_last_in_baseBlock = tid;
}


/* Copy the state of a thread from VG_(baseBlock), presumably after it
   has been descheduled.  For sanity-check purposes, fill the vacated
   VG_(baseBlock) with garbage so as to make the system more likely to
   fail quickly if we erroneously continue to poke around inside
   VG_(baseBlock) without first doing a load_thread_state().
*/
static void save_thread_state ( ThreadId tid )
{
   vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);

   VGA_(save_state)(&VG_(threads)[tid].arch, tid);

   vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;
}


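/* If client code is currently running (scheduler_jmpbuf_valid), stash
   the signal details in unresumable_siginfo and longjmp back into
   run_thread_for_a_while(), which then hands VG_TRC_UNRESUMABLE_SIGNAL
   to the scheduler.  If the jmp_buf isn't valid, do nothing. */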
void VG_(resume_scheduler)(Int sigNo, vki_ksiginfo_t *info)
{
   if (scheduler_jmpbuf_valid) {
      /* Can't continue; must longjmp back to the scheduler and thus
         enter the sighandler immediately. */
      VG_(memcpy)(&unresumable_siginfo, info, sizeof(vki_ksiginfo_t));

      longjmpd_on_signal = sigNo;
      __builtin_longjmp(scheduler_jmpbuf,1);
   }
}

/* Run the thread tid for a while, and return a VG_TRC_* value to the
   scheduler indicating what happened. */
static
UInt run_thread_for_a_while ( ThreadId tid )
{
   volatile UInt trc = 0;
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   vg_assert(!scheduler_jmpbuf_valid);

   VGP_PUSHCC(VgpRun);
   load_thread_state ( tid );

   /* there should be no undealt-with signals */
   vg_assert(unresumable_siginfo.si_signo == 0);

   if (__builtin_setjmp(scheduler_jmpbuf) == 0) {
      /* try this ... */
      scheduler_jmpbuf_valid = True;
      trc = VG_(run_innerloop)();
      scheduler_jmpbuf_valid = False;
      /* We get here if the client didn't take a fault. */
   } else {
      /* We get here if the client took a fault, which caused our
         signal handler to longjmp. */
      scheduler_jmpbuf_valid = False;
      vg_assert(trc == 0);
      trc = VG_TRC_UNRESUMABLE_SIGNAL;
   }

   vg_assert(!scheduler_jmpbuf_valid);

   save_thread_state ( tid );
   VGP_POPCC(VgpRun);
   return trc;
}


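/* Reset (nearly) every field of a thread slot to its startup value.
   The stack fields are deliberately left alone -- hence "mostly" --
   and are set up separately, e.g. in VG_(scheduler_init). */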
static
void mostly_clear_thread_record ( ThreadId tid )
{
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   VGA_(clear_thread)(&VG_(threads)[tid].arch);
   VG_(threads)[tid].tid                  = tid;
   VG_(threads)[tid].status               = VgTs_Empty;
   VG_(threads)[tid].associated_mx        = NULL;
   VG_(threads)[tid].associated_cv        = NULL;
   VG_(threads)[tid].awaken_at            = 0;
   VG_(threads)[tid].joinee_retval        = NULL;
   VG_(threads)[tid].joiner_thread_return = NULL;
   VG_(threads)[tid].joiner_jee_tid       = VG_INVALID_THREADID;
   VG_(threads)[tid].detached             = False;
   VG_(threads)[tid].cancel_st   = True; /* PTHREAD_CANCEL_ENABLE */
   VG_(threads)[tid].cancel_ty   = True; /* PTHREAD_CANCEL_DEFERRED */
   VG_(threads)[tid].cancel_pend = NULL; /* not pending */
   VG_(threads)[tid].custack_used = 0;
   VG_(ksigemptyset)(&VG_(threads)[tid].sig_mask);
   VG_(ksigfillset)(&VG_(threads)[tid].eff_sig_mask);
   VG_(threads)[tid].sigqueue_head = 0;
   VG_(threads)[tid].sigqueue_tail = 0;
   VG_(threads)[tid].specifics_ptr = NULL;

   VG_(threads)[tid].syscallno = -1;
   VG_(threads)[tid].sys_flags = 0;
   VG_(threads)[tid].sys_pre_res = NULL;

   VG_(threads)[tid].proxy = NULL;

   /* start with no altstack */
   VG_(threads)[tid].altstack.ss_sp = (void *)0xdeadbeef;
   VG_(threads)[tid].altstack.ss_size = 0;
   VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
}



/* Initialise the scheduler.  Create a single "main" thread ready to
   run, with special ThreadId of one.  This is called at startup; the
   caller takes care to park the client's state in VG_(baseBlock).
*/
void VG_(scheduler_init) ( void )
{
   Int      i;
   ThreadId tid_main;

   for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
      mostly_clear_thread_record(i);
      VG_(threads)[i].stack_size         = 0;
      VG_(threads)[i].stack_base         = (Addr)NULL;
      VG_(threads)[i].stack_guard_size   = 0;
      VG_(threads)[i].stack_highest_word = (Addr)NULL;
   }

   for (i = 0; i < VG_N_THREAD_KEYS; i++) {
      vg_thread_keys[i].inuse      = False;
      vg_thread_keys[i].destructor = NULL;
   }

   vg_fhstack_used = 0;

   /* Assert that this is thread one, which has certain magic
      properties. */
   tid_main = vg_alloc_ThreadState();
   vg_assert(tid_main == 1);
   VG_(threads)[tid_main].status = VgTs_Runnable;

   /* Copy VG_(baseBlock) state to tid_main's slot. */
   vg_tid_currently_in_baseBlock = tid_main;
   vg_tid_last_in_baseBlock = tid_main;

   VGA_(init_thread)(&VG_(threads)[tid_main].arch);
   save_thread_state ( tid_main );

   VG_(threads)[tid_main].stack_highest_word
      = VG_(clstk_end) - 4;
   VG_(threads)[tid_main].stack_base = VG_(clstk_base);
   VG_(threads)[tid_main].stack_size = VG_(client_rlimit_stack).rlim_cur;

   /* So now ... */
   vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);

   /* Not running client code right now. */
   scheduler_jmpbuf_valid = False;

   /* Proxy for main thread */
   VG_(proxy_create)(tid_main);
}



/* vthread tid is returning from a signal handler; modify its
   stack/regs accordingly. */
static
void handle_signal_return ( ThreadId tid )
{
   Bool restart_blocked_syscalls;
   struct vki_timespec * rem;

   vg_assert(VG_(is_valid_tid)(tid));

   restart_blocked_syscalls = VG_(signal_returns)(tid);

   /* If we were interrupted in the middle of a rendezvous
      then check the rendezvous hasn't completed while we
      were busy handling the signal. */
   if (VG_(threads)[tid].status == VgTs_WaitJoiner ||
       VG_(threads)[tid].status == VgTs_WaitJoinee ) {
      maybe_rendezvous_joiners_and_joinees();
   }

   /* If we were interrupted while waiting on a mutex then check that
      it hasn't been unlocked while we were busy handling the signal. */
   if (VG_(threads)[tid].status == VgTs_WaitMX &&
       VG_(threads)[tid].associated_mx->__vg_m_count == 0) {
      vg_pthread_mutex_t* mutex = VG_(threads)[tid].associated_mx;
      mutex->__vg_m_count = 1;
      mutex->__vg_m_owner = (/*_pthread_descr*/void*)tid;
      VG_(threads)[tid].status = VgTs_Runnable;
      VG_(threads)[tid].associated_mx = NULL;
      /* m_edx already holds pth_mx_lock() success (0) */
   }

   if (restart_blocked_syscalls)
      /* Easy; we don't have to do anything. */
      return;

   if (VG_(threads)[tid].status == VgTs_Sleeping
       && PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_nanosleep) {
      /* We interrupted a nanosleep().  The right thing to do is to
         write the unused time to nanosleep's second param, but that's
         too much effort ... we just say that 1 nanosecond was not
         used, and return EINTR. */
      rem = (struct vki_timespec*)PLATFORM_SYSCALL_ARG2(VG_(threads)[tid].arch);
      if (rem != NULL) {
         rem->tv_sec = 0;
         rem->tv_nsec = 1;
      }
      SET_SYSCALL_RETVAL(tid, -VKI_EINTR);
      VG_(threads)[tid].status = VgTs_Runnable;
      return;
   }

   /* All other cases?  Just return. */
}


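/* Pending wakeups for sleeping / timed-waiting threads.  add_timeout()
   keeps the list sorted by increasing wakeup time; idle() walks it
   after polling, making overdue sleepers runnable or invoking the
   appropriate *_TIMEOUT handler, and frees expired entries. */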
struct timeout {
   UInt     time;   /* time we should awaken */
   ThreadId tid;    /* thread which cares about this timeout */
   struct timeout *next;
};

static struct timeout *timeouts;

static void add_timeout(ThreadId tid, UInt time)
{
   struct timeout *t = VG_(arena_malloc)(VG_AR_CORE, sizeof(*t));
   struct timeout **prev, *tp;

   t->time = time;
   t->tid = tid;

   if (VG_(clo_trace_sched)) {
      Char msg_buf[100];
      VG_(sprintf)(msg_buf, "add_timeout: now=%u adding timeout at %u",
                   VG_(read_millisecond_timer)(), time);
      print_sched_event(tid, msg_buf);
   }

   for(tp = timeouts, prev = &timeouts;
       tp != NULL && tp->time < time;
       prev = &tp->next, tp = tp->next)
      ;
   t->next = tp;
   *prev = t;
}

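/* Start a syscall on behalf of thread tid.  If VG_(pre_syscall)
   completes it immediately the thread stays Runnable; otherwise the
   thread is left in VgTs_WaitSys until the result is picked up later
   via VG_(proxy_results)() in the scheduler loop. */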
static
void sched_do_syscall ( ThreadId tid )
{
   Int  syscall_no;
   Char msg_buf[100];

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   syscall_no = PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch);

   /* Special-case nanosleep because we can.  But should we?

      XXX not doing so for now, because it doesn't seem to work
      properly, and we can use the syscall nanosleep just as easily.
   */
   if (0 && syscall_no == __NR_nanosleep) {
      UInt t_now, t_awaken;
      struct vki_timespec* req;
      req = (struct vki_timespec*)PLATFORM_SYSCALL_ARG1(VG_(threads)[tid].arch);

      if (req->tv_sec < 0 || req->tv_nsec < 0 || req->tv_nsec >= 1000000000) {
         SET_SYSCALL_RETVAL(tid, -VKI_EINVAL);
         return;
      }

      t_now = VG_(read_millisecond_timer)();
      t_awaken
         = t_now
           + (UInt)1000ULL * (UInt)(req->tv_sec)
           + (UInt)(req->tv_nsec) / 1000000;
      VG_(threads)[tid].status    = VgTs_Sleeping;
      VG_(threads)[tid].awaken_at = t_awaken;
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf, "at %d: nanosleep for %d",
                               t_now, t_awaken-t_now);
         print_sched_event(tid, msg_buf);
      }
      add_timeout(tid, t_awaken);
      /* Force the scheduler to run something else for a while. */
      return;
   }

   /* If pre_syscall returns true, then we're done immediately */
   if (VG_(pre_syscall)(tid)) {
      VG_(post_syscall(tid, True));
      vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   } else {
      vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
   }
}



/* Sleep for a while, but be willing to be woken. */
static
void idle ( void )
{
   struct vki_pollfd pollfd[1];
   Int delta = -1;
   Int fd = VG_(proxy_resfd)();

   pollfd[0].fd = fd;
   pollfd[0].events = VKI_POLLIN;

   /* Look through the nearest timeouts, looking for the next future
      one (there may be stale past timeouts).  They'll all be mopped
      up below when the poll() finishes. */
   if (timeouts != NULL) {
      struct timeout *tp;
      Bool wicked = False;
      UInt now = VG_(read_millisecond_timer)();

      for(tp = timeouts; tp != NULL && tp->time < now; tp = tp->next) {
         /* If a thread is still sleeping in the past, make it runnable */
         ThreadState *tst = VG_(get_ThreadState)(tp->tid);
         if (tst->status == VgTs_Sleeping)
            tst->status = VgTs_Runnable;
         wicked = True;    /* no sleep for the wicked */
      }

      if (tp != NULL) {
         delta = tp->time - now;
         vg_assert(delta >= 0);
      }
      if (wicked)
         delta = 0;
   }

   /* gotta wake up for something! */
   vg_assert(fd != -1 || delta != -1);

   /* If we need to do signal routing, then poll for pending signals
      every VG_(clo_signal_polltime) mS */
   if (VG_(do_signal_routing) && (delta > VG_(clo_signal_polltime) || delta == -1))
      delta = VG_(clo_signal_polltime);

   if (VG_(clo_trace_sched)) {
      Char msg_buf[100];
      VG_(sprintf)(msg_buf, "idle: waiting for %dms and fd %d",
                   delta, fd);
      print_sched_event(0, msg_buf);
   }

   VG_(poll)(pollfd, fd != -1 ? 1 : 0, delta);

   /* See if there's anything on the timeout list which needs
      waking, and mop up anything in the past. */
   {
      UInt now = VG_(read_millisecond_timer)();
      struct timeout *tp;

      tp = timeouts;

      while(tp && tp->time <= now) {
         struct timeout *dead;
         ThreadState *tst;

         tst = VG_(get_ThreadState)(tp->tid);

         if (VG_(clo_trace_sched)) {
            Char msg_buf[100];
            VG_(sprintf)(msg_buf, "idle: now=%u removing timeout at %u",
                         now, tp->time);
            print_sched_event(tp->tid, msg_buf);
         }

         /* If awaken_at != tp->time then it means the timeout is
            stale and we should just ignore it. */
         if(tst->awaken_at == tp->time) {
            switch(tst->status) {
            case VgTs_Sleeping:
               tst->awaken_at = 0xFFFFFFFF;
               tst->status = VgTs_Runnable;
               break;

            case VgTs_WaitMX:
               do_pthread_mutex_timedlock_TIMEOUT(tst->tid);
               break;

            case VgTs_WaitCV:
               do_pthread_cond_timedwait_TIMEOUT(tst->tid);
               break;

            default:
               /* This is a bit odd but OK; if a thread had a timeout
                  but woke for some other reason (signal, condvar
                  wakeup), then it will still be on the list. */
               if (0)
                  VG_(printf)("idle(): unexpected status tp->tid=%d tst->status = %d\n",
                              tp->tid, tst->status);
               break;
            }
         }

         dead = tp;
         tp = tp->next;

         VG_(arena_free)(VG_AR_CORE, dead);
      }

      timeouts = tp;
   }
}


/* ---------------------------------------------------------------------
   The scheduler proper.
   ------------------------------------------------------------------ */

// For handling of the default action of a fatal signal.
// jmp_buf for fatal signals; fatal_signal_jmpbuf_ptr is NULL until
// it is safe to use it.
static jmp_buf  fatal_signal_jmpbuf;
static jmp_buf* fatal_signal_jmpbuf_ptr;
static Int      fatal_sigNo;   // the fatal signal, if it happens

/* Run user-space threads until either
   * Deadlock occurs
   * One thread asks to shut down Valgrind
   * The specified number of basic blocks has gone by.
*/
VgSchedReturnCode do_scheduler ( Int* exitcode, ThreadId* last_run_tid )
{
   ThreadId tid, tid_next;
   UInt     trc;
   UInt     dispatch_ctr_SAVED;
   Int      done_this_time, n_in_bounded_wait;
   Int      n_exists, n_waiting_for_reaper;
   Addr     trans_addr;

   /* Start with the root thread.  tid in general indicates the
      currently runnable/just-finished-running thread. */
   *last_run_tid = tid = 1;

   /* This is the top level scheduler loop.  It falls into three
      phases. */
   while (True) {

      /* ======================= Phase 0 of 3 =======================
         Be paranoid.  Always a good idea. */
     stage1:
      scheduler_sanity();
      VG_(sanity_check_general)( False );

      /* ======================= Phase 1 of 3 =======================
         Handle I/O completions and signals.  This may change the
         status of various threads.  Then select a new thread to run,
         or declare deadlock, or sleep if there are no runnable
         threads but some are blocked on I/O.  */

      /* Do the following loop until a runnable thread is found, or
         deadlock is detected. */
      while (True) {

         /* For stats purposes only. */
         n_scheduling_events_MAJOR++;

         /* Route signals to their proper places */
         VG_(route_signals)();

         /* See if any of the proxy LWPs report any activity: either a
            syscall completing or a signal arriving. */
         VG_(proxy_results)();

         /* Try and find a thread (tid) to run. */
         tid_next = tid;
         if (prefer_sched != VG_INVALID_THREADID) {
            tid_next = prefer_sched-1;
            prefer_sched = VG_INVALID_THREADID;
         }
         n_in_bounded_wait = 0;
         n_exists = 0;
         n_waiting_for_reaper = 0;
         while (True) {
            tid_next++;
            if (tid_next >= VG_N_THREADS) tid_next = 1;
            if (VG_(threads)[tid_next].status == VgTs_Sleeping
                || VG_(threads)[tid_next].status == VgTs_WaitSys
                || (VG_(threads)[tid_next].status == VgTs_WaitMX
                    && VG_(threads)[tid_next].awaken_at != 0xFFFFFFFF)
                || (VG_(threads)[tid_next].status == VgTs_WaitCV
                    && VG_(threads)[tid_next].awaken_at != 0xFFFFFFFF))
               n_in_bounded_wait ++;
            if (VG_(threads)[tid_next].status != VgTs_Empty)
               n_exists++;
            if (VG_(threads)[tid_next].status == VgTs_WaitJoiner)
               n_waiting_for_reaper++;
            if (VG_(threads)[tid_next].status == VgTs_Runnable)
               break; /* We can run this one. */
            if (tid_next == tid)
               break; /* been all the way round */
         }
         tid = tid_next;

         if (VG_(threads)[tid].status == VgTs_Runnable) {
            /* Found a suitable candidate.  Fall out of this loop, so
               we can advance to stage 2 of the scheduler: actually
               running the thread. */
            break;
         }

         /* All threads have exited - pretend someone called exit() */
         if (n_waiting_for_reaper == n_exists) {
            *exitcode = 0;   /* ? */
            return VgSrc_ExitSyscall;
         }

         /* We didn't find a runnable thread.  Now what? */
         if (n_in_bounded_wait == 0) {
            /* No runnable threads and no prospect of any appearing
               even if we wait for an arbitrary length of time.  In
               short, we have a deadlock. */
            VG_(pp_sched_status)();
            return VgSrc_Deadlock;
         }

         /* Nothing needs doing, so sit in idle until either a timeout
            happens or a thread's syscall completes. */
         idle();
         /* pp_sched_status(); */
         /* VG_(printf)("."); */
      }


      /* ======================= Phase 2 of 3 =======================
         Wahey! We've finally decided that thread tid is runnable, so
         we now do that.  Run it for as much of a quantum as possible.
         Trivial requests are handled and the thread continues.  The
         aim is not to do too many of Phase 1 since it is expensive.  */

      if (0)
         VG_(printf)("SCHED: tid %d\n", tid);

      VG_TRACK( thread_run, tid );

      /* Figure out how many bbs to ask vg_run_innerloop to do.  Note
         that it decrements the counter before testing it for zero, so
         that if VG_(dispatch_ctr) is set to N you get at most N-1
         iterations.  Also this means that VG_(dispatch_ctr) must
         exceed zero before entering the innerloop.  Also also, the
         decrement is done before the bb is actually run, so you
         always get at least one decrement even if nothing happens.
      */
      VG_(dispatch_ctr) = VG_SCHEDULING_QUANTUM + 1;

      /* ... and remember what we asked for. */
      dispatch_ctr_SAVED = VG_(dispatch_ctr);

      /* paranoia ... */
      vg_assert(VG_(threads)[tid].tid == tid);

      /* Actually run thread tid. */
      while (True) {

         *last_run_tid = tid;

         /* For stats purposes only. */
         n_scheduling_events_MINOR++;

         if (0)
            VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs",
                                      tid, VG_(dispatch_ctr) - 1 );
#        if 0
         if (VG_(bbs_done) > 31700000 + 0) {
            dispatch_ctr_SAVED = VG_(dispatch_ctr) = 2;
            VG_(translate)(&VG_(threads)[tid],
                           ARCH_INSTR_PTR(VG_(threads)[tid].arch),
                           /*debugging*/True);
         }
         vg_assert(ARCH_INSTR_PTR(VG_(threads)[tid].arch) != 0);
#        endif

         trc = run_thread_for_a_while ( tid );

#        if 0
         if (0 == ARCH_INSTR_PTR(VG_(threads)[tid].arch)) {
            VG_(printf)("tid = %d,  dc = %llu\n", tid, VG_(bbs_done));
            vg_assert(0 != ARCH_INSTR_PTR(VG_(threads)[tid].arch));
         }
#        endif

         /* Deal quickly with trivial scheduling events, and resume the
            thread. */

         if (trc == VG_TRC_INNER_FASTMISS) {
            Addr ip = ARCH_INSTR_PTR(VG_(threads)[tid].arch);

            vg_assert(VG_(dispatch_ctr) > 0);

            /* Trivial event.  Miss in the fast-cache.  Do a full
               lookup for it. */
            trans_addr = VG_(search_transtab)( ip );
            if (trans_addr == (Addr)0) {
               /* Not found; we need to request a translation. */
               if (VG_(translate)( tid, ip, /*debug*/False )) {
                  trans_addr = VG_(search_transtab)( ip );
                  if (trans_addr == (Addr)0)
                     VG_(core_panic)("VG_TRC_INNER_FASTMISS: missing tt_fast entry");
               } else {
                  // If VG_(translate)() fails, it's because it had to throw
                  // a signal because the client jumped to a bad address.
                  // This means VG_(deliver_signal)() will have been called
                  // by now, and the program counter will now be pointing to
                  // the start of the signal handler (if there is no
                  // handler, things would have been aborted by now), so do
                  // nothing, and things will work out next time around the
                  // scheduler loop.
               }
            }
            continue; /* with this thread */
         }

         if (trc == VG_TRC_EBP_JMP_CLIENTREQ) {
            UInt* args = (UInt*)(ARCH_CLREQ_ARGS(VG_(threads)[tid].arch));
            UInt  reqno = args[0];
            /* VG_(printf)("request 0x%x\n", reqno); */

            /* Are we really absolutely totally quitting? */
            if (reqno == VG_USERREQ__LIBC_FREERES_DONE) {
               if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                  VG_(message)(Vg_DebugMsg,
                     "__libc_freeres() done; really quitting!");
               }
               return VgSrc_ExitSyscall;
            }

            do_client_request(tid,args);
            /* Following the request, we try and continue with the
               same thread if still runnable.  If not, go back to
               Stage 1 to select a new thread to run. */
            if (VG_(threads)[tid].status == VgTs_Runnable
                && reqno != VG_USERREQ__PTHREAD_YIELD)
               continue; /* with this thread */
            else
               goto stage1;
         }

         if (trc == VG_TRC_EBP_JMP_SYSCALL) {
            /* Do a syscall for the vthread tid.  This could cause it
               to become non-runnable.  One special case: spot the
               client doing calls to exit() and take this as the cue
               to exit. */
#           if 0
            { UInt* esp; Int i;
              esp=(UInt*)ARCH_STACK_PTR(VG_(threads)[tid].arch);
              VG_(printf)("\nBEFORE\n");
              for (i = 10; i >= -10; i--)
                 VG_(printf)("%2d  %p  =  0x%x\n", i, &esp[i], esp[i]);
            }
#           endif

            /* Deal with calling __libc_freeres() at exit.  When the
               client does __NR_exit, it's exiting for good.  So we
               then run __libc_freeres_wrapper.  That quits by
               doing VG_USERREQ__LIBC_FREERES_DONE, and at that point
               we really exit.  To be safe we nuke all other threads
               currently running.

               If not valgrinding (cachegrinding, etc) don't do this.
               __libc_freeres does some invalid frees which crash
               the unprotected malloc/free system. */

            if (PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit
                || PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit_group
               ) {

               /* If __NR_exit, remember the supplied argument. */
               *exitcode = PLATFORM_SYSCALL_ARG1(VG_(threads)[tid].arch);

               /* Only run __libc_freeres if the tool says it's ok and
                  it hasn't been overridden with --run-libc-freeres=no
                  on the command line. */

               if (VG_(needs).libc_freeres &&
                   VG_(clo_run_libc_freeres) &&
                   __libc_freeres_wrapper != 0) {
                  if (VG_(clo_verbosity) > 2
                      || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                     VG_(message)(Vg_DebugMsg,
                        "Caught __NR_exit; running __libc_freeres()");
                  }
                  VG_(nuke_all_threads_except) ( tid );
                  ARCH_INSTR_PTR(VG_(threads)[tid].arch) =
                     (UInt)__libc_freeres_wrapper;
                  vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
                  goto stage1; /* party on, dudes (but not for much longer :) */

               } else {
                  /* We won't run __libc_freeres; just exit now. */
                  if (VG_(clo_verbosity) > 2
                      || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                     VG_(message)(Vg_DebugMsg,
                        "Caught __NR_exit; quitting");
                  }
                  return VgSrc_ExitSyscall;
               }

            }

            /* We've dealt with __NR_exit at this point. */
            vg_assert(PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) != __NR_exit &&
                      PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) != __NR_exit_group);

            /* Trap syscalls to __NR_sched_yield and just have this
               thread yield instead.  Not essential, just an
               optimisation. */
            if (PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_sched_yield) {
               SET_SYSCALL_RETVAL(tid, 0); /* syscall returns with success */
               goto stage1; /* find a new thread to run */
            }

            sched_do_syscall(tid);

#           if 0
            { UInt* esp; Int i;
              esp=(UInt*)ARCH_STACK_PTR(VG_(threads)[tid].arch);
              VG_(printf)("AFTER\n");
              for (i = 10; i >= -10; i--)
                 VG_(printf)("%2d  %p  =  0x%x\n", i, &esp[i], esp[i]);
            }
#           endif

            if (VG_(threads)[tid].status == VgTs_Runnable) {
               continue; /* with this thread */
            } else {
               goto stage1;
            }
         }

         /* It's an event we can't quickly deal with.  Give up running
            this thread and handle things the expensive way. */
         break;
      }

      /* ======================= Phase 3 of 3 =======================
         Handle non-trivial thread requests, mostly pthread stuff. */

      /* Ok, we've fallen out of the dispatcher for a
         non-completely-trivial reason. First, update basic-block
         counters. */

      done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr);
      vg_assert(done_this_time > 0);
      VG_(bbs_done) += (ULong)done_this_time;

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d:   completed %d bbs, trc %d",
                                   tid, done_this_time, (Int)trc );

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d:  %llu bbs, event %s",
                                   tid, VG_(bbs_done),
                                   name_of_sched_event(trc) );

      /* Examine the thread's return code to figure out why it
         stopped. */

      switch (trc) {

         case VG_TRC_EBP_JMP_YIELD:
            /* Explicit yield.  Let a new thread be scheduled,
               simply by doing nothing, causing us to arrive back at
               Phase 1. */
            break;

         case VG_TRC_INNER_COUNTERZERO:
            /* Timeslice is out.  Let a new thread be scheduled,
               simply by doing nothing, causing us to arrive back at
               Phase 1. */
            vg_assert(VG_(dispatch_ctr) == 0);
            break;

         case VG_TRC_UNRESUMABLE_SIGNAL:
            /* It got a SIGSEGV/SIGBUS/SIGILL/SIGFPE, which we need to
               deliver right away.  */
            vg_assert(unresumable_siginfo.si_signo == VKI_SIGSEGV ||
                      unresumable_siginfo.si_signo == VKI_SIGBUS ||
                      unresumable_siginfo.si_signo == VKI_SIGILL ||
                      unresumable_siginfo.si_signo == VKI_SIGFPE);
            vg_assert(longjmpd_on_signal == unresumable_siginfo.si_signo);

            /* make sure we've unblocked the signals which the handler blocked */
            VG_(unblock_host_signal)(longjmpd_on_signal);

            VG_(deliver_signal)(tid, &unresumable_siginfo, False);
            unresumable_siginfo.si_signo = 0; /* done */
            break;

         default:
            VG_(printf)("\ntrc = %d\n", trc);
            VG_(core_panic)("VG_(scheduler), phase 3: "
                            "unexpected thread return code");
            /* NOTREACHED */
            break;

      } /* switch (trc) */

      /* That completes Phase 3 of 3.  Return now to the top of the
         main scheduler loop, to Phase 1 of 3. */

   } /* top-level scheduler loop */


   /* NOTREACHED */
   VG_(core_panic)("scheduler: post-main-loop ?!");
   /* NOTREACHED */
}

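/* Top-level entry point: run do_scheduler() under a setjmp so that a
   fatal signal (reported via VG_(scheduler_handle_fatal_signal)) can
   abort the run cleanly, yielding VgSrc_FatalSig and the signal
   number. */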
VgSchedReturnCode VG_(scheduler) ( Int* exitcode, ThreadId* last_run_tid,
                                   Int* fatal_sigNo_ptr )
{
   VgSchedReturnCode src;

   fatal_signal_jmpbuf_ptr = &fatal_signal_jmpbuf;
   if (__builtin_setjmp( fatal_signal_jmpbuf_ptr ) == 0) {
      src = do_scheduler( exitcode, last_run_tid );
   } else {
      src = VgSrc_FatalSig;
      *fatal_sigNo_ptr = fatal_sigNo;
   }
   return src;
}

void VG_(need_resched) ( ThreadId prefer )
{
   /* Tell the scheduler now might be a good time to find a new
      runnable thread, because something happened which woke a thread
      up.

      NB: This can be called unsynchronized from either a signal
      handler, or from another LWP (ie, real kernel thread).

      In principle this could simply be a matter of setting
      VG_(dispatch_ctr) to a small value (say, 2), which would make
      any running code come back to the scheduler fairly quickly.

      However, since the scheduler implements a strict round-robin
      policy with only one priority level, there are, by definition,
      no better threads to be running than the current thread anyway,
      so we may as well ignore this hint.  For processes with a
      mixture of compute and I/O bound threads, this means the compute
      threads could introduce longish latencies before the I/O threads
      run.  For programs with only I/O bound threads, need_resched
      won't have any effect anyway.

      OK, so I've added command-line switches to enable low-latency
      syscalls and signals.  The prefer_sched variable is in effect
      the ID of a single thread which has higher priority than all the
      others.  If set, the scheduler will prefer to schedule that
      thread over all others.  Naturally, this could lead to
      starvation or other unfairness.
   */

   if (VG_(dispatch_ctr) > 10)
      VG_(dispatch_ctr) = 2;
   prefer_sched = prefer;
}

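/* Called when a fatal signal's default action must be taken: if
   VG_(scheduler) is active, longjmp back out of it so it can return
   VgSrc_FatalSig to its caller. */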
nethercote238a3c32004-08-09 13:13:31 +00001154void VG_(scheduler_handle_fatal_signal) ( Int sigNo )
1155{
1156 if (NULL != fatal_signal_jmpbuf_ptr) {
1157 fatal_sigNo = sigNo;
1158 __builtin_longjmp(*fatal_signal_jmpbuf_ptr, 1);
1159 }
1160}
sewardje663cb92002-04-12 10:26:32 +00001161
1162/* ---------------------------------------------------------------------
1163 The pthread implementation.
1164 ------------------------------------------------------------------ */
1165
1166#include <pthread.h>
1167#include <errno.h>
1168
sewardje663cb92002-04-12 10:26:32 +00001169/* /usr/include/bits/pthreadtypes.h:
1170 typedef unsigned long int pthread_t;
1171*/
1172
sewardje663cb92002-04-12 10:26:32 +00001173
sewardj604ec3c2002-04-18 22:38:41 +00001174/* -----------------------------------------------------------
sewardj20917d82002-05-28 01:36:45 +00001175 Thread CREATION, JOINAGE and CANCELLATION: HELPER FNS
sewardj604ec3c2002-04-18 22:38:41 +00001176 -------------------------------------------------------- */
1177
sewardj20917d82002-05-28 01:36:45 +00001178/* We've decided to action a cancellation on tid. Make it jump to
1179 thread_exit_wrapper() in vg_libpthread.c, passing PTHREAD_CANCELED
1180 as the arg. */
1181static
1182void make_thread_jump_to_cancelhdlr ( ThreadId tid )
1183{
1184 Char msg_buf[100];
1185 vg_assert(VG_(is_valid_tid)(tid));
sewardjdadc8d02002-12-08 23:24:18 +00001186
sewardj20917d82002-05-28 01:36:45 +00001187 /* Push PTHREAD_CANCELED on the stack and jump to the cancellation
1188 handler -- which is really thread_exit_wrapper() in
1189 vg_libpthread.c. */
1190 vg_assert(VG_(threads)[tid].cancel_pend != NULL);
sewardj4bdd9962002-12-26 11:51:50 +00001191
nethercote6b9c8472004-09-13 13:16:40 +00001192 /* Set an argument and bogus return address. The return address will not
1193 be used, but we still need to have it so that the arg is at the
1194 correct stack offset. */
1195 VGA_(set_arg_and_bogus_ret)(tid, (UInt)PTHREAD_CANCELED, 0xBEADDEEF);
sewardj4bdd9962002-12-26 11:51:50 +00001196
1197 /* .cancel_pend will hold &thread_exit_wrapper */
nethercoteb8ef9d82004-09-05 22:02:33 +00001198 ARCH_INSTR_PTR(VG_(threads)[tid].arch) = (UInt)VG_(threads)[tid].cancel_pend;
sewardjdadc8d02002-12-08 23:24:18 +00001199
jsgf855d93d2003-10-13 22:26:55 +00001200 VG_(proxy_abort_syscall)(tid);
sewardjdadc8d02002-12-08 23:24:18 +00001201
sewardj20917d82002-05-28 01:36:45 +00001202 /* Make sure we aren't cancelled again whilst handling this
1203 cancellation. */
1204 VG_(threads)[tid].cancel_st = False;
1205 if (VG_(clo_trace_sched)) {
1206 VG_(sprintf)(msg_buf,
1207 "jump to cancellation handler (hdlr = %p)",
1208 VG_(threads)[tid].cancel_pend);
1209 print_sched_event(tid, msg_buf);
1210 }
thughes513197c2004-06-13 12:07:53 +00001211
1212 if(VG_(threads)[tid].status == VgTs_WaitCV) {
1213      /* POSIX says we must reacquire the mutex before handling the cancellation */
1214 vg_pthread_mutex_t* mx;
1215 vg_pthread_cond_t* cond;
1216
1217 mx = VG_(threads)[tid].associated_mx;
1218 cond = VG_(threads)[tid].associated_cv;
1219 VG_TRACK( pre_mutex_lock, tid, mx );
1220
1221 if (mx->__vg_m_owner == VG_INVALID_THREADID) {
1222 /* Currently unheld; hand it out to thread tid. */
1223 vg_assert(mx->__vg_m_count == 0);
1224 VG_(threads)[tid].status = VgTs_Runnable;
1225 VG_(threads)[tid].associated_cv = NULL;
1226 VG_(threads)[tid].associated_mx = NULL;
thughes10236472004-06-13 14:35:43 +00001227 mx->__vg_m_owner = (/*_pthread_descr*/void*)tid;
thughes513197c2004-06-13 12:07:53 +00001228 mx->__vg_m_count = 1;
1229 /* .m_edx already holds pth_cond_wait success value (0) */
1230
1231 VG_TRACK( post_mutex_lock, tid, mx );
1232
1233 if (VG_(clo_trace_pthread_level) >= 1) {
1234 VG_(sprintf)(msg_buf, "%s cv %p: RESUME with mx %p",
1235 "pthread_cancel", cond, mx );
1236 print_pthread_event(tid, msg_buf);
1237 }
1238
1239 } else {
1240 /* Currently held. Make thread tid be blocked on it. */
1241 vg_assert(mx->__vg_m_count > 0);
1242 VG_(threads)[tid].status = VgTs_WaitMX;
1243 VG_(threads)[tid].associated_cv = NULL;
1244 VG_(threads)[tid].associated_mx = mx;
1245 SET_PTHREQ_RETVAL(tid, 0); /* pth_cond_wait success value */
1246
1247 if (VG_(clo_trace_pthread_level) >= 1) {
1248 VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
1249 "pthread_cancel", cond, mx );
1250 print_pthread_event(tid, msg_buf);
1251 }
1252 }
1253 } else {
1254 VG_(threads)[tid].status = VgTs_Runnable;
1255 }
sewardj20917d82002-05-28 01:36:45 +00001256}
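
/* For orientation: .cancel_pend points at thread_exit_wrapper() in
   vg_libpthread.c (see the comment above).  The sketch below is a
   guess at the *shape* of that client-side target -- a single argument
   (PTHREAD_CANCELED) and no return -- not its actual code. */
#if 0
static void sketch_thread_exit_wrapper ( void* ret /* == PTHREAD_CANCELED */ )
{
   /* hypothetically: run any pthread_cleanup_push()ed handlers and the
      destructors for thread-specific data, then retire the thread with
      return value 'ret'. */
   pthread_exit(ret);
}
#endif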
1257
1258
1259
sewardjb48e5002002-05-13 00:16:03 +00001260/* Release resources and generally clean up once a thread has finally
nethercotef971ab72004-08-02 16:27:40 +00001261 disappeared.
1262
1263 BORKAGE/ISSUES as of 29 May 02 (moved from top of file --njn 2004-Aug-02)
1264
1265 TODO sometime:
1266      - Mutex scrubbing - cleanup_after_thread_exited: look for threads
1267 blocked on mutexes held by the exiting thread, and release them
1268 appropriately. (??)
1269*/
sewardjb48e5002002-05-13 00:16:03 +00001270static
jsgf855d93d2003-10-13 22:26:55 +00001271void cleanup_after_thread_exited ( ThreadId tid, Bool forcekill )
sewardjb48e5002002-05-13 00:16:03 +00001272{
thughes3a1b8172004-09-12 22:48:59 +00001273 Segment *seg;
1274
nethercote36881a22004-08-04 14:03:16 +00001275 vg_assert(is_valid_or_empty_tid(tid));
sewardj018f7622002-05-15 21:13:39 +00001276 vg_assert(VG_(threads)[tid].status == VgTs_Empty);
njn25e49d8e72002-09-23 09:36:25 +00001277 /* Its stack is now off-limits */
thughes3a1b8172004-09-12 22:48:59 +00001278 seg = VG_(find_segment)( VG_(threads)[tid].stack_base );
1279 VG_TRACK( die_mem_stack, seg->addr, seg->len );
njn25e49d8e72002-09-23 09:36:25 +00001280
nethercotef9b59412004-09-10 15:33:32 +00001281 VGA_(cleanup_thread)( &VG_(threads)[tid].arch );
fitzhardinge47735af2004-01-21 01:27:27 +00001282
jsgf855d93d2003-10-13 22:26:55 +00001283 /* Not interested in the timeout anymore */
1284 VG_(threads)[tid].awaken_at = 0xFFFFFFFF;
1285
1286 /* Delete proxy LWP */
1287 VG_(proxy_delete)(tid, forcekill);
sewardjb48e5002002-05-13 00:16:03 +00001288}
1289
1290
sewardj20917d82002-05-28 01:36:45 +00001291/* Look for matching pairs of threads waiting for joiners and threads
1292 waiting for joinees. For each such pair copy the return value of
1293 the joinee into the joiner, let the joiner resume and discard the
1294 joinee. */
1295static
1296void maybe_rendezvous_joiners_and_joinees ( void )
1297{
1298 Char msg_buf[100];
1299 void** thread_return;
1300 ThreadId jnr, jee;
1301
1302 for (jnr = 1; jnr < VG_N_THREADS; jnr++) {
1303 if (VG_(threads)[jnr].status != VgTs_WaitJoinee)
1304 continue;
1305 jee = VG_(threads)[jnr].joiner_jee_tid;
1306 if (jee == VG_INVALID_THREADID)
1307 continue;
1308 vg_assert(VG_(is_valid_tid)(jee));
jsgf855d93d2003-10-13 22:26:55 +00001309 if (VG_(threads)[jee].status != VgTs_WaitJoiner) {
1310 /* if joinee has become detached, then make join fail with
1311 EINVAL */
1312 if (VG_(threads)[jee].detached) {
1313 VG_(threads)[jnr].status = VgTs_Runnable;
1314 VG_(threads)[jnr].joiner_jee_tid = VG_INVALID_THREADID;
1315 SET_PTHREQ_RETVAL(jnr, VKI_EINVAL);
1316 }
sewardj20917d82002-05-28 01:36:45 +00001317 continue;
jsgf855d93d2003-10-13 22:26:55 +00001318 }
sewardj20917d82002-05-28 01:36:45 +00001319 /* ok! jnr is waiting to join with jee, and jee is waiting to be
1320 joined by ... well, any thread. So let's do it! */
1321
1322 /* Copy return value to where joiner wants it. */
1323 thread_return = VG_(threads)[jnr].joiner_thread_return;
1324 if (thread_return != NULL) {
1325 /* CHECK thread_return writable */
njn72718642003-07-24 08:45:32 +00001326 VG_TRACK( pre_mem_write, Vg_CorePThread, jnr,
njn25e49d8e72002-09-23 09:36:25 +00001327 "pthread_join: thread_return",
1328 (Addr)thread_return, sizeof(void*));
sewardj5a3798b2002-06-04 23:24:22 +00001329
sewardj20917d82002-05-28 01:36:45 +00001330 *thread_return = VG_(threads)[jee].joinee_retval;
1331 /* Not really right, since it makes the thread's return value
1332 appear to be defined even if it isn't. */
njn25e49d8e72002-09-23 09:36:25 +00001333 VG_TRACK( post_mem_write, (Addr)thread_return, sizeof(void*) );
sewardj20917d82002-05-28 01:36:45 +00001334 }
1335
1336 /* Joinee is discarded */
1337 VG_(threads)[jee].status = VgTs_Empty; /* bye! */
jsgf855d93d2003-10-13 22:26:55 +00001338 cleanup_after_thread_exited ( jee, False );
sewardjc4a810d2002-11-13 22:25:51 +00001339 if (VG_(clo_trace_sched)) {
1340 VG_(sprintf)(msg_buf,
1341 "rendezvous with joinee %d. %d resumes, %d exits.",
1342 jee, jnr, jee );
sewardj20917d82002-05-28 01:36:45 +00001343 print_sched_event(jnr, msg_buf);
1344 }
sewardjc4a810d2002-11-13 22:25:51 +00001345
1346 VG_TRACK( post_thread_join, jnr, jee );
sewardj20917d82002-05-28 01:36:45 +00001347
1348 /* joiner returns with success */
1349 VG_(threads)[jnr].status = VgTs_Runnable;
njnd3040452003-05-19 15:04:06 +00001350 SET_PTHREQ_RETVAL(jnr, 0);
sewardj20917d82002-05-28 01:36:45 +00001351 }
1352}
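
/* The pairing rule applied by the loop above, restated as a single
   predicate.  This helper is hypothetical (it does not exist in this
   file); it is only meant to make the rendezvous condition explicit. */
#if 0
static Bool can_rendezvous ( ThreadId jnr, ThreadId jee )
{
   return VG_(threads)[jnr].status == VgTs_WaitJoinee
          && VG_(threads)[jnr].joiner_jee_tid == jee
          && jee != VG_INVALID_THREADID
          && VG_(threads)[jee].status == VgTs_WaitJoiner;
}
#endif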
1353
1354
sewardjccef2e62002-05-29 19:26:32 +00001355/* Nuke all threads other than me.  POSIX requires this to happen
1356   in __NR_exec, and after a __NR_fork() when I am the child.
jsgf855d93d2003-10-13 22:26:55 +00001357   Also used at process exit time with
1358   me==VG_INVALID_THREADID */
sewardjccef2e62002-05-29 19:26:32 +00001359void VG_(nuke_all_threads_except) ( ThreadId me )
1360{
1361 ThreadId tid;
1362 for (tid = 1; tid < VG_N_THREADS; tid++) {
1363 if (tid == me
jsgf855d93d2003-10-13 22:26:55 +00001364 || VG_(threads)[tid].status == VgTs_Empty)
sewardjccef2e62002-05-29 19:26:32 +00001365 continue;
sewardjef037c72002-05-30 00:40:03 +00001366 if (0)
1367 VG_(printf)(
1368 "VG_(nuke_all_threads_except): nuking tid %d\n", tid);
jsgf855d93d2003-10-13 22:26:55 +00001369 VG_(proxy_delete)(tid, True);
sewardjccef2e62002-05-29 19:26:32 +00001370 VG_(threads)[tid].status = VgTs_Empty;
jsgf855d93d2003-10-13 22:26:55 +00001371 cleanup_after_thread_exited( tid, True );
sewardjccef2e62002-05-29 19:26:32 +00001372 }
1373}
1374
1375
sewardj20917d82002-05-28 01:36:45 +00001376/* -----------------------------------------------------------
1377 Thread CREATION, JOINAGE and CANCELLATION: REQUESTS
1378 -------------------------------------------------------- */
1379
sewardje663cb92002-04-12 10:26:32 +00001380static
sewardj8ad94e12002-05-29 00:10:20 +00001381void do__cleanup_push ( ThreadId tid, CleanupEntry* cu )
1382{
1383 Int sp;
1384 Char msg_buf[100];
1385 vg_assert(VG_(is_valid_tid)(tid));
1386 sp = VG_(threads)[tid].custack_used;
1387 if (VG_(clo_trace_sched)) {
thughes11975ff2004-06-12 12:58:22 +00001388 switch (cu->type) {
1389 case VgCt_Function:
1390 VG_(sprintf)(msg_buf,
1391 "cleanup_push (fn %p, arg %p) -> slot %d",
1392 cu->data.function.fn, cu->data.function.arg, sp);
1393 break;
1394 case VgCt_Longjmp:
1395 VG_(sprintf)(msg_buf,
1396 "cleanup_push (ub %p) -> slot %d",
1397 cu->data.longjmp.ub, sp);
1398 break;
1399 default:
1400 VG_(sprintf)(msg_buf,
1401 "cleanup_push (unknown type) -> slot %d",
1402 sp);
1403 break;
1404 }
sewardj8ad94e12002-05-29 00:10:20 +00001405 print_sched_event(tid, msg_buf);
1406 }
1407 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1408 if (sp == VG_N_CLEANUPSTACK)
njne427a662002-10-02 11:08:25 +00001409 VG_(core_panic)("do__cleanup_push: VG_N_CLEANUPSTACK is too small."
sewardj8ad94e12002-05-29 00:10:20 +00001410 " Increase and recompile.");
1411 VG_(threads)[tid].custack[sp] = *cu;
1412 sp++;
1413 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001414 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001415}
1416
1417
1418static
1419void do__cleanup_pop ( ThreadId tid, CleanupEntry* cu )
1420{
1421 Int sp;
1422 Char msg_buf[100];
1423 vg_assert(VG_(is_valid_tid)(tid));
1424 sp = VG_(threads)[tid].custack_used;
1425 if (VG_(clo_trace_sched)) {
njn36650922002-10-04 09:18:09 +00001426 VG_(sprintf)(msg_buf, "cleanup_pop from slot %d", sp-1);
sewardj8ad94e12002-05-29 00:10:20 +00001427 print_sched_event(tid, msg_buf);
1428 }
1429 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1430 if (sp == 0) {
njnd3040452003-05-19 15:04:06 +00001431 SET_PTHREQ_RETVAL(tid, -1);
sewardj8ad94e12002-05-29 00:10:20 +00001432 return;
1433 }
1434 sp--;
njn72718642003-07-24 08:45:32 +00001435 VG_TRACK( pre_mem_write, Vg_CorePThread, tid,
njn36650922002-10-04 09:18:09 +00001436 "cleanup pop", (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001437 *cu = VG_(threads)[tid].custack[sp];
njn25e49d8e72002-09-23 09:36:25 +00001438 VG_TRACK( post_mem_write, (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001439 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001440 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001441}
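
/* These two requests are, in effect, the core of pthread_cleanup_push
   and pthread_cleanup_pop (presumably routed here via vg_libpthread.c).
   For context, the standard client-side pairing that ends up here looks
   like this; the example itself is not part of Valgrind. */
#if 0
#include <pthread.h>

static void unlock_on_exit ( void* arg )
{
   pthread_mutex_unlock( (pthread_mutex_t*)arg );
}

static void* worker ( void* arg )
{
   pthread_mutex_t* mx = (pthread_mutex_t*)arg;
   pthread_mutex_lock(mx);
   pthread_cleanup_push(unlock_on_exit, mx);   /* ~ do__cleanup_push */
   /* ... cancellable work ... */
   pthread_cleanup_pop(1 /* run handler */);   /* ~ do__cleanup_pop */
   return NULL;
}
#endif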
1442
1443
1444static
sewardjff42d1d2002-05-22 13:17:31 +00001445void do_pthread_yield ( ThreadId tid )
1446{
1447 Char msg_buf[100];
1448 vg_assert(VG_(is_valid_tid)(tid));
sewardjff42d1d2002-05-22 13:17:31 +00001449 if (VG_(clo_trace_sched)) {
1450 VG_(sprintf)(msg_buf, "yield");
1451 print_sched_event(tid, msg_buf);
1452 }
njnd3040452003-05-19 15:04:06 +00001453 SET_PTHREQ_RETVAL(tid, 0);
sewardjff42d1d2002-05-22 13:17:31 +00001454}
1455
1456
1457static
sewardj20917d82002-05-28 01:36:45 +00001458void do__testcancel ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00001459{
sewardj7989d0c2002-05-28 11:00:01 +00001460 Char msg_buf[100];
sewardjb48e5002002-05-13 00:16:03 +00001461 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001462 if (VG_(clo_trace_sched)) {
1463 VG_(sprintf)(msg_buf, "testcancel");
1464 print_sched_event(tid, msg_buf);
1465 }
sewardj20917d82002-05-28 01:36:45 +00001466 if (/* is there a cancellation pending on this thread? */
1467 VG_(threads)[tid].cancel_pend != NULL
1468 && /* is this thread accepting cancellations? */
1469 VG_(threads)[tid].cancel_st) {
1470 /* Ok, let's do the cancellation. */
1471 make_thread_jump_to_cancelhdlr ( tid );
sewardje663cb92002-04-12 10:26:32 +00001472 } else {
sewardj20917d82002-05-28 01:36:45 +00001473 /* No, we keep going. */
njnd3040452003-05-19 15:04:06 +00001474 SET_PTHREQ_RETVAL(tid, 0);
sewardje663cb92002-04-12 10:26:32 +00001475 }
sewardje663cb92002-04-12 10:26:32 +00001476}
1477
1478
1479static
sewardj20917d82002-05-28 01:36:45 +00001480void do__set_cancelstate ( ThreadId tid, Int state )
1481{
1482 Bool old_st;
sewardj7989d0c2002-05-28 11:00:01 +00001483 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001484 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001485 if (VG_(clo_trace_sched)) {
1486 VG_(sprintf)(msg_buf, "set_cancelstate to %d (%s)", state,
1487 state==PTHREAD_CANCEL_ENABLE
1488 ? "ENABLE"
1489 : (state==PTHREAD_CANCEL_DISABLE ? "DISABLE" : "???"));
1490 print_sched_event(tid, msg_buf);
1491 }
sewardj20917d82002-05-28 01:36:45 +00001492 old_st = VG_(threads)[tid].cancel_st;
1493 if (state == PTHREAD_CANCEL_ENABLE) {
1494 VG_(threads)[tid].cancel_st = True;
1495 } else
1496 if (state == PTHREAD_CANCEL_DISABLE) {
1497 VG_(threads)[tid].cancel_st = False;
1498 } else {
njne427a662002-10-02 11:08:25 +00001499 VG_(core_panic)("do__set_cancelstate");
sewardj20917d82002-05-28 01:36:45 +00001500 }
njnd3040452003-05-19 15:04:06 +00001501 SET_PTHREQ_RETVAL(tid, old_st ? PTHREAD_CANCEL_ENABLE
1502 : PTHREAD_CANCEL_DISABLE);
sewardj20917d82002-05-28 01:36:45 +00001503}
1504
1505
1506static
1507void do__set_canceltype ( ThreadId tid, Int type )
1508{
1509 Bool old_ty;
sewardj7989d0c2002-05-28 11:00:01 +00001510 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001511 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001512 if (VG_(clo_trace_sched)) {
1513 VG_(sprintf)(msg_buf, "set_canceltype to %d (%s)", type,
1514 type==PTHREAD_CANCEL_ASYNCHRONOUS
1515 ? "ASYNCHRONOUS"
1516 : (type==PTHREAD_CANCEL_DEFERRED ? "DEFERRED" : "???"));
1517 print_sched_event(tid, msg_buf);
1518 }
sewardj20917d82002-05-28 01:36:45 +00001519 old_ty = VG_(threads)[tid].cancel_ty;
1520 if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
1521 VG_(threads)[tid].cancel_ty = False;
1522 } else
1523 if (type == PTHREAD_CANCEL_DEFERRED) {
sewardjaf00b6d2002-05-29 23:30:28 +00001524 VG_(threads)[tid].cancel_ty = True;
sewardj20917d82002-05-28 01:36:45 +00001525 } else {
njne427a662002-10-02 11:08:25 +00001526 VG_(core_panic)("do__set_canceltype");
sewardj20917d82002-05-28 01:36:45 +00001527 }
njnd3040452003-05-19 15:04:06 +00001528 SET_PTHREQ_RETVAL(tid, old_ty ? PTHREAD_CANCEL_DEFERRED
sewardj20917d82002-05-28 01:36:45 +00001529 : PTHREAD_CANCEL_ASYNCHRONOUS);
1530}
1531
1532
sewardj7989d0c2002-05-28 11:00:01 +00001533/* Set or get the detach state for thread det. */
sewardj20917d82002-05-28 01:36:45 +00001534static
sewardj7989d0c2002-05-28 11:00:01 +00001535void do__set_or_get_detach ( ThreadId tid,
1536 Int what, ThreadId det )
sewardj20917d82002-05-28 01:36:45 +00001537{
sewardj7989d0c2002-05-28 11:00:01 +00001538 Char msg_buf[100];
1539 /* VG_(printf)("do__set_or_get_detach tid %d what %d det %d\n",
1540 tid, what, det); */
sewardj20917d82002-05-28 01:36:45 +00001541 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001542 if (VG_(clo_trace_sched)) {
1543 VG_(sprintf)(msg_buf, "set_or_get_detach %d (%s) for tid %d", what,
1544 what==0 ? "not-detached" : (
1545 what==1 ? "detached" : (
1546 what==2 ? "fetch old value" : "???")),
1547 det );
1548 print_sched_event(tid, msg_buf);
1549 }
1550
1551 if (!VG_(is_valid_tid)(det)) {
njnd3040452003-05-19 15:04:06 +00001552 SET_PTHREQ_RETVAL(tid, -1);
sewardj7989d0c2002-05-28 11:00:01 +00001553 return;
1554 }
1555
sewardj20917d82002-05-28 01:36:45 +00001556 switch (what) {
1557 case 2: /* get */
njnd3040452003-05-19 15:04:06 +00001558 SET_PTHREQ_RETVAL(tid, VG_(threads)[det].detached ? 1 : 0);
sewardj20917d82002-05-28 01:36:45 +00001559 return;
jsgf855d93d2003-10-13 22:26:55 +00001560 case 1:
sewardj7989d0c2002-05-28 11:00:01 +00001561 VG_(threads)[det].detached = True;
njnd3040452003-05-19 15:04:06 +00001562 SET_PTHREQ_RETVAL(tid, 0);
jsgf855d93d2003-10-13 22:26:55 +00001563 /* wake anyone who was joining on us */
1564 maybe_rendezvous_joiners_and_joinees();
sewardj20917d82002-05-28 01:36:45 +00001565 return;
1566 case 0: /* set not detached */
sewardj7989d0c2002-05-28 11:00:01 +00001567 VG_(threads)[det].detached = False;
njnd3040452003-05-19 15:04:06 +00001568 SET_PTHREQ_RETVAL(tid, 0);
sewardj20917d82002-05-28 01:36:45 +00001569 return;
1570 default:
njne427a662002-10-02 11:08:25 +00001571 VG_(core_panic)("do__set_or_get_detach");
sewardj20917d82002-05-28 01:36:45 +00001572 }
1573}
1574
1575
1576static
1577void do__set_cancelpend ( ThreadId tid,
1578 ThreadId cee,
1579 void (*cancelpend_hdlr)(void*) )
sewardje663cb92002-04-12 10:26:32 +00001580{
1581 Char msg_buf[100];
1582
sewardj20917d82002-05-28 01:36:45 +00001583 vg_assert(VG_(is_valid_tid)(tid));
1584 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1585
thughes97e54d22004-08-15 14:34:02 +00001586 if (!VG_(is_valid_tid)(cee) ||
1587 VG_(threads)[cee].status == VgTs_WaitJoiner) {
sewardj7989d0c2002-05-28 11:00:01 +00001588 if (VG_(clo_trace_sched)) {
1589 VG_(sprintf)(msg_buf,
1590 "set_cancelpend for invalid tid %d", cee);
1591 print_sched_event(tid, msg_buf);
1592 }
njn25e49d8e72002-09-23 09:36:25 +00001593 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001594 "pthread_cancel: target thread does not exist, or invalid");
jsgf855d93d2003-10-13 22:26:55 +00001595 SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
sewardj7989d0c2002-05-28 11:00:01 +00001596 return;
1597 }
sewardj20917d82002-05-28 01:36:45 +00001598
1599 VG_(threads)[cee].cancel_pend = cancelpend_hdlr;
1600
jsgf855d93d2003-10-13 22:26:55 +00001601 /* interrupt a pending syscall */
1602 VG_(proxy_abort_syscall)(cee);
1603
sewardj20917d82002-05-28 01:36:45 +00001604 if (VG_(clo_trace_sched)) {
1605 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001606 "set_cancelpend (hdlr = %p, set by tid %d)",
sewardj20917d82002-05-28 01:36:45 +00001607 cancelpend_hdlr, tid);
1608 print_sched_event(cee, msg_buf);
1609 }
1610
1611 /* Thread doing the cancelling returns with success. */
njnd3040452003-05-19 15:04:06 +00001612 SET_PTHREQ_RETVAL(tid, 0);
sewardj20917d82002-05-28 01:36:45 +00001613
1614 /* Perhaps we can nuke the cancellee right now? */
thughes513197c2004-06-13 12:07:53 +00001615 if (!VG_(threads)[cee].cancel_ty || /* if PTHREAD_CANCEL_ASYNCHRONOUS */
1616 (VG_(threads)[cee].status != VgTs_Runnable &&
1617 VG_(threads)[cee].status != VgTs_WaitMX)) {
jsgf855d93d2003-10-13 22:26:55 +00001618 do__testcancel(cee);
thughes513197c2004-06-13 12:07:53 +00001619 }
sewardj20917d82002-05-28 01:36:45 +00001620}
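
/* Client-side view, for context: pthread_cancel() on a target thread
   ends up here as a set_cancelpend request, and a deferred-cancellation
   target only acts on it at a cancellation point such as
   pthread_testcancel() (the do__testcancel request above).  Standard
   POSIX usage; the example is not part of Valgrind. */
#if 0
#include <pthread.h>

static void* victim ( void* arg )
{
   for (;;) {
      /* ... work containing no implicit cancellation points ... */
      pthread_testcancel();               /* explicit cancellation point */
   }
   return NULL;
}

/* Elsewhere:
      pthread_cancel(victim_tid);
      pthread_join(victim_tid, &res);     -- res == PTHREAD_CANCELED
*/
#endif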
1621
1622
1623static
1624void do_pthread_join ( ThreadId tid,
1625 ThreadId jee, void** thread_return )
1626{
1627 Char msg_buf[100];
1628 ThreadId i;
sewardje663cb92002-04-12 10:26:32 +00001629 /* jee, the joinee, is the thread specified as an arg in thread
1630 tid's call to pthread_join. So tid is the join-er. */
sewardjb48e5002002-05-13 00:16:03 +00001631 vg_assert(VG_(is_valid_tid)(tid));
sewardj018f7622002-05-15 21:13:39 +00001632 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001633
1634 if (jee == tid) {
njn25e49d8e72002-09-23 09:36:25 +00001635 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001636 "pthread_join: attempt to join to self");
njnd3040452003-05-19 15:04:06 +00001637 SET_PTHREQ_RETVAL(tid, EDEADLK); /* libc constant, not a kernel one */
jsgf855d93d2003-10-13 22:26:55 +00001638 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001639 return;
1640 }
1641
sewardj20917d82002-05-28 01:36:45 +00001642 /* Flush any completed pairs, so as to make sure what we're looking
1643 at is up-to-date. */
1644 maybe_rendezvous_joiners_and_joinees();
1645
1646 /* Is this a sane request? */
jsgf855d93d2003-10-13 22:26:55 +00001647 if ( ! VG_(is_valid_tid)(jee) ||
1648 VG_(threads)[jee].detached) {
sewardje663cb92002-04-12 10:26:32 +00001649 /* Invalid thread to join to. */
njn25e49d8e72002-09-23 09:36:25 +00001650 VG_(record_pthread_error)( tid,
jsgf855d93d2003-10-13 22:26:55 +00001651 "pthread_join: target thread does not exist, invalid, or detached");
1652 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardje663cb92002-04-12 10:26:32 +00001653 return;
1654 }
1655
sewardj20917d82002-05-28 01:36:45 +00001656 /* Is anyone else already in a join-wait for jee? */
1657 for (i = 1; i < VG_N_THREADS; i++) {
1658 if (i == tid) continue;
1659 if (VG_(threads)[i].status == VgTs_WaitJoinee
1660 && VG_(threads)[i].joiner_jee_tid == jee) {
1661 /* Someone already did join on this thread */
njn25e49d8e72002-09-23 09:36:25 +00001662 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001663 "pthread_join: another thread already "
1664 "in join-wait for target thread");
jsgf855d93d2003-10-13 22:26:55 +00001665 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
1666 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardj20917d82002-05-28 01:36:45 +00001667 return;
1668 }
sewardje663cb92002-04-12 10:26:32 +00001669 }
1670
thughes513197c2004-06-13 12:07:53 +00001671 if(VG_(threads)[tid].cancel_pend != NULL &&
1672 VG_(threads)[tid].cancel_st) {
1673 make_thread_jump_to_cancelhdlr ( tid );
1674 } else {
1675 /* Mark this thread as waiting for the joinee. */
1676 VG_(threads)[tid].status = VgTs_WaitJoinee;
1677 VG_(threads)[tid].joiner_thread_return = thread_return;
1678 VG_(threads)[tid].joiner_jee_tid = jee;
1679
1680 /* Look for matching joiners and joinees and do the right thing. */
1681 maybe_rendezvous_joiners_and_joinees();
1682
1683      /* Return value is irrelevant since this thread becomes
1684         non-runnable. maybe_rendezvous_joiners_and_joinees() will cause
1685         it to return the right value when it resumes. */
1686
1687 if (VG_(clo_trace_sched)) {
1688 VG_(sprintf)(msg_buf,
1689 "wait for joinee %d (may already be ready)", jee);
1690 print_sched_event(tid, msg_buf);
1691 }
sewardje663cb92002-04-12 10:26:32 +00001692 }
sewardje663cb92002-04-12 10:26:32 +00001693}
1694
1695
sewardj20917d82002-05-28 01:36:45 +00001696/* ( void* ): calling thread waits for joiner and returns the void* to
1697 it. This is one of two ways in which a thread can finally exit --
1698 the other is do__quit. */
sewardje663cb92002-04-12 10:26:32 +00001699static
sewardj20917d82002-05-28 01:36:45 +00001700void do__wait_joiner ( ThreadId tid, void* retval )
sewardje663cb92002-04-12 10:26:32 +00001701{
sewardj20917d82002-05-28 01:36:45 +00001702 Char msg_buf[100];
1703 vg_assert(VG_(is_valid_tid)(tid));
1704 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1705 if (VG_(clo_trace_sched)) {
1706 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001707 "do__wait_joiner(retval = %p) (non-detached thread exit)", retval);
sewardj20917d82002-05-28 01:36:45 +00001708 print_sched_event(tid, msg_buf);
1709 }
1710 VG_(threads)[tid].status = VgTs_WaitJoiner;
1711 VG_(threads)[tid].joinee_retval = retval;
1712 maybe_rendezvous_joiners_and_joinees();
1713}
1714
1715
1716/* ( no-args ): calling thread disappears from the system forever.
1717 Reclaim resources. */
1718static
1719void do__quit ( ThreadId tid )
1720{
1721 Char msg_buf[100];
1722 vg_assert(VG_(is_valid_tid)(tid));
1723 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1724 VG_(threads)[tid].status = VgTs_Empty; /* bye! */
jsgf855d93d2003-10-13 22:26:55 +00001725 cleanup_after_thread_exited ( tid, False );
sewardj20917d82002-05-28 01:36:45 +00001726 if (VG_(clo_trace_sched)) {
sewardj7989d0c2002-05-28 11:00:01 +00001727 VG_(sprintf)(msg_buf, "do__quit (detached thread exit)");
sewardj20917d82002-05-28 01:36:45 +00001728 print_sched_event(tid, msg_buf);
1729 }
jsgf855d93d2003-10-13 22:26:55 +00001730 maybe_rendezvous_joiners_and_joinees();
sewardj20917d82002-05-28 01:36:45 +00001731 /* Return value is irrelevant; this thread will not get
1732 rescheduled. */
1733}
1734
1735
nethercote6b9c8472004-09-13 13:16:40 +00001736/* Should never be entered. If it is, will be on the simulated CPU. */
sewardj20917d82002-05-28 01:36:45 +00001737static
1738void do__apply_in_new_thread_bogusRA ( void )
1739{
njne427a662002-10-02 11:08:25 +00001740 VG_(core_panic)("do__apply_in_new_thread_bogusRA");
sewardj20917d82002-05-28 01:36:45 +00001741}
1742
1743/* (Fn, Arg): Create a new thread and run Fn applied to Arg in it. Fn
1744 MUST NOT return -- ever. Eventually it will do either __QUIT or
1745 __WAIT_JOINER. Return the child tid to the parent. */
1746static
1747void do__apply_in_new_thread ( ThreadId parent_tid,
1748 void* (*fn)(void *),
thughesdaa34562004-06-27 12:48:53 +00001749 void* arg,
1750 StackInfo *si )
sewardj20917d82002-05-28 01:36:45 +00001751{
sewardje663cb92002-04-12 10:26:32 +00001752 Addr new_stack;
1753 UInt new_stk_szb;
1754 ThreadId tid;
1755 Char msg_buf[100];
1756
1757 /* Paranoia ... */
1758 vg_assert(sizeof(pthread_t) == sizeof(UInt));
1759
sewardj018f7622002-05-15 21:13:39 +00001760 vg_assert(VG_(threads)[parent_tid].status != VgTs_Empty);
sewardje663cb92002-04-12 10:26:32 +00001761
sewardj1e8cdc92002-04-18 11:37:52 +00001762 tid = vg_alloc_ThreadState();
sewardje663cb92002-04-12 10:26:32 +00001763
1764 /* If we've created the main thread's tid, we're in deep trouble :) */
sewardj6072c362002-04-19 14:40:57 +00001765 vg_assert(tid != 1);
nethercote36881a22004-08-04 14:03:16 +00001766 vg_assert(is_valid_or_empty_tid(tid));
sewardje663cb92002-04-12 10:26:32 +00001767
sewardjc4a810d2002-11-13 22:25:51 +00001768 /* do this early, before the child gets any memory writes */
1769 VG_TRACK ( post_thread_create, parent_tid, tid );
1770
sewardjf6374322002-11-13 22:35:55 +00001771 /* Create new thread with default attrs:
1772 deferred cancellation, not detached
1773 */
1774 mostly_clear_thread_record(tid);
1775 VG_(threads)[tid].status = VgTs_Runnable;
1776
sewardje663cb92002-04-12 10:26:32 +00001777 /* Copy the parent's CPU state into the child's, in a roundabout
1778 way (via baseBlock). */
nethercotef971ab72004-08-02 16:27:40 +00001779 load_thread_state(parent_tid);
nethercotef9b59412004-09-10 15:33:32 +00001780 VGA_(setup_child)( &VG_(threads)[tid].arch,
1781 &VG_(threads)[parent_tid].arch );
nethercotef971ab72004-08-02 16:27:40 +00001782 save_thread_state(tid);
sewardjf6374322002-11-13 22:35:55 +00001783 vg_tid_last_in_baseBlock = tid;
sewardje663cb92002-04-12 10:26:32 +00001784
1785 /* Consider allocating the child a stack, if the one it already has
1786 is inadequate. */
thughesdaa34562004-06-27 12:48:53 +00001787 new_stk_szb = si->size + VG_AR_CLIENT_STACKBASE_REDZONE_SZB + si->guardsize;
1788   new_stk_szb = (new_stk_szb + VKI_BYTES_PER_PAGE - 1) & ~(VKI_BYTES_PER_PAGE - 1); /* round up to whole pages */
1789
1790 VG_(threads)[tid].stack_guard_size = si->guardsize;
sewardje663cb92002-04-12 10:26:32 +00001791
sewardj018f7622002-05-15 21:13:39 +00001792 if (new_stk_szb > VG_(threads)[tid].stack_size) {
sewardje663cb92002-04-12 10:26:32 +00001793 /* Again, for good measure :) We definitely don't want to be
1794 allocating a stack for the main thread. */
sewardj6072c362002-04-19 14:40:57 +00001795 vg_assert(tid != 1);
thughesdaa34562004-06-27 12:48:53 +00001796 if (VG_(threads)[tid].stack_size > 0)
1797 VG_(client_free)(VG_(threads)[tid].stack_base);
fitzhardinge98abfc72003-12-16 02:05:15 +00001798 new_stack = VG_(client_alloc)(0, new_stk_szb,
nethercotee567e702004-07-10 17:49:17 +00001799 VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
fitzhardinge98abfc72003-12-16 02:05:15 +00001800 SF_STACK);
nethercote8e9eab02004-07-11 18:01:06 +00001801 // Given the low number of threads Valgrind can handle, stack
1802 // allocation should pretty much always succeed, so having an
1803 // assertion here isn't too bad. However, probably better would be
1804 // this:
1805 //
1806 // if (0 == new_stack)
1807 // SET_PTHREQ_RETVAL(parent_tid, -VKI_EAGAIN);
1808 //
nethercotee567e702004-07-10 17:49:17 +00001809 vg_assert(0 != new_stack);
sewardj018f7622002-05-15 21:13:39 +00001810 VG_(threads)[tid].stack_base = new_stack;
1811 VG_(threads)[tid].stack_size = new_stk_szb;
1812 VG_(threads)[tid].stack_highest_word
sewardje663cb92002-04-12 10:26:32 +00001813 = new_stack + new_stk_szb
sewardj1e8cdc92002-04-18 11:37:52 +00001814                      - VG_AR_CLIENT_STACKBASE_REDZONE_SZB; /* -4 ??? */
sewardje663cb92002-04-12 10:26:32 +00001815 }
sewardj1e8cdc92002-04-18 11:37:52 +00001816
njn25e49d8e72002-09-23 09:36:25 +00001817 /* Having got memory to hold the thread's stack:
1818 - set %esp as base + size
1819 - mark everything below %esp inaccessible
1820 - mark redzone at stack end inaccessible
1821 */
njnd3040452003-05-19 15:04:06 +00001822 SET_PTHREQ_ESP(tid, VG_(threads)[tid].stack_base
1823 + VG_(threads)[tid].stack_size
1824 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
sewardj1e8cdc92002-04-18 11:37:52 +00001825
njn25e49d8e72002-09-23 09:36:25 +00001826 VG_TRACK ( die_mem_stack, VG_(threads)[tid].stack_base,
thughesdaa34562004-06-27 12:48:53 +00001827 VG_(threads)[tid].stack_size
1828 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
nethercote6b9c8472004-09-13 13:16:40 +00001829 VG_TRACK ( ban_mem_stack, ARCH_STACK_PTR(VG_(threads)[tid].arch),
njn25e49d8e72002-09-23 09:36:25 +00001830 VG_AR_CLIENT_STACKBASE_REDZONE_SZB );
sewardje663cb92002-04-12 10:26:32 +00001831
nethercote6b9c8472004-09-13 13:16:40 +00001832 VGA_(thread_initial_stack)(tid, (UWord)arg,
1833 (Addr)&do__apply_in_new_thread_bogusRA);
sewardje663cb92002-04-12 10:26:32 +00001834
1835 /* this is where we start */
nethercoteb8ef9d82004-09-05 22:02:33 +00001836 ARCH_INSTR_PTR(VG_(threads)[tid].arch) = (UInt)fn;
sewardje663cb92002-04-12 10:26:32 +00001837
sewardj8937c812002-04-12 20:12:20 +00001838 if (VG_(clo_trace_sched)) {
njn25e49d8e72002-09-23 09:36:25 +00001839 VG_(sprintf)(msg_buf, "new thread, created by %d", parent_tid );
sewardje663cb92002-04-12 10:26:32 +00001840 print_sched_event(tid, msg_buf);
1841 }
1842
fitzhardingef7866182004-03-16 22:09:12 +00001843 /* Start the thread with all signals blocked; it's up to the client
1844 code to set the right signal mask when it's ready. */
1845 VG_(ksigfillset)(&VG_(threads)[tid].sig_mask);
jsgf855d93d2003-10-13 22:26:55 +00001846
1847 /* Now that the signal mask is set up, create a proxy LWP for this thread */
1848 VG_(proxy_create)(tid);
1849
1850 /* Set the proxy's signal mask */
1851 VG_(proxy_setsigmask)(tid);
sewardjb48e5002002-05-13 00:16:03 +00001852
sewardj20917d82002-05-28 01:36:45 +00001853 /* return child's tid to parent */
njnd3040452003-05-19 15:04:06 +00001854 SET_PTHREQ_RETVAL(parent_tid, tid); /* success */
sewardje663cb92002-04-12 10:26:32 +00001855}
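
/* The stack sizing above rounds (requested size + redzone + guard) up
   to whole pages and then puts the initial stack pointer just below the
   redzone.  A standalone sketch of that arithmetic, with assumed
   constants (4096-byte pages, 16-byte redzone) and made-up numbers: */
#if 0
#define SKETCH_PAGE_SZB    4096u
#define SKETCH_REDZONE_SZB 16u

static unsigned int round_up_to_page ( unsigned int szb )
{
   return (szb + SKETCH_PAGE_SZB - 1) & ~(SKETCH_PAGE_SZB - 1);
}

/* For a 10000-byte request with a 4096-byte guard area:
      total      = 10000 + 16 + 4096 = 14112
      rounded    = round_up_to_page(14112) = 16384   (4 pages)
      initial SP = stack_base + 16384 - 16           (just below the redzone) */
#endif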
1856
1857
sewardj604ec3c2002-04-18 22:38:41 +00001858/* -----------------------------------------------------------
1859 MUTEXes
1860 -------------------------------------------------------- */
1861
rjwalsh7109a8c2004-09-02 00:31:02 +00001862/* vg_pthread_mutex_t is defined in core.h.
sewardj604ec3c2002-04-18 22:38:41 +00001863
nethercote1f0173b2004-02-28 15:40:36 +00001864 The initializers zero everything, except possibly the fourth word,
1865 which in vg_pthread_mutex_t is the __vg_m_kind field. It gets set to one
1866 of PTHREAD_MUTEX_{TIMED,RECURSIVE,ERRORCHECK,ADAPTIVE}_NP
sewardj604ec3c2002-04-18 22:38:41 +00001867
sewardj6072c362002-04-19 14:40:57 +00001868 How we use it:
sewardj604ec3c2002-04-18 22:38:41 +00001869
nethercote1f0173b2004-02-28 15:40:36 +00001870 __vg_m_kind never changes and indicates whether or not it is recursive.
sewardj6072c362002-04-19 14:40:57 +00001871
nethercote1f0173b2004-02-28 15:40:36 +00001872 __vg_m_count indicates the lock count; if 0, the mutex is not owned by
sewardj6072c362002-04-19 14:40:57 +00001873 anybody.
1874
nethercote1f0173b2004-02-28 15:40:36 +00001875 __vg_m_owner has a ThreadId value stuffed into it. We carefully arrange
sewardj6072c362002-04-19 14:40:57 +00001876 that ThreadId == 0 is invalid (VG_INVALID_THREADID), so that
1877 statically initialised mutexes correctly appear
1878 to belong to nobody.
1879
nethercote1f0173b2004-02-28 15:40:36 +00001880   In summary, a not-in-use mutex is distinguished by having __vg_m_owner
1881 == 0 (VG_INVALID_THREADID) and __vg_m_count == 0 too. If one of those
sewardj6072c362002-04-19 14:40:57 +00001882 conditions holds, the other should too.
1883
1884   There is no linked list of threads waiting for this mutex.  Instead
1885   a thread in WaitMX state points at the mutex with its associated_mx
1886   field.  This makes _unlock() inefficient, but makes it simple to
1887   implement the right semantics vis-a-vis signals.
sewardje663cb92002-04-12 10:26:32 +00001888
sewardj604ec3c2002-04-18 22:38:41 +00001889 We don't have to deal with mutex initialisation; the client side
sewardj6072c362002-04-19 14:40:57 +00001890 deals with that for us.
1891*/
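
/* The not-in-use invariant described above can be made concrete as a
   one-line check.  This helper is hypothetical and unused; it exists
   only to restate the invariant in code. */
#if 0
static Bool mutex_is_unheld ( vg_pthread_mutex_t* mx )
{
   /* both conditions must hold together, or neither */
   vg_assert( (mx->__vg_m_count == 0)
              == (mx->__vg_m_owner == VG_INVALID_THREADID) );
   return mx->__vg_m_count == 0;
}
#endif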
sewardje663cb92002-04-12 10:26:32 +00001892
sewardj3b5d8862002-04-20 13:53:23 +00001893/* Helper fns ... */
thughese321d492004-10-17 15:00:20 +00001894static
1895void do_pthread_mutex_timedlock_TIMEOUT ( ThreadId tid )
1896{
1897 Char msg_buf[100];
1898 vg_pthread_mutex_t* mx;
1899
1900 vg_assert(VG_(is_valid_tid)(tid)
1901 && VG_(threads)[tid].status == VgTs_WaitMX
1902 && VG_(threads)[tid].awaken_at != 0xFFFFFFFF);
1903 mx = VG_(threads)[tid].associated_mx;
1904 vg_assert(mx != NULL);
1905
1906 VG_(threads)[tid].status = VgTs_Runnable;
1907   SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_mutex_timedlock return value */
1908 VG_(threads)[tid].associated_mx = NULL;
1909
1910 if (VG_(clo_trace_pthread_level) >= 1) {
1911 VG_(sprintf)(msg_buf, "pthread_mutex_timedlock mx %p: TIMEOUT", mx);
1912 print_pthread_event(tid, msg_buf);
1913 }
1914}
1915
1916
sewardj3b5d8862002-04-20 13:53:23 +00001917static
nethercote1f0173b2004-02-28 15:40:36 +00001918void release_one_thread_waiting_on_mutex ( vg_pthread_mutex_t* mutex,
sewardj3b5d8862002-04-20 13:53:23 +00001919 Char* caller )
1920{
1921 Int i;
1922 Char msg_buf[100];
1923
1924 /* Find some arbitrary thread waiting on this mutex, and make it
1925 runnable. If none are waiting, mark the mutex as not held. */
1926 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00001927 if (VG_(threads)[i].status == VgTs_Empty)
sewardj3b5d8862002-04-20 13:53:23 +00001928 continue;
sewardj018f7622002-05-15 21:13:39 +00001929 if (VG_(threads)[i].status == VgTs_WaitMX
1930 && VG_(threads)[i].associated_mx == mutex)
sewardj3b5d8862002-04-20 13:53:23 +00001931 break;
1932 }
1933
nethercote1f0173b2004-02-28 15:40:36 +00001934 VG_TRACK( post_mutex_unlock, (ThreadId)mutex->__vg_m_owner, mutex );
sewardj0af43bc2002-10-22 04:30:35 +00001935
sewardj3b5d8862002-04-20 13:53:23 +00001936 vg_assert(i <= VG_N_THREADS);
1937 if (i == VG_N_THREADS) {
1938 /* Nobody else is waiting on it. */
nethercote1f0173b2004-02-28 15:40:36 +00001939 mutex->__vg_m_count = 0;
1940 mutex->__vg_m_owner = VG_INVALID_THREADID;
sewardj3b5d8862002-04-20 13:53:23 +00001941 } else {
1942 /* Notionally transfer the hold to thread i, whose
1943 pthread_mutex_lock() call now returns with 0 (success). */
1944 /* The .count is already == 1. */
sewardj018f7622002-05-15 21:13:39 +00001945 vg_assert(VG_(threads)[i].associated_mx == mutex);
nethercote1f0173b2004-02-28 15:40:36 +00001946 mutex->__vg_m_owner = (/*_pthread_descr*/void*)i;
sewardj018f7622002-05-15 21:13:39 +00001947 VG_(threads)[i].status = VgTs_Runnable;
1948 VG_(threads)[i].associated_mx = NULL;
sewardj5f07b662002-04-23 16:52:51 +00001949 /* m_edx already holds pth_mx_lock() success (0) */
sewardj3b5d8862002-04-20 13:53:23 +00001950
sewardj0af43bc2002-10-22 04:30:35 +00001951 VG_TRACK( post_mutex_lock, (ThreadId)i, mutex);
1952
sewardj3b5d8862002-04-20 13:53:23 +00001953 if (VG_(clo_trace_pthread_level) >= 1) {
1954 VG_(sprintf)(msg_buf, "%s mx %p: RESUME",
1955 caller, mutex );
1956 print_pthread_event(i, msg_buf);
1957 }
1958 }
1959}
1960
sewardje663cb92002-04-12 10:26:32 +00001961
1962static
sewardj30671ff2002-04-21 00:13:57 +00001963void do_pthread_mutex_lock( ThreadId tid,
1964 Bool is_trylock,
thughese321d492004-10-17 15:00:20 +00001965 vg_pthread_mutex_t* mutex,
1966 UInt ms_end )
sewardje663cb92002-04-12 10:26:32 +00001967{
sewardj30671ff2002-04-21 00:13:57 +00001968 Char msg_buf[100];
1969 Char* caller
sewardj8ccc2be2002-05-10 20:26:37 +00001970 = is_trylock ? "pthread_mutex_trylock"
1971 : "pthread_mutex_lock ";
sewardje663cb92002-04-12 10:26:32 +00001972
thughese321d492004-10-17 15:00:20 +00001973 /* If ms_end == 0xFFFFFFFF, wait forever (no timeout). Otherwise,
1974 ms_end is the ending millisecond. */
1975
sewardj604ec3c2002-04-18 22:38:41 +00001976 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj30671ff2002-04-21 00:13:57 +00001977 VG_(sprintf)(msg_buf, "%s mx %p ...", caller, mutex );
sewardj604ec3c2002-04-18 22:38:41 +00001978 print_pthread_event(tid, msg_buf);
1979 }
1980
1981 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00001982 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00001983 && VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001984
1985 /* POSIX doesn't mandate this, but for sanity ... */
1986 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00001987 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001988 "pthread_mutex_lock/trylock: mutex is NULL");
jsgf855d93d2003-10-13 22:26:55 +00001989 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardje663cb92002-04-12 10:26:32 +00001990 return;
1991 }
1992
sewardj604ec3c2002-04-18 22:38:41 +00001993 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00001994 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00001995# ifndef GLIBC_2_1
sewardj604ec3c2002-04-18 22:38:41 +00001996 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00001997 case PTHREAD_MUTEX_ADAPTIVE_NP:
1998# endif
sewardja1679dd2002-05-10 22:31:40 +00001999# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002000 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002001# endif
sewardj604ec3c2002-04-18 22:38:41 +00002002 case PTHREAD_MUTEX_RECURSIVE_NP:
2003 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002004 if (mutex->__vg_m_count >= 0) break;
sewardj604ec3c2002-04-18 22:38:41 +00002005 /* else fall thru */
2006 default:
njn25e49d8e72002-09-23 09:36:25 +00002007 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002008 "pthread_mutex_lock/trylock: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002009 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002010 return;
sewardje663cb92002-04-12 10:26:32 +00002011 }
2012
nethercote1f0173b2004-02-28 15:40:36 +00002013 if (mutex->__vg_m_count > 0) {
2014 if (!VG_(is_valid_tid)((ThreadId)mutex->__vg_m_owner)) {
fitzhardinge47735af2004-01-21 01:27:27 +00002015 VG_(record_pthread_error)( tid,
2016 "pthread_mutex_lock/trylock: mutex has invalid owner");
2017 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
2018 return;
2019 }
sewardjf8f819e2002-04-17 23:21:37 +00002020
2021 /* Someone has it already. */
thughese321d492004-10-17 15:00:20 +00002022 if ((ThreadId)mutex->__vg_m_owner == tid && ms_end == 0xFFFFFFFF) {
sewardjf8f819e2002-04-17 23:21:37 +00002023 /* It's locked -- by me! */
nethercote1f0173b2004-02-28 15:40:36 +00002024 if (mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
sewardjf8f819e2002-04-17 23:21:37 +00002025 /* return 0 (success). */
nethercote1f0173b2004-02-28 15:40:36 +00002026 mutex->__vg_m_count++;
njnd3040452003-05-19 15:04:06 +00002027 SET_PTHREQ_RETVAL(tid, 0);
sewardj853f55d2002-04-26 00:27:53 +00002028 if (0)
2029 VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n",
nethercote1f0173b2004-02-28 15:40:36 +00002030 tid, mutex, mutex->__vg_m_count);
sewardjf8f819e2002-04-17 23:21:37 +00002031 return;
2032 } else {
sewardj30671ff2002-04-21 00:13:57 +00002033 if (is_trylock)
njnd3040452003-05-19 15:04:06 +00002034 SET_PTHREQ_RETVAL(tid, EBUSY);
sewardj30671ff2002-04-21 00:13:57 +00002035 else
njnd3040452003-05-19 15:04:06 +00002036 SET_PTHREQ_RETVAL(tid, EDEADLK);
sewardjf8f819e2002-04-17 23:21:37 +00002037 return;
2038 }
2039 } else {
sewardj6072c362002-04-19 14:40:57 +00002040 /* Someone else has it; we have to wait. Mark ourselves
2041 thusly. */
nethercote1f0173b2004-02-28 15:40:36 +00002042 /* GUARD: __vg_m_count > 0 && __vg_m_owner is valid */
sewardj30671ff2002-04-21 00:13:57 +00002043 if (is_trylock) {
2044 /* caller is polling; so return immediately. */
njnd3040452003-05-19 15:04:06 +00002045 SET_PTHREQ_RETVAL(tid, EBUSY);
sewardj30671ff2002-04-21 00:13:57 +00002046 } else {
sewardjdca84112002-11-13 22:29:34 +00002047 VG_TRACK ( pre_mutex_lock, tid, mutex );
2048
sewardj018f7622002-05-15 21:13:39 +00002049 VG_(threads)[tid].status = VgTs_WaitMX;
2050 VG_(threads)[tid].associated_mx = mutex;
thughese321d492004-10-17 15:00:20 +00002051 VG_(threads)[tid].awaken_at = ms_end;
2052 if (ms_end != 0xFFFFFFFF)
2053 add_timeout(tid, ms_end);
njnd3040452003-05-19 15:04:06 +00002054 SET_PTHREQ_RETVAL(tid, 0); /* pth_mx_lock success value */
sewardj30671ff2002-04-21 00:13:57 +00002055 if (VG_(clo_trace_pthread_level) >= 1) {
2056 VG_(sprintf)(msg_buf, "%s mx %p: BLOCK",
2057 caller, mutex );
2058 print_pthread_event(tid, msg_buf);
2059 }
2060 }
sewardje663cb92002-04-12 10:26:32 +00002061 return;
2062 }
sewardjf8f819e2002-04-17 23:21:37 +00002063
sewardje663cb92002-04-12 10:26:32 +00002064 } else {
sewardj6072c362002-04-19 14:40:57 +00002065 /* Nobody owns it. Sanity check ... */
nethercote1f0173b2004-02-28 15:40:36 +00002066 vg_assert(mutex->__vg_m_owner == VG_INVALID_THREADID);
sewardjdca84112002-11-13 22:29:34 +00002067
2068 VG_TRACK ( pre_mutex_lock, tid, mutex );
2069
sewardjf8f819e2002-04-17 23:21:37 +00002070 /* We get it! [for the first time]. */
nethercote1f0173b2004-02-28 15:40:36 +00002071 mutex->__vg_m_count = 1;
2072 mutex->__vg_m_owner = (/*_pthread_descr*/void*)tid;
njn25e49d8e72002-09-23 09:36:25 +00002073
sewardje663cb92002-04-12 10:26:32 +00002074 /* return 0 (success). */
njnd3040452003-05-19 15:04:06 +00002075 SET_PTHREQ_RETVAL(tid, 0);
sewardjf8f819e2002-04-17 23:21:37 +00002076
njnd3040452003-05-19 15:04:06 +00002077 VG_TRACK( post_mutex_lock, tid, mutex);
2078 }
sewardje663cb92002-04-12 10:26:32 +00002079}
2080
2081
2082static
2083void do_pthread_mutex_unlock ( ThreadId tid,
nethercote1f0173b2004-02-28 15:40:36 +00002084 vg_pthread_mutex_t* mutex )
sewardje663cb92002-04-12 10:26:32 +00002085{
sewardj3b5d8862002-04-20 13:53:23 +00002086 Char msg_buf[100];
sewardje663cb92002-04-12 10:26:32 +00002087
sewardj45b4b372002-04-16 22:50:32 +00002088 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj3b5d8862002-04-20 13:53:23 +00002089 VG_(sprintf)(msg_buf, "pthread_mutex_unlock mx %p ...", mutex );
sewardj8937c812002-04-12 20:12:20 +00002090 print_pthread_event(tid, msg_buf);
2091 }
2092
sewardj604ec3c2002-04-18 22:38:41 +00002093 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002094 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002095 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj604ec3c2002-04-18 22:38:41 +00002096
2097 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002098 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002099 "pthread_mutex_unlock: mutex is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002100 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002101 return;
2102 }
2103
2104 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00002105 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002106# ifndef GLIBC_2_1
sewardj604ec3c2002-04-18 22:38:41 +00002107 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002108 case PTHREAD_MUTEX_ADAPTIVE_NP:
2109# endif
sewardja1679dd2002-05-10 22:31:40 +00002110# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002111 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002112# endif
sewardj604ec3c2002-04-18 22:38:41 +00002113 case PTHREAD_MUTEX_RECURSIVE_NP:
2114 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002115 if (mutex->__vg_m_count >= 0) break;
sewardj604ec3c2002-04-18 22:38:41 +00002116 /* else fall thru */
2117 default:
njn25e49d8e72002-09-23 09:36:25 +00002118 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002119 "pthread_mutex_unlock: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002120 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002121 return;
2122 }
sewardje663cb92002-04-12 10:26:32 +00002123
2124 /* Barf if we don't currently hold the mutex. */
nethercote1f0173b2004-02-28 15:40:36 +00002125 if (mutex->__vg_m_count == 0) {
sewardj4dced352002-06-04 22:54:20 +00002126 /* nobody holds it */
njn25e49d8e72002-09-23 09:36:25 +00002127 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002128 "pthread_mutex_unlock: mutex is not locked");
njnd3040452003-05-19 15:04:06 +00002129 SET_PTHREQ_RETVAL(tid, EPERM);
sewardj4dced352002-06-04 22:54:20 +00002130 return;
2131 }
2132
nethercote1f0173b2004-02-28 15:40:36 +00002133 if ((ThreadId)mutex->__vg_m_owner != tid) {
sewardj4dced352002-06-04 22:54:20 +00002134 /* we don't hold it */
njn25e49d8e72002-09-23 09:36:25 +00002135 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002136 "pthread_mutex_unlock: mutex is locked by a different thread");
njnd3040452003-05-19 15:04:06 +00002137 SET_PTHREQ_RETVAL(tid, EPERM);
sewardje663cb92002-04-12 10:26:32 +00002138 return;
2139 }
2140
sewardjf8f819e2002-04-17 23:21:37 +00002141 /* If it's a multiply-locked recursive mutex, just decrement the
2142 lock count and return. */
nethercote1f0173b2004-02-28 15:40:36 +00002143 if (mutex->__vg_m_count > 1) {
2144 vg_assert(mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
2145 mutex->__vg_m_count --;
njnd3040452003-05-19 15:04:06 +00002146 SET_PTHREQ_RETVAL(tid, 0); /* success */
sewardjf8f819e2002-04-17 23:21:37 +00002147 return;
2148 }
2149
sewardj604ec3c2002-04-18 22:38:41 +00002150 /* Now we're sure it is locked exactly once, and by the thread who
sewardjf8f819e2002-04-17 23:21:37 +00002151 is now doing an unlock on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002152 vg_assert(mutex->__vg_m_count == 1);
2153 vg_assert((ThreadId)mutex->__vg_m_owner == tid);
sewardjf8f819e2002-04-17 23:21:37 +00002154
sewardj3b5d8862002-04-20 13:53:23 +00002155 /* Release at max one thread waiting on this mutex. */
2156 release_one_thread_waiting_on_mutex ( mutex, "pthread_mutex_lock" );
sewardje663cb92002-04-12 10:26:32 +00002157
sewardj3b5d8862002-04-20 13:53:23 +00002158 /* Our (tid's) pth_unlock() returns with 0 (success). */
njnd3040452003-05-19 15:04:06 +00002159 SET_PTHREQ_RETVAL(tid, 0); /* Success. */
sewardje663cb92002-04-12 10:26:32 +00002160}
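
/* The count handling above gives recursive mutexes the usual POSIX
   semantics: each extra lock by the owner bumps __vg_m_count, and only
   the final unlock actually releases the mutex.  Client-side view, for
   context only (PTHREAD_MUTEX_RECURSIVE is the standard spelling of the
   PTHREAD_MUTEX_RECURSIVE_NP constant used elsewhere in this file): */
#if 0
#include <pthread.h>

static void recursive_example ( void )
{
   pthread_mutex_t     m;
   pthread_mutexattr_t a;

   pthread_mutexattr_init(&a);
   pthread_mutexattr_settype(&a, PTHREAD_MUTEX_RECURSIVE);
   pthread_mutex_init(&m, &a);

   pthread_mutex_lock(&m);     /* __vg_m_count: 0 -> 1 */
   pthread_mutex_lock(&m);     /* same owner:   1 -> 2 */
   pthread_mutex_unlock(&m);   /* 2 -> 1, still held   */
   pthread_mutex_unlock(&m);   /* 1 -> 0, released     */

   pthread_mutex_destroy(&m);
   pthread_mutexattr_destroy(&a);
}
#endif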
2161
2162
sewardj6072c362002-04-19 14:40:57 +00002163/* -----------------------------------------------------------
2164 CONDITION VARIABLES
2165 -------------------------------------------------------- */
sewardje663cb92002-04-12 10:26:32 +00002166
rjwalsh7109a8c2004-09-02 00:31:02 +00002167/* The relevant type (vg_pthread_cond_t) is in core.h.
sewardj77e466c2002-04-14 02:29:29 +00002168
nethercote1f0173b2004-02-28 15:40:36 +00002169 We don't use any fields of vg_pthread_cond_t for anything at all.
2170 Only the identity of the CVs is important. (Actually, we initialise
2171 __vg_c_waiting in pthread_cond_init() to VG_INVALID_THREADID.)
sewardj6072c362002-04-19 14:40:57 +00002172
2173 Linux pthreads supports no attributes on condition variables, so we
sewardj3b5d8862002-04-20 13:53:23 +00002174 don't need to think too hard there. */
sewardj6072c362002-04-19 14:40:57 +00002175
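
/* Since only the CV's identity matters here, the scheduler's state for
   a waiter is just (associated_cv, associated_mx, optional timeout).
   For context, the standard client-side pattern that produces that
   state is shown below; the example is not part of Valgrind. */
#if 0
#include <pthread.h>

static pthread_mutex_t mx    = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv    = PTHREAD_COND_INITIALIZER;
static int             ready = 0;

static void wait_until_ready ( void )
{
   pthread_mutex_lock(&mx);
   while (!ready)                     /* re-check: wakeups may be spurious */
      pthread_cond_wait(&cv, &mx);    /* atomically unlocks mx and blocks  */
   pthread_mutex_unlock(&mx);
}

static void announce_ready ( void )
{
   pthread_mutex_lock(&mx);
   ready = 1;
   pthread_cond_signal(&cv);          /* -> ..._signal_or_broadcast below  */
   pthread_mutex_unlock(&mx);
}
#endif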
sewardj77e466c2002-04-14 02:29:29 +00002176
sewardj5f07b662002-04-23 16:52:51 +00002177static
2178void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid )
2179{
2180 Char msg_buf[100];
nethercote1f0173b2004-02-28 15:40:36 +00002181 vg_pthread_mutex_t* mx;
2182 vg_pthread_cond_t* cv;
sewardj5f07b662002-04-23 16:52:51 +00002183
sewardjb48e5002002-05-13 00:16:03 +00002184 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002185 && VG_(threads)[tid].status == VgTs_WaitCV
2186 && VG_(threads)[tid].awaken_at != 0xFFFFFFFF);
2187 mx = VG_(threads)[tid].associated_mx;
sewardj5f07b662002-04-23 16:52:51 +00002188 vg_assert(mx != NULL);
sewardj018f7622002-05-15 21:13:39 +00002189 cv = VG_(threads)[tid].associated_cv;
sewardj5f07b662002-04-23 16:52:51 +00002190 vg_assert(cv != NULL);
2191
nethercote1f0173b2004-02-28 15:40:36 +00002192 if (mx->__vg_m_owner == VG_INVALID_THREADID) {
sewardj5f07b662002-04-23 16:52:51 +00002193 /* Currently unheld; hand it out to thread tid. */
nethercote1f0173b2004-02-28 15:40:36 +00002194 vg_assert(mx->__vg_m_count == 0);
sewardj018f7622002-05-15 21:13:39 +00002195 VG_(threads)[tid].status = VgTs_Runnable;
njnd3040452003-05-19 15:04:06 +00002196      SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_timedwait return value */
sewardj018f7622002-05-15 21:13:39 +00002197 VG_(threads)[tid].associated_cv = NULL;
2198 VG_(threads)[tid].associated_mx = NULL;
nethercote1f0173b2004-02-28 15:40:36 +00002199 mx->__vg_m_owner = (/*_pthread_descr*/void*)tid;
2200 mx->__vg_m_count = 1;
sewardj5f07b662002-04-23 16:52:51 +00002201
sewardj0af43bc2002-10-22 04:30:35 +00002202 VG_TRACK( post_mutex_lock, tid, mx );
2203
sewardj5f07b662002-04-23 16:52:51 +00002204 if (VG_(clo_trace_pthread_level) >= 1) {
sewardjc3bd5f52002-05-01 03:24:23 +00002205 VG_(sprintf)(msg_buf,
nethercote1f0173b2004-02-28 15:40:36 +00002206 "pthread_cond_timedwait cv %p: TIMEOUT with mx %p",
sewardjc3bd5f52002-05-01 03:24:23 +00002207 cv, mx );
sewardj5f07b662002-04-23 16:52:51 +00002208 print_pthread_event(tid, msg_buf);
2209 }
2210 } else {
2211 /* Currently held. Make thread tid be blocked on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002212 vg_assert(mx->__vg_m_count > 0);
sewardjdca84112002-11-13 22:29:34 +00002213 VG_TRACK( pre_mutex_lock, tid, mx );
2214
sewardj018f7622002-05-15 21:13:39 +00002215 VG_(threads)[tid].status = VgTs_WaitMX;
njnd3040452003-05-19 15:04:06 +00002216      SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_timedwait return value */
sewardj018f7622002-05-15 21:13:39 +00002217 VG_(threads)[tid].associated_cv = NULL;
2218 VG_(threads)[tid].associated_mx = mx;
sewardj5f07b662002-04-23 16:52:51 +00002219 if (VG_(clo_trace_pthread_level) >= 1) {
2220 VG_(sprintf)(msg_buf,
nethercote1f0173b2004-02-28 15:40:36 +00002221 "pthread_cond_timedwait cv %p: TIMEOUT -> BLOCK for mx %p",
sewardj5f07b662002-04-23 16:52:51 +00002222 cv, mx );
2223 print_pthread_event(tid, msg_buf);
2224 }
sewardj5f07b662002-04-23 16:52:51 +00002225 }
2226}
2227
2228
sewardj3b5d8862002-04-20 13:53:23 +00002229static
nethercote1f0173b2004-02-28 15:40:36 +00002230void release_N_threads_waiting_on_cond ( vg_pthread_cond_t* cond,
sewardj3b5d8862002-04-20 13:53:23 +00002231 Int n_to_release,
2232 Char* caller )
2233{
2234 Int i;
2235 Char msg_buf[100];
nethercote1f0173b2004-02-28 15:40:36 +00002236 vg_pthread_mutex_t* mx;
sewardj3b5d8862002-04-20 13:53:23 +00002237
2238 while (True) {
2239 if (n_to_release == 0)
2240 return;
2241
2242 /* Find a thread waiting on this CV. */
2243 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00002244 if (VG_(threads)[i].status == VgTs_Empty)
sewardj3b5d8862002-04-20 13:53:23 +00002245 continue;
sewardj018f7622002-05-15 21:13:39 +00002246 if (VG_(threads)[i].status == VgTs_WaitCV
2247 && VG_(threads)[i].associated_cv == cond)
sewardj3b5d8862002-04-20 13:53:23 +00002248 break;
2249 }
2250 vg_assert(i <= VG_N_THREADS);
2251
2252 if (i == VG_N_THREADS) {
2253 /* Nobody else is waiting on it. */
2254 return;
2255 }
2256
sewardj018f7622002-05-15 21:13:39 +00002257 mx = VG_(threads)[i].associated_mx;
sewardj3b5d8862002-04-20 13:53:23 +00002258 vg_assert(mx != NULL);
2259
sewardjdca84112002-11-13 22:29:34 +00002260 VG_TRACK( pre_mutex_lock, i, mx );
2261
nethercote1f0173b2004-02-28 15:40:36 +00002262 if (mx->__vg_m_owner == VG_INVALID_THREADID) {
sewardj3b5d8862002-04-20 13:53:23 +00002263 /* Currently unheld; hand it out to thread i. */
nethercote1f0173b2004-02-28 15:40:36 +00002264 vg_assert(mx->__vg_m_count == 0);
sewardj018f7622002-05-15 21:13:39 +00002265 VG_(threads)[i].status = VgTs_Runnable;
2266 VG_(threads)[i].associated_cv = NULL;
2267 VG_(threads)[i].associated_mx = NULL;
nethercote1f0173b2004-02-28 15:40:36 +00002268 mx->__vg_m_owner = (/*_pthread_descr*/void*)i;
2269 mx->__vg_m_count = 1;
sewardj5f07b662002-04-23 16:52:51 +00002270 /* .m_edx already holds pth_cond_wait success value (0) */
sewardj3b5d8862002-04-20 13:53:23 +00002271
sewardj0af43bc2002-10-22 04:30:35 +00002272 VG_TRACK( post_mutex_lock, i, mx );
2273
sewardj3b5d8862002-04-20 13:53:23 +00002274 if (VG_(clo_trace_pthread_level) >= 1) {
2275 VG_(sprintf)(msg_buf, "%s cv %p: RESUME with mx %p",
2276 caller, cond, mx );
2277 print_pthread_event(i, msg_buf);
2278 }
2279
2280 } else {
2281 /* Currently held. Make thread i be blocked on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002282 vg_assert(mx->__vg_m_count > 0);
sewardj018f7622002-05-15 21:13:39 +00002283 VG_(threads)[i].status = VgTs_WaitMX;
2284 VG_(threads)[i].associated_cv = NULL;
2285 VG_(threads)[i].associated_mx = mx;
njnd3040452003-05-19 15:04:06 +00002286 SET_PTHREQ_RETVAL(i, 0); /* pth_cond_wait success value */
sewardj3b5d8862002-04-20 13:53:23 +00002287
2288 if (VG_(clo_trace_pthread_level) >= 1) {
2289 VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
2290 caller, cond, mx );
2291 print_pthread_event(i, msg_buf);
2292 }
2293
2294 }
jsgf855d93d2003-10-13 22:26:55 +00002295
sewardj3b5d8862002-04-20 13:53:23 +00002296 n_to_release--;
2297 }
2298}
2299
2300
2301static
2302void do_pthread_cond_wait ( ThreadId tid,
nethercote1f0173b2004-02-28 15:40:36 +00002303 vg_pthread_cond_t *cond,
2304 vg_pthread_mutex_t *mutex,
sewardj5f07b662002-04-23 16:52:51 +00002305 UInt ms_end )
sewardj3b5d8862002-04-20 13:53:23 +00002306{
2307 Char msg_buf[100];
2308
sewardj5f07b662002-04-23 16:52:51 +00002309 /* If ms_end == 0xFFFFFFFF, wait forever (no timeout). Otherwise,
2310 ms_end is the ending millisecond. */
2311
sewardj3b5d8862002-04-20 13:53:23 +00002312 /* pre: mutex should be a valid mutex and owned by tid. */
2313 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj5f07b662002-04-23 16:52:51 +00002314 VG_(sprintf)(msg_buf, "pthread_cond_wait cv %p, mx %p, end %d ...",
2315 cond, mutex, ms_end );
sewardj3b5d8862002-04-20 13:53:23 +00002316 print_pthread_event(tid, msg_buf);
2317 }
2318
2319 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002320 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002321 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj3b5d8862002-04-20 13:53:23 +00002322
nethercoted3693d02004-04-26 08:05:24 +00002323 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002324 VG_(record_pthread_error)( tid,
nethercoted3693d02004-04-26 08:05:24 +00002325 "pthread_cond_wait/timedwait: mutex is NULL");
2326 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
2327 return;
2328 }
2329
2330 if (cond == NULL) {
2331 VG_(record_pthread_error)( tid,
2332 "pthread_cond_wait/timedwait: cond is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002333 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002334 return;
2335 }
2336
2337 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00002338 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002339# ifndef GLIBC_2_1
sewardj3b5d8862002-04-20 13:53:23 +00002340 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002341 case PTHREAD_MUTEX_ADAPTIVE_NP:
2342# endif
sewardja1679dd2002-05-10 22:31:40 +00002343# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002344 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002345# endif
sewardj3b5d8862002-04-20 13:53:23 +00002346 case PTHREAD_MUTEX_RECURSIVE_NP:
2347 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002348 if (mutex->__vg_m_count >= 0) break;
sewardj3b5d8862002-04-20 13:53:23 +00002349 /* else fall thru */
2350 default:
njn25e49d8e72002-09-23 09:36:25 +00002351 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002352 "pthread_cond_wait/timedwait: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002353 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002354 return;
2355 }
2356
2357 /* Barf if we don't currently hold the mutex. */
nethercoted3693d02004-04-26 08:05:24 +00002358 if (mutex->__vg_m_count == 0 /* nobody holds it */) {
njn25e49d8e72002-09-23 09:36:25 +00002359 VG_(record_pthread_error)( tid,
nethercoted3693d02004-04-26 08:05:24 +00002360 "pthread_cond_wait/timedwait: mutex is unlocked");
2361 SET_PTHREQ_RETVAL(tid, VKI_EPERM);
2362 return;
2363 }
2364
2365 if ((ThreadId)mutex->__vg_m_owner != tid /* we don't hold it */) {
2366 VG_(record_pthread_error)( tid,
2367 "pthread_cond_wait/timedwait: mutex is locked by another thread");
2368 SET_PTHREQ_RETVAL(tid, VKI_EPERM);
sewardj3b5d8862002-04-20 13:53:23 +00002369 return;
2370 }
2371
thughes513197c2004-06-13 12:07:53 +00002372   if (VG_(threads)[tid].cancel_pend != NULL &&
2373 VG_(threads)[tid].cancel_st) {
2374 make_thread_jump_to_cancelhdlr ( tid );
2375 } else {
2376 /* Queue ourselves on the condition. */
2377 VG_(threads)[tid].status = VgTs_WaitCV;
2378 VG_(threads)[tid].associated_cv = cond;
2379 VG_(threads)[tid].associated_mx = mutex;
2380 VG_(threads)[tid].awaken_at = ms_end;
2381 if (ms_end != 0xFFFFFFFF)
nethercotef971ab72004-08-02 16:27:40 +00002382 add_timeout(tid, ms_end);
sewardj3b5d8862002-04-20 13:53:23 +00002383
thughes513197c2004-06-13 12:07:53 +00002384 if (VG_(clo_trace_pthread_level) >= 1) {
2385 VG_(sprintf)(msg_buf,
2386 "pthread_cond_wait cv %p, mx %p: BLOCK",
2387 cond, mutex );
2388 print_pthread_event(tid, msg_buf);
2389 }
2390
2391 /* Release the mutex. */
2392 release_one_thread_waiting_on_mutex ( mutex, "pthread_cond_wait " );
sewardj3b5d8862002-04-20 13:53:23 +00002393 }
sewardj3b5d8862002-04-20 13:53:23 +00002394}
2395
2396
2397static
2398void do_pthread_cond_signal_or_broadcast ( ThreadId tid,
2399 Bool broadcast,
nethercote1f0173b2004-02-28 15:40:36 +00002400 vg_pthread_cond_t *cond )
sewardj3b5d8862002-04-20 13:53:23 +00002401{
2402 Char msg_buf[100];
2403 Char* caller
2404 = broadcast ? "pthread_cond_broadcast"
2405 : "pthread_cond_signal ";
2406
2407 if (VG_(clo_trace_pthread_level) >= 2) {
2408 VG_(sprintf)(msg_buf, "%s cv %p ...",
2409 caller, cond );
2410 print_pthread_event(tid, msg_buf);
2411 }
2412
2413 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002414 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002415 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj3b5d8862002-04-20 13:53:23 +00002416
2417 if (cond == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002418 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002419 "pthread_cond_signal/broadcast: cond is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002420 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002421 return;
2422 }
2423
2424 release_N_threads_waiting_on_cond (
2425 cond,
2426 broadcast ? VG_N_THREADS : 1,
2427 caller
2428 );
2429
njnd3040452003-05-19 15:04:06 +00002430 SET_PTHREQ_RETVAL(tid, 0); /* success */
sewardj3b5d8862002-04-20 13:53:23 +00002431}
2432
sewardj77e466c2002-04-14 02:29:29 +00002433
sewardj5f07b662002-04-23 16:52:51 +00002434/* -----------------------------------------------------------
2435 THREAD SPECIFIC DATA
2436 -------------------------------------------------------- */
2437
2438static __inline__
2439Bool is_valid_key ( ThreadKey k )
2440{
2441 /* k unsigned; hence no < 0 check */
2442 if (k >= VG_N_THREAD_KEYS) return False;
2443 if (!vg_thread_keys[k].inuse) return False;
2444 return True;
2445}
2446
sewardj00a66b12002-10-12 16:42:35 +00002447
2448/* Return in %EDX a value of 1 if the key is valid, else 0. */
2449static
2450void do_pthread_key_validate ( ThreadId tid,
2451 pthread_key_t key )
2452{
2453 Char msg_buf[100];
2454
2455 if (VG_(clo_trace_pthread_level) >= 1) {
2456 VG_(sprintf)(msg_buf, "pthread_key_validate key %p",
2457 key );
2458 print_pthread_event(tid, msg_buf);
2459 }
2460
2461 vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
2462 vg_assert(VG_(is_valid_tid)(tid)
2463 && VG_(threads)[tid].status == VgTs_Runnable);
2464
2465 if (is_valid_key((ThreadKey)key)) {
njnd3040452003-05-19 15:04:06 +00002466 SET_PTHREQ_RETVAL(tid, 1);
sewardj00a66b12002-10-12 16:42:35 +00002467 } else {
njnd3040452003-05-19 15:04:06 +00002468 SET_PTHREQ_RETVAL(tid, 0);
sewardj00a66b12002-10-12 16:42:35 +00002469 }
2470}
2471
2472
sewardj5f07b662002-04-23 16:52:51 +00002473static
2474void do_pthread_key_create ( ThreadId tid,
2475 pthread_key_t* key,
2476 void (*destructor)(void*) )
2477{
2478 Int i;
2479 Char msg_buf[100];
2480
2481 if (VG_(clo_trace_pthread_level) >= 1) {
2482 VG_(sprintf)(msg_buf, "pthread_key_create *key %p, destr %p",
2483 key, destructor );
2484 print_pthread_event(tid, msg_buf);
2485 }
2486
2487 vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
sewardjb48e5002002-05-13 00:16:03 +00002488 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002489 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002490
2491 for (i = 0; i < VG_N_THREAD_KEYS; i++)
2492 if (!vg_thread_keys[i].inuse)
2493 break;
2494
2495 if (i == VG_N_THREAD_KEYS) {
jsgf855d93d2003-10-13 22:26:55 +00002496 VG_(message)(Vg_UserMsg, "pthread_key_create() asked for too many keys (more than %d): increase VG_N_THREAD_KEYS and recompile Valgrind.",
2497 VG_N_THREAD_KEYS);
2498 SET_PTHREQ_RETVAL(tid, EAGAIN);
2499 return;
sewardj5f07b662002-04-23 16:52:51 +00002500 }
2501
sewardj870497a2002-05-29 01:06:47 +00002502 vg_thread_keys[i].inuse = True;
2503 vg_thread_keys[i].destructor = destructor;
sewardjc3bd5f52002-05-01 03:24:23 +00002504
sewardj5a3798b2002-06-04 23:24:22 +00002505   /* check key for addressability */
njn72718642003-07-24 08:45:32 +00002506 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_key_create: key",
njn25e49d8e72002-09-23 09:36:25 +00002507 (Addr)key, sizeof(pthread_key_t));
sewardj5f07b662002-04-23 16:52:51 +00002508 *key = i;
njn25e49d8e72002-09-23 09:36:25 +00002509 VG_TRACK( post_mem_write, (Addr)key, sizeof(pthread_key_t) );
sewardjc3bd5f52002-05-01 03:24:23 +00002510
njnd3040452003-05-19 15:04:06 +00002511 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002512}
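/* Illustrative only: keys handed out above are just small indices into
   vg_thread_keys[], so a client that asks for more than VG_N_THREAD_KEYS
   keys gets EAGAIN, which POSIX permits.  A hypothetical client-side use
   (a real client would need <pthread.h> and <errno.h>): */
#if 0
   pthread_key_t k;
   int r = pthread_key_create(&k, free);  /* 'free' runs at thread exit
                                             for non-NULL values */
   if (r == EAGAIN) {
      /* key table exhausted: raise VG_N_THREAD_KEYS and rebuild Valgrind,
         as the message above suggests */
   }
#endif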
2513
2514
2515static
2516void do_pthread_key_delete ( ThreadId tid, pthread_key_t key )
2517{
2518 Char msg_buf[100];
2519 if (VG_(clo_trace_pthread_level) >= 1) {
2520 VG_(sprintf)(msg_buf, "pthread_key_delete key %d",
2521 key );
2522 print_pthread_event(tid, msg_buf);
2523 }
2524
sewardjb48e5002002-05-13 00:16:03 +00002525 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002526 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002527
2528 if (!is_valid_key(key)) {
njn25e49d8e72002-09-23 09:36:25 +00002529 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002530 "pthread_key_delete: key is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002531 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj5f07b662002-04-23 16:52:51 +00002532 return;
2533 }
2534
2535 vg_thread_keys[key].inuse = False;
sewardj648b3152002-12-09 19:07:59 +00002536 vg_thread_keys[key].destructor = NULL;
njnd3040452003-05-19 15:04:06 +00002537 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002538}
2539
2540
sewardj00a66b12002-10-12 16:42:35 +00002541/* Get the .specific_ptr for a thread. Return 1 if the thread-slot
2542 isn't in use, so that client-space can scan all thread slots. 1
2543 cannot be confused with NULL or a legitimately-aligned specific_ptr
2544 value. */
sewardj5f07b662002-04-23 16:52:51 +00002545static
sewardj00a66b12002-10-12 16:42:35 +00002546void do_pthread_getspecific_ptr ( ThreadId tid )
sewardj5f07b662002-04-23 16:52:51 +00002547{
sewardj00a66b12002-10-12 16:42:35 +00002548 void** specifics_ptr;
2549 Char msg_buf[100];
2550
jsgf855d93d2003-10-13 22:26:55 +00002551 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj00a66b12002-10-12 16:42:35 +00002552 VG_(sprintf)(msg_buf, "pthread_getspecific_ptr" );
sewardj5f07b662002-04-23 16:52:51 +00002553 print_pthread_event(tid, msg_buf);
2554 }
2555
nethercote36881a22004-08-04 14:03:16 +00002556 vg_assert(is_valid_or_empty_tid(tid));
sewardj5f07b662002-04-23 16:52:51 +00002557
sewardj00a66b12002-10-12 16:42:35 +00002558 if (VG_(threads)[tid].status == VgTs_Empty) {
njnd3040452003-05-19 15:04:06 +00002559 SET_PTHREQ_RETVAL(tid, 1);
sewardj5f07b662002-04-23 16:52:51 +00002560 return;
2561 }
2562
sewardj00a66b12002-10-12 16:42:35 +00002563 specifics_ptr = VG_(threads)[tid].specifics_ptr;
2564 vg_assert(specifics_ptr == NULL
2565 || IS_ALIGNED4_ADDR(specifics_ptr));
2566
njnd3040452003-05-19 15:04:06 +00002567 SET_PTHREQ_RETVAL(tid, (UInt)specifics_ptr);
sewardj5f07b662002-04-23 16:52:51 +00002568}
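/* Illustrative only: a minimal sketch, under the assumptions visible in
   this file, of the per-thread layout the specifics code relies on.  Each
   thread has a single specifics_ptr which, when non-NULL, points at a
   client-space array indexed by key (see do__get_key_destr_and_spec
   below); pthread_getspecific therefore reduces to one request for the
   array pointer plus an ordinary array load. */
#if 0
   void** specifics = VG_(threads)[tid].specifics_ptr;  /* may be NULL */
   void*  value     = (specifics == NULL) ? NULL : specifics[key];
#endif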
2569
2570
2571static
sewardj00a66b12002-10-12 16:42:35 +00002572void do_pthread_setspecific_ptr ( ThreadId tid, void** ptr )
sewardj5f07b662002-04-23 16:52:51 +00002573{
2574 Char msg_buf[100];
2575 if (VG_(clo_trace_pthread_level) >= 1) {
sewardj00a66b12002-10-12 16:42:35 +00002576 VG_(sprintf)(msg_buf, "pthread_setspecific_ptr ptr %p",
2577 ptr );
sewardj5f07b662002-04-23 16:52:51 +00002578 print_pthread_event(tid, msg_buf);
2579 }
2580
sewardjb48e5002002-05-13 00:16:03 +00002581 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002582 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002583
sewardj00a66b12002-10-12 16:42:35 +00002584 VG_(threads)[tid].specifics_ptr = ptr;
njnd3040452003-05-19 15:04:06 +00002585 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002586}
2587
2588
sewardj870497a2002-05-29 01:06:47 +00002589/* Helper for calling destructors at thread exit.  If key is valid,
 2590   copy the thread's specific value into cu->data.function.arg and put
 2591   the *key*'s destructor fn address in cu->data.function.fn.  Then
 2592   return 0 to the caller.  Otherwise return non-zero to the caller. */
2593static
2594void do__get_key_destr_and_spec ( ThreadId tid,
2595 pthread_key_t key,
2596 CleanupEntry* cu )
2597{
2598 Char msg_buf[100];
jsgf855d93d2003-10-13 22:26:55 +00002599 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj870497a2002-05-29 01:06:47 +00002600 VG_(sprintf)(msg_buf,
2601 "get_key_destr_and_arg (key = %d)", key );
2602 print_pthread_event(tid, msg_buf);
2603 }
2604 vg_assert(VG_(is_valid_tid)(tid));
2605 vg_assert(key >= 0 && key < VG_N_THREAD_KEYS);
njn25e49d8e72002-09-23 09:36:25 +00002606
sewardj870497a2002-05-29 01:06:47 +00002607 if (!vg_thread_keys[key].inuse) {
njnd3040452003-05-19 15:04:06 +00002608 SET_PTHREQ_RETVAL(tid, -1);
sewardj870497a2002-05-29 01:06:47 +00002609 return;
2610 }
njn72718642003-07-24 08:45:32 +00002611 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "get_key_destr_and_spec: cu",
2612 (Addr)cu, sizeof(CleanupEntry) );
sewardj00a66b12002-10-12 16:42:35 +00002613
thughes11975ff2004-06-12 12:58:22 +00002614 cu->type = VgCt_Function;
2615 cu->data.function.fn = vg_thread_keys[key].destructor;
sewardj00a66b12002-10-12 16:42:35 +00002616 if (VG_(threads)[tid].specifics_ptr == NULL) {
thughes11975ff2004-06-12 12:58:22 +00002617 cu->data.function.arg = NULL;
sewardj00a66b12002-10-12 16:42:35 +00002618 } else {
njn72718642003-07-24 08:45:32 +00002619 VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
sewardj00a66b12002-10-12 16:42:35 +00002620 "get_key_destr_and_spec: key",
2621 (Addr)(&VG_(threads)[tid].specifics_ptr[key]),
2622 sizeof(void*) );
thughes11975ff2004-06-12 12:58:22 +00002623 cu->data.function.arg = VG_(threads)[tid].specifics_ptr[key];
sewardj00a66b12002-10-12 16:42:35 +00002624 }
2625
njn25e49d8e72002-09-23 09:36:25 +00002626 VG_TRACK( post_mem_write, (Addr)cu, sizeof(CleanupEntry) );
njnd3040452003-05-19 15:04:06 +00002627 SET_PTHREQ_RETVAL(tid, 0);
sewardj870497a2002-05-29 01:06:47 +00002628}
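/* Illustrative only: roughly how the client-side thread-exit path (in
   vg_libpthread.c, which is not part of this file) is expected to use the
   helper above -- query each key in turn and run the destructor on
   non-NULL values.  get_key_destr_and_spec() here is a stand-in for the
   real client-request wrapper, not an actual function. */
#if 0
   CleanupEntry cu;
   Int k;
   for (k = 0; k < VG_N_THREAD_KEYS; k++) {
      if (get_key_destr_and_spec(k, &cu) != 0)
         continue;                                  /* key not in use */
      if (cu.data.function.fn != NULL && cu.data.function.arg != NULL)
         cu.data.function.fn(cu.data.function.arg);
   }
#endif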
2629
2630
sewardjb48e5002002-05-13 00:16:03 +00002631/* ---------------------------------------------------
2632 SIGNALS
2633 ------------------------------------------------ */
2634
2635/* See comment in vg_libthread.c:pthread_sigmask() regarding
sewardj018f7622002-05-15 21:13:39 +00002636 deliberate confusion of types sigset_t and vki_sigset_t. Return 0
2637 for OK and 1 for some kind of addressing error, which the
2638 vg_libpthread.c routine turns into return values 0 and EFAULT
2639 respectively. */
sewardjb48e5002002-05-13 00:16:03 +00002640static
2641void do_pthread_sigmask ( ThreadId tid,
sewardj018f7622002-05-15 21:13:39 +00002642 Int vki_how,
sewardjb48e5002002-05-13 00:16:03 +00002643 vki_ksigset_t* newmask,
2644 vki_ksigset_t* oldmask )
2645{
2646 Char msg_buf[100];
2647 if (VG_(clo_trace_pthread_level) >= 1) {
2648 VG_(sprintf)(msg_buf,
sewardj018f7622002-05-15 21:13:39 +00002649 "pthread_sigmask vki_how %d, newmask %p, oldmask %p",
2650 vki_how, newmask, oldmask );
sewardjb48e5002002-05-13 00:16:03 +00002651 print_pthread_event(tid, msg_buf);
2652 }
2653
2654 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002655 && VG_(threads)[tid].status == VgTs_Runnable);
sewardjb48e5002002-05-13 00:16:03 +00002656
njn25e49d8e72002-09-23 09:36:25 +00002657 if (newmask)
njn72718642003-07-24 08:45:32 +00002658 VG_TRACK( pre_mem_read, Vg_CorePThread, tid, "pthread_sigmask: newmask",
njn25e49d8e72002-09-23 09:36:25 +00002659 (Addr)newmask, sizeof(vki_ksigset_t));
2660 if (oldmask)
njn72718642003-07-24 08:45:32 +00002661 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_sigmask: oldmask",
njn25e49d8e72002-09-23 09:36:25 +00002662 (Addr)oldmask, sizeof(vki_ksigset_t));
sewardjb48e5002002-05-13 00:16:03 +00002663
sewardj018f7622002-05-15 21:13:39 +00002664 VG_(do_pthread_sigmask_SCSS_upd) ( tid, vki_how, newmask, oldmask );
sewardjb48e5002002-05-13 00:16:03 +00002665
njn25e49d8e72002-09-23 09:36:25 +00002666 if (oldmask)
2667 VG_TRACK( post_mem_write, (Addr)oldmask, sizeof(vki_ksigset_t) );
sewardj3a951cf2002-05-15 22:25:47 +00002668
sewardj018f7622002-05-15 21:13:39 +00002669 /* Success. */
njnd3040452003-05-19 15:04:06 +00002670 SET_PTHREQ_RETVAL(tid, 0);
sewardjb48e5002002-05-13 00:16:03 +00002671}
2672
2673
2674static
sewardj018f7622002-05-15 21:13:39 +00002675void do_pthread_kill ( ThreadId tid, /* me */
2676 ThreadId thread, /* thread to signal */
2677 Int sig )
2678{
nethercote97ccd5e2004-08-02 12:10:01 +00002679 ThreadState* tst;
sewardj018f7622002-05-15 21:13:39 +00002680 Char msg_buf[100];
2681
2682 if (VG_(clo_trace_signals) || VG_(clo_trace_pthread_level) >= 1) {
2683 VG_(sprintf)(msg_buf,
2684 "pthread_kill thread %d, signo %d",
2685 thread, sig );
2686 print_pthread_event(tid, msg_buf);
2687 }
2688
2689 vg_assert(VG_(is_valid_tid)(tid)
2690 && VG_(threads)[tid].status == VgTs_Runnable);
2691
sewardj4dced352002-06-04 22:54:20 +00002692 if (!VG_(is_valid_tid)(thread)) {
njn25e49d8e72002-09-23 09:36:25 +00002693 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002694 "pthread_kill: invalid target thread");
jsgf855d93d2003-10-13 22:26:55 +00002695 SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
2696 return;
2697 }
2698
2699 if (sig == 0) {
 2700      /* OK; signal 0 just tests that the target thread exists -- nothing is delivered */
2701 SET_PTHREQ_RETVAL(tid, 0);
sewardj018f7622002-05-15 21:13:39 +00002702 return;
2703 }
2704
2705 if (sig < 1 || sig > VKI_KNSIG) {
jsgf855d93d2003-10-13 22:26:55 +00002706 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj018f7622002-05-15 21:13:39 +00002707 return;
2708 }
2709
nethercote97ccd5e2004-08-02 12:10:01 +00002710 tst = VG_(get_ThreadState)(thread);
2711 vg_assert(NULL != tst->proxy);
2712 VG_(proxy_sendsig)(thread, sig);
njnd3040452003-05-19 15:04:06 +00002713 SET_PTHREQ_RETVAL(tid, 0);
sewardjb48e5002002-05-13 00:16:03 +00002714}
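/* Illustrative only: the sig == 0 branch above supports the standard
   POSIX idiom of probing whether a thread is still valid without
   delivering anything (a real client would need <pthread.h>, <signal.h>
   and <errno.h>): */
#if 0
   if (pthread_kill(thr, 0) == ESRCH) {
      /* 'thr' does not name a live thread */
   }
#endif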
2715
2716
sewardj2cb00342002-06-28 01:46:26 +00002717/* -----------------------------------------------------------
2718 FORK HANDLERS.
2719 -------------------------------------------------------- */
2720
2721static
2722void do__set_fhstack_used ( ThreadId tid, Int n )
2723{
2724 Char msg_buf[100];
2725 if (VG_(clo_trace_sched)) {
2726 VG_(sprintf)(msg_buf, "set_fhstack_used to %d", n );
2727 print_pthread_event(tid, msg_buf);
2728 }
2729
2730 vg_assert(VG_(is_valid_tid)(tid)
2731 && VG_(threads)[tid].status == VgTs_Runnable);
2732
2733 if (n >= 0 && n < VG_N_FORKHANDLERSTACK) {
2734 vg_fhstack_used = n;
njnd3040452003-05-19 15:04:06 +00002735 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002736 } else {
njnd3040452003-05-19 15:04:06 +00002737 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002738 }
2739}
2740
2741
2742static
2743void do__get_fhstack_used ( ThreadId tid )
2744{
2745 Int n;
2746 Char msg_buf[100];
2747 if (VG_(clo_trace_sched)) {
2748 VG_(sprintf)(msg_buf, "get_fhstack_used" );
2749 print_pthread_event(tid, msg_buf);
2750 }
2751
2752 vg_assert(VG_(is_valid_tid)(tid)
2753 && VG_(threads)[tid].status == VgTs_Runnable);
2754
2755 n = vg_fhstack_used;
2756 vg_assert(n >= 0 && n < VG_N_FORKHANDLERSTACK);
njnd3040452003-05-19 15:04:06 +00002757 SET_PTHREQ_RETVAL(tid, n);
sewardj2cb00342002-06-28 01:46:26 +00002758}
2759
2760static
2761void do__set_fhstack_entry ( ThreadId tid, Int n, ForkHandlerEntry* fh )
2762{
2763 Char msg_buf[100];
2764 if (VG_(clo_trace_sched)) {
2765 VG_(sprintf)(msg_buf, "set_fhstack_entry %d to %p", n, fh );
2766 print_pthread_event(tid, msg_buf);
2767 }
2768
2769 vg_assert(VG_(is_valid_tid)(tid)
2770 && VG_(threads)[tid].status == VgTs_Runnable);
njn72718642003-07-24 08:45:32 +00002771 VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
njn25e49d8e72002-09-23 09:36:25 +00002772 "pthread_atfork: prepare/parent/child",
2773 (Addr)fh, sizeof(ForkHandlerEntry));
sewardj2cb00342002-06-28 01:46:26 +00002774
njn25e49d8e72002-09-23 09:36:25 +00002775 if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
njnd3040452003-05-19 15:04:06 +00002776 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002777 return;
2778 }
2779
2780 vg_fhstack[n] = *fh;
njnd3040452003-05-19 15:04:06 +00002781 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002782}
2783
2784
2785static
2786void do__get_fhstack_entry ( ThreadId tid, Int n, /*OUT*/
2787 ForkHandlerEntry* fh )
2788{
2789 Char msg_buf[100];
2790 if (VG_(clo_trace_sched)) {
2791 VG_(sprintf)(msg_buf, "get_fhstack_entry %d", n );
2792 print_pthread_event(tid, msg_buf);
2793 }
2794
2795 vg_assert(VG_(is_valid_tid)(tid)
2796 && VG_(threads)[tid].status == VgTs_Runnable);
njn72718642003-07-24 08:45:32 +00002797 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "fork: prepare/parent/child",
njn25e49d8e72002-09-23 09:36:25 +00002798 (Addr)fh, sizeof(ForkHandlerEntry));
sewardj2cb00342002-06-28 01:46:26 +00002799
njn25e49d8e72002-09-23 09:36:25 +00002800 if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
njnd3040452003-05-19 15:04:06 +00002801 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002802 return;
2803 }
2804
2805 *fh = vg_fhstack[n];
njnd3040452003-05-19 15:04:06 +00002806 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002807
njn25e49d8e72002-09-23 09:36:25 +00002808 VG_TRACK( post_mem_write, (Addr)fh, sizeof(ForkHandlerEntry) );
sewardj2cb00342002-06-28 01:46:26 +00002809}
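/* Illustrative only: a sketch of how a client-side pthread_atfork() could
   drive the four fork-handler requests above.  The ForkHandlerEntry field
   names (prepare/parent/child) are an assumption based on the track
   message text, and the req_* helpers stand in for the real
   client-request wrappers. */
#if 0
   ForkHandlerEntry fh;
   Int n = req_get_fhstack_used();
   if (n < 0 || n >= VG_N_FORKHANDLERSTACK)
      return ENOMEM;                      /* handler stack is full */
   fh.prepare = prepare;                  /* assumed field names */
   fh.parent  = parent;
   fh.child   = child;
   if (req_set_fhstack_entry(n, &fh) != 0)
      return ENOMEM;
   req_set_fhstack_used(n + 1);
   return 0;
#endif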
2810
thughesdaa34562004-06-27 12:48:53 +00002811
2812static
2813void do__get_stack_info ( ThreadId tid, ThreadId which, StackInfo* si )
2814{
2815 Char msg_buf[100];
2816
2817 vg_assert(VG_(is_valid_tid)(tid)
2818 && VG_(threads)[tid].status == VgTs_Runnable);
2819
2820 if (VG_(clo_trace_sched)) {
2821 VG_(sprintf)(msg_buf, "get_stack_info for tid %d", which );
2822 print_pthread_event(tid, msg_buf);
2823 }
2824
2825 if (!VG_(is_valid_tid)(which)) {
2826 SET_PTHREQ_RETVAL(tid, -1);
2827 return;
2828 }
2829
2830 si->base = VG_(threads)[which].stack_base;
2831 si->size = VG_(threads)[which].stack_size
2832 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
2833 - VG_(threads)[which].stack_guard_size;
2834 si->guardsize = VG_(threads)[which].stack_guard_size;
2835
2836 SET_PTHREQ_RETVAL(tid, 0);
2837}
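/* Illustrative only: how a client-side stack-attribute query might use
   VG_USERREQ__GET_STACK_INFO (dispatched in do_client_request() below).
   req_get_stack_info() and use_stack_bounds() are stand-ins, not real
   functions. */
#if 0
   StackInfo si;
   if (req_get_stack_info(which, &si) == 0) {
      /* si.base, si.size and si.guardsize are filled in as above;
         size excludes both the redzone and the guard area. */
      use_stack_bounds((void*)si.base, si.size, si.guardsize);
   }
#endif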
2838
njnd3040452003-05-19 15:04:06 +00002839/* ---------------------------------------------------------------------
2840 Specifying shadow register values
2841 ------------------------------------------------------------------ */
2842
2843void VG_(set_return_from_syscall_shadow) ( ThreadId tid, UInt ret_shadow )
2844{
nethercote15218bd2004-09-11 15:11:47 +00002845 VG_(set_thread_shadow_archreg)(tid, R_SYSCALL_RET, ret_shadow);
njnd3040452003-05-19 15:04:06 +00002846}
2847
2848UInt VG_(get_exit_status_shadow) ( void )
2849{
nethercote15218bd2004-09-11 15:11:47 +00002850 return VG_(get_shadow_archreg)(R_SYSCALL_ARG1);
njnd3040452003-05-19 15:04:06 +00002851}
2852
rjwalshe4e779d2004-04-16 23:02:29 +00002853void VG_(intercept_libc_freeres_wrapper)(Addr addr)
2854{
nethercotef971ab72004-08-02 16:27:40 +00002855 __libc_freeres_wrapper = addr;
rjwalshe4e779d2004-04-16 23:02:29 +00002856}
sewardj2cb00342002-06-28 01:46:26 +00002857
sewardje663cb92002-04-12 10:26:32 +00002858/* ---------------------------------------------------------------------
sewardj124ca2a2002-06-20 10:19:38 +00002859 Handle client requests.
sewardje663cb92002-04-12 10:26:32 +00002860 ------------------------------------------------------------------ */
2861
sewardj124ca2a2002-06-20 10:19:38 +00002862/* Do a client request for the thread tid. After the request, tid may
2863 or may not still be runnable; if not, the scheduler will have to
2864 choose a new thread to run.
2865*/
sewardje663cb92002-04-12 10:26:32 +00002866static
nethercote3e901a22004-09-11 13:17:02 +00002867void do_client_request ( ThreadId tid, UInt* arg )
sewardje663cb92002-04-12 10:26:32 +00002868{
nethercote3e901a22004-09-11 13:17:02 +00002869 UInt req_no = arg[0];
sewardj124ca2a2002-06-20 10:19:38 +00002870
fitzhardinge98abfc72003-12-16 02:05:15 +00002871 if (0)
nethercote3e901a22004-09-11 13:17:02 +00002872 VG_(printf)("req no = 0x%x, arg = %p\n", req_no, arg);
sewardje663cb92002-04-12 10:26:32 +00002873 switch (req_no) {
2874
njn3e884182003-04-15 13:03:23 +00002875 case VG_USERREQ__CLIENT_CALL0: {
2876 UInt (*f)(void) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002877 if (f == NULL)
nethercote66b3af62004-09-11 13:06:55 +00002878 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL0: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00002879 else
2880 SET_CLCALL_RETVAL(tid, f ( ), (Addr)f);
njn3e884182003-04-15 13:03:23 +00002881 break;
2882 }
2883 case VG_USERREQ__CLIENT_CALL1: {
2884 UInt (*f)(UInt) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002885 if (f == NULL)
nethercote66b3af62004-09-11 13:06:55 +00002886 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL1: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00002887 else
2888 SET_CLCALL_RETVAL(tid, f ( arg[2] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002889 break;
2890 }
2891 case VG_USERREQ__CLIENT_CALL2: {
2892 UInt (*f)(UInt, UInt) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002893 if (f == NULL)
nethercote66b3af62004-09-11 13:06:55 +00002894 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL2: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00002895 else
2896 SET_CLCALL_RETVAL(tid, f ( arg[2], arg[3] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002897 break;
2898 }
2899 case VG_USERREQ__CLIENT_CALL3: {
2900 UInt (*f)(UInt, UInt, UInt) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002901 if (f == NULL)
nethercote66b3af62004-09-11 13:06:55 +00002902 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL3: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00002903 else
2904 SET_CLCALL_RETVAL(tid, f ( arg[2], arg[3], arg[4] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002905 break;
2906 }
2907
nethercote7cc9c232004-01-21 15:08:04 +00002908      /* Note: for tools that replace malloc() et al, we want to call
njn3e884182003-04-15 13:03:23 +00002909         the replacement versions.  For those that don't, we want to call
 2910         VG_(cli_malloc)() et al.  We do this by calling SK_(malloc)(), which
nethercote3ced0e32004-01-26 14:50:45 +00002911         malloc-replacing tools must provide; the default definition of
 2912         SK_(malloc)() simply calls VG_(cli_malloc)(). */
njn3e884182003-04-15 13:03:23 +00002913
2914 /* Note: for MALLOC and FREE, must set the appropriate "lock"... see
2915 the comment in vg_defaults.c/SK_(malloc)() for why. */
sewardj124ca2a2002-06-20 10:19:38 +00002916 case VG_USERREQ__MALLOC:
njn3e884182003-04-15 13:03:23 +00002917 VG_(sk_malloc_called_by_scheduler) = True;
njnd3040452003-05-19 15:04:06 +00002918 SET_PTHREQ_RETVAL(
njn72718642003-07-24 08:45:32 +00002919 tid, (UInt)SK_(malloc) ( arg[1] )
sewardj124ca2a2002-06-20 10:19:38 +00002920 );
njn3e884182003-04-15 13:03:23 +00002921 VG_(sk_malloc_called_by_scheduler) = False;
sewardj124ca2a2002-06-20 10:19:38 +00002922 break;
2923
2924 case VG_USERREQ__FREE:
njn3e884182003-04-15 13:03:23 +00002925 VG_(sk_malloc_called_by_scheduler) = True;
njn72718642003-07-24 08:45:32 +00002926 SK_(free) ( (void*)arg[1] );
njn3e884182003-04-15 13:03:23 +00002927 VG_(sk_malloc_called_by_scheduler) = False;
njnd3040452003-05-19 15:04:06 +00002928 SET_PTHREQ_RETVAL(tid, 0); /* irrelevant */
sewardj124ca2a2002-06-20 10:19:38 +00002929 break;
2930
sewardj124ca2a2002-06-20 10:19:38 +00002931 case VG_USERREQ__PTHREAD_GET_THREADID:
njnd3040452003-05-19 15:04:06 +00002932 SET_PTHREQ_RETVAL(tid, tid);
sewardj124ca2a2002-06-20 10:19:38 +00002933 break;
2934
2935 case VG_USERREQ__RUNNING_ON_VALGRIND:
njnd3040452003-05-19 15:04:06 +00002936 SET_CLREQ_RETVAL(tid, 1);
sewardj124ca2a2002-06-20 10:19:38 +00002937 break;
2938
2939 case VG_USERREQ__GET_PTHREAD_TRACE_LEVEL:
njnd3040452003-05-19 15:04:06 +00002940 SET_PTHREQ_RETVAL(tid, VG_(clo_trace_pthread_level));
sewardj124ca2a2002-06-20 10:19:38 +00002941 break;
2942
2943 case VG_USERREQ__READ_MILLISECOND_TIMER:
njnd3040452003-05-19 15:04:06 +00002944 SET_PTHREQ_RETVAL(tid, VG_(read_millisecond_timer)());
sewardj124ca2a2002-06-20 10:19:38 +00002945 break;
2946
2947 /* Some of these may make thread tid non-runnable, but the
2948 scheduler checks for that on return from this function. */
2949 case VG_USERREQ__PTHREAD_MUTEX_LOCK:
thughese321d492004-10-17 15:00:20 +00002950 do_pthread_mutex_lock( tid, False, (void *)(arg[1]), 0xFFFFFFFF );
2951 break;
2952
2953 case VG_USERREQ__PTHREAD_MUTEX_TIMEDLOCK:
2954 do_pthread_mutex_lock( tid, False, (void *)(arg[1]), arg[2] );
sewardj124ca2a2002-06-20 10:19:38 +00002955 break;
2956
2957 case VG_USERREQ__PTHREAD_MUTEX_TRYLOCK:
thughese321d492004-10-17 15:00:20 +00002958 do_pthread_mutex_lock( tid, True, (void *)(arg[1]), 0xFFFFFFFF );
sewardj124ca2a2002-06-20 10:19:38 +00002959 break;
2960
2961 case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
2962 do_pthread_mutex_unlock( tid, (void *)(arg[1]) );
2963 break;
2964
sewardj00a66b12002-10-12 16:42:35 +00002965 case VG_USERREQ__PTHREAD_GETSPECIFIC_PTR:
2966 do_pthread_getspecific_ptr ( tid );
sewardj124ca2a2002-06-20 10:19:38 +00002967 break;
2968
2969 case VG_USERREQ__SET_CANCELTYPE:
2970 do__set_canceltype ( tid, arg[1] );
2971 break;
2972
2973 case VG_USERREQ__CLEANUP_PUSH:
2974 do__cleanup_push ( tid, (CleanupEntry*)(arg[1]) );
2975 break;
2976
2977 case VG_USERREQ__CLEANUP_POP:
2978 do__cleanup_pop ( tid, (CleanupEntry*)(arg[1]) );
2979 break;
2980
2981 case VG_USERREQ__TESTCANCEL:
2982 do__testcancel ( tid );
2983 break;
2984
sewardje663cb92002-04-12 10:26:32 +00002985 case VG_USERREQ__PTHREAD_JOIN:
2986 do_pthread_join( tid, arg[1], (void**)(arg[2]) );
2987 break;
2988
sewardj3b5d8862002-04-20 13:53:23 +00002989 case VG_USERREQ__PTHREAD_COND_WAIT:
2990 do_pthread_cond_wait( tid,
nethercote1f0173b2004-02-28 15:40:36 +00002991 (vg_pthread_cond_t *)(arg[1]),
2992 (vg_pthread_mutex_t *)(arg[2]),
sewardj5f07b662002-04-23 16:52:51 +00002993 0xFFFFFFFF /* no timeout */ );
2994 break;
2995
2996 case VG_USERREQ__PTHREAD_COND_TIMEDWAIT:
2997 do_pthread_cond_wait( tid,
nethercote1f0173b2004-02-28 15:40:36 +00002998 (vg_pthread_cond_t *)(arg[1]),
2999 (vg_pthread_mutex_t *)(arg[2]),
sewardj5f07b662002-04-23 16:52:51 +00003000 arg[3] /* timeout millisecond point */ );
sewardj3b5d8862002-04-20 13:53:23 +00003001 break;
3002
3003 case VG_USERREQ__PTHREAD_COND_SIGNAL:
3004 do_pthread_cond_signal_or_broadcast(
3005 tid,
3006 False, /* signal, not broadcast */
nethercote1f0173b2004-02-28 15:40:36 +00003007 (vg_pthread_cond_t *)(arg[1]) );
sewardj3b5d8862002-04-20 13:53:23 +00003008 break;
3009
3010 case VG_USERREQ__PTHREAD_COND_BROADCAST:
3011 do_pthread_cond_signal_or_broadcast(
3012 tid,
3013 True, /* broadcast, not signal */
nethercote1f0173b2004-02-28 15:40:36 +00003014 (vg_pthread_cond_t *)(arg[1]) );
sewardj3b5d8862002-04-20 13:53:23 +00003015 break;
3016
sewardj00a66b12002-10-12 16:42:35 +00003017 case VG_USERREQ__PTHREAD_KEY_VALIDATE:
3018 do_pthread_key_validate ( tid,
3019 (pthread_key_t)(arg[1]) );
3020 break;
3021
sewardj5f07b662002-04-23 16:52:51 +00003022 case VG_USERREQ__PTHREAD_KEY_CREATE:
3023 do_pthread_key_create ( tid,
3024 (pthread_key_t*)(arg[1]),
3025 (void(*)(void*))(arg[2]) );
3026 break;
3027
3028 case VG_USERREQ__PTHREAD_KEY_DELETE:
3029 do_pthread_key_delete ( tid,
3030 (pthread_key_t)(arg[1]) );
3031 break;
3032
sewardj00a66b12002-10-12 16:42:35 +00003033 case VG_USERREQ__PTHREAD_SETSPECIFIC_PTR:
3034 do_pthread_setspecific_ptr ( tid,
3035 (void**)(arg[1]) );
sewardj5f07b662002-04-23 16:52:51 +00003036 break;
3037
sewardjb48e5002002-05-13 00:16:03 +00003038 case VG_USERREQ__PTHREAD_SIGMASK:
3039 do_pthread_sigmask ( tid,
3040 arg[1],
3041 (vki_ksigset_t*)(arg[2]),
3042 (vki_ksigset_t*)(arg[3]) );
3043 break;
3044
sewardj018f7622002-05-15 21:13:39 +00003045 case VG_USERREQ__PTHREAD_KILL:
3046 do_pthread_kill ( tid, arg[1], arg[2] );
3047 break;
3048
sewardjff42d1d2002-05-22 13:17:31 +00003049 case VG_USERREQ__PTHREAD_YIELD:
3050 do_pthread_yield ( tid );
sewardj18a62ff2002-07-12 22:30:51 +00003051 /* On return from do_client_request(), the scheduler will
3052 select a new thread to run. */
sewardjff42d1d2002-05-22 13:17:31 +00003053 break;
sewardj018f7622002-05-15 21:13:39 +00003054
sewardj7989d0c2002-05-28 11:00:01 +00003055 case VG_USERREQ__SET_CANCELSTATE:
3056 do__set_cancelstate ( tid, arg[1] );
3057 break;
3058
sewardj7989d0c2002-05-28 11:00:01 +00003059 case VG_USERREQ__SET_OR_GET_DETACH:
3060 do__set_or_get_detach ( tid, arg[1], arg[2] );
3061 break;
3062
3063 case VG_USERREQ__SET_CANCELPEND:
3064 do__set_cancelpend ( tid, arg[1], (void(*)(void*))arg[2] );
3065 break;
3066
3067 case VG_USERREQ__WAIT_JOINER:
3068 do__wait_joiner ( tid, (void*)arg[1] );
3069 break;
3070
3071 case VG_USERREQ__QUIT:
3072 do__quit ( tid );
3073 break;
3074
3075 case VG_USERREQ__APPLY_IN_NEW_THREAD:
3076 do__apply_in_new_thread ( tid, (void*(*)(void*))arg[1],
thughesdaa34562004-06-27 12:48:53 +00003077 (void*)arg[2], (StackInfo*)(arg[3]) );
sewardj7989d0c2002-05-28 11:00:01 +00003078 break;
3079
sewardj870497a2002-05-29 01:06:47 +00003080 case VG_USERREQ__GET_KEY_D_AND_S:
3081 do__get_key_destr_and_spec ( tid,
3082 (pthread_key_t)arg[1],
3083 (CleanupEntry*)arg[2] );
3084 break;
3085
sewardjef037c72002-05-30 00:40:03 +00003086 case VG_USERREQ__NUKE_OTHER_THREADS:
3087 VG_(nuke_all_threads_except) ( tid );
njnd3040452003-05-19 15:04:06 +00003088 SET_PTHREQ_RETVAL(tid, 0);
sewardjef037c72002-05-30 00:40:03 +00003089 break;
3090
sewardj4dced352002-06-04 22:54:20 +00003091 case VG_USERREQ__PTHREAD_ERROR:
njn25e49d8e72002-09-23 09:36:25 +00003092 VG_(record_pthread_error)( tid, (Char*)(arg[1]) );
njnd3040452003-05-19 15:04:06 +00003093 SET_PTHREQ_RETVAL(tid, 0);
sewardj4dced352002-06-04 22:54:20 +00003094 break;
3095
sewardj2cb00342002-06-28 01:46:26 +00003096 case VG_USERREQ__SET_FHSTACK_USED:
3097 do__set_fhstack_used( tid, (Int)(arg[1]) );
3098 break;
3099
3100 case VG_USERREQ__GET_FHSTACK_USED:
3101 do__get_fhstack_used( tid );
3102 break;
3103
3104 case VG_USERREQ__SET_FHSTACK_ENTRY:
3105 do__set_fhstack_entry( tid, (Int)(arg[1]),
3106 (ForkHandlerEntry*)(arg[2]) );
3107 break;
3108
3109 case VG_USERREQ__GET_FHSTACK_ENTRY:
3110 do__get_fhstack_entry( tid, (Int)(arg[1]),
3111 (ForkHandlerEntry*)(arg[2]) );
3112 break;
3113
sewardj77e466c2002-04-14 02:29:29 +00003114 case VG_USERREQ__SIGNAL_RETURNS:
3115 handle_signal_return(tid);
3116 break;
fitzhardinge98abfc72003-12-16 02:05:15 +00003117
thughesdaa34562004-06-27 12:48:53 +00003118 case VG_USERREQ__GET_STACK_INFO:
3119 do__get_stack_info( tid, (Int)(arg[1]), (StackInfo*)(arg[2]) );
3120 break;
3121
fitzhardinge98abfc72003-12-16 02:05:15 +00003122
3123 case VG_USERREQ__GET_SIGRT_MIN:
3124 SET_PTHREQ_RETVAL(tid, VG_(sig_rtmin));
3125 break;
3126
3127 case VG_USERREQ__GET_SIGRT_MAX:
3128 SET_PTHREQ_RETVAL(tid, VG_(sig_rtmax));
3129 break;
3130
3131 case VG_USERREQ__ALLOC_RTSIG:
3132 SET_PTHREQ_RETVAL(tid, VG_(sig_alloc_rtsig)((Int)arg[1]));
3133 break;
3134
fitzhardinge39de4b42003-10-31 07:12:21 +00003135 case VG_USERREQ__PRINTF: {
3136 int count =
nethercote3e901a22004-09-11 13:17:02 +00003137 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] );
fitzhardinge39de4b42003-10-31 07:12:21 +00003138 SET_CLREQ_RETVAL( tid, count );
3139 break; }
3140
fitzhardinge98abfc72003-12-16 02:05:15 +00003141
fitzhardinge39de4b42003-10-31 07:12:21 +00003142 case VG_USERREQ__INTERNAL_PRINTF: {
3143 int count =
nethercote3e901a22004-09-11 13:17:02 +00003144 VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (void*)arg[2] );
fitzhardinge39de4b42003-10-31 07:12:21 +00003145 SET_CLREQ_RETVAL( tid, count );
3146 break; }
3147
3148 case VG_USERREQ__PRINTF_BACKTRACE: {
3149 ExeContext *e = VG_(get_ExeContext)( tid );
3150 int count =
nethercote3e901a22004-09-11 13:17:02 +00003151 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] );
nethercote86c5dcb2004-09-05 21:32:37 +00003152 VG_(mini_stack_dump)(e->ips, VG_(clo_backtrace_size));
fitzhardinge39de4b42003-10-31 07:12:21 +00003153 SET_CLREQ_RETVAL( tid, count );
3154 break; }
3155
3156 case VG_USERREQ__INTERNAL_PRINTF_BACKTRACE: {
3157 ExeContext *e = VG_(get_ExeContext)( tid );
3158 int count =
nethercote3e901a22004-09-11 13:17:02 +00003159 VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (void*)arg[2] );
nethercote86c5dcb2004-09-05 21:32:37 +00003160 VG_(mini_stack_dump)(e->ips, VG_(clo_backtrace_size));
fitzhardinge39de4b42003-10-31 07:12:21 +00003161 SET_CLREQ_RETVAL( tid, count );
3162 break; }
3163
fitzhardinge98abfc72003-12-16 02:05:15 +00003164 case VG_USERREQ__GET_MALLOCFUNCS: {
3165 struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];
3166
3167 info->sk_malloc = (Addr)SK_(malloc);
3168 info->sk_calloc = (Addr)SK_(calloc);
3169 info->sk_realloc = (Addr)SK_(realloc);
3170 info->sk_memalign = (Addr)SK_(memalign);
3171 info->sk___builtin_new = (Addr)SK_(__builtin_new);
3172 info->sk___builtin_vec_new = (Addr)SK_(__builtin_vec_new);
3173 info->sk_free = (Addr)SK_(free);
3174 info->sk___builtin_delete = (Addr)SK_(__builtin_delete);
3175 info->sk___builtin_vec_delete = (Addr)SK_(__builtin_vec_delete);
3176
3177 info->arena_payload_szB = (Addr)VG_(arena_payload_szB);
3178
3179 info->clo_sloppy_malloc = VG_(clo_sloppy_malloc);
3180 info->clo_trace_malloc = VG_(clo_trace_malloc);
3181
3182 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
3183
3184 break;
3185 }
3186
njn25e49d8e72002-09-23 09:36:25 +00003187 /* Requests from the client program */
3188
3189 case VG_USERREQ__DISCARD_TRANSLATIONS:
3190 if (VG_(clo_verbosity) > 2)
3191 VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
3192 " addr %p, len %d\n",
3193 (void*)arg[1], arg[2] );
3194
sewardj97ad5522003-05-04 12:32:56 +00003195 VG_(invalidate_translations)( arg[1], arg[2], True );
njn25e49d8e72002-09-23 09:36:25 +00003196
njnd3040452003-05-19 15:04:06 +00003197 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
njn25e49d8e72002-09-23 09:36:25 +00003198 break;
3199
njn47363ab2003-04-21 13:24:40 +00003200 case VG_USERREQ__COUNT_ERRORS:
nethercotef2b11482004-08-02 12:36:01 +00003201 SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() );
njn47363ab2003-04-21 13:24:40 +00003202 break;
3203
sewardje663cb92002-04-12 10:26:32 +00003204 default:
njn25e49d8e72002-09-23 09:36:25 +00003205 if (VG_(needs).client_requests) {
sewardj34042512002-10-22 04:14:35 +00003206 UInt ret;
3207
njn25e49d8e72002-09-23 09:36:25 +00003208 if (VG_(clo_verbosity) > 2)
fitzhardinge98abfc72003-12-16 02:05:15 +00003209 VG_(printf)("client request: code %x, addr %p, len %d\n",
njn25e49d8e72002-09-23 09:36:25 +00003210 arg[0], (void*)arg[1], arg[2] );
3211
njn72718642003-07-24 08:45:32 +00003212 if (SK_(handle_client_request) ( tid, arg, &ret ))
njnd3040452003-05-19 15:04:06 +00003213 SET_CLREQ_RETVAL(tid, ret);
njn25e49d8e72002-09-23 09:36:25 +00003214 } else {
sewardj34042512002-10-22 04:14:35 +00003215 static Bool whined = False;
3216
3217 if (!whined) {
nethercote7cc9c232004-01-21 15:08:04 +00003218 // Allow for requests in core, but defined by tools, which
njnd7994182003-10-02 13:44:04 +00003219 // have 0 and 0 in their two high bytes.
3220 Char c1 = (arg[0] >> 24) & 0xff;
3221 Char c2 = (arg[0] >> 16) & 0xff;
3222 if (c1 == 0) c1 = '_';
3223 if (c2 == 0) c2 = '_';
sewardj34042512002-10-22 04:14:35 +00003224 VG_(message)(Vg_UserMsg, "Warning:\n"
njnd7994182003-10-02 13:44:04 +00003225 " unhandled client request: 0x%x (%c%c+0x%x). Perhaps\n"
3226 " VG_(needs).client_requests should be set?\n",
3227 arg[0], c1, c2, arg[0] & 0xffff);
sewardj34042512002-10-22 04:14:35 +00003228 whined = True;
3229 }
njn25e49d8e72002-09-23 09:36:25 +00003230 }
sewardje663cb92002-04-12 10:26:32 +00003231 break;
3232 }
3233}
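/* Illustrative only: the shape of the argument block that reaches
   do_client_request() above.  arg[0] carries the request code and
   arg[1..] the request-specific parameters -- e.g. for
   VG_USERREQ__PTHREAD_COND_TIMEDWAIT, arg[1] is the condition variable,
   arg[2] the mutex and arg[3] the ending millisecond.  How the block is
   delivered (the client-request magic sequence in Valgrind's valgrind.h
   header) and how the result comes back (the thread's %EDX, set via
   SET_PTHREQ_RETVAL and friends) are outside this function. */
#if 0
   UInt arg[4];
   arg[0] = VG_USERREQ__PTHREAD_COND_TIMEDWAIT;
   arg[1] = (UInt)cond;
   arg[2] = (UInt)mutex;
   arg[3] = ms_end;        /* 0xFFFFFFFF would mean "no timeout" */
#endif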
3234
3235
sewardj6072c362002-04-19 14:40:57 +00003236/* ---------------------------------------------------------------------
3237 Sanity checking.
3238 ------------------------------------------------------------------ */
3239
3240/* Internal consistency checks on the sched/pthread structures. */
3241static
3242void scheduler_sanity ( void )
3243{
nethercote1f0173b2004-02-28 15:40:36 +00003244 vg_pthread_mutex_t* mx;
3245 vg_pthread_cond_t* cv;
sewardj6072c362002-04-19 14:40:57 +00003246 Int i;
jsgf855d93d2003-10-13 22:26:55 +00003247 struct timeout* top;
3248 UInt lasttime = 0;
3249
3250 for(top = timeouts; top != NULL; top = top->next) {
3251 vg_assert(top->time >= lasttime);
nethercote36881a22004-08-04 14:03:16 +00003252 vg_assert(is_valid_or_empty_tid(top->tid));
jsgf855d93d2003-10-13 22:26:55 +00003253
3254#if 0
3255 /* assert timeout entry is either stale, or associated with a
3256 thread in the right state
3257
3258 XXX disable for now - can be stale, but times happen to match
3259 */
3260 vg_assert(VG_(threads)[top->tid].awaken_at != top->time ||
3261 VG_(threads)[top->tid].status == VgTs_Sleeping ||
thughese321d492004-10-17 15:00:20 +00003262 VG_(threads)[top->tid].status == VgTs_WaitMX ||
jsgf855d93d2003-10-13 22:26:55 +00003263 VG_(threads)[top->tid].status == VgTs_WaitCV);
3264#endif
3265
3266 lasttime = top->time;
3267 }
sewardj5f07b662002-04-23 16:52:51 +00003268
sewardj6072c362002-04-19 14:40:57 +00003269 /* VG_(printf)("scheduler_sanity\n"); */
3270 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00003271 mx = VG_(threads)[i].associated_mx;
3272 cv = VG_(threads)[i].associated_cv;
3273 if (VG_(threads)[i].status == VgTs_WaitMX) {
sewardjbf290b92002-05-01 02:28:01 +00003274 /* If we're waiting on a MX: (1) the mx is not null, (2, 3)
3275 it's actually held by someone, since otherwise this thread
3276 is deadlocked, (4) the mutex's owner is not us, since
3277 otherwise this thread is also deadlocked. The logic in
3278 do_pthread_mutex_lock rejects attempts by a thread to lock
3279 a (non-recursive) mutex which it already owns.
sewardj05553872002-04-20 20:53:17 +00003280
sewardjbf290b92002-05-01 02:28:01 +00003281 (2) has been seen to fail sometimes. I don't know why.
3282 Possibly to do with signals. */
sewardj3b5d8862002-04-20 13:53:23 +00003283 vg_assert(cv == NULL);
sewardj05553872002-04-20 20:53:17 +00003284 /* 1 */ vg_assert(mx != NULL);
nethercote1f0173b2004-02-28 15:40:36 +00003285 /* 2 */ vg_assert(mx->__vg_m_count > 0);
3286 /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__vg_m_owner));
thughese321d492004-10-17 15:00:20 +00003287 /* 4 */ vg_assert((UInt)i != (ThreadId)mx->__vg_m_owner ||
3288 VG_(threads)[i].awaken_at != 0xFFFFFFFF);
sewardj3b5d8862002-04-20 13:53:23 +00003289 } else
sewardj018f7622002-05-15 21:13:39 +00003290 if (VG_(threads)[i].status == VgTs_WaitCV) {
sewardj3b5d8862002-04-20 13:53:23 +00003291 vg_assert(cv != NULL);
3292 vg_assert(mx != NULL);
sewardj6072c362002-04-19 14:40:57 +00003293 } else {
thughesf7269232004-10-16 16:17:06 +00003294 vg_assert(cv == NULL);
3295 vg_assert(mx == NULL);
sewardj6072c362002-04-19 14:40:57 +00003296 }
sewardjbf290b92002-05-01 02:28:01 +00003297
sewardj018f7622002-05-15 21:13:39 +00003298 if (VG_(threads)[i].status != VgTs_Empty) {
sewardjbf290b92002-05-01 02:28:01 +00003299 Int
sewardj018f7622002-05-15 21:13:39 +00003300 stack_used = (Addr)VG_(threads)[i].stack_highest_word
nethercoteb8ef9d82004-09-05 22:02:33 +00003301 - (Addr)ARCH_STACK_PTR(VG_(threads)[i].arch);
thughesdaa34562004-06-27 12:48:53 +00003302 Int
3303 stack_avail = VG_(threads)[i].stack_size
3304 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
3305 - VG_(threads)[i].stack_guard_size;
fitzhardinge98c4dc02004-03-16 08:27:29 +00003306 /* This test is a bit bogus - it doesn't take into account
3307 alternate signal stacks, for a start. Also, if a thread
 3308         has its stack pointer somewhere strange, killing Valgrind
3309 isn't the right answer. */
3310 if (0 && i > 1 /* not the root thread */
thughesdaa34562004-06-27 12:48:53 +00003311 && stack_used >= stack_avail) {
sewardjbf290b92002-05-01 02:28:01 +00003312 VG_(message)(Vg_UserMsg,
njn25e49d8e72002-09-23 09:36:25 +00003313 "Error: STACK OVERFLOW: "
sewardjbf290b92002-05-01 02:28:01 +00003314 "thread %d: stack used %d, available %d",
thughesdaa34562004-06-27 12:48:53 +00003315 i, stack_used, stack_avail );
sewardjbf290b92002-05-01 02:28:01 +00003316 VG_(message)(Vg_UserMsg,
3317 "Terminating Valgrind. If thread(s) "
3318 "really need more stack, increase");
3319 VG_(message)(Vg_UserMsg,
rjwalsh7109a8c2004-09-02 00:31:02 +00003320 "VG_PTHREAD_STACK_SIZE in core.h and recompile.");
sewardjbf290b92002-05-01 02:28:01 +00003321 VG_(exit)(1);
3322 }
3323 }
sewardj6072c362002-04-19 14:40:57 +00003324 }
sewardj5f07b662002-04-23 16:52:51 +00003325
3326 for (i = 0; i < VG_N_THREAD_KEYS; i++) {
3327 if (!vg_thread_keys[i].inuse)
3328 vg_assert(vg_thread_keys[i].destructor == NULL);
3329 }
sewardj6072c362002-04-19 14:40:57 +00003330}
3331
3332
sewardje663cb92002-04-12 10:26:32 +00003333/*--------------------------------------------------------------------*/
3334/*--- end vg_scheduler.c ---*/
3335/*--------------------------------------------------------------------*/