
/*--------------------------------------------------------------------*/
/*--- A user-space pthreads implementation.        vg_scheduler.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an extensible x86 protected-mode
   emulator for monitoring program execution on x86-Unixes.

   Copyright (C) 2000-2004 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "valgrind.h"   /* for VG_USERREQ__RUNNING_ON_VALGRIND and
                           VG_USERREQ__DISCARD_TRANSLATIONS, and others */
#include "core.h"


/* ---------------------------------------------------------------------
   Types and globals for the scheduler.
   ------------------------------------------------------------------ */

/* ThreadId and ThreadState are defined in core.h. */

/* Globals.  A statically allocated array of threads.  NOTE: [0] is
   never used, to simplify the simulation of initialisers for
   LinuxThreads. */
ThreadState VG_(threads)[VG_N_THREADS];

/* The process' fork-handler stack. */
static Int              vg_fhstack_used = 0;
static ForkHandlerEntry vg_fhstack[VG_N_FORKHANDLERSTACK];


/* The tid of the thread currently in VG_(baseBlock). */
static ThreadId vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;

/* The tid currently in baseBlock, or the tid that was in baseBlock
   before we saved it out; this is only updated when a new thread is
   loaded into the baseBlock. */
static ThreadId vg_tid_last_in_baseBlock = VG_INVALID_THREADID;

/* vg_oursignalhandler() might longjmp().  Here's the jmp_buf. */
static jmp_buf scheduler_jmpbuf;
/* This says whether scheduler_jmpbuf is actually valid.  Needed so
   that our signal handler doesn't longjmp when the buffer isn't
   actually valid. */
static Bool    scheduler_jmpbuf_valid = False;
/* ... and if so, here's the signal which caused it to do so. */
static Int     longjmpd_on_signal;
/* If the current thread gets a synchronous unresumable signal, then
   its details are placed here by the signal handler, to be passed to
   the application's signal handler later on. */
static vki_siginfo_t unresumable_siginfo;

/* If != VG_INVALID_THREADID, this is the preferred tid to schedule */
static ThreadId prefer_sched = VG_INVALID_THREADID;

/* Keeping track of keys. */
typedef
   struct {
      /* Has this key been allocated ? */
      Bool inuse;
      /* If .inuse==True, records the address of the associated
         destructor, or NULL if none. */
      void (*destructor)(void*);
   }
   ThreadKeyState;

/* And our array of thread keys. */
static ThreadKeyState vg_thread_keys[VG_N_THREAD_KEYS];

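/* Note: a ThreadKey handed back to the client is, in effect, just an
   index into vg_thread_keys[]. */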
typedef UInt ThreadKey;

/* The scheduler needs to know the address of __libc_freeres_wrapper
   so that it can be called at program exit. */
static Addr __libc_freeres_wrapper;

/* Forwards */
static void do_client_request ( ThreadId tid, UWord* args );
static void scheduler_sanity ( void );
static void do_pthread_mutex_timedlock_TIMEOUT ( ThreadId tid );
static void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid );
static void maybe_rendezvous_joiners_and_joinees ( void );

/* Stats. */
static UInt n_scheduling_events_MINOR = 0;
static UInt n_scheduling_events_MAJOR = 0;

void VG_(print_scheduler_stats)(void)
{
   VG_(message)(Vg_DebugMsg,
      "           %d/%d major/minor sched events.",
      n_scheduling_events_MAJOR, n_scheduling_events_MINOR);
}

/* ---------------------------------------------------------------------
   Helper functions for the scheduler.
   ------------------------------------------------------------------ */

__inline__
Bool VG_(is_valid_tid) ( ThreadId tid )
{
   /* tid is unsigned, hence no < 0 test. */
   if (tid == 0) return False;
   if (tid >= VG_N_THREADS) return False;
   if (VG_(threads)[tid].status == VgTs_Empty) return False;
   return True;
}


__inline__
Bool is_valid_or_empty_tid ( ThreadId tid )
{
   /* tid is unsigned, hence no < 0 test. */
   if (tid == 0) return False;
   if (tid >= VG_N_THREADS) return False;
   return True;
}


/* For constructing error messages only: try and identify a thread
   whose stack satisfies the predicate p, or return VG_INVALID_THREADID
   if none do.  A small complication is dealing with any currently
   VG_(baseBlock)-resident thread.
*/
ThreadId VG_(first_matching_thread_stack)
              ( Bool (*p) ( Addr stack_min, Addr stack_max, void* d ),
                void* d )
{
   ThreadId tid, tid_to_skip;

   tid_to_skip = VG_INVALID_THREADID;

   /* First check to see if there's a currently-loaded thread in
      VG_(baseBlock). */
   if (vg_tid_currently_in_baseBlock != VG_INVALID_THREADID) {
      tid = vg_tid_currently_in_baseBlock;
      if ( p ( VG_(baseBlock)[VGOFF_STACK_PTR],
               VG_(threads)[tid].stack_highest_word, d ) )
         return tid;
      else
         tid_to_skip = tid;
   }

   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (VG_(threads)[tid].status == VgTs_Empty) continue;
      if (tid == tid_to_skip) continue;
      if ( p ( ARCH_STACK_PTR(VG_(threads)[tid].arch),
               VG_(threads)[tid].stack_highest_word, d ) )
         return tid;
   }
   return VG_INVALID_THREADID;
}


/* Print the scheduler status. */
void VG_(pp_sched_status) ( void )
{
   Int i;
   VG_(printf)("\nsched status:\n");
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty) continue;
      VG_(printf)("\nThread %d: status = ", i);
      switch (VG_(threads)[i].status) {
         case VgTs_Runnable:   VG_(printf)("Runnable"); break;
         case VgTs_WaitJoinee: VG_(printf)("WaitJoinee(%d)",
                                           VG_(threads)[i].joiner_jee_tid);
                               break;
         case VgTs_WaitJoiner: VG_(printf)("WaitJoiner"); break;
         case VgTs_Sleeping:   VG_(printf)("Sleeping"); break;
         case VgTs_WaitMX:     VG_(printf)("WaitMX"); break;
         case VgTs_WaitCV:     VG_(printf)("WaitCV"); break;
         case VgTs_WaitSys:    VG_(printf)("WaitSys"); break;
         default: VG_(printf)("???"); break;
      }
      VG_(printf)(", associated_mx = %p, associated_cv = %p\n",
                  VG_(threads)[i].associated_mx,
                  VG_(threads)[i].associated_cv );
      VG_(pp_ExeContext)(
         VG_(get_ExeContext2)( ARCH_INSTR_PTR(VG_(threads)[i].arch),
                               ARCH_FRAME_PTR(VG_(threads)[i].arch),
                               ARCH_STACK_PTR(VG_(threads)[i].arch),
                               VG_(threads)[i].stack_highest_word)
      );
   }
   VG_(printf)("\n");
}

static
void print_sched_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, "  SCHED[%d]: %s", tid, what );
}

static
void print_pthread_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, "PTHREAD[%d]: %s", tid, what );
}

static
Char* name_of_sched_event ( UInt event )
{
   switch (event) {
      case VG_TRC_EBP_JMP_SYSCALL:    return "SYSCALL";
      case VG_TRC_EBP_JMP_CLIENTREQ:  return "CLIENTREQ";
      case VG_TRC_EBP_JMP_YIELD:      return "YIELD";
      case VG_TRC_INNER_COUNTERZERO:  return "COUNTERZERO";
      case VG_TRC_INNER_FASTMISS:     return "FASTMISS";
      case VG_TRC_UNRESUMABLE_SIGNAL: return "FATALSIGNAL";
      default:                        return "??UNKNOWN??";
   }
}


/* Allocate a completely empty ThreadState record. */
static
ThreadId vg_alloc_ThreadState ( void )
{
   Int i;
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty)
         return i;
   }
   VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
   VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
   VG_(core_panic)("VG_N_THREADS is too low");
   /*NOTREACHED*/
}

ThreadState *VG_(get_ThreadState)(ThreadId tid)
{
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   return &VG_(threads)[tid];
}

Bool VG_(is_running_thread)(ThreadId tid)
{
   ThreadId curr = VG_(get_current_tid)();
   return (curr == tid && VG_INVALID_THREADID != tid);
}

ThreadId VG_(get_current_tid) ( void )
{
   if (!VG_(is_valid_tid)(vg_tid_currently_in_baseBlock))
      return VG_INVALID_THREADID;
   return vg_tid_currently_in_baseBlock;
}

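/* Like VG_(get_current_tid), except that if no thread is currently
   loaded in VG_(baseBlock), we return the one which was most recently
   loaded there (see vg_tid_last_in_baseBlock above). */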
ThreadId VG_(get_current_or_recent_tid) ( void )
{
   vg_assert(vg_tid_currently_in_baseBlock == vg_tid_last_in_baseBlock ||
             vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);
   vg_assert(VG_(is_valid_tid)(vg_tid_last_in_baseBlock));

   return vg_tid_last_in_baseBlock;
}

/* Copy the saved state of a thread into VG_(baseBlock), ready for it
   to be run. */
static void load_thread_state ( ThreadId tid )
{
   vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);

   VGA_(load_state)(&VG_(threads)[tid].arch, tid);

   vg_tid_currently_in_baseBlock = tid;
   vg_tid_last_in_baseBlock = tid;
}


/* Copy the state of a thread from VG_(baseBlock), presumably after it
   has been descheduled.  For sanity-check purposes, fill the vacated
   VG_(baseBlock) with garbage so as to make the system more likely to
   fail quickly if we erroneously continue to poke around inside
   VG_(baseBlock) without first doing a load_thread_state().
*/
static void save_thread_state ( ThreadId tid )
{
   vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);

   VGA_(save_state)(&VG_(threads)[tid].arch, tid);

   vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;
}


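/* Called from the signal machinery when a thread cannot be resumed
   after a signal: stash the siginfo in unresumable_siginfo and longjmp
   back into run_thread_for_a_while(), which will then report
   VG_TRC_UNRESUMABLE_SIGNAL to the scheduler. */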
void VG_(resume_scheduler)(Int sigNo, vki_siginfo_t *info)
{
   if (scheduler_jmpbuf_valid) {
      /* Can't continue; must longjmp back to the scheduler and thus
         enter the sighandler immediately. */
      VG_(memcpy)(&unresumable_siginfo, info, sizeof(vki_siginfo_t));

      longjmpd_on_signal = sigNo;
      __builtin_longjmp(scheduler_jmpbuf,1);
   }
}

/* Run the thread tid for a while, and return a VG_TRC_* value to the
   scheduler indicating what happened. */
static
UInt run_thread_for_a_while ( ThreadId tid )
{
   volatile UInt trc = 0;
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   vg_assert(!scheduler_jmpbuf_valid);

   VGP_PUSHCC(VgpRun);
   load_thread_state ( tid );

   /* there should be no undealt-with signals */
   vg_assert(unresumable_siginfo.si_signo == 0);

   if (__builtin_setjmp(scheduler_jmpbuf) == 0) {
      /* try this ... */
      scheduler_jmpbuf_valid = True;
      trc = VG_(run_innerloop)();
      scheduler_jmpbuf_valid = False;
      /* We get here if the client didn't take a fault. */
   } else {
      /* We get here if the client took a fault, which caused our
         signal handler to longjmp. */
      scheduler_jmpbuf_valid = False;
      vg_assert(trc == 0);
      trc = VG_TRC_UNRESUMABLE_SIGNAL;
   }

   vg_assert(!scheduler_jmpbuf_valid);

   save_thread_state ( tid );
   VGP_POPCC(VgpRun);
   return trc;
}


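/* Reset (most of) a thread slot to a pristine, empty state.  Note that
   the stack_* fields are not touched here; callers such as
   VG_(scheduler_init) set those up themselves. */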
static
void mostly_clear_thread_record ( ThreadId tid )
{
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   VGA_(clear_thread)(&VG_(threads)[tid].arch);
   VG_(threads)[tid].tid                  = tid;
   VG_(threads)[tid].status               = VgTs_Empty;
   VG_(threads)[tid].associated_mx        = NULL;
   VG_(threads)[tid].associated_cv        = NULL;
   VG_(threads)[tid].awaken_at            = 0;
   VG_(threads)[tid].joinee_retval        = NULL;
   VG_(threads)[tid].joiner_thread_return = NULL;
   VG_(threads)[tid].joiner_jee_tid       = VG_INVALID_THREADID;
   VG_(threads)[tid].detached             = False;
   VG_(threads)[tid].cancel_st   = True; /* PTHREAD_CANCEL_ENABLE */
   VG_(threads)[tid].cancel_ty   = True; /* PTHREAD_CANCEL_DEFERRED */
   VG_(threads)[tid].cancel_pend = NULL; /* not pending */
   VG_(threads)[tid].custack_used = 0;
   VG_(sigemptyset)(&VG_(threads)[tid].sig_mask);
   VG_(sigfillset)(&VG_(threads)[tid].eff_sig_mask);
   VG_(threads)[tid].sigqueue_head = 0;
   VG_(threads)[tid].sigqueue_tail = 0;
   VG_(threads)[tid].specifics_ptr = NULL;

   VG_(threads)[tid].syscallno  = -1;
   VG_(threads)[tid].sys_flags  = 0;
   VG_(threads)[tid].sys_pre_res = NULL;

   VG_(threads)[tid].proxy = NULL;

   /* start with no altstack */
   VG_(threads)[tid].altstack.ss_sp = (void *)0xdeadbeef;
   VG_(threads)[tid].altstack.ss_size = 0;
   VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
}



/* Initialise the scheduler.  Create a single "main" thread ready to
   run, with special ThreadId of one.  This is called at startup; the
   caller takes care to park the client's state in VG_(baseBlock).
*/
void VG_(scheduler_init) ( void )
{
   Int      i;
   ThreadId tid_main;

   for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
      mostly_clear_thread_record(i);
      VG_(threads)[i].stack_size           = 0;
      VG_(threads)[i].stack_base           = (Addr)NULL;
      VG_(threads)[i].stack_guard_size     = 0;
      VG_(threads)[i].stack_highest_word   = (Addr)NULL;
   }

   for (i = 0; i < VG_N_THREAD_KEYS; i++) {
      vg_thread_keys[i].inuse      = False;
      vg_thread_keys[i].destructor = NULL;
   }

   vg_fhstack_used = 0;

   /* Assert this is thread zero, which has certain magic
      properties. */
   tid_main = vg_alloc_ThreadState();
   vg_assert(tid_main == 1);
   VG_(threads)[tid_main].status = VgTs_Runnable;

   /* Copy VG_(baseBlock) state to tid_main's slot. */
   vg_tid_currently_in_baseBlock = tid_main;
   vg_tid_last_in_baseBlock = tid_main;

   VGA_(init_thread)(&VG_(threads)[tid_main].arch);
   save_thread_state ( tid_main );

   VG_(threads)[tid_main].stack_highest_word
      = VG_(clstk_end) - 4;
   VG_(threads)[tid_main].stack_base = VG_(clstk_base);
   VG_(threads)[tid_main].stack_size = VG_(client_rlimit_stack).rlim_cur;

   /* So now ... */
   vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);

   /* Not running client code right now. */
   scheduler_jmpbuf_valid = False;

   /* Proxy for main thread */
   VG_(proxy_create)(tid_main);
}



/* vthread tid is returning from a signal handler; modify its
   stack/regs accordingly. */
static
void handle_signal_return ( ThreadId tid )
{
   Bool restart_blocked_syscalls;
   struct vki_timespec * rem;

   vg_assert(VG_(is_valid_tid)(tid));

   restart_blocked_syscalls = VG_(signal_returns)(tid);

   /* If we were interrupted in the middle of a rendezvous
      then check the rendezvous hasn't completed while we
      were busy handling the signal. */
   if (VG_(threads)[tid].status == VgTs_WaitJoiner ||
       VG_(threads)[tid].status == VgTs_WaitJoinee ) {
      maybe_rendezvous_joiners_and_joinees();
   }

   /* If we were interrupted while waiting on a mutex then check that
      it hasn't been unlocked while we were busy handling the signal. */
   if (VG_(threads)[tid].status == VgTs_WaitMX &&
       VG_(threads)[tid].associated_mx->__vg_m_count == 0) {
      vg_pthread_mutex_t* mutex = VG_(threads)[tid].associated_mx;
      mutex->__vg_m_count = 1;
      mutex->__vg_m_owner = (/*_pthread_descr*/void*)tid;
      VG_(threads)[tid].status        = VgTs_Runnable;
      VG_(threads)[tid].associated_mx = NULL;
      /* m_edx already holds pth_mx_lock() success (0) */
   }

   if (restart_blocked_syscalls)
      /* Easy; we don't have to do anything. */
      return;

   if (VG_(threads)[tid].status == VgTs_Sleeping
       && PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_nanosleep) {
      /* We interrupted a nanosleep().  The right thing to do is to
         write the unused time to nanosleep's second param, but that's
         too much effort ... we just say that 1 nanosecond was not
         used, and return EINTR. */
      rem = (struct vki_timespec*)PLATFORM_SYSCALL_ARG2(VG_(threads)[tid].arch);
      if (rem != NULL) {
         rem->tv_sec = 0;
         rem->tv_nsec = 1;
      }
      SET_SYSCALL_RETVAL(tid, -VKI_EINTR);
      VG_(threads)[tid].status = VgTs_Runnable;
      return;
   }

   /* All other cases?  Just return. */
}


struct timeout {
   UInt     time;   /* time we should awaken */
   ThreadId tid;    /* thread which cares about this timeout */
   struct timeout *next;
};

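/* Head of the list of pending timeouts.  add_timeout() keeps it sorted
   by wakeup time (earliest first); idle() removes entries once they
   have expired. */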
static struct timeout *timeouts;

static void add_timeout(ThreadId tid, UInt time)
{
   struct timeout *t = VG_(arena_malloc)(VG_AR_CORE, sizeof(*t));
   struct timeout **prev, *tp;

   t->time = time;
   t->tid = tid;

   if (VG_(clo_trace_sched)) {
      Char msg_buf[100];
      VG_(sprintf)(msg_buf, "add_timeout: now=%u adding timeout at %u",
                   VG_(read_millisecond_timer)(), time);
      print_sched_event(tid, msg_buf);
   }

   for(tp = timeouts, prev = &timeouts;
       tp != NULL && tp->time < time;
       prev = &tp->next, tp = tp->next)
      ;
   t->next = tp;
   *prev = t;
}

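/* Start the syscall which thread tid has requested.  If VG_(pre_syscall)
   handles it synchronously, we complete it immediately and the thread
   stays Runnable; otherwise the thread is left in VgTs_WaitSys until
   its proxy LWP reports completion. */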
static
void sched_do_syscall ( ThreadId tid )
{
   Int  syscall_no;
   Char msg_buf[100];

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   syscall_no = PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch);

   /* Special-case nanosleep because we can.  But should we?

      XXX not doing so for now, because it doesn't seem to work
      properly, and we can use the syscall nanosleep just as easily.
   */
   if (0 && syscall_no == __NR_nanosleep) {
      UInt t_now, t_awaken;
      struct vki_timespec* req;
      req = (struct vki_timespec*)PLATFORM_SYSCALL_ARG1(VG_(threads)[tid].arch);

      if (req->tv_sec < 0 || req->tv_nsec < 0 || req->tv_nsec >= 1000000000) {
         SET_SYSCALL_RETVAL(tid, -VKI_EINVAL);
         return;
      }

      t_now = VG_(read_millisecond_timer)();
      t_awaken
         = t_now
           + (UInt)1000ULL * (UInt)(req->tv_sec)
           + (UInt)(req->tv_nsec) / 1000000;
      VG_(threads)[tid].status    = VgTs_Sleeping;
      VG_(threads)[tid].awaken_at = t_awaken;
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf, "at %d: nanosleep for %d",
                               t_now, t_awaken-t_now);
         print_sched_event(tid, msg_buf);
      }
      add_timeout(tid, t_awaken);
      /* Force the scheduler to run something else for a while. */
      return;
   }

   /* If pre_syscall returns true, then we're done immediately */
   if (VG_(pre_syscall)(tid)) {
      VG_(post_syscall)(tid, True);
      vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   } else {
      vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
   }
}



/* Sleep for a while, but be willing to be woken. */
static
void idle ( void )
{
   struct vki_pollfd pollfd[1];
   Int delta = -1;
   Int fd = VG_(proxy_resfd)();

   pollfd[0].fd = fd;
   pollfd[0].events = VKI_POLLIN;

   /* Look through the nearest timeouts, looking for the next future
      one (there may be stale past timeouts).  They'll all be mopped
      up below when the poll() finishes. */
   if (timeouts != NULL) {
      struct timeout *tp;
      Bool wicked = False;
      UInt now = VG_(read_millisecond_timer)();

      for(tp = timeouts; tp != NULL && tp->time < now; tp = tp->next) {
         /* If a thread is still sleeping in the past, make it runnable */
         ThreadState *tst = VG_(get_ThreadState)(tp->tid);
         if (tst->status == VgTs_Sleeping)
            tst->status = VgTs_Runnable;
         wicked = True;         /* no sleep for the wicked */
      }

      if (tp != NULL) {
         vg_assert(tp->time >= now);
         /* limit the signed int delta to INT_MAX */
         if ((tp->time - now) <= 0x7FFFFFFFU) {
            delta = tp->time - now;
         } else {
            delta = 0x7FFFFFFF;
         }
      }
      if (wicked)
         delta = 0;
   }

   /* gotta wake up for something! */
   vg_assert(fd != -1 || delta != -1);

   /* If we need to do signal routing, then poll for pending signals
      every VG_(clo_signal_polltime) mS */
   if (VG_(do_signal_routing) && (delta > VG_(clo_signal_polltime) || delta == -1))
      delta = VG_(clo_signal_polltime);

   if (VG_(clo_trace_sched)) {
      Char msg_buf[100];
      VG_(sprintf)(msg_buf, "idle: waiting for %dms and fd %d",
                   delta, fd);
      print_sched_event(0, msg_buf);
   }

   VG_(poll)(pollfd, fd != -1 ? 1 : 0, delta);

   /* See if there's anything on the timeout list which needs
      waking, and mop up anything in the past. */
   {
      UInt now = VG_(read_millisecond_timer)();
      struct timeout *tp;

      tp = timeouts;

      while(tp && tp->time <= now) {
         struct timeout *dead;
         ThreadState *tst;

         tst = VG_(get_ThreadState)(tp->tid);

         if (VG_(clo_trace_sched)) {
            Char msg_buf[100];
            VG_(sprintf)(msg_buf, "idle: now=%u removing timeout at %u",
                         now, tp->time);
            print_sched_event(tp->tid, msg_buf);
         }

         /* If awaken_at != tp->time then it means the timeout is
            stale and we should just ignore it. */
         if(tst->awaken_at == tp->time) {
            switch(tst->status) {
            case VgTs_Sleeping:
               tst->awaken_at = 0xFFFFFFFF;
               tst->status = VgTs_Runnable;
               break;

            case VgTs_WaitMX:
               do_pthread_mutex_timedlock_TIMEOUT(tst->tid);
               break;

            case VgTs_WaitCV:
               do_pthread_cond_timedwait_TIMEOUT(tst->tid);
               break;

            default:
               /* This is a bit odd but OK; if a thread had a timeout
                  but woke for some other reason (signal, condvar
                  wakeup), then it will still be on the list. */
               if (0)
                  VG_(printf)("idle(): unexpected status tp->tid=%d tst->status = %d\n",
                              tp->tid, tst->status);
               break;
            }
         }

         dead = tp;
         tp = tp->next;

         VG_(arena_free)(VG_AR_CORE, dead);
      }

      timeouts = tp;
   }
}


/* ---------------------------------------------------------------------
   The scheduler proper.
   ------------------------------------------------------------------ */

// For handling of the default action of a fatal signal.
// jmp_buf for fatal signals; VG_(fatal_signal_jmpbuf_ptr) is NULL until
// the time is right that it can be used.
static jmp_buf  fatal_signal_jmpbuf;
static jmp_buf* fatal_signal_jmpbuf_ptr;
static Int      fatal_sigNo;   // the fatal signal, if it happens

/* Run user-space threads until either
   * Deadlock occurs
   * One thread asks to shutdown Valgrind
   * The specified number of basic blocks has gone by.
*/
VgSchedReturnCode do_scheduler ( Int* exitcode, ThreadId* last_run_tid )
{
   ThreadId tid, tid_next;
   UInt     trc;
   UInt     dispatch_ctr_SAVED;
   Int      done_this_time, n_in_bounded_wait;
   Int      n_exists, n_waiting_for_reaper;
   Addr     trans_addr;

   /* Start with the root thread.  tid in general indicates the
      currently runnable/just-finished-running thread. */
   *last_run_tid = tid = 1;

   /* This is the top level scheduler loop.  It falls into three
      phases. */
   while (True) {

      /* ======================= Phase 0 of 3 =======================
         Be paranoid.  Always a good idea. */
     stage1:
      scheduler_sanity();
      VG_(sanity_check_general)( False );

      /* ======================= Phase 1 of 3 =======================
         Handle I/O completions and signals.  This may change the
         status of various threads.  Then select a new thread to run,
         or declare deadlock, or sleep if there are no runnable
         threads but some are blocked on I/O.  */

      /* Do the following loop until a runnable thread is found, or
         deadlock is detected. */
      while (True) {

         /* For stats purposes only. */
         n_scheduling_events_MAJOR++;

         /* Route signals to their proper places */
         VG_(route_signals)();

         /* See if any of the proxy LWPs report any activity: either a
            syscall completing or a signal arriving. */
         VG_(proxy_results)();

         /* Try and find a thread (tid) to run. */
         tid_next = tid;
         if (prefer_sched != VG_INVALID_THREADID) {
            tid_next = prefer_sched-1;
            prefer_sched = VG_INVALID_THREADID;
         }
         n_in_bounded_wait = 0;
         n_exists = 0;
         n_waiting_for_reaper = 0;
         while (True) {
            tid_next++;
            if (tid_next >= VG_N_THREADS) tid_next = 1;
            if (VG_(threads)[tid_next].status == VgTs_Sleeping
                || VG_(threads)[tid_next].status == VgTs_WaitSys
                || (VG_(threads)[tid_next].status == VgTs_WaitMX
                    && VG_(threads)[tid_next].awaken_at != 0xFFFFFFFF)
                || (VG_(threads)[tid_next].status == VgTs_WaitCV
                    && VG_(threads)[tid_next].awaken_at != 0xFFFFFFFF))
               n_in_bounded_wait ++;
            if (VG_(threads)[tid_next].status != VgTs_Empty)
               n_exists++;
            if (VG_(threads)[tid_next].status == VgTs_WaitJoiner)
               n_waiting_for_reaper++;
            if (VG_(threads)[tid_next].status == VgTs_Runnable)
               break; /* We can run this one. */
            if (tid_next == tid)
               break; /* been all the way round */
         }
         tid = tid_next;

         if (VG_(threads)[tid].status == VgTs_Runnable) {
            /* Found a suitable candidate.  Fall out of this loop, so
               we can advance to stage 2 of the scheduler: actually
               running the thread. */
            break;
         }

         /* All threads have exited - pretend someone called exit() */
         if (n_waiting_for_reaper == n_exists) {
            *exitcode = 0;      /* ? */
            return VgSrc_ExitSyscall;
         }

         /* We didn't find a runnable thread.  Now what? */
         if (n_in_bounded_wait == 0) {
            /* No runnable threads and no prospect of any appearing
               even if we wait for an arbitrary length of time.  In
               short, we have a deadlock. */
            VG_(pp_sched_status)();
            return VgSrc_Deadlock;
         }

         /* Nothing needs doing, so sit in idle until either a timeout
            happens or a thread's syscall completes. */
         idle();
         /* pp_sched_status(); */
         /* VG_(printf)("."); */
      }


      /* ======================= Phase 2 of 3 =======================
         Wahey! We've finally decided that thread tid is runnable, so
         we now do that.  Run it for as much of a quantum as possible.
         Trivial requests are handled and the thread continues.  The
         aim is not to do too many of Phase 1 since it is expensive.  */

      if (0)
         VG_(printf)("SCHED: tid %d\n", tid);

      VG_TRACK( thread_run, tid );

      /* Figure out how many bbs to ask vg_run_innerloop to do.  Note
         that it decrements the counter before testing it for zero, so
         that if VG_(dispatch_ctr) is set to N you get at most N-1
         iterations.  Also this means that VG_(dispatch_ctr) must
         exceed zero before entering the innerloop.  Also also, the
         decrement is done before the bb is actually run, so you
         always get at least one decrement even if nothing happens.
      */
      VG_(dispatch_ctr) = VG_SCHEDULING_QUANTUM + 1;

      /* ... and remember what we asked for. */
      dispatch_ctr_SAVED = VG_(dispatch_ctr);

      /* paranoia ... */
      vg_assert(VG_(threads)[tid].tid == tid);

      /* Actually run thread tid. */
      while (True) {

         *last_run_tid = tid;

         /* For stats purposes only. */
         n_scheduling_events_MINOR++;

         if (0)
            VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs",
                                      tid, VG_(dispatch_ctr) - 1 );
#        if 0
         if (VG_(bbs_done) > 31700000 + 0) {
            dispatch_ctr_SAVED = VG_(dispatch_ctr) = 2;
            VG_(translate)(&VG_(threads)[tid],
                           ARCH_INSTR_PTR(VG_(threads)[tid].arch),
                           /*debugging*/True);
         }
         vg_assert(ARCH_INSTR_PTR(VG_(threads)[tid].arch) != 0);
#        endif

         trc = run_thread_for_a_while ( tid );

#        if 0
         if (0 == ARCH_INSTR_PTR(VG_(threads)[tid].arch)) {
            VG_(printf)("tid = %d,  dc = %llu\n", tid, VG_(bbs_done));
            vg_assert(0 != ARCH_INSTR_PTR(VG_(threads)[tid].arch));
         }
#        endif

         /* Deal quickly with trivial scheduling events, and resume the
            thread. */

         if (trc == VG_TRC_INNER_FASTMISS) {
            Addr ip = ARCH_INSTR_PTR(VG_(threads)[tid].arch);

            vg_assert(VG_(dispatch_ctr) > 0);

            /* Trivial event.  Miss in the fast-cache.  Do a full
               lookup for it. */
            trans_addr = VG_(search_transtab)( ip );
            if (trans_addr == (Addr)0) {
               /* Not found; we need to request a translation. */
               if (VG_(translate)( tid, ip, /*debug*/False )) {
                  trans_addr = VG_(search_transtab)( ip );
                  if (trans_addr == (Addr)0)
                     VG_(core_panic)("VG_TRC_INNER_FASTMISS: missing tt_fast entry");
               } else {
                  // If VG_(translate)() fails, it's because it had to throw
                  // a signal because the client jumped to a bad address.
                  // This means VG_(deliver_signal)() will have been called
                  // by now, and the program counter will now be pointing to
                  // the start of the signal handler (if there is no
                  // handler, things would have been aborted by now), so do
                  // nothing, and things will work out next time around the
                  // scheduler loop.
               }
            }
            continue; /* with this thread */
         }

         if (trc == VG_TRC_EBP_JMP_CLIENTREQ) {
            UWord* args  = (UWord*)(ARCH_CLREQ_ARGS(VG_(threads)[tid].arch));
            UWord  reqno = args[0];
            /* VG_(printf)("request 0x%x\n", reqno); */

            /* Are we really absolutely totally quitting? */
            if (reqno == VG_USERREQ__LIBC_FREERES_DONE) {
               if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                  VG_(message)(Vg_DebugMsg,
                     "__libc_freeres() done; really quitting!");
               }
               return VgSrc_ExitSyscall;
            }

            do_client_request(tid,args);
            /* Following the request, we try and continue with the
               same thread if still runnable.  If not, go back to
               Stage 1 to select a new thread to run. */
            if (VG_(threads)[tid].status == VgTs_Runnable
                && reqno != VG_USERREQ__PTHREAD_YIELD)
               continue; /* with this thread */
            else
               goto stage1;
         }

         if (trc == VG_TRC_EBP_JMP_SYSCALL) {
            /* Do a syscall for the vthread tid.  This could cause it
               to become non-runnable.  One special case: spot the
               client doing calls to exit() and take this as the cue
               to exit. */
#           if 0
            { UInt* esp; Int i;
              esp=(UInt*)ARCH_STACK_PTR(VG_(threads)[tid].arch);
              VG_(printf)("\nBEFORE\n");
              for (i = 10; i >= -10; i--)
                 VG_(printf)("%2d  %p  =  0x%x\n", i, &esp[i], esp[i]);
            }
#           endif

            /* Deal with calling __libc_freeres() at exit.  When the
               client does __NR_exit, it's exiting for good.  So we
               then run __libc_freeres_wrapper.  That quits by
               doing VG_USERREQ__LIBC_FREERES_DONE, and at that point
               we really exit.  To be safe we nuke all other threads
               currently running.

               If not valgrinding (cachegrinding, etc) don't do this.
               __libc_freeres does some invalid frees which crash
               the unprotected malloc/free system. */

            if (PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit
                || PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit_group
               ) {

               /* Remember the supplied argument. */
               *exitcode = PLATFORM_SYSCALL_ARG1(VG_(threads)[tid].arch);

               // Inform tool about regs read by syscall
               VG_TRACK( pre_reg_read, Vg_CoreSysCall, tid, "(syscallno)",
                         R_SYSCALL_NUM, sizeof(UWord) );

               if (PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit)
                  VG_TRACK( pre_reg_read, Vg_CoreSysCall, tid,
                            "exit(error_code)", R_SYSCALL_ARG1, sizeof(int) );

               if (PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit_group)
                  VG_TRACK( pre_reg_read, Vg_CoreSysCall, tid,
                            "exit_group(error_code)", R_SYSCALL_ARG1,
                            sizeof(int) );

               /* Only run __libc_freeres if the tool says it's ok and
                  it hasn't been overridden with --run-libc-freeres=no
                  on the command line. */

               if (VG_(needs).libc_freeres &&
                   VG_(clo_run_libc_freeres) &&
                   __libc_freeres_wrapper != 0) {
                  if (VG_(clo_verbosity) > 2
                      || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                     VG_(message)(Vg_DebugMsg,
                        "Caught __NR_exit; running __libc_freeres()");
                  }
                  VG_(nuke_all_threads_except) ( tid );
                  ARCH_INSTR_PTR(VG_(threads)[tid].arch) =
                     __libc_freeres_wrapper;
                  vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
                  goto stage1; /* party on, dudes (but not for much longer :) */

               } else {
                  /* We won't run __libc_freeres; just exit now. */
                  if (VG_(clo_verbosity) > 2
                      || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                     VG_(message)(Vg_DebugMsg,
                        "Caught __NR_exit; quitting");
                  }
                  return VgSrc_ExitSyscall;
               }

            }

            /* We've dealt with __NR_exit at this point. */
            vg_assert(PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) != __NR_exit &&
                      PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) != __NR_exit_group);

            /* Trap syscalls to __NR_sched_yield and just have this
               thread yield instead.  Not essential, just an
               optimisation. */
            if (PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_sched_yield) {
               SET_SYSCALL_RETVAL(tid, 0); /* syscall returns with success */
               goto stage1; /* find a new thread to run */
            }

            sched_do_syscall(tid);

#           if 0
            { UInt* esp; Int i;
              esp=(UInt*)ARCH_STACK_PTR(VG_(threads)[tid].arch);
              VG_(printf)("AFTER\n");
              for (i = 10; i >= -10; i--)
                 VG_(printf)("%2d  %p  =  0x%x\n", i, &esp[i], esp[i]);
            }
#           endif

            if (VG_(threads)[tid].status == VgTs_Runnable) {
               continue; /* with this thread */
            } else {
               goto stage1;
            }
         }

         /* It's an event we can't quickly deal with.  Give up running
            this thread and handle things the expensive way. */
         break;
      }

      /* ======================= Phase 3 of 3 =======================
         Handle non-trivial thread requests, mostly pthread stuff. */

      /* Ok, we've fallen out of the dispatcher for a
         non-completely-trivial reason. First, update basic-block
         counters. */

      done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr);
      vg_assert(done_this_time > 0);
      VG_(bbs_done) += (ULong)done_this_time;

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d:   completed %d bbs, trc %d",
                                   tid, done_this_time, (Int)trc );

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d:  %llu bbs, event %s",
                                   tid, VG_(bbs_done),
                                   name_of_sched_event(trc) );

      /* Examine the thread's return code to figure out why it
         stopped. */

      switch (trc) {

         case VG_TRC_EBP_JMP_YIELD:
            /* Explicit yield.  Let a new thread be scheduled,
               simply by doing nothing, causing us to arrive back at
               Phase 1. */
            break;

         case VG_TRC_INNER_COUNTERZERO:
            /* Timeslice is out.  Let a new thread be scheduled,
               simply by doing nothing, causing us to arrive back at
               Phase 1. */
            vg_assert(VG_(dispatch_ctr) == 0);
            break;

         case VG_TRC_UNRESUMABLE_SIGNAL:
            /* It got a SIGSEGV/SIGBUS/SIGILL/SIGFPE, which we need to
               deliver right away.  */
            vg_assert(unresumable_siginfo.si_signo == VKI_SIGSEGV ||
                      unresumable_siginfo.si_signo == VKI_SIGBUS ||
                      unresumable_siginfo.si_signo == VKI_SIGILL ||
                      unresumable_siginfo.si_signo == VKI_SIGFPE);
            vg_assert(longjmpd_on_signal == unresumable_siginfo.si_signo);

            /* make sure we've unblocked the signals which the handler blocked */
            VG_(unblock_host_signal)(longjmpd_on_signal);

            VG_(deliver_signal)(tid, &unresumable_siginfo, False);
            unresumable_siginfo.si_signo = 0; /* done */
            break;

         default:
            VG_(printf)("\ntrc = %d\n", trc);
            VG_(core_panic)("VG_(scheduler), phase 3: "
                            "unexpected thread return code");
            /* NOTREACHED */
            break;

      } /* switch (trc) */

      /* That completes Phase 3 of 3.  Return now to the top of the
         main scheduler loop, to Phase 1 of 3. */

   } /* top-level scheduler loop */


   /* NOTREACHED */
   VG_(core_panic)("scheduler: post-main-loop ?!");
   /* NOTREACHED */
}

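/* Top-level entry point for the scheduler: run do_scheduler() under a
   setjmp so that a fatal signal (see VG_(scheduler_handle_fatal_signal))
   can bail out cleanly and be reported via *fatal_sigNo_ptr. */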
nethercote238a3c32004-08-09 13:13:31 +00001122VgSchedReturnCode VG_(scheduler) ( Int* exitcode, ThreadId* last_run_tid,
1123 Int* fatal_sigNo_ptr )
1124{
1125 VgSchedReturnCode src;
1126
1127 fatal_signal_jmpbuf_ptr = &fatal_signal_jmpbuf;
1128 if (__builtin_setjmp( fatal_signal_jmpbuf_ptr ) == 0) {
1129 src = do_scheduler( exitcode, last_run_tid );
1130 } else {
1131 src = VgSrc_FatalSig;
1132 *fatal_sigNo_ptr = fatal_sigNo;
1133 }
1134 return src;
1135}
1136
jsgf855d93d2003-10-13 22:26:55 +00001137void VG_(need_resched) ( ThreadId prefer )
1138{
1139 /* Tell the scheduler now might be a good time to find a new
1140 runnable thread, because something happened which woke a thread
1141 up.
1142
1143 NB: This can be called unsynchronized from either a signal
1144 handler, or from another LWP (ie, real kernel thread).
1145
1146 In principle this could simply be a matter of setting
1147 VG_(dispatch_ctr) to a small value (say, 2), which would make
1148 any running code come back to the scheduler fairly quickly.
1149
1150 However, since the scheduler implements a strict round-robin
1151 policy with only one priority level, there are, by definition,
1152 no better threads to be running than the current thread anyway,
1153 so we may as well ignore this hint. For processes with a
1154 mixture of compute and I/O bound threads, this means the compute
1155 threads could introduce longish latencies before the I/O threads
1156 run. For programs with only I/O bound threads, need_resched
1157 won't have any effect anyway.
1158
1159 OK, so I've added command-line switches to enable low-latency
1160 syscalls and signals. The prefer_sched variable is in effect
1161 the ID of a single thread which has higher priority than all the
1162 others. If set, the scheduler will prefer to schedule that
1163 thread over all others. Naturally, this could lead to
1164 starvation or other unfairness.
1165 */
1166
1167 if (VG_(dispatch_ctr) > 10)
1168 VG_(dispatch_ctr) = 2;
1169 prefer_sched = prefer;
1170}
1171
nethercote238a3c32004-08-09 13:13:31 +00001172void VG_(scheduler_handle_fatal_signal) ( Int sigNo )
1173{
1174 if (NULL != fatal_signal_jmpbuf_ptr) {
1175 fatal_sigNo = sigNo;
1176 __builtin_longjmp(*fatal_signal_jmpbuf_ptr, 1);
1177 }
1178}
sewardje663cb92002-04-12 10:26:32 +00001179
1180/* ---------------------------------------------------------------------
1181 The pthread implementation.
1182 ------------------------------------------------------------------ */
1183
1184#include <pthread.h>
1185#include <errno.h>
1186
sewardje663cb92002-04-12 10:26:32 +00001187/* /usr/include/bits/pthreadtypes.h:
1188 typedef unsigned long int pthread_t;
1189*/
1190
sewardje663cb92002-04-12 10:26:32 +00001191
sewardj604ec3c2002-04-18 22:38:41 +00001192/* -----------------------------------------------------------
sewardj20917d82002-05-28 01:36:45 +00001193 Thread CREATION, JOINAGE and CANCELLATION: HELPER FNS
sewardj604ec3c2002-04-18 22:38:41 +00001194 -------------------------------------------------------- */
1195
sewardj20917d82002-05-28 01:36:45 +00001196/* We've decided to action a cancellation on tid. Make it jump to
1197 thread_exit_wrapper() in vg_libpthread.c, passing PTHREAD_CANCELED
1198 as the arg. */
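/* (Illustrative note: once the cancelled thread next runs, the net effect
   is as if it had called thread_exit_wrapper(PTHREAD_CANCELED) itself;
   the bogus return address set up below is never actually used.) */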
1199static
1200void make_thread_jump_to_cancelhdlr ( ThreadId tid )
1201{
1202 Char msg_buf[100];
1203 vg_assert(VG_(is_valid_tid)(tid));
sewardjdadc8d02002-12-08 23:24:18 +00001204
sewardj20917d82002-05-28 01:36:45 +00001205 /* Push PTHREAD_CANCELED on the stack and jump to the cancellation
1206 handler -- which is really thread_exit_wrapper() in
1207 vg_libpthread.c. */
1208 vg_assert(VG_(threads)[tid].cancel_pend != NULL);
sewardj4bdd9962002-12-26 11:51:50 +00001209
nethercote6b9c8472004-09-13 13:16:40 +00001210 /* Set an argument and bogus return address. The return address will not
1211 be used, but we still need to have it so that the arg is at the
1212 correct stack offset. */
nethercote50397c22004-11-04 18:03:06 +00001213 VGA_(set_arg_and_bogus_ret)(tid, (UWord)PTHREAD_CANCELED, 0xBEADDEEF);
sewardj4bdd9962002-12-26 11:51:50 +00001214
1215 /* .cancel_pend will hold &thread_exit_wrapper */
nethercote50397c22004-11-04 18:03:06 +00001216 ARCH_INSTR_PTR(VG_(threads)[tid].arch) = (UWord)VG_(threads)[tid].cancel_pend;
sewardjdadc8d02002-12-08 23:24:18 +00001217
jsgf855d93d2003-10-13 22:26:55 +00001218 VG_(proxy_abort_syscall)(tid);
sewardjdadc8d02002-12-08 23:24:18 +00001219
sewardj20917d82002-05-28 01:36:45 +00001220 /* Make sure we aren't cancelled again whilst handling this
1221 cancellation. */
1222 VG_(threads)[tid].cancel_st = False;
1223 if (VG_(clo_trace_sched)) {
1224 VG_(sprintf)(msg_buf,
1225 "jump to cancellation handler (hdlr = %p)",
1226 VG_(threads)[tid].cancel_pend);
1227 print_sched_event(tid, msg_buf);
1228 }
thughes513197c2004-06-13 12:07:53 +00001229
1230 if(VG_(threads)[tid].status == VgTs_WaitCV) {
1231      /* POSIX says we must reacquire the mutex before handling cancellation */
1232 vg_pthread_mutex_t* mx;
1233 vg_pthread_cond_t* cond;
1234
1235 mx = VG_(threads)[tid].associated_mx;
1236 cond = VG_(threads)[tid].associated_cv;
1237 VG_TRACK( pre_mutex_lock, tid, mx );
1238
1239 if (mx->__vg_m_owner == VG_INVALID_THREADID) {
1240 /* Currently unheld; hand it out to thread tid. */
1241 vg_assert(mx->__vg_m_count == 0);
1242 VG_(threads)[tid].status = VgTs_Runnable;
1243 VG_(threads)[tid].associated_cv = NULL;
1244 VG_(threads)[tid].associated_mx = NULL;
thughes10236472004-06-13 14:35:43 +00001245 mx->__vg_m_owner = (/*_pthread_descr*/void*)tid;
thughes513197c2004-06-13 12:07:53 +00001246 mx->__vg_m_count = 1;
1247 /* .m_edx already holds pth_cond_wait success value (0) */
1248
1249 VG_TRACK( post_mutex_lock, tid, mx );
1250
1251 if (VG_(clo_trace_pthread_level) >= 1) {
1252 VG_(sprintf)(msg_buf, "%s cv %p: RESUME with mx %p",
1253 "pthread_cancel", cond, mx );
1254 print_pthread_event(tid, msg_buf);
1255 }
1256
1257 } else {
1258 /* Currently held. Make thread tid be blocked on it. */
1259 vg_assert(mx->__vg_m_count > 0);
1260 VG_(threads)[tid].status = VgTs_WaitMX;
1261 VG_(threads)[tid].associated_cv = NULL;
1262 VG_(threads)[tid].associated_mx = mx;
1263 SET_PTHREQ_RETVAL(tid, 0); /* pth_cond_wait success value */
1264
1265 if (VG_(clo_trace_pthread_level) >= 1) {
1266 VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
1267 "pthread_cancel", cond, mx );
1268 print_pthread_event(tid, msg_buf);
1269 }
1270 }
1271 } else {
1272 VG_(threads)[tid].status = VgTs_Runnable;
1273 }
sewardj20917d82002-05-28 01:36:45 +00001274}
1275
1276
1277
sewardjb48e5002002-05-13 00:16:03 +00001278/* Release resources and generally clean up once a thread has finally
nethercotef971ab72004-08-02 16:27:40 +00001279 disappeared.
1280
1281 BORKAGE/ISSUES as of 29 May 02 (moved from top of file --njn 2004-Aug-02)
1282
1283 TODO sometime:
1284    - Mutex scrubbing - cleanup_after_thread_exited: look for threads
1285 blocked on mutexes held by the exiting thread, and release them
1286 appropriately. (??)
1287*/
sewardjb48e5002002-05-13 00:16:03 +00001288static
jsgf855d93d2003-10-13 22:26:55 +00001289void cleanup_after_thread_exited ( ThreadId tid, Bool forcekill )
sewardjb48e5002002-05-13 00:16:03 +00001290{
thughes3a1b8172004-09-12 22:48:59 +00001291 Segment *seg;
1292
nethercote36881a22004-08-04 14:03:16 +00001293 vg_assert(is_valid_or_empty_tid(tid));
sewardj018f7622002-05-15 21:13:39 +00001294 vg_assert(VG_(threads)[tid].status == VgTs_Empty);
thugheseb9b8fb2004-11-12 23:11:21 +00001295
njn25e49d8e72002-09-23 09:36:25 +00001296 /* Its stack is now off-limits */
thugheseb9b8fb2004-11-12 23:11:21 +00001297 if (VG_(threads)[tid].stack_base) {
1298 seg = VG_(find_segment)( VG_(threads)[tid].stack_base );
1299 VG_TRACK( die_mem_stack, seg->addr, seg->len );
1300 }
njn25e49d8e72002-09-23 09:36:25 +00001301
nethercotef9b59412004-09-10 15:33:32 +00001302 VGA_(cleanup_thread)( &VG_(threads)[tid].arch );
fitzhardinge47735af2004-01-21 01:27:27 +00001303
jsgf855d93d2003-10-13 22:26:55 +00001304 /* Not interested in the timeout anymore */
1305 VG_(threads)[tid].awaken_at = 0xFFFFFFFF;
1306
1307 /* Delete proxy LWP */
1308 VG_(proxy_delete)(tid, forcekill);
sewardjb48e5002002-05-13 00:16:03 +00001309}
1310
1311
sewardj20917d82002-05-28 01:36:45 +00001312/* Look for matching pairs of threads waiting for joiners and threads
1313 waiting for joinees. For each such pair copy the return value of
1314 the joinee into the joiner, let the joiner resume and discard the
1315 joinee. */
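/* (Concretely: a matching pair is a joiner jnr with status VgTs_WaitJoinee
   whose joiner_jee_tid names a joinee jee with status VgTs_WaitJoiner.) */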
1316static
1317void maybe_rendezvous_joiners_and_joinees ( void )
1318{
1319 Char msg_buf[100];
1320 void** thread_return;
1321 ThreadId jnr, jee;
1322
1323 for (jnr = 1; jnr < VG_N_THREADS; jnr++) {
1324 if (VG_(threads)[jnr].status != VgTs_WaitJoinee)
1325 continue;
1326 jee = VG_(threads)[jnr].joiner_jee_tid;
1327 if (jee == VG_INVALID_THREADID)
1328 continue;
1329 vg_assert(VG_(is_valid_tid)(jee));
jsgf855d93d2003-10-13 22:26:55 +00001330 if (VG_(threads)[jee].status != VgTs_WaitJoiner) {
1331 /* if joinee has become detached, then make join fail with
1332 EINVAL */
1333 if (VG_(threads)[jee].detached) {
1334 VG_(threads)[jnr].status = VgTs_Runnable;
1335 VG_(threads)[jnr].joiner_jee_tid = VG_INVALID_THREADID;
1336 SET_PTHREQ_RETVAL(jnr, VKI_EINVAL);
1337 }
sewardj20917d82002-05-28 01:36:45 +00001338 continue;
jsgf855d93d2003-10-13 22:26:55 +00001339 }
sewardj20917d82002-05-28 01:36:45 +00001340 /* ok! jnr is waiting to join with jee, and jee is waiting to be
1341 joined by ... well, any thread. So let's do it! */
1342
1343 /* Copy return value to where joiner wants it. */
1344 thread_return = VG_(threads)[jnr].joiner_thread_return;
1345 if (thread_return != NULL) {
1346 /* CHECK thread_return writable */
njn72718642003-07-24 08:45:32 +00001347 VG_TRACK( pre_mem_write, Vg_CorePThread, jnr,
njn25e49d8e72002-09-23 09:36:25 +00001348 "pthread_join: thread_return",
1349 (Addr)thread_return, sizeof(void*));
sewardj5a3798b2002-06-04 23:24:22 +00001350
sewardj20917d82002-05-28 01:36:45 +00001351 *thread_return = VG_(threads)[jee].joinee_retval;
1352 /* Not really right, since it makes the thread's return value
1353 appear to be defined even if it isn't. */
njn25e49d8e72002-09-23 09:36:25 +00001354 VG_TRACK( post_mem_write, (Addr)thread_return, sizeof(void*) );
sewardj20917d82002-05-28 01:36:45 +00001355 }
1356
1357 /* Joinee is discarded */
1358 VG_(threads)[jee].status = VgTs_Empty; /* bye! */
jsgf855d93d2003-10-13 22:26:55 +00001359 cleanup_after_thread_exited ( jee, False );
sewardjc4a810d2002-11-13 22:25:51 +00001360 if (VG_(clo_trace_sched)) {
1361 VG_(sprintf)(msg_buf,
1362 "rendezvous with joinee %d. %d resumes, %d exits.",
1363 jee, jnr, jee );
sewardj20917d82002-05-28 01:36:45 +00001364 print_sched_event(jnr, msg_buf);
1365 }
sewardjc4a810d2002-11-13 22:25:51 +00001366
1367 VG_TRACK( post_thread_join, jnr, jee );
sewardj20917d82002-05-28 01:36:45 +00001368
1369 /* joiner returns with success */
1370 VG_(threads)[jnr].status = VgTs_Runnable;
njnd3040452003-05-19 15:04:06 +00001371 SET_PTHREQ_RETVAL(jnr, 0);
sewardj20917d82002-05-28 01:36:45 +00001372 }
1373}
1374
1375
sewardjccef2e62002-05-29 19:26:32 +00001376/* Nuke all threads other than me. POSIX requires this to happen in
1377   __NR_exec, and after a __NR_fork() when I am the child. Also
jsgf855d93d2003-10-13 22:26:55 +00001378   used at process exit time with
1379   me==VG_INVALID_THREADID. */
sewardjccef2e62002-05-29 19:26:32 +00001380void VG_(nuke_all_threads_except) ( ThreadId me )
1381{
1382 ThreadId tid;
1383 for (tid = 1; tid < VG_N_THREADS; tid++) {
1384 if (tid == me
jsgf855d93d2003-10-13 22:26:55 +00001385 || VG_(threads)[tid].status == VgTs_Empty)
sewardjccef2e62002-05-29 19:26:32 +00001386 continue;
sewardjef037c72002-05-30 00:40:03 +00001387 if (0)
1388 VG_(printf)(
1389 "VG_(nuke_all_threads_except): nuking tid %d\n", tid);
jsgf855d93d2003-10-13 22:26:55 +00001390 VG_(proxy_delete)(tid, True);
sewardjccef2e62002-05-29 19:26:32 +00001391 VG_(threads)[tid].status = VgTs_Empty;
thughes6d41bea2004-10-20 12:25:59 +00001392 VG_(threads)[tid].associated_mx = NULL;
1393 VG_(threads)[tid].associated_cv = NULL;
thughes168eb882004-11-13 00:39:37 +00001394 VG_(threads)[tid].stack_base = (Addr)NULL;
thugheseb9b8fb2004-11-12 23:11:21 +00001395 VG_(threads)[tid].stack_size = 0;
jsgf855d93d2003-10-13 22:26:55 +00001396 cleanup_after_thread_exited( tid, True );
sewardjccef2e62002-05-29 19:26:32 +00001397 }
1398}
1399
1400
sewardj20917d82002-05-28 01:36:45 +00001401/* -----------------------------------------------------------
1402 Thread CREATION, JOINAGE and CANCELLATION: REQUESTS
1403 -------------------------------------------------------- */
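/* The do__* handlers below each act on behalf of thread tid and report
   their result to the client through SET_PTHREQ_RETVAL -- either
   immediately, or, for blocking requests, when the thread is later
   resumed. */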
1404
sewardje663cb92002-04-12 10:26:32 +00001405static
sewardj8ad94e12002-05-29 00:10:20 +00001406void do__cleanup_push ( ThreadId tid, CleanupEntry* cu )
1407{
1408 Int sp;
1409 Char msg_buf[100];
1410 vg_assert(VG_(is_valid_tid)(tid));
1411 sp = VG_(threads)[tid].custack_used;
1412 if (VG_(clo_trace_sched)) {
thughes11975ff2004-06-12 12:58:22 +00001413 switch (cu->type) {
1414 case VgCt_Function:
1415 VG_(sprintf)(msg_buf,
1416 "cleanup_push (fn %p, arg %p) -> slot %d",
1417 cu->data.function.fn, cu->data.function.arg, sp);
1418 break;
1419 case VgCt_Longjmp:
1420 VG_(sprintf)(msg_buf,
1421 "cleanup_push (ub %p) -> slot %d",
1422 cu->data.longjmp.ub, sp);
1423 break;
1424 default:
1425 VG_(sprintf)(msg_buf,
1426 "cleanup_push (unknown type) -> slot %d",
1427 sp);
1428 break;
1429 }
sewardj8ad94e12002-05-29 00:10:20 +00001430 print_sched_event(tid, msg_buf);
1431 }
1432 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1433 if (sp == VG_N_CLEANUPSTACK)
njne427a662002-10-02 11:08:25 +00001434 VG_(core_panic)("do__cleanup_push: VG_N_CLEANUPSTACK is too small."
sewardj8ad94e12002-05-29 00:10:20 +00001435 " Increase and recompile.");
1436 VG_(threads)[tid].custack[sp] = *cu;
1437 sp++;
1438 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001439 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001440}
1441
1442
1443static
1444void do__cleanup_pop ( ThreadId tid, CleanupEntry* cu )
1445{
1446 Int sp;
1447 Char msg_buf[100];
1448 vg_assert(VG_(is_valid_tid)(tid));
1449 sp = VG_(threads)[tid].custack_used;
1450 if (VG_(clo_trace_sched)) {
njn36650922002-10-04 09:18:09 +00001451 VG_(sprintf)(msg_buf, "cleanup_pop from slot %d", sp-1);
sewardj8ad94e12002-05-29 00:10:20 +00001452 print_sched_event(tid, msg_buf);
1453 }
1454 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1455 if (sp == 0) {
njnd3040452003-05-19 15:04:06 +00001456 SET_PTHREQ_RETVAL(tid, -1);
sewardj8ad94e12002-05-29 00:10:20 +00001457 return;
1458 }
1459 sp--;
njn72718642003-07-24 08:45:32 +00001460 VG_TRACK( pre_mem_write, Vg_CorePThread, tid,
njn36650922002-10-04 09:18:09 +00001461 "cleanup pop", (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001462 *cu = VG_(threads)[tid].custack[sp];
njn25e49d8e72002-09-23 09:36:25 +00001463 VG_TRACK( post_mem_write, (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001464 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001465 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001466}
1467
1468
1469static
sewardjff42d1d2002-05-22 13:17:31 +00001470void do_pthread_yield ( ThreadId tid )
1471{
1472 Char msg_buf[100];
1473 vg_assert(VG_(is_valid_tid)(tid));
sewardjff42d1d2002-05-22 13:17:31 +00001474 if (VG_(clo_trace_sched)) {
1475 VG_(sprintf)(msg_buf, "yield");
1476 print_sched_event(tid, msg_buf);
1477 }
njnd3040452003-05-19 15:04:06 +00001478 SET_PTHREQ_RETVAL(tid, 0);
sewardjff42d1d2002-05-22 13:17:31 +00001479}
1480
1481
1482static
sewardj20917d82002-05-28 01:36:45 +00001483void do__testcancel ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00001484{
sewardj7989d0c2002-05-28 11:00:01 +00001485 Char msg_buf[100];
sewardjb48e5002002-05-13 00:16:03 +00001486 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001487 if (VG_(clo_trace_sched)) {
1488 VG_(sprintf)(msg_buf, "testcancel");
1489 print_sched_event(tid, msg_buf);
1490 }
sewardj20917d82002-05-28 01:36:45 +00001491 if (/* is there a cancellation pending on this thread? */
1492 VG_(threads)[tid].cancel_pend != NULL
1493 && /* is this thread accepting cancellations? */
1494 VG_(threads)[tid].cancel_st) {
1495 /* Ok, let's do the cancellation. */
1496 make_thread_jump_to_cancelhdlr ( tid );
sewardje663cb92002-04-12 10:26:32 +00001497 } else {
sewardj20917d82002-05-28 01:36:45 +00001498 /* No, we keep going. */
njnd3040452003-05-19 15:04:06 +00001499 SET_PTHREQ_RETVAL(tid, 0);
sewardje663cb92002-04-12 10:26:32 +00001500 }
sewardje663cb92002-04-12 10:26:32 +00001501}
1502
1503
1504static
sewardj20917d82002-05-28 01:36:45 +00001505void do__set_cancelstate ( ThreadId tid, Int state )
1506{
1507 Bool old_st;
sewardj7989d0c2002-05-28 11:00:01 +00001508 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001509 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001510 if (VG_(clo_trace_sched)) {
1511 VG_(sprintf)(msg_buf, "set_cancelstate to %d (%s)", state,
1512 state==PTHREAD_CANCEL_ENABLE
1513 ? "ENABLE"
1514 : (state==PTHREAD_CANCEL_DISABLE ? "DISABLE" : "???"));
1515 print_sched_event(tid, msg_buf);
1516 }
sewardj20917d82002-05-28 01:36:45 +00001517 old_st = VG_(threads)[tid].cancel_st;
1518 if (state == PTHREAD_CANCEL_ENABLE) {
1519 VG_(threads)[tid].cancel_st = True;
1520 } else
1521 if (state == PTHREAD_CANCEL_DISABLE) {
1522 VG_(threads)[tid].cancel_st = False;
1523 } else {
njne427a662002-10-02 11:08:25 +00001524 VG_(core_panic)("do__set_cancelstate");
sewardj20917d82002-05-28 01:36:45 +00001525 }
njnd3040452003-05-19 15:04:06 +00001526 SET_PTHREQ_RETVAL(tid, old_st ? PTHREAD_CANCEL_ENABLE
1527 : PTHREAD_CANCEL_DISABLE);
sewardj20917d82002-05-28 01:36:45 +00001528}
1529
1530
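/* Note the encoding of the .cancel_ty field: True means
   PTHREAD_CANCEL_DEFERRED, False means PTHREAD_CANCEL_ASYNCHRONOUS. */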
1531static
1532void do__set_canceltype ( ThreadId tid, Int type )
1533{
1534 Bool old_ty;
sewardj7989d0c2002-05-28 11:00:01 +00001535 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001536 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001537 if (VG_(clo_trace_sched)) {
1538 VG_(sprintf)(msg_buf, "set_canceltype to %d (%s)", type,
1539 type==PTHREAD_CANCEL_ASYNCHRONOUS
1540 ? "ASYNCHRONOUS"
1541 : (type==PTHREAD_CANCEL_DEFERRED ? "DEFERRED" : "???"));
1542 print_sched_event(tid, msg_buf);
1543 }
sewardj20917d82002-05-28 01:36:45 +00001544 old_ty = VG_(threads)[tid].cancel_ty;
1545 if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
1546 VG_(threads)[tid].cancel_ty = False;
1547 } else
1548 if (type == PTHREAD_CANCEL_DEFERRED) {
sewardjaf00b6d2002-05-29 23:30:28 +00001549 VG_(threads)[tid].cancel_ty = True;
sewardj20917d82002-05-28 01:36:45 +00001550 } else {
njne427a662002-10-02 11:08:25 +00001551 VG_(core_panic)("do__set_canceltype");
sewardj20917d82002-05-28 01:36:45 +00001552 }
njnd3040452003-05-19 15:04:06 +00001553 SET_PTHREQ_RETVAL(tid, old_ty ? PTHREAD_CANCEL_DEFERRED
sewardj20917d82002-05-28 01:36:45 +00001554 : PTHREAD_CANCEL_ASYNCHRONOUS);
1555}
1556
1557
sewardj7989d0c2002-05-28 11:00:01 +00001558/* Set or get the detach state for thread det: what==0 means set not-detached, what==1 means set detached, what==2 means fetch the current value. */
sewardj20917d82002-05-28 01:36:45 +00001559static
sewardj7989d0c2002-05-28 11:00:01 +00001560void do__set_or_get_detach ( ThreadId tid,
1561 Int what, ThreadId det )
sewardj20917d82002-05-28 01:36:45 +00001562{
sewardj7989d0c2002-05-28 11:00:01 +00001563 Char msg_buf[100];
1564 /* VG_(printf)("do__set_or_get_detach tid %d what %d det %d\n",
1565 tid, what, det); */
sewardj20917d82002-05-28 01:36:45 +00001566 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001567 if (VG_(clo_trace_sched)) {
1568 VG_(sprintf)(msg_buf, "set_or_get_detach %d (%s) for tid %d", what,
1569 what==0 ? "not-detached" : (
1570 what==1 ? "detached" : (
1571 what==2 ? "fetch old value" : "???")),
1572 det );
1573 print_sched_event(tid, msg_buf);
1574 }
1575
1576 if (!VG_(is_valid_tid)(det)) {
njnd3040452003-05-19 15:04:06 +00001577 SET_PTHREQ_RETVAL(tid, -1);
sewardj7989d0c2002-05-28 11:00:01 +00001578 return;
1579 }
1580
sewardj20917d82002-05-28 01:36:45 +00001581 switch (what) {
1582 case 2: /* get */
njnd3040452003-05-19 15:04:06 +00001583 SET_PTHREQ_RETVAL(tid, VG_(threads)[det].detached ? 1 : 0);
sewardj20917d82002-05-28 01:36:45 +00001584 return;
jsgf855d93d2003-10-13 22:26:55 +00001585      case 1: /* set detached */
sewardj7989d0c2002-05-28 11:00:01 +00001586 VG_(threads)[det].detached = True;
njnd3040452003-05-19 15:04:06 +00001587 SET_PTHREQ_RETVAL(tid, 0);
jsgf855d93d2003-10-13 22:26:55 +00001588 /* wake anyone who was joining on us */
1589 maybe_rendezvous_joiners_and_joinees();
sewardj20917d82002-05-28 01:36:45 +00001590 return;
1591 case 0: /* set not detached */
sewardj7989d0c2002-05-28 11:00:01 +00001592 VG_(threads)[det].detached = False;
njnd3040452003-05-19 15:04:06 +00001593 SET_PTHREQ_RETVAL(tid, 0);
sewardj20917d82002-05-28 01:36:45 +00001594 return;
1595 default:
njne427a662002-10-02 11:08:25 +00001596 VG_(core_panic)("do__set_or_get_detach");
sewardj20917d82002-05-28 01:36:45 +00001597 }
1598}
1599
1600
1601static
1602void do__set_cancelpend ( ThreadId tid,
1603 ThreadId cee,
1604 void (*cancelpend_hdlr)(void*) )
sewardje663cb92002-04-12 10:26:32 +00001605{
1606 Char msg_buf[100];
1607
sewardj20917d82002-05-28 01:36:45 +00001608 vg_assert(VG_(is_valid_tid)(tid));
1609 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1610
thughes97e54d22004-08-15 14:34:02 +00001611 if (!VG_(is_valid_tid)(cee) ||
1612 VG_(threads)[cee].status == VgTs_WaitJoiner) {
sewardj7989d0c2002-05-28 11:00:01 +00001613 if (VG_(clo_trace_sched)) {
1614 VG_(sprintf)(msg_buf,
1615 "set_cancelpend for invalid tid %d", cee);
1616 print_sched_event(tid, msg_buf);
1617 }
njn25e49d8e72002-09-23 09:36:25 +00001618 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001619 "pthread_cancel: target thread does not exist, or invalid");
jsgf855d93d2003-10-13 22:26:55 +00001620 SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
sewardj7989d0c2002-05-28 11:00:01 +00001621 return;
1622 }
sewardj20917d82002-05-28 01:36:45 +00001623
1624 VG_(threads)[cee].cancel_pend = cancelpend_hdlr;
1625
thughes31c1aae2004-10-28 15:56:55 +00001626 /* interrupt a pending syscall if asynchronous cancellation
1627 is enabled for the target thread */
1628 if (VG_(threads)[cee].cancel_st && !VG_(threads)[cee].cancel_ty) {
1629 VG_(proxy_abort_syscall)(cee);
1630 }
jsgf855d93d2003-10-13 22:26:55 +00001631
sewardj20917d82002-05-28 01:36:45 +00001632 if (VG_(clo_trace_sched)) {
1633 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001634 "set_cancelpend (hdlr = %p, set by tid %d)",
sewardj20917d82002-05-28 01:36:45 +00001635 cancelpend_hdlr, tid);
1636 print_sched_event(cee, msg_buf);
1637 }
1638
1639 /* Thread doing the cancelling returns with success. */
njnd3040452003-05-19 15:04:06 +00001640 SET_PTHREQ_RETVAL(tid, 0);
sewardj20917d82002-05-28 01:36:45 +00001641
1642 /* Perhaps we can nuke the cancellee right now? */
thughes513197c2004-06-13 12:07:53 +00001643 if (!VG_(threads)[cee].cancel_ty || /* if PTHREAD_CANCEL_ASYNCHRONOUS */
1644 (VG_(threads)[cee].status != VgTs_Runnable &&
1645 VG_(threads)[cee].status != VgTs_WaitMX)) {
jsgf855d93d2003-10-13 22:26:55 +00001646 do__testcancel(cee);
thughes513197c2004-06-13 12:07:53 +00001647 }
sewardj20917d82002-05-28 01:36:45 +00001648}
1649
1650
1651static
1652void do_pthread_join ( ThreadId tid,
1653 ThreadId jee, void** thread_return )
1654{
1655 Char msg_buf[100];
1656 ThreadId i;
sewardje663cb92002-04-12 10:26:32 +00001657 /* jee, the joinee, is the thread specified as an arg in thread
1658 tid's call to pthread_join. So tid is the join-er. */
sewardjb48e5002002-05-13 00:16:03 +00001659 vg_assert(VG_(is_valid_tid)(tid));
sewardj018f7622002-05-15 21:13:39 +00001660 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001661
1662 if (jee == tid) {
njn25e49d8e72002-09-23 09:36:25 +00001663 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001664 "pthread_join: attempt to join to self");
njnd3040452003-05-19 15:04:06 +00001665 SET_PTHREQ_RETVAL(tid, EDEADLK); /* libc constant, not a kernel one */
jsgf855d93d2003-10-13 22:26:55 +00001666 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001667 return;
1668 }
1669
sewardj20917d82002-05-28 01:36:45 +00001670 /* Flush any completed pairs, so as to make sure what we're looking
1671 at is up-to-date. */
1672 maybe_rendezvous_joiners_and_joinees();
1673
1674 /* Is this a sane request? */
jsgf855d93d2003-10-13 22:26:55 +00001675 if ( ! VG_(is_valid_tid)(jee) ||
1676 VG_(threads)[jee].detached) {
sewardje663cb92002-04-12 10:26:32 +00001677 /* Invalid thread to join to. */
njn25e49d8e72002-09-23 09:36:25 +00001678 VG_(record_pthread_error)( tid,
jsgf855d93d2003-10-13 22:26:55 +00001679 "pthread_join: target thread does not exist, invalid, or detached");
1680 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardje663cb92002-04-12 10:26:32 +00001681 return;
1682 }
1683
sewardj20917d82002-05-28 01:36:45 +00001684 /* Is anyone else already in a join-wait for jee? */
1685 for (i = 1; i < VG_N_THREADS; i++) {
1686 if (i == tid) continue;
1687 if (VG_(threads)[i].status == VgTs_WaitJoinee
1688 && VG_(threads)[i].joiner_jee_tid == jee) {
1689 /* Someone already did join on this thread */
njn25e49d8e72002-09-23 09:36:25 +00001690 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001691 "pthread_join: another thread already "
1692 "in join-wait for target thread");
jsgf855d93d2003-10-13 22:26:55 +00001693 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
1694 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardj20917d82002-05-28 01:36:45 +00001695 return;
1696 }
sewardje663cb92002-04-12 10:26:32 +00001697 }
1698
thughes513197c2004-06-13 12:07:53 +00001699 if(VG_(threads)[tid].cancel_pend != NULL &&
1700 VG_(threads)[tid].cancel_st) {
1701 make_thread_jump_to_cancelhdlr ( tid );
1702 } else {
1703 /* Mark this thread as waiting for the joinee. */
1704 VG_(threads)[tid].status = VgTs_WaitJoinee;
1705 VG_(threads)[tid].joiner_thread_return = thread_return;
1706 VG_(threads)[tid].joiner_jee_tid = jee;
1707
1708 /* Look for matching joiners and joinees and do the right thing. */
1709 maybe_rendezvous_joiners_and_joinees();
1710
1711      /* Return value is irrelevant since this thread becomes
1712         non-runnable. maybe_rendezvous_joiners_and_joinees() will cause
1713         it to return the right value when it resumes. */
1714
1715 if (VG_(clo_trace_sched)) {
1716 VG_(sprintf)(msg_buf,
1717 "wait for joinee %d (may already be ready)", jee);
1718 print_sched_event(tid, msg_buf);
1719 }
sewardje663cb92002-04-12 10:26:32 +00001720 }
sewardje663cb92002-04-12 10:26:32 +00001721}
1722
1723
sewardj20917d82002-05-28 01:36:45 +00001724/* ( void* ): calling thread waits for joiner and returns the void* to
1725 it. This is one of two ways in which a thread can finally exit --
1726 the other is do__quit. */
sewardje663cb92002-04-12 10:26:32 +00001727static
sewardj20917d82002-05-28 01:36:45 +00001728void do__wait_joiner ( ThreadId tid, void* retval )
sewardje663cb92002-04-12 10:26:32 +00001729{
sewardj20917d82002-05-28 01:36:45 +00001730 Char msg_buf[100];
1731 vg_assert(VG_(is_valid_tid)(tid));
1732 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1733 if (VG_(clo_trace_sched)) {
1734 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001735 "do__wait_joiner(retval = %p) (non-detached thread exit)", retval);
sewardj20917d82002-05-28 01:36:45 +00001736 print_sched_event(tid, msg_buf);
1737 }
1738 VG_(threads)[tid].status = VgTs_WaitJoiner;
1739 VG_(threads)[tid].joinee_retval = retval;
1740 maybe_rendezvous_joiners_and_joinees();
1741}
1742
1743
1744/* ( no-args ): calling thread disappears from the system forever.
1745 Reclaim resources. */
1746static
1747void do__quit ( ThreadId tid )
1748{
1749 Char msg_buf[100];
1750 vg_assert(VG_(is_valid_tid)(tid));
1751 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1752 VG_(threads)[tid].status = VgTs_Empty; /* bye! */
jsgf855d93d2003-10-13 22:26:55 +00001753 cleanup_after_thread_exited ( tid, False );
sewardj20917d82002-05-28 01:36:45 +00001754 if (VG_(clo_trace_sched)) {
sewardj7989d0c2002-05-28 11:00:01 +00001755 VG_(sprintf)(msg_buf, "do__quit (detached thread exit)");
sewardj20917d82002-05-28 01:36:45 +00001756 print_sched_event(tid, msg_buf);
1757 }
jsgf855d93d2003-10-13 22:26:55 +00001758 maybe_rendezvous_joiners_and_joinees();
sewardj20917d82002-05-28 01:36:45 +00001759 /* Return value is irrelevant; this thread will not get
1760 rescheduled. */
1761}
1762
1763
nethercote6b9c8472004-09-13 13:16:40 +00001764/* Should never be entered. If it is, it will be on the simulated CPU. */
sewardj20917d82002-05-28 01:36:45 +00001765static
1766void do__apply_in_new_thread_bogusRA ( void )
1767{
njne427a662002-10-02 11:08:25 +00001768 VG_(core_panic)("do__apply_in_new_thread_bogusRA");
sewardj20917d82002-05-28 01:36:45 +00001769}
1770
1771/* (Fn, Arg): Create a new thread and run Fn applied to Arg in it. Fn
1772 MUST NOT return -- ever. Eventually it will do either __QUIT or
1773 __WAIT_JOINER. Return the child tid to the parent. */
1774static
1775void do__apply_in_new_thread ( ThreadId parent_tid,
1776 void* (*fn)(void *),
thughesdaa34562004-06-27 12:48:53 +00001777 void* arg,
1778 StackInfo *si )
sewardj20917d82002-05-28 01:36:45 +00001779{
sewardje663cb92002-04-12 10:26:32 +00001780 Addr new_stack;
1781 UInt new_stk_szb;
1782 ThreadId tid;
1783 Char msg_buf[100];
1784
1785 /* Paranoia ... */
1786 vg_assert(sizeof(pthread_t) == sizeof(UInt));
1787
sewardj018f7622002-05-15 21:13:39 +00001788 vg_assert(VG_(threads)[parent_tid].status != VgTs_Empty);
sewardje663cb92002-04-12 10:26:32 +00001789
sewardj1e8cdc92002-04-18 11:37:52 +00001790 tid = vg_alloc_ThreadState();
sewardje663cb92002-04-12 10:26:32 +00001791
1792 /* If we've created the main thread's tid, we're in deep trouble :) */
sewardj6072c362002-04-19 14:40:57 +00001793 vg_assert(tid != 1);
nethercote36881a22004-08-04 14:03:16 +00001794 vg_assert(is_valid_or_empty_tid(tid));
sewardje663cb92002-04-12 10:26:32 +00001795
sewardjc4a810d2002-11-13 22:25:51 +00001796 /* do this early, before the child gets any memory writes */
1797 VG_TRACK ( post_thread_create, parent_tid, tid );
1798
sewardjf6374322002-11-13 22:35:55 +00001799 /* Create new thread with default attrs:
1800 deferred cancellation, not detached
1801 */
1802 mostly_clear_thread_record(tid);
1803 VG_(threads)[tid].status = VgTs_Runnable;
1804
sewardje663cb92002-04-12 10:26:32 +00001805 /* Copy the parent's CPU state into the child's, in a roundabout
1806 way (via baseBlock). */
nethercotef971ab72004-08-02 16:27:40 +00001807 load_thread_state(parent_tid);
nethercotef9b59412004-09-10 15:33:32 +00001808 VGA_(setup_child)( &VG_(threads)[tid].arch,
1809 &VG_(threads)[parent_tid].arch );
nethercotef971ab72004-08-02 16:27:40 +00001810 save_thread_state(tid);
sewardjf6374322002-11-13 22:35:55 +00001811 vg_tid_last_in_baseBlock = tid;
sewardje663cb92002-04-12 10:26:32 +00001812
1813 /* Consider allocating the child a stack, if the one it already has
1814 is inadequate. */
thughes5e5e2132004-11-16 19:40:05 +00001815 new_stk_szb = PGROUNDUP(si->size + VG_AR_CLIENT_STACKBASE_REDZONE_SZB + si->guardsize);
1816
thughesdaa34562004-06-27 12:48:53 +00001817 VG_(threads)[tid].stack_guard_size = si->guardsize;
sewardje663cb92002-04-12 10:26:32 +00001818
sewardj018f7622002-05-15 21:13:39 +00001819 if (new_stk_szb > VG_(threads)[tid].stack_size) {
sewardje663cb92002-04-12 10:26:32 +00001820 /* Again, for good measure :) We definitely don't want to be
1821 allocating a stack for the main thread. */
sewardj6072c362002-04-19 14:40:57 +00001822 vg_assert(tid != 1);
thughesdaa34562004-06-27 12:48:53 +00001823 if (VG_(threads)[tid].stack_size > 0)
1824 VG_(client_free)(VG_(threads)[tid].stack_base);
fitzhardinge98abfc72003-12-16 02:05:15 +00001825 new_stack = VG_(client_alloc)(0, new_stk_szb,
nethercotee567e702004-07-10 17:49:17 +00001826 VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
fitzhardinge98abfc72003-12-16 02:05:15 +00001827 SF_STACK);
nethercote8e9eab02004-07-11 18:01:06 +00001828 // Given the low number of threads Valgrind can handle, stack
1829 // allocation should pretty much always succeed, so having an
1830 // assertion here isn't too bad. However, probably better would be
1831 // this:
1832 //
1833 // if (0 == new_stack)
1834 // SET_PTHREQ_RETVAL(parent_tid, -VKI_EAGAIN);
1835 //
nethercotee567e702004-07-10 17:49:17 +00001836 vg_assert(0 != new_stack);
sewardj018f7622002-05-15 21:13:39 +00001837 VG_(threads)[tid].stack_base = new_stack;
1838 VG_(threads)[tid].stack_size = new_stk_szb;
1839 VG_(threads)[tid].stack_highest_word
sewardje663cb92002-04-12 10:26:32 +00001840 = new_stack + new_stk_szb
sewardj1e8cdc92002-04-18 11:37:52 +00001841                             - VG_AR_CLIENT_STACKBASE_REDZONE_SZB; /* -4 ??? */
sewardje663cb92002-04-12 10:26:32 +00001842 }
sewardj1e8cdc92002-04-18 11:37:52 +00001843
njn25e49d8e72002-09-23 09:36:25 +00001844 /* Having got memory to hold the thread's stack:
1845 - set %esp as base + size
1846 - mark everything below %esp inaccessible
1847 - mark redzone at stack end inaccessible
1848 */
njnd3040452003-05-19 15:04:06 +00001849 SET_PTHREQ_ESP(tid, VG_(threads)[tid].stack_base
1850 + VG_(threads)[tid].stack_size
1851 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
sewardj1e8cdc92002-04-18 11:37:52 +00001852
njn25e49d8e72002-09-23 09:36:25 +00001853 VG_TRACK ( die_mem_stack, VG_(threads)[tid].stack_base,
thughesdaa34562004-06-27 12:48:53 +00001854 VG_(threads)[tid].stack_size
1855 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
nethercote6b9c8472004-09-13 13:16:40 +00001856 VG_TRACK ( ban_mem_stack, ARCH_STACK_PTR(VG_(threads)[tid].arch),
njn25e49d8e72002-09-23 09:36:25 +00001857 VG_AR_CLIENT_STACKBASE_REDZONE_SZB );
sewardje663cb92002-04-12 10:26:32 +00001858
nethercote6b9c8472004-09-13 13:16:40 +00001859 VGA_(thread_initial_stack)(tid, (UWord)arg,
1860 (Addr)&do__apply_in_new_thread_bogusRA);
sewardje663cb92002-04-12 10:26:32 +00001861
1862 /* this is where we start */
nethercote50397c22004-11-04 18:03:06 +00001863 ARCH_INSTR_PTR(VG_(threads)[tid].arch) = (UWord)fn;
sewardje663cb92002-04-12 10:26:32 +00001864
sewardj8937c812002-04-12 20:12:20 +00001865 if (VG_(clo_trace_sched)) {
njn25e49d8e72002-09-23 09:36:25 +00001866 VG_(sprintf)(msg_buf, "new thread, created by %d", parent_tid );
sewardje663cb92002-04-12 10:26:32 +00001867 print_sched_event(tid, msg_buf);
1868 }
1869
fitzhardingef7866182004-03-16 22:09:12 +00001870 /* Start the thread with all signals blocked; it's up to the client
1871 code to set the right signal mask when it's ready. */
nethercote73b526f2004-10-31 18:48:21 +00001872 VG_(sigfillset)(&VG_(threads)[tid].sig_mask);
jsgf855d93d2003-10-13 22:26:55 +00001873
1874 /* Now that the signal mask is set up, create a proxy LWP for this thread */
1875 VG_(proxy_create)(tid);
1876
1877 /* Set the proxy's signal mask */
1878 VG_(proxy_setsigmask)(tid);
sewardjb48e5002002-05-13 00:16:03 +00001879
sewardj20917d82002-05-28 01:36:45 +00001880 /* return child's tid to parent */
njnd3040452003-05-19 15:04:06 +00001881 SET_PTHREQ_RETVAL(parent_tid, tid); /* success */
sewardje663cb92002-04-12 10:26:32 +00001882}
1883
1884
sewardj604ec3c2002-04-18 22:38:41 +00001885/* -----------------------------------------------------------
1886 MUTEXes
1887 -------------------------------------------------------- */
1888
rjwalsh7109a8c2004-09-02 00:31:02 +00001889/* vg_pthread_mutex_t is defined in core.h.
sewardj604ec3c2002-04-18 22:38:41 +00001890
nethercote1f0173b2004-02-28 15:40:36 +00001891 The initializers zero everything, except possibly the fourth word,
1892 which in vg_pthread_mutex_t is the __vg_m_kind field. It gets set to one
1893 of PTHREAD_MUTEX_{TIMED,RECURSIVE,ERRORCHECK,ADAPTIVE}_NP
sewardj604ec3c2002-04-18 22:38:41 +00001894
sewardj6072c362002-04-19 14:40:57 +00001895 How we use it:
sewardj604ec3c2002-04-18 22:38:41 +00001896
nethercote1f0173b2004-02-28 15:40:36 +00001897 __vg_m_kind never changes and indicates whether or not it is recursive.
sewardj6072c362002-04-19 14:40:57 +00001898
nethercote1f0173b2004-02-28 15:40:36 +00001899 __vg_m_count indicates the lock count; if 0, the mutex is not owned by
sewardj6072c362002-04-19 14:40:57 +00001900 anybody.
1901
nethercote1f0173b2004-02-28 15:40:36 +00001902 __vg_m_owner has a ThreadId value stuffed into it. We carefully arrange
sewardj6072c362002-04-19 14:40:57 +00001903 that ThreadId == 0 is invalid (VG_INVALID_THREADID), so that
1904 statically initialised mutexes correctly appear
1905 to belong to nobody.
1906
nethercote1f0173b2004-02-28 15:40:36 +00001907   In summary, a not-in-use mutex is distinguished by having __vg_m_owner
1908 == 0 (VG_INVALID_THREADID) and __vg_m_count == 0 too. If one of those
sewardj6072c362002-04-19 14:40:57 +00001909 conditions holds, the other should too.
1910
1911   There is no linked list of threads waiting for this mutex. Instead
1912   a thread in WaitMX state points at the mutex with its associated_mx
1913   field. This makes _unlock() inefficient, but keeps it simple to
1914   implement the right semantics vis-a-vis signals.
sewardje663cb92002-04-12 10:26:32 +00001915
sewardj604ec3c2002-04-18 22:38:41 +00001916 We don't have to deal with mutex initialisation; the client side
sewardj6072c362002-04-19 14:40:57 +00001917 deals with that for us.
1918*/
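/* Purely illustrative sketch (not part of the scheduler, hence disabled):
   the invariant described above means "unowned" can be tested as below.
   The helper name is hypothetical and is not used anywhere in this file. */
#if 0
static Bool mutex_is_unowned_SKETCH ( vg_pthread_mutex_t* mx )
{
   /* Both halves of the invariant must hold together. */
   return mx->__vg_m_count == 0
          && (ThreadId)mx->__vg_m_owner == VG_INVALID_THREADID;
}
#endif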
sewardje663cb92002-04-12 10:26:32 +00001919
sewardj3b5d8862002-04-20 13:53:23 +00001920/* Helper fns ... */
thughese321d492004-10-17 15:00:20 +00001921static
1922void do_pthread_mutex_timedlock_TIMEOUT ( ThreadId tid )
1923{
1924 Char msg_buf[100];
1925 vg_pthread_mutex_t* mx;
1926
1927 vg_assert(VG_(is_valid_tid)(tid)
1928 && VG_(threads)[tid].status == VgTs_WaitMX
1929 && VG_(threads)[tid].awaken_at != 0xFFFFFFFF);
1930 mx = VG_(threads)[tid].associated_mx;
1931 vg_assert(mx != NULL);
1932
1933 VG_(threads)[tid].status = VgTs_Runnable;
1934 SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_mutex_lock return value */
1935 VG_(threads)[tid].associated_mx = NULL;
1936
1937 if (VG_(clo_trace_pthread_level) >= 1) {
1938 VG_(sprintf)(msg_buf, "pthread_mutex_timedlock mx %p: TIMEOUT", mx);
1939 print_pthread_event(tid, msg_buf);
1940 }
1941}
1942
1943
sewardj3b5d8862002-04-20 13:53:23 +00001944static
nethercote1f0173b2004-02-28 15:40:36 +00001945void release_one_thread_waiting_on_mutex ( vg_pthread_mutex_t* mutex,
sewardj3b5d8862002-04-20 13:53:23 +00001946 Char* caller )
1947{
1948 Int i;
1949 Char msg_buf[100];
1950
1951 /* Find some arbitrary thread waiting on this mutex, and make it
1952 runnable. If none are waiting, mark the mutex as not held. */
1953 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00001954 if (VG_(threads)[i].status == VgTs_Empty)
sewardj3b5d8862002-04-20 13:53:23 +00001955 continue;
sewardj018f7622002-05-15 21:13:39 +00001956 if (VG_(threads)[i].status == VgTs_WaitMX
1957 && VG_(threads)[i].associated_mx == mutex)
sewardj3b5d8862002-04-20 13:53:23 +00001958 break;
1959 }
1960
nethercote1f0173b2004-02-28 15:40:36 +00001961 VG_TRACK( post_mutex_unlock, (ThreadId)mutex->__vg_m_owner, mutex );
sewardj0af43bc2002-10-22 04:30:35 +00001962
sewardj3b5d8862002-04-20 13:53:23 +00001963 vg_assert(i <= VG_N_THREADS);
1964 if (i == VG_N_THREADS) {
1965 /* Nobody else is waiting on it. */
nethercote1f0173b2004-02-28 15:40:36 +00001966 mutex->__vg_m_count = 0;
1967 mutex->__vg_m_owner = VG_INVALID_THREADID;
sewardj3b5d8862002-04-20 13:53:23 +00001968 } else {
1969 /* Notionally transfer the hold to thread i, whose
1970 pthread_mutex_lock() call now returns with 0 (success). */
1971 /* The .count is already == 1. */
sewardj018f7622002-05-15 21:13:39 +00001972 vg_assert(VG_(threads)[i].associated_mx == mutex);
nethercote1f0173b2004-02-28 15:40:36 +00001973 mutex->__vg_m_owner = (/*_pthread_descr*/void*)i;
sewardj018f7622002-05-15 21:13:39 +00001974 VG_(threads)[i].status = VgTs_Runnable;
1975 VG_(threads)[i].associated_mx = NULL;
sewardj5f07b662002-04-23 16:52:51 +00001976 /* m_edx already holds pth_mx_lock() success (0) */
sewardj3b5d8862002-04-20 13:53:23 +00001977
sewardj0af43bc2002-10-22 04:30:35 +00001978 VG_TRACK( post_mutex_lock, (ThreadId)i, mutex);
1979
sewardj3b5d8862002-04-20 13:53:23 +00001980 if (VG_(clo_trace_pthread_level) >= 1) {
1981 VG_(sprintf)(msg_buf, "%s mx %p: RESUME",
1982 caller, mutex );
1983 print_pthread_event(i, msg_buf);
1984 }
1985 }
1986}
1987
sewardje663cb92002-04-12 10:26:32 +00001988
1989static
sewardj30671ff2002-04-21 00:13:57 +00001990void do_pthread_mutex_lock( ThreadId tid,
1991 Bool is_trylock,
thughese321d492004-10-17 15:00:20 +00001992 vg_pthread_mutex_t* mutex,
1993 UInt ms_end )
sewardje663cb92002-04-12 10:26:32 +00001994{
sewardj30671ff2002-04-21 00:13:57 +00001995 Char msg_buf[100];
1996 Char* caller
sewardj8ccc2be2002-05-10 20:26:37 +00001997 = is_trylock ? "pthread_mutex_trylock"
1998 : "pthread_mutex_lock ";
sewardje663cb92002-04-12 10:26:32 +00001999
thughese321d492004-10-17 15:00:20 +00002000 /* If ms_end == 0xFFFFFFFF, wait forever (no timeout). Otherwise,
2001 ms_end is the ending millisecond. */
2002
sewardj604ec3c2002-04-18 22:38:41 +00002003 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj30671ff2002-04-21 00:13:57 +00002004 VG_(sprintf)(msg_buf, "%s mx %p ...", caller, mutex );
sewardj604ec3c2002-04-18 22:38:41 +00002005 print_pthread_event(tid, msg_buf);
2006 }
2007
2008 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002009 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002010 && VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00002011
2012 /* POSIX doesn't mandate this, but for sanity ... */
2013 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002014 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002015 "pthread_mutex_lock/trylock: mutex is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002016 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardje663cb92002-04-12 10:26:32 +00002017 return;
2018 }
2019
sewardj604ec3c2002-04-18 22:38:41 +00002020 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00002021 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002022# ifndef GLIBC_2_1
sewardj604ec3c2002-04-18 22:38:41 +00002023 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002024 case PTHREAD_MUTEX_ADAPTIVE_NP:
2025# endif
sewardja1679dd2002-05-10 22:31:40 +00002026# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002027 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002028# endif
sewardj604ec3c2002-04-18 22:38:41 +00002029 case PTHREAD_MUTEX_RECURSIVE_NP:
2030 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002031 if (mutex->__vg_m_count >= 0) break;
sewardj604ec3c2002-04-18 22:38:41 +00002032 /* else fall thru */
2033 default:
njn25e49d8e72002-09-23 09:36:25 +00002034 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002035 "pthread_mutex_lock/trylock: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002036 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002037 return;
sewardje663cb92002-04-12 10:26:32 +00002038 }
2039
nethercote1f0173b2004-02-28 15:40:36 +00002040 if (mutex->__vg_m_count > 0) {
2041 if (!VG_(is_valid_tid)((ThreadId)mutex->__vg_m_owner)) {
fitzhardinge47735af2004-01-21 01:27:27 +00002042 VG_(record_pthread_error)( tid,
2043 "pthread_mutex_lock/trylock: mutex has invalid owner");
2044 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
2045 return;
2046 }
sewardjf8f819e2002-04-17 23:21:37 +00002047
2048 /* Someone has it already. */
thughese321d492004-10-17 15:00:20 +00002049 if ((ThreadId)mutex->__vg_m_owner == tid && ms_end == 0xFFFFFFFF) {
sewardjf8f819e2002-04-17 23:21:37 +00002050 /* It's locked -- by me! */
nethercote1f0173b2004-02-28 15:40:36 +00002051 if (mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
sewardjf8f819e2002-04-17 23:21:37 +00002052 /* return 0 (success). */
nethercote1f0173b2004-02-28 15:40:36 +00002053 mutex->__vg_m_count++;
njnd3040452003-05-19 15:04:06 +00002054 SET_PTHREQ_RETVAL(tid, 0);
sewardj853f55d2002-04-26 00:27:53 +00002055 if (0)
2056 VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n",
nethercote1f0173b2004-02-28 15:40:36 +00002057 tid, mutex, mutex->__vg_m_count);
sewardjf8f819e2002-04-17 23:21:37 +00002058 return;
2059 } else {
sewardj30671ff2002-04-21 00:13:57 +00002060 if (is_trylock)
njnd3040452003-05-19 15:04:06 +00002061 SET_PTHREQ_RETVAL(tid, EBUSY);
sewardj30671ff2002-04-21 00:13:57 +00002062 else
njnd3040452003-05-19 15:04:06 +00002063 SET_PTHREQ_RETVAL(tid, EDEADLK);
sewardjf8f819e2002-04-17 23:21:37 +00002064 return;
2065 }
2066 } else {
sewardj6072c362002-04-19 14:40:57 +00002067 /* Someone else has it; we have to wait. Mark ourselves
2068 thusly. */
nethercote1f0173b2004-02-28 15:40:36 +00002069 /* GUARD: __vg_m_count > 0 && __vg_m_owner is valid */
sewardj30671ff2002-04-21 00:13:57 +00002070 if (is_trylock) {
2071 /* caller is polling; so return immediately. */
njnd3040452003-05-19 15:04:06 +00002072 SET_PTHREQ_RETVAL(tid, EBUSY);
sewardj30671ff2002-04-21 00:13:57 +00002073 } else {
sewardjdca84112002-11-13 22:29:34 +00002074 VG_TRACK ( pre_mutex_lock, tid, mutex );
2075
sewardj018f7622002-05-15 21:13:39 +00002076 VG_(threads)[tid].status = VgTs_WaitMX;
2077 VG_(threads)[tid].associated_mx = mutex;
thughese321d492004-10-17 15:00:20 +00002078 VG_(threads)[tid].awaken_at = ms_end;
2079 if (ms_end != 0xFFFFFFFF)
2080 add_timeout(tid, ms_end);
njnd3040452003-05-19 15:04:06 +00002081 SET_PTHREQ_RETVAL(tid, 0); /* pth_mx_lock success value */
sewardj30671ff2002-04-21 00:13:57 +00002082 if (VG_(clo_trace_pthread_level) >= 1) {
2083 VG_(sprintf)(msg_buf, "%s mx %p: BLOCK",
2084 caller, mutex );
2085 print_pthread_event(tid, msg_buf);
2086 }
2087 }
sewardje663cb92002-04-12 10:26:32 +00002088 return;
2089 }
sewardjf8f819e2002-04-17 23:21:37 +00002090
sewardje663cb92002-04-12 10:26:32 +00002091 } else {
sewardj6072c362002-04-19 14:40:57 +00002092 /* Nobody owns it. Sanity check ... */
nethercote1f0173b2004-02-28 15:40:36 +00002093 vg_assert(mutex->__vg_m_owner == VG_INVALID_THREADID);
sewardjdca84112002-11-13 22:29:34 +00002094
2095 VG_TRACK ( pre_mutex_lock, tid, mutex );
2096
sewardjf8f819e2002-04-17 23:21:37 +00002097 /* We get it! [for the first time]. */
nethercote1f0173b2004-02-28 15:40:36 +00002098 mutex->__vg_m_count = 1;
2099 mutex->__vg_m_owner = (/*_pthread_descr*/void*)tid;
njn25e49d8e72002-09-23 09:36:25 +00002100
sewardje663cb92002-04-12 10:26:32 +00002101 /* return 0 (success). */
njnd3040452003-05-19 15:04:06 +00002102 SET_PTHREQ_RETVAL(tid, 0);
sewardjf8f819e2002-04-17 23:21:37 +00002103
njnd3040452003-05-19 15:04:06 +00002104 VG_TRACK( post_mutex_lock, tid, mutex);
2105 }
sewardje663cb92002-04-12 10:26:32 +00002106}
2107
2108
2109static
2110void do_pthread_mutex_unlock ( ThreadId tid,
nethercote1f0173b2004-02-28 15:40:36 +00002111 vg_pthread_mutex_t* mutex )
sewardje663cb92002-04-12 10:26:32 +00002112{
sewardj3b5d8862002-04-20 13:53:23 +00002113 Char msg_buf[100];
sewardje663cb92002-04-12 10:26:32 +00002114
sewardj45b4b372002-04-16 22:50:32 +00002115 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj3b5d8862002-04-20 13:53:23 +00002116 VG_(sprintf)(msg_buf, "pthread_mutex_unlock mx %p ...", mutex );
sewardj8937c812002-04-12 20:12:20 +00002117 print_pthread_event(tid, msg_buf);
2118 }
2119
sewardj604ec3c2002-04-18 22:38:41 +00002120 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002121 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002122 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj604ec3c2002-04-18 22:38:41 +00002123
2124 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002125 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002126 "pthread_mutex_unlock: mutex is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002127 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002128 return;
2129 }
2130
2131 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00002132 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002133# ifndef GLIBC_2_1
sewardj604ec3c2002-04-18 22:38:41 +00002134 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002135 case PTHREAD_MUTEX_ADAPTIVE_NP:
2136# endif
sewardja1679dd2002-05-10 22:31:40 +00002137# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002138 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002139# endif
sewardj604ec3c2002-04-18 22:38:41 +00002140 case PTHREAD_MUTEX_RECURSIVE_NP:
2141 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002142 if (mutex->__vg_m_count >= 0) break;
sewardj604ec3c2002-04-18 22:38:41 +00002143 /* else fall thru */
2144 default:
njn25e49d8e72002-09-23 09:36:25 +00002145 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002146 "pthread_mutex_unlock: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002147 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002148 return;
2149 }
sewardje663cb92002-04-12 10:26:32 +00002150
2151 /* Barf if we don't currently hold the mutex. */
nethercote1f0173b2004-02-28 15:40:36 +00002152 if (mutex->__vg_m_count == 0) {
sewardj4dced352002-06-04 22:54:20 +00002153 /* nobody holds it */
njn25e49d8e72002-09-23 09:36:25 +00002154 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002155 "pthread_mutex_unlock: mutex is not locked");
njnd3040452003-05-19 15:04:06 +00002156 SET_PTHREQ_RETVAL(tid, EPERM);
sewardj4dced352002-06-04 22:54:20 +00002157 return;
2158 }
2159
nethercote1f0173b2004-02-28 15:40:36 +00002160 if ((ThreadId)mutex->__vg_m_owner != tid) {
sewardj4dced352002-06-04 22:54:20 +00002161 /* we don't hold it */
njn25e49d8e72002-09-23 09:36:25 +00002162 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002163 "pthread_mutex_unlock: mutex is locked by a different thread");
njnd3040452003-05-19 15:04:06 +00002164 SET_PTHREQ_RETVAL(tid, EPERM);
sewardje663cb92002-04-12 10:26:32 +00002165 return;
2166 }
2167
sewardjf8f819e2002-04-17 23:21:37 +00002168 /* If it's a multiply-locked recursive mutex, just decrement the
2169 lock count and return. */
nethercote1f0173b2004-02-28 15:40:36 +00002170 if (mutex->__vg_m_count > 1) {
2171 vg_assert(mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
2172 mutex->__vg_m_count --;
njnd3040452003-05-19 15:04:06 +00002173 SET_PTHREQ_RETVAL(tid, 0); /* success */
sewardjf8f819e2002-04-17 23:21:37 +00002174 return;
2175 }
2176
sewardj604ec3c2002-04-18 22:38:41 +00002177 /* Now we're sure it is locked exactly once, and by the thread who
sewardjf8f819e2002-04-17 23:21:37 +00002178 is now doing an unlock on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002179 vg_assert(mutex->__vg_m_count == 1);
2180 vg_assert((ThreadId)mutex->__vg_m_owner == tid);
sewardjf8f819e2002-04-17 23:21:37 +00002181
sewardj3b5d8862002-04-20 13:53:23 +00002182 /* Release at max one thread waiting on this mutex. */
2183 release_one_thread_waiting_on_mutex ( mutex, "pthread_mutex_lock" );
sewardje663cb92002-04-12 10:26:32 +00002184
sewardj3b5d8862002-04-20 13:53:23 +00002185 /* Our (tid's) pth_unlock() returns with 0 (success). */
njnd3040452003-05-19 15:04:06 +00002186 SET_PTHREQ_RETVAL(tid, 0); /* Success. */
sewardje663cb92002-04-12 10:26:32 +00002187}
2188
2189
sewardj6072c362002-04-19 14:40:57 +00002190/* -----------------------------------------------------------
2191 CONDITION VARIABLES
2192 -------------------------------------------------------- */
sewardje663cb92002-04-12 10:26:32 +00002193
rjwalsh7109a8c2004-09-02 00:31:02 +00002194/* The relevant type (vg_pthread_cond_t) is in core.h.
sewardj77e466c2002-04-14 02:29:29 +00002195
nethercote1f0173b2004-02-28 15:40:36 +00002196 We don't use any fields of vg_pthread_cond_t for anything at all.
2197 Only the identity of the CVs is important. (Actually, we initialise
2198 __vg_c_waiting in pthread_cond_init() to VG_INVALID_THREADID.)
sewardj6072c362002-04-19 14:40:57 +00002199
2200 Linux pthreads supports no attributes on condition variables, so we
sewardj3b5d8862002-04-20 13:53:23 +00002201 don't need to think too hard there. */
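/* A thread blocked in pthread_cond_wait is represented simply by having
   status == VgTs_WaitCV, with associated_cv pointing at the condition
   variable and associated_mx at the mutex it must re-acquire on wakeup.
   Waking waiters (release_N_threads_waiting_on_cond, below) is a linear
   scan of VG_(threads) for exactly that state. */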
sewardj6072c362002-04-19 14:40:57 +00002202
sewardj77e466c2002-04-14 02:29:29 +00002203
sewardj5f07b662002-04-23 16:52:51 +00002204static
2205void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid )
2206{
2207 Char msg_buf[100];
nethercote1f0173b2004-02-28 15:40:36 +00002208 vg_pthread_mutex_t* mx;
2209 vg_pthread_cond_t* cv;
sewardj5f07b662002-04-23 16:52:51 +00002210
sewardjb48e5002002-05-13 00:16:03 +00002211 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002212 && VG_(threads)[tid].status == VgTs_WaitCV
2213 && VG_(threads)[tid].awaken_at != 0xFFFFFFFF);
2214 mx = VG_(threads)[tid].associated_mx;
sewardj5f07b662002-04-23 16:52:51 +00002215 vg_assert(mx != NULL);
sewardj018f7622002-05-15 21:13:39 +00002216 cv = VG_(threads)[tid].associated_cv;
sewardj5f07b662002-04-23 16:52:51 +00002217 vg_assert(cv != NULL);
2218
nethercote1f0173b2004-02-28 15:40:36 +00002219 if (mx->__vg_m_owner == VG_INVALID_THREADID) {
sewardj5f07b662002-04-23 16:52:51 +00002220 /* Currently unheld; hand it out to thread tid. */
nethercote1f0173b2004-02-28 15:40:36 +00002221 vg_assert(mx->__vg_m_count == 0);
sewardj018f7622002-05-15 21:13:39 +00002222 VG_(threads)[tid].status = VgTs_Runnable;
njnd3040452003-05-19 15:04:06 +00002223 SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
sewardj018f7622002-05-15 21:13:39 +00002224 VG_(threads)[tid].associated_cv = NULL;
2225 VG_(threads)[tid].associated_mx = NULL;
nethercote1f0173b2004-02-28 15:40:36 +00002226 mx->__vg_m_owner = (/*_pthread_descr*/void*)tid;
2227 mx->__vg_m_count = 1;
sewardj5f07b662002-04-23 16:52:51 +00002228
sewardj0af43bc2002-10-22 04:30:35 +00002229 VG_TRACK( post_mutex_lock, tid, mx );
2230
sewardj5f07b662002-04-23 16:52:51 +00002231 if (VG_(clo_trace_pthread_level) >= 1) {
sewardjc3bd5f52002-05-01 03:24:23 +00002232 VG_(sprintf)(msg_buf,
nethercote1f0173b2004-02-28 15:40:36 +00002233 "pthread_cond_timedwait cv %p: TIMEOUT with mx %p",
sewardjc3bd5f52002-05-01 03:24:23 +00002234 cv, mx );
sewardj5f07b662002-04-23 16:52:51 +00002235 print_pthread_event(tid, msg_buf);
2236 }
2237 } else {
2238 /* Currently held. Make thread tid be blocked on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002239 vg_assert(mx->__vg_m_count > 0);
sewardjdca84112002-11-13 22:29:34 +00002240 VG_TRACK( pre_mutex_lock, tid, mx );
2241
sewardj018f7622002-05-15 21:13:39 +00002242 VG_(threads)[tid].status = VgTs_WaitMX;
njnd3040452003-05-19 15:04:06 +00002243 SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
sewardj018f7622002-05-15 21:13:39 +00002244 VG_(threads)[tid].associated_cv = NULL;
2245 VG_(threads)[tid].associated_mx = mx;
sewardj5f07b662002-04-23 16:52:51 +00002246 if (VG_(clo_trace_pthread_level) >= 1) {
2247 VG_(sprintf)(msg_buf,
nethercote1f0173b2004-02-28 15:40:36 +00002248 "pthread_cond_timedwait cv %p: TIMEOUT -> BLOCK for mx %p",
sewardj5f07b662002-04-23 16:52:51 +00002249 cv, mx );
2250 print_pthread_event(tid, msg_buf);
2251 }
sewardj5f07b662002-04-23 16:52:51 +00002252 }
2253}
2254
2255
sewardj3b5d8862002-04-20 13:53:23 +00002256static
nethercote1f0173b2004-02-28 15:40:36 +00002257void release_N_threads_waiting_on_cond ( vg_pthread_cond_t* cond,
sewardj3b5d8862002-04-20 13:53:23 +00002258 Int n_to_release,
2259 Char* caller )
2260{
2261 Int i;
2262 Char msg_buf[100];
nethercote1f0173b2004-02-28 15:40:36 +00002263 vg_pthread_mutex_t* mx;
sewardj3b5d8862002-04-20 13:53:23 +00002264
2265 while (True) {
2266 if (n_to_release == 0)
2267 return;
2268
2269 /* Find a thread waiting on this CV. */
2270 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00002271 if (VG_(threads)[i].status == VgTs_Empty)
sewardj3b5d8862002-04-20 13:53:23 +00002272 continue;
sewardj018f7622002-05-15 21:13:39 +00002273 if (VG_(threads)[i].status == VgTs_WaitCV
2274 && VG_(threads)[i].associated_cv == cond)
sewardj3b5d8862002-04-20 13:53:23 +00002275 break;
2276 }
2277 vg_assert(i <= VG_N_THREADS);
2278
2279 if (i == VG_N_THREADS) {
2280 /* Nobody else is waiting on it. */
2281 return;
2282 }
2283
sewardj018f7622002-05-15 21:13:39 +00002284 mx = VG_(threads)[i].associated_mx;
sewardj3b5d8862002-04-20 13:53:23 +00002285 vg_assert(mx != NULL);
2286
sewardjdca84112002-11-13 22:29:34 +00002287 VG_TRACK( pre_mutex_lock, i, mx );
2288
nethercote1f0173b2004-02-28 15:40:36 +00002289 if (mx->__vg_m_owner == VG_INVALID_THREADID) {
sewardj3b5d8862002-04-20 13:53:23 +00002290 /* Currently unheld; hand it out to thread i. */
nethercote1f0173b2004-02-28 15:40:36 +00002291 vg_assert(mx->__vg_m_count == 0);
sewardj018f7622002-05-15 21:13:39 +00002292 VG_(threads)[i].status = VgTs_Runnable;
2293 VG_(threads)[i].associated_cv = NULL;
2294 VG_(threads)[i].associated_mx = NULL;
nethercote1f0173b2004-02-28 15:40:36 +00002295 mx->__vg_m_owner = (/*_pthread_descr*/void*)i;
2296 mx->__vg_m_count = 1;
sewardj5f07b662002-04-23 16:52:51 +00002297 /* .m_edx already holds pth_cond_wait success value (0) */
sewardj3b5d8862002-04-20 13:53:23 +00002298
sewardj0af43bc2002-10-22 04:30:35 +00002299 VG_TRACK( post_mutex_lock, i, mx );
2300
sewardj3b5d8862002-04-20 13:53:23 +00002301 if (VG_(clo_trace_pthread_level) >= 1) {
2302 VG_(sprintf)(msg_buf, "%s cv %p: RESUME with mx %p",
2303 caller, cond, mx );
2304 print_pthread_event(i, msg_buf);
2305 }
2306
2307 } else {
2308 /* Currently held. Make thread i be blocked on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002309 vg_assert(mx->__vg_m_count > 0);
sewardj018f7622002-05-15 21:13:39 +00002310 VG_(threads)[i].status = VgTs_WaitMX;
2311 VG_(threads)[i].associated_cv = NULL;
2312 VG_(threads)[i].associated_mx = mx;
njnd3040452003-05-19 15:04:06 +00002313 SET_PTHREQ_RETVAL(i, 0); /* pth_cond_wait success value */
sewardj3b5d8862002-04-20 13:53:23 +00002314
2315 if (VG_(clo_trace_pthread_level) >= 1) {
2316 VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
2317 caller, cond, mx );
2318 print_pthread_event(i, msg_buf);
2319 }
2320
2321 }
jsgf855d93d2003-10-13 22:26:55 +00002322
sewardj3b5d8862002-04-20 13:53:23 +00002323 n_to_release--;
2324 }
2325}
2326
2327
2328static
2329void do_pthread_cond_wait ( ThreadId tid,
nethercote1f0173b2004-02-28 15:40:36 +00002330 vg_pthread_cond_t *cond,
2331 vg_pthread_mutex_t *mutex,
sewardj5f07b662002-04-23 16:52:51 +00002332 UInt ms_end )
sewardj3b5d8862002-04-20 13:53:23 +00002333{
2334 Char msg_buf[100];
2335
sewardj5f07b662002-04-23 16:52:51 +00002336 /* If ms_end == 0xFFFFFFFF, wait forever (no timeout). Otherwise,
2337 ms_end is the ending millisecond. */
2338
sewardj3b5d8862002-04-20 13:53:23 +00002339 /* pre: mutex should be a valid mutex and owned by tid. */
2340 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj5f07b662002-04-23 16:52:51 +00002341 VG_(sprintf)(msg_buf, "pthread_cond_wait cv %p, mx %p, end %d ...",
2342 cond, mutex, ms_end );
sewardj3b5d8862002-04-20 13:53:23 +00002343 print_pthread_event(tid, msg_buf);
2344 }
2345
2346 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002347 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002348 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj3b5d8862002-04-20 13:53:23 +00002349
nethercoted3693d02004-04-26 08:05:24 +00002350 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002351 VG_(record_pthread_error)( tid,
nethercoted3693d02004-04-26 08:05:24 +00002352 "pthread_cond_wait/timedwait: mutex is NULL");
2353 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
2354 return;
2355 }
2356
2357 if (cond == NULL) {
2358 VG_(record_pthread_error)( tid,
2359 "pthread_cond_wait/timedwait: cond is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002360 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002361 return;
2362 }
2363
2364 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00002365 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002366# ifndef GLIBC_2_1
sewardj3b5d8862002-04-20 13:53:23 +00002367 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002368 case PTHREAD_MUTEX_ADAPTIVE_NP:
2369# endif
sewardja1679dd2002-05-10 22:31:40 +00002370# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002371 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002372# endif
sewardj3b5d8862002-04-20 13:53:23 +00002373 case PTHREAD_MUTEX_RECURSIVE_NP:
2374 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002375 if (mutex->__vg_m_count >= 0) break;
sewardj3b5d8862002-04-20 13:53:23 +00002376 /* else fall thru */
2377 default:
njn25e49d8e72002-09-23 09:36:25 +00002378 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002379 "pthread_cond_wait/timedwait: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002380 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002381 return;
2382 }
2383
2384 /* Barf if we don't currently hold the mutex. */
nethercoted3693d02004-04-26 08:05:24 +00002385 if (mutex->__vg_m_count == 0 /* nobody holds it */) {
njn25e49d8e72002-09-23 09:36:25 +00002386 VG_(record_pthread_error)( tid,
nethercoted3693d02004-04-26 08:05:24 +00002387 "pthread_cond_wait/timedwait: mutex is unlocked");
2388 SET_PTHREQ_RETVAL(tid, VKI_EPERM);
2389 return;
2390 }
2391
2392 if ((ThreadId)mutex->__vg_m_owner != tid /* we don't hold it */) {
2393 VG_(record_pthread_error)( tid,
2394 "pthread_cond_wait/timedwait: mutex is locked by another thread");
2395 SET_PTHREQ_RETVAL(tid, VKI_EPERM);
sewardj3b5d8862002-04-20 13:53:23 +00002396 return;
2397 }
2398
thughes513197c2004-06-13 12:07:53 +00002399   if (VG_(threads)[tid].cancel_pend != NULL &&
2400 VG_(threads)[tid].cancel_st) {
2401 make_thread_jump_to_cancelhdlr ( tid );
2402 } else {
2403 /* Queue ourselves on the condition. */
2404 VG_(threads)[tid].status = VgTs_WaitCV;
2405 VG_(threads)[tid].associated_cv = cond;
2406 VG_(threads)[tid].associated_mx = mutex;
2407 VG_(threads)[tid].awaken_at = ms_end;
2408 if (ms_end != 0xFFFFFFFF)
nethercotef971ab72004-08-02 16:27:40 +00002409 add_timeout(tid, ms_end);
sewardj3b5d8862002-04-20 13:53:23 +00002410
thughes513197c2004-06-13 12:07:53 +00002411 if (VG_(clo_trace_pthread_level) >= 1) {
2412 VG_(sprintf)(msg_buf,
2413 "pthread_cond_wait cv %p, mx %p: BLOCK",
2414 cond, mutex );
2415 print_pthread_event(tid, msg_buf);
2416 }
2417
2418 /* Release the mutex. */
2419 release_one_thread_waiting_on_mutex ( mutex, "pthread_cond_wait " );
sewardj3b5d8862002-04-20 13:53:23 +00002420 }
sewardj3b5d8862002-04-20 13:53:23 +00002421}
2422
2423
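/* Signal and broadcast share one implementation: the only difference
   is how many waiters release_N_threads_waiting_on_cond is asked to
   release (1 for signal, VG_N_THREADS i.e. "all" for broadcast). */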
2424static
2425void do_pthread_cond_signal_or_broadcast ( ThreadId tid,
2426 Bool broadcast,
nethercote1f0173b2004-02-28 15:40:36 +00002427 vg_pthread_cond_t *cond )
sewardj3b5d8862002-04-20 13:53:23 +00002428{
2429 Char msg_buf[100];
2430 Char* caller
2431 = broadcast ? "pthread_cond_broadcast"
2432 : "pthread_cond_signal ";
2433
2434 if (VG_(clo_trace_pthread_level) >= 2) {
2435 VG_(sprintf)(msg_buf, "%s cv %p ...",
2436 caller, cond );
2437 print_pthread_event(tid, msg_buf);
2438 }
2439
2440 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002441 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002442 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj3b5d8862002-04-20 13:53:23 +00002443
2444 if (cond == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002445 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002446 "pthread_cond_signal/broadcast: cond is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002447 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002448 return;
2449 }
2450
2451 release_N_threads_waiting_on_cond (
2452 cond,
2453 broadcast ? VG_N_THREADS : 1,
2454 caller
2455 );
2456
njnd3040452003-05-19 15:04:06 +00002457 SET_PTHREQ_RETVAL(tid, 0); /* success */
sewardj3b5d8862002-04-20 13:53:23 +00002458}
2459
sewardj77e466c2002-04-14 02:29:29 +00002460
sewardj5f07b662002-04-23 16:52:51 +00002461/* -----------------------------------------------------------
2462 THREAD SPECIFIC DATA
2463 -------------------------------------------------------- */
2464
2465static __inline__
2466Bool is_valid_key ( ThreadKey k )
2467{
2468 /* k unsigned; hence no < 0 check */
2469 if (k >= VG_N_THREAD_KEYS) return False;
2470 if (!vg_thread_keys[k].inuse) return False;
2471 return True;
2472}
2473
sewardj00a66b12002-10-12 16:42:35 +00002474
2475/* Return in %EDX a value of 1 if the key is valid, else 0. */
2476static
2477void do_pthread_key_validate ( ThreadId tid,
2478 pthread_key_t key )
2479{
2480 Char msg_buf[100];
2481
2482 if (VG_(clo_trace_pthread_level) >= 1) {
2483 VG_(sprintf)(msg_buf, "pthread_key_validate key %p",
2484 key );
2485 print_pthread_event(tid, msg_buf);
2486 }
2487
2488 vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
2489 vg_assert(VG_(is_valid_tid)(tid)
2490 && VG_(threads)[tid].status == VgTs_Runnable);
2491
2492 if (is_valid_key((ThreadKey)key)) {
njnd3040452003-05-19 15:04:06 +00002493 SET_PTHREQ_RETVAL(tid, 1);
sewardj00a66b12002-10-12 16:42:35 +00002494 } else {
njnd3040452003-05-19 15:04:06 +00002495 SET_PTHREQ_RETVAL(tid, 0);
sewardj00a66b12002-10-12 16:42:35 +00002496 }
2497}
2498
2499
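/* pthread_key_create support: a key is simply an index into the
   fixed-size vg_thread_keys[] table; the first slot not marked inuse
   is handed out, and EAGAIN is returned once all VG_N_THREAD_KEYS
   slots are taken.  The per-thread values themselves live in the
   client's specifics array, reached via each thread's specifics_ptr. */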
sewardj5f07b662002-04-23 16:52:51 +00002500static
2501void do_pthread_key_create ( ThreadId tid,
2502 pthread_key_t* key,
2503 void (*destructor)(void*) )
2504{
2505 Int i;
2506 Char msg_buf[100];
2507
2508 if (VG_(clo_trace_pthread_level) >= 1) {
2509 VG_(sprintf)(msg_buf, "pthread_key_create *key %p, destr %p",
2510 key, destructor );
2511 print_pthread_event(tid, msg_buf);
2512 }
2513
2514 vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
sewardjb48e5002002-05-13 00:16:03 +00002515 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002516 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002517
2518 for (i = 0; i < VG_N_THREAD_KEYS; i++)
2519 if (!vg_thread_keys[i].inuse)
2520 break;
2521
2522 if (i == VG_N_THREAD_KEYS) {
jsgf855d93d2003-10-13 22:26:55 +00002523 VG_(message)(Vg_UserMsg, "pthread_key_create() asked for too many keys (more than %d): increase VG_N_THREAD_KEYS and recompile Valgrind.",
2524 VG_N_THREAD_KEYS);
2525 SET_PTHREQ_RETVAL(tid, EAGAIN);
2526 return;
sewardj5f07b662002-04-23 16:52:51 +00002527 }
2528
sewardj870497a2002-05-29 01:06:47 +00002529 vg_thread_keys[i].inuse = True;
2530 vg_thread_keys[i].destructor = destructor;
sewardjc3bd5f52002-05-01 03:24:23 +00002531
sewardj5a3798b2002-06-04 23:24:22 +00002532   /* check key for addressability */
njn72718642003-07-24 08:45:32 +00002533 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_key_create: key",
njn25e49d8e72002-09-23 09:36:25 +00002534 (Addr)key, sizeof(pthread_key_t));
sewardj5f07b662002-04-23 16:52:51 +00002535 *key = i;
njn25e49d8e72002-09-23 09:36:25 +00002536 VG_TRACK( post_mem_write, (Addr)key, sizeof(pthread_key_t) );
sewardjc3bd5f52002-05-01 03:24:23 +00002537
njnd3040452003-05-19 15:04:06 +00002538 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002539}
2540
2541
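/* pthread_key_delete support: the slot is simply marked unused and its
   destructor pointer cleared.  As in POSIX, no destructors are run and
   any values threads still hold for this key are left untouched. */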
2542static
2543void do_pthread_key_delete ( ThreadId tid, pthread_key_t key )
2544{
2545 Char msg_buf[100];
2546 if (VG_(clo_trace_pthread_level) >= 1) {
2547 VG_(sprintf)(msg_buf, "pthread_key_delete key %d",
2548 key );
2549 print_pthread_event(tid, msg_buf);
2550 }
2551
sewardjb48e5002002-05-13 00:16:03 +00002552 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002553 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002554
2555 if (!is_valid_key(key)) {
njn25e49d8e72002-09-23 09:36:25 +00002556 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002557 "pthread_key_delete: key is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002558 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj5f07b662002-04-23 16:52:51 +00002559 return;
2560 }
2561
2562 vg_thread_keys[key].inuse = False;
sewardj648b3152002-12-09 19:07:59 +00002563 vg_thread_keys[key].destructor = NULL;
njnd3040452003-05-19 15:04:06 +00002564 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002565}
2566
2567
sewardj00a66b12002-10-12 16:42:35 +00002568/* Get the .specific_ptr for a thread. Return 1 if the thread-slot
2569 isn't in use, so that client-space can scan all thread slots. 1
2570 cannot be confused with NULL or a legitimately-aligned specific_ptr
2571 value. */
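/* Note that this returns the raw pointer to the thread's whole
   specifics array; indexing it by key (as done in
   do__get_key_destr_and_spec below) is left to the caller. */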
sewardj5f07b662002-04-23 16:52:51 +00002572static
sewardj00a66b12002-10-12 16:42:35 +00002573void do_pthread_getspecific_ptr ( ThreadId tid )
sewardj5f07b662002-04-23 16:52:51 +00002574{
sewardj00a66b12002-10-12 16:42:35 +00002575 void** specifics_ptr;
2576 Char msg_buf[100];
2577
jsgf855d93d2003-10-13 22:26:55 +00002578 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj00a66b12002-10-12 16:42:35 +00002579 VG_(sprintf)(msg_buf, "pthread_getspecific_ptr" );
sewardj5f07b662002-04-23 16:52:51 +00002580 print_pthread_event(tid, msg_buf);
2581 }
2582
nethercote36881a22004-08-04 14:03:16 +00002583 vg_assert(is_valid_or_empty_tid(tid));
sewardj5f07b662002-04-23 16:52:51 +00002584
sewardj00a66b12002-10-12 16:42:35 +00002585 if (VG_(threads)[tid].status == VgTs_Empty) {
njnd3040452003-05-19 15:04:06 +00002586 SET_PTHREQ_RETVAL(tid, 1);
sewardj5f07b662002-04-23 16:52:51 +00002587 return;
2588 }
2589
sewardj00a66b12002-10-12 16:42:35 +00002590 specifics_ptr = VG_(threads)[tid].specifics_ptr;
nethercote5fd72bb2004-11-04 19:28:38 +00002591 vg_assert(specifics_ptr == NULL || IS_ALIGNED4_ADDR(specifics_ptr));
sewardj00a66b12002-10-12 16:42:35 +00002592
nethercote50397c22004-11-04 18:03:06 +00002593 SET_PTHREQ_RETVAL(tid, (UWord)specifics_ptr);
sewardj5f07b662002-04-23 16:52:51 +00002594}
2595
2596
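/* Counterpart of the above: installs a new specifics array pointer for
   the thread.  Individual per-key entries of that array are written
   elsewhere (presumably by the client-side library), not here. */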
2597static
sewardj00a66b12002-10-12 16:42:35 +00002598void do_pthread_setspecific_ptr ( ThreadId tid, void** ptr )
sewardj5f07b662002-04-23 16:52:51 +00002599{
2600 Char msg_buf[100];
2601 if (VG_(clo_trace_pthread_level) >= 1) {
sewardj00a66b12002-10-12 16:42:35 +00002602 VG_(sprintf)(msg_buf, "pthread_setspecific_ptr ptr %p",
2603 ptr );
sewardj5f07b662002-04-23 16:52:51 +00002604 print_pthread_event(tid, msg_buf);
2605 }
2606
sewardjb48e5002002-05-13 00:16:03 +00002607 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002608 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002609
sewardj00a66b12002-10-12 16:42:35 +00002610 VG_(threads)[tid].specifics_ptr = ptr;
njnd3040452003-05-19 15:04:06 +00002611 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002612}
2613
2614
sewardj870497a2002-05-29 01:06:47 +00002615/* Helper for calling destructors at thread exit.  If the key is valid,
2616   copy the thread's specific value into cu->data.function.arg and put
2617   the *key*'s destructor fn address in cu->data.function.fn, then
2618   return 0 to the caller.  Otherwise return -1 to the caller. */
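/* Illustrative only, a sketch of how a client-side thread-exit path
   might drive this (the real caller, presumably in vg_libpthread.c,
   goes through the VG_USERREQ__GET_KEY_D_AND_S request and may differ):

      CleanupEntry cu;
      for (key = 0; key < VG_N_THREAD_KEYS; key++) {
         // issue VG_USERREQ__GET_KEY_D_AND_S with args (key, &cu);
         // if the request returns 0 and cu.data.function.fn != NULL,
         // call cu.data.function.fn(cu.data.function.arg).
      }
*/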
2619static
2620void do__get_key_destr_and_spec ( ThreadId tid,
2621 pthread_key_t key,
2622 CleanupEntry* cu )
2623{
2624 Char msg_buf[100];
jsgf855d93d2003-10-13 22:26:55 +00002625 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj870497a2002-05-29 01:06:47 +00002626 VG_(sprintf)(msg_buf,
2627 "get_key_destr_and_arg (key = %d)", key );
2628 print_pthread_event(tid, msg_buf);
2629 }
2630 vg_assert(VG_(is_valid_tid)(tid));
2631 vg_assert(key >= 0 && key < VG_N_THREAD_KEYS);
njn25e49d8e72002-09-23 09:36:25 +00002632
sewardj870497a2002-05-29 01:06:47 +00002633 if (!vg_thread_keys[key].inuse) {
njnd3040452003-05-19 15:04:06 +00002634 SET_PTHREQ_RETVAL(tid, -1);
sewardj870497a2002-05-29 01:06:47 +00002635 return;
2636 }
njn72718642003-07-24 08:45:32 +00002637 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "get_key_destr_and_spec: cu",
2638 (Addr)cu, sizeof(CleanupEntry) );
sewardj00a66b12002-10-12 16:42:35 +00002639
thughes11975ff2004-06-12 12:58:22 +00002640 cu->type = VgCt_Function;
2641 cu->data.function.fn = vg_thread_keys[key].destructor;
sewardj00a66b12002-10-12 16:42:35 +00002642 if (VG_(threads)[tid].specifics_ptr == NULL) {
thughes11975ff2004-06-12 12:58:22 +00002643 cu->data.function.arg = NULL;
sewardj00a66b12002-10-12 16:42:35 +00002644 } else {
njn72718642003-07-24 08:45:32 +00002645 VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
sewardj00a66b12002-10-12 16:42:35 +00002646 "get_key_destr_and_spec: key",
2647 (Addr)(&VG_(threads)[tid].specifics_ptr[key]),
2648 sizeof(void*) );
thughes11975ff2004-06-12 12:58:22 +00002649 cu->data.function.arg = VG_(threads)[tid].specifics_ptr[key];
sewardj00a66b12002-10-12 16:42:35 +00002650 }
2651
njn25e49d8e72002-09-23 09:36:25 +00002652 VG_TRACK( post_mem_write, (Addr)cu, sizeof(CleanupEntry) );
njnd3040452003-05-19 15:04:06 +00002653 SET_PTHREQ_RETVAL(tid, 0);
sewardj870497a2002-05-29 01:06:47 +00002654}
2655
2656
sewardjb48e5002002-05-13 00:16:03 +00002657/* ---------------------------------------------------
2658 SIGNALS
2659 ------------------------------------------------ */
2660
2661/* See comment in vg_libpthread.c:pthread_sigmask() regarding
sewardj018f7622002-05-15 21:13:39 +00002662 deliberate confusion of types sigset_t and vki_sigset_t. Return 0
2663 for OK and 1 for some kind of addressing error, which the
2664 vg_libpthread.c routine turns into return values 0 and EFAULT
2665 respectively. */
sewardjb48e5002002-05-13 00:16:03 +00002666static
2667void do_pthread_sigmask ( ThreadId tid,
sewardj018f7622002-05-15 21:13:39 +00002668 Int vki_how,
nethercote73b526f2004-10-31 18:48:21 +00002669 vki_sigset_t* newmask,
2670 vki_sigset_t* oldmask )
sewardjb48e5002002-05-13 00:16:03 +00002671{
2672 Char msg_buf[100];
2673 if (VG_(clo_trace_pthread_level) >= 1) {
2674 VG_(sprintf)(msg_buf,
sewardj018f7622002-05-15 21:13:39 +00002675 "pthread_sigmask vki_how %d, newmask %p, oldmask %p",
2676 vki_how, newmask, oldmask );
sewardjb48e5002002-05-13 00:16:03 +00002677 print_pthread_event(tid, msg_buf);
2678 }
2679
2680 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002681 && VG_(threads)[tid].status == VgTs_Runnable);
sewardjb48e5002002-05-13 00:16:03 +00002682
njn25e49d8e72002-09-23 09:36:25 +00002683 if (newmask)
njn72718642003-07-24 08:45:32 +00002684 VG_TRACK( pre_mem_read, Vg_CorePThread, tid, "pthread_sigmask: newmask",
nethercote73b526f2004-10-31 18:48:21 +00002685 (Addr)newmask, sizeof(vki_sigset_t));
njn25e49d8e72002-09-23 09:36:25 +00002686 if (oldmask)
njn72718642003-07-24 08:45:32 +00002687 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_sigmask: oldmask",
nethercote73b526f2004-10-31 18:48:21 +00002688 (Addr)oldmask, sizeof(vki_sigset_t));
sewardjb48e5002002-05-13 00:16:03 +00002689
sewardj018f7622002-05-15 21:13:39 +00002690 VG_(do_pthread_sigmask_SCSS_upd) ( tid, vki_how, newmask, oldmask );
sewardjb48e5002002-05-13 00:16:03 +00002691
njn25e49d8e72002-09-23 09:36:25 +00002692 if (oldmask)
nethercote73b526f2004-10-31 18:48:21 +00002693 VG_TRACK( post_mem_write, (Addr)oldmask, sizeof(vki_sigset_t) );
sewardj3a951cf2002-05-15 22:25:47 +00002694
sewardj018f7622002-05-15 21:13:39 +00002695 /* Success. */
njnd3040452003-05-19 15:04:06 +00002696 SET_PTHREQ_RETVAL(tid, 0);
sewardjb48e5002002-05-13 00:16:03 +00002697}
2698
2699
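/* Simulated pthread_kill: validate the target thread and the signal
   number, treat signal 0 as a pure existence check, and otherwise
   forward the signal to the target thread's proxy via
   VG_(proxy_sendsig). */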
2700static
sewardj018f7622002-05-15 21:13:39 +00002701void do_pthread_kill ( ThreadId tid, /* me */
2702 ThreadId thread, /* thread to signal */
2703 Int sig )
2704{
nethercote97ccd5e2004-08-02 12:10:01 +00002705 ThreadState* tst;
sewardj018f7622002-05-15 21:13:39 +00002706 Char msg_buf[100];
2707
2708 if (VG_(clo_trace_signals) || VG_(clo_trace_pthread_level) >= 1) {
2709 VG_(sprintf)(msg_buf,
2710 "pthread_kill thread %d, signo %d",
2711 thread, sig );
2712 print_pthread_event(tid, msg_buf);
2713 }
2714
2715 vg_assert(VG_(is_valid_tid)(tid)
2716 && VG_(threads)[tid].status == VgTs_Runnable);
2717
sewardj4dced352002-06-04 22:54:20 +00002718 if (!VG_(is_valid_tid)(thread)) {
njn25e49d8e72002-09-23 09:36:25 +00002719 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002720 "pthread_kill: invalid target thread");
jsgf855d93d2003-10-13 22:26:55 +00002721 SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
2722 return;
2723 }
2724
2725 if (sig == 0) {
2726      /* OK, signal 0 just tests that the target thread exists; nothing is delivered */
2727 SET_PTHREQ_RETVAL(tid, 0);
sewardj018f7622002-05-15 21:13:39 +00002728 return;
2729 }
2730
nethercote73b526f2004-10-31 18:48:21 +00002731 if (sig < 1 || sig > _VKI_NSIG) {
jsgf855d93d2003-10-13 22:26:55 +00002732 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj018f7622002-05-15 21:13:39 +00002733 return;
2734 }
2735
nethercote97ccd5e2004-08-02 12:10:01 +00002736 tst = VG_(get_ThreadState)(thread);
2737 vg_assert(NULL != tst->proxy);
2738 VG_(proxy_sendsig)(thread, sig);
njnd3040452003-05-19 15:04:06 +00002739 SET_PTHREQ_RETVAL(tid, 0);
sewardjb48e5002002-05-13 00:16:03 +00002740}
2741
2742
sewardj2cb00342002-06-28 01:46:26 +00002743/* -----------------------------------------------------------
2744 FORK HANDLERS.
2745 -------------------------------------------------------- */
2746
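/* The four helpers below let the client-side pthread_atfork machinery
   store and retrieve the core's fork-handler stack: vg_fhstack[] holds
   up to VG_N_FORKHANDLERSTACK ForkHandlerEntry records (the
   prepare/parent/child triples) and vg_fhstack_used tracks how many
   are in use.  The core only stores and returns the entries here. */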
2747static
2748void do__set_fhstack_used ( ThreadId tid, Int n )
2749{
2750 Char msg_buf[100];
2751 if (VG_(clo_trace_sched)) {
2752 VG_(sprintf)(msg_buf, "set_fhstack_used to %d", n );
2753 print_pthread_event(tid, msg_buf);
2754 }
2755
2756 vg_assert(VG_(is_valid_tid)(tid)
2757 && VG_(threads)[tid].status == VgTs_Runnable);
2758
2759 if (n >= 0 && n < VG_N_FORKHANDLERSTACK) {
2760 vg_fhstack_used = n;
njnd3040452003-05-19 15:04:06 +00002761 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002762 } else {
njnd3040452003-05-19 15:04:06 +00002763 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002764 }
2765}
2766
2767
2768static
2769void do__get_fhstack_used ( ThreadId tid )
2770{
2771 Int n;
2772 Char msg_buf[100];
2773 if (VG_(clo_trace_sched)) {
2774 VG_(sprintf)(msg_buf, "get_fhstack_used" );
2775 print_pthread_event(tid, msg_buf);
2776 }
2777
2778 vg_assert(VG_(is_valid_tid)(tid)
2779 && VG_(threads)[tid].status == VgTs_Runnable);
2780
2781 n = vg_fhstack_used;
2782 vg_assert(n >= 0 && n < VG_N_FORKHANDLERSTACK);
njnd3040452003-05-19 15:04:06 +00002783 SET_PTHREQ_RETVAL(tid, n);
sewardj2cb00342002-06-28 01:46:26 +00002784}
2785
2786static
2787void do__set_fhstack_entry ( ThreadId tid, Int n, ForkHandlerEntry* fh )
2788{
2789 Char msg_buf[100];
2790 if (VG_(clo_trace_sched)) {
2791 VG_(sprintf)(msg_buf, "set_fhstack_entry %d to %p", n, fh );
2792 print_pthread_event(tid, msg_buf);
2793 }
2794
2795 vg_assert(VG_(is_valid_tid)(tid)
2796 && VG_(threads)[tid].status == VgTs_Runnable);
njn72718642003-07-24 08:45:32 +00002797 VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
njn25e49d8e72002-09-23 09:36:25 +00002798 "pthread_atfork: prepare/parent/child",
2799 (Addr)fh, sizeof(ForkHandlerEntry));
sewardj2cb00342002-06-28 01:46:26 +00002800
njn25e49d8e72002-09-23 09:36:25 +00002801 if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
njnd3040452003-05-19 15:04:06 +00002802 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002803 return;
2804 }
2805
2806 vg_fhstack[n] = *fh;
njnd3040452003-05-19 15:04:06 +00002807 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002808}
2809
2810
2811static
2812void do__get_fhstack_entry ( ThreadId tid, Int n, /*OUT*/
2813 ForkHandlerEntry* fh )
2814{
2815 Char msg_buf[100];
2816 if (VG_(clo_trace_sched)) {
2817 VG_(sprintf)(msg_buf, "get_fhstack_entry %d", n );
2818 print_pthread_event(tid, msg_buf);
2819 }
2820
2821 vg_assert(VG_(is_valid_tid)(tid)
2822 && VG_(threads)[tid].status == VgTs_Runnable);
njn72718642003-07-24 08:45:32 +00002823 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "fork: prepare/parent/child",
njn25e49d8e72002-09-23 09:36:25 +00002824 (Addr)fh, sizeof(ForkHandlerEntry));
sewardj2cb00342002-06-28 01:46:26 +00002825
njn25e49d8e72002-09-23 09:36:25 +00002826 if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
njnd3040452003-05-19 15:04:06 +00002827 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002828 return;
2829 }
2830
2831 *fh = vg_fhstack[n];
njnd3040452003-05-19 15:04:06 +00002832 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002833
njn25e49d8e72002-09-23 09:36:25 +00002834 VG_TRACK( post_mem_write, (Addr)fh, sizeof(ForkHandlerEntry) );
sewardj2cb00342002-06-28 01:46:26 +00002835}
2836
thughesdaa34562004-06-27 12:48:53 +00002837
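/* Report a thread's stack layout to the client: base address, usable
   size (the total size minus the client redzone and the guard area),
   and the guard size itself. */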
2838static
2839void do__get_stack_info ( ThreadId tid, ThreadId which, StackInfo* si )
2840{
2841 Char msg_buf[100];
2842
2843 vg_assert(VG_(is_valid_tid)(tid)
2844 && VG_(threads)[tid].status == VgTs_Runnable);
2845
2846 if (VG_(clo_trace_sched)) {
2847 VG_(sprintf)(msg_buf, "get_stack_info for tid %d", which );
2848 print_pthread_event(tid, msg_buf);
2849 }
2850
2851 if (!VG_(is_valid_tid)(which)) {
2852 SET_PTHREQ_RETVAL(tid, -1);
2853 return;
2854 }
2855
2856 si->base = VG_(threads)[which].stack_base;
2857 si->size = VG_(threads)[which].stack_size
2858 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
2859 - VG_(threads)[which].stack_guard_size;
2860 si->guardsize = VG_(threads)[which].stack_guard_size;
2861
2862 SET_PTHREQ_RETVAL(tid, 0);
2863}
2864
njnd3040452003-05-19 15:04:06 +00002865/* ---------------------------------------------------------------------
2866 Specifying shadow register values
2867 ------------------------------------------------------------------ */
2868
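/* These two helpers move shadow (tool-visible) register state around:
   the first writes a shadow value for the syscall return register of a
   given thread, the second reads back the shadow of the first syscall
   argument register, i.e. the shadow of the status passed to exit(). */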
2869void VG_(set_return_from_syscall_shadow) ( ThreadId tid, UInt ret_shadow )
2870{
nethercote15218bd2004-09-11 15:11:47 +00002871 VG_(set_thread_shadow_archreg)(tid, R_SYSCALL_RET, ret_shadow);
njnd3040452003-05-19 15:04:06 +00002872}
2873
2874UInt VG_(get_exit_status_shadow) ( void )
2875{
nethercote15218bd2004-09-11 15:11:47 +00002876 return VG_(get_shadow_archreg)(R_SYSCALL_ARG1);
njnd3040452003-05-19 15:04:06 +00002877}
2878
rjwalshe4e779d2004-04-16 23:02:29 +00002879void VG_(intercept_libc_freeres_wrapper)(Addr addr)
2880{
nethercotef971ab72004-08-02 16:27:40 +00002881 __libc_freeres_wrapper = addr;
rjwalshe4e779d2004-04-16 23:02:29 +00002882}
sewardj2cb00342002-06-28 01:46:26 +00002883
sewardje663cb92002-04-12 10:26:32 +00002884/* ---------------------------------------------------------------------
sewardj124ca2a2002-06-20 10:19:38 +00002885 Handle client requests.
sewardje663cb92002-04-12 10:26:32 +00002886 ------------------------------------------------------------------ */
2887
sewardj124ca2a2002-06-20 10:19:38 +00002888/* Do a client request for the thread tid. After the request, tid may
2889 or may not still be runnable; if not, the scheduler will have to
2890 choose a new thread to run.
2891*/
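/* Request layout, as used throughout the switch below: arg[0] is the
   request code and arg[1..4] are its arguments; results go back to the
   client via SET_PTHREQ_RETVAL / SET_CLREQ_RETVAL / SET_CLCALL_RETVAL
   depending on the kind of request.  For example (illustrative only),
   VG_USERREQ__PTHREAD_COND_TIMEDWAIT arrives as
      arg[0] = VG_USERREQ__PTHREAD_COND_TIMEDWAIT
      arg[1] = (vg_pthread_cond_t*)cond
      arg[2] = (vg_pthread_mutex_t*)mutex
      arg[3] = absolute timeout, as a millisecond time point
   and its result is delivered with SET_PTHREQ_RETVAL. */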
sewardje663cb92002-04-12 10:26:32 +00002892static
nethercoted1b64b22004-11-04 18:22:28 +00002893void do_client_request ( ThreadId tid, UWord* arg )
sewardje663cb92002-04-12 10:26:32 +00002894{
nethercoted1b64b22004-11-04 18:22:28 +00002895 UWord req_no = arg[0];
sewardj124ca2a2002-06-20 10:19:38 +00002896
fitzhardinge98abfc72003-12-16 02:05:15 +00002897 if (0)
nethercoted1b64b22004-11-04 18:22:28 +00002898 VG_(printf)("req no = 0x%llx, arg = %p\n", (ULong)req_no, arg);
sewardje663cb92002-04-12 10:26:32 +00002899 switch (req_no) {
2900
njn3e884182003-04-15 13:03:23 +00002901 case VG_USERREQ__CLIENT_CALL0: {
nethercoted1b64b22004-11-04 18:22:28 +00002902 UWord (*f)(void) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002903 if (f == NULL)
nethercote66b3af62004-09-11 13:06:55 +00002904 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL0: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00002905 else
2906 SET_CLCALL_RETVAL(tid, f ( ), (Addr)f);
njn3e884182003-04-15 13:03:23 +00002907 break;
2908 }
2909 case VG_USERREQ__CLIENT_CALL1: {
nethercoted1b64b22004-11-04 18:22:28 +00002910 UWord (*f)(UWord) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002911 if (f == NULL)
nethercote66b3af62004-09-11 13:06:55 +00002912 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL1: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00002913 else
2914 SET_CLCALL_RETVAL(tid, f ( arg[2] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002915 break;
2916 }
2917 case VG_USERREQ__CLIENT_CALL2: {
nethercoted1b64b22004-11-04 18:22:28 +00002918 UWord (*f)(UWord, UWord) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002919 if (f == NULL)
nethercote66b3af62004-09-11 13:06:55 +00002920 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL2: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00002921 else
2922 SET_CLCALL_RETVAL(tid, f ( arg[2], arg[3] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002923 break;
2924 }
2925 case VG_USERREQ__CLIENT_CALL3: {
nethercoted1b64b22004-11-04 18:22:28 +00002926 UWord (*f)(UWord, UWord, UWord) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002927 if (f == NULL)
nethercote66b3af62004-09-11 13:06:55 +00002928 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL3: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00002929 else
2930 SET_CLCALL_RETVAL(tid, f ( arg[2], arg[3], arg[4] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002931 break;
2932 }
2933
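   /* The CLIENT_CALLn cases above run a client-supplied function with
      n word-sized arguments on the client's behalf and return its
      result via SET_CLCALL_RETVAL; a NULL function pointer is reported
      rather than called. */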
nethercote7cc9c232004-01-21 15:08:04 +00002934 /* Note: for tools that replace malloc() et al, we want to call
njn3e884182003-04-15 13:03:23 +00002935 the replacement versions. For those that don't, we want to call
2936      VG_(cli_malloc)() et al.  We do this by calling SK_(malloc)(), which
nethercote3ced0e32004-01-26 14:50:45 +00002937      malloc-replacing tools must replace; the default definition of
2938      SK_(malloc)() simply calls VG_(cli_malloc)(). */
njn3e884182003-04-15 13:03:23 +00002939
2940 /* Note: for MALLOC and FREE, must set the appropriate "lock"... see
2941 the comment in vg_defaults.c/SK_(malloc)() for why. */
sewardj124ca2a2002-06-20 10:19:38 +00002942 case VG_USERREQ__MALLOC:
njn3e884182003-04-15 13:03:23 +00002943 VG_(sk_malloc_called_by_scheduler) = True;
njnd3040452003-05-19 15:04:06 +00002944 SET_PTHREQ_RETVAL(
nethercote50397c22004-11-04 18:03:06 +00002945 tid, (Addr)SK_(malloc) ( arg[1] )
sewardj124ca2a2002-06-20 10:19:38 +00002946 );
njn3e884182003-04-15 13:03:23 +00002947 VG_(sk_malloc_called_by_scheduler) = False;
sewardj124ca2a2002-06-20 10:19:38 +00002948 break;
2949
2950 case VG_USERREQ__FREE:
njn3e884182003-04-15 13:03:23 +00002951 VG_(sk_malloc_called_by_scheduler) = True;
njn72718642003-07-24 08:45:32 +00002952 SK_(free) ( (void*)arg[1] );
njn3e884182003-04-15 13:03:23 +00002953 VG_(sk_malloc_called_by_scheduler) = False;
njnd3040452003-05-19 15:04:06 +00002954 SET_PTHREQ_RETVAL(tid, 0); /* irrelevant */
sewardj124ca2a2002-06-20 10:19:38 +00002955 break;
2956
sewardj124ca2a2002-06-20 10:19:38 +00002957 case VG_USERREQ__PTHREAD_GET_THREADID:
njnd3040452003-05-19 15:04:06 +00002958 SET_PTHREQ_RETVAL(tid, tid);
sewardj124ca2a2002-06-20 10:19:38 +00002959 break;
2960
2961 case VG_USERREQ__RUNNING_ON_VALGRIND:
njnd3040452003-05-19 15:04:06 +00002962 SET_CLREQ_RETVAL(tid, 1);
sewardj124ca2a2002-06-20 10:19:38 +00002963 break;
2964
2965 case VG_USERREQ__GET_PTHREAD_TRACE_LEVEL:
njnd3040452003-05-19 15:04:06 +00002966 SET_PTHREQ_RETVAL(tid, VG_(clo_trace_pthread_level));
sewardj124ca2a2002-06-20 10:19:38 +00002967 break;
2968
2969 case VG_USERREQ__READ_MILLISECOND_TIMER:
njnd3040452003-05-19 15:04:06 +00002970 SET_PTHREQ_RETVAL(tid, VG_(read_millisecond_timer)());
sewardj124ca2a2002-06-20 10:19:38 +00002971 break;
2972
2973 /* Some of these may make thread tid non-runnable, but the
2974 scheduler checks for that on return from this function. */
2975 case VG_USERREQ__PTHREAD_MUTEX_LOCK:
thughese321d492004-10-17 15:00:20 +00002976 do_pthread_mutex_lock( tid, False, (void *)(arg[1]), 0xFFFFFFFF );
2977 break;
2978
2979 case VG_USERREQ__PTHREAD_MUTEX_TIMEDLOCK:
2980 do_pthread_mutex_lock( tid, False, (void *)(arg[1]), arg[2] );
sewardj124ca2a2002-06-20 10:19:38 +00002981 break;
2982
2983 case VG_USERREQ__PTHREAD_MUTEX_TRYLOCK:
thughese321d492004-10-17 15:00:20 +00002984 do_pthread_mutex_lock( tid, True, (void *)(arg[1]), 0xFFFFFFFF );
sewardj124ca2a2002-06-20 10:19:38 +00002985 break;
2986
2987 case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
2988 do_pthread_mutex_unlock( tid, (void *)(arg[1]) );
2989 break;
2990
sewardj00a66b12002-10-12 16:42:35 +00002991 case VG_USERREQ__PTHREAD_GETSPECIFIC_PTR:
2992 do_pthread_getspecific_ptr ( tid );
sewardj124ca2a2002-06-20 10:19:38 +00002993 break;
2994
2995 case VG_USERREQ__SET_CANCELTYPE:
2996 do__set_canceltype ( tid, arg[1] );
2997 break;
2998
2999 case VG_USERREQ__CLEANUP_PUSH:
3000 do__cleanup_push ( tid, (CleanupEntry*)(arg[1]) );
3001 break;
3002
3003 case VG_USERREQ__CLEANUP_POP:
3004 do__cleanup_pop ( tid, (CleanupEntry*)(arg[1]) );
3005 break;
3006
3007 case VG_USERREQ__TESTCANCEL:
3008 do__testcancel ( tid );
3009 break;
3010
sewardje663cb92002-04-12 10:26:32 +00003011 case VG_USERREQ__PTHREAD_JOIN:
3012 do_pthread_join( tid, arg[1], (void**)(arg[2]) );
3013 break;
3014
sewardj3b5d8862002-04-20 13:53:23 +00003015 case VG_USERREQ__PTHREAD_COND_WAIT:
3016 do_pthread_cond_wait( tid,
nethercote1f0173b2004-02-28 15:40:36 +00003017 (vg_pthread_cond_t *)(arg[1]),
3018 (vg_pthread_mutex_t *)(arg[2]),
sewardj5f07b662002-04-23 16:52:51 +00003019 0xFFFFFFFF /* no timeout */ );
3020 break;
3021
3022 case VG_USERREQ__PTHREAD_COND_TIMEDWAIT:
3023 do_pthread_cond_wait( tid,
nethercote1f0173b2004-02-28 15:40:36 +00003024 (vg_pthread_cond_t *)(arg[1]),
3025 (vg_pthread_mutex_t *)(arg[2]),
sewardj5f07b662002-04-23 16:52:51 +00003026 arg[3] /* timeout millisecond point */ );
sewardj3b5d8862002-04-20 13:53:23 +00003027 break;
3028
3029 case VG_USERREQ__PTHREAD_COND_SIGNAL:
3030 do_pthread_cond_signal_or_broadcast(
3031 tid,
3032 False, /* signal, not broadcast */
nethercote1f0173b2004-02-28 15:40:36 +00003033 (vg_pthread_cond_t *)(arg[1]) );
sewardj3b5d8862002-04-20 13:53:23 +00003034 break;
3035
3036 case VG_USERREQ__PTHREAD_COND_BROADCAST:
3037 do_pthread_cond_signal_or_broadcast(
3038 tid,
3039 True, /* broadcast, not signal */
nethercote1f0173b2004-02-28 15:40:36 +00003040 (vg_pthread_cond_t *)(arg[1]) );
sewardj3b5d8862002-04-20 13:53:23 +00003041 break;
3042
sewardj00a66b12002-10-12 16:42:35 +00003043 case VG_USERREQ__PTHREAD_KEY_VALIDATE:
3044 do_pthread_key_validate ( tid,
3045 (pthread_key_t)(arg[1]) );
3046 break;
3047
sewardj5f07b662002-04-23 16:52:51 +00003048 case VG_USERREQ__PTHREAD_KEY_CREATE:
3049 do_pthread_key_create ( tid,
3050 (pthread_key_t*)(arg[1]),
3051 (void(*)(void*))(arg[2]) );
3052 break;
3053
3054 case VG_USERREQ__PTHREAD_KEY_DELETE:
3055 do_pthread_key_delete ( tid,
3056 (pthread_key_t)(arg[1]) );
3057 break;
3058
sewardj00a66b12002-10-12 16:42:35 +00003059 case VG_USERREQ__PTHREAD_SETSPECIFIC_PTR:
3060 do_pthread_setspecific_ptr ( tid,
3061 (void**)(arg[1]) );
sewardj5f07b662002-04-23 16:52:51 +00003062 break;
3063
sewardjb48e5002002-05-13 00:16:03 +00003064 case VG_USERREQ__PTHREAD_SIGMASK:
3065 do_pthread_sigmask ( tid,
3066 arg[1],
nethercote73b526f2004-10-31 18:48:21 +00003067 (vki_sigset_t*)(arg[2]),
3068 (vki_sigset_t*)(arg[3]) );
sewardjb48e5002002-05-13 00:16:03 +00003069 break;
3070
sewardj018f7622002-05-15 21:13:39 +00003071 case VG_USERREQ__PTHREAD_KILL:
3072 do_pthread_kill ( tid, arg[1], arg[2] );
3073 break;
3074
sewardjff42d1d2002-05-22 13:17:31 +00003075 case VG_USERREQ__PTHREAD_YIELD:
3076 do_pthread_yield ( tid );
sewardj18a62ff2002-07-12 22:30:51 +00003077 /* On return from do_client_request(), the scheduler will
3078 select a new thread to run. */
sewardjff42d1d2002-05-22 13:17:31 +00003079 break;
sewardj018f7622002-05-15 21:13:39 +00003080
sewardj7989d0c2002-05-28 11:00:01 +00003081 case VG_USERREQ__SET_CANCELSTATE:
3082 do__set_cancelstate ( tid, arg[1] );
3083 break;
3084
sewardj7989d0c2002-05-28 11:00:01 +00003085 case VG_USERREQ__SET_OR_GET_DETACH:
3086 do__set_or_get_detach ( tid, arg[1], arg[2] );
3087 break;
3088
3089 case VG_USERREQ__SET_CANCELPEND:
3090 do__set_cancelpend ( tid, arg[1], (void(*)(void*))arg[2] );
3091 break;
3092
3093 case VG_USERREQ__WAIT_JOINER:
3094 do__wait_joiner ( tid, (void*)arg[1] );
3095 break;
3096
3097 case VG_USERREQ__QUIT:
3098 do__quit ( tid );
3099 break;
3100
3101 case VG_USERREQ__APPLY_IN_NEW_THREAD:
3102 do__apply_in_new_thread ( tid, (void*(*)(void*))arg[1],
thughesdaa34562004-06-27 12:48:53 +00003103 (void*)arg[2], (StackInfo*)(arg[3]) );
sewardj7989d0c2002-05-28 11:00:01 +00003104 break;
3105
sewardj870497a2002-05-29 01:06:47 +00003106 case VG_USERREQ__GET_KEY_D_AND_S:
3107 do__get_key_destr_and_spec ( tid,
3108 (pthread_key_t)arg[1],
3109 (CleanupEntry*)arg[2] );
3110 break;
3111
sewardjef037c72002-05-30 00:40:03 +00003112 case VG_USERREQ__NUKE_OTHER_THREADS:
3113 VG_(nuke_all_threads_except) ( tid );
njnd3040452003-05-19 15:04:06 +00003114 SET_PTHREQ_RETVAL(tid, 0);
sewardjef037c72002-05-30 00:40:03 +00003115 break;
3116
sewardj4dced352002-06-04 22:54:20 +00003117 case VG_USERREQ__PTHREAD_ERROR:
njn25e49d8e72002-09-23 09:36:25 +00003118 VG_(record_pthread_error)( tid, (Char*)(arg[1]) );
njnd3040452003-05-19 15:04:06 +00003119 SET_PTHREQ_RETVAL(tid, 0);
sewardj4dced352002-06-04 22:54:20 +00003120 break;
3121
sewardj2cb00342002-06-28 01:46:26 +00003122 case VG_USERREQ__SET_FHSTACK_USED:
3123 do__set_fhstack_used( tid, (Int)(arg[1]) );
3124 break;
3125
3126 case VG_USERREQ__GET_FHSTACK_USED:
3127 do__get_fhstack_used( tid );
3128 break;
3129
3130 case VG_USERREQ__SET_FHSTACK_ENTRY:
3131 do__set_fhstack_entry( tid, (Int)(arg[1]),
3132 (ForkHandlerEntry*)(arg[2]) );
3133 break;
3134
3135 case VG_USERREQ__GET_FHSTACK_ENTRY:
3136 do__get_fhstack_entry( tid, (Int)(arg[1]),
3137 (ForkHandlerEntry*)(arg[2]) );
3138 break;
3139
sewardj77e466c2002-04-14 02:29:29 +00003140 case VG_USERREQ__SIGNAL_RETURNS:
3141 handle_signal_return(tid);
3142 break;
fitzhardinge98abfc72003-12-16 02:05:15 +00003143
thughesdaa34562004-06-27 12:48:53 +00003144 case VG_USERREQ__GET_STACK_INFO:
3145 do__get_stack_info( tid, (Int)(arg[1]), (StackInfo*)(arg[2]) );
3146 break;
3147
fitzhardinge98abfc72003-12-16 02:05:15 +00003148
3149 case VG_USERREQ__GET_SIGRT_MIN:
3150 SET_PTHREQ_RETVAL(tid, VG_(sig_rtmin));
3151 break;
3152
3153 case VG_USERREQ__GET_SIGRT_MAX:
3154 SET_PTHREQ_RETVAL(tid, VG_(sig_rtmax));
3155 break;
3156
3157 case VG_USERREQ__ALLOC_RTSIG:
3158 SET_PTHREQ_RETVAL(tid, VG_(sig_alloc_rtsig)((Int)arg[1]));
3159 break;
3160
fitzhardinge39de4b42003-10-31 07:12:21 +00003161 case VG_USERREQ__PRINTF: {
3162 int count =
nethercote3e901a22004-09-11 13:17:02 +00003163 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] );
fitzhardinge39de4b42003-10-31 07:12:21 +00003164 SET_CLREQ_RETVAL( tid, count );
3165 break; }
3166
fitzhardinge98abfc72003-12-16 02:05:15 +00003167
fitzhardinge39de4b42003-10-31 07:12:21 +00003168 case VG_USERREQ__INTERNAL_PRINTF: {
3169 int count =
nethercote3e901a22004-09-11 13:17:02 +00003170 VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (void*)arg[2] );
fitzhardinge39de4b42003-10-31 07:12:21 +00003171 SET_CLREQ_RETVAL( tid, count );
3172 break; }
3173
3174 case VG_USERREQ__PRINTF_BACKTRACE: {
3175 ExeContext *e = VG_(get_ExeContext)( tid );
3176 int count =
nethercote3e901a22004-09-11 13:17:02 +00003177 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] );
nethercote86c5dcb2004-09-05 21:32:37 +00003178 VG_(mini_stack_dump)(e->ips, VG_(clo_backtrace_size));
fitzhardinge39de4b42003-10-31 07:12:21 +00003179 SET_CLREQ_RETVAL( tid, count );
3180 break; }
3181
3182 case VG_USERREQ__INTERNAL_PRINTF_BACKTRACE: {
3183 ExeContext *e = VG_(get_ExeContext)( tid );
3184 int count =
nethercote3e901a22004-09-11 13:17:02 +00003185 VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (void*)arg[2] );
nethercote86c5dcb2004-09-05 21:32:37 +00003186 VG_(mini_stack_dump)(e->ips, VG_(clo_backtrace_size));
fitzhardinge39de4b42003-10-31 07:12:21 +00003187 SET_CLREQ_RETVAL( tid, count );
3188 break; }
3189
fitzhardinge98abfc72003-12-16 02:05:15 +00003190 case VG_USERREQ__GET_MALLOCFUNCS: {
3191 struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];
3192
3193 info->sk_malloc = (Addr)SK_(malloc);
3194 info->sk_calloc = (Addr)SK_(calloc);
3195 info->sk_realloc = (Addr)SK_(realloc);
3196 info->sk_memalign = (Addr)SK_(memalign);
3197 info->sk___builtin_new = (Addr)SK_(__builtin_new);
3198 info->sk___builtin_vec_new = (Addr)SK_(__builtin_vec_new);
3199 info->sk_free = (Addr)SK_(free);
3200 info->sk___builtin_delete = (Addr)SK_(__builtin_delete);
3201 info->sk___builtin_vec_delete = (Addr)SK_(__builtin_vec_delete);
3202
3203 info->arena_payload_szB = (Addr)VG_(arena_payload_szB);
3204
3205 info->clo_sloppy_malloc = VG_(clo_sloppy_malloc);
3206 info->clo_trace_malloc = VG_(clo_trace_malloc);
3207
3208 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
3209
3210 break;
3211 }
3212
njn25e49d8e72002-09-23 09:36:25 +00003213 /* Requests from the client program */
3214
3215 case VG_USERREQ__DISCARD_TRANSLATIONS:
3216 if (VG_(clo_verbosity) > 2)
3217 VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
3218 " addr %p, len %d\n",
3219 (void*)arg[1], arg[2] );
3220
sewardj97ad5522003-05-04 12:32:56 +00003221 VG_(invalidate_translations)( arg[1], arg[2], True );
njn25e49d8e72002-09-23 09:36:25 +00003222
njnd3040452003-05-19 15:04:06 +00003223 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
njn25e49d8e72002-09-23 09:36:25 +00003224 break;
3225
njn47363ab2003-04-21 13:24:40 +00003226 case VG_USERREQ__COUNT_ERRORS:
nethercotef2b11482004-08-02 12:36:01 +00003227 SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() );
njn47363ab2003-04-21 13:24:40 +00003228 break;
3229
sewardje663cb92002-04-12 10:26:32 +00003230 default:
njn25e49d8e72002-09-23 09:36:25 +00003231 if (VG_(needs).client_requests) {
nethercoted1b64b22004-11-04 18:22:28 +00003232 UWord ret;
sewardj34042512002-10-22 04:14:35 +00003233
njn25e49d8e72002-09-23 09:36:25 +00003234 if (VG_(clo_verbosity) > 2)
fitzhardinge98abfc72003-12-16 02:05:15 +00003235 VG_(printf)("client request: code %x, addr %p, len %d\n",
njn25e49d8e72002-09-23 09:36:25 +00003236 arg[0], (void*)arg[1], arg[2] );
3237
njn72718642003-07-24 08:45:32 +00003238 if (SK_(handle_client_request) ( tid, arg, &ret ))
njnd3040452003-05-19 15:04:06 +00003239 SET_CLREQ_RETVAL(tid, ret);
njn25e49d8e72002-09-23 09:36:25 +00003240 } else {
sewardj34042512002-10-22 04:14:35 +00003241 static Bool whined = False;
3242
3243 if (!whined) {
nethercote7cc9c232004-01-21 15:08:04 +00003244 // Allow for requests in core, but defined by tools, which
njnd7994182003-10-02 13:44:04 +00003245 // have 0 and 0 in their two high bytes.
3246 Char c1 = (arg[0] >> 24) & 0xff;
3247 Char c2 = (arg[0] >> 16) & 0xff;
3248 if (c1 == 0) c1 = '_';
3249 if (c2 == 0) c2 = '_';
sewardj34042512002-10-22 04:14:35 +00003250 VG_(message)(Vg_UserMsg, "Warning:\n"
njnd7994182003-10-02 13:44:04 +00003251 " unhandled client request: 0x%x (%c%c+0x%x). Perhaps\n"
3252 " VG_(needs).client_requests should be set?\n",
3253 arg[0], c1, c2, arg[0] & 0xffff);
sewardj34042512002-10-22 04:14:35 +00003254 whined = True;
3255 }
njn25e49d8e72002-09-23 09:36:25 +00003256 }
sewardje663cb92002-04-12 10:26:32 +00003257 break;
3258 }
3259}
3260
3261
sewardj6072c362002-04-19 14:40:57 +00003262/* ---------------------------------------------------------------------
3263 Sanity checking.
3264 ------------------------------------------------------------------ */
3265
3266/* Internal consistency checks on the sched/pthread structures. */
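/* Checks performed: the timeout queue is sorted by wakeup time and only
   names valid (or empty) threads; threads in VgTs_WaitMX / VgTs_WaitCV
   have consistent associated_mx / associated_cv fields; per-thread
   stack usage is (optionally) checked against the available stack; and
   unused thread keys have no destructor registered. */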
3267static
3268void scheduler_sanity ( void )
3269{
nethercote1f0173b2004-02-28 15:40:36 +00003270 vg_pthread_mutex_t* mx;
3271 vg_pthread_cond_t* cv;
sewardj6072c362002-04-19 14:40:57 +00003272 Int i;
jsgf855d93d2003-10-13 22:26:55 +00003273 struct timeout* top;
3274 UInt lasttime = 0;
3275
3276 for(top = timeouts; top != NULL; top = top->next) {
3277 vg_assert(top->time >= lasttime);
nethercote36881a22004-08-04 14:03:16 +00003278 vg_assert(is_valid_or_empty_tid(top->tid));
jsgf855d93d2003-10-13 22:26:55 +00003279
3280#if 0
3281 /* assert timeout entry is either stale, or associated with a
3282 thread in the right state
3283
3284 XXX disable for now - can be stale, but times happen to match
3285 */
3286 vg_assert(VG_(threads)[top->tid].awaken_at != top->time ||
3287 VG_(threads)[top->tid].status == VgTs_Sleeping ||
thughese321d492004-10-17 15:00:20 +00003288 VG_(threads)[top->tid].status == VgTs_WaitMX ||
jsgf855d93d2003-10-13 22:26:55 +00003289 VG_(threads)[top->tid].status == VgTs_WaitCV);
3290#endif
3291
3292 lasttime = top->time;
3293 }
sewardj5f07b662002-04-23 16:52:51 +00003294
sewardj6072c362002-04-19 14:40:57 +00003295 /* VG_(printf)("scheduler_sanity\n"); */
3296 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00003297 mx = VG_(threads)[i].associated_mx;
3298 cv = VG_(threads)[i].associated_cv;
3299 if (VG_(threads)[i].status == VgTs_WaitMX) {
sewardjbf290b92002-05-01 02:28:01 +00003300 /* If we're waiting on a MX: (1) the mx is not null, (2, 3)
3301 it's actually held by someone, since otherwise this thread
3302         is deadlocked, (4) the mutex's owner is not us (unless we are in
3303         a timed lock, i.e. awaken_at != 0xFFFFFFFF), since otherwise this
3304         thread is also deadlocked.  The logic in do_pthread_mutex_lock
3305         rejects attempts by a thread to lock a (non-recursive) mutex
3306         which it already owns.
sewardj05553872002-04-20 20:53:17 +00003306
sewardjbf290b92002-05-01 02:28:01 +00003307 (2) has been seen to fail sometimes. I don't know why.
3308 Possibly to do with signals. */
sewardj3b5d8862002-04-20 13:53:23 +00003309 vg_assert(cv == NULL);
sewardj05553872002-04-20 20:53:17 +00003310 /* 1 */ vg_assert(mx != NULL);
nethercote1f0173b2004-02-28 15:40:36 +00003311 /* 2 */ vg_assert(mx->__vg_m_count > 0);
3312 /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__vg_m_owner));
thughese321d492004-10-17 15:00:20 +00003313 /* 4 */ vg_assert((UInt)i != (ThreadId)mx->__vg_m_owner ||
3314 VG_(threads)[i].awaken_at != 0xFFFFFFFF);
sewardj3b5d8862002-04-20 13:53:23 +00003315 } else
sewardj018f7622002-05-15 21:13:39 +00003316 if (VG_(threads)[i].status == VgTs_WaitCV) {
sewardj3b5d8862002-04-20 13:53:23 +00003317 vg_assert(cv != NULL);
3318 vg_assert(mx != NULL);
sewardj6072c362002-04-19 14:40:57 +00003319 } else {
thughesf7269232004-10-16 16:17:06 +00003320 vg_assert(cv == NULL);
3321 vg_assert(mx == NULL);
sewardj6072c362002-04-19 14:40:57 +00003322 }
sewardjbf290b92002-05-01 02:28:01 +00003323
sewardj018f7622002-05-15 21:13:39 +00003324 if (VG_(threads)[i].status != VgTs_Empty) {
sewardjbf290b92002-05-01 02:28:01 +00003325 Int
sewardj018f7622002-05-15 21:13:39 +00003326 stack_used = (Addr)VG_(threads)[i].stack_highest_word
nethercoteb8ef9d82004-09-05 22:02:33 +00003327 - (Addr)ARCH_STACK_PTR(VG_(threads)[i].arch);
thughesdaa34562004-06-27 12:48:53 +00003328 Int
3329 stack_avail = VG_(threads)[i].stack_size
3330 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
3331 - VG_(threads)[i].stack_guard_size;
fitzhardinge98c4dc02004-03-16 08:27:29 +00003332 /* This test is a bit bogus - it doesn't take into account
3333 alternate signal stacks, for a start. Also, if a thread
3334         has its stack pointer somewhere strange, killing Valgrind
3335 isn't the right answer. */
3336 if (0 && i > 1 /* not the root thread */
thughesdaa34562004-06-27 12:48:53 +00003337 && stack_used >= stack_avail) {
sewardjbf290b92002-05-01 02:28:01 +00003338 VG_(message)(Vg_UserMsg,
njn25e49d8e72002-09-23 09:36:25 +00003339 "Error: STACK OVERFLOW: "
sewardjbf290b92002-05-01 02:28:01 +00003340 "thread %d: stack used %d, available %d",
thughesdaa34562004-06-27 12:48:53 +00003341 i, stack_used, stack_avail );
sewardjbf290b92002-05-01 02:28:01 +00003342 VG_(message)(Vg_UserMsg,
3343 "Terminating Valgrind. If thread(s) "
3344 "really need more stack, increase");
3345 VG_(message)(Vg_UserMsg,
rjwalsh7109a8c2004-09-02 00:31:02 +00003346 "VG_PTHREAD_STACK_SIZE in core.h and recompile.");
sewardjbf290b92002-05-01 02:28:01 +00003347 VG_(exit)(1);
3348 }
3349 }
sewardj6072c362002-04-19 14:40:57 +00003350 }
sewardj5f07b662002-04-23 16:52:51 +00003351
3352 for (i = 0; i < VG_N_THREAD_KEYS; i++) {
3353 if (!vg_thread_keys[i].inuse)
3354 vg_assert(vg_thread_keys[i].destructor == NULL);
3355 }
sewardj6072c362002-04-19 14:40:57 +00003356}
3357
3358
sewardje663cb92002-04-12 10:26:32 +00003359/*--------------------------------------------------------------------*/
3360/*--- end vg_scheduler.c ---*/
3361/*--------------------------------------------------------------------*/