blob: 71a979638deb8c01332ab9e22ea429d9933648ca [file] [log] [blame]
sewardje663cb92002-04-12 10:26:32 +00001
2/*--------------------------------------------------------------------*/
3/*--- A user-space pthreads implementation. vg_scheduler.c ---*/
4/*--------------------------------------------------------------------*/
5
6/*
njnc9539842002-10-02 13:26:35 +00007 This file is part of Valgrind, an extensible x86 protected-mode
8 emulator for monitoring program execution on x86-Unixes.
sewardje663cb92002-04-12 10:26:32 +00009
nethercotebb1c9912004-01-04 16:43:23 +000010 Copyright (C) 2000-2004 Julian Seward
sewardje663cb92002-04-12 10:26:32 +000011 jseward@acm.org
sewardje663cb92002-04-12 10:26:32 +000012
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
27
njn25e49d8e72002-09-23 09:36:25 +000028 The GNU General Public License is contained in the file COPYING.
sewardje663cb92002-04-12 10:26:32 +000029*/
30
njn25e49d8e72002-09-23 09:36:25 +000031#include "valgrind.h" /* for VG_USERREQ__RUNNING_ON_VALGRIND and
njn47363ab2003-04-21 13:24:40 +000032 VG_USERREQ__DISCARD_TRANSLATIONS, and others */
nethercotef1e5e152004-09-01 23:58:16 +000033#include "core.h"
sewardje663cb92002-04-12 10:26:32 +000034
sewardje663cb92002-04-12 10:26:32 +000035
/* ---------------------------------------------------------------------
   Types and globals for the scheduler.
   ------------------------------------------------------------------ */

/* ThreadId and ThreadState are defined in core.h. */

/* Globals.  A statically allocated array of threads.  NOTE: [0] is
   never used, to simplify the simulation of initialisers for
   LinuxThreads. */
ThreadState VG_(threads)[VG_N_THREADS];

/* The process' fork-handler stack; vg_fhstack_used counts the valid
   entries.  (Presumably populated via pthread_atfork handlers --
   registration code is elsewhere in the file/project.) */
static Int vg_fhstack_used = 0;
static ForkHandlerEntry vg_fhstack[VG_N_FORKHANDLERSTACK];


/* The tid of the thread currently in VG_(baseBlock), or
   VG_INVALID_THREADID when no thread's state is loaded there. */
static ThreadId vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;

/* The tid either currently in baseBlock, or which was in baseBlock
   before being saved out; this is only updated when a new thread is
   loaded into the baseBlock. */
static ThreadId vg_tid_last_in_baseBlock = VG_INVALID_THREADID;

/* vg_oursignalhandler() might longjmp().  Here's the jmp_buf. */
static jmp_buf scheduler_jmpbuf;
/* This says whether scheduler_jmpbuf is actually valid.  Needed so
   that our signal handler doesn't longjmp when the buffer isn't
   actually valid. */
static Bool scheduler_jmpbuf_valid = False;
/* ... and if so, here's the signal which caused it to do so. */
static Int longjmpd_on_signal;
/* If the current thread gets a synchronous unresumable signal, then
   its details are placed here by the signal handler, to be passed to
   the application's signal handler later on. */
static vki_ksiginfo_t unresumable_siginfo;

/* If != VG_INVALID_THREADID, this is the preferred tid to schedule */
static ThreadId prefer_sched = VG_INVALID_THREADID;

/* Keeping track of keys (pthread-style thread-specific-data keys). */
typedef
   struct {
      /* Has this key been allocated ? */
      Bool inuse;
      /* If .inuse==True, records the address of the associated
         destructor, or NULL if none. */
      void (*destructor)(void*);
   }
   ThreadKeyState;

/* And our array of thread keys. */
static ThreadKeyState vg_thread_keys[VG_N_THREAD_KEYS];

typedef UInt ThreadKey;

/* Address of the __libc_freeres() wrapper.  The scheduler does need
   to know the address of it so it can be called at program exit. */
static Addr __libc_freeres_wrapper;

/* Forwards */
static void do_client_request ( ThreadId tid );
static void scheduler_sanity ( void );
static void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid );
static void maybe_rendezvous_joiners_and_joinees ( void );

/* Stats, reported by VG_(print_scheduler_stats)(). */
static UInt n_scheduling_events_MINOR = 0;
static UInt n_scheduling_events_MAJOR = 0;
106void VG_(print_scheduler_stats)(void)
107{
108 VG_(message)(Vg_DebugMsg,
109 " %d/%d major/minor sched events.",
110 n_scheduling_events_MAJOR, n_scheduling_events_MINOR);
111}
112
sewardje663cb92002-04-12 10:26:32 +0000113/* ---------------------------------------------------------------------
114 Helper functions for the scheduler.
115 ------------------------------------------------------------------ */
116
sewardjb48e5002002-05-13 00:16:03 +0000117__inline__
118Bool VG_(is_valid_tid) ( ThreadId tid )
sewardj604ec3c2002-04-18 22:38:41 +0000119{
120 /* tid is unsigned, hence no < 0 test. */
sewardj6072c362002-04-19 14:40:57 +0000121 if (tid == 0) return False;
sewardj604ec3c2002-04-18 22:38:41 +0000122 if (tid >= VG_N_THREADS) return False;
sewardj018f7622002-05-15 21:13:39 +0000123 if (VG_(threads)[tid].status == VgTs_Empty) return False;
124 return True;
125}
126
127
128__inline__
nethercote36881a22004-08-04 14:03:16 +0000129Bool is_valid_or_empty_tid ( ThreadId tid )
sewardj018f7622002-05-15 21:13:39 +0000130{
131 /* tid is unsigned, hence no < 0 test. */
132 if (tid == 0) return False;
133 if (tid >= VG_N_THREADS) return False;
sewardj604ec3c2002-04-18 22:38:41 +0000134 return True;
135}
136
137
/* For constructing error messages only: try and identify a thread
   whose stack satisfies the predicate p, or return VG_INVALID_THREADID
   if none do.  A small complication is dealing with any currently
   VG_(baseBlock)-resident thread: its live stack pointer is in
   VG_(baseBlock), not in its saved arch state.
*/
ThreadId VG_(first_matching_thread_stack)
        ( Bool (*p) ( Addr stack_min, Addr stack_max, void* d ),
          void* d )
{
   ThreadId tid, tid_to_skip;

   tid_to_skip = VG_INVALID_THREADID;

   /* First check to see if there's a currently-loaded thread in
      VG_(baseBlock); for that thread read the stack pointer directly
      out of the baseBlock. */
   if (vg_tid_currently_in_baseBlock != VG_INVALID_THREADID) {
      tid = vg_tid_currently_in_baseBlock;
      if ( p ( VG_(baseBlock)[VGOFF_STACK_PTR],
               VG_(threads)[tid].stack_highest_word, d ) )
         return tid;
      else
         tid_to_skip = tid;   /* don't test it again with stale saved state */
   }

   /* Now scan all remaining threads, using their saved stack pointers. */
   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (VG_(threads)[tid].status == VgTs_Empty) continue;
      if (tid == tid_to_skip) continue;
      if ( p ( ARCH_STACK_PTR(VG_(threads)[tid].arch),
               VG_(threads)[tid].stack_highest_word, d ) )
         return tid;
   }
   return VG_INVALID_THREADID;
}
171
172
/* Print the scheduler status: one entry per non-empty thread slot,
   showing its status, associated mutex/condvar, and a backtrace taken
   from its saved register state. */
void VG_(pp_sched_status) ( void )
{
   Int i;
   VG_(printf)("\nsched status:\n");
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty) continue;
      VG_(printf)("\nThread %d: status = ", i);
      switch (VG_(threads)[i].status) {
         case VgTs_Runnable:   VG_(printf)("Runnable"); break;
         case VgTs_WaitJoinee: VG_(printf)("WaitJoinee(%d)",
                                           VG_(threads)[i].joiner_jee_tid);
                               break;
         case VgTs_WaitJoiner: VG_(printf)("WaitJoiner"); break;
         case VgTs_Sleeping:   VG_(printf)("Sleeping"); break;
         case VgTs_WaitMX:     VG_(printf)("WaitMX"); break;
         case VgTs_WaitCV:     VG_(printf)("WaitCV"); break;
         case VgTs_WaitSys:    VG_(printf)("WaitSys"); break;
         default: VG_(printf)("???"); break;   /* unknown status value */
      }
      VG_(printf)(", associated_mx = %p, associated_cv = %p\n",
                  VG_(threads)[i].associated_mx,
                  VG_(threads)[i].associated_cv );
      /* Backtrace from the thread's saved IP/FP/SP. */
      VG_(pp_ExeContext)(
         VG_(get_ExeContext2)( ARCH_INSTR_PTR(VG_(threads)[i].arch),
                               ARCH_FRAME_PTR(VG_(threads)[i].arch),
                               ARCH_STACK_PTR(VG_(threads)[i].arch),
                               VG_(threads)[i].stack_highest_word)
      );
   }
   VG_(printf)("\n");
}
205
sewardje663cb92002-04-12 10:26:32 +0000206static
207void print_sched_event ( ThreadId tid, Char* what )
208{
sewardj45b4b372002-04-16 22:50:32 +0000209 VG_(message)(Vg_DebugMsg, " SCHED[%d]: %s", tid, what );
sewardj8937c812002-04-12 20:12:20 +0000210}
211
sewardj8937c812002-04-12 20:12:20 +0000212static
213void print_pthread_event ( ThreadId tid, Char* what )
214{
215 VG_(message)(Vg_DebugMsg, "PTHREAD[%d]: %s", tid, what );
sewardje663cb92002-04-12 10:26:32 +0000216}
217
sewardje663cb92002-04-12 10:26:32 +0000218static
219Char* name_of_sched_event ( UInt event )
220{
221 switch (event) {
sewardje663cb92002-04-12 10:26:32 +0000222 case VG_TRC_EBP_JMP_SYSCALL: return "SYSCALL";
223 case VG_TRC_EBP_JMP_CLIENTREQ: return "CLIENTREQ";
fitzhardingea02f8812003-12-18 09:06:09 +0000224 case VG_TRC_EBP_JMP_YIELD: return "YIELD";
sewardje663cb92002-04-12 10:26:32 +0000225 case VG_TRC_INNER_COUNTERZERO: return "COUNTERZERO";
226 case VG_TRC_INNER_FASTMISS: return "FASTMISS";
227 case VG_TRC_UNRESUMABLE_SIGNAL: return "FATALSIGNAL";
228 default: return "??UNKNOWN??";
229 }
230}
231
232
sewardje663cb92002-04-12 10:26:32 +0000233/* Allocate a completely empty ThreadState record. */
234static
235ThreadId vg_alloc_ThreadState ( void )
236{
237 Int i;
sewardj6072c362002-04-19 14:40:57 +0000238 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +0000239 if (VG_(threads)[i].status == VgTs_Empty)
sewardje663cb92002-04-12 10:26:32 +0000240 return i;
241 }
242 VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
243 VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
njne427a662002-10-02 11:08:25 +0000244 VG_(core_panic)("VG_N_THREADS is too low");
sewardje663cb92002-04-12 10:26:32 +0000245 /*NOTREACHED*/
246}
247
jsgf855d93d2003-10-13 22:26:55 +0000248ThreadState *VG_(get_ThreadState)(ThreadId tid)
249{
250 vg_assert(tid >= 0 && tid < VG_N_THREADS);
251 return &VG_(threads)[tid];
252}
253
njn72718642003-07-24 08:45:32 +0000254Bool VG_(is_running_thread)(ThreadId tid)
njn25e49d8e72002-09-23 09:36:25 +0000255{
njn72718642003-07-24 08:45:32 +0000256 ThreadId curr = VG_(get_current_tid)();
257 return (curr == tid && VG_INVALID_THREADID != tid);
njn25e49d8e72002-09-23 09:36:25 +0000258}
sewardje663cb92002-04-12 10:26:32 +0000259
sewardj1e8cdc92002-04-18 11:37:52 +0000260ThreadId VG_(get_current_tid) ( void )
261{
sewardjb52a1b02002-10-23 21:38:22 +0000262 if (!VG_(is_valid_tid)(vg_tid_currently_in_baseBlock))
263 return VG_INVALID_THREADID;
sewardj1e8cdc92002-04-18 11:37:52 +0000264 return vg_tid_currently_in_baseBlock;
265}
266
/* The tid currently in baseBlock or, if none is loaded, the last one
   that was.  Asserts the invariant that the "current" tid is either
   the same as the "last" tid or invalid (i.e. saved out). */
ThreadId VG_(get_current_or_recent_tid) ( void )
{
   vg_assert(vg_tid_currently_in_baseBlock == vg_tid_last_in_baseBlock ||
             vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);
   vg_assert(VG_(is_valid_tid)(vg_tid_last_in_baseBlock));

   return vg_tid_last_in_baseBlock;
}
275
/* Copy the saved state of a thread into VG_(baseBlock), ready for it
   to be run.  Requires that no other thread is currently resident. */
static void load_thread_state ( ThreadId tid )
{
   vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);

   VGA_(load_state)(&VG_(threads)[tid].arch, tid);

   /* Record residency both as "current" and "most recent". */
   vg_tid_currently_in_baseBlock = tid;
   vg_tid_last_in_baseBlock = tid;
}
287
288
/* Copy the state of a thread from VG_(baseBlock), presumably after it
   has been descheduled.  For sanity-check purposes, fill the vacated
   VG_(baseBlock) with garbage so as to make the system more likely to
   fail quickly if we erroneously continue to poke around inside
   VG_(baseBlock) without first doing a load_thread_state().
   (The garbage-filling itself is done inside VGA_(save_state) --
   presumably; confirm in the arch-specific code.)
*/
static void save_thread_state ( ThreadId tid )
{
   vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);

   VGA_(save_state)(&VG_(threads)[tid].arch, tid);

   /* baseBlock is now vacant; note vg_tid_last_in_baseBlock is
      deliberately left pointing at tid. */
   vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;
}
303
304
/* Called from the signal machinery: abandon the client code currently
   running and longjmp back into the scheduler loop, stashing the
   signal details for later delivery.  Does nothing (returns normally)
   if client code is not currently running, i.e. the jmp_buf is not
   valid. */
void VG_(resume_scheduler)(Int sigNo, vki_ksiginfo_t *info)
{
   if (scheduler_jmpbuf_valid) {
      /* Can't continue; must longjmp back to the scheduler and thus
         enter the sighandler immediately. */
      VG_(memcpy)(&unresumable_siginfo, info, sizeof(vki_ksiginfo_t));

      longjmpd_on_signal = sigNo;
      __builtin_longjmp(scheduler_jmpbuf,1);
   }
}
316
/* Run the thread tid for a while, and return a VG_TRC_* value to the
   scheduler indicating what happened. */
static
UInt run_thread_for_a_while ( ThreadId tid )
{
   /* volatile: trc must survive the __builtin_longjmp taken on a
      fault, so it can be tested in the else-branch below. */
   volatile UInt trc = 0;
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   vg_assert(!scheduler_jmpbuf_valid);

   VGP_PUSHCC(VgpRun);
   load_thread_state ( tid );

   /* there should be no undealt-with signals */
   vg_assert(unresumable_siginfo.si_signo == 0);

   if (__builtin_setjmp(scheduler_jmpbuf) == 0) {
      /* try this ... */
      /* Mark the jmp_buf valid only for the duration of the client
         run, so VG_(resume_scheduler) never jumps to a dead buffer. */
      scheduler_jmpbuf_valid = True;
      trc = VG_(run_innerloop)();
      scheduler_jmpbuf_valid = False;
      /* We get here if the client didn't take a fault. */
   } else {
      /* We get here if the client took a fault, which caused our
         signal handler to longjmp. */
      scheduler_jmpbuf_valid = False;
      vg_assert(trc == 0);
      trc = VG_TRC_UNRESUMABLE_SIGNAL;
   }

   vg_assert(!scheduler_jmpbuf_valid);

   save_thread_state ( tid );
   VGP_POPCC(VgpRun);
   return trc;
}
353
354
/* Reset tid's thread record to a pristine, empty state: arch state
   cleared, no mutex/condvar/join associations, default cancellation
   settings, empty signal mask, no pending syscall, no proxy, and a
   disabled altstack.  "Mostly" because the stack_* fields are left
   alone (see VG_(scheduler_init)). */
static
void mostly_clear_thread_record ( ThreadId tid )
{
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   VGA_(clear_thread)(&VG_(threads)[tid].arch);
   VG_(threads)[tid].tid                  = tid;
   VG_(threads)[tid].status               = VgTs_Empty;
   VG_(threads)[tid].associated_mx        = NULL;
   VG_(threads)[tid].associated_cv        = NULL;
   VG_(threads)[tid].awaken_at            = 0;
   VG_(threads)[tid].joinee_retval        = NULL;
   VG_(threads)[tid].joiner_thread_return = NULL;
   VG_(threads)[tid].joiner_jee_tid       = VG_INVALID_THREADID;
   VG_(threads)[tid].detached             = False;
   VG_(threads)[tid].cancel_st   = True;  /* PTHREAD_CANCEL_ENABLE */
   VG_(threads)[tid].cancel_ty   = True;  /* PTHREAD_CANCEL_DEFERRED */
   VG_(threads)[tid].cancel_pend = NULL;  /* not pending */
   VG_(threads)[tid].custack_used = 0;
   VG_(ksigemptyset)(&VG_(threads)[tid].sig_mask);
   /* eff_sig_mask full: everything blocked until set up properly --
      presumably; confirm against the signal-routing code. */
   VG_(ksigfillset)(&VG_(threads)[tid].eff_sig_mask);
   VG_(threads)[tid].specifics_ptr = NULL;

   VG_(threads)[tid].syscallno   = -1;
   VG_(threads)[tid].sys_flags   = 0;
   VG_(threads)[tid].sys_pre_res = NULL;

   VG_(threads)[tid].proxy = NULL;

   /* start with no altstack; poison ss_sp so accidental use is loud */
   VG_(threads)[tid].altstack.ss_sp    = (void *)0xdeadbeef;
   VG_(threads)[tid].altstack.ss_size  = 0;
   VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
}
388
389
jsgf855d93d2003-10-13 22:26:55 +0000390
/* Initialise the scheduler.  Create a single "main" thread ready to
   run, with special ThreadId of one.  This is called at startup; the
   caller takes care to park the client's state in VG_(baseBlock).
*/
void VG_(scheduler_init) ( void )
{
   Int i;
   ThreadId tid_main;

   /* Clear every slot, including the never-used slot 0. */
   for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
      mostly_clear_thread_record(i);
      VG_(threads)[i].stack_size         = 0;
      VG_(threads)[i].stack_base         = (Addr)NULL;
      VG_(threads)[i].stack_guard_size   = 0;
      VG_(threads)[i].stack_highest_word = (Addr)NULL;
   }

   /* No thread-specific-data keys allocated yet. */
   for (i = 0; i < VG_N_THREAD_KEYS; i++) {
      vg_thread_keys[i].inuse      = False;
      vg_thread_keys[i].destructor = NULL;
   }

   vg_fhstack_used = 0;

   /* Assert this is thread zero, which has certain magic
      properties. */
   tid_main = vg_alloc_ThreadState();
   vg_assert(tid_main == 1);
   VG_(threads)[tid_main].status = VgTs_Runnable;

   /* Copy VG_(baseBlock) state to tid_main's slot. */
   vg_tid_currently_in_baseBlock = tid_main;
   vg_tid_last_in_baseBlock = tid_main;

   VGA_(init_thread)(&VG_(threads)[tid_main].arch);
   save_thread_state ( tid_main );

   /* Record the client stack extents for the main thread. */
   VG_(threads)[tid_main].stack_highest_word
      = VG_(clstk_end) - 4;
   VG_(threads)[tid_main].stack_base = VG_(clstk_base);
   VG_(threads)[tid_main].stack_size = VG_(clstk_end) - VG_(clstk_base);

   /* So now ... baseBlock is vacant again (save_thread_state cleared it). */
   vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);

   /* Not running client code right now. */
   scheduler_jmpbuf_valid = False;

   /* Proxy for main thread */
   VG_(proxy_create)(tid_main);
}
442
443
sewardj3947e622002-05-23 16:52:11 +0000444
/* vthread tid is returning from a signal handler; modify its
   stack/regs accordingly. */
static
void handle_signal_return ( ThreadId tid )
{
   Bool restart_blocked_syscalls;
   struct vki_timespec * rem;

   vg_assert(VG_(is_valid_tid)(tid));

   restart_blocked_syscalls = VG_(signal_returns)(tid);

   /* If we were interrupted in the middle of a rendezvous
      then check the rendezvous hasn't completed while we
      were busy handling the signal. */
   if (VG_(threads)[tid].status == VgTs_WaitJoiner ||
       VG_(threads)[tid].status == VgTs_WaitJoinee ) {
      maybe_rendezvous_joiners_and_joinees();
   }

   if (restart_blocked_syscalls)
      /* Easy; we don't have to do anything. */
      return;

   if (VG_(threads)[tid].status == VgTs_Sleeping
       && VG_(threads)[tid].arch.m_eax == __NR_nanosleep) {
      /* We interrupted a nanosleep().  The right thing to do is to
         write the unused time to nanosleep's second param, but that's
         too much effort ... we just say that 1 nanosecond was not
         used, and return EINTR. */
      rem = (struct vki_timespec *)VG_(threads)[tid].arch.m_ecx; /* arg2 */
      if (rem != NULL) {
         rem->tv_sec  = 0;
         rem->tv_nsec = 1;
      }
      SET_SYSCALL_RETVAL(tid, -VKI_EINTR);
      VG_(threads)[tid].status = VgTs_Runnable;
      return;
   }

   /* All other cases?  Just return. */
}
487
488
/* A pending wakeup, kept on a singly-linked list sorted by ascending
   wakeup time. */
struct timeout {
   UInt     time;   /* time we should awaken */
   ThreadId tid;    /* thread which cares about this timeout */
   struct timeout *next;
};

/* Head of the sorted timeout list. */
static struct timeout *timeouts;

/* Insert a wakeup for tid at millisecond-time 'time', keeping the
   list sorted by time.  Entries are freed in idle(). */
static void add_timeout(ThreadId tid, UInt time)
{
   struct timeout *t = VG_(arena_malloc)(VG_AR_CORE, sizeof(*t));
   struct timeout **prev, *tp;

   t->time = time;
   t->tid  = tid;

   if (VG_(clo_trace_sched)) {
      Char msg_buf[100];
      VG_(sprintf)(msg_buf, "add_timeout: now=%u adding timeout at %u",
                   VG_(read_millisecond_timer)(), time);
      print_sched_event(tid, msg_buf);
   }

   /* Walk to the insertion point: first node with time >= ours. */
   for (tp = timeouts, prev = &timeouts;
        tp != NULL && tp->time < time;
        prev = &tp->next, tp = tp->next)
      ;
   t->next = tp;
   *prev   = t;
}
519
sewardje663cb92002-04-12 10:26:32 +0000520static
521void sched_do_syscall ( ThreadId tid )
522{
jsgf855d93d2003-10-13 22:26:55 +0000523 Int syscall_no;
njn25e49d8e72002-09-23 09:36:25 +0000524 Char msg_buf[100];
sewardje663cb92002-04-12 10:26:32 +0000525
sewardjb48e5002002-05-13 00:16:03 +0000526 vg_assert(VG_(is_valid_tid)(tid));
sewardj018f7622002-05-15 21:13:39 +0000527 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +0000528
nethercotec06e2132004-09-03 13:45:29 +0000529 syscall_no = VG_(threads)[tid].arch.m_eax; /* syscall number */
sewardje663cb92002-04-12 10:26:32 +0000530
jsgf855d93d2003-10-13 22:26:55 +0000531 /* Special-case nanosleep because we can. But should we?
532
533 XXX not doing so for now, because it doesn't seem to work
534 properly, and we can use the syscall nanosleep just as easily.
535 */
536 if (0 && syscall_no == __NR_nanosleep) {
sewardj5f07b662002-04-23 16:52:51 +0000537 UInt t_now, t_awaken;
sewardje663cb92002-04-12 10:26:32 +0000538 struct vki_timespec* req;
nethercotec06e2132004-09-03 13:45:29 +0000539 req = (struct vki_timespec*)VG_(threads)[tid].arch.m_ebx; /* arg1 */
jsgf855d93d2003-10-13 22:26:55 +0000540
541 if (req->tv_sec < 0 || req->tv_nsec < 0 || req->tv_nsec >= 1000000000) {
542 SET_SYSCALL_RETVAL(tid, -VKI_EINVAL);
543 return;
544 }
545
sewardj5f07b662002-04-23 16:52:51 +0000546 t_now = VG_(read_millisecond_timer)();
sewardje663cb92002-04-12 10:26:32 +0000547 t_awaken
548 = t_now
sewardj5f07b662002-04-23 16:52:51 +0000549 + (UInt)1000ULL * (UInt)(req->tv_sec)
550 + (UInt)(req->tv_nsec) / 1000000;
sewardj018f7622002-05-15 21:13:39 +0000551 VG_(threads)[tid].status = VgTs_Sleeping;
552 VG_(threads)[tid].awaken_at = t_awaken;
sewardj8937c812002-04-12 20:12:20 +0000553 if (VG_(clo_trace_sched)) {
sewardj5f07b662002-04-23 16:52:51 +0000554 VG_(sprintf)(msg_buf, "at %d: nanosleep for %d",
sewardje663cb92002-04-12 10:26:32 +0000555 t_now, t_awaken-t_now);
556 print_sched_event(tid, msg_buf);
557 }
nethercotef971ab72004-08-02 16:27:40 +0000558 add_timeout(tid, t_awaken);
sewardje663cb92002-04-12 10:26:32 +0000559 /* Force the scheduler to run something else for a while. */
560 return;
561 }
562
jsgf855d93d2003-10-13 22:26:55 +0000563 /* If pre_syscall returns true, then we're done immediately */
564 if (VG_(pre_syscall)(tid)) {
fitzhardinge31ba9052004-01-16 02:15:23 +0000565 VG_(post_syscall(tid, True));
sewardj3947e622002-05-23 16:52:11 +0000566 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +0000567 } else {
jsgf855d93d2003-10-13 22:26:55 +0000568 vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
sewardje663cb92002-04-12 10:26:32 +0000569 }
570}
571
572
sewardje663cb92002-04-12 10:26:32 +0000573
/* Sleep for a while, but be willing to be woken.  Blocks in poll() on
   the proxy result fd, bounded by the nearest pending timeout; on the
   way out, promotes threads whose timeouts have expired and frees
   their (possibly stale) timeout entries. */
static
void idle ( void )
{
   struct vki_pollfd pollfd[1];
   Int delta = -1;                 /* poll timeout in ms; -1 = forever */
   Int fd    = VG_(proxy_resfd)();

   pollfd[0].fd     = fd;
   pollfd[0].events = VKI_POLLIN;

   /* Look through the nearest timeouts, looking for the next future
      one (there may be stale past timeouts).  They'll all be mopped
      up below when the poll() finishes. */
   if (timeouts != NULL) {
      struct timeout *tp;
      Bool wicked = False;
      UInt now = VG_(read_millisecond_timer)();

      for (tp = timeouts; tp != NULL && tp->time < now; tp = tp->next) {
         /* If a thread is still sleeping in the past, make it runnable */
         ThreadState *tst = VG_(get_ThreadState)(tp->tid);
         if (tst->status == VgTs_Sleeping)
            tst->status = VgTs_Runnable;
         wicked = True;            /* no sleep for the wicked */
      }

      /* tp now points at the first future timeout, if any. */
      if (tp != NULL) {
         delta = tp->time - now;
         vg_assert(delta >= 0);
      }
      if (wicked)
         delta = 0;                /* something already runnable: don't block */
   }

   /* gotta wake up for something! */
   vg_assert(fd != -1 || delta != -1);

   /* If we need to do signal routing, then poll for pending signals
      every VG_(clo_signal_polltime) mS */
   if (VG_(do_signal_routing) && (delta > VG_(clo_signal_polltime) || delta == -1))
      delta = VG_(clo_signal_polltime);

   if (VG_(clo_trace_sched)) {
      Char msg_buf[100];
      VG_(sprintf)(msg_buf, "idle: waiting for %dms and fd %d",
                   delta, fd);
      print_sched_event(0, msg_buf);
   }

   VG_(poll)(pollfd, fd != -1 ? 1 : 0, delta);

   /* See if there's anything on the timeout list which needs
      waking, and mop up anything in the past. */
   {
      UInt now = VG_(read_millisecond_timer)();
      struct timeout *tp;

      tp = timeouts;

      while (tp && tp->time <= now) {
         struct timeout *dead;
         ThreadState *tst;

         tst = VG_(get_ThreadState)(tp->tid);

         if (VG_(clo_trace_sched)) {
            Char msg_buf[100];
            VG_(sprintf)(msg_buf, "idle: now=%u removing timeout at %u",
                         now, tp->time);
            print_sched_event(tp->tid, msg_buf);
         }

         /* If awaken_at != tp->time then it means the timeout is
            stale and we should just ignore it. */
         if (tst->awaken_at == tp->time) {
            switch (tst->status) {
            case VgTs_Sleeping:
               tst->awaken_at = 0xFFFFFFFF;
               tst->status = VgTs_Runnable;
               break;

            case VgTs_WaitCV:
               /* pthread_cond_timedwait timed out */
               do_pthread_cond_timedwait_TIMEOUT(tst->tid);
               break;

            default:
               /* This is a bit odd but OK; if a thread had a timeout
                  but woke for some other reason (signal, condvar
                  wakeup), then it will still be on the list. */
               if (0)
                  VG_(printf)("idle(): unexpected status tp->tid=%d tst->status = %d\n",
                              tp->tid, tst->status);
               break;
            }
         }

         /* Unlink and free the processed entry. */
         dead = tp;
         tp = tp->next;

         VG_(arena_free)(VG_AR_CORE, dead);
      }

      timeouts = tp;
   }
}
680
681
sewardje663cb92002-04-12 10:26:32 +0000682/* ---------------------------------------------------------------------
683 The scheduler proper.
684 ------------------------------------------------------------------ */
685
nethercote238a3c32004-08-09 13:13:31 +0000686// For handling of the default action of a fatal signal.
687// jmp_buf for fatal signals; VG_(fatal_signal_jmpbuf_ptr) is NULL until
688// the time is right that it can be used.
689static jmp_buf fatal_signal_jmpbuf;
690static jmp_buf* fatal_signal_jmpbuf_ptr;
691static Int fatal_sigNo; // the fatal signal, if it happens
692
/* Run user-space threads until either
   * Deadlock occurs
   * One thread asks to shutdown Valgrind
   * The specified number of basic blocks has gone by.

   Returns a VgSchedReturnCode saying why the scheduler stopped.
   On an exit-style return, *exitcode holds the client's exit status;
   *last_run_tid is kept up to date with the thread most recently
   dispatched, so callers can report where execution stopped. */
VgSchedReturnCode do_scheduler ( Int* exitcode, ThreadId* last_run_tid )
{
   ThreadId tid, tid_next;
   UInt     trc;                 /* thread return code from the dispatcher */
   UInt     dispatch_ctr_SAVED;  /* quantum requested, for bb accounting */
   Int      done_this_time, n_in_bounded_wait;
   Int      n_exists, n_waiting_for_reaper;
   Addr     trans_addr;

   /* Start with the root thread.  tid in general indicates the
      currently runnable/just-finished-running thread. */
   *last_run_tid = tid = 1;

   /* This is the top level scheduler loop.  It falls into three
      phases. */
   while (True) {

      /* ======================= Phase 0 of 3 =======================
         Be paranoid.  Always a good idea. */
     stage1:
      scheduler_sanity();
      VG_(sanity_check_general)( False );

      /* ======================= Phase 1 of 3 =======================
         Handle I/O completions and signals.  This may change the
         status of various threads.  Then select a new thread to run,
         or declare deadlock, or sleep if there are no runnable
         threads but some are blocked on I/O.  */

      /* Do the following loop until a runnable thread is found, or
         deadlock is detected. */
      while (True) {

         /* For stats purposes only. */
         n_scheduling_events_MAJOR++;

         /* Route signals to their proper places */
         VG_(route_signals)();

         /* See if any of the proxy LWPs report any activity: either a
            syscall completing or a signal arriving. */
         VG_(proxy_results)();

         /* Try and find a thread (tid) to run.  Scan is round-robin,
            starting just after the last-run thread; prefer_sched (set
            by VG_(need_resched)) can override the starting point. */
         tid_next = tid;
         if (prefer_sched != VG_INVALID_THREADID) {
            tid_next = prefer_sched-1;   /* -1 because the loop pre-increments */
            prefer_sched = VG_INVALID_THREADID;
         }
         n_in_bounded_wait = 0;
         n_exists = 0;
         n_waiting_for_reaper = 0;
         while (True) {
            tid_next++;
            if (tid_next >= VG_N_THREADS) tid_next = 1;   /* slot 0 is unused */
            /* A thread is in a "bounded wait" if something external
               (timer expiry, syscall completion) will eventually wake
               it; awaken_at == 0xFFFFFFFF means "no timeout armed". */
            if (VG_(threads)[tid_next].status == VgTs_Sleeping
                || VG_(threads)[tid_next].status == VgTs_WaitSys
                || (VG_(threads)[tid_next].status == VgTs_WaitCV
                    && VG_(threads)[tid_next].awaken_at != 0xFFFFFFFF))
               n_in_bounded_wait ++;
            if (VG_(threads)[tid_next].status != VgTs_Empty)
               n_exists++;
            if (VG_(threads)[tid_next].status == VgTs_WaitJoiner)
               n_waiting_for_reaper++;
            if (VG_(threads)[tid_next].status == VgTs_Runnable)
               break; /* We can run this one. */
            if (tid_next == tid)
               break; /* been all the way round */
         }
         tid = tid_next;

         if (VG_(threads)[tid].status == VgTs_Runnable) {
            /* Found a suitable candidate.  Fall out of this loop, so
               we can advance to stage 2 of the scheduler: actually
               running the thread. */
            break;
         }

         /* All threads have exited - pretend someone called exit() */
         if (n_waiting_for_reaper == n_exists) {
            *exitcode = 0; /* ? */
            return VgSrc_ExitSyscall;
         }

         /* We didn't find a runnable thread.  Now what? */
         if (n_in_bounded_wait == 0) {
            /* No runnable threads and no prospect of any appearing
               even if we wait for an arbitrary length of time.  In
               short, we have a deadlock. */
            VG_(pp_sched_status)();
            return VgSrc_Deadlock;
         }

         /* Nothing needs doing, so sit in idle until either a timeout
            happens or a thread's syscall completes. */
         idle();
         /* pp_sched_status(); */
         /* VG_(printf)("."); */
      }


      /* ======================= Phase 2 of 3 =======================
         Wahey! We've finally decided that thread tid is runnable, so
         we now do that.  Run it for as much of a quanta as possible.
         Trivial requests are handled and the thread continues.  The
         aim is not to do too many of Phase 1 since it is expensive.  */

      if (0)
         VG_(printf)("SCHED: tid %d\n", tid);

      VG_TRACK( thread_run, tid );

      /* Figure out how many bbs to ask vg_run_innerloop to do.  Note
         that it decrements the counter before testing it for zero, so
         that if VG_(dispatch_ctr) is set to N you get at most N-1
         iterations.  Also this means that VG_(dispatch_ctr) must
         exceed zero before entering the innerloop.  Also also, the
         decrement is done before the bb is actually run, so you
         always get at least one decrement even if nothing happens.
      */
      VG_(dispatch_ctr) = VG_SCHEDULING_QUANTUM + 1;

      /* ... and remember what we asked for. */
      dispatch_ctr_SAVED = VG_(dispatch_ctr);

      /* paranoia ... */
      vg_assert(VG_(threads)[tid].tid == tid);

      /* Actually run thread tid. */
      while (True) {

         *last_run_tid = tid;

         /* For stats purposes only. */
         n_scheduling_events_MINOR++;

         if (0)
            VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs",
                                      tid, VG_(dispatch_ctr) - 1 );
#        if 0
         /* Disabled debugging aid: force a debug retranslation once
            a given bb count is passed. */
         if (VG_(bbs_done) > 31700000 + 0) {
            dispatch_ctr_SAVED = VG_(dispatch_ctr) = 2;
            VG_(translate)(&VG_(threads)[tid],
                           ARCH_INSTR_PTR(VG_(threads)[tid].arch),
                           /*debugging*/True);
         }
         vg_assert(ARCH_INSTR_PTR(VG_(threads)[tid].arch) != 0);
#        endif

         trc = run_thread_for_a_while ( tid );

#        if 0
         /* Disabled debugging aid: catch a zero instruction pointer. */
         if (0 == ARCH_INSTR_PTR(VG_(threads)[tid].arch)) {
            VG_(printf)("tid = %d, dc = %llu\n", tid, VG_(bbs_done));
            vg_assert(0 != ARCH_INSTR_PTR(VG_(threads)[tid].arch));
         }
#        endif

         /* Deal quickly with trivial scheduling events, and resume the
            thread. */

         if (trc == VG_TRC_INNER_FASTMISS) {
            vg_assert(VG_(dispatch_ctr) > 0);

            /* Trivial event.  Miss in the fast-cache.  Do a full
               lookup for it. */
            trans_addr = VG_(search_transtab)
                            ( ARCH_INSTR_PTR(VG_(threads)[tid].arch) );
            if (trans_addr == (Addr)0) {
               /* Not found; we need to request a translation. */
               VG_(translate)( tid, ARCH_INSTR_PTR(VG_(threads)[tid].arch),
                               /*debug*/False );
               trans_addr = VG_(search_transtab)
                               ( ARCH_INSTR_PTR(VG_(threads)[tid].arch) );
               /* A translation we just made must be findable. */
               if (trans_addr == (Addr)0)
                  VG_(core_panic)("VG_TRC_INNER_FASTMISS: missing tt_fast entry");
            }
            continue; /* with this thread */
         }

         if (trc == VG_TRC_EBP_JMP_CLIENTREQ) {
            /* m_eax points at the client-request argument block; word 0
               is the request number. */
            UInt reqno = *(UInt*)(VG_(threads)[tid].arch.m_eax);
            /* VG_(printf)("request 0x%x\n", reqno); */

            /* Are we really absolutely totally quitting? */
            if (reqno == VG_USERREQ__LIBC_FREERES_DONE) {
               if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                  VG_(message)(Vg_DebugMsg,
                     "__libc_freeres() done; really quitting!");
               }
               return VgSrc_ExitSyscall;
            }

            do_client_request(tid);
            /* Following the request, we try and continue with the
               same thread if still runnable.  If not, go back to
               Stage 1 to select a new thread to run. */
            if (VG_(threads)[tid].status == VgTs_Runnable
                && reqno != VG_USERREQ__PTHREAD_YIELD)
               continue; /* with this thread */
            else
               goto stage1;
         }

         if (trc == VG_TRC_EBP_JMP_SYSCALL) {
            /* Do a syscall for the vthread tid.  This could cause it
               to become non-runnable.  One special case: spot the
               client doing calls to exit() and take this as the cue
               to exit. */
#           if 0
            /* Disabled debugging aid: dump the stack around ESP
               before the syscall. */
            { UInt* esp; Int i;
              esp=(UInt*)ARCH_STACK_PTR(VG_(threads)[tid].arch);
              VG_(printf)("\nBEFORE\n");
              for (i = 10; i >= -10; i--)
                 VG_(printf)("%2d %p = 0x%x\n", i, &esp[i], esp[i]);
            }
#           endif

            /* Deal with calling __libc_freeres() at exit.  When the
               client does __NR_exit, it's exiting for good.  So we
               then run __libc_freeres_wrapper.  That quits by
               doing VG_USERREQ__LIBC_FREERES_DONE, and at that point
               we really exit.  To be safe we nuke all other threads
               currently running.

               If not valgrinding (cachegrinding, etc) don't do this.
               __libc_freeres does some invalid frees which crash
               the unprotected malloc/free system. */

            /* m_eax holds the syscall number at this point. */
            if (VG_(threads)[tid].arch.m_eax == __NR_exit
                || VG_(threads)[tid].arch.m_eax == __NR_exit_group
               ) {

               /* If __NR_exit, remember the supplied argument. */
               *exitcode = VG_(threads)[tid].arch.m_ebx; /* syscall arg1 */

               /* Only run __libc_freeres if the tool says it's ok and
                  it hasn't been overridden with --run-libc-freeres=no
                  on the command line. */

               if (VG_(needs).libc_freeres &&
                   VG_(clo_run_libc_freeres) &&
                   __libc_freeres_wrapper != 0) {
                  if (VG_(clo_verbosity) > 2
                      || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                     VG_(message)(Vg_DebugMsg,
                        "Caught __NR_exit; running __libc_freeres()");
                  }
                  VG_(nuke_all_threads_except) ( tid );
                  /* Redirect the thread into the freeres wrapper
                     instead of letting the exit syscall through. */
                  ARCH_INSTR_PTR(VG_(threads)[tid].arch) =
                     (UInt)__libc_freeres_wrapper;
                  vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
                  goto stage1; /* party on, dudes (but not for much longer :) */

               } else {
                  /* We won't run __libc_freeres; just exit now. */
                  if (VG_(clo_verbosity) > 2
                      || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                     VG_(message)(Vg_DebugMsg,
                        "Caught __NR_exit; quitting");
                  }
                  return VgSrc_ExitSyscall;
               }

            }

            /* We've dealt with __NR_exit at this point. */
            vg_assert(VG_(threads)[tid].arch.m_eax != __NR_exit &&
                      VG_(threads)[tid].arch.m_eax != __NR_exit_group);

            /* Trap syscalls to __NR_sched_yield and just have this
               thread yield instead.  Not essential, just an
               optimisation. */
            if (VG_(threads)[tid].arch.m_eax == __NR_sched_yield) {
               SET_SYSCALL_RETVAL(tid, 0); /* syscall returns with success */
               goto stage1; /* find a new thread to run */
            }

            sched_do_syscall(tid);

#           if 0
            /* Disabled debugging aid: dump the stack around ESP
               after the syscall. */
            { UInt* esp; Int i;
              esp=(UInt*)ARCH_STACK_PTR(VG_(threads)[tid].arch);
              VG_(printf)("AFTER\n");
              for (i = 10; i >= -10; i--)
                 VG_(printf)("%2d %p = 0x%x\n", i, &esp[i], esp[i]);
            }
#           endif

            if (VG_(threads)[tid].status == VgTs_Runnable) {
               continue; /* with this thread */
            } else {
               goto stage1;
            }
         }

         /* It's an event we can't quickly deal with.  Give up running
            this thread and handle things the expensive way. */
         break;
      }

      /* ======================= Phase 3 of 3 =======================
         Handle non-trivial thread requests, mostly pthread stuff. */

      /* Ok, we've fallen out of the dispatcher for a
         non-completely-trivial reason. First, update basic-block
         counters. */

      /* The extra -1 matches the dispatcher's decrement-before-run
         behaviour described above. */
      done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 1;
      vg_assert(done_this_time >= 0);
      VG_(bbs_done) += (ULong)done_this_time;

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d: completed %d bbs, trc %d",
                                   tid, done_this_time, (Int)trc );

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d: %llu bbs, event %s",
                                   tid, VG_(bbs_done),
                                   name_of_sched_event(trc) );

      /* Examine the thread's return code to figure out why it
         stopped. */

      switch (trc) {

         case VG_TRC_EBP_JMP_YIELD:
            /* Explicit yield.  Let a new thread be scheduled,
               simply by doing nothing, causing us to arrive back at
               Phase 1. */
            break;

         case VG_TRC_INNER_COUNTERZERO:
            /* Timeslice is out.  Let a new thread be scheduled,
               simply by doing nothing, causing us to arrive back at
               Phase 1. */
            vg_assert(VG_(dispatch_ctr) == 0);
            break;

         case VG_TRC_UNRESUMABLE_SIGNAL:
            /* It got a SIGSEGV/SIGBUS/SIGILL/SIGFPE, which we need to
               deliver right away.  */
            vg_assert(unresumable_siginfo.si_signo == VKI_SIGSEGV ||
                      unresumable_siginfo.si_signo == VKI_SIGBUS ||
                      unresumable_siginfo.si_signo == VKI_SIGILL ||
                      unresumable_siginfo.si_signo == VKI_SIGFPE);
            vg_assert(longjmpd_on_signal == unresumable_siginfo.si_signo);

            /* make sure we've unblocked the signals which the handler blocked */
            VG_(unblock_host_signal)(longjmpd_on_signal);

            VG_(deliver_signal)(tid, &unresumable_siginfo, False);
            unresumable_siginfo.si_signo = 0; /* done */
            break;

         default:
            VG_(printf)("\ntrc = %d\n", trc);
            VG_(core_panic)("VG_(scheduler), phase 3: "
                            "unexpected thread return code");
            /* NOTREACHED */
            break;

      } /* switch (trc) */

      /* That completes Phase 3 of 3.  Return now to the top of the
         main scheduler loop, to Phase 1 of 3. */

   } /* top-level scheduler loop */


   /* NOTREACHED */
   VG_(core_panic)("scheduler: post-main-loop ?!");
   /* NOTREACHED */
}
1072
/* Public entry point to the scheduler.  Wraps do_scheduler() in a
   __builtin_setjmp so that the default action of a fatal signal can
   abort the scheduler cleanly: VG_(scheduler_handle_fatal_signal)
   longjmps back here, in which case we return VgSrc_FatalSig and
   report the signal number through *fatal_sigNo_ptr. */
VgSchedReturnCode VG_(scheduler) ( Int* exitcode, ThreadId* last_run_tid,
                                   Int* fatal_sigNo_ptr )
{
   VgSchedReturnCode src;

   /* Publish the jmp_buf; until this store fatal_signal_jmpbuf_ptr is
      NULL and the fatal-signal hook is a no-op. */
   fatal_signal_jmpbuf_ptr = &fatal_signal_jmpbuf;
   if (__builtin_setjmp( fatal_signal_jmpbuf_ptr ) == 0) {
      /* Normal path: run the scheduler proper. */
      src = do_scheduler( exitcode, last_run_tid );
   } else {
      /* Arrived here via __builtin_longjmp from the fatal-signal
         handler; fatal_sigNo was set just before the jump. */
      src = VgSrc_FatalSig;
      *fatal_sigNo_ptr = fatal_sigNo;
   }
   return src;
}
1087
jsgf855d93d2003-10-13 22:26:55 +00001088void VG_(need_resched) ( ThreadId prefer )
1089{
1090 /* Tell the scheduler now might be a good time to find a new
1091 runnable thread, because something happened which woke a thread
1092 up.
1093
1094 NB: This can be called unsynchronized from either a signal
1095 handler, or from another LWP (ie, real kernel thread).
1096
1097 In principle this could simply be a matter of setting
1098 VG_(dispatch_ctr) to a small value (say, 2), which would make
1099 any running code come back to the scheduler fairly quickly.
1100
1101 However, since the scheduler implements a strict round-robin
1102 policy with only one priority level, there are, by definition,
1103 no better threads to be running than the current thread anyway,
1104 so we may as well ignore this hint. For processes with a
1105 mixture of compute and I/O bound threads, this means the compute
1106 threads could introduce longish latencies before the I/O threads
1107 run. For programs with only I/O bound threads, need_resched
1108 won't have any effect anyway.
1109
1110 OK, so I've added command-line switches to enable low-latency
1111 syscalls and signals. The prefer_sched variable is in effect
1112 the ID of a single thread which has higher priority than all the
1113 others. If set, the scheduler will prefer to schedule that
1114 thread over all others. Naturally, this could lead to
1115 starvation or other unfairness.
1116 */
1117
1118 if (VG_(dispatch_ctr) > 10)
1119 VG_(dispatch_ctr) = 2;
1120 prefer_sched = prefer;
1121}
1122
nethercote238a3c32004-08-09 13:13:31 +00001123void VG_(scheduler_handle_fatal_signal) ( Int sigNo )
1124{
1125 if (NULL != fatal_signal_jmpbuf_ptr) {
1126 fatal_sigNo = sigNo;
1127 __builtin_longjmp(*fatal_signal_jmpbuf_ptr, 1);
1128 }
1129}
sewardje663cb92002-04-12 10:26:32 +00001130
1131/* ---------------------------------------------------------------------
1132 The pthread implementation.
1133 ------------------------------------------------------------------ */
1134
1135#include <pthread.h>
1136#include <errno.h>
1137
sewardje663cb92002-04-12 10:26:32 +00001138/* /usr/include/bits/pthreadtypes.h:
1139 typedef unsigned long int pthread_t;
1140*/
1141
sewardje663cb92002-04-12 10:26:32 +00001142
sewardj604ec3c2002-04-18 22:38:41 +00001143/* -----------------------------------------------------------
sewardj20917d82002-05-28 01:36:45 +00001144 Thread CREATION, JOINAGE and CANCELLATION: HELPER FNS
sewardj604ec3c2002-04-18 22:38:41 +00001145 -------------------------------------------------------- */
1146
/* We've decided to action a cancellation on tid.  Make it jump to
   thread_exit_wrapper() in vg_libpthread.c, passing PTHREAD_CANCELED
   as the arg. */
static
void make_thread_jump_to_cancelhdlr ( ThreadId tid )
{
   Char msg_buf[100];
   vg_assert(VG_(is_valid_tid)(tid));

   /* Push PTHREAD_CANCELED on the stack and jump to the cancellation
      handler -- which is really thread_exit_wrapper() in
      vg_libpthread.c. */
   vg_assert(VG_(threads)[tid].cancel_pend != NULL);

   /* Push a suitable arg, and mark it as readable. */
   /* NOTE: stack grows downwards; each push is esp -= 4 then a store
      at the new esp (32-bit guest words). */
   SET_PTHREQ_ESP(tid, VG_(threads)[tid].arch.m_esp - 4);
   * (UInt*)(VG_(threads)[tid].arch.m_esp) = (UInt)PTHREAD_CANCELED;
   VG_TRACK( post_mem_write, VG_(threads)[tid].arch.m_esp, sizeof(void*) );

   /* Push a bogus return address.  It will not return, but we still
      need to have it so that the arg is at the correct stack offset.
      Don't mark as readable; any attempt to read this is and internal
      valgrind bug since thread_exit_wrapper should not return. */
   SET_PTHREQ_ESP(tid, VG_(threads)[tid].arch.m_esp - 4);
   * (UInt*)(VG_(threads)[tid].arch.m_esp) = 0xBEADDEEF;

   /* .cancel_pend will hold &thread_exit_wrapper */
   ARCH_INSTR_PTR(VG_(threads)[tid].arch) = (UInt)VG_(threads)[tid].cancel_pend;

   /* If tid is blocked in a syscall via its proxy LWP, abort it so
      the thread can be redirected to the handler. */
   VG_(proxy_abort_syscall)(tid);

   /* Make sure we aren't cancelled again whilst handling this
      cancellation. */
   VG_(threads)[tid].cancel_st = False;
   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf,
                   "jump to cancellation handler (hdlr = %p)",
                   VG_(threads)[tid].cancel_pend);
      print_sched_event(tid, msg_buf);
   }

   if(VG_(threads)[tid].status == VgTs_WaitCV) {
      /* posix says we must reaquire mutex before handling cancelation */
      vg_pthread_mutex_t* mx;
      vg_pthread_cond_t* cond;

      mx = VG_(threads)[tid].associated_mx;
      cond = VG_(threads)[tid].associated_cv;
      VG_TRACK( pre_mutex_lock, tid, mx );

      if (mx->__vg_m_owner == VG_INVALID_THREADID) {
         /* Currently unheld; hand it out to thread tid. */
         vg_assert(mx->__vg_m_count == 0);
         VG_(threads)[tid].status        = VgTs_Runnable;
         VG_(threads)[tid].associated_cv = NULL;
         VG_(threads)[tid].associated_mx = NULL;
         mx->__vg_m_owner = (/*_pthread_descr*/void*)tid;
         mx->__vg_m_count = 1;
         /* .m_edx already holds pth_cond_wait success value (0) */

         VG_TRACK( post_mutex_lock, tid, mx );

         if (VG_(clo_trace_pthread_level) >= 1) {
            VG_(sprintf)(msg_buf, "%s cv %p: RESUME with mx %p",
                                  "pthread_cancel", cond, mx );
            print_pthread_event(tid, msg_buf);
         }

      } else {
         /* Currently held.  Make thread tid be blocked on it. */
         vg_assert(mx->__vg_m_count > 0);
         VG_(threads)[tid].status        = VgTs_WaitMX;
         VG_(threads)[tid].associated_cv = NULL;
         VG_(threads)[tid].associated_mx = mx;
         SET_PTHREQ_RETVAL(tid, 0); /* pth_cond_wait success value */

         if (VG_(clo_trace_pthread_level) >= 1) {
            VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
                                  "pthread_cancel", cond, mx );
            print_pthread_event(tid, msg_buf);
         }
      }
   } else {
      /* Not waiting on a condvar: simply make the thread runnable so
         it executes the cancellation handler next time it is picked. */
      VG_(threads)[tid].status = VgTs_Runnable;
   }
}
1233
1234
1235
/* Release resources and generally clean up once a thread has finally
   disappeared.

   BORKAGE/ISSUES as of 29 May 02 (moved from top of file --njn 2004-Aug-02)

   TODO sometime:
   - Mutex scrubbing - clearup_after_thread_exit: look for threads
     blocked on mutexes held by the exiting thread, and release them
     appropriately. (??)

   'tid' must already have been marked VgTs_Empty by the caller.
   'forcekill' is passed through to VG_(proxy_delete) and controls how
   the proxy LWP is taken down. */
static
void cleanup_after_thread_exited ( ThreadId tid, Bool forcekill )
{
   vg_assert(is_valid_or_empty_tid(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Empty);
   /* Its stack is now off-limits */
   VG_TRACK( die_mem_stack, VG_(threads)[tid].stack_base,
                            VG_(threads)[tid].stack_size );

   /* Arch-specific per-thread teardown. */
   VGA_(cleanup_thread)( &VG_(threads)[tid].arch );

   /* Not interested in the timeout anymore */
   /* 0xFFFFFFFF is the sentinel for "no timeout armed". */
   VG_(threads)[tid].awaken_at = 0xFFFFFFFF;

   /* Delete proxy LWP */
   VG_(proxy_delete)(tid, forcekill);
}
1263
1264
/* Look for matching pairs of threads waiting for joiners and threads
   waiting for joinees.  For each such pair copy the return value of
   the joinee into the joiner, let the joiner resume and discard the
   joinee. */
static
void maybe_rendezvous_joiners_and_joinees ( void )
{
   Char     msg_buf[100];
   void**   thread_return;
   ThreadId jnr, jee;   /* joiner / joinee thread ids */

   /* Scan every potential joiner (thread slot 0 is unused). */
   for (jnr = 1; jnr < VG_N_THREADS; jnr++) {
      if (VG_(threads)[jnr].status != VgTs_WaitJoinee)
         continue;
      jee = VG_(threads)[jnr].joiner_jee_tid;
      if (jee == VG_INVALID_THREADID)
         continue;
      vg_assert(VG_(is_valid_tid)(jee));
      if (VG_(threads)[jee].status != VgTs_WaitJoiner) {
         /* if joinee has become detached, then make join fail with
            EINVAL */
         if (VG_(threads)[jee].detached) {
            VG_(threads)[jnr].status = VgTs_Runnable;
            VG_(threads)[jnr].joiner_jee_tid = VG_INVALID_THREADID;
            SET_PTHREQ_RETVAL(jnr, VKI_EINVAL);
         }
         continue;
      }
      /* ok! jnr is waiting to join with jee, and jee is waiting to be
         joined by ... well, any thread.  So let's do it! */

      /* Copy return value to where joiner wants it.  NULL means the
         joiner passed NULL to pthread_join and doesn't want it. */
      thread_return = VG_(threads)[jnr].joiner_thread_return;
      if (thread_return != NULL) {
         /* CHECK thread_return writable */
         VG_TRACK( pre_mem_write, Vg_CorePThread, jnr,
                                  "pthread_join: thread_return",
                                  (Addr)thread_return, sizeof(void*));

         *thread_return = VG_(threads)[jee].joinee_retval;
         /* Not really right, since it makes the thread's return value
            appear to be defined even if it isn't. */
         VG_TRACK( post_mem_write, (Addr)thread_return, sizeof(void*) );
      }

      /* Joinee is discarded */
      VG_(threads)[jee].status = VgTs_Empty; /* bye! */
      cleanup_after_thread_exited ( jee, False );
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,
                      "rendezvous with joinee %d. %d resumes, %d exits.",
                      jee, jnr, jee );
         print_sched_event(jnr, msg_buf);
      }

      VG_TRACK( post_thread_join, jnr, jee );

      /* joiner returns with success */
      VG_(threads)[jnr].status = VgTs_Runnable;
      SET_PTHREQ_RETVAL(jnr, 0);
   }
}
1328
sewardjccef2e62002-05-29 19:26:32 +00001329/* Nuke all threads other than tid. POSIX specifies that this should
1330 happen in __NR_exec, and after a __NR_fork() when I am the child,
jsgf855d93d2003-10-13 22:26:55 +00001331 as POSIX requires. Also used at process exit time with
1332 me==VG_INVALID_THREADID */
sewardjccef2e62002-05-29 19:26:32 +00001333void VG_(nuke_all_threads_except) ( ThreadId me )
1334{
1335 ThreadId tid;
1336 for (tid = 1; tid < VG_N_THREADS; tid++) {
1337 if (tid == me
jsgf855d93d2003-10-13 22:26:55 +00001338 || VG_(threads)[tid].status == VgTs_Empty)
sewardjccef2e62002-05-29 19:26:32 +00001339 continue;
sewardjef037c72002-05-30 00:40:03 +00001340 if (0)
1341 VG_(printf)(
1342 "VG_(nuke_all_threads_except): nuking tid %d\n", tid);
jsgf855d93d2003-10-13 22:26:55 +00001343 VG_(proxy_delete)(tid, True);
sewardjccef2e62002-05-29 19:26:32 +00001344 VG_(threads)[tid].status = VgTs_Empty;
jsgf855d93d2003-10-13 22:26:55 +00001345 cleanup_after_thread_exited( tid, True );
sewardjccef2e62002-05-29 19:26:32 +00001346 }
1347}
1348
1349
sewardj20917d82002-05-28 01:36:45 +00001350/* -----------------------------------------------------------
1351 Thread CREATION, JOINAGE and CANCELLATION: REQUESTS
1352 -------------------------------------------------------- */
1353
sewardje663cb92002-04-12 10:26:32 +00001354static
sewardj8ad94e12002-05-29 00:10:20 +00001355void do__cleanup_push ( ThreadId tid, CleanupEntry* cu )
1356{
1357 Int sp;
1358 Char msg_buf[100];
1359 vg_assert(VG_(is_valid_tid)(tid));
1360 sp = VG_(threads)[tid].custack_used;
1361 if (VG_(clo_trace_sched)) {
thughes11975ff2004-06-12 12:58:22 +00001362 switch (cu->type) {
1363 case VgCt_Function:
1364 VG_(sprintf)(msg_buf,
1365 "cleanup_push (fn %p, arg %p) -> slot %d",
1366 cu->data.function.fn, cu->data.function.arg, sp);
1367 break;
1368 case VgCt_Longjmp:
1369 VG_(sprintf)(msg_buf,
1370 "cleanup_push (ub %p) -> slot %d",
1371 cu->data.longjmp.ub, sp);
1372 break;
1373 default:
1374 VG_(sprintf)(msg_buf,
1375 "cleanup_push (unknown type) -> slot %d",
1376 sp);
1377 break;
1378 }
sewardj8ad94e12002-05-29 00:10:20 +00001379 print_sched_event(tid, msg_buf);
1380 }
1381 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1382 if (sp == VG_N_CLEANUPSTACK)
njne427a662002-10-02 11:08:25 +00001383 VG_(core_panic)("do__cleanup_push: VG_N_CLEANUPSTACK is too small."
sewardj8ad94e12002-05-29 00:10:20 +00001384 " Increase and recompile.");
1385 VG_(threads)[tid].custack[sp] = *cu;
1386 sp++;
1387 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001388 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001389}
1390
1391
1392static
1393void do__cleanup_pop ( ThreadId tid, CleanupEntry* cu )
1394{
1395 Int sp;
1396 Char msg_buf[100];
1397 vg_assert(VG_(is_valid_tid)(tid));
1398 sp = VG_(threads)[tid].custack_used;
1399 if (VG_(clo_trace_sched)) {
njn36650922002-10-04 09:18:09 +00001400 VG_(sprintf)(msg_buf, "cleanup_pop from slot %d", sp-1);
sewardj8ad94e12002-05-29 00:10:20 +00001401 print_sched_event(tid, msg_buf);
1402 }
1403 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1404 if (sp == 0) {
njnd3040452003-05-19 15:04:06 +00001405 SET_PTHREQ_RETVAL(tid, -1);
sewardj8ad94e12002-05-29 00:10:20 +00001406 return;
1407 }
1408 sp--;
njn72718642003-07-24 08:45:32 +00001409 VG_TRACK( pre_mem_write, Vg_CorePThread, tid,
njn36650922002-10-04 09:18:09 +00001410 "cleanup pop", (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001411 *cu = VG_(threads)[tid].custack[sp];
njn25e49d8e72002-09-23 09:36:25 +00001412 VG_TRACK( post_mem_write, (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001413 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001414 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001415}
1416
1417
1418static
sewardjff42d1d2002-05-22 13:17:31 +00001419void do_pthread_yield ( ThreadId tid )
1420{
1421 Char msg_buf[100];
1422 vg_assert(VG_(is_valid_tid)(tid));
sewardjff42d1d2002-05-22 13:17:31 +00001423 if (VG_(clo_trace_sched)) {
1424 VG_(sprintf)(msg_buf, "yield");
1425 print_sched_event(tid, msg_buf);
1426 }
njnd3040452003-05-19 15:04:06 +00001427 SET_PTHREQ_RETVAL(tid, 0);
sewardjff42d1d2002-05-22 13:17:31 +00001428}
1429
1430
1431static
sewardj20917d82002-05-28 01:36:45 +00001432void do__testcancel ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00001433{
sewardj7989d0c2002-05-28 11:00:01 +00001434 Char msg_buf[100];
sewardjb48e5002002-05-13 00:16:03 +00001435 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001436 if (VG_(clo_trace_sched)) {
1437 VG_(sprintf)(msg_buf, "testcancel");
1438 print_sched_event(tid, msg_buf);
1439 }
sewardj20917d82002-05-28 01:36:45 +00001440 if (/* is there a cancellation pending on this thread? */
1441 VG_(threads)[tid].cancel_pend != NULL
1442 && /* is this thread accepting cancellations? */
1443 VG_(threads)[tid].cancel_st) {
1444 /* Ok, let's do the cancellation. */
1445 make_thread_jump_to_cancelhdlr ( tid );
sewardje663cb92002-04-12 10:26:32 +00001446 } else {
sewardj20917d82002-05-28 01:36:45 +00001447 /* No, we keep going. */
njnd3040452003-05-19 15:04:06 +00001448 SET_PTHREQ_RETVAL(tid, 0);
sewardje663cb92002-04-12 10:26:32 +00001449 }
sewardje663cb92002-04-12 10:26:32 +00001450}
1451
1452
1453static
sewardj20917d82002-05-28 01:36:45 +00001454void do__set_cancelstate ( ThreadId tid, Int state )
1455{
1456 Bool old_st;
sewardj7989d0c2002-05-28 11:00:01 +00001457 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001458 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001459 if (VG_(clo_trace_sched)) {
1460 VG_(sprintf)(msg_buf, "set_cancelstate to %d (%s)", state,
1461 state==PTHREAD_CANCEL_ENABLE
1462 ? "ENABLE"
1463 : (state==PTHREAD_CANCEL_DISABLE ? "DISABLE" : "???"));
1464 print_sched_event(tid, msg_buf);
1465 }
sewardj20917d82002-05-28 01:36:45 +00001466 old_st = VG_(threads)[tid].cancel_st;
1467 if (state == PTHREAD_CANCEL_ENABLE) {
1468 VG_(threads)[tid].cancel_st = True;
1469 } else
1470 if (state == PTHREAD_CANCEL_DISABLE) {
1471 VG_(threads)[tid].cancel_st = False;
1472 } else {
njne427a662002-10-02 11:08:25 +00001473 VG_(core_panic)("do__set_cancelstate");
sewardj20917d82002-05-28 01:36:45 +00001474 }
njnd3040452003-05-19 15:04:06 +00001475 SET_PTHREQ_RETVAL(tid, old_st ? PTHREAD_CANCEL_ENABLE
1476 : PTHREAD_CANCEL_DISABLE);
sewardj20917d82002-05-28 01:36:45 +00001477}
1478
1479
1480static
1481void do__set_canceltype ( ThreadId tid, Int type )
1482{
1483 Bool old_ty;
sewardj7989d0c2002-05-28 11:00:01 +00001484 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001485 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001486 if (VG_(clo_trace_sched)) {
1487 VG_(sprintf)(msg_buf, "set_canceltype to %d (%s)", type,
1488 type==PTHREAD_CANCEL_ASYNCHRONOUS
1489 ? "ASYNCHRONOUS"
1490 : (type==PTHREAD_CANCEL_DEFERRED ? "DEFERRED" : "???"));
1491 print_sched_event(tid, msg_buf);
1492 }
sewardj20917d82002-05-28 01:36:45 +00001493 old_ty = VG_(threads)[tid].cancel_ty;
1494 if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
1495 VG_(threads)[tid].cancel_ty = False;
1496 } else
1497 if (type == PTHREAD_CANCEL_DEFERRED) {
sewardjaf00b6d2002-05-29 23:30:28 +00001498 VG_(threads)[tid].cancel_ty = True;
sewardj20917d82002-05-28 01:36:45 +00001499 } else {
njne427a662002-10-02 11:08:25 +00001500 VG_(core_panic)("do__set_canceltype");
sewardj20917d82002-05-28 01:36:45 +00001501 }
njnd3040452003-05-19 15:04:06 +00001502 SET_PTHREQ_RETVAL(tid, old_ty ? PTHREAD_CANCEL_DEFERRED
sewardj20917d82002-05-28 01:36:45 +00001503 : PTHREAD_CANCEL_ASYNCHRONOUS);
1504}
1505
1506
sewardj7989d0c2002-05-28 11:00:01 +00001507/* Set or get the detach state for thread det. */
sewardj20917d82002-05-28 01:36:45 +00001508static
sewardj7989d0c2002-05-28 11:00:01 +00001509void do__set_or_get_detach ( ThreadId tid,
1510 Int what, ThreadId det )
sewardj20917d82002-05-28 01:36:45 +00001511{
sewardj7989d0c2002-05-28 11:00:01 +00001512 Char msg_buf[100];
1513 /* VG_(printf)("do__set_or_get_detach tid %d what %d det %d\n",
1514 tid, what, det); */
sewardj20917d82002-05-28 01:36:45 +00001515 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001516 if (VG_(clo_trace_sched)) {
1517 VG_(sprintf)(msg_buf, "set_or_get_detach %d (%s) for tid %d", what,
1518 what==0 ? "not-detached" : (
1519 what==1 ? "detached" : (
1520 what==2 ? "fetch old value" : "???")),
1521 det );
1522 print_sched_event(tid, msg_buf);
1523 }
1524
1525 if (!VG_(is_valid_tid)(det)) {
njnd3040452003-05-19 15:04:06 +00001526 SET_PTHREQ_RETVAL(tid, -1);
sewardj7989d0c2002-05-28 11:00:01 +00001527 return;
1528 }
1529
sewardj20917d82002-05-28 01:36:45 +00001530 switch (what) {
1531 case 2: /* get */
njnd3040452003-05-19 15:04:06 +00001532 SET_PTHREQ_RETVAL(tid, VG_(threads)[det].detached ? 1 : 0);
sewardj20917d82002-05-28 01:36:45 +00001533 return;
jsgf855d93d2003-10-13 22:26:55 +00001534 case 1:
sewardj7989d0c2002-05-28 11:00:01 +00001535 VG_(threads)[det].detached = True;
njnd3040452003-05-19 15:04:06 +00001536 SET_PTHREQ_RETVAL(tid, 0);
jsgf855d93d2003-10-13 22:26:55 +00001537 /* wake anyone who was joining on us */
1538 maybe_rendezvous_joiners_and_joinees();
sewardj20917d82002-05-28 01:36:45 +00001539 return;
1540 case 0: /* set not detached */
sewardj7989d0c2002-05-28 11:00:01 +00001541 VG_(threads)[det].detached = False;
njnd3040452003-05-19 15:04:06 +00001542 SET_PTHREQ_RETVAL(tid, 0);
sewardj20917d82002-05-28 01:36:45 +00001543 return;
1544 default:
njne427a662002-10-02 11:08:25 +00001545 VG_(core_panic)("do__set_or_get_detach");
sewardj20917d82002-05-28 01:36:45 +00001546 }
1547}
1548
1549
/* Scheduler side of pthread_cancel(): thread 'tid' requests that a
   cancellation (running 'cancelpend_hdlr') be made pending on thread
   'cee'.

   Replies to tid via SET_PTHREQ_RETVAL: VKI_ESRCH if cee does not
   exist or has already exited and is merely waiting to be joined;
   otherwise 0.  May also deliver the cancellation to cee immediately
   (see the test at the bottom). */
static
void do__set_cancelpend ( ThreadId tid,
                          ThreadId cee,
                          void (*cancelpend_hdlr)(void*) )
{
   Char msg_buf[100];

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   /* Reject a target that doesn't exist, or one that has finished and
      is only waiting for a joiner to pick up its result. */
   if (!VG_(is_valid_tid)(cee) ||
       VG_(threads)[cee].status == VgTs_WaitJoiner) {
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,
            "set_cancelpend for invalid tid %d", cee);
         print_sched_event(tid, msg_buf);
      }
      VG_(record_pthread_error)( tid,
         "pthread_cancel: target thread does not exist, or invalid");
      SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
      return;
   }

   /* Record the handler; cee will run it when the cancellation is
      finally delivered. */
   VG_(threads)[cee].cancel_pend = cancelpend_hdlr;

   /* interrupt a pending syscall */
   VG_(proxy_abort_syscall)(cee);

   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf,
         "set_cancelpend (hdlr = %p, set by tid %d)",
         cancelpend_hdlr, tid);
      print_sched_event(cee, msg_buf);
   }

   /* Thread doing the cancelling returns with success. */
   SET_PTHREQ_RETVAL(tid, 0);

   /* Perhaps we can nuke the cancellee right now?  Attempt immediate
      delivery when cee uses asynchronous cancellation (cancel_ty ==
      False), or when cee is blocked in a state other than Runnable or
      WaitMX.  NOTE(review): do__testcancel itself still checks that
      cancellation is enabled (cancel_st) before delivering. */
   if (!VG_(threads)[cee].cancel_ty || /* if PTHREAD_CANCEL_ASYNCHRONOUS */
       (VG_(threads)[cee].status != VgTs_Runnable &&
        VG_(threads)[cee].status != VgTs_WaitMX)) {
      do__testcancel(cee);
   }
}
1595
1596
/* Scheduler side of pthread_join(): thread 'tid' (the joiner) waits
   for thread 'jee' (the joinee) to terminate; the joinee's exit value
   is eventually written through 'thread_return'.

   Error replies via SET_PTHREQ_RETVAL: EDEADLK for a self-join,
   VKI_EINVAL if jee is invalid/detached or another thread is already
   join-waiting on it.  On the success path this thread is parked in
   VgTs_WaitJoinee and the rendezvous machinery supplies its eventual
   return value. */
static
void do_pthread_join ( ThreadId tid,
                       ThreadId jee, void** thread_return )
{
   Char     msg_buf[100];
   ThreadId i;
   /* jee, the joinee, is the thread specified as an arg in thread
      tid's call to pthread_join.  So tid is the join-er. */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   /* A thread may not join with itself. */
   if (jee == tid) {
      VG_(record_pthread_error)( tid,
         "pthread_join: attempt to join to self");
      SET_PTHREQ_RETVAL(tid, EDEADLK); /* libc constant, not a kernel one */
      vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
      return;
   }

   /* Flush any completed pairs, so as to make sure what we're looking
      at is up-to-date. */
   maybe_rendezvous_joiners_and_joinees();

   /* Is this a sane request? */
   if ( ! VG_(is_valid_tid)(jee) ||
        VG_(threads)[jee].detached) {
      /* Invalid thread to join to. */
      VG_(record_pthread_error)( tid,
         "pthread_join: target thread does not exist, invalid, or detached");
      SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
      return;
   }

   /* Is anyone else already in a join-wait for jee?  Only one joiner
      per joinee is permitted. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (i == tid) continue;
      if (VG_(threads)[i].status == VgTs_WaitJoinee
          && VG_(threads)[i].joiner_jee_tid == jee) {
         /* Someone already did join on this thread */
         VG_(record_pthread_error)( tid,
            "pthread_join: another thread already "
            "in join-wait for target thread");
         SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
         vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
         return;
      }
   }

   /* pthread_join is a cancellation point: if a cancellation is
      pending and enabled on the joiner, deliver it instead of
      starting the wait. */
   if(VG_(threads)[tid].cancel_pend != NULL &&
      VG_(threads)[tid].cancel_st) {
      make_thread_jump_to_cancelhdlr ( tid );
   } else {
      /* Mark this thread as waiting for the joinee. */
      VG_(threads)[tid].status            = VgTs_WaitJoinee;
      VG_(threads)[tid].joiner_thread_return = thread_return;
      VG_(threads)[tid].joiner_jee_tid    = jee;

      /* Look for matching joiners and joinees and do the right thing. */
      maybe_rendezvous_joiners_and_joinees();

      /* Return value is irrelevant since this thread becomes
         non-runnable.  maybe_resume_joiner() will cause it to return the
         right value when it resumes. */

      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,
            "wait for joinee %d (may already be ready)", jee);
         print_sched_event(tid, msg_buf);
      }
   }
}
1668
1669
sewardj20917d82002-05-28 01:36:45 +00001670/* ( void* ): calling thread waits for joiner and returns the void* to
1671 it. This is one of two ways in which a thread can finally exit --
1672 the other is do__quit. */
sewardje663cb92002-04-12 10:26:32 +00001673static
sewardj20917d82002-05-28 01:36:45 +00001674void do__wait_joiner ( ThreadId tid, void* retval )
sewardje663cb92002-04-12 10:26:32 +00001675{
sewardj20917d82002-05-28 01:36:45 +00001676 Char msg_buf[100];
1677 vg_assert(VG_(is_valid_tid)(tid));
1678 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1679 if (VG_(clo_trace_sched)) {
1680 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001681 "do__wait_joiner(retval = %p) (non-detached thread exit)", retval);
sewardj20917d82002-05-28 01:36:45 +00001682 print_sched_event(tid, msg_buf);
1683 }
1684 VG_(threads)[tid].status = VgTs_WaitJoiner;
1685 VG_(threads)[tid].joinee_retval = retval;
1686 maybe_rendezvous_joiners_and_joinees();
1687}
1688
1689
1690/* ( no-args ): calling thread disappears from the system forever.
1691 Reclaim resources. */
1692static
1693void do__quit ( ThreadId tid )
1694{
1695 Char msg_buf[100];
1696 vg_assert(VG_(is_valid_tid)(tid));
1697 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1698 VG_(threads)[tid].status = VgTs_Empty; /* bye! */
jsgf855d93d2003-10-13 22:26:55 +00001699 cleanup_after_thread_exited ( tid, False );
sewardj20917d82002-05-28 01:36:45 +00001700 if (VG_(clo_trace_sched)) {
sewardj7989d0c2002-05-28 11:00:01 +00001701 VG_(sprintf)(msg_buf, "do__quit (detached thread exit)");
sewardj20917d82002-05-28 01:36:45 +00001702 print_sched_event(tid, msg_buf);
1703 }
jsgf855d93d2003-10-13 22:26:55 +00001704 maybe_rendezvous_joiners_and_joinees();
sewardj20917d82002-05-28 01:36:45 +00001705 /* Return value is irrelevant; this thread will not get
1706 rescheduled. */
1707}
1708
1709
/* Should never be entered.  If it is, will be on the simulated
   CPU.  Its address is pushed as the (bogus) return address for the
   root function of every new thread (see do__apply_in_new_thread), so
   reaching here means that function returned, which is forbidden. */
static
void do__apply_in_new_thread_bogusRA ( void )
{
   VG_(core_panic)("do__apply_in_new_thread_bogusRA");
}
1717
1718/* (Fn, Arg): Create a new thread and run Fn applied to Arg in it. Fn
1719 MUST NOT return -- ever. Eventually it will do either __QUIT or
1720 __WAIT_JOINER. Return the child tid to the parent. */
1721static
1722void do__apply_in_new_thread ( ThreadId parent_tid,
1723 void* (*fn)(void *),
thughesdaa34562004-06-27 12:48:53 +00001724 void* arg,
1725 StackInfo *si )
sewardj20917d82002-05-28 01:36:45 +00001726{
sewardje663cb92002-04-12 10:26:32 +00001727 Addr new_stack;
1728 UInt new_stk_szb;
1729 ThreadId tid;
1730 Char msg_buf[100];
1731
1732 /* Paranoia ... */
1733 vg_assert(sizeof(pthread_t) == sizeof(UInt));
1734
sewardj018f7622002-05-15 21:13:39 +00001735 vg_assert(VG_(threads)[parent_tid].status != VgTs_Empty);
sewardje663cb92002-04-12 10:26:32 +00001736
sewardj1e8cdc92002-04-18 11:37:52 +00001737 tid = vg_alloc_ThreadState();
sewardje663cb92002-04-12 10:26:32 +00001738
1739 /* If we've created the main thread's tid, we're in deep trouble :) */
sewardj6072c362002-04-19 14:40:57 +00001740 vg_assert(tid != 1);
nethercote36881a22004-08-04 14:03:16 +00001741 vg_assert(is_valid_or_empty_tid(tid));
sewardje663cb92002-04-12 10:26:32 +00001742
sewardjc4a810d2002-11-13 22:25:51 +00001743 /* do this early, before the child gets any memory writes */
1744 VG_TRACK ( post_thread_create, parent_tid, tid );
1745
sewardjf6374322002-11-13 22:35:55 +00001746 /* Create new thread with default attrs:
1747 deferred cancellation, not detached
1748 */
1749 mostly_clear_thread_record(tid);
1750 VG_(threads)[tid].status = VgTs_Runnable;
1751
sewardje663cb92002-04-12 10:26:32 +00001752 /* Copy the parent's CPU state into the child's, in a roundabout
1753 way (via baseBlock). */
nethercotef971ab72004-08-02 16:27:40 +00001754 load_thread_state(parent_tid);
nethercotef9b59412004-09-10 15:33:32 +00001755 VGA_(setup_child)( &VG_(threads)[tid].arch,
1756 &VG_(threads)[parent_tid].arch );
nethercotef971ab72004-08-02 16:27:40 +00001757 save_thread_state(tid);
sewardjf6374322002-11-13 22:35:55 +00001758 vg_tid_last_in_baseBlock = tid;
sewardje663cb92002-04-12 10:26:32 +00001759
1760 /* Consider allocating the child a stack, if the one it already has
1761 is inadequate. */
thughesdaa34562004-06-27 12:48:53 +00001762 new_stk_szb = si->size + VG_AR_CLIENT_STACKBASE_REDZONE_SZB + si->guardsize;
1763 new_stk_szb = (new_stk_szb + VKI_BYTES_PER_PAGE - 1) & ~VKI_BYTES_PER_PAGE;
1764
1765 VG_(threads)[tid].stack_guard_size = si->guardsize;
sewardje663cb92002-04-12 10:26:32 +00001766
sewardj018f7622002-05-15 21:13:39 +00001767 if (new_stk_szb > VG_(threads)[tid].stack_size) {
sewardje663cb92002-04-12 10:26:32 +00001768 /* Again, for good measure :) We definitely don't want to be
1769 allocating a stack for the main thread. */
sewardj6072c362002-04-19 14:40:57 +00001770 vg_assert(tid != 1);
thughesdaa34562004-06-27 12:48:53 +00001771 if (VG_(threads)[tid].stack_size > 0)
1772 VG_(client_free)(VG_(threads)[tid].stack_base);
fitzhardinge98abfc72003-12-16 02:05:15 +00001773 new_stack = VG_(client_alloc)(0, new_stk_szb,
nethercotee567e702004-07-10 17:49:17 +00001774 VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
fitzhardinge98abfc72003-12-16 02:05:15 +00001775 SF_STACK);
nethercote8e9eab02004-07-11 18:01:06 +00001776 // Given the low number of threads Valgrind can handle, stack
1777 // allocation should pretty much always succeed, so having an
1778 // assertion here isn't too bad. However, probably better would be
1779 // this:
1780 //
1781 // if (0 == new_stack)
1782 // SET_PTHREQ_RETVAL(parent_tid, -VKI_EAGAIN);
1783 //
nethercotee567e702004-07-10 17:49:17 +00001784 vg_assert(0 != new_stack);
sewardj018f7622002-05-15 21:13:39 +00001785 VG_(threads)[tid].stack_base = new_stack;
1786 VG_(threads)[tid].stack_size = new_stk_szb;
1787 VG_(threads)[tid].stack_highest_word
sewardje663cb92002-04-12 10:26:32 +00001788 = new_stack + new_stk_szb
sewardj1e8cdc92002-04-18 11:37:52 +00001789 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB; /* -4 ??? */;
sewardje663cb92002-04-12 10:26:32 +00001790 }
sewardj1e8cdc92002-04-18 11:37:52 +00001791
njn25e49d8e72002-09-23 09:36:25 +00001792 /* Having got memory to hold the thread's stack:
1793 - set %esp as base + size
1794 - mark everything below %esp inaccessible
1795 - mark redzone at stack end inaccessible
1796 */
njnd3040452003-05-19 15:04:06 +00001797 SET_PTHREQ_ESP(tid, VG_(threads)[tid].stack_base
1798 + VG_(threads)[tid].stack_size
1799 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
sewardj1e8cdc92002-04-18 11:37:52 +00001800
njn25e49d8e72002-09-23 09:36:25 +00001801 VG_TRACK ( die_mem_stack, VG_(threads)[tid].stack_base,
thughesdaa34562004-06-27 12:48:53 +00001802 VG_(threads)[tid].stack_size
1803 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
nethercotec06e2132004-09-03 13:45:29 +00001804 VG_TRACK ( ban_mem_stack, VG_(threads)[tid].arch.m_esp,
njn25e49d8e72002-09-23 09:36:25 +00001805 VG_AR_CLIENT_STACKBASE_REDZONE_SZB );
sewardje663cb92002-04-12 10:26:32 +00001806
njn25e49d8e72002-09-23 09:36:25 +00001807 /* push two args */
nethercotec06e2132004-09-03 13:45:29 +00001808 SET_PTHREQ_ESP(tid, VG_(threads)[tid].arch.m_esp - 8);
njnd3040452003-05-19 15:04:06 +00001809
nethercotec06e2132004-09-03 13:45:29 +00001810 VG_TRACK ( new_mem_stack, (Addr)VG_(threads)[tid].arch.m_esp, 2 * 4 );
njn72718642003-07-24 08:45:32 +00001811 VG_TRACK ( pre_mem_write, Vg_CorePThread, tid, "new thread: stack",
nethercotec06e2132004-09-03 13:45:29 +00001812 (Addr)VG_(threads)[tid].arch.m_esp, 2 * 4 );
njn25e49d8e72002-09-23 09:36:25 +00001813
1814 /* push arg and (bogus) return address */
nethercotec06e2132004-09-03 13:45:29 +00001815 * (UInt*)(VG_(threads)[tid].arch.m_esp+4) = (UInt)arg;
1816 * (UInt*)(VG_(threads)[tid].arch.m_esp)
sewardj20917d82002-05-28 01:36:45 +00001817 = (UInt)&do__apply_in_new_thread_bogusRA;
sewardje663cb92002-04-12 10:26:32 +00001818
nethercotec06e2132004-09-03 13:45:29 +00001819 VG_TRACK ( post_mem_write, VG_(threads)[tid].arch.m_esp, 2 * 4 );
sewardje663cb92002-04-12 10:26:32 +00001820
1821 /* this is where we start */
nethercoteb8ef9d82004-09-05 22:02:33 +00001822 ARCH_INSTR_PTR(VG_(threads)[tid].arch) = (UInt)fn;
sewardje663cb92002-04-12 10:26:32 +00001823
sewardj8937c812002-04-12 20:12:20 +00001824 if (VG_(clo_trace_sched)) {
njn25e49d8e72002-09-23 09:36:25 +00001825 VG_(sprintf)(msg_buf, "new thread, created by %d", parent_tid );
sewardje663cb92002-04-12 10:26:32 +00001826 print_sched_event(tid, msg_buf);
1827 }
1828
fitzhardingef7866182004-03-16 22:09:12 +00001829 /* Start the thread with all signals blocked; it's up to the client
1830 code to set the right signal mask when it's ready. */
1831 VG_(ksigfillset)(&VG_(threads)[tid].sig_mask);
jsgf855d93d2003-10-13 22:26:55 +00001832
1833 /* Now that the signal mask is set up, create a proxy LWP for this thread */
1834 VG_(proxy_create)(tid);
1835
1836 /* Set the proxy's signal mask */
1837 VG_(proxy_setsigmask)(tid);
sewardjb48e5002002-05-13 00:16:03 +00001838
sewardj20917d82002-05-28 01:36:45 +00001839 /* return child's tid to parent */
njnd3040452003-05-19 15:04:06 +00001840 SET_PTHREQ_RETVAL(parent_tid, tid); /* success */
sewardje663cb92002-04-12 10:26:32 +00001841}
1842
1843
sewardj604ec3c2002-04-18 22:38:41 +00001844/* -----------------------------------------------------------
1845 MUTEXes
1846 -------------------------------------------------------- */
1847
rjwalsh7109a8c2004-09-02 00:31:02 +00001848/* vg_pthread_mutex_t is defined in core.h.
sewardj604ec3c2002-04-18 22:38:41 +00001849
nethercote1f0173b2004-02-28 15:40:36 +00001850 The initializers zero everything, except possibly the fourth word,
1851 which in vg_pthread_mutex_t is the __vg_m_kind field. It gets set to one
1852 of PTHREAD_MUTEX_{TIMED,RECURSIVE,ERRORCHECK,ADAPTIVE}_NP
sewardj604ec3c2002-04-18 22:38:41 +00001853
sewardj6072c362002-04-19 14:40:57 +00001854 How we use it:
sewardj604ec3c2002-04-18 22:38:41 +00001855
nethercote1f0173b2004-02-28 15:40:36 +00001856 __vg_m_kind never changes and indicates whether or not it is recursive.
sewardj6072c362002-04-19 14:40:57 +00001857
nethercote1f0173b2004-02-28 15:40:36 +00001858 __vg_m_count indicates the lock count; if 0, the mutex is not owned by
sewardj6072c362002-04-19 14:40:57 +00001859 anybody.
1860
nethercote1f0173b2004-02-28 15:40:36 +00001861 __vg_m_owner has a ThreadId value stuffed into it. We carefully arrange
sewardj6072c362002-04-19 14:40:57 +00001862 that ThreadId == 0 is invalid (VG_INVALID_THREADID), so that
1863 statically initialised mutexes correctly appear
1864 to belong to nobody.
1865
   In summary, a not-in-use mutex is distinguished by having __vg_m_owner
   == 0 (VG_INVALID_THREADID) and __vg_m_count == 0 too.  If one of those
   conditions holds, the other should too.
1869
   There is no linked list of threads waiting for this mutex.  Instead
   a thread in WaitMX state points at the mutex with its waited_on_mx
   field.  This makes _unlock() inefficient, but simple to implement the
   right semantics vis-a-vis signals.
sewardje663cb92002-04-12 10:26:32 +00001874
sewardj604ec3c2002-04-18 22:38:41 +00001875 We don't have to deal with mutex initialisation; the client side
sewardj6072c362002-04-19 14:40:57 +00001876 deals with that for us.
1877*/
sewardje663cb92002-04-12 10:26:32 +00001878
/* Helper fns ... */
/* Called when 'mutex' is being unlocked: pick an arbitrary thread
   blocked (VgTs_WaitMX) on this mutex and hand it the lock; if nobody
   is waiting, mark the mutex as free.  'caller' is a label used only
   in trace messages. */
static
void release_one_thread_waiting_on_mutex ( vg_pthread_mutex_t* mutex,
                                           Char* caller )
{
   Int  i;
   Char msg_buf[100];

   /* Find some arbitrary thread waiting on this mutex, and make it
      runnable.  If none are waiting, mark the mutex as not held. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty)
         continue;
      if (VG_(threads)[i].status == VgTs_WaitMX
          && VG_(threads)[i].associated_mx == mutex)
         break;
   }

   /* Tell tools about the unlock while __vg_m_owner still names the
      releasing thread (it is overwritten below). */
   VG_TRACK( post_mutex_unlock, (ThreadId)mutex->__vg_m_owner, mutex );

   vg_assert(i <= VG_N_THREADS);
   if (i == VG_N_THREADS) {
      /* Nobody else is waiting on it. */
      mutex->__vg_m_count = 0;
      mutex->__vg_m_owner = VG_INVALID_THREADID;
   } else {
      /* Notionally transfer the hold to thread i, whose
         pthread_mutex_lock() call now returns with 0 (success). */
      /* The .count is already == 1. */
      vg_assert(VG_(threads)[i].associated_mx == mutex);
      mutex->__vg_m_owner = (/*_pthread_descr*/void*)i;
      VG_(threads)[i].status        = VgTs_Runnable;
      VG_(threads)[i].associated_mx = NULL;
      /* m_edx already holds pth_mx_lock() success (0) */

      VG_TRACK( post_mutex_lock, (ThreadId)i, mutex);

      if (VG_(clo_trace_pthread_level) >= 1) {
         VG_(sprintf)(msg_buf, "%s mx %p: RESUME",
                      caller, mutex );
         print_pthread_event(i, msg_buf);
      }
   }
}
1923
sewardje663cb92002-04-12 10:26:32 +00001924
/* Implements pthread_mutex_lock() (is_trylock == False) and
   pthread_mutex_trylock() (is_trylock == True) for thread 'tid' on
   'mutex'.

   Replies via SET_PTHREQ_RETVAL: 0 on acquisition; VKI_EINVAL for a
   NULL/corrupt mutex or invalid owner; EBUSY/EDEADLK when already
   locked (per trylock/recursive rules).  A blocking lock that cannot
   be satisfied parks the thread in VgTs_WaitMX until the holder
   unlocks. */
static
void do_pthread_mutex_lock( ThreadId tid,
                            Bool is_trylock,
                            vg_pthread_mutex_t* mutex )
{
   Char  msg_buf[100];
   Char* caller
      = is_trylock ? "pthread_mutex_trylock"
                   : "pthread_mutex_lock   ";

   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf, "%s mx %p ...", caller, mutex );
      print_pthread_event(tid, msg_buf);
   }

   /* Paranoia ... */
   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   /* POSIX doesn't mandate this, but for sanity ... */
   if (mutex == NULL) {
      VG_(record_pthread_error)( tid,
         "pthread_mutex_lock/trylock: mutex is NULL");
      SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
      return;
   }

   /* More paranoia ...  accept only the known mutex kinds (which vary
      with the glibc version) and a non-negative lock count. */
   switch (mutex->__vg_m_kind) {
#     ifndef GLIBC_2_1
      case PTHREAD_MUTEX_TIMED_NP:
      case PTHREAD_MUTEX_ADAPTIVE_NP:
#     endif
#     ifdef GLIBC_2_1
      case PTHREAD_MUTEX_FAST_NP:
#     endif
      case PTHREAD_MUTEX_RECURSIVE_NP:
      case PTHREAD_MUTEX_ERRORCHECK_NP:
         if (mutex->__vg_m_count >= 0) break;
         /* else fall thru */
      default:
         VG_(record_pthread_error)( tid,
            "pthread_mutex_lock/trylock: mutex is invalid");
         SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
         return;
   }

   if (mutex->__vg_m_count > 0) {
      /* A held mutex must have a valid owner recorded. */
      if (!VG_(is_valid_tid)((ThreadId)mutex->__vg_m_owner)) {
         VG_(record_pthread_error)( tid,
            "pthread_mutex_lock/trylock: mutex has invalid owner");
         SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
         return;
      }

      /* Someone has it already. */
      if ((ThreadId)mutex->__vg_m_owner == tid) {
         /* It's locked -- by me! */
         if (mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
            /* Recursive mutex: bump the count and return 0 (success). */
            mutex->__vg_m_count++;
            SET_PTHREQ_RETVAL(tid, 0);
            if (0)
               VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n",
                           tid, mutex, mutex->__vg_m_count);
            return;
         } else {
            /* Non-recursive relock of our own mutex: EBUSY for
               trylock, EDEADLK for a blocking lock. */
            if (is_trylock)
               SET_PTHREQ_RETVAL(tid, EBUSY);
            else
               SET_PTHREQ_RETVAL(tid, EDEADLK);
            return;
         }
      } else {
         /* Someone else has it; we have to wait.  Mark ourselves
            thusly. */
         /* GUARD: __vg_m_count > 0 && __vg_m_owner is valid */
         if (is_trylock) {
            /* caller is polling; so return immediately. */
            SET_PTHREQ_RETVAL(tid, EBUSY);
         } else {
            VG_TRACK ( pre_mutex_lock, tid, mutex );

            /* Block until release_one_thread_waiting_on_mutex picks
               us; the 0 reply is delivered when we resume. */
            VG_(threads)[tid].status        = VgTs_WaitMX;
            VG_(threads)[tid].associated_mx = mutex;
            SET_PTHREQ_RETVAL(tid, 0); /* pth_mx_lock success value */
            if (VG_(clo_trace_pthread_level) >= 1) {
               VG_(sprintf)(msg_buf, "%s mx %p: BLOCK",
                            caller, mutex );
               print_pthread_event(tid, msg_buf);
            }
         }
         return;
      }

   } else {
      /* Nobody owns it.  Sanity check ... */
      vg_assert(mutex->__vg_m_owner == VG_INVALID_THREADID);

      VG_TRACK ( pre_mutex_lock, tid, mutex );

      /* We get it! [for the first time]. */
      mutex->__vg_m_count = 1;
      mutex->__vg_m_owner = (/*_pthread_descr*/void*)tid;

      /* return 0 (success). */
      SET_PTHREQ_RETVAL(tid, 0);

      VG_TRACK( post_mutex_lock, tid, mutex);
   }
}
2036
2037
2038static
2039void do_pthread_mutex_unlock ( ThreadId tid,
nethercote1f0173b2004-02-28 15:40:36 +00002040 vg_pthread_mutex_t* mutex )
sewardje663cb92002-04-12 10:26:32 +00002041{
sewardj3b5d8862002-04-20 13:53:23 +00002042 Char msg_buf[100];
sewardje663cb92002-04-12 10:26:32 +00002043
sewardj45b4b372002-04-16 22:50:32 +00002044 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj3b5d8862002-04-20 13:53:23 +00002045 VG_(sprintf)(msg_buf, "pthread_mutex_unlock mx %p ...", mutex );
sewardj8937c812002-04-12 20:12:20 +00002046 print_pthread_event(tid, msg_buf);
2047 }
2048
sewardj604ec3c2002-04-18 22:38:41 +00002049 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002050 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002051 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj604ec3c2002-04-18 22:38:41 +00002052
2053 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002054 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002055 "pthread_mutex_unlock: mutex is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002056 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002057 return;
2058 }
2059
2060 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00002061 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002062# ifndef GLIBC_2_1
sewardj604ec3c2002-04-18 22:38:41 +00002063 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002064 case PTHREAD_MUTEX_ADAPTIVE_NP:
2065# endif
sewardja1679dd2002-05-10 22:31:40 +00002066# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002067 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002068# endif
sewardj604ec3c2002-04-18 22:38:41 +00002069 case PTHREAD_MUTEX_RECURSIVE_NP:
2070 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002071 if (mutex->__vg_m_count >= 0) break;
sewardj604ec3c2002-04-18 22:38:41 +00002072 /* else fall thru */
2073 default:
njn25e49d8e72002-09-23 09:36:25 +00002074 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002075 "pthread_mutex_unlock: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002076 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002077 return;
2078 }
sewardje663cb92002-04-12 10:26:32 +00002079
2080 /* Barf if we don't currently hold the mutex. */
nethercote1f0173b2004-02-28 15:40:36 +00002081 if (mutex->__vg_m_count == 0) {
sewardj4dced352002-06-04 22:54:20 +00002082 /* nobody holds it */
njn25e49d8e72002-09-23 09:36:25 +00002083 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002084 "pthread_mutex_unlock: mutex is not locked");
njnd3040452003-05-19 15:04:06 +00002085 SET_PTHREQ_RETVAL(tid, EPERM);
sewardj4dced352002-06-04 22:54:20 +00002086 return;
2087 }
2088
nethercote1f0173b2004-02-28 15:40:36 +00002089 if ((ThreadId)mutex->__vg_m_owner != tid) {
sewardj4dced352002-06-04 22:54:20 +00002090 /* we don't hold it */
njn25e49d8e72002-09-23 09:36:25 +00002091 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002092 "pthread_mutex_unlock: mutex is locked by a different thread");
njnd3040452003-05-19 15:04:06 +00002093 SET_PTHREQ_RETVAL(tid, EPERM);
sewardje663cb92002-04-12 10:26:32 +00002094 return;
2095 }
2096
sewardjf8f819e2002-04-17 23:21:37 +00002097 /* If it's a multiply-locked recursive mutex, just decrement the
2098 lock count and return. */
nethercote1f0173b2004-02-28 15:40:36 +00002099 if (mutex->__vg_m_count > 1) {
2100 vg_assert(mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
2101 mutex->__vg_m_count --;
njnd3040452003-05-19 15:04:06 +00002102 SET_PTHREQ_RETVAL(tid, 0); /* success */
sewardjf8f819e2002-04-17 23:21:37 +00002103 return;
2104 }
2105
sewardj604ec3c2002-04-18 22:38:41 +00002106 /* Now we're sure it is locked exactly once, and by the thread who
sewardjf8f819e2002-04-17 23:21:37 +00002107 is now doing an unlock on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002108 vg_assert(mutex->__vg_m_count == 1);
2109 vg_assert((ThreadId)mutex->__vg_m_owner == tid);
sewardjf8f819e2002-04-17 23:21:37 +00002110
sewardj3b5d8862002-04-20 13:53:23 +00002111 /* Release at max one thread waiting on this mutex. */
2112 release_one_thread_waiting_on_mutex ( mutex, "pthread_mutex_lock" );
sewardje663cb92002-04-12 10:26:32 +00002113
sewardj3b5d8862002-04-20 13:53:23 +00002114 /* Our (tid's) pth_unlock() returns with 0 (success). */
njnd3040452003-05-19 15:04:06 +00002115 SET_PTHREQ_RETVAL(tid, 0); /* Success. */
sewardje663cb92002-04-12 10:26:32 +00002116}
2117
2118
sewardj6072c362002-04-19 14:40:57 +00002119/* -----------------------------------------------------------
2120 CONDITION VARIABLES
2121 -------------------------------------------------------- */
sewardje663cb92002-04-12 10:26:32 +00002122
rjwalsh7109a8c2004-09-02 00:31:02 +00002123/* The relevant type (vg_pthread_cond_t) is in core.h.
sewardj77e466c2002-04-14 02:29:29 +00002124
nethercote1f0173b2004-02-28 15:40:36 +00002125 We don't use any fields of vg_pthread_cond_t for anything at all.
2126 Only the identity of the CVs is important. (Actually, we initialise
2127 __vg_c_waiting in pthread_cond_init() to VG_INVALID_THREADID.)
sewardj6072c362002-04-19 14:40:57 +00002128
2129 Linux pthreads supports no attributes on condition variables, so we
sewardj3b5d8862002-04-20 13:53:23 +00002130 don't need to think too hard there. */
sewardj6072c362002-04-19 14:40:57 +00002131
sewardj77e466c2002-04-14 02:29:29 +00002132
/* Handle expiry of a pthread_cond_timedwait deadline for thread tid.
   Preconditions (asserted): tid is valid, is in VgTs_WaitCV, and has
   a finite awaken_at.  The thread stops waiting on its CV and must
   reacquire the associated mutex: if the mutex is currently free the
   thread is handed the mutex and made Runnable; otherwise it is moved
   to VgTs_WaitMX, blocked on the mutex.  In both cases the client's
   eventual return value is set to ETIMEDOUT. */
static
void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid )
{
   Char msg_buf[100];
   vg_pthread_mutex_t* mx;
   vg_pthread_cond_t* cv;

   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_WaitCV
             && VG_(threads)[tid].awaken_at != 0xFFFFFFFF);
   mx = VG_(threads)[tid].associated_mx;
   vg_assert(mx != NULL);
   cv = VG_(threads)[tid].associated_cv;
   vg_assert(cv != NULL);

   if (mx->__vg_m_owner == VG_INVALID_THREADID) {
      /* Currently unheld; hand it out to thread tid. */
      vg_assert(mx->__vg_m_count == 0);
      VG_(threads)[tid].status = VgTs_Runnable;
      SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
      VG_(threads)[tid].associated_cv = NULL;
      VG_(threads)[tid].associated_mx = NULL;
      /* Take ownership of the mutex on tid's behalf. */
      mx->__vg_m_owner = (/*_pthread_descr*/void*)tid;
      mx->__vg_m_count = 1;

      VG_TRACK( post_mutex_lock, tid, mx );

      if (VG_(clo_trace_pthread_level) >= 1) {
         VG_(sprintf)(msg_buf,
            "pthread_cond_timedwait cv %p: TIMEOUT with mx %p",
            cv, mx );
         print_pthread_event(tid, msg_buf);
      }
   } else {
      /* Currently held.  Make thread tid be blocked on it. */
      vg_assert(mx->__vg_m_count > 0);
      VG_TRACK( pre_mutex_lock, tid, mx );

      VG_(threads)[tid].status = VgTs_WaitMX;
      SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
      VG_(threads)[tid].associated_cv = NULL;
      VG_(threads)[tid].associated_mx = mx;
      if (VG_(clo_trace_pthread_level) >= 1) {
         VG_(sprintf)(msg_buf,
            "pthread_cond_timedwait cv %p: TIMEOUT -> BLOCK for mx %p",
            cv, mx );
         print_pthread_event(tid, msg_buf);
      }
   }
}
2183
2184
/* Wake up at most n_to_release threads currently blocked (VgTs_WaitCV)
   on condition variable 'cond'.  For each released thread: if its
   associated mutex is free, the thread is handed the mutex and made
   Runnable; otherwise it is moved to VgTs_WaitMX, blocked on that
   mutex.  Stops early when no more waiters exist.  'caller' is used
   only in trace messages.  NB: waiters are found by a linear scan of
   the thread table, so release order follows thread-slot order, not
   wait order. */
static
void release_N_threads_waiting_on_cond ( vg_pthread_cond_t* cond,
                                         Int n_to_release,
                                         Char* caller )
{
   Int i;
   Char msg_buf[100];
   vg_pthread_mutex_t* mx;

   while (True) {
      if (n_to_release == 0)
         return;

      /* Find a thread waiting on this CV. */
      for (i = 1; i < VG_N_THREADS; i++) {
         if (VG_(threads)[i].status == VgTs_Empty)
            continue;
         if (VG_(threads)[i].status == VgTs_WaitCV
             && VG_(threads)[i].associated_cv == cond)
            break;
      }
      vg_assert(i <= VG_N_THREADS);

      if (i == VG_N_THREADS) {
         /* Nobody else is waiting on it. */
         return;
      }

      mx = VG_(threads)[i].associated_mx;
      vg_assert(mx != NULL);

      VG_TRACK( pre_mutex_lock, i, mx );

      if (mx->__vg_m_owner == VG_INVALID_THREADID) {
         /* Currently unheld; hand it out to thread i. */
         vg_assert(mx->__vg_m_count == 0);
         VG_(threads)[i].status = VgTs_Runnable;
         VG_(threads)[i].associated_cv = NULL;
         VG_(threads)[i].associated_mx = NULL;
         mx->__vg_m_owner = (/*_pthread_descr*/void*)i;
         mx->__vg_m_count = 1;
         /* .m_edx already holds pth_cond_wait success value (0) */

         VG_TRACK( post_mutex_lock, i, mx );

         if (VG_(clo_trace_pthread_level) >= 1) {
            VG_(sprintf)(msg_buf, "%s cv %p: RESUME with mx %p",
                                  caller, cond, mx );
            print_pthread_event(i, msg_buf);
         }

      } else {
         /* Currently held.  Make thread i be blocked on it. */
         vg_assert(mx->__vg_m_count > 0);
         VG_(threads)[i].status = VgTs_WaitMX;
         VG_(threads)[i].associated_cv = NULL;
         VG_(threads)[i].associated_mx = mx;
         SET_PTHREQ_RETVAL(i, 0); /* pth_cond_wait success value */

         if (VG_(clo_trace_pthread_level) >= 1) {
            VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
                                  caller, cond, mx );
            print_pthread_event(i, msg_buf);
         }

      }

      n_to_release--;
   }
}
2255
2256
/* Implement pthread_cond_wait / pthread_cond_timedwait for thread tid.
   ms_end == 0xFFFFFFFF means wait forever; otherwise ms_end is the
   absolute millisecond deadline.  Validates that cond and mutex are
   non-NULL, that the mutex is of a known kind, and that tid currently
   holds the mutex; errors are reported via SET_PTHREQ_RETVAL
   (VKI_EINVAL / VKI_EPERM).  If a cancellation is pending and enabled,
   it is delivered instead of blocking.  Otherwise the thread is queued
   on the CV (VgTs_WaitCV), a timeout is registered if finite, and the
   mutex is released. */
static
void do_pthread_cond_wait ( ThreadId tid,
                            vg_pthread_cond_t *cond,
                            vg_pthread_mutex_t *mutex,
                            UInt ms_end )
{
   Char msg_buf[100];

   /* If ms_end == 0xFFFFFFFF, wait forever (no timeout).  Otherwise,
      ms_end is the ending millisecond. */

   /* pre: mutex should be a valid mutex and owned by tid. */
   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf, "pthread_cond_wait cv %p, mx %p, end %d ...",
                            cond, mutex, ms_end );
      print_pthread_event(tid, msg_buf);
   }

   /* Paranoia ... */
   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   if (mutex == NULL) {
      VG_(record_pthread_error)( tid,
         "pthread_cond_wait/timedwait: mutex is NULL");
      SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
      return;
   }

   if (cond == NULL) {
      VG_(record_pthread_error)( tid,
         "pthread_cond_wait/timedwait: cond is NULL");
      SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
      return;
   }

   /* More paranoia ... reject unknown mutex kinds and corrupted
      (negative) lock counts. */
   switch (mutex->__vg_m_kind) {
#     ifndef GLIBC_2_1
      case PTHREAD_MUTEX_TIMED_NP:
      case PTHREAD_MUTEX_ADAPTIVE_NP:
#     endif
#     ifdef GLIBC_2_1
      case PTHREAD_MUTEX_FAST_NP:
#     endif
      case PTHREAD_MUTEX_RECURSIVE_NP:
      case PTHREAD_MUTEX_ERRORCHECK_NP:
         if (mutex->__vg_m_count >= 0) break;
         /* else fall thru */
      default:
         VG_(record_pthread_error)( tid,
            "pthread_cond_wait/timedwait: mutex is invalid");
         SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
         return;
   }

   /* Barf if we don't currently hold the mutex. */
   if (mutex->__vg_m_count == 0 /* nobody holds it */) {
      VG_(record_pthread_error)( tid,
         "pthread_cond_wait/timedwait: mutex is unlocked");
      SET_PTHREQ_RETVAL(tid, VKI_EPERM);
      return;
   }

   if ((ThreadId)mutex->__vg_m_owner != tid /* we don't hold it */) {
      VG_(record_pthread_error)( tid,
         "pthread_cond_wait/timedwait: mutex is locked by another thread");
      SET_PTHREQ_RETVAL(tid, VKI_EPERM);
      return;
   }

   if(VG_(threads)[tid].cancel_pend != NULL &&
      VG_(threads)[tid].cancel_st) {
      /* Cancellation pending and enabled: deliver it now instead of
         blocking (cond wait is a cancellation point). */
      make_thread_jump_to_cancelhdlr ( tid );
   } else {
      /* Queue ourselves on the condition. */
      VG_(threads)[tid].status = VgTs_WaitCV;
      VG_(threads)[tid].associated_cv = cond;
      VG_(threads)[tid].associated_mx = mutex;
      VG_(threads)[tid].awaken_at = ms_end;
      if (ms_end != 0xFFFFFFFF)
         add_timeout(tid, ms_end);

      if (VG_(clo_trace_pthread_level) >= 1) {
         VG_(sprintf)(msg_buf,
                      "pthread_cond_wait cv %p, mx %p: BLOCK",
                      cond, mutex );
         print_pthread_event(tid, msg_buf);
      }

      /* Release the mutex. */
      release_one_thread_waiting_on_mutex ( mutex, "pthread_cond_wait " );
   }
}
2351
2352
/* Implement pthread_cond_signal (broadcast == False: release at most
   one waiter) and pthread_cond_broadcast (broadcast == True: release
   all waiters) on 'cond'.  Returns 0 on success, VKI_EINVAL if cond
   is NULL. */
static
void do_pthread_cond_signal_or_broadcast ( ThreadId tid,
                                           Bool broadcast,
                                           vg_pthread_cond_t *cond )
{
   Char msg_buf[100];
   /* Tag used only for trace messages. */
   Char* caller
      = broadcast ? "pthread_cond_broadcast"
                  : "pthread_cond_signal ";

   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf, "%s cv %p ...",
                            caller, cond );
      print_pthread_event(tid, msg_buf);
   }

   /* Paranoia ... */
   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   if (cond == NULL) {
      VG_(record_pthread_error)( tid,
         "pthread_cond_signal/broadcast: cond is NULL");
      SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
      return;
   }

   /* VG_N_THREADS is an upper bound on the number of possible
      waiters, so it serves as "release everybody" for broadcast. */
   release_N_threads_waiting_on_cond (
      cond,
      broadcast ? VG_N_THREADS : 1,
      caller
   );

   SET_PTHREQ_RETVAL(tid, 0); /* success */
}
2388
sewardj77e466c2002-04-14 02:29:29 +00002389
sewardj5f07b662002-04-23 16:52:51 +00002390/* -----------------------------------------------------------
2391 THREAD SPECIFIC DATA
2392 -------------------------------------------------------- */
2393
2394static __inline__
2395Bool is_valid_key ( ThreadKey k )
2396{
2397 /* k unsigned; hence no < 0 check */
2398 if (k >= VG_N_THREAD_KEYS) return False;
2399 if (!vg_thread_keys[k].inuse) return False;
2400 return True;
2401}
2402
sewardj00a66b12002-10-12 16:42:35 +00002403
2404/* Return in %EDX a value of 1 if the key is valid, else 0. */
2405static
2406void do_pthread_key_validate ( ThreadId tid,
2407 pthread_key_t key )
2408{
2409 Char msg_buf[100];
2410
2411 if (VG_(clo_trace_pthread_level) >= 1) {
2412 VG_(sprintf)(msg_buf, "pthread_key_validate key %p",
2413 key );
2414 print_pthread_event(tid, msg_buf);
2415 }
2416
2417 vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
2418 vg_assert(VG_(is_valid_tid)(tid)
2419 && VG_(threads)[tid].status == VgTs_Runnable);
2420
2421 if (is_valid_key((ThreadKey)key)) {
njnd3040452003-05-19 15:04:06 +00002422 SET_PTHREQ_RETVAL(tid, 1);
sewardj00a66b12002-10-12 16:42:35 +00002423 } else {
njnd3040452003-05-19 15:04:06 +00002424 SET_PTHREQ_RETVAL(tid, 0);
sewardj00a66b12002-10-12 16:42:35 +00002425 }
2426}
2427
2428
/* Implement pthread_key_create for thread tid: find the first free
   slot in vg_thread_keys[], mark it in use, record the destructor,
   and write the slot index back through *key (the client pointer is
   checked for writability first).  Returns 0 on success, or EAGAIN
   if all VG_N_THREAD_KEYS slots are taken. */
static
void do_pthread_key_create ( ThreadId tid,
                             pthread_key_t* key,
                             void (*destructor)(void*) )
{
   Int i;
   Char msg_buf[100];

   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf, "pthread_key_create *key %p, destr %p",
                            key, destructor );
      print_pthread_event(tid, msg_buf);
   }

   /* The (ThreadKey)/(pthread_key_t) interchange elsewhere relies on
      these types having the same size. */
   vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   /* Linear scan for the first unused slot. */
   for (i = 0; i < VG_N_THREAD_KEYS; i++)
      if (!vg_thread_keys[i].inuse)
         break;

   if (i == VG_N_THREAD_KEYS) {
      VG_(message)(Vg_UserMsg, "pthread_key_create() asked for too many keys (more than %d): increase VG_N_THREAD_KEYS and recompile Valgrind.",
                   VG_N_THREAD_KEYS);
      SET_PTHREQ_RETVAL(tid, EAGAIN);
      return;
   }

   vg_thread_keys[i].inuse      = True;
   vg_thread_keys[i].destructor = destructor;

   /* check key for addressibility */
   VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_key_create: key",
             (Addr)key, sizeof(pthread_key_t));
   *key = i;
   VG_TRACK( post_mem_write, (Addr)key, sizeof(pthread_key_t) );

   SET_PTHREQ_RETVAL(tid, 0);
}
2469
2470
2471static
2472void do_pthread_key_delete ( ThreadId tid, pthread_key_t key )
2473{
2474 Char msg_buf[100];
2475 if (VG_(clo_trace_pthread_level) >= 1) {
2476 VG_(sprintf)(msg_buf, "pthread_key_delete key %d",
2477 key );
2478 print_pthread_event(tid, msg_buf);
2479 }
2480
sewardjb48e5002002-05-13 00:16:03 +00002481 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002482 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002483
2484 if (!is_valid_key(key)) {
njn25e49d8e72002-09-23 09:36:25 +00002485 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002486 "pthread_key_delete: key is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002487 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj5f07b662002-04-23 16:52:51 +00002488 return;
2489 }
2490
2491 vg_thread_keys[key].inuse = False;
sewardj648b3152002-12-09 19:07:59 +00002492 vg_thread_keys[key].destructor = NULL;
njnd3040452003-05-19 15:04:06 +00002493 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002494}
2495
2496
sewardj00a66b12002-10-12 16:42:35 +00002497/* Get the .specific_ptr for a thread. Return 1 if the thread-slot
2498 isn't in use, so that client-space can scan all thread slots. 1
2499 cannot be confused with NULL or a legitimately-aligned specific_ptr
2500 value. */
sewardj5f07b662002-04-23 16:52:51 +00002501static
sewardj00a66b12002-10-12 16:42:35 +00002502void do_pthread_getspecific_ptr ( ThreadId tid )
sewardj5f07b662002-04-23 16:52:51 +00002503{
sewardj00a66b12002-10-12 16:42:35 +00002504 void** specifics_ptr;
2505 Char msg_buf[100];
2506
jsgf855d93d2003-10-13 22:26:55 +00002507 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj00a66b12002-10-12 16:42:35 +00002508 VG_(sprintf)(msg_buf, "pthread_getspecific_ptr" );
sewardj5f07b662002-04-23 16:52:51 +00002509 print_pthread_event(tid, msg_buf);
2510 }
2511
nethercote36881a22004-08-04 14:03:16 +00002512 vg_assert(is_valid_or_empty_tid(tid));
sewardj5f07b662002-04-23 16:52:51 +00002513
sewardj00a66b12002-10-12 16:42:35 +00002514 if (VG_(threads)[tid].status == VgTs_Empty) {
njnd3040452003-05-19 15:04:06 +00002515 SET_PTHREQ_RETVAL(tid, 1);
sewardj5f07b662002-04-23 16:52:51 +00002516 return;
2517 }
2518
sewardj00a66b12002-10-12 16:42:35 +00002519 specifics_ptr = VG_(threads)[tid].specifics_ptr;
2520 vg_assert(specifics_ptr == NULL
2521 || IS_ALIGNED4_ADDR(specifics_ptr));
2522
njnd3040452003-05-19 15:04:06 +00002523 SET_PTHREQ_RETVAL(tid, (UInt)specifics_ptr);
sewardj5f07b662002-04-23 16:52:51 +00002524}
2525
2526
2527static
sewardj00a66b12002-10-12 16:42:35 +00002528void do_pthread_setspecific_ptr ( ThreadId tid, void** ptr )
sewardj5f07b662002-04-23 16:52:51 +00002529{
2530 Char msg_buf[100];
2531 if (VG_(clo_trace_pthread_level) >= 1) {
sewardj00a66b12002-10-12 16:42:35 +00002532 VG_(sprintf)(msg_buf, "pthread_setspecific_ptr ptr %p",
2533 ptr );
sewardj5f07b662002-04-23 16:52:51 +00002534 print_pthread_event(tid, msg_buf);
2535 }
2536
sewardjb48e5002002-05-13 00:16:03 +00002537 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002538 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002539
sewardj00a66b12002-10-12 16:42:35 +00002540 VG_(threads)[tid].specifics_ptr = ptr;
njnd3040452003-05-19 15:04:06 +00002541 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002542}
2543
2544
sewardj870497a2002-05-29 01:06:47 +00002545/* Helper for calling destructors at thread exit. If key is valid,
2546 copy the thread's specific value into cu->arg and put the *key*'s
2547 destructor fn address in cu->fn. Then return 0 to the caller.
2548 Otherwise return non-zero to the caller. */
/* Helper for running TSD destructors at thread exit.  If 'key' is in
   use, fill *cu with the key's destructor function and the calling
   thread's specific value for that key, and return 0; otherwise
   return -1.  Both the write to *cu and the read of the specifics
   slot are reported to the memory tracker, since cu and the
   specifics array live in client memory. */
static
void do__get_key_destr_and_spec ( ThreadId tid,
                                  pthread_key_t key,
                                  CleanupEntry* cu )
{
   Char msg_buf[100];
   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf,
         "get_key_destr_and_arg (key = %d)", key );
      print_pthread_event(tid, msg_buf);
   }
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(key >= 0 && key < VG_N_THREAD_KEYS);

   if (!vg_thread_keys[key].inuse) {
      /* Key not in use: tell the caller to skip it. */
      SET_PTHREQ_RETVAL(tid, -1);
      return;
   }
   VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "get_key_destr_and_spec: cu",
             (Addr)cu, sizeof(CleanupEntry) );

   cu->type = VgCt_Function;
   cu->data.function.fn = vg_thread_keys[key].destructor;
   if (VG_(threads)[tid].specifics_ptr == NULL) {
      /* Thread never set any specifics; destructor arg is NULL. */
      cu->data.function.arg = NULL;
   } else {
      VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
                "get_key_destr_and_spec: key",
                (Addr)(&VG_(threads)[tid].specifics_ptr[key]),
                sizeof(void*) );
      cu->data.function.arg = VG_(threads)[tid].specifics_ptr[key];
   }

   VG_TRACK( post_mem_write, (Addr)cu, sizeof(CleanupEntry) );
   SET_PTHREQ_RETVAL(tid, 0);
}
2585
2586
sewardjb48e5002002-05-13 00:16:03 +00002587/* ---------------------------------------------------
2588 SIGNALS
2589 ------------------------------------------------ */
2590
2591/* See comment in vg_libthread.c:pthread_sigmask() regarding
sewardj018f7622002-05-15 21:13:39 +00002592 deliberate confusion of types sigset_t and vki_sigset_t. Return 0
2593 for OK and 1 for some kind of addressing error, which the
2594 vg_libpthread.c routine turns into return values 0 and EFAULT
2595 respectively. */
/* Implement pthread_sigmask for thread tid.  newmask (if non-NULL) is
   the mask to apply per vki_how; oldmask (if non-NULL) receives the
   previous mask.  Both client pointers are reported to the memory
   tracker.  The actual mask update is delegated to
   VG_(do_pthread_sigmask_SCSS_upd); this wrapper always reports 0
   (success) -- addressing errors are surfaced by the tracker, and
   the libpthread stub maps them to EFAULT. */

/* See comment in vg_libthread.c:pthread_sigmask() regarding
   deliberate confusion of types sigset_t and vki_sigset_t.  Return 0
   for OK and 1 for some kind of addressing error, which the
   vg_libpthread.c routine turns into return values 0 and EFAULT
   respectively. */
static
void do_pthread_sigmask ( ThreadId tid,
                          Int vki_how,
                          vki_ksigset_t* newmask,
                          vki_ksigset_t* oldmask )
{
   Char msg_buf[100];
   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf,
         "pthread_sigmask vki_how %d, newmask %p, oldmask %p",
         vki_how, newmask, oldmask );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   if (newmask)
      VG_TRACK( pre_mem_read, Vg_CorePThread, tid, "pthread_sigmask: newmask",
                (Addr)newmask, sizeof(vki_ksigset_t));
   if (oldmask)
      VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_sigmask: oldmask",
                (Addr)oldmask, sizeof(vki_ksigset_t));

   VG_(do_pthread_sigmask_SCSS_upd) ( tid, vki_how, newmask, oldmask );

   if (oldmask)
      VG_TRACK( post_mem_write, (Addr)oldmask, sizeof(vki_ksigset_t) );

   /* Success. */
   SET_PTHREQ_RETVAL(tid, 0);
}
2628
2629
/* Implement pthread_kill: thread tid asks for signal 'sig' to be
   delivered to thread 'thread'.  Returns VKI_ESRCH for an invalid
   target, 0 for sig == 0 (existence test only, no signal sent),
   VKI_EINVAL for an out-of-range signal number, else forwards the
   signal via the target's proxy and returns 0. */
static
void do_pthread_kill ( ThreadId tid, /* me */
                       ThreadId thread, /* thread to signal */
                       Int sig )
{
   ThreadState* tst;
   Char msg_buf[100];

   if (VG_(clo_trace_signals) || VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf,
         "pthread_kill thread %d, signo %d",
         thread, sig );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   if (!VG_(is_valid_tid)(thread)) {
      VG_(record_pthread_error)( tid,
         "pthread_kill: invalid target thread");
      SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
      return;
   }

   if (sig == 0) {
      /* OK, signal 0 is just for testing */
      SET_PTHREQ_RETVAL(tid, 0);
      return;
   }

   if (sig < 1 || sig > VKI_KNSIG) {
      SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
      return;
   }

   /* Every live thread has a proxy to handle its signals. */
   tst = VG_(get_ThreadState)(thread);
   vg_assert(NULL != tst->proxy);
   VG_(proxy_sendsig)(thread, sig);
   SET_PTHREQ_RETVAL(tid, 0);
}
2671
2672
sewardj2cb00342002-06-28 01:46:26 +00002673/* -----------------------------------------------------------
2674 FORK HANDLERS.
2675 -------------------------------------------------------- */
2676
2677static
2678void do__set_fhstack_used ( ThreadId tid, Int n )
2679{
2680 Char msg_buf[100];
2681 if (VG_(clo_trace_sched)) {
2682 VG_(sprintf)(msg_buf, "set_fhstack_used to %d", n );
2683 print_pthread_event(tid, msg_buf);
2684 }
2685
2686 vg_assert(VG_(is_valid_tid)(tid)
2687 && VG_(threads)[tid].status == VgTs_Runnable);
2688
2689 if (n >= 0 && n < VG_N_FORKHANDLERSTACK) {
2690 vg_fhstack_used = n;
njnd3040452003-05-19 15:04:06 +00002691 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002692 } else {
njnd3040452003-05-19 15:04:06 +00002693 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002694 }
2695}
2696
2697
2698static
2699void do__get_fhstack_used ( ThreadId tid )
2700{
2701 Int n;
2702 Char msg_buf[100];
2703 if (VG_(clo_trace_sched)) {
2704 VG_(sprintf)(msg_buf, "get_fhstack_used" );
2705 print_pthread_event(tid, msg_buf);
2706 }
2707
2708 vg_assert(VG_(is_valid_tid)(tid)
2709 && VG_(threads)[tid].status == VgTs_Runnable);
2710
2711 n = vg_fhstack_used;
2712 vg_assert(n >= 0 && n < VG_N_FORKHANDLERSTACK);
njnd3040452003-05-19 15:04:06 +00002713 SET_PTHREQ_RETVAL(tid, n);
sewardj2cb00342002-06-28 01:46:26 +00002714}
2715
2716static
2717void do__set_fhstack_entry ( ThreadId tid, Int n, ForkHandlerEntry* fh )
2718{
2719 Char msg_buf[100];
2720 if (VG_(clo_trace_sched)) {
2721 VG_(sprintf)(msg_buf, "set_fhstack_entry %d to %p", n, fh );
2722 print_pthread_event(tid, msg_buf);
2723 }
2724
2725 vg_assert(VG_(is_valid_tid)(tid)
2726 && VG_(threads)[tid].status == VgTs_Runnable);
njn72718642003-07-24 08:45:32 +00002727 VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
njn25e49d8e72002-09-23 09:36:25 +00002728 "pthread_atfork: prepare/parent/child",
2729 (Addr)fh, sizeof(ForkHandlerEntry));
sewardj2cb00342002-06-28 01:46:26 +00002730
njn25e49d8e72002-09-23 09:36:25 +00002731 if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
njnd3040452003-05-19 15:04:06 +00002732 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002733 return;
2734 }
2735
2736 vg_fhstack[n] = *fh;
njnd3040452003-05-19 15:04:06 +00002737 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002738}
2739
2740
2741static
2742void do__get_fhstack_entry ( ThreadId tid, Int n, /*OUT*/
2743 ForkHandlerEntry* fh )
2744{
2745 Char msg_buf[100];
2746 if (VG_(clo_trace_sched)) {
2747 VG_(sprintf)(msg_buf, "get_fhstack_entry %d", n );
2748 print_pthread_event(tid, msg_buf);
2749 }
2750
2751 vg_assert(VG_(is_valid_tid)(tid)
2752 && VG_(threads)[tid].status == VgTs_Runnable);
njn72718642003-07-24 08:45:32 +00002753 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "fork: prepare/parent/child",
njn25e49d8e72002-09-23 09:36:25 +00002754 (Addr)fh, sizeof(ForkHandlerEntry));
sewardj2cb00342002-06-28 01:46:26 +00002755
njn25e49d8e72002-09-23 09:36:25 +00002756 if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
njnd3040452003-05-19 15:04:06 +00002757 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002758 return;
2759 }
2760
2761 *fh = vg_fhstack[n];
njnd3040452003-05-19 15:04:06 +00002762 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002763
njn25e49d8e72002-09-23 09:36:25 +00002764 VG_TRACK( post_mem_write, (Addr)fh, sizeof(ForkHandlerEntry) );
sewardj2cb00342002-06-28 01:46:26 +00002765}
2766
thughesdaa34562004-06-27 12:48:53 +00002767
2768static
2769void do__get_stack_info ( ThreadId tid, ThreadId which, StackInfo* si )
2770{
2771 Char msg_buf[100];
2772
2773 vg_assert(VG_(is_valid_tid)(tid)
2774 && VG_(threads)[tid].status == VgTs_Runnable);
2775
2776 if (VG_(clo_trace_sched)) {
2777 VG_(sprintf)(msg_buf, "get_stack_info for tid %d", which );
2778 print_pthread_event(tid, msg_buf);
2779 }
2780
2781 if (!VG_(is_valid_tid)(which)) {
2782 SET_PTHREQ_RETVAL(tid, -1);
2783 return;
2784 }
2785
2786 si->base = VG_(threads)[which].stack_base;
2787 si->size = VG_(threads)[which].stack_size
2788 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
2789 - VG_(threads)[which].stack_guard_size;
2790 si->guardsize = VG_(threads)[which].stack_guard_size;
2791
2792 SET_PTHREQ_RETVAL(tid, 0);
2793}
2794
njnd3040452003-05-19 15:04:06 +00002795/* ---------------------------------------------------------------------
2796 Specifying shadow register values
2797 ------------------------------------------------------------------ */
2798
/* Store 'ret_shadow' as the shadow of architectural register %EAX
   (the x86 syscall return register) for thread 'tid'. */
2799void VG_(set_return_from_syscall_shadow) ( ThreadId tid, UInt ret_shadow )
2800{
2801 VG_(set_thread_shadow_archreg)(tid, R_EAX, ret_shadow);
2802}
2803
/* Return the shadow of %EBX -- per the function's name, this is read
   at program exit, when %EBX holds the exit-status argument of the
   exit syscall (x86/Linux convention; confirm against callers). */
2804UInt VG_(get_exit_status_shadow) ( void )
2805{
2806 return VG_(get_shadow_archreg)(R_EBX);
2807}
2808
/* Record the address of the __libc_freeres wrapper in the file-scope
   variable __libc_freeres_wrapper.  (Presumably consulted at client
   shutdown -- see users of __libc_freeres_wrapper.) */
rjwalshe4e779d2004-04-16 23:02:29 +00002809void VG_(intercept_libc_freeres_wrapper)(Addr addr)
2810{
nethercotef971ab72004-08-02 16:27:40 +00002811 __libc_freeres_wrapper = addr;
rjwalshe4e779d2004-04-16 23:02:29 +00002812}
sewardj2cb00342002-06-28 01:46:26 +00002813
sewardje663cb92002-04-12 10:26:32 +00002814/* ---------------------------------------------------------------------
sewardj124ca2a2002-06-20 10:19:38 +00002815 Handle client requests.
sewardje663cb92002-04-12 10:26:32 +00002816 ------------------------------------------------------------------ */
2817
sewardj124ca2a2002-06-20 10:19:38 +00002818/* Do a client request for the thread tid. After the request, tid may
2819 or may not still be runnable; if not, the scheduler will have to
2820 choose a new thread to run.
2821*/
sewardje663cb92002-04-12 10:26:32 +00002822static
sewardj124ca2a2002-06-20 10:19:38 +00002823void do_client_request ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00002824{
   /* The request block: arg[0] is the request code, arg[1..4] are its
      arguments.  The client leaves a pointer to the block in its
      %EAX. */
nethercotec06e2132004-09-03 13:45:29 +00002825 UInt* arg = (UInt*)(VG_(threads)[tid].arch.m_eax);
nethercotef971ab72004-08-02 16:27:40 +00002826 UInt req_no = arg[0];
sewardj124ca2a2002-06-20 10:19:38 +00002827
fitzhardinge98abfc72003-12-16 02:05:15 +00002828 if (0)
2829 VG_(printf)("req no = 0x%x\n", req_no);
sewardje663cb92002-04-12 10:26:32 +00002830 switch (req_no) {
2831
   /* CLIENT_CALL[0123]: call the client-supplied function arg[1] with
      0..3 word-sized arguments; a NULL function pointer is logged
      rather than called. */
njn3e884182003-04-15 13:03:23 +00002832 case VG_USERREQ__CLIENT_CALL0: {
2833 UInt (*f)(void) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002834 if (f == NULL)
2835 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL: func=%p\n", f);
2836 else
2837 SET_CLCALL_RETVAL(tid, f ( ), (Addr)f);
njn3e884182003-04-15 13:03:23 +00002838 break;
2839 }
2840 case VG_USERREQ__CLIENT_CALL1: {
2841 UInt (*f)(UInt) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002842 if (f == NULL)
2843 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL: func=%p\n", f);
2844 else
2845 SET_CLCALL_RETVAL(tid, f ( arg[2] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002846 break;
2847 }
2848 case VG_USERREQ__CLIENT_CALL2: {
2849 UInt (*f)(UInt, UInt) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002850 if (f == NULL)
2851 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL: func=%p\n", f);
2852 else
2853 SET_CLCALL_RETVAL(tid, f ( arg[2], arg[3] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002854 break;
2855 }
2856 case VG_USERREQ__CLIENT_CALL3: {
2857 UInt (*f)(UInt, UInt, UInt) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002858 if (f == NULL)
2859 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL: func=%p\n", f);
2860 else
2861 SET_CLCALL_RETVAL(tid, f ( arg[2], arg[3], arg[4] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002862 break;
2863 }
2864
nethercote7cc9c232004-01-21 15:08:04 +00002865 /* Note: for tools that replace malloc() et al, we want to call
njn3e884182003-04-15 13:03:23 +00002866 the replacement versions. For those that don't, we want to call
2867 VG_(cli_malloc)() et al. We do this by calling SK_(malloc)(), which
nethercote3ced0e32004-01-26 14:50:45 +00002868 malloc-replacing tools must replace, but have the default definition
2869 of SK_(malloc)() call VG_(cli_malloc)(). */
njn3e884182003-04-15 13:03:23 +00002870
2871 /* Note: for MALLOC and FREE, must set the appropriate "lock"... see
2872 the comment in vg_defaults.c/SK_(malloc)() for why. */
sewardj124ca2a2002-06-20 10:19:38 +00002873 case VG_USERREQ__MALLOC:
njn3e884182003-04-15 13:03:23 +00002874 VG_(sk_malloc_called_by_scheduler) = True;
njnd3040452003-05-19 15:04:06 +00002875 SET_PTHREQ_RETVAL(
njn72718642003-07-24 08:45:32 +00002876 tid, (UInt)SK_(malloc) ( arg[1] )
sewardj124ca2a2002-06-20 10:19:38 +00002877 );
njn3e884182003-04-15 13:03:23 +00002878 VG_(sk_malloc_called_by_scheduler) = False;
sewardj124ca2a2002-06-20 10:19:38 +00002879 break;
2880
2881 case VG_USERREQ__FREE:
njn3e884182003-04-15 13:03:23 +00002882 VG_(sk_malloc_called_by_scheduler) = True;
njn72718642003-07-24 08:45:32 +00002883 SK_(free) ( (void*)arg[1] );
njn3e884182003-04-15 13:03:23 +00002884 VG_(sk_malloc_called_by_scheduler) = False;
njnd3040452003-05-19 15:04:06 +00002885 SET_PTHREQ_RETVAL(tid, 0); /* irrelevant */
sewardj124ca2a2002-06-20 10:19:38 +00002886 break;
2887
   /* Small query requests answered directly from scheduler state. */
sewardj124ca2a2002-06-20 10:19:38 +00002888 case VG_USERREQ__PTHREAD_GET_THREADID:
njnd3040452003-05-19 15:04:06 +00002889 SET_PTHREQ_RETVAL(tid, tid);
sewardj124ca2a2002-06-20 10:19:38 +00002890 break;
2891
2892 case VG_USERREQ__RUNNING_ON_VALGRIND:
njnd3040452003-05-19 15:04:06 +00002893 SET_CLREQ_RETVAL(tid, 1);
sewardj124ca2a2002-06-20 10:19:38 +00002894 break;
2895
2896 case VG_USERREQ__GET_PTHREAD_TRACE_LEVEL:
njnd3040452003-05-19 15:04:06 +00002897 SET_PTHREQ_RETVAL(tid, VG_(clo_trace_pthread_level));
sewardj124ca2a2002-06-20 10:19:38 +00002898 break;
2899
2900 case VG_USERREQ__READ_MILLISECOND_TIMER:
njnd3040452003-05-19 15:04:06 +00002901 SET_PTHREQ_RETVAL(tid, VG_(read_millisecond_timer)());
sewardj124ca2a2002-06-20 10:19:38 +00002902 break;
2903
2904 /* Some of these may make thread tid non-runnable, but the
2905 scheduler checks for that on return from this function. */
2906 case VG_USERREQ__PTHREAD_MUTEX_LOCK:
2907 do_pthread_mutex_lock( tid, False, (void *)(arg[1]) );
2908 break;
2909
2910 case VG_USERREQ__PTHREAD_MUTEX_TRYLOCK:
2911 do_pthread_mutex_lock( tid, True, (void *)(arg[1]) );
2912 break;
2913
2914 case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
2915 do_pthread_mutex_unlock( tid, (void *)(arg[1]) );
2916 break;
2917
sewardj00a66b12002-10-12 16:42:35 +00002918 case VG_USERREQ__PTHREAD_GETSPECIFIC_PTR:
2919 do_pthread_getspecific_ptr ( tid );
sewardj124ca2a2002-06-20 10:19:38 +00002920 break;
2921
2922 case VG_USERREQ__SET_CANCELTYPE:
2923 do__set_canceltype ( tid, arg[1] );
2924 break;
2925
2926 case VG_USERREQ__CLEANUP_PUSH:
2927 do__cleanup_push ( tid, (CleanupEntry*)(arg[1]) );
2928 break;
2929
2930 case VG_USERREQ__CLEANUP_POP:
2931 do__cleanup_pop ( tid, (CleanupEntry*)(arg[1]) );
2932 break;
2933
2934 case VG_USERREQ__TESTCANCEL:
2935 do__testcancel ( tid );
2936 break;
2937
sewardje663cb92002-04-12 10:26:32 +00002938 case VG_USERREQ__PTHREAD_JOIN:
2939 do_pthread_join( tid, arg[1], (void**)(arg[2]) );
2940 break;
2941
sewardj3b5d8862002-04-20 13:53:23 +00002942 case VG_USERREQ__PTHREAD_COND_WAIT:
2943 do_pthread_cond_wait( tid,
nethercote1f0173b2004-02-28 15:40:36 +00002944 (vg_pthread_cond_t *)(arg[1]),
2945 (vg_pthread_mutex_t *)(arg[2]),
sewardj5f07b662002-04-23 16:52:51 +00002946 0xFFFFFFFF /* no timeout */ );
2947 break;
2948
2949 case VG_USERREQ__PTHREAD_COND_TIMEDWAIT:
2950 do_pthread_cond_wait( tid,
nethercote1f0173b2004-02-28 15:40:36 +00002951 (vg_pthread_cond_t *)(arg[1]),
2952 (vg_pthread_mutex_t *)(arg[2]),
sewardj5f07b662002-04-23 16:52:51 +00002953 arg[3] /* timeout millisecond point */ );
sewardj3b5d8862002-04-20 13:53:23 +00002954 break;
2955
2956 case VG_USERREQ__PTHREAD_COND_SIGNAL:
2957 do_pthread_cond_signal_or_broadcast(
2958 tid,
2959 False, /* signal, not broadcast */
nethercote1f0173b2004-02-28 15:40:36 +00002960 (vg_pthread_cond_t *)(arg[1]) );
sewardj3b5d8862002-04-20 13:53:23 +00002961 break;
2962
2963 case VG_USERREQ__PTHREAD_COND_BROADCAST:
2964 do_pthread_cond_signal_or_broadcast(
2965 tid,
2966 True, /* broadcast, not signal */
nethercote1f0173b2004-02-28 15:40:36 +00002967 (vg_pthread_cond_t *)(arg[1]) );
sewardj3b5d8862002-04-20 13:53:23 +00002968 break;
2969
sewardj00a66b12002-10-12 16:42:35 +00002970 case VG_USERREQ__PTHREAD_KEY_VALIDATE:
2971 do_pthread_key_validate ( tid,
2972 (pthread_key_t)(arg[1]) );
2973 break;
2974
sewardj5f07b662002-04-23 16:52:51 +00002975 case VG_USERREQ__PTHREAD_KEY_CREATE:
2976 do_pthread_key_create ( tid,
2977 (pthread_key_t*)(arg[1]),
2978 (void(*)(void*))(arg[2]) );
2979 break;
2980
2981 case VG_USERREQ__PTHREAD_KEY_DELETE:
2982 do_pthread_key_delete ( tid,
2983 (pthread_key_t)(arg[1]) );
2984 break;
2985
sewardj00a66b12002-10-12 16:42:35 +00002986 case VG_USERREQ__PTHREAD_SETSPECIFIC_PTR:
2987 do_pthread_setspecific_ptr ( tid,
2988 (void**)(arg[1]) );
sewardj5f07b662002-04-23 16:52:51 +00002989 break;
2990
sewardjb48e5002002-05-13 00:16:03 +00002991 case VG_USERREQ__PTHREAD_SIGMASK:
2992 do_pthread_sigmask ( tid,
2993 arg[1],
2994 (vki_ksigset_t*)(arg[2]),
2995 (vki_ksigset_t*)(arg[3]) );
2996 break;
2997
sewardj018f7622002-05-15 21:13:39 +00002998 case VG_USERREQ__PTHREAD_KILL:
2999 do_pthread_kill ( tid, arg[1], arg[2] );
3000 break;
3001
sewardjff42d1d2002-05-22 13:17:31 +00003002 case VG_USERREQ__PTHREAD_YIELD:
3003 do_pthread_yield ( tid );
sewardj18a62ff2002-07-12 22:30:51 +00003004 /* On return from do_client_request(), the scheduler will
3005 select a new thread to run. */
sewardjff42d1d2002-05-22 13:17:31 +00003006 break;
sewardj018f7622002-05-15 21:13:39 +00003007
sewardj7989d0c2002-05-28 11:00:01 +00003008 case VG_USERREQ__SET_CANCELSTATE:
3009 do__set_cancelstate ( tid, arg[1] );
3010 break;
3011
sewardj7989d0c2002-05-28 11:00:01 +00003012 case VG_USERREQ__SET_OR_GET_DETACH:
3013 do__set_or_get_detach ( tid, arg[1], arg[2] );
3014 break;
3015
3016 case VG_USERREQ__SET_CANCELPEND:
3017 do__set_cancelpend ( tid, arg[1], (void(*)(void*))arg[2] );
3018 break;
3019
3020 case VG_USERREQ__WAIT_JOINER:
3021 do__wait_joiner ( tid, (void*)arg[1] );
3022 break;
3023
3024 case VG_USERREQ__QUIT:
3025 do__quit ( tid );
3026 break;
3027
3028 case VG_USERREQ__APPLY_IN_NEW_THREAD:
3029 do__apply_in_new_thread ( tid, (void*(*)(void*))arg[1],
thughesdaa34562004-06-27 12:48:53 +00003030 (void*)arg[2], (StackInfo*)(arg[3]) );
sewardj7989d0c2002-05-28 11:00:01 +00003031 break;
3032
sewardj870497a2002-05-29 01:06:47 +00003033 case VG_USERREQ__GET_KEY_D_AND_S:
3034 do__get_key_destr_and_spec ( tid,
3035 (pthread_key_t)arg[1],
3036 (CleanupEntry*)arg[2] );
3037 break;
3038
sewardjef037c72002-05-30 00:40:03 +00003039 case VG_USERREQ__NUKE_OTHER_THREADS:
3040 VG_(nuke_all_threads_except) ( tid );
njnd3040452003-05-19 15:04:06 +00003041 SET_PTHREQ_RETVAL(tid, 0);
sewardjef037c72002-05-30 00:40:03 +00003042 break;
3043
sewardj4dced352002-06-04 22:54:20 +00003044 case VG_USERREQ__PTHREAD_ERROR:
njn25e49d8e72002-09-23 09:36:25 +00003045 VG_(record_pthread_error)( tid, (Char*)(arg[1]) );
njnd3040452003-05-19 15:04:06 +00003046 SET_PTHREQ_RETVAL(tid, 0);
sewardj4dced352002-06-04 22:54:20 +00003047 break;
3048
sewardj2cb00342002-06-28 01:46:26 +00003049 case VG_USERREQ__SET_FHSTACK_USED:
3050 do__set_fhstack_used( tid, (Int)(arg[1]) );
3051 break;
3052
3053 case VG_USERREQ__GET_FHSTACK_USED:
3054 do__get_fhstack_used( tid );
3055 break;
3056
3057 case VG_USERREQ__SET_FHSTACK_ENTRY:
3058 do__set_fhstack_entry( tid, (Int)(arg[1]),
3059 (ForkHandlerEntry*)(arg[2]) );
3060 break;
3061
3062 case VG_USERREQ__GET_FHSTACK_ENTRY:
3063 do__get_fhstack_entry( tid, (Int)(arg[1]),
3064 (ForkHandlerEntry*)(arg[2]) );
3065 break;
3066
sewardj77e466c2002-04-14 02:29:29 +00003067 case VG_USERREQ__SIGNAL_RETURNS:
3068 handle_signal_return(tid);
3069 break;
fitzhardinge98abfc72003-12-16 02:05:15 +00003070
thughesdaa34562004-06-27 12:48:53 +00003071 case VG_USERREQ__GET_STACK_INFO:
3072 do__get_stack_info( tid, (Int)(arg[1]), (StackInfo*)(arg[2]) );
3073 break;
3074
fitzhardinge98abfc72003-12-16 02:05:15 +00003075
3076 case VG_USERREQ__GET_SIGRT_MIN:
3077 SET_PTHREQ_RETVAL(tid, VG_(sig_rtmin));
3078 break;
3079
3080 case VG_USERREQ__GET_SIGRT_MAX:
3081 SET_PTHREQ_RETVAL(tid, VG_(sig_rtmax));
3082 break;
3083
3084 case VG_USERREQ__ALLOC_RTSIG:
3085 SET_PTHREQ_RETVAL(tid, VG_(sig_alloc_rtsig)((Int)arg[1]));
3086 break;
3087
   /* Client-side formatted printing: arg[1] is the format string,
      arg[2] a va_list; the character count is returned.  The
      _BACKTRACE variants also dump the requesting thread's stack. */
fitzhardinge39de4b42003-10-31 07:12:21 +00003088 case VG_USERREQ__PRINTF: {
3089 int count =
3090 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (va_list)arg[2] );
3091 SET_CLREQ_RETVAL( tid, count );
3092 break; }
3093
fitzhardinge98abfc72003-12-16 02:05:15 +00003094
fitzhardinge39de4b42003-10-31 07:12:21 +00003095 case VG_USERREQ__INTERNAL_PRINTF: {
3096 int count =
3097 VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (va_list)arg[2] );
3098 SET_CLREQ_RETVAL( tid, count );
3099 break; }
3100
3101 case VG_USERREQ__PRINTF_BACKTRACE: {
3102 ExeContext *e = VG_(get_ExeContext)( tid );
3103 int count =
3104 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (va_list)arg[2] );
nethercote86c5dcb2004-09-05 21:32:37 +00003105 VG_(mini_stack_dump)(e->ips, VG_(clo_backtrace_size));
fitzhardinge39de4b42003-10-31 07:12:21 +00003106 SET_CLREQ_RETVAL( tid, count );
3107 break; }
3108
3109 case VG_USERREQ__INTERNAL_PRINTF_BACKTRACE: {
3110 ExeContext *e = VG_(get_ExeContext)( tid );
3111 int count =
3112 VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (va_list)arg[2] );
nethercote86c5dcb2004-09-05 21:32:37 +00003113 VG_(mini_stack_dump)(e->ips, VG_(clo_backtrace_size));
fitzhardinge39de4b42003-10-31 07:12:21 +00003114 SET_CLREQ_RETVAL( tid, count );
3115 break; }
3116
   /* Hand the tool's malloc-family entry points and malloc-related
      options back to the caller via the struct at arg[1]. */
fitzhardinge98abfc72003-12-16 02:05:15 +00003117 case VG_USERREQ__GET_MALLOCFUNCS: {
3118 struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];
3119
3120 info->sk_malloc = (Addr)SK_(malloc);
3121 info->sk_calloc = (Addr)SK_(calloc);
3122 info->sk_realloc = (Addr)SK_(realloc);
3123 info->sk_memalign = (Addr)SK_(memalign);
3124 info->sk___builtin_new = (Addr)SK_(__builtin_new);
3125 info->sk___builtin_vec_new = (Addr)SK_(__builtin_vec_new);
3126 info->sk_free = (Addr)SK_(free);
3127 info->sk___builtin_delete = (Addr)SK_(__builtin_delete);
3128 info->sk___builtin_vec_delete = (Addr)SK_(__builtin_vec_delete);
3129
3130 info->arena_payload_szB = (Addr)VG_(arena_payload_szB);
3131
3132 info->clo_sloppy_malloc = VG_(clo_sloppy_malloc);
3133 info->clo_trace_malloc = VG_(clo_trace_malloc);
3134
3135 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
3136
3137 break;
3138 }
3139
njn25e49d8e72002-09-23 09:36:25 +00003140 /* Requests from the client program */
3141
3142 case VG_USERREQ__DISCARD_TRANSLATIONS:
3143 if (VG_(clo_verbosity) > 2)
3144 VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
3145 " addr %p, len %d\n",
3146 (void*)arg[1], arg[2] );
3147
sewardj97ad5522003-05-04 12:32:56 +00003148 VG_(invalidate_translations)( arg[1], arg[2], True );
njn25e49d8e72002-09-23 09:36:25 +00003149
njnd3040452003-05-19 15:04:06 +00003150 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
njn25e49d8e72002-09-23 09:36:25 +00003151 break;
3152
njn47363ab2003-04-21 13:24:40 +00003153 case VG_USERREQ__COUNT_ERRORS:
nethercotef2b11482004-08-02 12:36:01 +00003154 SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() );
njn47363ab2003-04-21 13:24:40 +00003155 break;
3156
   /* Anything else: offer it to the tool's handler, or warn (once)
      if no tool handler is registered. */
sewardje663cb92002-04-12 10:26:32 +00003157 default:
njn25e49d8e72002-09-23 09:36:25 +00003158 if (VG_(needs).client_requests) {
sewardj34042512002-10-22 04:14:35 +00003159 UInt ret;
3160
njn25e49d8e72002-09-23 09:36:25 +00003161 if (VG_(clo_verbosity) > 2)
fitzhardinge98abfc72003-12-16 02:05:15 +00003162 VG_(printf)("client request: code %x, addr %p, len %d\n",
njn25e49d8e72002-09-23 09:36:25 +00003163 arg[0], (void*)arg[1], arg[2] );
3164
njn72718642003-07-24 08:45:32 +00003165 if (SK_(handle_client_request) ( tid, arg, &ret ))
njnd3040452003-05-19 15:04:06 +00003166 SET_CLREQ_RETVAL(tid, ret);
njn25e49d8e72002-09-23 09:36:25 +00003167 } else {
sewardj34042512002-10-22 04:14:35 +00003168 static Bool whined = False;
3169
3170 if (!whined) {
nethercote7cc9c232004-01-21 15:08:04 +00003171 // Allow for requests in core, but defined by tools, which
njnd7994182003-10-02 13:44:04 +00003172 // have 0 and 0 in their two high bytes.
3173 Char c1 = (arg[0] >> 24) & 0xff;
3174 Char c2 = (arg[0] >> 16) & 0xff;
3175 if (c1 == 0) c1 = '_';
3176 if (c2 == 0) c2 = '_';
sewardj34042512002-10-22 04:14:35 +00003177 VG_(message)(Vg_UserMsg, "Warning:\n"
njnd7994182003-10-02 13:44:04 +00003178 " unhandled client request: 0x%x (%c%c+0x%x). Perhaps\n"
3179 " VG_(needs).client_requests should be set?\n",
3180 arg[0], c1, c2, arg[0] & 0xffff);
sewardj34042512002-10-22 04:14:35 +00003181 whined = True;
3182 }
njn25e49d8e72002-09-23 09:36:25 +00003183 }
sewardje663cb92002-04-12 10:26:32 +00003184 break;
3185 }
3186}
3187
3188
sewardj6072c362002-04-19 14:40:57 +00003189/* ---------------------------------------------------------------------
3190 Sanity checking.
3191 ------------------------------------------------------------------ */
3192
3193/* Internal consistency checks on the sched/pthread structures. */
/* Assert a collection of invariants over the timeout queue, the
   per-thread mutex/condvar association, thread stack usage, and the
   TLS key table.  Aborts Valgrind (vg_assert) on violation. */
3194static
3195void scheduler_sanity ( void )
3196{
nethercote1f0173b2004-02-28 15:40:36 +00003197 vg_pthread_mutex_t* mx;
3198 vg_pthread_cond_t* cv;
sewardj6072c362002-04-19 14:40:57 +00003199 Int i;
jsgf855d93d2003-10-13 22:26:55 +00003200 struct timeout* top;
3201 UInt lasttime = 0;
3202
   /* The timeout queue must be sorted by non-decreasing expiry time,
      and every entry must name a plausible thread slot. */
3203 for(top = timeouts; top != NULL; top = top->next) {
3204 vg_assert(top->time >= lasttime);
nethercote36881a22004-08-04 14:03:16 +00003205 vg_assert(is_valid_or_empty_tid(top->tid));
jsgf855d93d2003-10-13 22:26:55 +00003206
3207#if 0
3208 /* assert timeout entry is either stale, or associated with a
3209 thread in the right state
3210
3211 XXX disable for now - can be stale, but times happen to match
3212 */
3213 vg_assert(VG_(threads)[top->tid].awaken_at != top->time ||
3214 VG_(threads)[top->tid].status == VgTs_Sleeping ||
3215 VG_(threads)[top->tid].status == VgTs_WaitCV);
3216#endif
3217
3218 lasttime = top->time;
3219 }
sewardj5f07b662002-04-23 16:52:51 +00003220
sewardj6072c362002-04-19 14:40:57 +00003221 /* VG_(printf)("scheduler_sanity\n"); */
   /* Per-thread checks: slot 0 is skipped (not a client thread). */
3222 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00003223 mx = VG_(threads)[i].associated_mx;
3224 cv = VG_(threads)[i].associated_cv;
3225 if (VG_(threads)[i].status == VgTs_WaitMX) {
sewardjbf290b92002-05-01 02:28:01 +00003226 /* If we're waiting on a MX: (1) the mx is not null, (2, 3)
3227 it's actually held by someone, since otherwise this thread
3228 is deadlocked, (4) the mutex's owner is not us, since
3229 otherwise this thread is also deadlocked. The logic in
3230 do_pthread_mutex_lock rejects attempts by a thread to lock
3231 a (non-recursive) mutex which it already owns.
sewardj05553872002-04-20 20:53:17 +00003232
sewardjbf290b92002-05-01 02:28:01 +00003233 (2) has been seen to fail sometimes. I don't know why.
3234 Possibly to do with signals. */
sewardj3b5d8862002-04-20 13:53:23 +00003235 vg_assert(cv == NULL);
sewardj05553872002-04-20 20:53:17 +00003236 /* 1 */ vg_assert(mx != NULL);
nethercote1f0173b2004-02-28 15:40:36 +00003237 /* 2 */ vg_assert(mx->__vg_m_count > 0);
3238 /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__vg_m_owner));
3239 /* 4 */ vg_assert((UInt)i != (ThreadId)mx->__vg_m_owner);
sewardj3b5d8862002-04-20 13:53:23 +00003240 } else
sewardj018f7622002-05-15 21:13:39 +00003241 if (VG_(threads)[i].status == VgTs_WaitCV) {
sewardj3b5d8862002-04-20 13:53:23 +00003242 vg_assert(cv != NULL);
3243 vg_assert(mx != NULL);
sewardj6072c362002-04-19 14:40:57 +00003244 } else {
sewardj05553872002-04-20 20:53:17 +00003245 /* Unfortunately these don't hold true when a sighandler is
3246 running. To be fixed. */
3247 /* vg_assert(cv == NULL); */
3248 /* vg_assert(mx == NULL); */
sewardj6072c362002-04-19 14:40:57 +00003249 }
sewardjbf290b92002-05-01 02:28:01 +00003250
   /* Stack usage check: note the whole test is disabled by the
      leading '0 &&' below (see the comment about it being bogus). */
sewardj018f7622002-05-15 21:13:39 +00003251 if (VG_(threads)[i].status != VgTs_Empty) {
sewardjbf290b92002-05-01 02:28:01 +00003252 Int
sewardj018f7622002-05-15 21:13:39 +00003253 stack_used = (Addr)VG_(threads)[i].stack_highest_word
nethercoteb8ef9d82004-09-05 22:02:33 +00003254 - (Addr)ARCH_STACK_PTR(VG_(threads)[i].arch);
thughesdaa34562004-06-27 12:48:53 +00003255 Int
3256 stack_avail = VG_(threads)[i].stack_size
3257 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
3258 - VG_(threads)[i].stack_guard_size;
fitzhardinge98c4dc02004-03-16 08:27:29 +00003259 /* This test is a bit bogus - it doesn't take into account
3260 alternate signal stacks, for a start. Also, if a thread
3261 has it's stack pointer somewhere strange, killing Valgrind
3262 isn't the right answer. */
3263 if (0 && i > 1 /* not the root thread */
thughesdaa34562004-06-27 12:48:53 +00003264 && stack_used >= stack_avail) {
sewardjbf290b92002-05-01 02:28:01 +00003265 VG_(message)(Vg_UserMsg,
njn25e49d8e72002-09-23 09:36:25 +00003266 "Error: STACK OVERFLOW: "
sewardjbf290b92002-05-01 02:28:01 +00003267 "thread %d: stack used %d, available %d",
thughesdaa34562004-06-27 12:48:53 +00003268 i, stack_used, stack_avail );
sewardjbf290b92002-05-01 02:28:01 +00003269 VG_(message)(Vg_UserMsg,
3270 "Terminating Valgrind. If thread(s) "
3271 "really need more stack, increase");
3272 VG_(message)(Vg_UserMsg,
rjwalsh7109a8c2004-09-02 00:31:02 +00003273 "VG_PTHREAD_STACK_SIZE in core.h and recompile.");
sewardjbf290b92002-05-01 02:28:01 +00003274 VG_(exit)(1);
3275 }
3276 }
sewardj6072c362002-04-19 14:40:57 +00003277 }
sewardj5f07b662002-04-23 16:52:51 +00003278
   /* Unused TLS keys must not have leftover destructors. */
3279 for (i = 0; i < VG_N_THREAD_KEYS; i++) {
3280 if (!vg_thread_keys[i].inuse)
3281 vg_assert(vg_thread_keys[i].destructor == NULL);
3282 }
sewardj6072c362002-04-19 14:40:57 +00003283}
3284
3285
sewardje663cb92002-04-12 10:26:32 +00003286/*--------------------------------------------------------------------*/
3287/*--- end vg_scheduler.c ---*/
3288/*--------------------------------------------------------------------*/