
/*--------------------------------------------------------------------*/
/*--- A user-space pthreads implementation.         vg_scheduler.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an extensible x86 protected-mode
   emulator for monitoring program execution on x86-Unixes.

   Copyright (C) 2000-2004 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "valgrind.h"   /* for VG_USERREQ__RUNNING_ON_VALGRIND and
                           VG_USERREQ__DISCARD_TRANSLATIONS, and others */
#include "core.h"


/* ---------------------------------------------------------------------
   Types and globals for the scheduler.
   ------------------------------------------------------------------ */

/* ThreadId and ThreadState are defined in core.h. */

/* Globals.  A statically allocated array of threads.  NOTE: [0] is
   never used, to simplify the simulation of initialisers for
   LinuxThreads. */
ThreadState VG_(threads)[VG_N_THREADS];

/* The process' fork-handler stack. */
static Int vg_fhstack_used = 0;
static ForkHandlerEntry vg_fhstack[VG_N_FORKHANDLERSTACK];


/* The tid of the thread currently running, or VG_INVALID_THREADID if
   none. */
static ThreadId vg_tid_currently_running = VG_INVALID_THREADID;


/* vg_oursignalhandler() might longjmp().  Here's the jmp_buf. */
static jmp_buf scheduler_jmpbuf;
/* This says whether scheduler_jmpbuf is actually valid.  Needed so
   that our signal handler doesn't longjmp when the buffer isn't
   actually valid. */
static Bool scheduler_jmpbuf_valid = False;
/* ... and if so, here's the signal which caused it to do so. */
static Int longjmpd_on_signal;
/* If the current thread gets a synchronous unresumable signal, then
   its details are placed here by the signal handler, to be passed to
   the application's signal handler later on. */
static vki_siginfo_t unresumable_siginfo;

/* If != VG_INVALID_THREADID, this is the preferred tid to schedule */
static ThreadId prefer_sched = VG_INVALID_THREADID;

/* Keeping track of keys. */
typedef
   struct {
      /* Has this key been allocated ? */
      Bool inuse;
      /* If .inuse==True, records the address of the associated
         destructor, or NULL if none. */
      void (*destructor)(void*);
   }
   ThreadKeyState;

/* And our array of thread keys. */
static ThreadKeyState vg_thread_keys[VG_N_THREAD_KEYS];

typedef UInt ThreadKey;

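/* Illustrative sketch only, not code used here: key management
   elsewhere in this file presumably works by scanning this array.
   Allocating a key means finding the first slot with .inuse == False
   and recording the destructor, roughly

      for (i = 0; i < VG_N_THREAD_KEYS; i++)
         if (!vg_thread_keys[i].inuse) {
            vg_thread_keys[i].inuse      = True;
            vg_thread_keys[i].destructor = dest;
            return (ThreadKey)i;    // sketch; 'dest' is hypothetical
         }

   and at thread exit the in-use slots with a non-NULL destructor are
   run against the exiting thread's specifics. */
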
/* The address of __libc_freeres_wrapper.  The scheduler needs to know
   it so the wrapper can be called at program exit. */
static Addr __libc_freeres_wrapper;

/* Forwards */
static void do_client_request ( ThreadId tid, UWord* args );
static void scheduler_sanity ( void );
static void do_pthread_mutex_timedlock_TIMEOUT ( ThreadId tid );
static void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid );
static void maybe_rendezvous_joiners_and_joinees ( void );

/* Stats. */
static UInt n_scheduling_events_MINOR = 0;
static UInt n_scheduling_events_MAJOR = 0;

void VG_(print_scheduler_stats)(void)
{
   VG_(message)(Vg_DebugMsg,
      " %d/%d major/minor sched events.",
      n_scheduling_events_MAJOR, n_scheduling_events_MINOR);
}

/* ---------------------------------------------------------------------
   Helper functions for the scheduler.
   ------------------------------------------------------------------ */

__inline__
Bool VG_(is_valid_tid) ( ThreadId tid )
{
   /* tid is unsigned, hence no < 0 test. */
   if (tid == 0) return False;
   if (tid >= VG_N_THREADS) return False;
   if (VG_(threads)[tid].status == VgTs_Empty) return False;
   return True;
}


__inline__
Bool is_valid_or_empty_tid ( ThreadId tid )
{
   /* tid is unsigned, hence no < 0 test. */
   if (tid == 0) return False;
   if (tid >= VG_N_THREADS) return False;
   return True;
}


/* For constructing error messages only: try and identify a thread
   whose stack satisfies the predicate p, or return VG_INVALID_THREADID
   if none do.
*/
ThreadId VG_(first_matching_thread_stack)
         ( Bool (*p) ( Addr stack_min, Addr stack_max, void* d ),
           void* d )
{
   ThreadId tid, tid_to_skip;

   tid_to_skip = VG_INVALID_THREADID;

   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (VG_(threads)[tid].status == VgTs_Empty) continue;
      if (tid == tid_to_skip) continue;
      if ( p ( STACK_PTR(VG_(threads)[tid].arch),
               VG_(threads)[tid].stack_highest_word, d ) )
         return tid;
   }
   return VG_INVALID_THREADID;
}

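/* Illustrative sketch only (this call does not appear here): a caller
   wanting to know which thread's stack contains an address 'a' might
   supply a predicate such as

      static Bool addr_in_range ( Addr stack_min, Addr stack_max, void* d )
      {
         Addr a = *(Addr*)d;
         return stack_min <= a && a <= stack_max;
      }

      tid = VG_(first_matching_thread_stack)( addr_in_range, &a );

   since the loop above hands each predicate the thread's current
   stack pointer as stack_min and its stack_highest_word as stack_max.
   addr_in_range is a hypothetical name, shown only for illustration. */
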

/* Print the scheduler status. */
void VG_(pp_sched_status) ( void )
{
   Int i;
   VG_(printf)("\nsched status:\n");
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty) continue;
      VG_(printf)("\nThread %d: status = ", i);
      switch (VG_(threads)[i].status) {
         case VgTs_Runnable:   VG_(printf)("Runnable"); break;
         case VgTs_WaitJoinee: VG_(printf)("WaitJoinee(%d)",
                                           VG_(threads)[i].joiner_jee_tid);
                               break;
         case VgTs_WaitJoiner: VG_(printf)("WaitJoiner"); break;
         case VgTs_Sleeping:   VG_(printf)("Sleeping"); break;
         case VgTs_WaitMX:     VG_(printf)("WaitMX"); break;
         case VgTs_WaitCV:     VG_(printf)("WaitCV"); break;
         case VgTs_WaitSys:    VG_(printf)("WaitSys"); break;
         default:              VG_(printf)("???"); break;
      }
      VG_(printf)(", associated_mx = %p, associated_cv = %p\n",
                  VG_(threads)[i].associated_mx,
                  VG_(threads)[i].associated_cv );
      VG_(pp_ExeContext)(
         VG_(get_ExeContext2)( INSTR_PTR(VG_(threads)[i].arch),
                               FRAME_PTR(VG_(threads)[i].arch),
                               STACK_PTR(VG_(threads)[i].arch),
                               VG_(threads)[i].stack_highest_word)
      );
   }
   VG_(printf)("\n");
}

static
void print_sched_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, " SCHED[%d]: %s", tid, what );
}

static
void print_pthread_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, "PTHREAD[%d]: %s", tid, what );
}

static
Char* name_of_sched_event ( UInt event )
{
   switch (event) {
      case VEX_TRC_JMP_SYSCALL:       return "SYSCALL";
      case VEX_TRC_JMP_CLIENTREQ:     return "CLIENTREQ";
      case VEX_TRC_JMP_YIELD:         return "YIELD";
      case VG_TRC_INNER_COUNTERZERO:  return "COUNTERZERO";
      case VG_TRC_INNER_FASTMISS:     return "FASTMISS";
      case VG_TRC_UNRESUMABLE_SIGNAL: return "FATALSIGNAL";
      default:                        return "??UNKNOWN??";
   }
}


/* Allocate a completely empty ThreadState record. */
static
ThreadId vg_alloc_ThreadState ( void )
{
   Int i;
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty)
         return i;
   }
   VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
   VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
   VG_(core_panic)("VG_N_THREADS is too low");
   /*NOTREACHED*/
}

ThreadState *VG_(get_ThreadState)(ThreadId tid)
{
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   return &VG_(threads)[tid];
}

/* Return True precisely when get_current_tid can return
   successfully. */
Bool VG_(running_a_thread) ( void )
{
   if (vg_tid_currently_running == VG_INVALID_THREADID)
      return False;
   /* Otherwise, it must be a valid thread ID. */
   vg_assert(VG_(is_valid_tid)(vg_tid_currently_running));
   return True;
}

ThreadId VG_(get_current_tid) ( void )
{
   if (vg_tid_currently_running == VG_INVALID_THREADID)
      VG_(core_panic)("VG_(get_current_tid): not running generated code");
   /* Otherwise, it must be a valid thread ID. */
   vg_assert(VG_(is_valid_tid)(vg_tid_currently_running));
   return vg_tid_currently_running;
}

void VG_(resume_scheduler)(Int sigNo, vki_siginfo_t *info)
{
   if (scheduler_jmpbuf_valid) {
      /* Can't continue; must longjmp back to the scheduler and thus
         enter the sighandler immediately. */
      vg_assert(vg_tid_currently_running != VG_INVALID_THREADID);
      VG_(memcpy)(&unresumable_siginfo, info, sizeof(vki_siginfo_t));

      longjmpd_on_signal = sigNo;
      __builtin_longjmp(scheduler_jmpbuf,1);
   } else {
      vg_assert(vg_tid_currently_running == VG_INVALID_THREADID);
   }
}


static
UInt run_thread_for_a_while ( ThreadId tid )
{
   volatile UInt trc = 0;
   volatile Int  dispatch_ctr_SAVED = VG_(dispatch_ctr);
   volatile Int  done_this_time;

   /* For paranoia purposes only */
   volatile Addr a_vex    = (Addr) & VG_(threads)[tid].arch.vex;
   volatile Addr a_vexsh  = (Addr) & VG_(threads)[tid].arch.vex_shadow;
   volatile Addr a_spill  = (Addr) & VG_(threads)[tid].arch.vex_spill;
   volatile UInt sz_vex   = (UInt) sizeof VG_(threads)[tid].arch.vex;
   volatile UInt sz_vexsh = (UInt) sizeof VG_(threads)[tid].arch.vex_shadow;
   volatile UInt sz_spill = (UInt) sizeof VG_(threads)[tid].arch.vex_spill;

   /* Paranoia */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   vg_assert(!scheduler_jmpbuf_valid);
   vg_assert(vg_tid_currently_running == VG_INVALID_THREADID);

   /* Even more paranoia.  Check that what we have matches
      Vex's guest state layout requirements. */

#  define IS_4_ALIGNED(_xx)   (0 == ((_xx) & 3))
#  define IS_8_ALIGNED(_xx)   (0 == ((_xx) & 7))
#  define IS_16_ALIGNED(_xx)  (0 == ((_xx) & 0xF))

   if (0)
   VG_(printf)("%p %d %p %d %p %d\n",
               (void*)a_vex, sz_vex, (void*)a_vexsh, sz_vexsh,
               (void*)a_spill, sz_spill );

   vg_assert(IS_8_ALIGNED(sz_vex));
   vg_assert(IS_8_ALIGNED(sz_vexsh));
   vg_assert(IS_16_ALIGNED(sz_spill));

   vg_assert(IS_4_ALIGNED(a_vex));
   vg_assert(IS_4_ALIGNED(a_vexsh));
   vg_assert(IS_4_ALIGNED(a_spill));

   vg_assert(sz_vex == sz_vexsh);
   vg_assert(a_vex + sz_vex == a_vexsh);

   vg_assert(sz_spill == LibVEX_N_SPILL_BYTES);
   vg_assert(a_vex + 2 * sz_vex == a_spill);

#  undef IS_4_ALIGNED
#  undef IS_8_ALIGNED
#  undef IS_16_ALIGNED

   VGP_PUSHCC(VgpRun);

   /* there should be no undealt-with signals */
   vg_assert(unresumable_siginfo.si_signo == 0);

   if (__builtin_setjmp(scheduler_jmpbuf) == 0) {
      /* try this ... */
      vg_tid_currently_running = tid;
      scheduler_jmpbuf_valid = True;
      trc = VG_(run_innerloop)( &VG_(threads)[tid].arch.vex );
      scheduler_jmpbuf_valid = False;
      vg_tid_currently_running = VG_INVALID_THREADID;
      /* We get here if the client didn't take a fault. */
   } else {
      /* We get here if the client took a fault, which caused our
         signal handler to longjmp. */
      scheduler_jmpbuf_valid = False;
      vg_tid_currently_running = VG_INVALID_THREADID;
      vg_assert(trc == 0);
      trc = VG_TRC_UNRESUMABLE_SIGNAL;
   }

   vg_assert(!scheduler_jmpbuf_valid);

   done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 0;

   vg_assert(done_this_time >= 0);
   VG_(bbs_done) += (ULong)done_this_time;

   VGP_POPCC(VgpRun);
   return trc;
}

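/* The setjmp/longjmp handshake above, in outline (this merely restates
   the control flow already implemented by run_thread_for_a_while and
   VG_(resume_scheduler); it is not new behaviour):

      if (__builtin_setjmp(scheduler_jmpbuf) == 0) {
         // normal path: run the client's translated code
         trc = VG_(run_innerloop)( &VG_(threads)[tid].arch.vex );
      } else {
         // a fault in client code made the signal handler call
         // VG_(resume_scheduler), which longjmp'd back here
         trc = VG_TRC_UNRESUMABLE_SIGNAL;
      }

   scheduler_jmpbuf_valid brackets the window in which the longjmp is
   legal; outside that window VG_(resume_scheduler) just asserts that
   no thread is running and returns. */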

static
void mostly_clear_thread_record ( ThreadId tid )
{
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   VGA_(clear_thread)(&VG_(threads)[tid].arch);
   VG_(threads)[tid].tid = tid;
   VG_(threads)[tid].status = VgTs_Empty;
   VG_(threads)[tid].associated_mx = NULL;
   VG_(threads)[tid].associated_cv = NULL;
   VG_(threads)[tid].awaken_at = 0;
   VG_(threads)[tid].joinee_retval = NULL;
   VG_(threads)[tid].joiner_thread_return = NULL;
   VG_(threads)[tid].joiner_jee_tid = VG_INVALID_THREADID;
   VG_(threads)[tid].detached = False;
   VG_(threads)[tid].cancel_st = True; /* PTHREAD_CANCEL_ENABLE */
   VG_(threads)[tid].cancel_ty = True; /* PTHREAD_CANCEL_DEFERRED */
   VG_(threads)[tid].cancel_pend = NULL; /* not pending */
   VG_(threads)[tid].custack_used = 0;
   VG_(sigemptyset)(&VG_(threads)[tid].sig_mask);
   VG_(sigfillset)(&VG_(threads)[tid].eff_sig_mask);
   VG_(threads)[tid].sigqueue_head = 0;
   VG_(threads)[tid].sigqueue_tail = 0;
   VG_(threads)[tid].specifics_ptr = NULL;

   VG_(threads)[tid].syscallno = -1;
   VG_(threads)[tid].sys_flags = 0;

   VG_(threads)[tid].proxy = NULL;

   /* start with no altstack */
   VG_(threads)[tid].altstack.ss_sp = (void *)0xdeadbeef;
   VG_(threads)[tid].altstack.ss_size = 0;
   VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
}


sewardj2a99cf62004-11-24 10:44:19 +0000399 run, with special ThreadId of one. This is called at startup. The
400 caller subsequently initialises the guest state components of
401 this main thread, thread 1.
sewardje663cb92002-04-12 10:26:32 +0000402*/
403void VG_(scheduler_init) ( void )
404{
thughesc37184f2004-09-11 14:16:57 +0000405 Int i;
sewardje663cb92002-04-12 10:26:32 +0000406 ThreadId tid_main;
407
sewardj6072c362002-04-19 14:40:57 +0000408 for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
sewardj20917d82002-05-28 01:36:45 +0000409 mostly_clear_thread_record(i);
410 VG_(threads)[i].stack_size = 0;
411 VG_(threads)[i].stack_base = (Addr)NULL;
thughesdaa34562004-06-27 12:48:53 +0000412 VG_(threads)[i].stack_guard_size = 0;
sewardj20917d82002-05-28 01:36:45 +0000413 VG_(threads)[i].stack_highest_word = (Addr)NULL;
sewardje663cb92002-04-12 10:26:32 +0000414 }
415
sewardj5f07b662002-04-23 16:52:51 +0000416 for (i = 0; i < VG_N_THREAD_KEYS; i++) {
417 vg_thread_keys[i].inuse = False;
418 vg_thread_keys[i].destructor = NULL;
419 }
420
sewardj2cb00342002-06-28 01:46:26 +0000421 vg_fhstack_used = 0;
422
sewardj2a99cf62004-11-24 10:44:19 +0000423 /* Assert this is thread one, which has certain magic
sewardje663cb92002-04-12 10:26:32 +0000424 properties. */
425 tid_main = vg_alloc_ThreadState();
sewardj6072c362002-04-19 14:40:57 +0000426 vg_assert(tid_main == 1);
sewardj20917d82002-05-28 01:36:45 +0000427 VG_(threads)[tid_main].status = VgTs_Runnable;
sewardje663cb92002-04-12 10:26:32 +0000428
sewardj2a99cf62004-11-24 10:44:19 +0000429 VG_(threads)[tid_main].stack_highest_word = VG_(clstk_end) - 4;
fitzhardinge98abfc72003-12-16 02:05:15 +0000430 VG_(threads)[tid_main].stack_base = VG_(clstk_base);
thughesc37184f2004-09-11 14:16:57 +0000431 VG_(threads)[tid_main].stack_size = VG_(client_rlimit_stack).rlim_cur;
sewardjbf290b92002-05-01 02:28:01 +0000432
sewardj872051c2002-07-13 12:12:56 +0000433 /* Not running client code right now. */
nethercote75d26242004-08-01 22:59:18 +0000434 scheduler_jmpbuf_valid = False;
jsgf855d93d2003-10-13 22:26:55 +0000435
436 /* Proxy for main thread */
437 VG_(proxy_create)(tid_main);
sewardje663cb92002-04-12 10:26:32 +0000438}
439
440
sewardj3947e622002-05-23 16:52:11 +0000441
sewardj6072c362002-04-19 14:40:57 +0000442/* vthread tid is returning from a signal handler; modify its
443 stack/regs accordingly. */
444static
445void handle_signal_return ( ThreadId tid )
446{
sewardj6072c362002-04-19 14:40:57 +0000447 Bool restart_blocked_syscalls;
sewardj645030e2002-06-06 01:27:39 +0000448 struct vki_timespec * rem;
sewardj6072c362002-04-19 14:40:57 +0000449
sewardjb48e5002002-05-13 00:16:03 +0000450 vg_assert(VG_(is_valid_tid)(tid));
sewardj6072c362002-04-19 14:40:57 +0000451
452 restart_blocked_syscalls = VG_(signal_returns)(tid);
453
thughesa3afffc2004-08-25 18:58:04 +0000454 /* If we were interrupted in the middle of a rendezvous
455 then check the rendezvous hasn't completed while we
456 were busy handling the signal. */
457 if (VG_(threads)[tid].status == VgTs_WaitJoiner ||
458 VG_(threads)[tid].status == VgTs_WaitJoinee ) {
459 maybe_rendezvous_joiners_and_joinees();
460 }
461
thughesc41c6f42004-10-16 16:50:14 +0000462 /* If we were interrupted while waiting on a mutex then check that
463 it hasn't been unlocked while we were busy handling the signal. */
464 if (VG_(threads)[tid].status == VgTs_WaitMX &&
465 VG_(threads)[tid].associated_mx->__vg_m_count == 0) {
466 vg_pthread_mutex_t* mutex = VG_(threads)[tid].associated_mx;
467 mutex->__vg_m_count = 1;
468 mutex->__vg_m_owner = (/*_pthread_descr*/void*)tid;
469 VG_(threads)[tid].status = VgTs_Runnable;
470 VG_(threads)[tid].associated_mx = NULL;
471 /* m_edx already holds pth_mx_lock() success (0) */
472 }
473
sewardj6072c362002-04-19 14:40:57 +0000474 if (restart_blocked_syscalls)
475 /* Easy; we don't have to do anything. */
476 return;
477
sewardj645030e2002-06-06 01:27:39 +0000478 if (VG_(threads)[tid].status == VgTs_Sleeping
njncf45fd42004-11-24 16:30:22 +0000479 && SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_nanosleep) {
sewardj6072c362002-04-19 14:40:57 +0000480 /* We interrupted a nanosleep(). The right thing to do is to
sewardj645030e2002-06-06 01:27:39 +0000481 write the unused time to nanosleep's second param, but that's
482 too much effort ... we just say that 1 nanosecond was not
483 used, and return EINTR. */
njncf45fd42004-11-24 16:30:22 +0000484 rem = (struct vki_timespec*)SYSCALL_ARG2(VG_(threads)[tid].arch);
sewardj645030e2002-06-06 01:27:39 +0000485 if (rem != NULL) {
486 rem->tv_sec = 0;
487 rem->tv_nsec = 1;
488 }
njnd3040452003-05-19 15:04:06 +0000489 SET_SYSCALL_RETVAL(tid, -VKI_EINTR);
sewardj645030e2002-06-06 01:27:39 +0000490 VG_(threads)[tid].status = VgTs_Runnable;
sewardj6072c362002-04-19 14:40:57 +0000491 return;
492 }
493
494 /* All other cases? Just return. */
495}
496
497
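/* Illustration of the nanosleep case above (a restatement of the code,
   not new behaviour): if a handled signal interrupts a client
   nanosleep() and blocked syscalls are not to be restarted, the thread
   is made Runnable with return value -VKI_EINTR, and when the client
   supplied a second timespec the remainder reported back is the token
   value { tv_sec = 0, tv_nsec = 1 } rather than the genuinely unslept
   time. */
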
struct timeout {
   UInt     time;   /* time we should awaken */
   ThreadId tid;    /* thread which cares about this timeout */
   struct timeout *next;
};

static struct timeout *timeouts;

static void add_timeout(ThreadId tid, UInt time)
{
   struct timeout *t = VG_(arena_malloc)(VG_AR_CORE, sizeof(*t));
   struct timeout **prev, *tp;

   t->time = time;
   t->tid = tid;

   if (VG_(clo_trace_sched)) {
      Char msg_buf[100];
      VG_(sprintf)(msg_buf, "add_timeout: now=%u adding timeout at %u",
                   VG_(read_millisecond_timer)(), time);
      print_sched_event(tid, msg_buf);
   }

   for(tp = timeouts, prev = &timeouts;
       tp != NULL && tp->time < time;
       prev = &tp->next, tp = tp->next)
      ;
   t->next = tp;
   *prev = t;
}

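/* The loop above keeps the timeout list sorted by ascending wakeup
   time, so idle() below only ever has to look at the head.  Worked
   example (illustrative numbers only): with existing entries at t=100
   and t=300, add_timeout(tid, 200) walks past the t=100 node, stops at
   the t=300 node, and splices the new node in between, leaving
   100 -> 200 -> 300. */
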
static
void sched_do_syscall ( ThreadId tid )
{
   Int  syscall_no;
   Char msg_buf[100];

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   syscall_no = SYSCALL_NUM(VG_(threads)[tid].arch);

   /* Special-case nanosleep because we can.  But should we?

      XXX not doing so for now, because it doesn't seem to work
      properly, and we can use the syscall nanosleep just as easily.
   */
   if (0 && syscall_no == __NR_nanosleep) {
      UInt t_now, t_awaken;
      struct vki_timespec* req;
      req = (struct vki_timespec*)SYSCALL_ARG1(VG_(threads)[tid].arch);

      if (req->tv_sec < 0 || req->tv_nsec < 0 || req->tv_nsec >= 1000000000) {
         SET_SYSCALL_RETVAL(tid, -VKI_EINVAL);
         return;
      }

      t_now = VG_(read_millisecond_timer)();
      t_awaken
         = t_now
           + (UInt)1000ULL * (UInt)(req->tv_sec)
           + (UInt)(req->tv_nsec) / 1000000;
      VG_(threads)[tid].status = VgTs_Sleeping;
      VG_(threads)[tid].awaken_at = t_awaken;
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf, "at %d: nanosleep for %d",
                               t_now, t_awaken-t_now);
         print_sched_event(tid, msg_buf);
      }
      add_timeout(tid, t_awaken);
      /* Force the scheduler to run something else for a while. */
      return;
   }

   /* If pre_syscall returns true, then we're done immediately */
   if (VG_(pre_syscall)(tid)) {
      VG_(post_syscall(tid, True));
      vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   } else {
      vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
   }
}



/* Sleep for a while, but be willing to be woken. */
static
void idle ( void )
{
   struct vki_pollfd pollfd[1];
   Int delta = -1;
   Int fd = VG_(proxy_resfd)();

   pollfd[0].fd = fd;
   pollfd[0].events = VKI_POLLIN;

594 /* Look though the nearest timeouts, looking for the next future
595 one (there may be stale past timeouts). They'll all be mopped
596 below up when the poll() finishes. */
   if (timeouts != NULL) {
      struct timeout *tp;
      Bool wicked = False;
      UInt now = VG_(read_millisecond_timer)();

      for(tp = timeouts; tp != NULL && tp->time < now; tp = tp->next) {
         /* If a thread is still sleeping in the past, make it runnable */
         ThreadState *tst = VG_(get_ThreadState)(tp->tid);
         if (tst->status == VgTs_Sleeping)
            tst->status = VgTs_Runnable;
         wicked = True; /* no sleep for the wicked */
      }

      if (tp != NULL) {
         vg_assert(tp->time >= now);
         /* limit the signed int delta to INT_MAX */
         if ((tp->time - now) <= 0x7FFFFFFFU) {
            delta = tp->time - now;
         } else {
            delta = 0x7FFFFFFF;
         }
      }
      if (wicked)
         delta = 0;
   }

   /* gotta wake up for something! */
   vg_assert(fd != -1 || delta != -1);

   /* If we need to do signal routing, then poll for pending signals
      every VG_(clo_signal_polltime) mS */
   if (VG_(do_signal_routing) && (delta > VG_(clo_signal_polltime) || delta == -1))
      delta = VG_(clo_signal_polltime);

   if (VG_(clo_trace_sched)) {
      Char msg_buf[100];
      VG_(sprintf)(msg_buf, "idle: waiting for %dms and fd %d",
                   delta, fd);
      print_sched_event(0, msg_buf);
   }

   VG_(poll)(pollfd, fd != -1 ? 1 : 0, delta);

   /* See if there's anything on the timeout list which needs
      waking, and mop up anything in the past. */
   {
      UInt now = VG_(read_millisecond_timer)();
      struct timeout *tp;

      tp = timeouts;

      while(tp && tp->time <= now) {
         struct timeout *dead;
         ThreadState *tst;

         tst = VG_(get_ThreadState)(tp->tid);

         if (VG_(clo_trace_sched)) {
            Char msg_buf[100];
            VG_(sprintf)(msg_buf, "idle: now=%u removing timeout at %u",
                         now, tp->time);
            print_sched_event(tp->tid, msg_buf);
         }

         /* If awaken_at != tp->time then it means the timeout is
            stale and we should just ignore it. */
         if(tst->awaken_at == tp->time) {
            switch(tst->status) {
            case VgTs_Sleeping:
               tst->awaken_at = 0xFFFFFFFF;
               tst->status = VgTs_Runnable;
               break;

            case VgTs_WaitMX:
               do_pthread_mutex_timedlock_TIMEOUT(tst->tid);
               break;

            case VgTs_WaitCV:
               do_pthread_cond_timedwait_TIMEOUT(tst->tid);
               break;

            default:
               /* This is a bit odd but OK; if a thread had a timeout
                  but woke for some other reason (signal, condvar
                  wakeup), then it will still be on the list. */
               if (0)
                  VG_(printf)("idle(): unexpected status tp->tid=%d tst->status = %d\n",
                              tp->tid, tst->status);
               break;
            }
         }

         dead = tp;
         tp = tp->next;

         VG_(arena_free)(VG_AR_CORE, dead);
      }

      timeouts = tp;
   }
}

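/* Worked example for the delta computation in idle() (illustrative
   numbers only): if the nearest future timeout is 250ms away, delta
   starts at 250; if signal routing is enabled and
   VG_(clo_signal_polltime) is, say, 50, delta is clipped to 50 so
   pending signals get polled at least that often; and if any stale
   (already-past) timeouts were found, delta is forced to 0 and the
   poll() returns immediately.  Either way the poll also returns as
   soon as the proxy result fd becomes readable. */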

/* ---------------------------------------------------------------------
   The scheduler proper.
   ------------------------------------------------------------------ */

// For handling of the default action of a fatal signal.
// jmp_buf for fatal signals; fatal_signal_jmpbuf_ptr is NULL until
// it is safe to use.
static jmp_buf fatal_signal_jmpbuf;
static jmp_buf* fatal_signal_jmpbuf_ptr;
static Int fatal_sigNo;   // the fatal signal, if it happens

/* Run user-space threads until either
   * Deadlock occurs
   * One thread asks to shut down Valgrind
   * The specified number of basic blocks has gone by.
*/
VgSchedReturnCode do_scheduler ( Int* exitcode, ThreadId* last_run_tid )
{
   ThreadId tid, tid_next;
   UInt trc;
   Int done_this_time, n_in_bounded_wait;
   Int n_exists, n_waiting_for_reaper;
   Addr trans_addr;

   /* Start with the root thread.  tid in general indicates the
      currently runnable/just-finished-running thread. */
   *last_run_tid = tid = 1;

   /* This is the top level scheduler loop.  It falls into three
      phases. */
   while (True) {

      /* ======================= Phase 0 of 3 =======================
         Be paranoid.  Always a good idea. */
     stage1:
      scheduler_sanity();
      VG_(sanity_check_general)( False );

      /* ======================= Phase 1 of 3 =======================
         Handle I/O completions and signals.  This may change the
         status of various threads.  Then select a new thread to run,
         or declare deadlock, or sleep if there are no runnable
         threads but some are blocked on I/O. */

      /* Do the following loop until a runnable thread is found, or
         deadlock is detected. */
      while (True) {

         /* For stats purposes only. */
         n_scheduling_events_MAJOR++;

         /* Route signals to their proper places */
         VG_(route_signals)();

         /* See if any of the proxy LWPs report any activity: either a
            syscall completing or a signal arriving. */
         VG_(proxy_results)();

         /* Try and find a thread (tid) to run. */
         tid_next = tid;
         if (prefer_sched != VG_INVALID_THREADID) {
            tid_next = prefer_sched-1;
            prefer_sched = VG_INVALID_THREADID;
         }
         n_in_bounded_wait = 0;
         n_exists = 0;
         n_waiting_for_reaper = 0;
         while (True) {
            tid_next++;
            if (tid_next >= VG_N_THREADS) tid_next = 1;
            if (VG_(threads)[tid_next].status == VgTs_Sleeping
                || VG_(threads)[tid_next].status == VgTs_WaitSys
                || (VG_(threads)[tid_next].status == VgTs_WaitMX
                    && VG_(threads)[tid_next].awaken_at != 0xFFFFFFFF)
                || (VG_(threads)[tid_next].status == VgTs_WaitCV
                    && VG_(threads)[tid_next].awaken_at != 0xFFFFFFFF))
               n_in_bounded_wait ++;
            if (VG_(threads)[tid_next].status != VgTs_Empty)
               n_exists++;
            if (VG_(threads)[tid_next].status == VgTs_WaitJoiner)
               n_waiting_for_reaper++;
            if (VG_(threads)[tid_next].status == VgTs_Runnable)
               break; /* We can run this one. */
            if (tid_next == tid)
               break; /* been all the way round */
         }
         tid = tid_next;

         if (VG_(threads)[tid].status == VgTs_Runnable) {
            /* Found a suitable candidate.  Fall out of this loop, so
               we can advance to stage 2 of the scheduler: actually
               running the thread. */
            break;
         }

         /* All threads have exited - pretend someone called exit() */
         if (n_waiting_for_reaper == n_exists) {
            *exitcode = 0; /* ? */
            return VgSrc_ExitSyscall;
         }

         /* We didn't find a runnable thread.  Now what? */
         if (n_in_bounded_wait == 0) {
            /* No runnable threads and no prospect of any appearing
               even if we wait for an arbitrary length of time.  In
               short, we have a deadlock. */
            VG_(pp_sched_status)();
            return VgSrc_Deadlock;
         }

         /* Nothing needs doing, so sit in idle until either a timeout
            happens or a thread's syscall completes. */
         idle();
         /* pp_sched_status(); */
         /* VG_(printf)("."); */
      }


      /* ======================= Phase 2 of 3 =======================
         Wahey! We've finally decided that thread tid is runnable, so
         we now do that.  Run it for as much of a quantum as possible.
         Trivial requests are handled and the thread continues.  The
         aim is not to do Phase 1 too often, since it is expensive. */

      if (0)
         VG_(printf)("SCHED: tid %d\n", tid);

      VG_TRACK( thread_run, tid );

      /* Figure out how many bbs to ask vg_run_innerloop to do.  Note
         that it decrements the counter before testing it for zero, so
         that if VG_(dispatch_ctr) is set to N you get at most N-1
         iterations.  Also this means that VG_(dispatch_ctr) must
         exceed zero before entering the innerloop.  Also also, the
         decrement is done before the bb is actually run, so you
         always get at least one decrement even if nothing happens.
      */
      VG_(dispatch_ctr) = VG_SCHEDULING_QUANTUM + 1;

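      /* Small arithmetic illustration of the comment above (the
         quantum value is whatever the core headers define; 50000 here
         is just an example): setting VG_(dispatch_ctr) to 50001 means
         the dispatcher, which decrements before testing, runs at most
         50000 basic blocks before returning VG_TRC_INNER_COUNTERZERO,
         and run_thread_for_a_while() recovers the number actually run
         as dispatch_ctr_SAVED - VG_(dispatch_ctr). */
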
      /* paranoia ... */
      vg_assert(VG_(threads)[tid].tid == tid);

      /* Actually run thread tid. */
      while (True) {

         *last_run_tid = tid;

         /* For stats purposes only. */
         n_scheduling_events_MINOR++;

         if (0)
            VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs",
                                      tid, VG_(dispatch_ctr) - 1 );
#        if 0
         if (VG_(bbs_done) > 31700000 + 0) {
            dispatch_ctr_SAVED = VG_(dispatch_ctr) = 2;
            VG_(translate)(&VG_(threads)[tid],
                           INSTR_PTR(VG_(threads)[tid].arch),
                           /*debugging*/True);
         }
         vg_assert(INSTR_PTR(VG_(threads)[tid].arch) != 0);
#        endif

         trc = run_thread_for_a_while ( tid );

#        if 0
         if (0 == INSTR_PTR(VG_(threads)[tid].arch)) {
            VG_(printf)("tid = %d, dc = %llu\n", tid, VG_(bbs_done));
            vg_assert(0 != INSTR_PTR(VG_(threads)[tid].arch));
         }
#        endif

         /* Deal quickly with trivial scheduling events, and resume the
            thread. */

         if (trc == VG_TRC_INNER_FASTMISS) {
            Addr ip = INSTR_PTR(VG_(threads)[tid].arch);

            vg_assert(VG_(dispatch_ctr) > 1);

            /* Trivial event.  Miss in the fast-cache.  Do a full
               lookup for it. */
            trans_addr = VG_(search_transtab)( ip );
            if (trans_addr == (Addr)0) {
               /* Not found; we need to request a translation. */
               if (VG_(translate)( tid, ip, /*debug*/False )) {
                  trans_addr = VG_(search_transtab)( ip );
                  if (trans_addr == (Addr)0)
                     VG_(core_panic)("VG_TRC_INNER_FASTMISS: missing tt_fast entry");
               } else {
                  // If VG_(translate)() fails, it's because it had to throw
                  // a signal because the client jumped to a bad address.
                  // This means VG_(deliver_signal)() will have been called
                  // by now, and the program counter will now be pointing to
                  // the start of the signal handler (if there is no
                  // handler, things would have been aborted by now), so do
                  // nothing, and things will work out next time around the
                  // scheduler loop.
               }
            }
            continue; /* with this thread */
         }

         if (trc == VEX_TRC_JMP_CLIENTREQ) {
            UWord* args = (UWord*)(CLREQ_ARGS(VG_(threads)[tid].arch));
            UWord reqno = args[0];
            /* VG_(printf)("request 0x%x\n", reqno); */

            /* Are we really absolutely totally quitting? */
            if (reqno == VG_USERREQ__LIBC_FREERES_DONE) {
               if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                  VG_(message)(Vg_DebugMsg,
                     "__libc_freeres() done; really quitting!");
               }
               return VgSrc_ExitSyscall;
            }

            do_client_request(tid,args);
            /* Following the request, we try and continue with the
               same thread if still runnable.  If not, go back to
               Stage 1 to select a new thread to run. */
            if (VG_(threads)[tid].status == VgTs_Runnable
                && reqno != VG_USERREQ__PTHREAD_YIELD)
               continue; /* with this thread */
            else
               goto stage1;
         }

         if (trc == VEX_TRC_JMP_SYSCALL) {
            /* Do a syscall for the vthread tid.  This could cause it
               to become non-runnable.  One special case: spot the
               client doing calls to exit() and take this as the cue
               to exit. */
#           if 0
            { UInt* esp; Int i;
              esp=(UInt*)STACK_PTR(VG_(threads)[tid].arch);
              VG_(printf)("\nBEFORE\n");
              for (i = 10; i >= -10; i--)
                 VG_(printf)("%2d %p = 0x%x\n", i, &esp[i], esp[i]);
            }
#           endif

            /* Deal with calling __libc_freeres() at exit.  When the
               client does __NR_exit, it's exiting for good.  So we
               then run __libc_freeres_wrapper.  That quits by
               doing VG_USERREQ__LIBC_FREERES_DONE, and at that point
               we really exit.  To be safe we nuke all other threads
               currently running.

               If not valgrinding (cachegrinding, etc) don't do this.
               __libc_freeres does some invalid frees which crash
               the unprotected malloc/free system. */

            if (SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit
                || SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit_group
               ) {

               /* Remember the supplied argument. */
               *exitcode = SYSCALL_ARG1(VG_(threads)[tid].arch);

               // Inform tool about regs read by syscall
               VG_TRACK( pre_reg_read, Vg_CoreSysCall, tid, "(syscallno)",
                         O_SYSCALL_NUM, sizeof(UWord) );

               if (SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit)
                  VG_TRACK( pre_reg_read, Vg_CoreSysCall, tid,
                            "exit(error_code)", O_SYSCALL_ARG1, sizeof(int) );

               if (SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit_group)
                  VG_TRACK( pre_reg_read, Vg_CoreSysCall, tid,
                            "exit_group(error_code)", O_SYSCALL_ARG1,
                            sizeof(int) );

               /* Only run __libc_freeres if the tool says it's ok and
                  it hasn't been overridden with --run-libc-freeres=no
                  on the command line. */

               if (VG_(needs).libc_freeres &&
                   VG_(clo_run_libc_freeres) &&
                   __libc_freeres_wrapper != 0) {
                  if (VG_(clo_verbosity) > 2
                      || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                     VG_(message)(Vg_DebugMsg,
                        "Caught __NR_exit; running __libc_freeres()");
                  }
                  VG_(nuke_all_threads_except) ( tid );
                  INSTR_PTR(VG_(threads)[tid].arch) =
                     __libc_freeres_wrapper;
                  vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
                  goto stage1; /* party on, dudes (but not for much longer :) */

               } else {
                  /* We won't run __libc_freeres; just exit now. */
                  if (VG_(clo_verbosity) > 2
                      || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                     VG_(message)(Vg_DebugMsg,
                        "Caught __NR_exit; quitting");
                  }
                  return VgSrc_ExitSyscall;
               }

            }

            /* We've dealt with __NR_exit at this point. */
            vg_assert(SYSCALL_NUM(VG_(threads)[tid].arch) != __NR_exit &&
                      SYSCALL_NUM(VG_(threads)[tid].arch) != __NR_exit_group);

            /* Trap syscalls to __NR_sched_yield and just have this
               thread yield instead.  Not essential, just an
               optimisation. */
            if (SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_sched_yield) {
               SET_SYSCALL_RETVAL(tid, 0); /* syscall returns with success */
               goto stage1; /* find a new thread to run */
            }

            sched_do_syscall(tid);

#           if 0
            { UInt* esp; Int i;
              esp=(UInt*)STACK_PTR(VG_(threads)[tid].arch);
              VG_(printf)("AFTER\n");
              for (i = 10; i >= -10; i--)
                 VG_(printf)("%2d %p = 0x%x\n", i, &esp[i], esp[i]);
            }
#           endif

            if (VG_(threads)[tid].status == VgTs_Runnable) {
               continue; /* with this thread */
            } else {
               goto stage1;
            }
         }

         /* It's an event we can't quickly deal with.  Give up running
            this thread and handle things the expensive way. */
         break;
      }

      /* ======================= Phase 3 of 3 =======================
         Handle non-trivial thread requests, mostly pthread stuff. */

      /* Ok, we've fallen out of the dispatcher for a
         non-completely-trivial reason. First, update basic-block
         counters. */

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d: completed %d bbs, trc %d",
                                   tid, done_this_time, (Int)trc );

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d: %llu bbs, event %s",
                                   tid, VG_(bbs_done),
                                   name_of_sched_event(trc) );

      /* Examine the thread's return code to figure out why it
         stopped. */

      switch (trc) {

         case VEX_TRC_JMP_YIELD:
            /* Explicit yield.  Let a new thread be scheduled,
               simply by doing nothing, causing us to arrive back at
               Phase 1. */
            break;

         case VG_TRC_INNER_COUNTERZERO:
            /* Timeslice is out.  Let a new thread be scheduled,
               simply by doing nothing, causing us to arrive back at
               Phase 1. */
            vg_assert(VG_(dispatch_ctr) == 1);
            break;

         case VG_TRC_UNRESUMABLE_SIGNAL:
            /* It got a SIGSEGV/SIGBUS/SIGILL/SIGFPE, which we need to
               deliver right away. */
            vg_assert(unresumable_siginfo.si_signo == VKI_SIGSEGV ||
                      unresumable_siginfo.si_signo == VKI_SIGBUS ||
                      unresumable_siginfo.si_signo == VKI_SIGILL ||
                      unresumable_siginfo.si_signo == VKI_SIGFPE);
            vg_assert(longjmpd_on_signal == unresumable_siginfo.si_signo);

            /* make sure we've unblocked the signals which the handler blocked */
            VG_(unblock_host_signal)(longjmpd_on_signal);

            VG_(deliver_signal)(tid, &unresumable_siginfo, False);
            unresumable_siginfo.si_signo = 0; /* done */
            break;

         default:
            VG_(printf)("\ntrc = %d\n", trc);
            VG_(core_panic)("VG_(scheduler), phase 3: "
                            "unexpected thread return code");
            /* NOTREACHED */
            break;

      } /* switch (trc) */

      /* That completes Phase 3 of 3.  Return now to the top of the
         main scheduler loop, to Phase 1 of 3. */

   } /* top-level scheduler loop */


   /* NOTREACHED */
   VG_(core_panic)("scheduler: post-main-loop ?!");
   /* NOTREACHED */
}

VgSchedReturnCode VG_(scheduler) ( Int* exitcode, ThreadId* last_run_tid,
                                   Int* fatal_sigNo_ptr )
{
   VgSchedReturnCode src;

   fatal_signal_jmpbuf_ptr = &fatal_signal_jmpbuf;
   if (__builtin_setjmp( fatal_signal_jmpbuf_ptr ) == 0) {
      src = do_scheduler( exitcode, last_run_tid );
   } else {
      src = VgSrc_FatalSig;
      *fatal_sigNo_ptr = fatal_sigNo;
   }
   return src;
}

void VG_(need_resched) ( ThreadId prefer )
{
   /* Tell the scheduler now might be a good time to find a new
      runnable thread, because something happened which woke a thread
      up.

      NB: This can be called unsynchronized from either a signal
      handler, or from another LWP (ie, real kernel thread).

      In principle this could simply be a matter of setting
      VG_(dispatch_ctr) to a small value (say, 2), which would make
      any running code come back to the scheduler fairly quickly.

      However, since the scheduler implements a strict round-robin
      policy with only one priority level, there are, by definition,
      no better threads to be running than the current thread anyway,
      so we may as well ignore this hint.  For processes with a
      mixture of compute and I/O bound threads, this means the compute
      threads could introduce longish latencies before the I/O threads
      run.  For programs with only I/O bound threads, need_resched
      won't have any effect anyway.

      OK, so I've added command-line switches to enable low-latency
      syscalls and signals.  The prefer_sched variable is in effect
      the ID of a single thread which has higher priority than all the
      others.  If set, the scheduler will prefer to schedule that
      thread over all others.  Naturally, this could lead to
      starvation or other unfairness.
   */

   if (VG_(dispatch_ctr) > 10)
      VG_(dispatch_ctr) = 2;
   prefer_sched = prefer;
}

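/* How the hint above takes effect (a restatement of existing code, not
   a new mechanism): shrinking VG_(dispatch_ctr) makes the currently
   running thread bounce back to the scheduler within a couple of basic
   blocks, and the thread-selection loop in do_scheduler() starts its
   round-robin scan at prefer_sched-1 and then immediately does
   tid_next++, so the preferred thread is the first candidate examined
   on the next pass. */
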
nethercote238a3c32004-08-09 13:13:31 +00001158void VG_(scheduler_handle_fatal_signal) ( Int sigNo )
1159{
1160 if (NULL != fatal_signal_jmpbuf_ptr) {
1161 fatal_sigNo = sigNo;
1162 __builtin_longjmp(*fatal_signal_jmpbuf_ptr, 1);
1163 }
1164}
sewardje663cb92002-04-12 10:26:32 +00001165
1166/* ---------------------------------------------------------------------
1167 The pthread implementation.
1168 ------------------------------------------------------------------ */
1169
1170#include <pthread.h>
1171#include <errno.h>
1172
sewardje663cb92002-04-12 10:26:32 +00001173/* /usr/include/bits/pthreadtypes.h:
1174 typedef unsigned long int pthread_t;
1175*/
1176
sewardje663cb92002-04-12 10:26:32 +00001177
sewardj604ec3c2002-04-18 22:38:41 +00001178/* -----------------------------------------------------------
sewardj20917d82002-05-28 01:36:45 +00001179 Thread CREATION, JOINAGE and CANCELLATION: HELPER FNS
sewardj604ec3c2002-04-18 22:38:41 +00001180 -------------------------------------------------------- */
1181
sewardj20917d82002-05-28 01:36:45 +00001182/* We've decided to action a cancellation on tid. Make it jump to
1183 thread_exit_wrapper() in vg_libpthread.c, passing PTHREAD_CANCELED
1184 as the arg. */
1185static
1186void make_thread_jump_to_cancelhdlr ( ThreadId tid )
1187{
1188 Char msg_buf[100];
1189 vg_assert(VG_(is_valid_tid)(tid));
sewardjdadc8d02002-12-08 23:24:18 +00001190
sewardj20917d82002-05-28 01:36:45 +00001191 /* Push PTHREAD_CANCELED on the stack and jump to the cancellation
1192 handler -- which is really thread_exit_wrapper() in
1193 vg_libpthread.c. */
1194 vg_assert(VG_(threads)[tid].cancel_pend != NULL);
sewardj4bdd9962002-12-26 11:51:50 +00001195
nethercote6b9c8472004-09-13 13:16:40 +00001196 /* Set an argument and bogus return address. The return address will not
1197 be used, but we still need to have it so that the arg is at the
1198 correct stack offset. */
nethercote50397c22004-11-04 18:03:06 +00001199 VGA_(set_arg_and_bogus_ret)(tid, (UWord)PTHREAD_CANCELED, 0xBEADDEEF);
sewardj4bdd9962002-12-26 11:51:50 +00001200
1201 /* .cancel_pend will hold &thread_exit_wrapper */
njncf45fd42004-11-24 16:30:22 +00001202 INSTR_PTR(VG_(threads)[tid].arch) = (UWord)VG_(threads)[tid].cancel_pend;
sewardjdadc8d02002-12-08 23:24:18 +00001203
jsgf855d93d2003-10-13 22:26:55 +00001204 VG_(proxy_abort_syscall)(tid);
sewardjdadc8d02002-12-08 23:24:18 +00001205
sewardj20917d82002-05-28 01:36:45 +00001206 /* Make sure we aren't cancelled again whilst handling this
1207 cancellation. */
1208 VG_(threads)[tid].cancel_st = False;
1209 if (VG_(clo_trace_sched)) {
1210 VG_(sprintf)(msg_buf,
1211 "jump to cancellation handler (hdlr = %p)",
1212 VG_(threads)[tid].cancel_pend);
1213 print_sched_event(tid, msg_buf);
1214 }
thughes513197c2004-06-13 12:07:53 +00001215
1216    if (VG_(threads)[tid].status == VgTs_WaitCV) {
1217       /* POSIX says we must reacquire the mutex before handling cancellation */
1218 vg_pthread_mutex_t* mx;
1219 vg_pthread_cond_t* cond;
1220
1221 mx = VG_(threads)[tid].associated_mx;
1222 cond = VG_(threads)[tid].associated_cv;
1223 VG_TRACK( pre_mutex_lock, tid, mx );
1224
1225 if (mx->__vg_m_owner == VG_INVALID_THREADID) {
1226 /* Currently unheld; hand it out to thread tid. */
1227 vg_assert(mx->__vg_m_count == 0);
1228 VG_(threads)[tid].status = VgTs_Runnable;
1229 VG_(threads)[tid].associated_cv = NULL;
1230 VG_(threads)[tid].associated_mx = NULL;
thughes10236472004-06-13 14:35:43 +00001231 mx->__vg_m_owner = (/*_pthread_descr*/void*)tid;
thughes513197c2004-06-13 12:07:53 +00001232 mx->__vg_m_count = 1;
1233 /* .m_edx already holds pth_cond_wait success value (0) */
1234
1235 VG_TRACK( post_mutex_lock, tid, mx );
1236
1237 if (VG_(clo_trace_pthread_level) >= 1) {
1238 VG_(sprintf)(msg_buf, "%s cv %p: RESUME with mx %p",
1239 "pthread_cancel", cond, mx );
1240 print_pthread_event(tid, msg_buf);
1241 }
1242
1243 } else {
1244 /* Currently held. Make thread tid be blocked on it. */
1245 vg_assert(mx->__vg_m_count > 0);
1246 VG_(threads)[tid].status = VgTs_WaitMX;
1247 VG_(threads)[tid].associated_cv = NULL;
1248 VG_(threads)[tid].associated_mx = mx;
1249 SET_PTHREQ_RETVAL(tid, 0); /* pth_cond_wait success value */
1250
1251 if (VG_(clo_trace_pthread_level) >= 1) {
1252 VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
1253 "pthread_cancel", cond, mx );
1254 print_pthread_event(tid, msg_buf);
1255 }
1256 }
1257 } else {
1258 VG_(threads)[tid].status = VgTs_Runnable;
1259 }
sewardj20917d82002-05-28 01:36:45 +00001260}
1261
1262
1263
sewardjb48e5002002-05-13 00:16:03 +00001264/* Release resources and generally clean up once a thread has finally
nethercotef971ab72004-08-02 16:27:40 +00001265 disappeared.
1266
1267 BORKAGE/ISSUES as of 29 May 02 (moved from top of file --njn 2004-Aug-02)
1268
1269 TODO sometime:
1270      - Mutex scrubbing - cleanup_after_thread_exited: look for threads
1271 blocked on mutexes held by the exiting thread, and release them
1272 appropriately. (??)
1273*/
sewardjb48e5002002-05-13 00:16:03 +00001274static
jsgf855d93d2003-10-13 22:26:55 +00001275void cleanup_after_thread_exited ( ThreadId tid, Bool forcekill )
sewardjb48e5002002-05-13 00:16:03 +00001276{
thughes3a1b8172004-09-12 22:48:59 +00001277 Segment *seg;
1278
nethercote36881a22004-08-04 14:03:16 +00001279 vg_assert(is_valid_or_empty_tid(tid));
sewardj018f7622002-05-15 21:13:39 +00001280 vg_assert(VG_(threads)[tid].status == VgTs_Empty);
thugheseb9b8fb2004-11-12 23:11:21 +00001281
njn25e49d8e72002-09-23 09:36:25 +00001282 /* Its stack is now off-limits */
thugheseb9b8fb2004-11-12 23:11:21 +00001283 if (VG_(threads)[tid].stack_base) {
1284 seg = VG_(find_segment)( VG_(threads)[tid].stack_base );
1285 VG_TRACK( die_mem_stack, seg->addr, seg->len );
1286 }
njn25e49d8e72002-09-23 09:36:25 +00001287
nethercotef9b59412004-09-10 15:33:32 +00001288 VGA_(cleanup_thread)( &VG_(threads)[tid].arch );
fitzhardinge47735af2004-01-21 01:27:27 +00001289
jsgf855d93d2003-10-13 22:26:55 +00001290 /* Not interested in the timeout anymore */
1291 VG_(threads)[tid].awaken_at = 0xFFFFFFFF;
1292
1293 /* Delete proxy LWP */
1294 VG_(proxy_delete)(tid, forcekill);
sewardjb48e5002002-05-13 00:16:03 +00001295}
1296
1297
sewardj20917d82002-05-28 01:36:45 +00001298/* Look for matching pairs of threads waiting for joiners and threads
1299 waiting for joinees. For each such pair copy the return value of
1300 the joinee into the joiner, let the joiner resume and discard the
1301 joinee. */
1302static
1303void maybe_rendezvous_joiners_and_joinees ( void )
1304{
1305 Char msg_buf[100];
1306 void** thread_return;
1307 ThreadId jnr, jee;
1308
1309 for (jnr = 1; jnr < VG_N_THREADS; jnr++) {
1310 if (VG_(threads)[jnr].status != VgTs_WaitJoinee)
1311 continue;
1312 jee = VG_(threads)[jnr].joiner_jee_tid;
1313 if (jee == VG_INVALID_THREADID)
1314 continue;
1315 vg_assert(VG_(is_valid_tid)(jee));
jsgf855d93d2003-10-13 22:26:55 +00001316 if (VG_(threads)[jee].status != VgTs_WaitJoiner) {
1317 /* if joinee has become detached, then make join fail with
1318 EINVAL */
1319 if (VG_(threads)[jee].detached) {
1320 VG_(threads)[jnr].status = VgTs_Runnable;
1321 VG_(threads)[jnr].joiner_jee_tid = VG_INVALID_THREADID;
1322 SET_PTHREQ_RETVAL(jnr, VKI_EINVAL);
1323 }
sewardj20917d82002-05-28 01:36:45 +00001324 continue;
jsgf855d93d2003-10-13 22:26:55 +00001325 }
sewardj20917d82002-05-28 01:36:45 +00001326 /* ok! jnr is waiting to join with jee, and jee is waiting to be
1327 joined by ... well, any thread. So let's do it! */
1328
1329 /* Copy return value to where joiner wants it. */
1330 thread_return = VG_(threads)[jnr].joiner_thread_return;
1331 if (thread_return != NULL) {
1332 /* CHECK thread_return writable */
njn72718642003-07-24 08:45:32 +00001333 VG_TRACK( pre_mem_write, Vg_CorePThread, jnr,
njn25e49d8e72002-09-23 09:36:25 +00001334 "pthread_join: thread_return",
1335 (Addr)thread_return, sizeof(void*));
sewardj5a3798b2002-06-04 23:24:22 +00001336
sewardj20917d82002-05-28 01:36:45 +00001337 *thread_return = VG_(threads)[jee].joinee_retval;
1338 /* Not really right, since it makes the thread's return value
1339 appear to be defined even if it isn't. */
njncf45fd42004-11-24 16:30:22 +00001340 VG_TRACK( post_mem_write, Vg_CorePThread, jnr,
1341 (Addr)thread_return, sizeof(void*) );
sewardj20917d82002-05-28 01:36:45 +00001342 }
1343
1344 /* Joinee is discarded */
1345 VG_(threads)[jee].status = VgTs_Empty; /* bye! */
jsgf855d93d2003-10-13 22:26:55 +00001346 cleanup_after_thread_exited ( jee, False );
sewardjc4a810d2002-11-13 22:25:51 +00001347 if (VG_(clo_trace_sched)) {
1348 VG_(sprintf)(msg_buf,
1349 "rendezvous with joinee %d. %d resumes, %d exits.",
1350 jee, jnr, jee );
sewardj20917d82002-05-28 01:36:45 +00001351 print_sched_event(jnr, msg_buf);
1352 }
sewardjc4a810d2002-11-13 22:25:51 +00001353
1354 VG_TRACK( post_thread_join, jnr, jee );
sewardj20917d82002-05-28 01:36:45 +00001355
1356 /* joiner returns with success */
1357 VG_(threads)[jnr].status = VgTs_Runnable;
njnd3040452003-05-19 15:04:06 +00001358 SET_PTHREQ_RETVAL(jnr, 0);
sewardj20917d82002-05-28 01:36:45 +00001359 }
1360}
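
/* Client-side view of the rendezvous above (an illustrative sketch only;
   'worker', 't' and 'ret' are invented names, not part of Valgrind):

      void* worker ( void* arg ) { return (void*)42; }   // the joinee
      ...
      pthread_t t;
      void*     ret;
      pthread_create(&t, NULL, worker, NULL);
      pthread_join(t, &ret);   // joiner sits in VgTs_WaitJoinee until the
                               // pairing above copies 42 into ret and makes
                               // it Runnable again
*/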
1361
1362
sewardjccef2e62002-05-29 19:26:32 +00001363/* Nuke all threads other than tid. POSIX specifies that this should
1364 happen in __NR_exec, and after a __NR_fork() when I am the child,
jsgf855d93d2003-10-13 22:26:55 +00001365 as POSIX requires. Also used at process exit time with
1366 me==VG_INVALID_THREADID */
sewardjccef2e62002-05-29 19:26:32 +00001367void VG_(nuke_all_threads_except) ( ThreadId me )
1368{
1369 ThreadId tid;
1370 for (tid = 1; tid < VG_N_THREADS; tid++) {
1371 if (tid == me
jsgf855d93d2003-10-13 22:26:55 +00001372 || VG_(threads)[tid].status == VgTs_Empty)
sewardjccef2e62002-05-29 19:26:32 +00001373 continue;
sewardjef037c72002-05-30 00:40:03 +00001374 if (0)
1375 VG_(printf)(
1376 "VG_(nuke_all_threads_except): nuking tid %d\n", tid);
jsgf855d93d2003-10-13 22:26:55 +00001377 VG_(proxy_delete)(tid, True);
sewardjccef2e62002-05-29 19:26:32 +00001378 VG_(threads)[tid].status = VgTs_Empty;
thughes6d41bea2004-10-20 12:25:59 +00001379 VG_(threads)[tid].associated_mx = NULL;
1380 VG_(threads)[tid].associated_cv = NULL;
thughes168eb882004-11-13 00:39:37 +00001381 VG_(threads)[tid].stack_base = (Addr)NULL;
thugheseb9b8fb2004-11-12 23:11:21 +00001382 VG_(threads)[tid].stack_size = 0;
jsgf855d93d2003-10-13 22:26:55 +00001383 cleanup_after_thread_exited( tid, True );
sewardjccef2e62002-05-29 19:26:32 +00001384 }
1385}
1386
1387
sewardj20917d82002-05-28 01:36:45 +00001388/* -----------------------------------------------------------
1389 Thread CREATION, JOINAGE and CANCELLATION: REQUESTS
1390 -------------------------------------------------------- */
1391
sewardje663cb92002-04-12 10:26:32 +00001392static
sewardj8ad94e12002-05-29 00:10:20 +00001393void do__cleanup_push ( ThreadId tid, CleanupEntry* cu )
1394{
1395 Int sp;
1396 Char msg_buf[100];
1397 vg_assert(VG_(is_valid_tid)(tid));
1398 sp = VG_(threads)[tid].custack_used;
1399 if (VG_(clo_trace_sched)) {
thughes11975ff2004-06-12 12:58:22 +00001400 switch (cu->type) {
1401 case VgCt_Function:
1402 VG_(sprintf)(msg_buf,
1403 "cleanup_push (fn %p, arg %p) -> slot %d",
1404 cu->data.function.fn, cu->data.function.arg, sp);
1405 break;
1406 case VgCt_Longjmp:
1407 VG_(sprintf)(msg_buf,
1408 "cleanup_push (ub %p) -> slot %d",
1409 cu->data.longjmp.ub, sp);
1410 break;
1411 default:
1412 VG_(sprintf)(msg_buf,
1413 "cleanup_push (unknown type) -> slot %d",
1414 sp);
1415 break;
1416 }
sewardj8ad94e12002-05-29 00:10:20 +00001417 print_sched_event(tid, msg_buf);
1418 }
1419 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1420 if (sp == VG_N_CLEANUPSTACK)
njne427a662002-10-02 11:08:25 +00001421 VG_(core_panic)("do__cleanup_push: VG_N_CLEANUPSTACK is too small."
sewardj8ad94e12002-05-29 00:10:20 +00001422 " Increase and recompile.");
1423 VG_(threads)[tid].custack[sp] = *cu;
1424 sp++;
1425 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001426 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001427}
1428
1429
1430static
1431void do__cleanup_pop ( ThreadId tid, CleanupEntry* cu )
1432{
1433 Int sp;
1434 Char msg_buf[100];
1435 vg_assert(VG_(is_valid_tid)(tid));
1436 sp = VG_(threads)[tid].custack_used;
1437 if (VG_(clo_trace_sched)) {
njn36650922002-10-04 09:18:09 +00001438 VG_(sprintf)(msg_buf, "cleanup_pop from slot %d", sp-1);
sewardj8ad94e12002-05-29 00:10:20 +00001439 print_sched_event(tid, msg_buf);
1440 }
1441 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1442 if (sp == 0) {
njnd3040452003-05-19 15:04:06 +00001443 SET_PTHREQ_RETVAL(tid, -1);
sewardj8ad94e12002-05-29 00:10:20 +00001444 return;
1445 }
1446 sp--;
njn72718642003-07-24 08:45:32 +00001447 VG_TRACK( pre_mem_write, Vg_CorePThread, tid,
njn36650922002-10-04 09:18:09 +00001448 "cleanup pop", (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001449 *cu = VG_(threads)[tid].custack[sp];
njncf45fd42004-11-24 16:30:22 +00001450 VG_TRACK( post_mem_write, Vg_CorePThread, tid,
1451 (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001452 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001453 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001454}
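
/* Client-side usage which ends up in do__cleanup_push / do__cleanup_pop
   (an illustrative sketch; the routing goes via the client-side pthread
   library, and 'unlock_it', 'mx' and 'do_cancellable_work' are invented
   names):

      void unlock_it ( void* m ) { pthread_mutex_unlock(m); }
      ...
      pthread_mutex_lock(&mx);
      pthread_cleanup_push(unlock_it, &mx);  // --> do__cleanup_push, VgCt_Function
      do_cancellable_work();
      pthread_cleanup_pop(1);                // --> do__cleanup_pop; handler then run
      // the handler also runs if the thread is cancelled between push and pop
*/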
1455
1456
1457static
sewardjff42d1d2002-05-22 13:17:31 +00001458void do_pthread_yield ( ThreadId tid )
1459{
1460 Char msg_buf[100];
1461 vg_assert(VG_(is_valid_tid)(tid));
sewardjff42d1d2002-05-22 13:17:31 +00001462 if (VG_(clo_trace_sched)) {
1463 VG_(sprintf)(msg_buf, "yield");
1464 print_sched_event(tid, msg_buf);
1465 }
njnd3040452003-05-19 15:04:06 +00001466 SET_PTHREQ_RETVAL(tid, 0);
sewardjff42d1d2002-05-22 13:17:31 +00001467}
1468
1469
1470static
sewardj20917d82002-05-28 01:36:45 +00001471void do__testcancel ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00001472{
sewardj7989d0c2002-05-28 11:00:01 +00001473 Char msg_buf[100];
sewardjb48e5002002-05-13 00:16:03 +00001474 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001475 if (VG_(clo_trace_sched)) {
1476 VG_(sprintf)(msg_buf, "testcancel");
1477 print_sched_event(tid, msg_buf);
1478 }
sewardj20917d82002-05-28 01:36:45 +00001479 if (/* is there a cancellation pending on this thread? */
1480 VG_(threads)[tid].cancel_pend != NULL
1481 && /* is this thread accepting cancellations? */
1482 VG_(threads)[tid].cancel_st) {
1483 /* Ok, let's do the cancellation. */
1484 make_thread_jump_to_cancelhdlr ( tid );
sewardje663cb92002-04-12 10:26:32 +00001485 } else {
sewardj20917d82002-05-28 01:36:45 +00001486 /* No, we keep going. */
njnd3040452003-05-19 15:04:06 +00001487 SET_PTHREQ_RETVAL(tid, 0);
sewardje663cb92002-04-12 10:26:32 +00001488 }
sewardje663cb92002-04-12 10:26:32 +00001489}
1490
1491
1492static
sewardj20917d82002-05-28 01:36:45 +00001493void do__set_cancelstate ( ThreadId tid, Int state )
1494{
1495 Bool old_st;
sewardj7989d0c2002-05-28 11:00:01 +00001496 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001497 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001498 if (VG_(clo_trace_sched)) {
1499 VG_(sprintf)(msg_buf, "set_cancelstate to %d (%s)", state,
1500 state==PTHREAD_CANCEL_ENABLE
1501 ? "ENABLE"
1502 : (state==PTHREAD_CANCEL_DISABLE ? "DISABLE" : "???"));
1503 print_sched_event(tid, msg_buf);
1504 }
sewardj20917d82002-05-28 01:36:45 +00001505 old_st = VG_(threads)[tid].cancel_st;
1506 if (state == PTHREAD_CANCEL_ENABLE) {
1507 VG_(threads)[tid].cancel_st = True;
1508 } else
1509 if (state == PTHREAD_CANCEL_DISABLE) {
1510 VG_(threads)[tid].cancel_st = False;
1511 } else {
njne427a662002-10-02 11:08:25 +00001512 VG_(core_panic)("do__set_cancelstate");
sewardj20917d82002-05-28 01:36:45 +00001513 }
njnd3040452003-05-19 15:04:06 +00001514 SET_PTHREQ_RETVAL(tid, old_st ? PTHREAD_CANCEL_ENABLE
1515 : PTHREAD_CANCEL_DISABLE);
sewardj20917d82002-05-28 01:36:45 +00001516}
1517
1518
1519static
1520void do__set_canceltype ( ThreadId tid, Int type )
1521{
1522 Bool old_ty;
sewardj7989d0c2002-05-28 11:00:01 +00001523 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001524 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001525 if (VG_(clo_trace_sched)) {
1526 VG_(sprintf)(msg_buf, "set_canceltype to %d (%s)", type,
1527 type==PTHREAD_CANCEL_ASYNCHRONOUS
1528 ? "ASYNCHRONOUS"
1529 : (type==PTHREAD_CANCEL_DEFERRED ? "DEFERRED" : "???"));
1530 print_sched_event(tid, msg_buf);
1531 }
sewardj20917d82002-05-28 01:36:45 +00001532 old_ty = VG_(threads)[tid].cancel_ty;
1533 if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
1534 VG_(threads)[tid].cancel_ty = False;
1535 } else
1536 if (type == PTHREAD_CANCEL_DEFERRED) {
sewardjaf00b6d2002-05-29 23:30:28 +00001537 VG_(threads)[tid].cancel_ty = True;
sewardj20917d82002-05-28 01:36:45 +00001538 } else {
njne427a662002-10-02 11:08:25 +00001539 VG_(core_panic)("do__set_canceltype");
sewardj20917d82002-05-28 01:36:45 +00001540 }
njnd3040452003-05-19 15:04:06 +00001541 SET_PTHREQ_RETVAL(tid, old_ty ? PTHREAD_CANCEL_DEFERRED
sewardj20917d82002-05-28 01:36:45 +00001542 : PTHREAD_CANCEL_ASYNCHRONOUS);
1543}
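
/* The client calls which presumably land in the two routines above, and
   the encoding they use (illustrative; 'old' is an invented name):

      int old;
      pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,      &old);  // cancel_st = False
      pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &old);  // cancel_ty = False

   i.e. cancel_st == True means cancellation is accepted, and
   cancel_ty == True means DEFERRED (False means ASYNCHRONOUS). */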
1544
1545
sewardj7989d0c2002-05-28 11:00:01 +00001546/* Set or get the detach state for thread det. */
sewardj20917d82002-05-28 01:36:45 +00001547static
sewardj7989d0c2002-05-28 11:00:01 +00001548void do__set_or_get_detach ( ThreadId tid,
1549 Int what, ThreadId det )
sewardj20917d82002-05-28 01:36:45 +00001550{
sewardj7989d0c2002-05-28 11:00:01 +00001551 Char msg_buf[100];
1552 /* VG_(printf)("do__set_or_get_detach tid %d what %d det %d\n",
1553 tid, what, det); */
sewardj20917d82002-05-28 01:36:45 +00001554 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001555 if (VG_(clo_trace_sched)) {
1556 VG_(sprintf)(msg_buf, "set_or_get_detach %d (%s) for tid %d", what,
1557 what==0 ? "not-detached" : (
1558 what==1 ? "detached" : (
1559 what==2 ? "fetch old value" : "???")),
1560 det );
1561 print_sched_event(tid, msg_buf);
1562 }
1563
1564 if (!VG_(is_valid_tid)(det)) {
njnd3040452003-05-19 15:04:06 +00001565 SET_PTHREQ_RETVAL(tid, -1);
sewardj7989d0c2002-05-28 11:00:01 +00001566 return;
1567 }
1568
sewardj20917d82002-05-28 01:36:45 +00001569 switch (what) {
1570 case 2: /* get */
njnd3040452003-05-19 15:04:06 +00001571 SET_PTHREQ_RETVAL(tid, VG_(threads)[det].detached ? 1 : 0);
sewardj20917d82002-05-28 01:36:45 +00001572 return;
jsgf855d93d2003-10-13 22:26:55 +00001573 case 1:
sewardj7989d0c2002-05-28 11:00:01 +00001574 VG_(threads)[det].detached = True;
njnd3040452003-05-19 15:04:06 +00001575 SET_PTHREQ_RETVAL(tid, 0);
jsgf855d93d2003-10-13 22:26:55 +00001576 /* wake anyone who was joining on us */
1577 maybe_rendezvous_joiners_and_joinees();
sewardj20917d82002-05-28 01:36:45 +00001578 return;
1579 case 0: /* set not detached */
sewardj7989d0c2002-05-28 11:00:01 +00001580 VG_(threads)[det].detached = False;
njnd3040452003-05-19 15:04:06 +00001581 SET_PTHREQ_RETVAL(tid, 0);
sewardj20917d82002-05-28 01:36:45 +00001582 return;
1583 default:
njne427a662002-10-02 11:08:25 +00001584 VG_(core_panic)("do__set_or_get_detach");
sewardj20917d82002-05-28 01:36:45 +00001585 }
1586}
1587
1588
1589static
1590void do__set_cancelpend ( ThreadId tid,
1591 ThreadId cee,
1592 void (*cancelpend_hdlr)(void*) )
sewardje663cb92002-04-12 10:26:32 +00001593{
1594 Char msg_buf[100];
1595
sewardj20917d82002-05-28 01:36:45 +00001596 vg_assert(VG_(is_valid_tid)(tid));
1597 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1598
thughes97e54d22004-08-15 14:34:02 +00001599 if (!VG_(is_valid_tid)(cee) ||
1600 VG_(threads)[cee].status == VgTs_WaitJoiner) {
sewardj7989d0c2002-05-28 11:00:01 +00001601 if (VG_(clo_trace_sched)) {
1602 VG_(sprintf)(msg_buf,
1603 "set_cancelpend for invalid tid %d", cee);
1604 print_sched_event(tid, msg_buf);
1605 }
njn25e49d8e72002-09-23 09:36:25 +00001606 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001607 "pthread_cancel: target thread does not exist, or invalid");
jsgf855d93d2003-10-13 22:26:55 +00001608 SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
sewardj7989d0c2002-05-28 11:00:01 +00001609 return;
1610 }
sewardj20917d82002-05-28 01:36:45 +00001611
1612 VG_(threads)[cee].cancel_pend = cancelpend_hdlr;
1613
thughes31c1aae2004-10-28 15:56:55 +00001614 /* interrupt a pending syscall if asynchronous cancellation
1615 is enabled for the target thread */
1616 if (VG_(threads)[cee].cancel_st && !VG_(threads)[cee].cancel_ty) {
1617 VG_(proxy_abort_syscall)(cee);
1618 }
jsgf855d93d2003-10-13 22:26:55 +00001619
sewardj20917d82002-05-28 01:36:45 +00001620 if (VG_(clo_trace_sched)) {
1621 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001622 "set_cancelpend (hdlr = %p, set by tid %d)",
sewardj20917d82002-05-28 01:36:45 +00001623 cancelpend_hdlr, tid);
1624 print_sched_event(cee, msg_buf);
1625 }
1626
1627 /* Thread doing the cancelling returns with success. */
njnd3040452003-05-19 15:04:06 +00001628 SET_PTHREQ_RETVAL(tid, 0);
sewardj20917d82002-05-28 01:36:45 +00001629
1630 /* Perhaps we can nuke the cancellee right now? */
thughes513197c2004-06-13 12:07:53 +00001631 if (!VG_(threads)[cee].cancel_ty || /* if PTHREAD_CANCEL_ASYNCHRONOUS */
1632 (VG_(threads)[cee].status != VgTs_Runnable &&
1633 VG_(threads)[cee].status != VgTs_WaitMX)) {
jsgf855d93d2003-10-13 22:26:55 +00001634 do__testcancel(cee);
thughes513197c2004-06-13 12:07:53 +00001635 }
sewardj20917d82002-05-28 01:36:45 +00001636}
1637
1638
1639static
1640void do_pthread_join ( ThreadId tid,
1641 ThreadId jee, void** thread_return )
1642{
1643 Char msg_buf[100];
1644 ThreadId i;
sewardje663cb92002-04-12 10:26:32 +00001645 /* jee, the joinee, is the thread specified as an arg in thread
1646 tid's call to pthread_join. So tid is the join-er. */
sewardjb48e5002002-05-13 00:16:03 +00001647 vg_assert(VG_(is_valid_tid)(tid));
sewardj018f7622002-05-15 21:13:39 +00001648 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001649
1650 if (jee == tid) {
njn25e49d8e72002-09-23 09:36:25 +00001651 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001652 "pthread_join: attempt to join to self");
njnd3040452003-05-19 15:04:06 +00001653 SET_PTHREQ_RETVAL(tid, EDEADLK); /* libc constant, not a kernel one */
jsgf855d93d2003-10-13 22:26:55 +00001654 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001655 return;
1656 }
1657
sewardj20917d82002-05-28 01:36:45 +00001658 /* Flush any completed pairs, so as to make sure what we're looking
1659 at is up-to-date. */
1660 maybe_rendezvous_joiners_and_joinees();
1661
1662 /* Is this a sane request? */
jsgf855d93d2003-10-13 22:26:55 +00001663 if ( ! VG_(is_valid_tid)(jee) ||
1664 VG_(threads)[jee].detached) {
sewardje663cb92002-04-12 10:26:32 +00001665 /* Invalid thread to join to. */
njn25e49d8e72002-09-23 09:36:25 +00001666 VG_(record_pthread_error)( tid,
jsgf855d93d2003-10-13 22:26:55 +00001667 "pthread_join: target thread does not exist, invalid, or detached");
1668 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardje663cb92002-04-12 10:26:32 +00001669 return;
1670 }
1671
sewardj20917d82002-05-28 01:36:45 +00001672 /* Is anyone else already in a join-wait for jee? */
1673 for (i = 1; i < VG_N_THREADS; i++) {
1674 if (i == tid) continue;
1675 if (VG_(threads)[i].status == VgTs_WaitJoinee
1676 && VG_(threads)[i].joiner_jee_tid == jee) {
1677 /* Someone already did join on this thread */
njn25e49d8e72002-09-23 09:36:25 +00001678 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001679 "pthread_join: another thread already "
1680 "in join-wait for target thread");
jsgf855d93d2003-10-13 22:26:55 +00001681 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
1682 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardj20917d82002-05-28 01:36:45 +00001683 return;
1684 }
sewardje663cb92002-04-12 10:26:32 +00001685 }
1686
thughes513197c2004-06-13 12:07:53 +00001687   if (VG_(threads)[tid].cancel_pend != NULL &&
1688 VG_(threads)[tid].cancel_st) {
1689 make_thread_jump_to_cancelhdlr ( tid );
1690 } else {
1691 /* Mark this thread as waiting for the joinee. */
1692 VG_(threads)[tid].status = VgTs_WaitJoinee;
1693 VG_(threads)[tid].joiner_thread_return = thread_return;
1694 VG_(threads)[tid].joiner_jee_tid = jee;
1695
1696 /* Look for matching joiners and joinees and do the right thing. */
1697 maybe_rendezvous_joiners_and_joinees();
1698
1699       /* Return value is irrelevant since this thread becomes
1700          non-runnable.  maybe_rendezvous_joiners_and_joinees() will
1701          cause it to return the right value when it resumes. */
1702
1703 if (VG_(clo_trace_sched)) {
1704 VG_(sprintf)(msg_buf,
1705 "wait for joinee %d (may already be ready)", jee);
1706 print_sched_event(tid, msg_buf);
1707 }
sewardje663cb92002-04-12 10:26:32 +00001708 }
sewardje663cb92002-04-12 10:26:32 +00001709}
1710
1711
sewardj20917d82002-05-28 01:36:45 +00001712/* ( void* ): calling thread waits for joiner and returns the void* to
1713 it. This is one of two ways in which a thread can finally exit --
1714 the other is do__quit. */
sewardje663cb92002-04-12 10:26:32 +00001715static
sewardj20917d82002-05-28 01:36:45 +00001716void do__wait_joiner ( ThreadId tid, void* retval )
sewardje663cb92002-04-12 10:26:32 +00001717{
sewardj20917d82002-05-28 01:36:45 +00001718 Char msg_buf[100];
1719 vg_assert(VG_(is_valid_tid)(tid));
1720 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1721 if (VG_(clo_trace_sched)) {
1722 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001723 "do__wait_joiner(retval = %p) (non-detached thread exit)", retval);
sewardj20917d82002-05-28 01:36:45 +00001724 print_sched_event(tid, msg_buf);
1725 }
1726 VG_(threads)[tid].status = VgTs_WaitJoiner;
1727 VG_(threads)[tid].joinee_retval = retval;
1728 maybe_rendezvous_joiners_and_joinees();
1729}
1730
1731
1732/* ( no-args ): calling thread disappears from the system forever.
1733 Reclaim resources. */
1734static
1735void do__quit ( ThreadId tid )
1736{
1737 Char msg_buf[100];
1738 vg_assert(VG_(is_valid_tid)(tid));
1739 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1740 VG_(threads)[tid].status = VgTs_Empty; /* bye! */
jsgf855d93d2003-10-13 22:26:55 +00001741 cleanup_after_thread_exited ( tid, False );
sewardj20917d82002-05-28 01:36:45 +00001742 if (VG_(clo_trace_sched)) {
sewardj7989d0c2002-05-28 11:00:01 +00001743 VG_(sprintf)(msg_buf, "do__quit (detached thread exit)");
sewardj20917d82002-05-28 01:36:45 +00001744 print_sched_event(tid, msg_buf);
1745 }
jsgf855d93d2003-10-13 22:26:55 +00001746 maybe_rendezvous_joiners_and_joinees();
sewardj20917d82002-05-28 01:36:45 +00001747 /* Return value is irrelevant; this thread will not get
1748 rescheduled. */
1749}
1750
1751
nethercote6b9c8472004-09-13 13:16:40 +00001752/* Should never be entered. If it is, it will be on the simulated CPU. */
sewardj20917d82002-05-28 01:36:45 +00001753static
1754void do__apply_in_new_thread_bogusRA ( void )
1755{
njne427a662002-10-02 11:08:25 +00001756 VG_(core_panic)("do__apply_in_new_thread_bogusRA");
sewardj20917d82002-05-28 01:36:45 +00001757}
1758
1759/* (Fn, Arg): Create a new thread and run Fn applied to Arg in it. Fn
1760 MUST NOT return -- ever. Eventually it will do either __QUIT or
1761 __WAIT_JOINER. Return the child tid to the parent. */
1762static
1763void do__apply_in_new_thread ( ThreadId parent_tid,
1764 void* (*fn)(void *),
thughesdaa34562004-06-27 12:48:53 +00001765 void* arg,
1766 StackInfo *si )
sewardj20917d82002-05-28 01:36:45 +00001767{
sewardje663cb92002-04-12 10:26:32 +00001768 Addr new_stack;
1769 UInt new_stk_szb;
1770 ThreadId tid;
1771 Char msg_buf[100];
1772
1773 /* Paranoia ... */
1774 vg_assert(sizeof(pthread_t) == sizeof(UInt));
1775
sewardj018f7622002-05-15 21:13:39 +00001776 vg_assert(VG_(threads)[parent_tid].status != VgTs_Empty);
sewardje663cb92002-04-12 10:26:32 +00001777
sewardj1e8cdc92002-04-18 11:37:52 +00001778 tid = vg_alloc_ThreadState();
sewardje663cb92002-04-12 10:26:32 +00001779
1780 /* If we've created the main thread's tid, we're in deep trouble :) */
sewardj6072c362002-04-19 14:40:57 +00001781 vg_assert(tid != 1);
nethercote36881a22004-08-04 14:03:16 +00001782 vg_assert(is_valid_or_empty_tid(tid));
sewardje663cb92002-04-12 10:26:32 +00001783
sewardjc4a810d2002-11-13 22:25:51 +00001784 /* do this early, before the child gets any memory writes */
1785 VG_TRACK ( post_thread_create, parent_tid, tid );
1786
sewardjf6374322002-11-13 22:35:55 +00001787 /* Create new thread with default attrs:
1788 deferred cancellation, not detached
1789 */
1790 mostly_clear_thread_record(tid);
1791 VG_(threads)[tid].status = VgTs_Runnable;
1792
sewardj2a99cf62004-11-24 10:44:19 +00001793 /* Copy the parent's CPU state into the child's. */
nethercotef9b59412004-09-10 15:33:32 +00001794 VGA_(setup_child)( &VG_(threads)[tid].arch,
1795 &VG_(threads)[parent_tid].arch );
sewardje663cb92002-04-12 10:26:32 +00001796
1797 /* Consider allocating the child a stack, if the one it already has
1798 is inadequate. */
thughes5e5e2132004-11-16 19:40:05 +00001799 new_stk_szb = PGROUNDUP(si->size + VG_AR_CLIENT_STACKBASE_REDZONE_SZB + si->guardsize);
1800
thughesdaa34562004-06-27 12:48:53 +00001801 VG_(threads)[tid].stack_guard_size = si->guardsize;
sewardje663cb92002-04-12 10:26:32 +00001802
sewardj018f7622002-05-15 21:13:39 +00001803 if (new_stk_szb > VG_(threads)[tid].stack_size) {
sewardje663cb92002-04-12 10:26:32 +00001804 /* Again, for good measure :) We definitely don't want to be
1805 allocating a stack for the main thread. */
sewardj6072c362002-04-19 14:40:57 +00001806 vg_assert(tid != 1);
thughesdaa34562004-06-27 12:48:53 +00001807 if (VG_(threads)[tid].stack_size > 0)
1808 VG_(client_free)(VG_(threads)[tid].stack_base);
fitzhardinge98abfc72003-12-16 02:05:15 +00001809 new_stack = VG_(client_alloc)(0, new_stk_szb,
nethercotee567e702004-07-10 17:49:17 +00001810 VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
fitzhardinge98abfc72003-12-16 02:05:15 +00001811 SF_STACK);
nethercote8e9eab02004-07-11 18:01:06 +00001812 // Given the low number of threads Valgrind can handle, stack
1813 // allocation should pretty much always succeed, so having an
1814 // assertion here isn't too bad. However, probably better would be
1815 // this:
1816 //
1817 // if (0 == new_stack)
1818 // SET_PTHREQ_RETVAL(parent_tid, -VKI_EAGAIN);
1819 //
nethercotee567e702004-07-10 17:49:17 +00001820 vg_assert(0 != new_stack);
sewardj018f7622002-05-15 21:13:39 +00001821 VG_(threads)[tid].stack_base = new_stack;
1822 VG_(threads)[tid].stack_size = new_stk_szb;
1823 VG_(threads)[tid].stack_highest_word
sewardje663cb92002-04-12 10:26:32 +00001824 = new_stack + new_stk_szb
sewardj1e8cdc92002-04-18 11:37:52 +00001825           - VG_AR_CLIENT_STACKBASE_REDZONE_SZB; /* -4 ??? */
sewardje663cb92002-04-12 10:26:32 +00001826 }
sewardj1e8cdc92002-04-18 11:37:52 +00001827
njn25e49d8e72002-09-23 09:36:25 +00001828 /* Having got memory to hold the thread's stack:
1829 - set %esp as base + size
1830 - mark everything below %esp inaccessible
1831 - mark redzone at stack end inaccessible
1832 */
njnd3040452003-05-19 15:04:06 +00001833 SET_PTHREQ_ESP(tid, VG_(threads)[tid].stack_base
1834 + VG_(threads)[tid].stack_size
1835 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
sewardj1e8cdc92002-04-18 11:37:52 +00001836
njn25e49d8e72002-09-23 09:36:25 +00001837 VG_TRACK ( die_mem_stack, VG_(threads)[tid].stack_base,
thughesdaa34562004-06-27 12:48:53 +00001838 VG_(threads)[tid].stack_size
1839 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
njncf45fd42004-11-24 16:30:22 +00001840 VG_TRACK ( ban_mem_stack, STACK_PTR(VG_(threads)[tid].arch),
njn25e49d8e72002-09-23 09:36:25 +00001841 VG_AR_CLIENT_STACKBASE_REDZONE_SZB );
sewardje663cb92002-04-12 10:26:32 +00001842
nethercote6b9c8472004-09-13 13:16:40 +00001843 VGA_(thread_initial_stack)(tid, (UWord)arg,
1844 (Addr)&do__apply_in_new_thread_bogusRA);
sewardje663cb92002-04-12 10:26:32 +00001845
1846 /* this is where we start */
njncf45fd42004-11-24 16:30:22 +00001847 INSTR_PTR(VG_(threads)[tid].arch) = (UWord)fn;
sewardje663cb92002-04-12 10:26:32 +00001848
sewardj8937c812002-04-12 20:12:20 +00001849 if (VG_(clo_trace_sched)) {
njn25e49d8e72002-09-23 09:36:25 +00001850 VG_(sprintf)(msg_buf, "new thread, created by %d", parent_tid );
sewardje663cb92002-04-12 10:26:32 +00001851 print_sched_event(tid, msg_buf);
1852 }
1853
fitzhardingef7866182004-03-16 22:09:12 +00001854 /* Start the thread with all signals blocked; it's up to the client
1855 code to set the right signal mask when it's ready. */
nethercote73b526f2004-10-31 18:48:21 +00001856 VG_(sigfillset)(&VG_(threads)[tid].sig_mask);
jsgf855d93d2003-10-13 22:26:55 +00001857
1858 /* Now that the signal mask is set up, create a proxy LWP for this thread */
1859 VG_(proxy_create)(tid);
1860
1861 /* Set the proxy's signal mask */
1862 VG_(proxy_setsigmask)(tid);
sewardjb48e5002002-05-13 00:16:03 +00001863
sewardj20917d82002-05-28 01:36:45 +00001864 /* return child's tid to parent */
njnd3040452003-05-19 15:04:06 +00001865 SET_PTHREQ_RETVAL(parent_tid, tid); /* success */
sewardje663cb92002-04-12 10:26:32 +00001866}
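
/* Rough shape of the client-side call which reaches the routine above
   (a sketch; the precise routing lives in the client-side library, and
   'child', 'fn', 'arg' and 'stackinfo' are just illustrative names):

      pthread_t child;
      pthread_create(&child, NULL, fn, arg);
         // --> do__apply_in_new_thread(parent_tid, fn, arg, &stackinfo)
         // parent gets the new tid back via SET_PTHREQ_RETVAL(parent_tid, tid);
         // fn(arg) then starts on the child's freshly built stack, with
         // do__apply_in_new_thread_bogusRA as the (never-used) return address.
*/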
1867
1868
sewardj604ec3c2002-04-18 22:38:41 +00001869/* -----------------------------------------------------------
1870 MUTEXes
1871 -------------------------------------------------------- */
1872
rjwalsh7109a8c2004-09-02 00:31:02 +00001873/* vg_pthread_mutex_t is defined in core.h.
sewardj604ec3c2002-04-18 22:38:41 +00001874
nethercote1f0173b2004-02-28 15:40:36 +00001875 The initializers zero everything, except possibly the fourth word,
1876 which in vg_pthread_mutex_t is the __vg_m_kind field. It gets set to one
1877 of PTHREAD_MUTEX_{TIMED,RECURSIVE,ERRORCHECK,ADAPTIVE}_NP
sewardj604ec3c2002-04-18 22:38:41 +00001878
sewardj6072c362002-04-19 14:40:57 +00001879 How we use it:
sewardj604ec3c2002-04-18 22:38:41 +00001880
nethercote1f0173b2004-02-28 15:40:36 +00001881 __vg_m_kind never changes and indicates whether or not it is recursive.
sewardj6072c362002-04-19 14:40:57 +00001882
nethercote1f0173b2004-02-28 15:40:36 +00001883 __vg_m_count indicates the lock count; if 0, the mutex is not owned by
sewardj6072c362002-04-19 14:40:57 +00001884 anybody.
1885
nethercote1f0173b2004-02-28 15:40:36 +00001886 __vg_m_owner has a ThreadId value stuffed into it. We carefully arrange
sewardj6072c362002-04-19 14:40:57 +00001887 that ThreadId == 0 is invalid (VG_INVALID_THREADID), so that
1888 statically initialised mutexes correctly appear
1889 to belong to nobody.
1890
nethercote1f0173b2004-02-28 15:40:36 +00001891   In summary, a not-in-use mutex is distinguished by having __vg_m_owner
1892 == 0 (VG_INVALID_THREADID) and __vg_m_count == 0 too. If one of those
sewardj6072c362002-04-19 14:40:57 +00001893 conditions holds, the other should too.
1894
1895 There is no linked list of threads waiting for this mutex. Instead
1896   a thread in WaitMX state points at the mutex with its associated_mx
1897   field.  This makes _unlock() inefficient, but keeps it simple to
1898   implement the right semantics vis-a-vis signals.
sewardje663cb92002-04-12 10:26:32 +00001899
sewardj604ec3c2002-04-18 22:38:41 +00001900 We don't have to deal with mutex initialisation; the client side
sewardj6072c362002-04-19 14:40:57 +00001901 deals with that for us.
1902*/
sewardje663cb92002-04-12 10:26:32 +00001903
sewardj3b5d8862002-04-20 13:53:23 +00001904/* Helper fns ... */
thughese321d492004-10-17 15:00:20 +00001905static
1906void do_pthread_mutex_timedlock_TIMEOUT ( ThreadId tid )
1907{
1908 Char msg_buf[100];
1909 vg_pthread_mutex_t* mx;
1910
1911 vg_assert(VG_(is_valid_tid)(tid)
1912 && VG_(threads)[tid].status == VgTs_WaitMX
1913 && VG_(threads)[tid].awaken_at != 0xFFFFFFFF);
1914 mx = VG_(threads)[tid].associated_mx;
1915 vg_assert(mx != NULL);
1916
1917 VG_(threads)[tid].status = VgTs_Runnable;
1918 SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_mutex_lock return value */
1919 VG_(threads)[tid].associated_mx = NULL;
1920
1921 if (VG_(clo_trace_pthread_level) >= 1) {
1922 VG_(sprintf)(msg_buf, "pthread_mutex_timedlock mx %p: TIMEOUT", mx);
1923 print_pthread_event(tid, msg_buf);
1924 }
1925}
1926
1927
sewardj3b5d8862002-04-20 13:53:23 +00001928static
nethercote1f0173b2004-02-28 15:40:36 +00001929void release_one_thread_waiting_on_mutex ( vg_pthread_mutex_t* mutex,
sewardj3b5d8862002-04-20 13:53:23 +00001930 Char* caller )
1931{
1932 Int i;
1933 Char msg_buf[100];
1934
1935 /* Find some arbitrary thread waiting on this mutex, and make it
1936 runnable. If none are waiting, mark the mutex as not held. */
1937 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00001938 if (VG_(threads)[i].status == VgTs_Empty)
sewardj3b5d8862002-04-20 13:53:23 +00001939 continue;
sewardj018f7622002-05-15 21:13:39 +00001940 if (VG_(threads)[i].status == VgTs_WaitMX
1941 && VG_(threads)[i].associated_mx == mutex)
sewardj3b5d8862002-04-20 13:53:23 +00001942 break;
1943 }
1944
nethercote1f0173b2004-02-28 15:40:36 +00001945 VG_TRACK( post_mutex_unlock, (ThreadId)mutex->__vg_m_owner, mutex );
sewardj0af43bc2002-10-22 04:30:35 +00001946
sewardj3b5d8862002-04-20 13:53:23 +00001947 vg_assert(i <= VG_N_THREADS);
1948 if (i == VG_N_THREADS) {
1949 /* Nobody else is waiting on it. */
nethercote1f0173b2004-02-28 15:40:36 +00001950 mutex->__vg_m_count = 0;
1951 mutex->__vg_m_owner = VG_INVALID_THREADID;
sewardj3b5d8862002-04-20 13:53:23 +00001952 } else {
1953 /* Notionally transfer the hold to thread i, whose
1954 pthread_mutex_lock() call now returns with 0 (success). */
1955 /* The .count is already == 1. */
sewardj018f7622002-05-15 21:13:39 +00001956 vg_assert(VG_(threads)[i].associated_mx == mutex);
nethercote1f0173b2004-02-28 15:40:36 +00001957 mutex->__vg_m_owner = (/*_pthread_descr*/void*)i;
sewardj018f7622002-05-15 21:13:39 +00001958 VG_(threads)[i].status = VgTs_Runnable;
1959 VG_(threads)[i].associated_mx = NULL;
sewardj5f07b662002-04-23 16:52:51 +00001960 /* m_edx already holds pth_mx_lock() success (0) */
sewardj3b5d8862002-04-20 13:53:23 +00001961
sewardj0af43bc2002-10-22 04:30:35 +00001962 VG_TRACK( post_mutex_lock, (ThreadId)i, mutex);
1963
sewardj3b5d8862002-04-20 13:53:23 +00001964 if (VG_(clo_trace_pthread_level) >= 1) {
1965 VG_(sprintf)(msg_buf, "%s mx %p: RESUME",
1966 caller, mutex );
1967 print_pthread_event(i, msg_buf);
1968 }
1969 }
1970}
1971
sewardje663cb92002-04-12 10:26:32 +00001972
1973static
sewardj30671ff2002-04-21 00:13:57 +00001974void do_pthread_mutex_lock( ThreadId tid,
1975 Bool is_trylock,
thughese321d492004-10-17 15:00:20 +00001976 vg_pthread_mutex_t* mutex,
1977 UInt ms_end )
sewardje663cb92002-04-12 10:26:32 +00001978{
sewardj30671ff2002-04-21 00:13:57 +00001979 Char msg_buf[100];
1980 Char* caller
sewardj8ccc2be2002-05-10 20:26:37 +00001981 = is_trylock ? "pthread_mutex_trylock"
1982 : "pthread_mutex_lock ";
sewardje663cb92002-04-12 10:26:32 +00001983
thughese321d492004-10-17 15:00:20 +00001984 /* If ms_end == 0xFFFFFFFF, wait forever (no timeout). Otherwise,
1985 ms_end is the ending millisecond. */
1986
sewardj604ec3c2002-04-18 22:38:41 +00001987 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj30671ff2002-04-21 00:13:57 +00001988 VG_(sprintf)(msg_buf, "%s mx %p ...", caller, mutex );
sewardj604ec3c2002-04-18 22:38:41 +00001989 print_pthread_event(tid, msg_buf);
1990 }
1991
1992 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00001993 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00001994 && VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001995
1996 /* POSIX doesn't mandate this, but for sanity ... */
1997 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00001998 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001999 "pthread_mutex_lock/trylock: mutex is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002000 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardje663cb92002-04-12 10:26:32 +00002001 return;
2002 }
2003
sewardj604ec3c2002-04-18 22:38:41 +00002004 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00002005 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002006# ifndef GLIBC_2_1
sewardj604ec3c2002-04-18 22:38:41 +00002007 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002008 case PTHREAD_MUTEX_ADAPTIVE_NP:
2009# endif
sewardja1679dd2002-05-10 22:31:40 +00002010# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002011 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002012# endif
sewardj604ec3c2002-04-18 22:38:41 +00002013 case PTHREAD_MUTEX_RECURSIVE_NP:
2014 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002015 if (mutex->__vg_m_count >= 0) break;
sewardj604ec3c2002-04-18 22:38:41 +00002016 /* else fall thru */
2017 default:
njn25e49d8e72002-09-23 09:36:25 +00002018 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002019 "pthread_mutex_lock/trylock: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002020 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002021 return;
sewardje663cb92002-04-12 10:26:32 +00002022 }
2023
nethercote1f0173b2004-02-28 15:40:36 +00002024 if (mutex->__vg_m_count > 0) {
2025 if (!VG_(is_valid_tid)((ThreadId)mutex->__vg_m_owner)) {
fitzhardinge47735af2004-01-21 01:27:27 +00002026 VG_(record_pthread_error)( tid,
2027 "pthread_mutex_lock/trylock: mutex has invalid owner");
2028 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
2029 return;
2030 }
sewardjf8f819e2002-04-17 23:21:37 +00002031
2032 /* Someone has it already. */
thughese321d492004-10-17 15:00:20 +00002033 if ((ThreadId)mutex->__vg_m_owner == tid && ms_end == 0xFFFFFFFF) {
sewardjf8f819e2002-04-17 23:21:37 +00002034 /* It's locked -- by me! */
nethercote1f0173b2004-02-28 15:40:36 +00002035 if (mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
sewardjf8f819e2002-04-17 23:21:37 +00002036 /* return 0 (success). */
nethercote1f0173b2004-02-28 15:40:36 +00002037 mutex->__vg_m_count++;
njnd3040452003-05-19 15:04:06 +00002038 SET_PTHREQ_RETVAL(tid, 0);
sewardj853f55d2002-04-26 00:27:53 +00002039 if (0)
2040 VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n",
nethercote1f0173b2004-02-28 15:40:36 +00002041 tid, mutex, mutex->__vg_m_count);
sewardjf8f819e2002-04-17 23:21:37 +00002042 return;
2043 } else {
sewardj30671ff2002-04-21 00:13:57 +00002044 if (is_trylock)
njnd3040452003-05-19 15:04:06 +00002045 SET_PTHREQ_RETVAL(tid, EBUSY);
sewardj30671ff2002-04-21 00:13:57 +00002046 else
njnd3040452003-05-19 15:04:06 +00002047 SET_PTHREQ_RETVAL(tid, EDEADLK);
sewardjf8f819e2002-04-17 23:21:37 +00002048 return;
2049 }
2050 } else {
sewardj6072c362002-04-19 14:40:57 +00002051 /* Someone else has it; we have to wait. Mark ourselves
2052 thusly. */
nethercote1f0173b2004-02-28 15:40:36 +00002053 /* GUARD: __vg_m_count > 0 && __vg_m_owner is valid */
sewardj30671ff2002-04-21 00:13:57 +00002054 if (is_trylock) {
2055 /* caller is polling; so return immediately. */
njnd3040452003-05-19 15:04:06 +00002056 SET_PTHREQ_RETVAL(tid, EBUSY);
sewardj30671ff2002-04-21 00:13:57 +00002057 } else {
sewardjdca84112002-11-13 22:29:34 +00002058 VG_TRACK ( pre_mutex_lock, tid, mutex );
2059
sewardj018f7622002-05-15 21:13:39 +00002060 VG_(threads)[tid].status = VgTs_WaitMX;
2061 VG_(threads)[tid].associated_mx = mutex;
thughese321d492004-10-17 15:00:20 +00002062 VG_(threads)[tid].awaken_at = ms_end;
2063 if (ms_end != 0xFFFFFFFF)
2064 add_timeout(tid, ms_end);
njnd3040452003-05-19 15:04:06 +00002065 SET_PTHREQ_RETVAL(tid, 0); /* pth_mx_lock success value */
sewardj30671ff2002-04-21 00:13:57 +00002066 if (VG_(clo_trace_pthread_level) >= 1) {
2067 VG_(sprintf)(msg_buf, "%s mx %p: BLOCK",
2068 caller, mutex );
2069 print_pthread_event(tid, msg_buf);
2070 }
2071 }
sewardje663cb92002-04-12 10:26:32 +00002072 return;
2073 }
sewardjf8f819e2002-04-17 23:21:37 +00002074
sewardje663cb92002-04-12 10:26:32 +00002075 } else {
sewardj6072c362002-04-19 14:40:57 +00002076 /* Nobody owns it. Sanity check ... */
nethercote1f0173b2004-02-28 15:40:36 +00002077 vg_assert(mutex->__vg_m_owner == VG_INVALID_THREADID);
sewardjdca84112002-11-13 22:29:34 +00002078
2079 VG_TRACK ( pre_mutex_lock, tid, mutex );
2080
sewardjf8f819e2002-04-17 23:21:37 +00002081 /* We get it! [for the first time]. */
nethercote1f0173b2004-02-28 15:40:36 +00002082 mutex->__vg_m_count = 1;
2083 mutex->__vg_m_owner = (/*_pthread_descr*/void*)tid;
njn25e49d8e72002-09-23 09:36:25 +00002084
sewardje663cb92002-04-12 10:26:32 +00002085 /* return 0 (success). */
njnd3040452003-05-19 15:04:06 +00002086 SET_PTHREQ_RETVAL(tid, 0);
sewardjf8f819e2002-04-17 23:21:37 +00002087
njnd3040452003-05-19 15:04:06 +00002088 VG_TRACK( post_mutex_lock, tid, mutex);
2089 }
sewardje663cb92002-04-12 10:26:32 +00002090}
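
/* Summary of the interesting outcomes implemented above, seen from the
   client (illustrative; the 'mx' names are invented):

      pthread_mutex_lock(&mx);       // relock by owner, non-recursive --> EDEADLK
      pthread_mutex_trylock(&mx);    // relock by owner, non-recursive --> EBUSY
      pthread_mutex_trylock(&mx2);   // held by another thread         --> EBUSY
      pthread_mutex_lock(&rec_mx);   // relock by owner, recursive     --> 0, count++
      pthread_mutex_lock(&free_mx);  // currently unheld               --> 0, we own it
*/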
2091
2092
2093static
2094void do_pthread_mutex_unlock ( ThreadId tid,
nethercote1f0173b2004-02-28 15:40:36 +00002095 vg_pthread_mutex_t* mutex )
sewardje663cb92002-04-12 10:26:32 +00002096{
sewardj3b5d8862002-04-20 13:53:23 +00002097 Char msg_buf[100];
sewardje663cb92002-04-12 10:26:32 +00002098
sewardj45b4b372002-04-16 22:50:32 +00002099 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj3b5d8862002-04-20 13:53:23 +00002100 VG_(sprintf)(msg_buf, "pthread_mutex_unlock mx %p ...", mutex );
sewardj8937c812002-04-12 20:12:20 +00002101 print_pthread_event(tid, msg_buf);
2102 }
2103
sewardj604ec3c2002-04-18 22:38:41 +00002104 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002105 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002106 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj604ec3c2002-04-18 22:38:41 +00002107
2108 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002109 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002110 "pthread_mutex_unlock: mutex is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002111 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002112 return;
2113 }
2114
2115 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00002116 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002117# ifndef GLIBC_2_1
sewardj604ec3c2002-04-18 22:38:41 +00002118 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002119 case PTHREAD_MUTEX_ADAPTIVE_NP:
2120# endif
sewardja1679dd2002-05-10 22:31:40 +00002121# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002122 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002123# endif
sewardj604ec3c2002-04-18 22:38:41 +00002124 case PTHREAD_MUTEX_RECURSIVE_NP:
2125 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002126 if (mutex->__vg_m_count >= 0) break;
sewardj604ec3c2002-04-18 22:38:41 +00002127 /* else fall thru */
2128 default:
njn25e49d8e72002-09-23 09:36:25 +00002129 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002130 "pthread_mutex_unlock: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002131 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002132 return;
2133 }
sewardje663cb92002-04-12 10:26:32 +00002134
2135 /* Barf if we don't currently hold the mutex. */
nethercote1f0173b2004-02-28 15:40:36 +00002136 if (mutex->__vg_m_count == 0) {
sewardj4dced352002-06-04 22:54:20 +00002137 /* nobody holds it */
njn25e49d8e72002-09-23 09:36:25 +00002138 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002139 "pthread_mutex_unlock: mutex is not locked");
njnd3040452003-05-19 15:04:06 +00002140 SET_PTHREQ_RETVAL(tid, EPERM);
sewardj4dced352002-06-04 22:54:20 +00002141 return;
2142 }
2143
nethercote1f0173b2004-02-28 15:40:36 +00002144 if ((ThreadId)mutex->__vg_m_owner != tid) {
sewardj4dced352002-06-04 22:54:20 +00002145 /* we don't hold it */
njn25e49d8e72002-09-23 09:36:25 +00002146 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002147 "pthread_mutex_unlock: mutex is locked by a different thread");
njnd3040452003-05-19 15:04:06 +00002148 SET_PTHREQ_RETVAL(tid, EPERM);
sewardje663cb92002-04-12 10:26:32 +00002149 return;
2150 }
2151
sewardjf8f819e2002-04-17 23:21:37 +00002152 /* If it's a multiply-locked recursive mutex, just decrement the
2153 lock count and return. */
nethercote1f0173b2004-02-28 15:40:36 +00002154 if (mutex->__vg_m_count > 1) {
2155 vg_assert(mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
2156 mutex->__vg_m_count --;
njnd3040452003-05-19 15:04:06 +00002157 SET_PTHREQ_RETVAL(tid, 0); /* success */
sewardjf8f819e2002-04-17 23:21:37 +00002158 return;
2159 }
2160
sewardj604ec3c2002-04-18 22:38:41 +00002161 /* Now we're sure it is locked exactly once, and by the thread who
sewardjf8f819e2002-04-17 23:21:37 +00002162 is now doing an unlock on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002163 vg_assert(mutex->__vg_m_count == 1);
2164 vg_assert((ThreadId)mutex->__vg_m_owner == tid);
sewardjf8f819e2002-04-17 23:21:37 +00002165
sewardj3b5d8862002-04-20 13:53:23 +00002166 /* Release at max one thread waiting on this mutex. */
2167 release_one_thread_waiting_on_mutex ( mutex, "pthread_mutex_lock" );
sewardje663cb92002-04-12 10:26:32 +00002168
sewardj3b5d8862002-04-20 13:53:23 +00002169 /* Our (tid's) pth_unlock() returns with 0 (success). */
njnd3040452003-05-19 15:04:06 +00002170 SET_PTHREQ_RETVAL(tid, 0); /* Success. */
sewardje663cb92002-04-12 10:26:32 +00002171}
2172
2173
sewardj6072c362002-04-19 14:40:57 +00002174/* -----------------------------------------------------------
2175 CONDITION VARIABLES
2176 -------------------------------------------------------- */
sewardje663cb92002-04-12 10:26:32 +00002177
rjwalsh7109a8c2004-09-02 00:31:02 +00002178/* The relevant type (vg_pthread_cond_t) is in core.h.
sewardj77e466c2002-04-14 02:29:29 +00002179
nethercote1f0173b2004-02-28 15:40:36 +00002180 We don't use any fields of vg_pthread_cond_t for anything at all.
2181 Only the identity of the CVs is important. (Actually, we initialise
2182 __vg_c_waiting in pthread_cond_init() to VG_INVALID_THREADID.)
sewardj6072c362002-04-19 14:40:57 +00002183
2184 Linux pthreads supports no attributes on condition variables, so we
sewardj3b5d8862002-04-20 13:53:23 +00002185 don't need to think too hard there. */
sewardj6072c362002-04-19 14:40:57 +00002186
sewardj77e466c2002-04-14 02:29:29 +00002187
sewardj5f07b662002-04-23 16:52:51 +00002188static
2189void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid )
2190{
2191 Char msg_buf[100];
nethercote1f0173b2004-02-28 15:40:36 +00002192 vg_pthread_mutex_t* mx;
2193 vg_pthread_cond_t* cv;
sewardj5f07b662002-04-23 16:52:51 +00002194
sewardjb48e5002002-05-13 00:16:03 +00002195 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002196 && VG_(threads)[tid].status == VgTs_WaitCV
2197 && VG_(threads)[tid].awaken_at != 0xFFFFFFFF);
2198 mx = VG_(threads)[tid].associated_mx;
sewardj5f07b662002-04-23 16:52:51 +00002199 vg_assert(mx != NULL);
sewardj018f7622002-05-15 21:13:39 +00002200 cv = VG_(threads)[tid].associated_cv;
sewardj5f07b662002-04-23 16:52:51 +00002201 vg_assert(cv != NULL);
2202
nethercote1f0173b2004-02-28 15:40:36 +00002203 if (mx->__vg_m_owner == VG_INVALID_THREADID) {
sewardj5f07b662002-04-23 16:52:51 +00002204 /* Currently unheld; hand it out to thread tid. */
nethercote1f0173b2004-02-28 15:40:36 +00002205 vg_assert(mx->__vg_m_count == 0);
sewardj018f7622002-05-15 21:13:39 +00002206 VG_(threads)[tid].status = VgTs_Runnable;
njnd3040452003-05-19 15:04:06 +00002207 SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
sewardj018f7622002-05-15 21:13:39 +00002208 VG_(threads)[tid].associated_cv = NULL;
2209 VG_(threads)[tid].associated_mx = NULL;
nethercote1f0173b2004-02-28 15:40:36 +00002210 mx->__vg_m_owner = (/*_pthread_descr*/void*)tid;
2211 mx->__vg_m_count = 1;
sewardj5f07b662002-04-23 16:52:51 +00002212
sewardj0af43bc2002-10-22 04:30:35 +00002213 VG_TRACK( post_mutex_lock, tid, mx );
2214
sewardj5f07b662002-04-23 16:52:51 +00002215 if (VG_(clo_trace_pthread_level) >= 1) {
sewardjc3bd5f52002-05-01 03:24:23 +00002216 VG_(sprintf)(msg_buf,
nethercote1f0173b2004-02-28 15:40:36 +00002217 "pthread_cond_timedwait cv %p: TIMEOUT with mx %p",
sewardjc3bd5f52002-05-01 03:24:23 +00002218 cv, mx );
sewardj5f07b662002-04-23 16:52:51 +00002219 print_pthread_event(tid, msg_buf);
2220 }
2221 } else {
2222 /* Currently held. Make thread tid be blocked on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002223 vg_assert(mx->__vg_m_count > 0);
sewardjdca84112002-11-13 22:29:34 +00002224 VG_TRACK( pre_mutex_lock, tid, mx );
2225
sewardj018f7622002-05-15 21:13:39 +00002226 VG_(threads)[tid].status = VgTs_WaitMX;
njnd3040452003-05-19 15:04:06 +00002227 SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
sewardj018f7622002-05-15 21:13:39 +00002228 VG_(threads)[tid].associated_cv = NULL;
2229 VG_(threads)[tid].associated_mx = mx;
sewardj5f07b662002-04-23 16:52:51 +00002230 if (VG_(clo_trace_pthread_level) >= 1) {
2231 VG_(sprintf)(msg_buf,
nethercote1f0173b2004-02-28 15:40:36 +00002232 "pthread_cond_timedwait cv %p: TIMEOUT -> BLOCK for mx %p",
sewardj5f07b662002-04-23 16:52:51 +00002233 cv, mx );
2234 print_pthread_event(tid, msg_buf);
2235 }
sewardj5f07b662002-04-23 16:52:51 +00002236 }
2237}
2238
2239
sewardj3b5d8862002-04-20 13:53:23 +00002240static
nethercote1f0173b2004-02-28 15:40:36 +00002241void release_N_threads_waiting_on_cond ( vg_pthread_cond_t* cond,
sewardj3b5d8862002-04-20 13:53:23 +00002242 Int n_to_release,
2243 Char* caller )
2244{
2245 Int i;
2246 Char msg_buf[100];
nethercote1f0173b2004-02-28 15:40:36 +00002247 vg_pthread_mutex_t* mx;
sewardj3b5d8862002-04-20 13:53:23 +00002248
2249 while (True) {
2250 if (n_to_release == 0)
2251 return;
2252
2253 /* Find a thread waiting on this CV. */
2254 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00002255 if (VG_(threads)[i].status == VgTs_Empty)
sewardj3b5d8862002-04-20 13:53:23 +00002256 continue;
sewardj018f7622002-05-15 21:13:39 +00002257 if (VG_(threads)[i].status == VgTs_WaitCV
2258 && VG_(threads)[i].associated_cv == cond)
sewardj3b5d8862002-04-20 13:53:23 +00002259 break;
2260 }
2261 vg_assert(i <= VG_N_THREADS);
2262
2263 if (i == VG_N_THREADS) {
2264 /* Nobody else is waiting on it. */
2265 return;
2266 }
2267
sewardj018f7622002-05-15 21:13:39 +00002268 mx = VG_(threads)[i].associated_mx;
sewardj3b5d8862002-04-20 13:53:23 +00002269 vg_assert(mx != NULL);
2270
sewardjdca84112002-11-13 22:29:34 +00002271 VG_TRACK( pre_mutex_lock, i, mx );
2272
nethercote1f0173b2004-02-28 15:40:36 +00002273 if (mx->__vg_m_owner == VG_INVALID_THREADID) {
sewardj3b5d8862002-04-20 13:53:23 +00002274 /* Currently unheld; hand it out to thread i. */
nethercote1f0173b2004-02-28 15:40:36 +00002275 vg_assert(mx->__vg_m_count == 0);
sewardj018f7622002-05-15 21:13:39 +00002276 VG_(threads)[i].status = VgTs_Runnable;
2277 VG_(threads)[i].associated_cv = NULL;
2278 VG_(threads)[i].associated_mx = NULL;
nethercote1f0173b2004-02-28 15:40:36 +00002279 mx->__vg_m_owner = (/*_pthread_descr*/void*)i;
2280 mx->__vg_m_count = 1;
sewardj5f07b662002-04-23 16:52:51 +00002281 /* .m_edx already holds pth_cond_wait success value (0) */
sewardj3b5d8862002-04-20 13:53:23 +00002282
sewardj0af43bc2002-10-22 04:30:35 +00002283 VG_TRACK( post_mutex_lock, i, mx );
2284
sewardj3b5d8862002-04-20 13:53:23 +00002285 if (VG_(clo_trace_pthread_level) >= 1) {
2286 VG_(sprintf)(msg_buf, "%s cv %p: RESUME with mx %p",
2287 caller, cond, mx );
2288 print_pthread_event(i, msg_buf);
2289 }
2290
2291 } else {
2292 /* Currently held. Make thread i be blocked on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002293 vg_assert(mx->__vg_m_count > 0);
sewardj018f7622002-05-15 21:13:39 +00002294 VG_(threads)[i].status = VgTs_WaitMX;
2295 VG_(threads)[i].associated_cv = NULL;
2296 VG_(threads)[i].associated_mx = mx;
njnd3040452003-05-19 15:04:06 +00002297 SET_PTHREQ_RETVAL(i, 0); /* pth_cond_wait success value */
sewardj3b5d8862002-04-20 13:53:23 +00002298
2299 if (VG_(clo_trace_pthread_level) >= 1) {
2300 VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
2301 caller, cond, mx );
2302 print_pthread_event(i, msg_buf);
2303 }
2304
2305 }
jsgf855d93d2003-10-13 22:26:55 +00002306
sewardj3b5d8862002-04-20 13:53:23 +00002307 n_to_release--;
2308 }
2309}
2310
2311
2312static
2313void do_pthread_cond_wait ( ThreadId tid,
nethercote1f0173b2004-02-28 15:40:36 +00002314 vg_pthread_cond_t *cond,
2315 vg_pthread_mutex_t *mutex,
sewardj5f07b662002-04-23 16:52:51 +00002316 UInt ms_end )
sewardj3b5d8862002-04-20 13:53:23 +00002317{
2318 Char msg_buf[100];
2319
sewardj5f07b662002-04-23 16:52:51 +00002320 /* If ms_end == 0xFFFFFFFF, wait forever (no timeout). Otherwise,
2321 ms_end is the ending millisecond. */
2322
sewardj3b5d8862002-04-20 13:53:23 +00002323 /* pre: mutex should be a valid mutex and owned by tid. */
2324 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj5f07b662002-04-23 16:52:51 +00002325 VG_(sprintf)(msg_buf, "pthread_cond_wait cv %p, mx %p, end %d ...",
2326 cond, mutex, ms_end );
sewardj3b5d8862002-04-20 13:53:23 +00002327 print_pthread_event(tid, msg_buf);
2328 }
2329
2330 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002331 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002332 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj3b5d8862002-04-20 13:53:23 +00002333
nethercoted3693d02004-04-26 08:05:24 +00002334 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002335 VG_(record_pthread_error)( tid,
nethercoted3693d02004-04-26 08:05:24 +00002336 "pthread_cond_wait/timedwait: mutex is NULL");
2337 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
2338 return;
2339 }
2340
2341 if (cond == NULL) {
2342 VG_(record_pthread_error)( tid,
2343 "pthread_cond_wait/timedwait: cond is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002344 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002345 return;
2346 }
2347
2348 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00002349 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002350# ifndef GLIBC_2_1
sewardj3b5d8862002-04-20 13:53:23 +00002351 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002352 case PTHREAD_MUTEX_ADAPTIVE_NP:
2353# endif
sewardja1679dd2002-05-10 22:31:40 +00002354# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002355 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002356# endif
sewardj3b5d8862002-04-20 13:53:23 +00002357 case PTHREAD_MUTEX_RECURSIVE_NP:
2358 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002359 if (mutex->__vg_m_count >= 0) break;
sewardj3b5d8862002-04-20 13:53:23 +00002360 /* else fall thru */
2361 default:
njn25e49d8e72002-09-23 09:36:25 +00002362 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002363 "pthread_cond_wait/timedwait: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002364 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002365 return;
2366 }
2367
2368 /* Barf if we don't currently hold the mutex. */
nethercoted3693d02004-04-26 08:05:24 +00002369 if (mutex->__vg_m_count == 0 /* nobody holds it */) {
njn25e49d8e72002-09-23 09:36:25 +00002370 VG_(record_pthread_error)( tid,
nethercoted3693d02004-04-26 08:05:24 +00002371 "pthread_cond_wait/timedwait: mutex is unlocked");
2372 SET_PTHREQ_RETVAL(tid, VKI_EPERM);
2373 return;
2374 }
2375
2376 if ((ThreadId)mutex->__vg_m_owner != tid /* we don't hold it */) {
2377 VG_(record_pthread_error)( tid,
2378 "pthread_cond_wait/timedwait: mutex is locked by another thread");
2379 SET_PTHREQ_RETVAL(tid, VKI_EPERM);
sewardj3b5d8862002-04-20 13:53:23 +00002380 return;
2381 }
2382
thughes513197c2004-06-13 12:07:53 +00002383   if (VG_(threads)[tid].cancel_pend != NULL &&
2384 VG_(threads)[tid].cancel_st) {
2385 make_thread_jump_to_cancelhdlr ( tid );
2386 } else {
2387 /* Queue ourselves on the condition. */
2388 VG_(threads)[tid].status = VgTs_WaitCV;
2389 VG_(threads)[tid].associated_cv = cond;
2390 VG_(threads)[tid].associated_mx = mutex;
2391 VG_(threads)[tid].awaken_at = ms_end;
2392 if (ms_end != 0xFFFFFFFF)
nethercotef971ab72004-08-02 16:27:40 +00002393 add_timeout(tid, ms_end);
sewardj3b5d8862002-04-20 13:53:23 +00002394
thughes513197c2004-06-13 12:07:53 +00002395 if (VG_(clo_trace_pthread_level) >= 1) {
2396 VG_(sprintf)(msg_buf,
2397 "pthread_cond_wait cv %p, mx %p: BLOCK",
2398 cond, mutex );
2399 print_pthread_event(tid, msg_buf);
2400 }
2401
2402 /* Release the mutex. */
2403 release_one_thread_waiting_on_mutex ( mutex, "pthread_cond_wait " );
sewardj3b5d8862002-04-20 13:53:23 +00002404 }
sewardj3b5d8862002-04-20 13:53:23 +00002405}
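/* Illustrative sketch (added; not part of this file): the client-side
   protocol that do_pthread_cond_wait supports is the standard POSIX
   one: the caller must already hold the mutex, and ms_end is either
   0xFFFFFFFF ("wait forever") or the absolute millisecond deadline for
   a timedwait.  Assuming an ordinary client program, the usual pattern
   looks like:

      pthread_mutex_lock(&mu);
      while (!data_ready)                  // spurious wakeups possible
         pthread_cond_wait(&cv, &mu);      // releases mu, blocks, re-locks
      use(data);
      pthread_mutex_unlock(&mu);

   mu, cv, data_ready, data and use() are hypothetical client-side
   names used only for illustration. */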
2406
2407
2408static
2409void do_pthread_cond_signal_or_broadcast ( ThreadId tid,
2410 Bool broadcast,
nethercote1f0173b2004-02-28 15:40:36 +00002411 vg_pthread_cond_t *cond )
sewardj3b5d8862002-04-20 13:53:23 +00002412{
2413 Char msg_buf[100];
2414 Char* caller
2415 = broadcast ? "pthread_cond_broadcast"
2416 : "pthread_cond_signal ";
2417
2418 if (VG_(clo_trace_pthread_level) >= 2) {
2419 VG_(sprintf)(msg_buf, "%s cv %p ...",
2420 caller, cond );
2421 print_pthread_event(tid, msg_buf);
2422 }
2423
2424 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002425 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002426 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj3b5d8862002-04-20 13:53:23 +00002427
2428 if (cond == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002429 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002430 "pthread_cond_signal/broadcast: cond is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002431 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002432 return;
2433 }
2434
2435 release_N_threads_waiting_on_cond (
2436 cond,
2437 broadcast ? VG_N_THREADS : 1,
2438 caller
2439 );
2440
njnd3040452003-05-19 15:04:06 +00002441 SET_PTHREQ_RETVAL(tid, 0); /* success */
sewardj3b5d8862002-04-20 13:53:23 +00002442}
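/* Note (added for clarity): signal and broadcast differ only in how
   many waiters release_N_threads_waiting_on_cond is asked to release:
   1 for pthread_cond_signal, and VG_N_THREADS (in effect, all of them)
   for pthread_cond_broadcast, as the call above shows. */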
2443
sewardj77e466c2002-04-14 02:29:29 +00002444
sewardj5f07b662002-04-23 16:52:51 +00002445/* -----------------------------------------------------------
2446 THREAD SPECIFIC DATA
2447 -------------------------------------------------------- */
2448
2449static __inline__
2450Bool is_valid_key ( ThreadKey k )
2451{
2452 /* k unsigned; hence no < 0 check */
2453 if (k >= VG_N_THREAD_KEYS) return False;
2454 if (!vg_thread_keys[k].inuse) return False;
2455 return True;
2456}
2457
sewardj00a66b12002-10-12 16:42:35 +00002458
2459/* Return in %EDX a value of 1 if the key is valid, else 0. */
2460static
2461void do_pthread_key_validate ( ThreadId tid,
2462 pthread_key_t key )
2463{
2464 Char msg_buf[100];
2465
2466 if (VG_(clo_trace_pthread_level) >= 1) {
2467 VG_(sprintf)(msg_buf, "pthread_key_validate key %p",
2468 key );
2469 print_pthread_event(tid, msg_buf);
2470 }
2471
2472 vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
2473 vg_assert(VG_(is_valid_tid)(tid)
2474 && VG_(threads)[tid].status == VgTs_Runnable);
2475
2476 if (is_valid_key((ThreadKey)key)) {
njnd3040452003-05-19 15:04:06 +00002477 SET_PTHREQ_RETVAL(tid, 1);
sewardj00a66b12002-10-12 16:42:35 +00002478 } else {
njnd3040452003-05-19 15:04:06 +00002479 SET_PTHREQ_RETVAL(tid, 0);
sewardj00a66b12002-10-12 16:42:35 +00002480 }
2481}
2482
2483
sewardj5f07b662002-04-23 16:52:51 +00002484static
2485void do_pthread_key_create ( ThreadId tid,
2486 pthread_key_t* key,
2487 void (*destructor)(void*) )
2488{
2489 Int i;
2490 Char msg_buf[100];
2491
2492 if (VG_(clo_trace_pthread_level) >= 1) {
2493 VG_(sprintf)(msg_buf, "pthread_key_create *key %p, destr %p",
2494 key, destructor );
2495 print_pthread_event(tid, msg_buf);
2496 }
2497
2498 vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
sewardjb48e5002002-05-13 00:16:03 +00002499 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002500 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002501
2502 for (i = 0; i < VG_N_THREAD_KEYS; i++)
2503 if (!vg_thread_keys[i].inuse)
2504 break;
2505
2506 if (i == VG_N_THREAD_KEYS) {
jsgf855d93d2003-10-13 22:26:55 +00002507 VG_(message)(Vg_UserMsg, "pthread_key_create() asked for too many keys (more than %d): increase VG_N_THREAD_KEYS and recompile Valgrind.",
2508 VG_N_THREAD_KEYS);
2509 SET_PTHREQ_RETVAL(tid, EAGAIN);
2510 return;
sewardj5f07b662002-04-23 16:52:51 +00002511 }
2512
sewardj870497a2002-05-29 01:06:47 +00002513 vg_thread_keys[i].inuse = True;
2514 vg_thread_keys[i].destructor = destructor;
sewardjc3bd5f52002-05-01 03:24:23 +00002515
sewardj5a3798b2002-06-04 23:24:22 +00002516   /* check that the key pointer is addressable */
njn72718642003-07-24 08:45:32 +00002517 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_key_create: key",
njn25e49d8e72002-09-23 09:36:25 +00002518 (Addr)key, sizeof(pthread_key_t));
sewardj5f07b662002-04-23 16:52:51 +00002519 *key = i;
njncf45fd42004-11-24 16:30:22 +00002520 VG_TRACK( post_mem_write, Vg_CorePThread, tid,
2521 (Addr)key, sizeof(pthread_key_t) );
sewardjc3bd5f52002-05-01 03:24:23 +00002522
njnd3040452003-05-19 15:04:06 +00002523 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002524}
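/* Illustrative sketch (added; not part of this file): the key index
   written into *key above selects a slot in vg_thread_keys[], and the
   destructor recorded with it is later retrieved through
   do__get_key_destr_and_spec when a thread exits.  From the client's
   point of view this is ordinary thread-specific data usage, e.g.

      static pthread_key_t buf_key;

      pthread_key_create(&buf_key, free);         // destructor is free()
      ...
      pthread_setspecific(buf_key, malloc(64));   // per-thread value
      char* p = pthread_getspecific(buf_key);

   buf_key and the surrounding code are hypothetical, shown only to
   illustrate the request being serviced here. */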
2525
2526
2527static
2528void do_pthread_key_delete ( ThreadId tid, pthread_key_t key )
2529{
2530 Char msg_buf[100];
2531 if (VG_(clo_trace_pthread_level) >= 1) {
2532 VG_(sprintf)(msg_buf, "pthread_key_delete key %d",
2533 key );
2534 print_pthread_event(tid, msg_buf);
2535 }
2536
sewardjb48e5002002-05-13 00:16:03 +00002537 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002538 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002539
2540 if (!is_valid_key(key)) {
njn25e49d8e72002-09-23 09:36:25 +00002541 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002542 "pthread_key_delete: key is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002543 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj5f07b662002-04-23 16:52:51 +00002544 return;
2545 }
2546
2547 vg_thread_keys[key].inuse = False;
sewardj648b3152002-12-09 19:07:59 +00002548 vg_thread_keys[key].destructor = NULL;
njnd3040452003-05-19 15:04:06 +00002549 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002550}
2551
2552
sewardj00a66b12002-10-12 16:42:35 +00002553/* Get the .specific_ptr for a thread. Return 1 if the thread-slot
2554 isn't in use, so that client-space can scan all thread slots. 1
2555 cannot be confused with NULL or a legitimately-aligned specific_ptr
2556 value. */
sewardj5f07b662002-04-23 16:52:51 +00002557static
sewardj00a66b12002-10-12 16:42:35 +00002558void do_pthread_getspecific_ptr ( ThreadId tid )
sewardj5f07b662002-04-23 16:52:51 +00002559{
sewardj00a66b12002-10-12 16:42:35 +00002560 void** specifics_ptr;
2561 Char msg_buf[100];
2562
jsgf855d93d2003-10-13 22:26:55 +00002563 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj00a66b12002-10-12 16:42:35 +00002564 VG_(sprintf)(msg_buf, "pthread_getspecific_ptr" );
sewardj5f07b662002-04-23 16:52:51 +00002565 print_pthread_event(tid, msg_buf);
2566 }
2567
nethercote36881a22004-08-04 14:03:16 +00002568 vg_assert(is_valid_or_empty_tid(tid));
sewardj5f07b662002-04-23 16:52:51 +00002569
sewardj00a66b12002-10-12 16:42:35 +00002570 if (VG_(threads)[tid].status == VgTs_Empty) {
njnd3040452003-05-19 15:04:06 +00002571 SET_PTHREQ_RETVAL(tid, 1);
sewardj5f07b662002-04-23 16:52:51 +00002572 return;
2573 }
2574
sewardj00a66b12002-10-12 16:42:35 +00002575 specifics_ptr = VG_(threads)[tid].specifics_ptr;
nethercote5fd72bb2004-11-04 19:28:38 +00002576 vg_assert(specifics_ptr == NULL || IS_ALIGNED4_ADDR(specifics_ptr));
sewardj00a66b12002-10-12 16:42:35 +00002577
nethercote50397c22004-11-04 18:03:06 +00002578 SET_PTHREQ_RETVAL(tid, (UWord)specifics_ptr);
sewardj5f07b662002-04-23 16:52:51 +00002579}
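/* Illustrative sketch (added; not part of this file): because an
   unused thread slot answers 1 rather than a pointer, client-space
   code that scans every slot can tell the three cases apart cheaply.
   A minimal sketch, with hypothetical names:

      void** sp = ...;   // result of the GETSPECIFIC_PTR request for slot t
      if (sp == (void**)1) {
         // slot t is not in use; skip it
      } else if (sp == NULL) {
         // thread t has no specifics array yet
      } else {
         // sp[key] is thread t's value for 'key'
      }

   Only the 1 / NULL / aligned-pointer distinction comes from the code
   above; the scanning loop itself lives in client space. */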
2580
2581
2582static
sewardj00a66b12002-10-12 16:42:35 +00002583void do_pthread_setspecific_ptr ( ThreadId tid, void** ptr )
sewardj5f07b662002-04-23 16:52:51 +00002584{
2585 Char msg_buf[100];
2586 if (VG_(clo_trace_pthread_level) >= 1) {
sewardj00a66b12002-10-12 16:42:35 +00002587 VG_(sprintf)(msg_buf, "pthread_setspecific_ptr ptr %p",
2588 ptr );
sewardj5f07b662002-04-23 16:52:51 +00002589 print_pthread_event(tid, msg_buf);
2590 }
2591
sewardjb48e5002002-05-13 00:16:03 +00002592 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002593 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002594
sewardj00a66b12002-10-12 16:42:35 +00002595 VG_(threads)[tid].specifics_ptr = ptr;
njnd3040452003-05-19 15:04:06 +00002596 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002597}
2598
2599
sewardj870497a2002-05-29 01:06:47 +00002600/* Helper for calling destructors at thread exit. If key is valid,
2601   copy the thread's specific value into cu->data.function.arg and put
2602   the *key*'s destructor fn address in cu->data.function.fn. Then return 0 to the caller.
2603 Otherwise return non-zero to the caller. */
2604static
2605void do__get_key_destr_and_spec ( ThreadId tid,
2606 pthread_key_t key,
2607 CleanupEntry* cu )
2608{
2609 Char msg_buf[100];
jsgf855d93d2003-10-13 22:26:55 +00002610 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj870497a2002-05-29 01:06:47 +00002611 VG_(sprintf)(msg_buf,
2612 "get_key_destr_and_arg (key = %d)", key );
2613 print_pthread_event(tid, msg_buf);
2614 }
2615 vg_assert(VG_(is_valid_tid)(tid));
2616 vg_assert(key >= 0 && key < VG_N_THREAD_KEYS);
njn25e49d8e72002-09-23 09:36:25 +00002617
sewardj870497a2002-05-29 01:06:47 +00002618 if (!vg_thread_keys[key].inuse) {
njnd3040452003-05-19 15:04:06 +00002619 SET_PTHREQ_RETVAL(tid, -1);
sewardj870497a2002-05-29 01:06:47 +00002620 return;
2621 }
njn72718642003-07-24 08:45:32 +00002622 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "get_key_destr_and_spec: cu",
2623 (Addr)cu, sizeof(CleanupEntry) );
sewardj00a66b12002-10-12 16:42:35 +00002624
thughes11975ff2004-06-12 12:58:22 +00002625 cu->type = VgCt_Function;
2626 cu->data.function.fn = vg_thread_keys[key].destructor;
sewardj00a66b12002-10-12 16:42:35 +00002627 if (VG_(threads)[tid].specifics_ptr == NULL) {
thughes11975ff2004-06-12 12:58:22 +00002628 cu->data.function.arg = NULL;
sewardj00a66b12002-10-12 16:42:35 +00002629 } else {
njn72718642003-07-24 08:45:32 +00002630 VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
sewardj00a66b12002-10-12 16:42:35 +00002631 "get_key_destr_and_spec: key",
2632 (Addr)(&VG_(threads)[tid].specifics_ptr[key]),
2633 sizeof(void*) );
thughes11975ff2004-06-12 12:58:22 +00002634 cu->data.function.arg = VG_(threads)[tid].specifics_ptr[key];
sewardj00a66b12002-10-12 16:42:35 +00002635 }
2636
njncf45fd42004-11-24 16:30:22 +00002637 VG_TRACK( post_mem_write, Vg_CorePThread, tid,
2638 (Addr)cu, sizeof(CleanupEntry) );
njnd3040452003-05-19 15:04:06 +00002639 SET_PTHREQ_RETVAL(tid, 0);
sewardj870497a2002-05-29 01:06:47 +00002640}
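/* Illustrative sketch (added; not part of this file): the intended
   consumer of this request is the thread-exit path in the client-side
   library (vg_libpthread.c), which under the usual POSIX rules asks
   for each key's destructor together with the exiting thread's value
   and calls the destructor on that value.  A hedged sketch of such a
   loop:

      CleanupEntry cu;
      for (key = 0; key < VG_N_THREAD_KEYS; key++) {
         if (0 == ...GET_KEY_D_AND_S request for (key, &cu)...
             && cu.data.function.fn != NULL
             && cu.data.function.arg != NULL)
            cu.data.function.fn(cu.data.function.arg);
      }

   The real client-side loop may differ in detail; only the
   CleanupEntry fields and the 0 / -1 return convention come from the
   code above. */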
2641
2642
sewardjb48e5002002-05-13 00:16:03 +00002643/* ---------------------------------------------------
2644 SIGNALS
2645 ------------------------------------------------ */
2646
2647/* See comment in vg_libpthread.c:pthread_sigmask() regarding
sewardj018f7622002-05-15 21:13:39 +00002648 deliberate confusion of types sigset_t and vki_sigset_t. Return 0
2649 for OK and 1 for some kind of addressing error, which the
2650 vg_libpthread.c routine turns into return values 0 and EFAULT
2651 respectively. */
sewardjb48e5002002-05-13 00:16:03 +00002652static
2653void do_pthread_sigmask ( ThreadId tid,
sewardj018f7622002-05-15 21:13:39 +00002654 Int vki_how,
nethercote73b526f2004-10-31 18:48:21 +00002655 vki_sigset_t* newmask,
2656 vki_sigset_t* oldmask )
sewardjb48e5002002-05-13 00:16:03 +00002657{
2658 Char msg_buf[100];
2659 if (VG_(clo_trace_pthread_level) >= 1) {
2660 VG_(sprintf)(msg_buf,
sewardj018f7622002-05-15 21:13:39 +00002661 "pthread_sigmask vki_how %d, newmask %p, oldmask %p",
2662 vki_how, newmask, oldmask );
sewardjb48e5002002-05-13 00:16:03 +00002663 print_pthread_event(tid, msg_buf);
2664 }
2665
2666 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002667 && VG_(threads)[tid].status == VgTs_Runnable);
sewardjb48e5002002-05-13 00:16:03 +00002668
njn25e49d8e72002-09-23 09:36:25 +00002669 if (newmask)
njn72718642003-07-24 08:45:32 +00002670 VG_TRACK( pre_mem_read, Vg_CorePThread, tid, "pthread_sigmask: newmask",
nethercote73b526f2004-10-31 18:48:21 +00002671 (Addr)newmask, sizeof(vki_sigset_t));
njn25e49d8e72002-09-23 09:36:25 +00002672 if (oldmask)
njn72718642003-07-24 08:45:32 +00002673 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_sigmask: oldmask",
nethercote73b526f2004-10-31 18:48:21 +00002674 (Addr)oldmask, sizeof(vki_sigset_t));
sewardjb48e5002002-05-13 00:16:03 +00002675
sewardj018f7622002-05-15 21:13:39 +00002676 VG_(do_pthread_sigmask_SCSS_upd) ( tid, vki_how, newmask, oldmask );
sewardjb48e5002002-05-13 00:16:03 +00002677
njn25e49d8e72002-09-23 09:36:25 +00002678 if (oldmask)
njncf45fd42004-11-24 16:30:22 +00002679 VG_TRACK( post_mem_write, Vg_CorePThread, tid,
2680 (Addr)oldmask, sizeof(vki_sigset_t) );
sewardj3a951cf2002-05-15 22:25:47 +00002681
sewardj018f7622002-05-15 21:13:39 +00002682 /* Success. */
njnd3040452003-05-19 15:04:06 +00002683 SET_PTHREQ_RETVAL(tid, 0);
sewardjb48e5002002-05-13 00:16:03 +00002684}
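/* Illustrative sketch (added; not part of this file): per the comment
   above the function, the client-side stub maps this request's result
   onto errno-style values, roughly

      res = ...issue the PTHREAD_SIGMASK request...;
      return res == 0 ? 0 : EFAULT;

   The path shown here only ever sets the success value (0); the
   actual stub is in vg_libpthread.c and may differ in detail. */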
2685
2686
2687static
sewardj018f7622002-05-15 21:13:39 +00002688void do_pthread_kill ( ThreadId tid, /* me */
2689 ThreadId thread, /* thread to signal */
2690 Int sig )
2691{
nethercote97ccd5e2004-08-02 12:10:01 +00002692 ThreadState* tst;
sewardj018f7622002-05-15 21:13:39 +00002693 Char msg_buf[100];
2694
2695 if (VG_(clo_trace_signals) || VG_(clo_trace_pthread_level) >= 1) {
2696 VG_(sprintf)(msg_buf,
2697 "pthread_kill thread %d, signo %d",
2698 thread, sig );
2699 print_pthread_event(tid, msg_buf);
2700 }
2701
2702 vg_assert(VG_(is_valid_tid)(tid)
2703 && VG_(threads)[tid].status == VgTs_Runnable);
2704
sewardj4dced352002-06-04 22:54:20 +00002705 if (!VG_(is_valid_tid)(thread)) {
njn25e49d8e72002-09-23 09:36:25 +00002706 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002707 "pthread_kill: invalid target thread");
jsgf855d93d2003-10-13 22:26:55 +00002708 SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
2709 return;
2710 }
2711
2712 if (sig == 0) {
2713      /* OK, signal 0 just tests for the thread's existence */
2714 SET_PTHREQ_RETVAL(tid, 0);
sewardj018f7622002-05-15 21:13:39 +00002715 return;
2716 }
2717
nethercote73b526f2004-10-31 18:48:21 +00002718 if (sig < 1 || sig > _VKI_NSIG) {
jsgf855d93d2003-10-13 22:26:55 +00002719 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj018f7622002-05-15 21:13:39 +00002720 return;
2721 }
2722
nethercote97ccd5e2004-08-02 12:10:01 +00002723 tst = VG_(get_ThreadState)(thread);
2724 vg_assert(NULL != tst->proxy);
sewardj0a785fd2004-11-24 21:24:24 +00002725 VG_(proxy_sendsig)(tid/*from*/, thread/*to*/, sig);
njnd3040452003-05-19 15:04:06 +00002726 SET_PTHREQ_RETVAL(tid, 0);
sewardjb48e5002002-05-13 00:16:03 +00002727}
2728
2729
sewardj2cb00342002-06-28 01:46:26 +00002730/* -----------------------------------------------------------
2731 FORK HANDLERS.
2732 -------------------------------------------------------- */
2733
2734static
2735void do__set_fhstack_used ( ThreadId tid, Int n )
2736{
2737 Char msg_buf[100];
2738 if (VG_(clo_trace_sched)) {
2739 VG_(sprintf)(msg_buf, "set_fhstack_used to %d", n );
2740 print_pthread_event(tid, msg_buf);
2741 }
2742
2743 vg_assert(VG_(is_valid_tid)(tid)
2744 && VG_(threads)[tid].status == VgTs_Runnable);
2745
2746 if (n >= 0 && n < VG_N_FORKHANDLERSTACK) {
2747 vg_fhstack_used = n;
njnd3040452003-05-19 15:04:06 +00002748 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002749 } else {
njnd3040452003-05-19 15:04:06 +00002750 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002751 }
2752}
2753
2754
2755static
2756void do__get_fhstack_used ( ThreadId tid )
2757{
2758 Int n;
2759 Char msg_buf[100];
2760 if (VG_(clo_trace_sched)) {
2761 VG_(sprintf)(msg_buf, "get_fhstack_used" );
2762 print_pthread_event(tid, msg_buf);
2763 }
2764
2765 vg_assert(VG_(is_valid_tid)(tid)
2766 && VG_(threads)[tid].status == VgTs_Runnable);
2767
2768 n = vg_fhstack_used;
2769 vg_assert(n >= 0 && n < VG_N_FORKHANDLERSTACK);
njnd3040452003-05-19 15:04:06 +00002770 SET_PTHREQ_RETVAL(tid, n);
sewardj2cb00342002-06-28 01:46:26 +00002771}
2772
2773static
2774void do__set_fhstack_entry ( ThreadId tid, Int n, ForkHandlerEntry* fh )
2775{
2776 Char msg_buf[100];
2777 if (VG_(clo_trace_sched)) {
2778 VG_(sprintf)(msg_buf, "set_fhstack_entry %d to %p", n, fh );
2779 print_pthread_event(tid, msg_buf);
2780 }
2781
2782 vg_assert(VG_(is_valid_tid)(tid)
2783 && VG_(threads)[tid].status == VgTs_Runnable);
njn72718642003-07-24 08:45:32 +00002784 VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
njn25e49d8e72002-09-23 09:36:25 +00002785 "pthread_atfork: prepare/parent/child",
2786 (Addr)fh, sizeof(ForkHandlerEntry));
sewardj2cb00342002-06-28 01:46:26 +00002787
njn25e49d8e72002-09-23 09:36:25 +00002788 if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
njnd3040452003-05-19 15:04:06 +00002789 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002790 return;
2791 }
2792
2793 vg_fhstack[n] = *fh;
njnd3040452003-05-19 15:04:06 +00002794 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002795}
2796
2797
2798static
2799void do__get_fhstack_entry ( ThreadId tid, Int n, /*OUT*/
2800 ForkHandlerEntry* fh )
2801{
2802 Char msg_buf[100];
2803 if (VG_(clo_trace_sched)) {
2804 VG_(sprintf)(msg_buf, "get_fhstack_entry %d", n );
2805 print_pthread_event(tid, msg_buf);
2806 }
2807
2808 vg_assert(VG_(is_valid_tid)(tid)
2809 && VG_(threads)[tid].status == VgTs_Runnable);
njn72718642003-07-24 08:45:32 +00002810 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "fork: prepare/parent/child",
njn25e49d8e72002-09-23 09:36:25 +00002811 (Addr)fh, sizeof(ForkHandlerEntry));
sewardj2cb00342002-06-28 01:46:26 +00002812
njn25e49d8e72002-09-23 09:36:25 +00002813 if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
njnd3040452003-05-19 15:04:06 +00002814 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002815 return;
2816 }
2817
2818 *fh = vg_fhstack[n];
njnd3040452003-05-19 15:04:06 +00002819 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002820
njncf45fd42004-11-24 16:30:22 +00002821 VG_TRACK( post_mem_write, Vg_CorePThread, tid,
2822 (Addr)fh, sizeof(ForkHandlerEntry) );
sewardj2cb00342002-06-28 01:46:26 +00002823}
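/* Illustrative sketch (added; not part of this file): the four
   fhstack requests above exist so the client-side pthread_atfork()
   machinery can keep its handler stack inside the scheduler.  From
   the client's point of view,

      void prep(void)      { ... }
      void in_parent(void) { ... }
      void in_child(void)  { ... }

      pthread_atfork(prep, in_parent, in_child);

   registers one ForkHandlerEntry; at fork() time the prepare handlers
   run before the fork, and the parent/child handlers run afterwards
   in the parent and child respectively, as POSIX specifies.  prep,
   in_parent and in_child are hypothetical names used only for
   illustration. */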
2824
thughesdaa34562004-06-27 12:48:53 +00002825
2826static
2827void do__get_stack_info ( ThreadId tid, ThreadId which, StackInfo* si )
2828{
2829 Char msg_buf[100];
2830
2831 vg_assert(VG_(is_valid_tid)(tid)
2832 && VG_(threads)[tid].status == VgTs_Runnable);
2833
2834 if (VG_(clo_trace_sched)) {
2835 VG_(sprintf)(msg_buf, "get_stack_info for tid %d", which );
2836 print_pthread_event(tid, msg_buf);
2837 }
2838
2839 if (!VG_(is_valid_tid)(which)) {
2840 SET_PTHREQ_RETVAL(tid, -1);
2841 return;
2842 }
2843
2844 si->base = VG_(threads)[which].stack_base;
2845 si->size = VG_(threads)[which].stack_size
2846 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
2847 - VG_(threads)[which].stack_guard_size;
2848 si->guardsize = VG_(threads)[which].stack_guard_size;
2849
2850 SET_PTHREQ_RETVAL(tid, 0);
2851}
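/* Note (added for clarity): the size reported above is the usable
   stack only; both the scheduler's redzone
   (VG_AR_CLIENT_STACKBASE_REDZONE_SZB) and the thread's guard area
   are subtracted from stack_size, while guardsize is reported
   separately.  The same arithmetic reappears in the stack-overflow
   check in scheduler_sanity() below. */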
2852
njnd3040452003-05-19 15:04:06 +00002853/* ---------------------------------------------------------------------
njncf45fd42004-11-24 16:30:22 +00002854 Shadow register manipulations
njnd3040452003-05-19 15:04:06 +00002855 ------------------------------------------------------------------ */
2856
njncf45fd42004-11-24 16:30:22 +00002857void VG_(set_shadow_regs_area) ( ThreadId tid, OffT offset, SizeT size,
2858 const UChar* area )
njnd3040452003-05-19 15:04:06 +00002859{
njncf45fd42004-11-24 16:30:22 +00002860 ThreadState* tst;
2861
2862 vg_assert(VG_(is_valid_tid)(tid));
2863 tst = & VG_(threads)[tid];
2864
2865 // Bounds check
2866 vg_assert(0 <= offset && offset < sizeof(VexGuestArchState));
2867 vg_assert(offset + size <= sizeof(VexGuestArchState));
2868
2869 VG_(memcpy)( (void*)(((Addr)(&tst->arch.vex_shadow)) + offset), area, size);
2870}
2871
2872void VG_(get_shadow_regs_area) ( ThreadId tid, OffT offset, SizeT size,
2873 UChar* area )
2874{
2875 ThreadState* tst;
2876
2877 vg_assert(VG_(is_valid_tid)(tid));
2878 tst = & VG_(threads)[tid];
2879
2880 // Bounds check
2881 vg_assert(0 <= offset && offset < sizeof(VexGuestArchState));
2882 vg_assert(offset + size <= sizeof(VexGuestArchState));
2883
2884 VG_(memcpy)( area, (void*)(((Addr)&(tst->arch.vex_shadow)) + offset), size);
2885}
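/* Illustrative sketch (added): tools and the core read and write
   shadow registers through the two accessors above by passing a byte
   offset into VexGuestArchState plus a size, exactly as the two
   helpers that follow do with O_SYSCALL_RET and O_SYSCALL_ARG1.  For
   example, fetching a one-word shadow value:

      UWord shadow;
      VG_(get_shadow_regs_area)(tid, O_SYSCALL_RET, sizeof(UWord),
                                (UChar*)&shadow);

   The offset chosen here is only an example; any in-bounds offset
   into the shadow state works the same way. */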
2886
2887
2888void VG_(set_return_from_syscall_shadow) ( ThreadId tid, UWord ret_shadow )
2889{
2890 VG_(set_shadow_regs_area)(tid, O_SYSCALL_RET, sizeof(UWord),
2891 (UChar*)&ret_shadow);
njnd3040452003-05-19 15:04:06 +00002892}
2893
sewardj2a99cf62004-11-24 10:44:19 +00002894UInt VG_(get_exit_status_shadow) ( ThreadId tid )
njnd3040452003-05-19 15:04:06 +00002895{
njncf45fd42004-11-24 16:30:22 +00002896 UInt ret;
2897 VG_(get_shadow_regs_area)(tid, O_SYSCALL_ARG1, sizeof(UInt),
2898 (UChar*)&ret);
2899 return ret;
njnd3040452003-05-19 15:04:06 +00002900}
2901
rjwalshe4e779d2004-04-16 23:02:29 +00002902void VG_(intercept_libc_freeres_wrapper)(Addr addr)
2903{
nethercotef971ab72004-08-02 16:27:40 +00002904 __libc_freeres_wrapper = addr;
rjwalshe4e779d2004-04-16 23:02:29 +00002905}
sewardj2cb00342002-06-28 01:46:26 +00002906
sewardje663cb92002-04-12 10:26:32 +00002907/* ---------------------------------------------------------------------
sewardj124ca2a2002-06-20 10:19:38 +00002908 Handle client requests.
sewardje663cb92002-04-12 10:26:32 +00002909 ------------------------------------------------------------------ */
2910
sewardj124ca2a2002-06-20 10:19:38 +00002911/* Do a client request for the thread tid. After the request, tid may
2912 or may not still be runnable; if not, the scheduler will have to
2913 choose a new thread to run.
2914*/
sewardje663cb92002-04-12 10:26:32 +00002915static
nethercoted1b64b22004-11-04 18:22:28 +00002916void do_client_request ( ThreadId tid, UWord* arg )
sewardje663cb92002-04-12 10:26:32 +00002917{
nethercoted1b64b22004-11-04 18:22:28 +00002918 UWord req_no = arg[0];
sewardj124ca2a2002-06-20 10:19:38 +00002919
fitzhardinge98abfc72003-12-16 02:05:15 +00002920 if (0)
nethercoted1b64b22004-11-04 18:22:28 +00002921 VG_(printf)("req no = 0x%llx, arg = %p\n", (ULong)req_no, arg);
sewardje663cb92002-04-12 10:26:32 +00002922 switch (req_no) {
2923
njn3e884182003-04-15 13:03:23 +00002924 case VG_USERREQ__CLIENT_CALL0: {
sewardj2a99cf62004-11-24 10:44:19 +00002925 UWord (*f)(ThreadId) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002926 if (f == NULL)
nethercote66b3af62004-09-11 13:06:55 +00002927 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL0: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00002928 else
sewardj2a99cf62004-11-24 10:44:19 +00002929 SET_CLCALL_RETVAL(tid, f ( tid ), (Addr)f);
njn3e884182003-04-15 13:03:23 +00002930 break;
2931 }
2932 case VG_USERREQ__CLIENT_CALL1: {
sewardj2a99cf62004-11-24 10:44:19 +00002933 UWord (*f)(ThreadId, UWord) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002934 if (f == NULL)
nethercote66b3af62004-09-11 13:06:55 +00002935 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL1: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00002936 else
sewardj2a99cf62004-11-24 10:44:19 +00002937 SET_CLCALL_RETVAL(tid, f ( tid, arg[2] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002938 break;
2939 }
2940 case VG_USERREQ__CLIENT_CALL2: {
sewardj2a99cf62004-11-24 10:44:19 +00002941 UWord (*f)(ThreadId, UWord, UWord) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002942 if (f == NULL)
nethercote66b3af62004-09-11 13:06:55 +00002943 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL2: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00002944 else
sewardj2a99cf62004-11-24 10:44:19 +00002945 SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002946 break;
2947 }
2948 case VG_USERREQ__CLIENT_CALL3: {
sewardj2a99cf62004-11-24 10:44:19 +00002949 UWord (*f)(ThreadId, UWord, UWord, UWord) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002950 if (f == NULL)
nethercote66b3af62004-09-11 13:06:55 +00002951 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL3: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00002952 else
sewardj2a99cf62004-11-24 10:44:19 +00002953 SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3], arg[4] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002954 break;
2955 }
2956
nethercote7cc9c232004-01-21 15:08:04 +00002957      /* Note: for tools that replace malloc() et al, we want to call
njn3e884182003-04-15 13:03:23 +00002958         the replacement versions. For those that don't, we want to call
njn26f02512004-11-22 18:33:15 +00002959         VG_(cli_malloc)() et al. We do this by calling TL_(malloc)():
nethercote3ced0e32004-01-26 14:50:45 +00002960         malloc-replacing tools must provide their own definition, and the
njn26f02512004-11-22 18:33:15 +00002961         default definition of TL_(malloc)() simply calls VG_(cli_malloc)(). */
njn3e884182003-04-15 13:03:23 +00002962
2963 /* Note: for MALLOC and FREE, must set the appropriate "lock"... see
njn26f02512004-11-22 18:33:15 +00002964 the comment in vg_defaults.c/TL_(malloc)() for why. */
sewardj124ca2a2002-06-20 10:19:38 +00002965 case VG_USERREQ__MALLOC:
njnd2252832004-11-26 10:53:33 +00002966 VG_(tl_malloc_called_by_scheduler) = True;
njnd3040452003-05-19 15:04:06 +00002967 SET_PTHREQ_RETVAL(
sewardj2a99cf62004-11-24 10:44:19 +00002968 tid, (Addr)TL_(malloc) ( tid, arg[1] )
sewardj124ca2a2002-06-20 10:19:38 +00002969 );
njnd2252832004-11-26 10:53:33 +00002970 VG_(tl_malloc_called_by_scheduler) = False;
sewardj124ca2a2002-06-20 10:19:38 +00002971 break;
2972
2973 case VG_USERREQ__FREE:
njnd2252832004-11-26 10:53:33 +00002974 VG_(tl_malloc_called_by_scheduler) = True;
sewardj2a99cf62004-11-24 10:44:19 +00002975 TL_(free) ( tid, (void*)arg[1] );
njnd2252832004-11-26 10:53:33 +00002976 VG_(tl_malloc_called_by_scheduler) = False;
njnd3040452003-05-19 15:04:06 +00002977 SET_PTHREQ_RETVAL(tid, 0); /* irrelevant */
sewardj124ca2a2002-06-20 10:19:38 +00002978 break;
2979
sewardj124ca2a2002-06-20 10:19:38 +00002980 case VG_USERREQ__PTHREAD_GET_THREADID:
njnd3040452003-05-19 15:04:06 +00002981 SET_PTHREQ_RETVAL(tid, tid);
sewardj124ca2a2002-06-20 10:19:38 +00002982 break;
2983
2984 case VG_USERREQ__RUNNING_ON_VALGRIND:
njnd3040452003-05-19 15:04:06 +00002985 SET_CLREQ_RETVAL(tid, 1);
sewardj124ca2a2002-06-20 10:19:38 +00002986 break;
2987
2988 case VG_USERREQ__GET_PTHREAD_TRACE_LEVEL:
njnd3040452003-05-19 15:04:06 +00002989 SET_PTHREQ_RETVAL(tid, VG_(clo_trace_pthread_level));
sewardj124ca2a2002-06-20 10:19:38 +00002990 break;
2991
2992 case VG_USERREQ__READ_MILLISECOND_TIMER:
njnd3040452003-05-19 15:04:06 +00002993 SET_PTHREQ_RETVAL(tid, VG_(read_millisecond_timer)());
sewardj124ca2a2002-06-20 10:19:38 +00002994 break;
2995
2996 /* Some of these may make thread tid non-runnable, but the
2997 scheduler checks for that on return from this function. */
2998 case VG_USERREQ__PTHREAD_MUTEX_LOCK:
thughese321d492004-10-17 15:00:20 +00002999 do_pthread_mutex_lock( tid, False, (void *)(arg[1]), 0xFFFFFFFF );
3000 break;
3001
3002 case VG_USERREQ__PTHREAD_MUTEX_TIMEDLOCK:
3003 do_pthread_mutex_lock( tid, False, (void *)(arg[1]), arg[2] );
sewardj124ca2a2002-06-20 10:19:38 +00003004 break;
3005
3006 case VG_USERREQ__PTHREAD_MUTEX_TRYLOCK:
thughese321d492004-10-17 15:00:20 +00003007 do_pthread_mutex_lock( tid, True, (void *)(arg[1]), 0xFFFFFFFF );
sewardj124ca2a2002-06-20 10:19:38 +00003008 break;
3009
3010 case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
3011 do_pthread_mutex_unlock( tid, (void *)(arg[1]) );
3012 break;
3013
sewardj00a66b12002-10-12 16:42:35 +00003014 case VG_USERREQ__PTHREAD_GETSPECIFIC_PTR:
3015 do_pthread_getspecific_ptr ( tid );
sewardj124ca2a2002-06-20 10:19:38 +00003016 break;
3017
3018 case VG_USERREQ__SET_CANCELTYPE:
3019 do__set_canceltype ( tid, arg[1] );
3020 break;
3021
3022 case VG_USERREQ__CLEANUP_PUSH:
3023 do__cleanup_push ( tid, (CleanupEntry*)(arg[1]) );
3024 break;
3025
3026 case VG_USERREQ__CLEANUP_POP:
3027 do__cleanup_pop ( tid, (CleanupEntry*)(arg[1]) );
3028 break;
3029
3030 case VG_USERREQ__TESTCANCEL:
3031 do__testcancel ( tid );
3032 break;
3033
sewardje663cb92002-04-12 10:26:32 +00003034 case VG_USERREQ__PTHREAD_JOIN:
3035 do_pthread_join( tid, arg[1], (void**)(arg[2]) );
3036 break;
3037
sewardj3b5d8862002-04-20 13:53:23 +00003038 case VG_USERREQ__PTHREAD_COND_WAIT:
3039 do_pthread_cond_wait( tid,
nethercote1f0173b2004-02-28 15:40:36 +00003040 (vg_pthread_cond_t *)(arg[1]),
3041 (vg_pthread_mutex_t *)(arg[2]),
sewardj5f07b662002-04-23 16:52:51 +00003042 0xFFFFFFFF /* no timeout */ );
3043 break;
3044
3045 case VG_USERREQ__PTHREAD_COND_TIMEDWAIT:
3046 do_pthread_cond_wait( tid,
nethercote1f0173b2004-02-28 15:40:36 +00003047 (vg_pthread_cond_t *)(arg[1]),
3048 (vg_pthread_mutex_t *)(arg[2]),
sewardj5f07b662002-04-23 16:52:51 +00003049 arg[3] /* timeout millisecond point */ );
sewardj3b5d8862002-04-20 13:53:23 +00003050 break;
3051
3052 case VG_USERREQ__PTHREAD_COND_SIGNAL:
3053 do_pthread_cond_signal_or_broadcast(
3054 tid,
3055 False, /* signal, not broadcast */
nethercote1f0173b2004-02-28 15:40:36 +00003056 (vg_pthread_cond_t *)(arg[1]) );
sewardj3b5d8862002-04-20 13:53:23 +00003057 break;
3058
3059 case VG_USERREQ__PTHREAD_COND_BROADCAST:
3060 do_pthread_cond_signal_or_broadcast(
3061 tid,
3062 True, /* broadcast, not signal */
nethercote1f0173b2004-02-28 15:40:36 +00003063 (vg_pthread_cond_t *)(arg[1]) );
sewardj3b5d8862002-04-20 13:53:23 +00003064 break;
3065
sewardj00a66b12002-10-12 16:42:35 +00003066 case VG_USERREQ__PTHREAD_KEY_VALIDATE:
3067 do_pthread_key_validate ( tid,
3068 (pthread_key_t)(arg[1]) );
3069 break;
3070
sewardj5f07b662002-04-23 16:52:51 +00003071 case VG_USERREQ__PTHREAD_KEY_CREATE:
3072 do_pthread_key_create ( tid,
3073 (pthread_key_t*)(arg[1]),
3074 (void(*)(void*))(arg[2]) );
3075 break;
3076
3077 case VG_USERREQ__PTHREAD_KEY_DELETE:
3078 do_pthread_key_delete ( tid,
3079 (pthread_key_t)(arg[1]) );
3080 break;
3081
sewardj00a66b12002-10-12 16:42:35 +00003082 case VG_USERREQ__PTHREAD_SETSPECIFIC_PTR:
3083 do_pthread_setspecific_ptr ( tid,
3084 (void**)(arg[1]) );
sewardj5f07b662002-04-23 16:52:51 +00003085 break;
3086
sewardjb48e5002002-05-13 00:16:03 +00003087 case VG_USERREQ__PTHREAD_SIGMASK:
3088 do_pthread_sigmask ( tid,
3089 arg[1],
nethercote73b526f2004-10-31 18:48:21 +00003090 (vki_sigset_t*)(arg[2]),
3091 (vki_sigset_t*)(arg[3]) );
sewardjb48e5002002-05-13 00:16:03 +00003092 break;
3093
sewardj018f7622002-05-15 21:13:39 +00003094 case VG_USERREQ__PTHREAD_KILL:
3095 do_pthread_kill ( tid, arg[1], arg[2] );
3096 break;
3097
sewardjff42d1d2002-05-22 13:17:31 +00003098 case VG_USERREQ__PTHREAD_YIELD:
3099 do_pthread_yield ( tid );
sewardj18a62ff2002-07-12 22:30:51 +00003100 /* On return from do_client_request(), the scheduler will
3101 select a new thread to run. */
sewardjff42d1d2002-05-22 13:17:31 +00003102 break;
sewardj018f7622002-05-15 21:13:39 +00003103
sewardj7989d0c2002-05-28 11:00:01 +00003104 case VG_USERREQ__SET_CANCELSTATE:
3105 do__set_cancelstate ( tid, arg[1] );
3106 break;
3107
sewardj7989d0c2002-05-28 11:00:01 +00003108 case VG_USERREQ__SET_OR_GET_DETACH:
3109 do__set_or_get_detach ( tid, arg[1], arg[2] );
3110 break;
3111
3112 case VG_USERREQ__SET_CANCELPEND:
3113 do__set_cancelpend ( tid, arg[1], (void(*)(void*))arg[2] );
3114 break;
3115
3116 case VG_USERREQ__WAIT_JOINER:
3117 do__wait_joiner ( tid, (void*)arg[1] );
3118 break;
3119
3120 case VG_USERREQ__QUIT:
3121 do__quit ( tid );
3122 break;
3123
3124 case VG_USERREQ__APPLY_IN_NEW_THREAD:
3125 do__apply_in_new_thread ( tid, (void*(*)(void*))arg[1],
thughesdaa34562004-06-27 12:48:53 +00003126 (void*)arg[2], (StackInfo*)(arg[3]) );
sewardj7989d0c2002-05-28 11:00:01 +00003127 break;
3128
sewardj870497a2002-05-29 01:06:47 +00003129 case VG_USERREQ__GET_KEY_D_AND_S:
3130 do__get_key_destr_and_spec ( tid,
3131 (pthread_key_t)arg[1],
3132 (CleanupEntry*)arg[2] );
3133 break;
3134
sewardjef037c72002-05-30 00:40:03 +00003135 case VG_USERREQ__NUKE_OTHER_THREADS:
3136 VG_(nuke_all_threads_except) ( tid );
njnd3040452003-05-19 15:04:06 +00003137 SET_PTHREQ_RETVAL(tid, 0);
sewardjef037c72002-05-30 00:40:03 +00003138 break;
3139
sewardj4dced352002-06-04 22:54:20 +00003140 case VG_USERREQ__PTHREAD_ERROR:
njn25e49d8e72002-09-23 09:36:25 +00003141 VG_(record_pthread_error)( tid, (Char*)(arg[1]) );
njnd3040452003-05-19 15:04:06 +00003142 SET_PTHREQ_RETVAL(tid, 0);
sewardj4dced352002-06-04 22:54:20 +00003143 break;
3144
sewardj2cb00342002-06-28 01:46:26 +00003145 case VG_USERREQ__SET_FHSTACK_USED:
3146 do__set_fhstack_used( tid, (Int)(arg[1]) );
3147 break;
3148
3149 case VG_USERREQ__GET_FHSTACK_USED:
3150 do__get_fhstack_used( tid );
3151 break;
3152
3153 case VG_USERREQ__SET_FHSTACK_ENTRY:
3154 do__set_fhstack_entry( tid, (Int)(arg[1]),
3155 (ForkHandlerEntry*)(arg[2]) );
3156 break;
3157
3158 case VG_USERREQ__GET_FHSTACK_ENTRY:
3159 do__get_fhstack_entry( tid, (Int)(arg[1]),
3160 (ForkHandlerEntry*)(arg[2]) );
3161 break;
3162
sewardj77e466c2002-04-14 02:29:29 +00003163 case VG_USERREQ__SIGNAL_RETURNS:
3164 handle_signal_return(tid);
3165 break;
fitzhardinge98abfc72003-12-16 02:05:15 +00003166
thughesdaa34562004-06-27 12:48:53 +00003167 case VG_USERREQ__GET_STACK_INFO:
3168 do__get_stack_info( tid, (Int)(arg[1]), (StackInfo*)(arg[2]) );
3169 break;
3170
fitzhardinge98abfc72003-12-16 02:05:15 +00003171
3172 case VG_USERREQ__GET_SIGRT_MIN:
3173 SET_PTHREQ_RETVAL(tid, VG_(sig_rtmin));
3174 break;
3175
3176 case VG_USERREQ__GET_SIGRT_MAX:
3177 SET_PTHREQ_RETVAL(tid, VG_(sig_rtmax));
3178 break;
3179
3180 case VG_USERREQ__ALLOC_RTSIG:
3181 SET_PTHREQ_RETVAL(tid, VG_(sig_alloc_rtsig)((Int)arg[1]));
3182 break;
3183
fitzhardinge39de4b42003-10-31 07:12:21 +00003184 case VG_USERREQ__PRINTF: {
3185 int count =
nethercote3e901a22004-09-11 13:17:02 +00003186 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] );
fitzhardinge39de4b42003-10-31 07:12:21 +00003187 SET_CLREQ_RETVAL( tid, count );
3188 break; }
3189
fitzhardinge98abfc72003-12-16 02:05:15 +00003190
fitzhardinge39de4b42003-10-31 07:12:21 +00003191 case VG_USERREQ__INTERNAL_PRINTF: {
3192 int count =
nethercote3e901a22004-09-11 13:17:02 +00003193 VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (void*)arg[2] );
fitzhardinge39de4b42003-10-31 07:12:21 +00003194 SET_CLREQ_RETVAL( tid, count );
3195 break; }
3196
3197 case VG_USERREQ__PRINTF_BACKTRACE: {
3198 ExeContext *e = VG_(get_ExeContext)( tid );
3199 int count =
nethercote3e901a22004-09-11 13:17:02 +00003200 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] );
nethercote86c5dcb2004-09-05 21:32:37 +00003201 VG_(mini_stack_dump)(e->ips, VG_(clo_backtrace_size));
fitzhardinge39de4b42003-10-31 07:12:21 +00003202 SET_CLREQ_RETVAL( tid, count );
3203 break; }
3204
3205 case VG_USERREQ__INTERNAL_PRINTF_BACKTRACE: {
3206 ExeContext *e = VG_(get_ExeContext)( tid );
3207 int count =
nethercote3e901a22004-09-11 13:17:02 +00003208 VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (void*)arg[2] );
nethercote86c5dcb2004-09-05 21:32:37 +00003209 VG_(mini_stack_dump)(e->ips, VG_(clo_backtrace_size));
fitzhardinge39de4b42003-10-31 07:12:21 +00003210 SET_CLREQ_RETVAL( tid, count );
3211 break; }
3212
fitzhardinge98abfc72003-12-16 02:05:15 +00003213 case VG_USERREQ__GET_MALLOCFUNCS: {
3214 struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];
3215
njnd2252832004-11-26 10:53:33 +00003216 info->tl_malloc = (Addr)TL_(malloc);
3217 info->tl_calloc = (Addr)TL_(calloc);
3218 info->tl_realloc = (Addr)TL_(realloc);
3219 info->tl_memalign = (Addr)TL_(memalign);
3220 info->tl___builtin_new = (Addr)TL_(__builtin_new);
3221 info->tl___builtin_vec_new = (Addr)TL_(__builtin_vec_new);
3222 info->tl_free = (Addr)TL_(free);
3223 info->tl___builtin_delete = (Addr)TL_(__builtin_delete);
3224 info->tl___builtin_vec_delete = (Addr)TL_(__builtin_vec_delete);
fitzhardinge98abfc72003-12-16 02:05:15 +00003225
3226 info->arena_payload_szB = (Addr)VG_(arena_payload_szB);
3227
3228 info->clo_sloppy_malloc = VG_(clo_sloppy_malloc);
3229 info->clo_trace_malloc = VG_(clo_trace_malloc);
3230
3231 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
3232
3233 break;
3234 }
3235
njn25e49d8e72002-09-23 09:36:25 +00003236 /* Requests from the client program */
3237
3238 case VG_USERREQ__DISCARD_TRANSLATIONS:
3239 if (VG_(clo_verbosity) > 2)
3240 VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
3241 " addr %p, len %d\n",
3242 (void*)arg[1], arg[2] );
3243
sewardj97ad5522003-05-04 12:32:56 +00003244 VG_(invalidate_translations)( arg[1], arg[2], True );
njn25e49d8e72002-09-23 09:36:25 +00003245
njnd3040452003-05-19 15:04:06 +00003246 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
njn25e49d8e72002-09-23 09:36:25 +00003247 break;
3248
njn47363ab2003-04-21 13:24:40 +00003249 case VG_USERREQ__COUNT_ERRORS:
nethercotef2b11482004-08-02 12:36:01 +00003250 SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() );
njn47363ab2003-04-21 13:24:40 +00003251 break;
3252
sewardje663cb92002-04-12 10:26:32 +00003253 default:
njn25e49d8e72002-09-23 09:36:25 +00003254 if (VG_(needs).client_requests) {
nethercoted1b64b22004-11-04 18:22:28 +00003255 UWord ret;
sewardj34042512002-10-22 04:14:35 +00003256
njn25e49d8e72002-09-23 09:36:25 +00003257 if (VG_(clo_verbosity) > 2)
fitzhardinge98abfc72003-12-16 02:05:15 +00003258 VG_(printf)("client request: code %x, addr %p, len %d\n",
njn25e49d8e72002-09-23 09:36:25 +00003259 arg[0], (void*)arg[1], arg[2] );
3260
njn26f02512004-11-22 18:33:15 +00003261 if (TL_(handle_client_request) ( tid, arg, &ret ))
njnd3040452003-05-19 15:04:06 +00003262 SET_CLREQ_RETVAL(tid, ret);
njn25e49d8e72002-09-23 09:36:25 +00003263 } else {
sewardj34042512002-10-22 04:14:35 +00003264 static Bool whined = False;
3265
3266 if (!whined) {
nethercote7cc9c232004-01-21 15:08:04 +00003267 // Allow for requests in core, but defined by tools, which
njnd7994182003-10-02 13:44:04 +00003268 // have 0 and 0 in their two high bytes.
3269 Char c1 = (arg[0] >> 24) & 0xff;
3270 Char c2 = (arg[0] >> 16) & 0xff;
3271 if (c1 == 0) c1 = '_';
3272 if (c2 == 0) c2 = '_';
sewardj34042512002-10-22 04:14:35 +00003273 VG_(message)(Vg_UserMsg, "Warning:\n"
njnd7994182003-10-02 13:44:04 +00003274 " unhandled client request: 0x%x (%c%c+0x%x). Perhaps\n"
3275 " VG_(needs).client_requests should be set?\n",
3276 arg[0], c1, c2, arg[0] & 0xffff);
sewardj34042512002-10-22 04:14:35 +00003277 whined = True;
3278 }
njn25e49d8e72002-09-23 09:36:25 +00003279 }
sewardje663cb92002-04-12 10:26:32 +00003280 break;
3281 }
3282}
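/* Illustrative sketch (added; not part of this file): client code
   reaches do_client_request() via the client-request macros in the
   public valgrind.h header, which pack a request code and its
   arguments into the arg[] array seen above.  For example,

      #include "valgrind.h"
      if (RUNNING_ON_VALGRIND)
         fprintf(stderr, "running under Valgrind\n");

   issues VG_USERREQ__RUNNING_ON_VALGRIND and so lands in the case of
   that name above, which returns 1; run natively, the macro's magic
   sequence is a no-op and the expression stays 0. */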
3283
3284
sewardj6072c362002-04-19 14:40:57 +00003285/* ---------------------------------------------------------------------
3286 Sanity checking.
3287 ------------------------------------------------------------------ */
3288
3289/* Internal consistency checks on the sched/pthread structures. */
3290static
3291void scheduler_sanity ( void )
3292{
nethercote1f0173b2004-02-28 15:40:36 +00003293 vg_pthread_mutex_t* mx;
3294 vg_pthread_cond_t* cv;
sewardj6072c362002-04-19 14:40:57 +00003295 Int i;
jsgf855d93d2003-10-13 22:26:55 +00003296 struct timeout* top;
3297 UInt lasttime = 0;
3298
3299 for(top = timeouts; top != NULL; top = top->next) {
3300 vg_assert(top->time >= lasttime);
nethercote36881a22004-08-04 14:03:16 +00003301 vg_assert(is_valid_or_empty_tid(top->tid));
jsgf855d93d2003-10-13 22:26:55 +00003302
3303#if 0
3304 /* assert timeout entry is either stale, or associated with a
3305 thread in the right state
3306
3307 XXX disable for now - can be stale, but times happen to match
3308 */
3309 vg_assert(VG_(threads)[top->tid].awaken_at != top->time ||
3310 VG_(threads)[top->tid].status == VgTs_Sleeping ||
thughese321d492004-10-17 15:00:20 +00003311 VG_(threads)[top->tid].status == VgTs_WaitMX ||
jsgf855d93d2003-10-13 22:26:55 +00003312 VG_(threads)[top->tid].status == VgTs_WaitCV);
3313#endif
3314
3315 lasttime = top->time;
3316 }
sewardj5f07b662002-04-23 16:52:51 +00003317
sewardj6072c362002-04-19 14:40:57 +00003318 /* VG_(printf)("scheduler_sanity\n"); */
3319 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00003320 mx = VG_(threads)[i].associated_mx;
3321 cv = VG_(threads)[i].associated_cv;
3322 if (VG_(threads)[i].status == VgTs_WaitMX) {
sewardjbf290b92002-05-01 02:28:01 +00003323 /* If we're waiting on a MX: (1) the mx is not null, (2, 3)
3324 it's actually held by someone, since otherwise this thread
3325 is deadlocked, (4) the mutex's owner is not us, since
3326 otherwise this thread is also deadlocked. The logic in
3327 do_pthread_mutex_lock rejects attempts by a thread to lock
3328 a (non-recursive) mutex which it already owns.
sewardj05553872002-04-20 20:53:17 +00003329
sewardjbf290b92002-05-01 02:28:01 +00003330 (2) has been seen to fail sometimes. I don't know why.
3331 Possibly to do with signals. */
sewardj3b5d8862002-04-20 13:53:23 +00003332 vg_assert(cv == NULL);
sewardj05553872002-04-20 20:53:17 +00003333 /* 1 */ vg_assert(mx != NULL);
nethercote1f0173b2004-02-28 15:40:36 +00003334 /* 2 */ vg_assert(mx->__vg_m_count > 0);
3335 /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__vg_m_owner));
thughese321d492004-10-17 15:00:20 +00003336 /* 4 */ vg_assert((UInt)i != (ThreadId)mx->__vg_m_owner ||
3337 VG_(threads)[i].awaken_at != 0xFFFFFFFF);
sewardj3b5d8862002-04-20 13:53:23 +00003338 } else
sewardj018f7622002-05-15 21:13:39 +00003339 if (VG_(threads)[i].status == VgTs_WaitCV) {
sewardj3b5d8862002-04-20 13:53:23 +00003340 vg_assert(cv != NULL);
3341 vg_assert(mx != NULL);
sewardj6072c362002-04-19 14:40:57 +00003342 } else {
thughesf7269232004-10-16 16:17:06 +00003343 vg_assert(cv == NULL);
3344 vg_assert(mx == NULL);
sewardj6072c362002-04-19 14:40:57 +00003345 }
sewardjbf290b92002-05-01 02:28:01 +00003346
sewardj018f7622002-05-15 21:13:39 +00003347 if (VG_(threads)[i].status != VgTs_Empty) {
sewardjbf290b92002-05-01 02:28:01 +00003348 Int
sewardj018f7622002-05-15 21:13:39 +00003349 stack_used = (Addr)VG_(threads)[i].stack_highest_word
njncf45fd42004-11-24 16:30:22 +00003350 - (Addr)STACK_PTR(VG_(threads)[i].arch);
thughesdaa34562004-06-27 12:48:53 +00003351 Int
3352 stack_avail = VG_(threads)[i].stack_size
3353 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
3354 - VG_(threads)[i].stack_guard_size;
fitzhardinge98c4dc02004-03-16 08:27:29 +00003355 /* This test is a bit bogus - it doesn't take into account
3356 alternate signal stacks, for a start. Also, if a thread
3357      has its stack pointer somewhere strange, killing Valgrind
3358 isn't the right answer. */
3359 if (0 && i > 1 /* not the root thread */
thughesdaa34562004-06-27 12:48:53 +00003360 && stack_used >= stack_avail) {
sewardjbf290b92002-05-01 02:28:01 +00003361 VG_(message)(Vg_UserMsg,
njn25e49d8e72002-09-23 09:36:25 +00003362 "Error: STACK OVERFLOW: "
sewardjbf290b92002-05-01 02:28:01 +00003363 "thread %d: stack used %d, available %d",
thughesdaa34562004-06-27 12:48:53 +00003364 i, stack_used, stack_avail );
sewardjbf290b92002-05-01 02:28:01 +00003365 VG_(message)(Vg_UserMsg,
3366 "Terminating Valgrind. If thread(s) "
3367 "really need more stack, increase");
3368 VG_(message)(Vg_UserMsg,
rjwalsh7109a8c2004-09-02 00:31:02 +00003369 "VG_PTHREAD_STACK_SIZE in core.h and recompile.");
sewardjbf290b92002-05-01 02:28:01 +00003370 VG_(exit)(1);
3371 }
3372 }
sewardj6072c362002-04-19 14:40:57 +00003373 }
sewardj5f07b662002-04-23 16:52:51 +00003374
3375 for (i = 0; i < VG_N_THREAD_KEYS; i++) {
3376 if (!vg_thread_keys[i].inuse)
3377 vg_assert(vg_thread_keys[i].destructor == NULL);
3378 }
sewardj6072c362002-04-19 14:40:57 +00003379}
3380
3381
sewardje663cb92002-04-12 10:26:32 +00003382/*--------------------------------------------------------------------*/
3383/*--- end vg_scheduler.c ---*/
3384/*--------------------------------------------------------------------*/