/*--------------------------------------------------------------------*/
/*--- A user-space pthreads implementation.        vg_scheduler.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an extensible x86 protected-mode
   emulator for monitoring program execution on x86-Unixes.

   Copyright (C) 2000-2004 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "valgrind.h"   /* for VG_USERREQ__RUNNING_ON_VALGRIND and
                           VG_USERREQ__DISCARD_TRANSLATIONS, and others */
#include "core.h"


/* ---------------------------------------------------------------------
   Types and globals for the scheduler.
   ------------------------------------------------------------------ */

/* ThreadId and ThreadState are defined in core.h. */

/* Globals.  A statically allocated array of threads.  NOTE: [0] is
   never used, to simplify the simulation of initialisers for
   LinuxThreads. */
ThreadState VG_(threads)[VG_N_THREADS];

/* The process' fork-handler stack. */
static Int              vg_fhstack_used = 0;
static ForkHandlerEntry vg_fhstack[VG_N_FORKHANDLERSTACK];


/* The tid of the thread currently running, or VG_INVALID_THREADID if
   none. */
static ThreadId vg_tid_currently_running = VG_INVALID_THREADID;


/* vg_oursignalhandler() might longjmp().  Here's the jmp_buf. */
static jmp_buf scheduler_jmpbuf;
/* This says whether scheduler_jmpbuf is actually valid.  Needed so
   that our signal handler doesn't longjmp when the buffer isn't
   actually valid. */
static Bool scheduler_jmpbuf_valid = False;
/* ... and if so, here's the signal which caused it to do so. */
static Int longjmpd_on_signal;
/* If the current thread gets a synchronous unresumable signal, then
   its details are placed here by the signal handler, to be passed to
   the application's signal handler later on. */
static vki_siginfo_t unresumable_siginfo;

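/* How the four variables above cooperate (a summary of the protocol
   implemented later in this file): run_thread_for_a_while() does
   __builtin_setjmp(scheduler_jmpbuf) and sets scheduler_jmpbuf_valid
   before entering the dispatcher.  If the client takes a fault, the
   signal handler ends up in VG_(resume_scheduler)(), which saves the
   siginfo in unresumable_siginfo, records the signal number in
   longjmpd_on_signal, and longjmps back, making
   run_thread_for_a_while() return VG_TRC_UNRESUMABLE_SIGNAL.  Phase 3
   of the scheduler then delivers the saved siginfo to the client and
   clears .si_signo. */
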
/* If != VG_INVALID_THREADID, this is the preferred tid to schedule */
static ThreadId prefer_sched = VG_INVALID_THREADID;

/* Keeping track of keys. */
typedef
   struct {
      /* Has this key been allocated ? */
      Bool inuse;
      /* If .inuse==True, records the address of the associated
         destructor, or NULL if none. */
      void (*destructor)(void*);
   }
   ThreadKeyState;

/* And our array of thread keys. */
static ThreadKeyState vg_thread_keys[VG_N_THREAD_KEYS];

typedef UInt ThreadKey;

/* The scheduler needs to know the address of the __libc_freeres
   wrapper so it can be called at program exit. */
static Addr __libc_freeres_wrapper;

/* Forwards */
static void do_client_request ( ThreadId tid, UWord* args );
static void scheduler_sanity ( void );
static void do_pthread_mutex_timedlock_TIMEOUT ( ThreadId tid );
static void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid );
static void maybe_rendezvous_joiners_and_joinees ( void );

/* Stats. */
static UInt n_scheduling_events_MINOR = 0;
static UInt n_scheduling_events_MAJOR = 0;

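/* MAJOR events count iterations of the Phase 1 thread-selection loop
   in do_scheduler() below (signal routing, proxy polling, picking a
   runnable thread); MINOR events count dispatches of a thread into
   the innerloop for (up to) one scheduling quantum in Phase 2. */
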
void VG_(print_scheduler_stats)(void)
{
   VG_(message)(Vg_DebugMsg,
      " %d/%d major/minor sched events.",
      n_scheduling_events_MAJOR, n_scheduling_events_MINOR);
}

/* ---------------------------------------------------------------------
   Helper functions for the scheduler.
   ------------------------------------------------------------------ */

__inline__
Bool VG_(is_valid_tid) ( ThreadId tid )
{
   /* tid is unsigned, hence no < 0 test. */
   if (tid == 0) return False;
   if (tid >= VG_N_THREADS) return False;
   if (VG_(threads)[tid].status == VgTs_Empty) return False;
   return True;
}


__inline__
Bool is_valid_or_empty_tid ( ThreadId tid )
{
   /* tid is unsigned, hence no < 0 test. */
   if (tid == 0) return False;
   if (tid >= VG_N_THREADS) return False;
   return True;
}


/* For constructing error messages only: try and identify a thread
   whose stack satisfies the predicate p, or return VG_INVALID_THREADID
   if none do.
*/
ThreadId VG_(first_matching_thread_stack)
   ( Bool (*p) ( Addr stack_min, Addr stack_max, void* d ),
     void* d )
{
   ThreadId tid, tid_to_skip;

   tid_to_skip = VG_INVALID_THREADID;

   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (VG_(threads)[tid].status == VgTs_Empty) continue;
      if (tid == tid_to_skip) continue;
      if ( p ( STACK_PTR(VG_(threads)[tid].arch),
               VG_(threads)[tid].stack_highest_word, d ) )
         return tid;
   }
   return VG_INVALID_THREADID;
}


/* Print the scheduler status. */
void VG_(pp_sched_status) ( void )
{
   Int i;
   VG_(printf)("\nsched status:\n");
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty) continue;
      VG_(printf)("\nThread %d: status = ", i);
      switch (VG_(threads)[i].status) {
         case VgTs_Runnable:   VG_(printf)("Runnable"); break;
         case VgTs_WaitJoinee: VG_(printf)("WaitJoinee(%d)",
                                           VG_(threads)[i].joiner_jee_tid);
                               break;
         case VgTs_WaitJoiner: VG_(printf)("WaitJoiner"); break;
         case VgTs_Sleeping:   VG_(printf)("Sleeping"); break;
         case VgTs_WaitMX:     VG_(printf)("WaitMX"); break;
         case VgTs_WaitCV:     VG_(printf)("WaitCV"); break;
         case VgTs_WaitSys:    VG_(printf)("WaitSys"); break;
         default:              VG_(printf)("???"); break;
      }
      VG_(printf)(", associated_mx = %p, associated_cv = %p\n",
                  VG_(threads)[i].associated_mx,
                  VG_(threads)[i].associated_cv );
      VG_(pp_ExeContext)(
         VG_(get_ExeContext2)( INSTR_PTR(VG_(threads)[i].arch),
                               FRAME_PTR(VG_(threads)[i].arch),
                               STACK_PTR(VG_(threads)[i].arch),
                               VG_(threads)[i].stack_highest_word)
      );
   }
   VG_(printf)("\n");
}

static
void print_sched_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, " SCHED[%d]: %s", tid, what );
}

static
void print_pthread_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, "PTHREAD[%d]: %s", tid, what );
}

static
Char* name_of_sched_event ( UInt event )
{
   switch (event) {
      case VG_TRC_EBP_JMP_SYSCALL:    return "SYSCALL";
      case VG_TRC_EBP_JMP_CLIENTREQ:  return "CLIENTREQ";
      case VG_TRC_EBP_JMP_YIELD:      return "YIELD";
      case VG_TRC_INNER_COUNTERZERO:  return "COUNTERZERO";
      case VG_TRC_INNER_FASTMISS:     return "FASTMISS";
      case VG_TRC_UNRESUMABLE_SIGNAL: return "FATALSIGNAL";
      default:                        return "??UNKNOWN??";
   }
}


/* Allocate a completely empty ThreadState record. */
static
ThreadId vg_alloc_ThreadState ( void )
{
   Int i;
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty)
         return i;
   }
   VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
   VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
   VG_(core_panic)("VG_N_THREADS is too low");
   /*NOTREACHED*/
}

ThreadState *VG_(get_ThreadState)(ThreadId tid)
{
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   return &VG_(threads)[tid];
}

/* Return True precisely when get_current_tid can return
   successfully. */
Bool VG_(running_a_thread) ( void )
{
   if (vg_tid_currently_running == VG_INVALID_THREADID)
      return False;
   /* Otherwise, it must be a valid thread ID. */
   vg_assert(VG_(is_valid_tid)(vg_tid_currently_running));
   return True;
}

ThreadId VG_(get_current_tid) ( void )
{
   if (vg_tid_currently_running == VG_INVALID_THREADID)
      VG_(core_panic)("VG_(get_current_tid): not running generated code");
   /* Otherwise, it must be a valid thread ID. */
   vg_assert(VG_(is_valid_tid)(vg_tid_currently_running));
   return vg_tid_currently_running;
}

void VG_(resume_scheduler)(Int sigNo, vki_siginfo_t *info)
{
   if (scheduler_jmpbuf_valid) {
      /* Can't continue; must longjmp back to the scheduler and thus
         enter the sighandler immediately. */
      vg_assert(vg_tid_currently_running != VG_INVALID_THREADID);
      VG_(memcpy)(&unresumable_siginfo, info, sizeof(vki_siginfo_t));

      longjmpd_on_signal = sigNo;
      __builtin_longjmp(scheduler_jmpbuf,1);
   } else {
      vg_assert(vg_tid_currently_running == VG_INVALID_THREADID);
   }
}


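/* Run thread tid for a while by entering the dispatcher.  Returns a
   VG_TRC_* code saying why it stopped (syscall, client request,
   yield, timeslice exhausted, fast-cache miss), or
   VG_TRC_UNRESUMABLE_SIGNAL if the client faulted and
   VG_(resume_scheduler) longjmp'd back to the setjmp below.  Also
   accumulates the number of basic blocks executed into
   VG_(bbs_done), computed from the dispatch-counter decrements. */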
static
UInt run_thread_for_a_while ( ThreadId tid )
{
   volatile UInt trc = 0;
   volatile Int  dispatch_ctr_SAVED = VG_(dispatch_ctr);
   volatile Int  done_this_time;

   /* For paranoia purposes only */
   volatile Addr a_vex    = (Addr) & VG_(threads)[tid].arch.vex;
   volatile Addr a_vexsh  = (Addr) & VG_(threads)[tid].arch.vex_shadow;
   volatile Addr a_spill  = (Addr) & VG_(threads)[tid].arch.vex_spill;
   volatile UInt sz_vex   = (UInt) sizeof VG_(threads)[tid].arch.vex;
   volatile UInt sz_vexsh = (UInt) sizeof VG_(threads)[tid].arch.vex_shadow;
   volatile UInt sz_spill = (UInt) sizeof VG_(threads)[tid].arch.vex_spill;

   /* Paranoia */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   vg_assert(!scheduler_jmpbuf_valid);
   vg_assert(vg_tid_currently_running == VG_INVALID_THREADID);

   /* Even more paranoia.  Check that what we have matches
      Vex's guest state layout requirements. */

#  define IS_8_ALIGNED(_xx) (0 == ((_xx) & 7))

   vg_assert(IS_8_ALIGNED(sz_vex));
   vg_assert(IS_8_ALIGNED(sz_vexsh));
   vg_assert(IS_8_ALIGNED(a_vex));
   vg_assert(IS_8_ALIGNED(a_vexsh));

   vg_assert(sz_vex == sz_vexsh);
   vg_assert(a_vex + sz_vex == a_vexsh);

   vg_assert(sz_spill == LibVEX_N_SPILL_BYTES);
   vg_assert(a_vex + 2 * sz_vex == a_spill);

#  undef IS_8_ALIGNED

   VGP_PUSHCC(VgpRun);

   /* there should be no undealt-with signals */
   vg_assert(unresumable_siginfo.si_signo == 0);

   if (__builtin_setjmp(scheduler_jmpbuf) == 0) {
      /* try this ... */
      vg_tid_currently_running = tid;
      scheduler_jmpbuf_valid   = True;
      trc = VG_(run_innerloop)( &VG_(threads)[tid].arch.vex );
      scheduler_jmpbuf_valid   = False;
      vg_tid_currently_running = VG_INVALID_THREADID;
      /* We get here if the client didn't take a fault. */
   } else {
      /* We get here if the client took a fault, which caused our
         signal handler to longjmp. */
      scheduler_jmpbuf_valid   = False;
      vg_tid_currently_running = VG_INVALID_THREADID;
      vg_assert(trc == 0);
      trc = VG_TRC_UNRESUMABLE_SIGNAL;
   }

   vg_assert(!scheduler_jmpbuf_valid);

   done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 0;

   vg_assert(done_this_time >= 0);
   VG_(bbs_done) += (ULong)done_this_time;

   VGP_POPCC(VgpRun);
   return trc;
}


static
void mostly_clear_thread_record ( ThreadId tid )
{
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   VGA_(clear_thread)(&VG_(threads)[tid].arch);
   VG_(threads)[tid].tid                  = tid;
   VG_(threads)[tid].status               = VgTs_Empty;
   VG_(threads)[tid].associated_mx        = NULL;
   VG_(threads)[tid].associated_cv        = NULL;
   VG_(threads)[tid].awaken_at            = 0;
   VG_(threads)[tid].joinee_retval        = NULL;
   VG_(threads)[tid].joiner_thread_return = NULL;
   VG_(threads)[tid].joiner_jee_tid       = VG_INVALID_THREADID;
   VG_(threads)[tid].detached             = False;
   VG_(threads)[tid].cancel_st   = True; /* PTHREAD_CANCEL_ENABLE */
   VG_(threads)[tid].cancel_ty   = True; /* PTHREAD_CANCEL_DEFERRED */
   VG_(threads)[tid].cancel_pend = NULL; /* not pending */
   VG_(threads)[tid].custack_used = 0;
   VG_(sigemptyset)(&VG_(threads)[tid].sig_mask);
   VG_(sigfillset)(&VG_(threads)[tid].eff_sig_mask);
   VG_(threads)[tid].sigqueue_head = 0;
   VG_(threads)[tid].sigqueue_tail = 0;
   VG_(threads)[tid].specifics_ptr = NULL;

   VG_(threads)[tid].syscallno = -1;
   VG_(threads)[tid].sys_flags = 0;

   VG_(threads)[tid].proxy = NULL;

   /* start with no altstack */
   VG_(threads)[tid].altstack.ss_sp    = (void *)0xdeadbeef;
   VG_(threads)[tid].altstack.ss_size  = 0;
   VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
}



/* Initialise the scheduler.  Create a single "main" thread ready to
   run, with special ThreadId of one.  This is called at startup.  The
   caller subsequently initialises the guest state components of
   this main thread, thread 1.
*/
void VG_(scheduler_init) ( void )
{
   Int i;
   ThreadId tid_main;

   for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
      mostly_clear_thread_record(i);
      VG_(threads)[i].stack_size         = 0;
      VG_(threads)[i].stack_base         = (Addr)NULL;
      VG_(threads)[i].stack_guard_size   = 0;
      VG_(threads)[i].stack_highest_word = (Addr)NULL;
   }

   for (i = 0; i < VG_N_THREAD_KEYS; i++) {
      vg_thread_keys[i].inuse      = False;
      vg_thread_keys[i].destructor = NULL;
   }

   vg_fhstack_used = 0;

   /* Assert this is thread one, which has certain magic
      properties. */
   tid_main = vg_alloc_ThreadState();
   vg_assert(tid_main == 1);
   VG_(threads)[tid_main].status = VgTs_Runnable;

   VG_(threads)[tid_main].stack_highest_word = VG_(clstk_end) - 4;
   VG_(threads)[tid_main].stack_base = VG_(clstk_base);
   VG_(threads)[tid_main].stack_size = VG_(client_rlimit_stack).rlim_cur;

   /* Not running client code right now. */
   scheduler_jmpbuf_valid = False;

   /* Proxy for main thread */
   VG_(proxy_create)(tid_main);
}



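/* Note: returning from a signal handler is not a no-op, because the
   signal may have interrupted the thread part-way through a blocking
   operation.  Hence the rechecks below: a join rendezvous may have
   completed while the handler ran, a mutex we were queued on may
   have been unlocked meanwhile, and an interrupted nanosleep() must
   be made to return -VKI_EINTR if blocked syscalls are not being
   restarted. */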
/* vthread tid is returning from a signal handler; modify its
   stack/regs accordingly. */
static
void handle_signal_return ( ThreadId tid )
{
   Bool restart_blocked_syscalls;
   struct vki_timespec * rem;

   vg_assert(VG_(is_valid_tid)(tid));

   restart_blocked_syscalls = VG_(signal_returns)(tid);

   /* If we were interrupted in the middle of a rendezvous
      then check the rendezvous hasn't completed while we
      were busy handling the signal. */
   if (VG_(threads)[tid].status == VgTs_WaitJoiner ||
       VG_(threads)[tid].status == VgTs_WaitJoinee ) {
      maybe_rendezvous_joiners_and_joinees();
   }

   /* If we were interrupted while waiting on a mutex then check that
      it hasn't been unlocked while we were busy handling the signal. */
   if (VG_(threads)[tid].status == VgTs_WaitMX &&
       VG_(threads)[tid].associated_mx->__vg_m_count == 0) {
      vg_pthread_mutex_t* mutex = VG_(threads)[tid].associated_mx;
      mutex->__vg_m_count = 1;
      mutex->__vg_m_owner = (/*_pthread_descr*/void*)tid;
      VG_(threads)[tid].status        = VgTs_Runnable;
      VG_(threads)[tid].associated_mx = NULL;
      /* m_edx already holds pth_mx_lock() success (0) */
   }

   if (restart_blocked_syscalls)
      /* Easy; we don't have to do anything. */
      return;

   if (VG_(threads)[tid].status == VgTs_Sleeping
       && SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_nanosleep) {
      /* We interrupted a nanosleep().  The right thing to do is to
         write the unused time to nanosleep's second param, but that's
         too much effort ... we just say that 1 nanosecond was not
         used, and return EINTR. */
      rem = (struct vki_timespec*)SYSCALL_ARG2(VG_(threads)[tid].arch);
      if (rem != NULL) {
         rem->tv_sec = 0;
         rem->tv_nsec = 1;
      }
      SET_SYSCALL_RETVAL(tid, -VKI_EINTR);
      VG_(threads)[tid].status = VgTs_Runnable;
      return;
   }

   /* All other cases?  Just return. */
}


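/* Timeouts are kept in a singly-linked list, sorted by increasing
   wakeup time; add_timeout() below walks the list to find the
   insertion point.  For example, with pending timeouts at t=50 and
   t=200, add_timeout(tid, 100) leaves the list as 50 -> 100 -> 200,
   so idle() need only scan the front of the list for expired
   entries.  Entries are unlinked and freed only by the mop-up pass
   in idle(). */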
struct timeout {
   UInt     time;   /* time we should awaken */
   ThreadId tid;    /* thread which cares about this timeout */
   struct timeout *next;
};

static struct timeout *timeouts;

static void add_timeout(ThreadId tid, UInt time)
{
   struct timeout *t = VG_(arena_malloc)(VG_AR_CORE, sizeof(*t));
   struct timeout **prev, *tp;

   t->time = time;
   t->tid = tid;

   if (VG_(clo_trace_sched)) {
      Char msg_buf[100];
      VG_(sprintf)(msg_buf, "add_timeout: now=%u adding timeout at %u",
                   VG_(read_millisecond_timer)(), time);
      print_sched_event(tid, msg_buf);
   }

   for(tp = timeouts, prev = &timeouts;
       tp != NULL && tp->time < time;
       prev = &tp->next, tp = tp->next)
      ;
   t->next = tp;
   *prev = t;
}

static
void sched_do_syscall ( ThreadId tid )
{
   Int  syscall_no;
   Char msg_buf[100];

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   syscall_no = SYSCALL_NUM(VG_(threads)[tid].arch);

   /* Special-case nanosleep because we can.  But should we?

      XXX not doing so for now, because it doesn't seem to work
      properly, and we can use the syscall nanosleep just as easily.
   */
   if (0 && syscall_no == __NR_nanosleep) {
      UInt t_now, t_awaken;
      struct vki_timespec* req;
      req = (struct vki_timespec*)SYSCALL_ARG1(VG_(threads)[tid].arch);

      if (req->tv_sec < 0 || req->tv_nsec < 0 || req->tv_nsec >= 1000000000) {
         SET_SYSCALL_RETVAL(tid, -VKI_EINVAL);
         return;
      }

      t_now = VG_(read_millisecond_timer)();
      t_awaken
         = t_now
           + (UInt)1000ULL * (UInt)(req->tv_sec)
           + (UInt)(req->tv_nsec) / 1000000;
      VG_(threads)[tid].status    = VgTs_Sleeping;
      VG_(threads)[tid].awaken_at = t_awaken;
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf, "at %d: nanosleep for %d",
                      t_now, t_awaken-t_now);
         print_sched_event(tid, msg_buf);
      }
      add_timeout(tid, t_awaken);
      /* Force the scheduler to run something else for a while. */
      return;
   }

   /* If pre_syscall returns true, then we're done immediately */
   if (VG_(pre_syscall)(tid)) {
      VG_(post_syscall(tid, True));
      vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   } else {
      vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
   }
}


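/* idle() below works in three steps: (1) scan the front of the
   timeout list to choose a poll() timeout (zero if some timeout has
   already expired, the delay until the soonest future one otherwise,
   or infinite if there are none); (2) poll() on the proxy result fd,
   so that a completing syscall or arriving signal wakes us early;
   (3) after the poll, wake every thread whose timeout has expired,
   unlinking and freeing those list entries. */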
/* Sleep for a while, but be willing to be woken. */
static
void idle ( void )
{
   struct vki_pollfd pollfd[1];
   Int delta = -1;
   Int fd = VG_(proxy_resfd)();

   pollfd[0].fd = fd;
   pollfd[0].events = VKI_POLLIN;

   /* Look through the nearest timeouts, looking for the next future
      one (there may be stale past timeouts).  They'll all be mopped
      up below when the poll() finishes. */
   if (timeouts != NULL) {
      struct timeout *tp;
      Bool wicked = False;
      UInt now = VG_(read_millisecond_timer)();

      for(tp = timeouts; tp != NULL && tp->time < now; tp = tp->next) {
         /* If a thread is still sleeping in the past, make it runnable */
         ThreadState *tst = VG_(get_ThreadState)(tp->tid);
         if (tst->status == VgTs_Sleeping)
            tst->status = VgTs_Runnable;
         wicked = True;   /* no sleep for the wicked */
      }

      if (tp != NULL) {
         vg_assert(tp->time >= now);
         /* limit the signed int delta to INT_MAX */
         if ((tp->time - now) <= 0x7FFFFFFFU) {
            delta = tp->time - now;
         } else {
            delta = 0x7FFFFFFF;
         }
      }
      if (wicked)
         delta = 0;
   }

   /* gotta wake up for something! */
   vg_assert(fd != -1 || delta != -1);

   /* If we need to do signal routing, then poll for pending signals
      every VG_(clo_signal_polltime) mS */
   if (VG_(do_signal_routing) && (delta > VG_(clo_signal_polltime) || delta == -1))
      delta = VG_(clo_signal_polltime);

   if (VG_(clo_trace_sched)) {
      Char msg_buf[100];
      VG_(sprintf)(msg_buf, "idle: waiting for %dms and fd %d",
                   delta, fd);
      print_sched_event(0, msg_buf);
   }

   VG_(poll)(pollfd, fd != -1 ? 1 : 0, delta);

   /* See if there's anything on the timeout list which needs
      waking, and mop up anything in the past. */
   {
      UInt now = VG_(read_millisecond_timer)();
      struct timeout *tp;

      tp = timeouts;

      while(tp && tp->time <= now) {
         struct timeout *dead;
         ThreadState *tst;

         tst = VG_(get_ThreadState)(tp->tid);

         if (VG_(clo_trace_sched)) {
            Char msg_buf[100];
            VG_(sprintf)(msg_buf, "idle: now=%u removing timeout at %u",
                         now, tp->time);
            print_sched_event(tp->tid, msg_buf);
         }

         /* If awaken_at != tp->time then it means the timeout is
            stale and we should just ignore it. */
         if(tst->awaken_at == tp->time) {
            switch(tst->status) {
               case VgTs_Sleeping:
                  tst->awaken_at = 0xFFFFFFFF;
                  tst->status = VgTs_Runnable;
                  break;

               case VgTs_WaitMX:
                  do_pthread_mutex_timedlock_TIMEOUT(tst->tid);
                  break;

               case VgTs_WaitCV:
                  do_pthread_cond_timedwait_TIMEOUT(tst->tid);
                  break;

               default:
                  /* This is a bit odd but OK; if a thread had a timeout
                     but woke for some other reason (signal, condvar
                     wakeup), then it will still be on the list. */
                  if (0)
                     VG_(printf)("idle(): unexpected status tp->tid=%d tst->status = %d\n",
                                 tp->tid, tst->status);
                  break;
            }
         }

         dead = tp;
         tp = tp->next;

         VG_(arena_free)(VG_AR_CORE, dead);
      }

      timeouts = tp;
   }
}


/* ---------------------------------------------------------------------
   The scheduler proper.
   ------------------------------------------------------------------ */

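/* In outline, each iteration of the top-level loop in do_scheduler()
   is: Phase 0, sanity checks; Phase 1, route signals, collect proxy
   LWP results and pick the next runnable thread (or detect deadlock,
   or idle() until something becomes runnable); Phase 2, run that
   thread for up to one quantum, servicing translation-cache misses,
   client requests and syscalls in-line; Phase 3, handle whatever
   non-trivial event finally stopped it. */
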
// For handling of the default action of a fatal signal.
// jmp_buf for fatal signals; fatal_signal_jmpbuf_ptr is NULL until
// the time is right for it to be used.
static jmp_buf  fatal_signal_jmpbuf;
static jmp_buf* fatal_signal_jmpbuf_ptr;
static Int      fatal_sigNo;   // the fatal signal, if it happens

/* Run user-space threads until either
   * Deadlock occurs
   * One thread asks to shutdown Valgrind
   * The specified number of basic blocks has gone by.
*/
VgSchedReturnCode do_scheduler ( Int* exitcode, ThreadId* last_run_tid )
{
   ThreadId tid, tid_next;
   UInt     trc;
   Int      done_this_time, n_in_bounded_wait;
   Int      n_exists, n_waiting_for_reaper;
   Addr     trans_addr;

   /* Start with the root thread.  tid in general indicates the
      currently runnable/just-finished-running thread. */
   *last_run_tid = tid = 1;

   /* This is the top level scheduler loop.  It falls into three
      phases. */
   while (True) {

      /* ======================= Phase 0 of 3 =======================
         Be paranoid.  Always a good idea. */
    stage1:
      scheduler_sanity();
      VG_(sanity_check_general)( False );

      /* ======================= Phase 1 of 3 =======================
         Handle I/O completions and signals.  This may change the
         status of various threads.  Then select a new thread to run,
         or declare deadlock, or sleep if there are no runnable
         threads but some are blocked on I/O. */

      /* Do the following loop until a runnable thread is found, or
         deadlock is detected. */
      while (True) {

         /* For stats purposes only. */
         n_scheduling_events_MAJOR++;

         /* Route signals to their proper places */
         VG_(route_signals)();

         /* See if any of the proxy LWPs report any activity: either a
            syscall completing or a signal arriving. */
         VG_(proxy_results)();

         /* Try and find a thread (tid) to run. */
         tid_next = tid;
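         /* If VG_(need_resched) nominated a preferred thread, start
            the search just before it: the loop below increments
            tid_next before examining it, so prefer_sched-1 makes
            prefer_sched the first candidate considered. */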
         if (prefer_sched != VG_INVALID_THREADID) {
            tid_next = prefer_sched-1;
            prefer_sched = VG_INVALID_THREADID;
         }
         n_in_bounded_wait    = 0;
         n_exists             = 0;
         n_waiting_for_reaper = 0;
         while (True) {
            tid_next++;
            if (tid_next >= VG_N_THREADS) tid_next = 1;
            if (VG_(threads)[tid_next].status == VgTs_Sleeping
                || VG_(threads)[tid_next].status == VgTs_WaitSys
                || (VG_(threads)[tid_next].status == VgTs_WaitMX
                    && VG_(threads)[tid_next].awaken_at != 0xFFFFFFFF)
                || (VG_(threads)[tid_next].status == VgTs_WaitCV
                    && VG_(threads)[tid_next].awaken_at != 0xFFFFFFFF))
               n_in_bounded_wait ++;
            if (VG_(threads)[tid_next].status != VgTs_Empty)
               n_exists++;
            if (VG_(threads)[tid_next].status == VgTs_WaitJoiner)
               n_waiting_for_reaper++;
            if (VG_(threads)[tid_next].status == VgTs_Runnable)
               break; /* We can run this one. */
            if (tid_next == tid)
               break; /* been all the way round */
         }
         tid = tid_next;

         if (VG_(threads)[tid].status == VgTs_Runnable) {
            /* Found a suitable candidate.  Fall out of this loop, so
               we can advance to stage 2 of the scheduler: actually
               running the thread. */
            break;
         }

         /* All threads have exited - pretend someone called exit() */
         if (n_waiting_for_reaper == n_exists) {
            *exitcode = 0; /* ? */
            return VgSrc_ExitSyscall;
         }

         /* We didn't find a runnable thread.  Now what? */
         if (n_in_bounded_wait == 0) {
            /* No runnable threads and no prospect of any appearing
               even if we wait for an arbitrary length of time.  In
               short, we have a deadlock. */
            VG_(pp_sched_status)();
            return VgSrc_Deadlock;
         }

         /* Nothing needs doing, so sit in idle until either a timeout
            happens or a thread's syscall completes. */
         idle();
         /* pp_sched_status(); */
         /* VG_(printf)("."); */
      }


      /* ======================= Phase 2 of 3 =======================
         Wahey!  We've finally decided that thread tid is runnable, so
         we now do that.  Run it for as much of a quantum as possible.
         Trivial requests are handled and the thread continues.  The
         aim is not to do too many of Phase 1 since it is expensive. */

      if (0)
         VG_(printf)("SCHED: tid %d\n", tid);

      VG_TRACK( thread_run, tid );

      /* Figure out how many bbs to ask vg_run_innerloop to do.  Note
         that it decrements the counter before testing it for zero, so
         that if VG_(dispatch_ctr) is set to N you get at most N-1
         iterations.  Also this means that VG_(dispatch_ctr) must
         exceed zero before entering the innerloop.  Also also, the
         decrement is done before the bb is actually run, so you
         always get at least one decrement even if nothing happens.
      */
      VG_(dispatch_ctr) = VG_SCHEDULING_QUANTUM + 1;

      /* paranoia ... */
      vg_assert(VG_(threads)[tid].tid == tid);

      /* Actually run thread tid. */
      while (True) {

         *last_run_tid = tid;

         /* For stats purposes only. */
         n_scheduling_events_MINOR++;

         if (0)
            VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs",
                                      tid, VG_(dispatch_ctr) - 1 );
#        if 0
         if (VG_(bbs_done) > 31700000 + 0) {
            dispatch_ctr_SAVED = VG_(dispatch_ctr) = 2;
            VG_(translate)(&VG_(threads)[tid],
                           INSTR_PTR(VG_(threads)[tid].arch),
                           /*debugging*/True);
         }
         vg_assert(INSTR_PTR(VG_(threads)[tid].arch) != 0);
#        endif

         trc = run_thread_for_a_while ( tid );

#        if 0
         if (0 == INSTR_PTR(VG_(threads)[tid].arch)) {
            VG_(printf)("tid = %d, dc = %llu\n", tid, VG_(bbs_done));
            vg_assert(0 != INSTR_PTR(VG_(threads)[tid].arch));
         }
#        endif

         /* Deal quickly with trivial scheduling events, and resume the
            thread. */

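         /* A FASTMISS means the dispatcher's small fast cache of
            (client address -> translation) pairs missed.  Fall back
            to the full translation-table lookup, and only if that
            also misses make a fresh translation.  This is the
            common, cheap case; the thread resumes immediately
            afterwards. */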
         if (trc == VG_TRC_INNER_FASTMISS) {
            Addr ip = INSTR_PTR(VG_(threads)[tid].arch);

            vg_assert(VG_(dispatch_ctr) > 1);

            /* Trivial event.  Miss in the fast-cache.  Do a full
               lookup for it. */
            trans_addr = VG_(search_transtab)( ip );
            if (trans_addr == (Addr)0) {
               /* Not found; we need to request a translation. */
               if (VG_(translate)( tid, ip, /*debug*/False )) {
                  trans_addr = VG_(search_transtab)( ip );
                  if (trans_addr == (Addr)0)
                     VG_(core_panic)("VG_TRC_INNER_FASTMISS: missing tt_fast entry");
               } else {
                  // If VG_(translate)() fails, it's because it had to throw
                  // a signal because the client jumped to a bad address.
                  // This means VG_(deliver_signal)() will have been called
                  // by now, and the program counter will now be pointing to
                  // the start of the signal handler (if there is no
                  // handler, things would have been aborted by now), so do
                  // nothing, and things will work out next time around the
                  // scheduler loop.
               }
            }
            continue; /* with this thread */
         }

         if (trc == VG_TRC_EBP_JMP_CLIENTREQ) {
            UWord* args  = (UWord*)(CLREQ_ARGS(VG_(threads)[tid].arch));
            UWord  reqno = args[0];
            /* VG_(printf)("request 0x%x\n", reqno); */

            /* Are we really absolutely totally quitting? */
            if (reqno == VG_USERREQ__LIBC_FREERES_DONE) {
               if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                  VG_(message)(Vg_DebugMsg,
                     "__libc_freeres() done; really quitting!");
               }
               return VgSrc_ExitSyscall;
            }

            do_client_request(tid,args);
            /* Following the request, we try and continue with the
               same thread if still runnable.  If not, go back to
               Stage 1 to select a new thread to run. */
            if (VG_(threads)[tid].status == VgTs_Runnable
                && reqno != VG_USERREQ__PTHREAD_YIELD)
               continue; /* with this thread */
            else
               goto stage1;
         }

         if (trc == VG_TRC_EBP_JMP_SYSCALL) {
            /* Do a syscall for the vthread tid.  This could cause it
               to become non-runnable.  One special case: spot the
               client doing calls to exit() and take this as the cue
               to exit. */
#           if 0
            { UInt* esp; Int i;
              esp=(UInt*)STACK_PTR(VG_(threads)[tid].arch);
              VG_(printf)("\nBEFORE\n");
              for (i = 10; i >= -10; i--)
                 VG_(printf)("%2d %p = 0x%x\n", i, &esp[i], esp[i]);
            }
#           endif

            /* Deal with calling __libc_freeres() at exit.  When the
               client does __NR_exit, it's exiting for good.  So we
               then run __libc_freeres_wrapper.  That quits by
               doing VG_USERREQ__LIBC_FREERES_DONE, and at that point
               we really exit.  To be safe we nuke all other threads
               currently running.

               If not valgrinding (cachegrinding, etc) don't do this.
               __libc_freeres does some invalid frees which crash
               the unprotected malloc/free system. */

            if (SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit
                || SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit_group
               ) {

               /* Remember the supplied argument. */
               *exitcode = SYSCALL_ARG1(VG_(threads)[tid].arch);

               // Inform tool about regs read by syscall
               VG_TRACK( pre_reg_read, Vg_CoreSysCall, tid, "(syscallno)",
                         O_SYSCALL_NUM, sizeof(UWord) );

               if (SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit)
                  VG_TRACK( pre_reg_read, Vg_CoreSysCall, tid,
                            "exit(error_code)", O_SYSCALL_ARG1, sizeof(int) );

               if (SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit_group)
                  VG_TRACK( pre_reg_read, Vg_CoreSysCall, tid,
                            "exit_group(error_code)", O_SYSCALL_ARG1,
                            sizeof(int) );

               /* Only run __libc_freeres if the tool says it's ok and
                  it hasn't been overridden with --run-libc-freeres=no
                  on the command line. */

               if (VG_(needs).libc_freeres &&
                   VG_(clo_run_libc_freeres) &&
                   __libc_freeres_wrapper != 0) {
                  if (VG_(clo_verbosity) > 2
                      || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                     VG_(message)(Vg_DebugMsg,
                        "Caught __NR_exit; running __libc_freeres()");
                  }
                  VG_(nuke_all_threads_except) ( tid );
                  INSTR_PTR(VG_(threads)[tid].arch) =
                     __libc_freeres_wrapper;
                  vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
                  goto stage1; /* party on, dudes (but not for much longer :) */

               } else {
                  /* We won't run __libc_freeres; just exit now. */
                  if (VG_(clo_verbosity) > 2
                      || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                     VG_(message)(Vg_DebugMsg,
                        "Caught __NR_exit; quitting");
                  }
                  return VgSrc_ExitSyscall;
               }

            }

            /* We've dealt with __NR_exit at this point. */
            vg_assert(SYSCALL_NUM(VG_(threads)[tid].arch) != __NR_exit &&
                      SYSCALL_NUM(VG_(threads)[tid].arch) != __NR_exit_group);

            /* Trap syscalls to __NR_sched_yield and just have this
               thread yield instead.  Not essential, just an
               optimisation. */
            if (SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_sched_yield) {
               SET_SYSCALL_RETVAL(tid, 0); /* syscall returns with success */
               goto stage1; /* find a new thread to run */
            }

            sched_do_syscall(tid);

#           if 0
            { UInt* esp; Int i;
              esp=(UInt*)STACK_PTR(VG_(threads)[tid].arch);
              VG_(printf)("AFTER\n");
              for (i = 10; i >= -10; i--)
                 VG_(printf)("%2d %p = 0x%x\n", i, &esp[i], esp[i]);
            }
#           endif

            if (VG_(threads)[tid].status == VgTs_Runnable) {
               continue; /* with this thread */
            } else {
               goto stage1;
            }
         }

         /* It's an event we can't quickly deal with.  Give up running
            this thread and handle things the expensive way. */
         break;
      }

      /* ======================= Phase 3 of 3 =======================
         Handle non-trivial thread requests, mostly pthread stuff. */

      /* Ok, we've fallen out of the dispatcher for a
         non-completely-trivial reason.  First, update basic-block
         counters. */

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d: completed %d bbs, trc %d",
                                   tid, done_this_time, (Int)trc );

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d: %llu bbs, event %s",
                                   tid, VG_(bbs_done),
                                   name_of_sched_event(trc) );

      /* Examine the thread's return code to figure out why it
         stopped. */

      switch (trc) {

         case VG_TRC_EBP_JMP_YIELD:
            /* Explicit yield.  Let a new thread be scheduled,
               simply by doing nothing, causing us to arrive back at
               Phase 1. */
            break;

         case VG_TRC_INNER_COUNTERZERO:
            /* Timeslice is out.  Let a new thread be scheduled,
               simply by doing nothing, causing us to arrive back at
               Phase 1. */
            vg_assert(VG_(dispatch_ctr) == 1);
            break;

         case VG_TRC_UNRESUMABLE_SIGNAL:
            /* It got a SIGSEGV/SIGBUS/SIGILL/SIGFPE, which we need to
               deliver right away. */
            vg_assert(unresumable_siginfo.si_signo == VKI_SIGSEGV ||
                      unresumable_siginfo.si_signo == VKI_SIGBUS ||
                      unresumable_siginfo.si_signo == VKI_SIGILL ||
                      unresumable_siginfo.si_signo == VKI_SIGFPE);
            vg_assert(longjmpd_on_signal == unresumable_siginfo.si_signo);

            /* make sure we've unblocked the signals which the handler blocked */
            VG_(unblock_host_signal)(longjmpd_on_signal);

            VG_(deliver_signal)(tid, &unresumable_siginfo, False);
            unresumable_siginfo.si_signo = 0; /* done */
            break;

         default:
            VG_(printf)("\ntrc = %d\n", trc);
            VG_(core_panic)("VG_(scheduler), phase 3: "
                            "unexpected thread return code");
            /* NOTREACHED */
            break;

      } /* switch (trc) */

      /* That completes Phase 3 of 3.  Return now to the top of the
         main scheduler loop, to Phase 1 of 3. */

   } /* top-level scheduler loop */


   /* NOTREACHED */
   VG_(core_panic)("scheduler: post-main-loop ?!");
   /* NOTREACHED */
}

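/* Top-level entry point.  Runs do_scheduler() under a setjmp so that
   VG_(scheduler_handle_fatal_signal) (below) can longjmp out if the
   client gets a signal whose default action is fatal; in that case we
   return VgSrc_FatalSig and pass the signal number back through
   *fatal_sigNo_ptr. */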
VgSchedReturnCode VG_(scheduler) ( Int* exitcode, ThreadId* last_run_tid,
                                   Int* fatal_sigNo_ptr )
{
   VgSchedReturnCode src;

   fatal_signal_jmpbuf_ptr = &fatal_signal_jmpbuf;
   if (__builtin_setjmp( fatal_signal_jmpbuf_ptr ) == 0) {
      src = do_scheduler( exitcode, last_run_tid );
   } else {
      src = VgSrc_FatalSig;
      *fatal_sigNo_ptr = fatal_sigNo;
   }
   return src;
}

void VG_(need_resched) ( ThreadId prefer )
{
   /* Tell the scheduler now might be a good time to find a new
      runnable thread, because something happened which woke a thread
      up.

      NB: This can be called unsynchronized from either a signal
      handler, or from another LWP (ie, real kernel thread).

      In principle this could simply be a matter of setting
      VG_(dispatch_ctr) to a small value (say, 2), which would make
      any running code come back to the scheduler fairly quickly.

      However, since the scheduler implements a strict round-robin
      policy with only one priority level, there are, by definition,
      no better threads to be running than the current thread anyway,
      so we may as well ignore this hint.  For processes with a
      mixture of compute and I/O bound threads, this means the compute
      threads could introduce longish latencies before the I/O threads
      run.  For programs with only I/O bound threads, need_resched
      won't have any effect anyway.

      OK, so I've added command-line switches to enable low-latency
      syscalls and signals.  The prefer_sched variable is in effect
      the ID of a single thread which has higher priority than all the
      others.  If set, the scheduler will prefer to schedule that
      thread over all others.  Naturally, this could lead to
      starvation or other unfairness.
   */

   if (VG_(dispatch_ctr) > 10)
      VG_(dispatch_ctr) = 2;
   prefer_sched = prefer;
}

void VG_(scheduler_handle_fatal_signal) ( Int sigNo )
{
   if (NULL != fatal_signal_jmpbuf_ptr) {
      fatal_sigNo = sigNo;
      __builtin_longjmp(*fatal_signal_jmpbuf_ptr, 1);
   }
}

/* ---------------------------------------------------------------------
   The pthread implementation.
   ------------------------------------------------------------------ */

#include <pthread.h>
#include <errno.h>

/* /usr/include/bits/pthreadtypes.h:
   typedef unsigned long int pthread_t;
*/


/* -----------------------------------------------------------
   Thread CREATION, JOINAGE and CANCELLATION: HELPER FNS
   -------------------------------------------------------- */

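/* Cancellation in outline: a cancelled thread does not vanish
   immediately.  Instead make_thread_jump_to_cancelhdlr() points its
   program counter at the cancellation handler recorded in
   .cancel_pend (thread_exit_wrapper() in vg_libpthread.c), with
   PTHREAD_CANCELED arranged as its argument, so the thread's exit
   path runs in the client much as for a normal pthread_exit(). */
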
sewardj20917d82002-05-28 01:36:45 +00001170/* We've decided to action a cancellation on tid. Make it jump to
1171 thread_exit_wrapper() in vg_libpthread.c, passing PTHREAD_CANCELED
1172 as the arg. */
1173static
1174void make_thread_jump_to_cancelhdlr ( ThreadId tid )
1175{
1176 Char msg_buf[100];
1177 vg_assert(VG_(is_valid_tid)(tid));
sewardjdadc8d02002-12-08 23:24:18 +00001178
sewardj20917d82002-05-28 01:36:45 +00001179 /* Push PTHREAD_CANCELED on the stack and jump to the cancellation
1180 handler -- which is really thread_exit_wrapper() in
1181 vg_libpthread.c. */
1182 vg_assert(VG_(threads)[tid].cancel_pend != NULL);
sewardj4bdd9962002-12-26 11:51:50 +00001183
nethercote6b9c8472004-09-13 13:16:40 +00001184 /* Set an argument and bogus return address. The return address will not
1185 be used, but we still need to have it so that the arg is at the
1186 correct stack offset. */
nethercote50397c22004-11-04 18:03:06 +00001187 VGA_(set_arg_and_bogus_ret)(tid, (UWord)PTHREAD_CANCELED, 0xBEADDEEF);
sewardj4bdd9962002-12-26 11:51:50 +00001188
1189 /* .cancel_pend will hold &thread_exit_wrapper */
njncf45fd42004-11-24 16:30:22 +00001190 INSTR_PTR(VG_(threads)[tid].arch) = (UWord)VG_(threads)[tid].cancel_pend;
sewardjdadc8d02002-12-08 23:24:18 +00001191
jsgf855d93d2003-10-13 22:26:55 +00001192 VG_(proxy_abort_syscall)(tid);
sewardjdadc8d02002-12-08 23:24:18 +00001193
sewardj20917d82002-05-28 01:36:45 +00001194 /* Make sure we aren't cancelled again whilst handling this
1195 cancellation. */
1196 VG_(threads)[tid].cancel_st = False;
1197 if (VG_(clo_trace_sched)) {
1198 VG_(sprintf)(msg_buf,
1199 "jump to cancellation handler (hdlr = %p)",
1200 VG_(threads)[tid].cancel_pend);
1201 print_sched_event(tid, msg_buf);
1202 }
thughes513197c2004-06-13 12:07:53 +00001203
1204 if (VG_(threads)[tid].status == VgTs_WaitCV) {
1205 /* POSIX says we must reacquire the mutex before handling cancellation */
1206 vg_pthread_mutex_t* mx;
1207 vg_pthread_cond_t* cond;
1208
1209 mx = VG_(threads)[tid].associated_mx;
1210 cond = VG_(threads)[tid].associated_cv;
1211 VG_TRACK( pre_mutex_lock, tid, mx );
1212
1213 if (mx->__vg_m_owner == VG_INVALID_THREADID) {
1214 /* Currently unheld; hand it out to thread tid. */
1215 vg_assert(mx->__vg_m_count == 0);
1216 VG_(threads)[tid].status = VgTs_Runnable;
1217 VG_(threads)[tid].associated_cv = NULL;
1218 VG_(threads)[tid].associated_mx = NULL;
thughes10236472004-06-13 14:35:43 +00001219 mx->__vg_m_owner = (/*_pthread_descr*/void*)tid;
thughes513197c2004-06-13 12:07:53 +00001220 mx->__vg_m_count = 1;
1221 /* .m_edx already holds pth_cond_wait success value (0) */
1222
1223 VG_TRACK( post_mutex_lock, tid, mx );
1224
1225 if (VG_(clo_trace_pthread_level) >= 1) {
1226 VG_(sprintf)(msg_buf, "%s cv %p: RESUME with mx %p",
1227 "pthread_cancel", cond, mx );
1228 print_pthread_event(tid, msg_buf);
1229 }
1230
1231 } else {
1232 /* Currently held. Make thread tid be blocked on it. */
1233 vg_assert(mx->__vg_m_count > 0);
1234 VG_(threads)[tid].status = VgTs_WaitMX;
1235 VG_(threads)[tid].associated_cv = NULL;
1236 VG_(threads)[tid].associated_mx = mx;
1237 SET_PTHREQ_RETVAL(tid, 0); /* pth_cond_wait success value */
1238
1239 if (VG_(clo_trace_pthread_level) >= 1) {
1240 VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
1241 "pthread_cancel", cond, mx );
1242 print_pthread_event(tid, msg_buf);
1243 }
1244 }
1245 } else {
1246 VG_(threads)[tid].status = VgTs_Runnable;
1247 }
sewardj20917d82002-05-28 01:36:45 +00001248}
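
/* Illustrative only: the net effect of the frame forged above. The
   thread resumes exactly as if this call had just been made; since
   thread_exit_wrapper() (in vg_libpthread.c) never returns, the bogus
   return address 0xBEADDEEF is never popped. */
#if 0
   thread_exit_wrapper(PTHREAD_CANCELED);
#endif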
1249
1250
1251
sewardjb48e5002002-05-13 00:16:03 +00001252/* Release resources and generally clean up once a thread has finally
nethercotef971ab72004-08-02 16:27:40 +00001253 disappeared.
1254
1255 BORKAGE/ISSUES as of 29 May 02 (moved from top of file --njn 2004-Aug-02)
1256
1257 TODO sometime:
1258 - Mutex scrubbing - cleanup_after_thread_exited: look for threads
1259 blocked on mutexes held by the exiting thread, and release them
1260 appropriately. (??)
1261*/
sewardjb48e5002002-05-13 00:16:03 +00001262static
jsgf855d93d2003-10-13 22:26:55 +00001263void cleanup_after_thread_exited ( ThreadId tid, Bool forcekill )
sewardjb48e5002002-05-13 00:16:03 +00001264{
thughes3a1b8172004-09-12 22:48:59 +00001265 Segment *seg;
1266
nethercote36881a22004-08-04 14:03:16 +00001267 vg_assert(is_valid_or_empty_tid(tid));
sewardj018f7622002-05-15 21:13:39 +00001268 vg_assert(VG_(threads)[tid].status == VgTs_Empty);
thugheseb9b8fb2004-11-12 23:11:21 +00001269
njn25e49d8e72002-09-23 09:36:25 +00001270 /* Its stack is now off-limits */
thugheseb9b8fb2004-11-12 23:11:21 +00001271 if (VG_(threads)[tid].stack_base) {
1272 seg = VG_(find_segment)( VG_(threads)[tid].stack_base );
1273 VG_TRACK( die_mem_stack, seg->addr, seg->len );
1274 }
njn25e49d8e72002-09-23 09:36:25 +00001275
nethercotef9b59412004-09-10 15:33:32 +00001276 VGA_(cleanup_thread)( &VG_(threads)[tid].arch );
fitzhardinge47735af2004-01-21 01:27:27 +00001277
jsgf855d93d2003-10-13 22:26:55 +00001278 /* Not interested in the timeout anymore */
1279 VG_(threads)[tid].awaken_at = 0xFFFFFFFF;
1280
1281 /* Delete proxy LWP */
1282 VG_(proxy_delete)(tid, forcekill);
sewardjb48e5002002-05-13 00:16:03 +00001283}
1284
1285
sewardj20917d82002-05-28 01:36:45 +00001286/* Look for matching pairs of threads waiting for joiners and threads
1287 waiting for joinees. For each such pair copy the return value of
1288 the joinee into the joiner, let the joiner resume and discard the
1289 joinee. */
1290static
1291void maybe_rendezvous_joiners_and_joinees ( void )
1292{
1293 Char msg_buf[100];
1294 void** thread_return;
1295 ThreadId jnr, jee;
1296
1297 for (jnr = 1; jnr < VG_N_THREADS; jnr++) {
1298 if (VG_(threads)[jnr].status != VgTs_WaitJoinee)
1299 continue;
1300 jee = VG_(threads)[jnr].joiner_jee_tid;
1301 if (jee == VG_INVALID_THREADID)
1302 continue;
1303 vg_assert(VG_(is_valid_tid)(jee));
jsgf855d93d2003-10-13 22:26:55 +00001304 if (VG_(threads)[jee].status != VgTs_WaitJoiner) {
1305 /* if joinee has become detached, then make join fail with
1306 EINVAL */
1307 if (VG_(threads)[jee].detached) {
1308 VG_(threads)[jnr].status = VgTs_Runnable;
1309 VG_(threads)[jnr].joiner_jee_tid = VG_INVALID_THREADID;
1310 SET_PTHREQ_RETVAL(jnr, VKI_EINVAL);
1311 }
sewardj20917d82002-05-28 01:36:45 +00001312 continue;
jsgf855d93d2003-10-13 22:26:55 +00001313 }
sewardj20917d82002-05-28 01:36:45 +00001314 /* ok! jnr is waiting to join with jee, and jee is waiting to be
1315 joined by ... well, any thread. So let's do it! */
1316
1317 /* Copy return value to where joiner wants it. */
1318 thread_return = VG_(threads)[jnr].joiner_thread_return;
1319 if (thread_return != NULL) {
1320 /* CHECK thread_return writable */
njn72718642003-07-24 08:45:32 +00001321 VG_TRACK( pre_mem_write, Vg_CorePThread, jnr,
njn25e49d8e72002-09-23 09:36:25 +00001322 "pthread_join: thread_return",
1323 (Addr)thread_return, sizeof(void*));
sewardj5a3798b2002-06-04 23:24:22 +00001324
sewardj20917d82002-05-28 01:36:45 +00001325 *thread_return = VG_(threads)[jee].joinee_retval;
1326 /* Not really right, since it makes the thread's return value
1327 appear to be defined even if it isn't. */
njncf45fd42004-11-24 16:30:22 +00001328 VG_TRACK( post_mem_write, Vg_CorePThread, jnr,
1329 (Addr)thread_return, sizeof(void*) );
sewardj20917d82002-05-28 01:36:45 +00001330 }
1331
1332 /* Joinee is discarded */
1333 VG_(threads)[jee].status = VgTs_Empty; /* bye! */
jsgf855d93d2003-10-13 22:26:55 +00001334 cleanup_after_thread_exited ( jee, False );
sewardjc4a810d2002-11-13 22:25:51 +00001335 if (VG_(clo_trace_sched)) {
1336 VG_(sprintf)(msg_buf,
1337 "rendezvous with joinee %d. %d resumes, %d exits.",
1338 jee, jnr, jee );
sewardj20917d82002-05-28 01:36:45 +00001339 print_sched_event(jnr, msg_buf);
1340 }
sewardjc4a810d2002-11-13 22:25:51 +00001341
1342 VG_TRACK( post_thread_join, jnr, jee );
sewardj20917d82002-05-28 01:36:45 +00001343
1344 /* joiner returns with success */
1345 VG_(threads)[jnr].status = VgTs_Runnable;
njnd3040452003-05-19 15:04:06 +00001346 SET_PTHREQ_RETVAL(jnr, 0);
sewardj20917d82002-05-28 01:36:45 +00001347 }
1348}
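
/* Client-side view (a sketch in ordinary pthreads code, not part of
   the scheduler): the rendezvous above is what makes a plain join
   deliver the joinee's return value. */
#if 0
static void* worker ( void* arg )
{
   return (void*)42;                  /* becomes joinee_retval */
}

static void example ( void )
{
   pthread_t t;
   void* r;
   pthread_create(&t, NULL, worker, NULL);
   pthread_join(t, &r);               /* r is copied from the joinee */
   /* here r == (void*)42 */
}
#endif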
1349
1350
sewardjccef2e62002-05-29 19:26:32 +00001351/* Nuke all threads other than me. POSIX specifies that this should
1352 happen in __NR_exec, and after a __NR_fork() when I am the child.
jsgf855d93d2003-10-13 22:26:55 +00001353 Also used at process exit time, with
1354 me==VG_INVALID_THREADID. */
sewardjccef2e62002-05-29 19:26:32 +00001355void VG_(nuke_all_threads_except) ( ThreadId me )
1356{
1357 ThreadId tid;
1358 for (tid = 1; tid < VG_N_THREADS; tid++) {
1359 if (tid == me
jsgf855d93d2003-10-13 22:26:55 +00001360 || VG_(threads)[tid].status == VgTs_Empty)
sewardjccef2e62002-05-29 19:26:32 +00001361 continue;
sewardjef037c72002-05-30 00:40:03 +00001362 if (0)
1363 VG_(printf)(
1364 "VG_(nuke_all_threads_except): nuking tid %d\n", tid);
jsgf855d93d2003-10-13 22:26:55 +00001365 VG_(proxy_delete)(tid, True);
sewardjccef2e62002-05-29 19:26:32 +00001366 VG_(threads)[tid].status = VgTs_Empty;
thughes6d41bea2004-10-20 12:25:59 +00001367 VG_(threads)[tid].associated_mx = NULL;
1368 VG_(threads)[tid].associated_cv = NULL;
thughes168eb882004-11-13 00:39:37 +00001369 VG_(threads)[tid].stack_base = (Addr)NULL;
thugheseb9b8fb2004-11-12 23:11:21 +00001370 VG_(threads)[tid].stack_size = 0;
jsgf855d93d2003-10-13 22:26:55 +00001371 cleanup_after_thread_exited( tid, True );
sewardjccef2e62002-05-29 19:26:32 +00001372 }
1373}
1374
1375
sewardj20917d82002-05-28 01:36:45 +00001376/* -----------------------------------------------------------
1377 Thread CREATION, JOINAGE and CANCELLATION: REQUESTS
1378 -------------------------------------------------------- */
1379
sewardje663cb92002-04-12 10:26:32 +00001380static
sewardj8ad94e12002-05-29 00:10:20 +00001381void do__cleanup_push ( ThreadId tid, CleanupEntry* cu )
1382{
1383 Int sp;
1384 Char msg_buf[100];
1385 vg_assert(VG_(is_valid_tid)(tid));
1386 sp = VG_(threads)[tid].custack_used;
1387 if (VG_(clo_trace_sched)) {
thughes11975ff2004-06-12 12:58:22 +00001388 switch (cu->type) {
1389 case VgCt_Function:
1390 VG_(sprintf)(msg_buf,
1391 "cleanup_push (fn %p, arg %p) -> slot %d",
1392 cu->data.function.fn, cu->data.function.arg, sp);
1393 break;
1394 case VgCt_Longjmp:
1395 VG_(sprintf)(msg_buf,
1396 "cleanup_push (ub %p) -> slot %d",
1397 cu->data.longjmp.ub, sp);
1398 break;
1399 default:
1400 VG_(sprintf)(msg_buf,
1401 "cleanup_push (unknown type) -> slot %d",
1402 sp);
1403 break;
1404 }
sewardj8ad94e12002-05-29 00:10:20 +00001405 print_sched_event(tid, msg_buf);
1406 }
1407 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1408 if (sp == VG_N_CLEANUPSTACK)
njne427a662002-10-02 11:08:25 +00001409 VG_(core_panic)("do__cleanup_push: VG_N_CLEANUPSTACK is too small."
sewardj8ad94e12002-05-29 00:10:20 +00001410 " Increase and recompile.");
1411 VG_(threads)[tid].custack[sp] = *cu;
1412 sp++;
1413 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001414 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001415}
1416
1417
1418static
1419void do__cleanup_pop ( ThreadId tid, CleanupEntry* cu )
1420{
1421 Int sp;
1422 Char msg_buf[100];
1423 vg_assert(VG_(is_valid_tid)(tid));
1424 sp = VG_(threads)[tid].custack_used;
1425 if (VG_(clo_trace_sched)) {
njn36650922002-10-04 09:18:09 +00001426 VG_(sprintf)(msg_buf, "cleanup_pop from slot %d", sp-1);
sewardj8ad94e12002-05-29 00:10:20 +00001427 print_sched_event(tid, msg_buf);
1428 }
1429 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1430 if (sp == 0) {
njnd3040452003-05-19 15:04:06 +00001431 SET_PTHREQ_RETVAL(tid, -1);
sewardj8ad94e12002-05-29 00:10:20 +00001432 return;
1433 }
1434 sp--;
njn72718642003-07-24 08:45:32 +00001435 VG_TRACK( pre_mem_write, Vg_CorePThread, tid,
njn36650922002-10-04 09:18:09 +00001436 "cleanup pop", (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001437 *cu = VG_(threads)[tid].custack[sp];
njncf45fd42004-11-24 16:30:22 +00001438 VG_TRACK( post_mem_write, Vg_CorePThread, tid,
1439 (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001440 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001441 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001442}
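
/* Client-side view (sketch, shown only to make the pairing concrete):
   the pthread_cleanup_push/pop macro pair in client code is what
   arrives here as do__cleanup_push/do__cleanup_pop requests. */
#if 0
static void unlock_it ( void* m )
{
   pthread_mutex_unlock( (pthread_mutex_t*)m );
}

static void example ( pthread_mutex_t* mx )
{
   pthread_mutex_lock(mx);
   pthread_cleanup_push(unlock_it, mx);   /* -> do__cleanup_push */
   /* ... code containing cancellation points ... */
   pthread_cleanup_pop(1);                /* -> do__cleanup_pop; the 1
                                             means also run unlock_it */
}
#endif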
1443
1444
1445static
sewardjff42d1d2002-05-22 13:17:31 +00001446void do_pthread_yield ( ThreadId tid )
1447{
1448 Char msg_buf[100];
1449 vg_assert(VG_(is_valid_tid)(tid));
sewardjff42d1d2002-05-22 13:17:31 +00001450 if (VG_(clo_trace_sched)) {
1451 VG_(sprintf)(msg_buf, "yield");
1452 print_sched_event(tid, msg_buf);
1453 }
njnd3040452003-05-19 15:04:06 +00001454 SET_PTHREQ_RETVAL(tid, 0);
sewardjff42d1d2002-05-22 13:17:31 +00001455}
1456
1457
1458static
sewardj20917d82002-05-28 01:36:45 +00001459void do__testcancel ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00001460{
sewardj7989d0c2002-05-28 11:00:01 +00001461 Char msg_buf[100];
sewardjb48e5002002-05-13 00:16:03 +00001462 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001463 if (VG_(clo_trace_sched)) {
1464 VG_(sprintf)(msg_buf, "testcancel");
1465 print_sched_event(tid, msg_buf);
1466 }
sewardj20917d82002-05-28 01:36:45 +00001467 if (/* is there a cancellation pending on this thread? */
1468 VG_(threads)[tid].cancel_pend != NULL
1469 && /* is this thread accepting cancellations? */
1470 VG_(threads)[tid].cancel_st) {
1471 /* Ok, let's do the cancellation. */
1472 make_thread_jump_to_cancelhdlr ( tid );
sewardje663cb92002-04-12 10:26:32 +00001473 } else {
sewardj20917d82002-05-28 01:36:45 +00001474 /* No, we keep going. */
njnd3040452003-05-19 15:04:06 +00001475 SET_PTHREQ_RETVAL(tid, 0);
sewardje663cb92002-04-12 10:26:32 +00001476 }
sewardje663cb92002-04-12 10:26:32 +00001477}
1478
1479
1480static
sewardj20917d82002-05-28 01:36:45 +00001481void do__set_cancelstate ( ThreadId tid, Int state )
1482{
1483 Bool old_st;
sewardj7989d0c2002-05-28 11:00:01 +00001484 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001485 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001486 if (VG_(clo_trace_sched)) {
1487 VG_(sprintf)(msg_buf, "set_cancelstate to %d (%s)", state,
1488 state==PTHREAD_CANCEL_ENABLE
1489 ? "ENABLE"
1490 : (state==PTHREAD_CANCEL_DISABLE ? "DISABLE" : "???"));
1491 print_sched_event(tid, msg_buf);
1492 }
sewardj20917d82002-05-28 01:36:45 +00001493 old_st = VG_(threads)[tid].cancel_st;
1494 if (state == PTHREAD_CANCEL_ENABLE) {
1495 VG_(threads)[tid].cancel_st = True;
1496 } else
1497 if (state == PTHREAD_CANCEL_DISABLE) {
1498 VG_(threads)[tid].cancel_st = False;
1499 } else {
njne427a662002-10-02 11:08:25 +00001500 VG_(core_panic)("do__set_cancelstate");
sewardj20917d82002-05-28 01:36:45 +00001501 }
njnd3040452003-05-19 15:04:06 +00001502 SET_PTHREQ_RETVAL(tid, old_st ? PTHREAD_CANCEL_ENABLE
1503 : PTHREAD_CANCEL_DISABLE);
sewardj20917d82002-05-28 01:36:45 +00001504}
1505
1506
1507static
1508void do__set_canceltype ( ThreadId tid, Int type )
1509{
1510 Bool old_ty;
sewardj7989d0c2002-05-28 11:00:01 +00001511 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001512 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001513 if (VG_(clo_trace_sched)) {
1514 VG_(sprintf)(msg_buf, "set_canceltype to %d (%s)", type,
1515 type==PTHREAD_CANCEL_ASYNCHRONOUS
1516 ? "ASYNCHRONOUS"
1517 : (type==PTHREAD_CANCEL_DEFERRED ? "DEFERRED" : "???"));
1518 print_sched_event(tid, msg_buf);
1519 }
sewardj20917d82002-05-28 01:36:45 +00001520 old_ty = VG_(threads)[tid].cancel_ty;
1521 if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
1522 VG_(threads)[tid].cancel_ty = False;
1523 } else
1524 if (type == PTHREAD_CANCEL_DEFERRED) {
sewardjaf00b6d2002-05-29 23:30:28 +00001525 VG_(threads)[tid].cancel_ty = True;
sewardj20917d82002-05-28 01:36:45 +00001526 } else {
njne427a662002-10-02 11:08:25 +00001527 VG_(core_panic)("do__set_canceltype");
sewardj20917d82002-05-28 01:36:45 +00001528 }
njnd3040452003-05-19 15:04:06 +00001529 SET_PTHREQ_RETVAL(tid, old_ty ? PTHREAD_CANCEL_DEFERRED
sewardj20917d82002-05-28 01:36:45 +00001530 : PTHREAD_CANCEL_ASYNCHRONOUS);
1531}

/* Client-side sketch of the encoding above; note the inversion --
   cancel_ty == True means DEFERRED, False means ASYNCHRONOUS. */
#if 0
   int old;
   pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
   /* cancel_ty is now False: a pending cancel may fire at once,
      rather than waiting for the next cancellation point */
   pthread_setcanceltype(old, NULL);      /* restore previous type */
#endif
1532
1533
sewardj7989d0c2002-05-28 11:00:01 +00001534/* Set or get the detach state for thread det. */
sewardj20917d82002-05-28 01:36:45 +00001535static
sewardj7989d0c2002-05-28 11:00:01 +00001536void do__set_or_get_detach ( ThreadId tid,
1537 Int what, ThreadId det )
sewardj20917d82002-05-28 01:36:45 +00001538{
sewardj7989d0c2002-05-28 11:00:01 +00001539 Char msg_buf[100];
1540 /* VG_(printf)("do__set_or_get_detach tid %d what %d det %d\n",
1541 tid, what, det); */
sewardj20917d82002-05-28 01:36:45 +00001542 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001543 if (VG_(clo_trace_sched)) {
1544 VG_(sprintf)(msg_buf, "set_or_get_detach %d (%s) for tid %d", what,
1545 what==0 ? "not-detached" : (
1546 what==1 ? "detached" : (
1547 what==2 ? "fetch old value" : "???")),
1548 det );
1549 print_sched_event(tid, msg_buf);
1550 }
1551
1552 if (!VG_(is_valid_tid)(det)) {
njnd3040452003-05-19 15:04:06 +00001553 SET_PTHREQ_RETVAL(tid, -1);
sewardj7989d0c2002-05-28 11:00:01 +00001554 return;
1555 }
1556
sewardj20917d82002-05-28 01:36:45 +00001557 switch (what) {
1558 case 2: /* get */
njnd3040452003-05-19 15:04:06 +00001559 SET_PTHREQ_RETVAL(tid, VG_(threads)[det].detached ? 1 : 0);
sewardj20917d82002-05-28 01:36:45 +00001560 return;
jsgf855d93d2003-10-13 22:26:55 +00001561 case 1: /* set detached */
sewardj7989d0c2002-05-28 11:00:01 +00001562 VG_(threads)[det].detached = True;
njnd3040452003-05-19 15:04:06 +00001563 SET_PTHREQ_RETVAL(tid, 0);
jsgf855d93d2003-10-13 22:26:55 +00001564 /* wake anyone who was joining on us */
1565 maybe_rendezvous_joiners_and_joinees();
sewardj20917d82002-05-28 01:36:45 +00001566 return;
1567 case 0: /* set not detached */
sewardj7989d0c2002-05-28 11:00:01 +00001568 VG_(threads)[det].detached = False;
njnd3040452003-05-19 15:04:06 +00001569 SET_PTHREQ_RETVAL(tid, 0);
sewardj20917d82002-05-28 01:36:45 +00001570 return;
1571 default:
njne427a662002-10-02 11:08:25 +00001572 VG_(core_panic)("do__set_or_get_detach");
sewardj20917d82002-05-28 01:36:45 +00001573 }
1574}
1575
1576
1577static
1578void do__set_cancelpend ( ThreadId tid,
1579 ThreadId cee,
1580 void (*cancelpend_hdlr)(void*) )
sewardje663cb92002-04-12 10:26:32 +00001581{
1582 Char msg_buf[100];
1583
sewardj20917d82002-05-28 01:36:45 +00001584 vg_assert(VG_(is_valid_tid)(tid));
1585 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1586
thughes97e54d22004-08-15 14:34:02 +00001587 if (!VG_(is_valid_tid)(cee) ||
1588 VG_(threads)[cee].status == VgTs_WaitJoiner) {
sewardj7989d0c2002-05-28 11:00:01 +00001589 if (VG_(clo_trace_sched)) {
1590 VG_(sprintf)(msg_buf,
1591 "set_cancelpend for invalid tid %d", cee);
1592 print_sched_event(tid, msg_buf);
1593 }
njn25e49d8e72002-09-23 09:36:25 +00001594 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001595 "pthread_cancel: target thread does not exist, or invalid");
jsgf855d93d2003-10-13 22:26:55 +00001596 SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
sewardj7989d0c2002-05-28 11:00:01 +00001597 return;
1598 }
sewardj20917d82002-05-28 01:36:45 +00001599
1600 VG_(threads)[cee].cancel_pend = cancelpend_hdlr;
1601
thughes31c1aae2004-10-28 15:56:55 +00001602 /* interrupt a pending syscall if asynchronous cancellation
1603 is enabled for the target thread */
1604 if (VG_(threads)[cee].cancel_st && !VG_(threads)[cee].cancel_ty) {
1605 VG_(proxy_abort_syscall)(cee);
1606 }
jsgf855d93d2003-10-13 22:26:55 +00001607
sewardj20917d82002-05-28 01:36:45 +00001608 if (VG_(clo_trace_sched)) {
1609 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001610 "set_cancelpend (hdlr = %p, set by tid %d)",
sewardj20917d82002-05-28 01:36:45 +00001611 cancelpend_hdlr, tid);
1612 print_sched_event(cee, msg_buf);
1613 }
1614
1615 /* Thread doing the cancelling returns with success. */
njnd3040452003-05-19 15:04:06 +00001616 SET_PTHREQ_RETVAL(tid, 0);
sewardj20917d82002-05-28 01:36:45 +00001617
1618 /* Perhaps we can nuke the cancellee right now? */
thughes513197c2004-06-13 12:07:53 +00001619 if (!VG_(threads)[cee].cancel_ty || /* if PTHREAD_CANCEL_ASYNCHRONOUS */
1620 (VG_(threads)[cee].status != VgTs_Runnable &&
1621 VG_(threads)[cee].status != VgTs_WaitMX)) {
jsgf855d93d2003-10-13 22:26:55 +00001622 do__testcancel(cee);
thughes513197c2004-06-13 12:07:53 +00001623 }
sewardj20917d82002-05-28 01:36:45 +00001624}
1625
1626
1627static
1628void do_pthread_join ( ThreadId tid,
1629 ThreadId jee, void** thread_return )
1630{
1631 Char msg_buf[100];
1632 ThreadId i;
sewardje663cb92002-04-12 10:26:32 +00001633 /* jee, the joinee, is the thread specified as an arg in thread
1634 tid's call to pthread_join. So tid is the join-er. */
sewardjb48e5002002-05-13 00:16:03 +00001635 vg_assert(VG_(is_valid_tid)(tid));
sewardj018f7622002-05-15 21:13:39 +00001636 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001637
1638 if (jee == tid) {
njn25e49d8e72002-09-23 09:36:25 +00001639 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001640 "pthread_join: attempt to join to self");
njnd3040452003-05-19 15:04:06 +00001641 SET_PTHREQ_RETVAL(tid, EDEADLK); /* libc constant, not a kernel one */
jsgf855d93d2003-10-13 22:26:55 +00001642 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001643 return;
1644 }
1645
sewardj20917d82002-05-28 01:36:45 +00001646 /* Flush any completed pairs, so as to make sure what we're looking
1647 at is up-to-date. */
1648 maybe_rendezvous_joiners_and_joinees();
1649
1650 /* Is this a sane request? */
jsgf855d93d2003-10-13 22:26:55 +00001651 if ( ! VG_(is_valid_tid)(jee) ||
1652 VG_(threads)[jee].detached) {
sewardje663cb92002-04-12 10:26:32 +00001653 /* Invalid thread to join to. */
njn25e49d8e72002-09-23 09:36:25 +00001654 VG_(record_pthread_error)( tid,
jsgf855d93d2003-10-13 22:26:55 +00001655 "pthread_join: target thread does not exist, invalid, or detached");
1656 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardje663cb92002-04-12 10:26:32 +00001657 return;
1658 }
1659
sewardj20917d82002-05-28 01:36:45 +00001660 /* Is anyone else already in a join-wait for jee? */
1661 for (i = 1; i < VG_N_THREADS; i++) {
1662 if (i == tid) continue;
1663 if (VG_(threads)[i].status == VgTs_WaitJoinee
1664 && VG_(threads)[i].joiner_jee_tid == jee) {
1665 /* Someone already did join on this thread */
njn25e49d8e72002-09-23 09:36:25 +00001666 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001667 "pthread_join: another thread already "
1668 "in join-wait for target thread");
jsgf855d93d2003-10-13 22:26:55 +00001669 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
1670 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardj20917d82002-05-28 01:36:45 +00001671 return;
1672 }
sewardje663cb92002-04-12 10:26:32 +00001673 }
1674
thughes513197c2004-06-13 12:07:53 +00001675 if (VG_(threads)[tid].cancel_pend != NULL &&
1676 VG_(threads)[tid].cancel_st) {
1677 make_thread_jump_to_cancelhdlr ( tid );
1678 } else {
1679 /* Mark this thread as waiting for the joinee. */
1680 VG_(threads)[tid].status = VgTs_WaitJoinee;
1681 VG_(threads)[tid].joiner_thread_return = thread_return;
1682 VG_(threads)[tid].joiner_jee_tid = jee;
1683
1684 /* Look for matching joiners and joinees and do the right thing. */
1685 maybe_rendezvous_joiners_and_joinees();
1686
1687 /* Return value is irrelevant since this thread becomes
1688 non-runnable. maybe_rendezvous_joiners_and_joinees() will cause
1689 it to return the right value when it resumes. */
1690
1691 if (VG_(clo_trace_sched)) {
1692 VG_(sprintf)(msg_buf,
1693 "wait for joinee %d (may already be ready)", jee);
1694 print_sched_event(tid, msg_buf);
1695 }
sewardje663cb92002-04-12 10:26:32 +00001696 }
sewardje663cb92002-04-12 10:26:32 +00001697}
1698
1699
sewardj20917d82002-05-28 01:36:45 +00001700/* ( void* ): calling thread waits for joiner and returns the void* to
1701 it. This is one of two ways in which a thread can finally exit --
1702 the other is do__quit. */
sewardje663cb92002-04-12 10:26:32 +00001703static
sewardj20917d82002-05-28 01:36:45 +00001704void do__wait_joiner ( ThreadId tid, void* retval )
sewardje663cb92002-04-12 10:26:32 +00001705{
sewardj20917d82002-05-28 01:36:45 +00001706 Char msg_buf[100];
1707 vg_assert(VG_(is_valid_tid)(tid));
1708 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1709 if (VG_(clo_trace_sched)) {
1710 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001711 "do__wait_joiner(retval = %p) (non-detached thread exit)", retval);
sewardj20917d82002-05-28 01:36:45 +00001712 print_sched_event(tid, msg_buf);
1713 }
1714 VG_(threads)[tid].status = VgTs_WaitJoiner;
1715 VG_(threads)[tid].joinee_retval = retval;
1716 maybe_rendezvous_joiners_and_joinees();
1717}
1718
1719
1720/* ( no-args ): calling thread disappears from the system forever.
1721 Reclaim resources. */
1722static
1723void do__quit ( ThreadId tid )
1724{
1725 Char msg_buf[100];
1726 vg_assert(VG_(is_valid_tid)(tid));
1727 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1728 VG_(threads)[tid].status = VgTs_Empty; /* bye! */
jsgf855d93d2003-10-13 22:26:55 +00001729 cleanup_after_thread_exited ( tid, False );
sewardj20917d82002-05-28 01:36:45 +00001730 if (VG_(clo_trace_sched)) {
sewardj7989d0c2002-05-28 11:00:01 +00001731 VG_(sprintf)(msg_buf, "do__quit (detached thread exit)");
sewardj20917d82002-05-28 01:36:45 +00001732 print_sched_event(tid, msg_buf);
1733 }
jsgf855d93d2003-10-13 22:26:55 +00001734 maybe_rendezvous_joiners_and_joinees();
sewardj20917d82002-05-28 01:36:45 +00001735 /* Return value is irrelevant; this thread will not get
1736 rescheduled. */
1737}
1738
1739
nethercote6b9c8472004-09-13 13:16:40 +00001740/* Should never be entered. If it is, it will be on the simulated CPU. */
sewardj20917d82002-05-28 01:36:45 +00001741static
1742void do__apply_in_new_thread_bogusRA ( void )
1743{
njne427a662002-10-02 11:08:25 +00001744 VG_(core_panic)("do__apply_in_new_thread_bogusRA");
sewardj20917d82002-05-28 01:36:45 +00001745}
1746
1747/* (Fn, Arg): Create a new thread and run Fn applied to Arg in it. Fn
1748 MUST NOT return -- ever. Eventually it will do either __QUIT or
1749 __WAIT_JOINER. Return the child tid to the parent. */
1750static
1751void do__apply_in_new_thread ( ThreadId parent_tid,
1752 void* (*fn)(void *),
thughesdaa34562004-06-27 12:48:53 +00001753 void* arg,
1754 StackInfo *si )
sewardj20917d82002-05-28 01:36:45 +00001755{
sewardje663cb92002-04-12 10:26:32 +00001756 Addr new_stack;
1757 UInt new_stk_szb;
1758 ThreadId tid;
1759 Char msg_buf[100];
1760
1761 /* Paranoia ... */
1762 vg_assert(sizeof(pthread_t) == sizeof(UInt));
1763
sewardj018f7622002-05-15 21:13:39 +00001764 vg_assert(VG_(threads)[parent_tid].status != VgTs_Empty);
sewardje663cb92002-04-12 10:26:32 +00001765
sewardj1e8cdc92002-04-18 11:37:52 +00001766 tid = vg_alloc_ThreadState();
sewardje663cb92002-04-12 10:26:32 +00001767
1768 /* If we've created the main thread's tid, we're in deep trouble :) */
sewardj6072c362002-04-19 14:40:57 +00001769 vg_assert(tid != 1);
nethercote36881a22004-08-04 14:03:16 +00001770 vg_assert(is_valid_or_empty_tid(tid));
sewardje663cb92002-04-12 10:26:32 +00001771
sewardjc4a810d2002-11-13 22:25:51 +00001772 /* do this early, before the child gets any memory writes */
1773 VG_TRACK ( post_thread_create, parent_tid, tid );
1774
sewardjf6374322002-11-13 22:35:55 +00001775 /* Create new thread with default attrs:
1776 deferred cancellation, not detached
1777 */
1778 mostly_clear_thread_record(tid);
1779 VG_(threads)[tid].status = VgTs_Runnable;
1780
sewardj2a99cf62004-11-24 10:44:19 +00001781 /* Copy the parent's CPU state into the child's. */
nethercotef9b59412004-09-10 15:33:32 +00001782 VGA_(setup_child)( &VG_(threads)[tid].arch,
1783 &VG_(threads)[parent_tid].arch );
sewardje663cb92002-04-12 10:26:32 +00001784
1785 /* Consider allocating the child a stack, if the one it already has
1786 is inadequate. */
thughes5e5e2132004-11-16 19:40:05 +00001787 new_stk_szb = PGROUNDUP(si->size + VG_AR_CLIENT_STACKBASE_REDZONE_SZB + si->guardsize);
1788
thughesdaa34562004-06-27 12:48:53 +00001789 VG_(threads)[tid].stack_guard_size = si->guardsize;
sewardje663cb92002-04-12 10:26:32 +00001790
sewardj018f7622002-05-15 21:13:39 +00001791 if (new_stk_szb > VG_(threads)[tid].stack_size) {
sewardje663cb92002-04-12 10:26:32 +00001792 /* Again, for good measure :) We definitely don't want to be
1793 allocating a stack for the main thread. */
sewardj6072c362002-04-19 14:40:57 +00001794 vg_assert(tid != 1);
thughesdaa34562004-06-27 12:48:53 +00001795 if (VG_(threads)[tid].stack_size > 0)
1796 VG_(client_free)(VG_(threads)[tid].stack_base);
fitzhardinge98abfc72003-12-16 02:05:15 +00001797 new_stack = VG_(client_alloc)(0, new_stk_szb,
nethercotee567e702004-07-10 17:49:17 +00001798 VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
fitzhardinge98abfc72003-12-16 02:05:15 +00001799 SF_STACK);
nethercote8e9eab02004-07-11 18:01:06 +00001800 // Given the low number of threads Valgrind can handle, stack
1801 // allocation should pretty much always succeed, so having an
1802 // assertion here isn't too bad. However, probably better would be
1803 // this:
1804 //
1805 // if (0 == new_stack)
1806 // SET_PTHREQ_RETVAL(parent_tid, -VKI_EAGAIN);
1807 //
nethercotee567e702004-07-10 17:49:17 +00001808 vg_assert(0 != new_stack);
sewardj018f7622002-05-15 21:13:39 +00001809 VG_(threads)[tid].stack_base = new_stack;
1810 VG_(threads)[tid].stack_size = new_stk_szb;
1811 VG_(threads)[tid].stack_highest_word
sewardje663cb92002-04-12 10:26:32 +00001812 = new_stack + new_stk_szb
sewardj1e8cdc92002-04-18 11:37:52 +00001813 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB; /* -4 ??? */
sewardje663cb92002-04-12 10:26:32 +00001814 }
sewardj1e8cdc92002-04-18 11:37:52 +00001815
njn25e49d8e72002-09-23 09:36:25 +00001816 /* Having got memory to hold the thread's stack:
1817 - set %esp as base + size
1818 - mark everything below %esp inaccessible
1819 - mark redzone at stack end inaccessible
1820 */
njnd3040452003-05-19 15:04:06 +00001821 SET_PTHREQ_ESP(tid, VG_(threads)[tid].stack_base
1822 + VG_(threads)[tid].stack_size
1823 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
sewardj1e8cdc92002-04-18 11:37:52 +00001824
njn25e49d8e72002-09-23 09:36:25 +00001825 VG_TRACK ( die_mem_stack, VG_(threads)[tid].stack_base,
thughesdaa34562004-06-27 12:48:53 +00001826 VG_(threads)[tid].stack_size
1827 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
njncf45fd42004-11-24 16:30:22 +00001828 VG_TRACK ( ban_mem_stack, STACK_PTR(VG_(threads)[tid].arch),
njn25e49d8e72002-09-23 09:36:25 +00001829 VG_AR_CLIENT_STACKBASE_REDZONE_SZB );
sewardje663cb92002-04-12 10:26:32 +00001830
nethercote6b9c8472004-09-13 13:16:40 +00001831 VGA_(thread_initial_stack)(tid, (UWord)arg,
1832 (Addr)&do__apply_in_new_thread_bogusRA);
sewardje663cb92002-04-12 10:26:32 +00001833
1834 /* this is where we start */
njncf45fd42004-11-24 16:30:22 +00001835 INSTR_PTR(VG_(threads)[tid].arch) = (UWord)fn;
sewardje663cb92002-04-12 10:26:32 +00001836
sewardj8937c812002-04-12 20:12:20 +00001837 if (VG_(clo_trace_sched)) {
njn25e49d8e72002-09-23 09:36:25 +00001838 VG_(sprintf)(msg_buf, "new thread, created by %d", parent_tid );
sewardje663cb92002-04-12 10:26:32 +00001839 print_sched_event(tid, msg_buf);
1840 }
1841
fitzhardingef7866182004-03-16 22:09:12 +00001842 /* Start the thread with all signals blocked; it's up to the client
1843 code to set the right signal mask when it's ready. */
nethercote73b526f2004-10-31 18:48:21 +00001844 VG_(sigfillset)(&VG_(threads)[tid].sig_mask);
jsgf855d93d2003-10-13 22:26:55 +00001845
1846 /* Now that the signal mask is set up, create a proxy LWP for this thread */
1847 VG_(proxy_create)(tid);
1848
1849 /* Set the proxy's signal mask */
1850 VG_(proxy_setsigmask)(tid);
sewardjb48e5002002-05-13 00:16:03 +00001851
sewardj20917d82002-05-28 01:36:45 +00001852 /* return child's tid to parent */
njnd3040452003-05-19 15:04:06 +00001853 SET_PTHREQ_RETVAL(parent_tid, tid); /* success */
sewardje663cb92002-04-12 10:26:32 +00001854}
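
/* Worked example of the stack sizing above, assuming a 4KB page size
   and (purely for illustration) a 16-byte redzone: a request with
   si->size == 60000 and si->guardsize == 4096 gives
      PGROUNDUP(60000 + 16 + 4096) == PGROUNDUP(64112) == 65536,
   i.e. a 16-page stack, with the initial SP placed at
   stack_base + 65536 - 16. */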
1855
1856
sewardj604ec3c2002-04-18 22:38:41 +00001857/* -----------------------------------------------------------
1858 MUTEXes
1859 -------------------------------------------------------- */
1860
rjwalsh7109a8c2004-09-02 00:31:02 +00001861/* vg_pthread_mutex_t is defined in core.h.
sewardj604ec3c2002-04-18 22:38:41 +00001862
nethercote1f0173b2004-02-28 15:40:36 +00001863 The initializers zero everything, except possibly the fourth word,
1864 which in vg_pthread_mutex_t is the __vg_m_kind field. It gets set to one
1865 of PTHREAD_MUTEX_{TIMED,RECURSIVE,ERRORCHECK,ADAPTIVE}_NP
sewardj604ec3c2002-04-18 22:38:41 +00001866
sewardj6072c362002-04-19 14:40:57 +00001867 How we use it:
sewardj604ec3c2002-04-18 22:38:41 +00001868
nethercote1f0173b2004-02-28 15:40:36 +00001869 __vg_m_kind never changes and indicates whether or not it is recursive.
sewardj6072c362002-04-19 14:40:57 +00001870
nethercote1f0173b2004-02-28 15:40:36 +00001871 __vg_m_count indicates the lock count; if 0, the mutex is not owned by
sewardj6072c362002-04-19 14:40:57 +00001872 anybody.
1873
nethercote1f0173b2004-02-28 15:40:36 +00001874 __vg_m_owner has a ThreadId value stuffed into it. We carefully arrange
sewardj6072c362002-04-19 14:40:57 +00001875 that ThreadId == 0 is invalid (VG_INVALID_THREADID), so that
1876 statically initialised mutexes correctly appear
1877 to belong to nobody.
1878
nethercote1f0173b2004-02-28 15:40:36 +00001879 In summary, a not-in-use mutex is distinguished by having __vg_m_owner
1880 == 0 (VG_INVALID_THREADID) and __vg_m_count == 0 too. If one of those
sewardj6072c362002-04-19 14:40:57 +00001881 conditions holds, the other should too.
1882
1883 There is no linked list of threads waiting for this mutex. Instead
1884 a thread in WaitMX state points at the mutex with its associated_mx
1885 field. This makes _unlock() inefficient, but makes it simple to
1886 implement the right semantics vis-a-vis signals.
sewardje663cb92002-04-12 10:26:32 +00001887
sewardj604ec3c2002-04-18 22:38:41 +00001888 We don't have to deal with mutex initialisation; the client side
sewardj6072c362002-04-19 14:40:57 +00001889 deals with that for us.
1890*/
sewardje663cb92002-04-12 10:26:32 +00001891
sewardj3b5d8862002-04-20 13:53:23 +00001892/* Helper fns ... */
thughese321d492004-10-17 15:00:20 +00001893static
1894void do_pthread_mutex_timedlock_TIMEOUT ( ThreadId tid )
1895{
1896 Char msg_buf[100];
1897 vg_pthread_mutex_t* mx;
1898
1899 vg_assert(VG_(is_valid_tid)(tid)
1900 && VG_(threads)[tid].status == VgTs_WaitMX
1901 && VG_(threads)[tid].awaken_at != 0xFFFFFFFF);
1902 mx = VG_(threads)[tid].associated_mx;
1903 vg_assert(mx != NULL);
1904
1905 VG_(threads)[tid].status = VgTs_Runnable;
1906 SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_mutex_lock return value */
1907 VG_(threads)[tid].associated_mx = NULL;
1908
1909 if (VG_(clo_trace_pthread_level) >= 1) {
1910 VG_(sprintf)(msg_buf, "pthread_mutex_timedlock mx %p: TIMEOUT", mx);
1911 print_pthread_event(tid, msg_buf);
1912 }
1913}
1914
1915
sewardj3b5d8862002-04-20 13:53:23 +00001916static
nethercote1f0173b2004-02-28 15:40:36 +00001917void release_one_thread_waiting_on_mutex ( vg_pthread_mutex_t* mutex,
sewardj3b5d8862002-04-20 13:53:23 +00001918 Char* caller )
1919{
1920 Int i;
1921 Char msg_buf[100];
1922
1923 /* Find some arbitrary thread waiting on this mutex, and make it
1924 runnable. If none are waiting, mark the mutex as not held. */
1925 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00001926 if (VG_(threads)[i].status == VgTs_Empty)
sewardj3b5d8862002-04-20 13:53:23 +00001927 continue;
sewardj018f7622002-05-15 21:13:39 +00001928 if (VG_(threads)[i].status == VgTs_WaitMX
1929 && VG_(threads)[i].associated_mx == mutex)
sewardj3b5d8862002-04-20 13:53:23 +00001930 break;
1931 }
1932
nethercote1f0173b2004-02-28 15:40:36 +00001933 VG_TRACK( post_mutex_unlock, (ThreadId)mutex->__vg_m_owner, mutex );
sewardj0af43bc2002-10-22 04:30:35 +00001934
sewardj3b5d8862002-04-20 13:53:23 +00001935 vg_assert(i <= VG_N_THREADS);
1936 if (i == VG_N_THREADS) {
1937 /* Nobody else is waiting on it. */
nethercote1f0173b2004-02-28 15:40:36 +00001938 mutex->__vg_m_count = 0;
1939 mutex->__vg_m_owner = VG_INVALID_THREADID;
sewardj3b5d8862002-04-20 13:53:23 +00001940 } else {
1941 /* Notionally transfer the hold to thread i, whose
1942 pthread_mutex_lock() call now returns with 0 (success). */
1943 /* The .count is already == 1. */
sewardj018f7622002-05-15 21:13:39 +00001944 vg_assert(VG_(threads)[i].associated_mx == mutex);
nethercote1f0173b2004-02-28 15:40:36 +00001945 mutex->__vg_m_owner = (/*_pthread_descr*/void*)i;
sewardj018f7622002-05-15 21:13:39 +00001946 VG_(threads)[i].status = VgTs_Runnable;
1947 VG_(threads)[i].associated_mx = NULL;
sewardj5f07b662002-04-23 16:52:51 +00001948 /* m_edx already holds pth_mx_lock() success (0) */
sewardj3b5d8862002-04-20 13:53:23 +00001949
sewardj0af43bc2002-10-22 04:30:35 +00001950 VG_TRACK( post_mutex_lock, (ThreadId)i, mutex);
1951
sewardj3b5d8862002-04-20 13:53:23 +00001952 if (VG_(clo_trace_pthread_level) >= 1) {
1953 VG_(sprintf)(msg_buf, "%s mx %p: RESUME",
1954 caller, mutex );
1955 print_pthread_event(i, msg_buf);
1956 }
1957 }
1958}
1959
sewardje663cb92002-04-12 10:26:32 +00001960
1961static
sewardj30671ff2002-04-21 00:13:57 +00001962void do_pthread_mutex_lock( ThreadId tid,
1963 Bool is_trylock,
thughese321d492004-10-17 15:00:20 +00001964 vg_pthread_mutex_t* mutex,
1965 UInt ms_end )
sewardje663cb92002-04-12 10:26:32 +00001966{
sewardj30671ff2002-04-21 00:13:57 +00001967 Char msg_buf[100];
1968 Char* caller
sewardj8ccc2be2002-05-10 20:26:37 +00001969 = is_trylock ? "pthread_mutex_trylock"
1970 : "pthread_mutex_lock ";
sewardje663cb92002-04-12 10:26:32 +00001971
thughese321d492004-10-17 15:00:20 +00001972 /* If ms_end == 0xFFFFFFFF, wait forever (no timeout). Otherwise,
1973 ms_end is the ending millisecond. */
1974
sewardj604ec3c2002-04-18 22:38:41 +00001975 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj30671ff2002-04-21 00:13:57 +00001976 VG_(sprintf)(msg_buf, "%s mx %p ...", caller, mutex );
sewardj604ec3c2002-04-18 22:38:41 +00001977 print_pthread_event(tid, msg_buf);
1978 }
1979
1980 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00001981 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00001982 && VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001983
1984 /* POSIX doesn't mandate this, but for sanity ... */
1985 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00001986 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001987 "pthread_mutex_lock/trylock: mutex is NULL");
jsgf855d93d2003-10-13 22:26:55 +00001988 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardje663cb92002-04-12 10:26:32 +00001989 return;
1990 }
1991
sewardj604ec3c2002-04-18 22:38:41 +00001992 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00001993 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00001994# ifndef GLIBC_2_1
sewardj604ec3c2002-04-18 22:38:41 +00001995 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00001996 case PTHREAD_MUTEX_ADAPTIVE_NP:
1997# endif
sewardja1679dd2002-05-10 22:31:40 +00001998# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00001999 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002000# endif
sewardj604ec3c2002-04-18 22:38:41 +00002001 case PTHREAD_MUTEX_RECURSIVE_NP:
2002 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002003 if (mutex->__vg_m_count >= 0) break;
sewardj604ec3c2002-04-18 22:38:41 +00002004 /* else fall thru */
2005 default:
njn25e49d8e72002-09-23 09:36:25 +00002006 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002007 "pthread_mutex_lock/trylock: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002008 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002009 return;
sewardje663cb92002-04-12 10:26:32 +00002010 }
2011
nethercote1f0173b2004-02-28 15:40:36 +00002012 if (mutex->__vg_m_count > 0) {
2013 if (!VG_(is_valid_tid)((ThreadId)mutex->__vg_m_owner)) {
fitzhardinge47735af2004-01-21 01:27:27 +00002014 VG_(record_pthread_error)( tid,
2015 "pthread_mutex_lock/trylock: mutex has invalid owner");
2016 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
2017 return;
2018 }
sewardjf8f819e2002-04-17 23:21:37 +00002019
2020 /* Someone has it already. */
thughese321d492004-10-17 15:00:20 +00002021 if ((ThreadId)mutex->__vg_m_owner == tid && ms_end == 0xFFFFFFFF) {
sewardjf8f819e2002-04-17 23:21:37 +00002022 /* It's locked -- by me! */
nethercote1f0173b2004-02-28 15:40:36 +00002023 if (mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
sewardjf8f819e2002-04-17 23:21:37 +00002024 /* return 0 (success). */
nethercote1f0173b2004-02-28 15:40:36 +00002025 mutex->__vg_m_count++;
njnd3040452003-05-19 15:04:06 +00002026 SET_PTHREQ_RETVAL(tid, 0);
sewardj853f55d2002-04-26 00:27:53 +00002027 if (0)
2028 VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n",
nethercote1f0173b2004-02-28 15:40:36 +00002029 tid, mutex, mutex->__vg_m_count);
sewardjf8f819e2002-04-17 23:21:37 +00002030 return;
2031 } else {
sewardj30671ff2002-04-21 00:13:57 +00002032 if (is_trylock)
njnd3040452003-05-19 15:04:06 +00002033 SET_PTHREQ_RETVAL(tid, EBUSY);
sewardj30671ff2002-04-21 00:13:57 +00002034 else
njnd3040452003-05-19 15:04:06 +00002035 SET_PTHREQ_RETVAL(tid, EDEADLK);
sewardjf8f819e2002-04-17 23:21:37 +00002036 return;
2037 }
2038 } else {
sewardj6072c362002-04-19 14:40:57 +00002039 /* Someone else has it; we have to wait. Mark ourselves
2040 thusly. */
nethercote1f0173b2004-02-28 15:40:36 +00002041 /* GUARD: __vg_m_count > 0 && __vg_m_owner is valid */
sewardj30671ff2002-04-21 00:13:57 +00002042 if (is_trylock) {
2043 /* caller is polling; so return immediately. */
njnd3040452003-05-19 15:04:06 +00002044 SET_PTHREQ_RETVAL(tid, EBUSY);
sewardj30671ff2002-04-21 00:13:57 +00002045 } else {
sewardjdca84112002-11-13 22:29:34 +00002046 VG_TRACK ( pre_mutex_lock, tid, mutex );
2047
sewardj018f7622002-05-15 21:13:39 +00002048 VG_(threads)[tid].status = VgTs_WaitMX;
2049 VG_(threads)[tid].associated_mx = mutex;
thughese321d492004-10-17 15:00:20 +00002050 VG_(threads)[tid].awaken_at = ms_end;
2051 if (ms_end != 0xFFFFFFFF)
2052 add_timeout(tid, ms_end);
njnd3040452003-05-19 15:04:06 +00002053 SET_PTHREQ_RETVAL(tid, 0); /* pth_mx_lock success value */
sewardj30671ff2002-04-21 00:13:57 +00002054 if (VG_(clo_trace_pthread_level) >= 1) {
2055 VG_(sprintf)(msg_buf, "%s mx %p: BLOCK",
2056 caller, mutex );
2057 print_pthread_event(tid, msg_buf);
2058 }
2059 }
sewardje663cb92002-04-12 10:26:32 +00002060 return;
2061 }
sewardjf8f819e2002-04-17 23:21:37 +00002062
sewardje663cb92002-04-12 10:26:32 +00002063 } else {
sewardj6072c362002-04-19 14:40:57 +00002064 /* Nobody owns it. Sanity check ... */
nethercote1f0173b2004-02-28 15:40:36 +00002065 vg_assert(mutex->__vg_m_owner == VG_INVALID_THREADID);
sewardjdca84112002-11-13 22:29:34 +00002066
2067 VG_TRACK ( pre_mutex_lock, tid, mutex );
2068
sewardjf8f819e2002-04-17 23:21:37 +00002069 /* We get it! [for the first time]. */
nethercote1f0173b2004-02-28 15:40:36 +00002070 mutex->__vg_m_count = 1;
2071 mutex->__vg_m_owner = (/*_pthread_descr*/void*)tid;
njn25e49d8e72002-09-23 09:36:25 +00002072
sewardje663cb92002-04-12 10:26:32 +00002073 /* return 0 (success). */
njnd3040452003-05-19 15:04:06 +00002074 SET_PTHREQ_RETVAL(tid, 0);
sewardjf8f819e2002-04-17 23:21:37 +00002075
njnd3040452003-05-19 15:04:06 +00002076 VG_TRACK( post_mutex_lock, tid, mutex);
2077 }
sewardje663cb92002-04-12 10:26:32 +00002078}
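
/* Client-side sketch of the count/owner rules above, using the GNU
   recursive-mutex initializer (assumed available from <pthread.h>
   under _GNU_SOURCE): */
#if 0
   pthread_mutex_t rmx = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
   pthread_mutex_lock(&rmx);     /* __vg_m_count 0 -> 1, owner = me */
   pthread_mutex_lock(&rmx);     /* recursive relock: count 1 -> 2  */
   pthread_mutex_unlock(&rmx);   /* count 2 -> 1, still owned       */
   pthread_mutex_unlock(&rmx);   /* count 1 -> 0, owner cleared     */
#endif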
2079
2080
2081static
2082void do_pthread_mutex_unlock ( ThreadId tid,
nethercote1f0173b2004-02-28 15:40:36 +00002083 vg_pthread_mutex_t* mutex )
sewardje663cb92002-04-12 10:26:32 +00002084{
sewardj3b5d8862002-04-20 13:53:23 +00002085 Char msg_buf[100];
sewardje663cb92002-04-12 10:26:32 +00002086
sewardj45b4b372002-04-16 22:50:32 +00002087 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj3b5d8862002-04-20 13:53:23 +00002088 VG_(sprintf)(msg_buf, "pthread_mutex_unlock mx %p ...", mutex );
sewardj8937c812002-04-12 20:12:20 +00002089 print_pthread_event(tid, msg_buf);
2090 }
2091
sewardj604ec3c2002-04-18 22:38:41 +00002092 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002093 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002094 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj604ec3c2002-04-18 22:38:41 +00002095
2096 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002097 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002098 "pthread_mutex_unlock: mutex is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002099 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002100 return;
2101 }
2102
2103 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00002104 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002105# ifndef GLIBC_2_1
sewardj604ec3c2002-04-18 22:38:41 +00002106 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002107 case PTHREAD_MUTEX_ADAPTIVE_NP:
2108# endif
sewardja1679dd2002-05-10 22:31:40 +00002109# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002110 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002111# endif
sewardj604ec3c2002-04-18 22:38:41 +00002112 case PTHREAD_MUTEX_RECURSIVE_NP:
2113 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002114 if (mutex->__vg_m_count >= 0) break;
sewardj604ec3c2002-04-18 22:38:41 +00002115 /* else fall thru */
2116 default:
njn25e49d8e72002-09-23 09:36:25 +00002117 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002118 "pthread_mutex_unlock: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002119 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002120 return;
2121 }
sewardje663cb92002-04-12 10:26:32 +00002122
2123 /* Barf if we don't currently hold the mutex. */
nethercote1f0173b2004-02-28 15:40:36 +00002124 if (mutex->__vg_m_count == 0) {
sewardj4dced352002-06-04 22:54:20 +00002125 /* nobody holds it */
njn25e49d8e72002-09-23 09:36:25 +00002126 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002127 "pthread_mutex_unlock: mutex is not locked");
njnd3040452003-05-19 15:04:06 +00002128 SET_PTHREQ_RETVAL(tid, EPERM);
sewardj4dced352002-06-04 22:54:20 +00002129 return;
2130 }
2131
nethercote1f0173b2004-02-28 15:40:36 +00002132 if ((ThreadId)mutex->__vg_m_owner != tid) {
sewardj4dced352002-06-04 22:54:20 +00002133 /* we don't hold it */
njn25e49d8e72002-09-23 09:36:25 +00002134 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002135 "pthread_mutex_unlock: mutex is locked by a different thread");
njnd3040452003-05-19 15:04:06 +00002136 SET_PTHREQ_RETVAL(tid, EPERM);
sewardje663cb92002-04-12 10:26:32 +00002137 return;
2138 }
2139
sewardjf8f819e2002-04-17 23:21:37 +00002140 /* If it's a multiply-locked recursive mutex, just decrement the
2141 lock count and return. */
nethercote1f0173b2004-02-28 15:40:36 +00002142 if (mutex->__vg_m_count > 1) {
2143 vg_assert(mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
2144 mutex->__vg_m_count --;
njnd3040452003-05-19 15:04:06 +00002145 SET_PTHREQ_RETVAL(tid, 0); /* success */
sewardjf8f819e2002-04-17 23:21:37 +00002146 return;
2147 }
2148
sewardj604ec3c2002-04-18 22:38:41 +00002149 /* Now we're sure it is locked exactly once, and by the thread who
sewardjf8f819e2002-04-17 23:21:37 +00002150 is now doing an unlock on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002151 vg_assert(mutex->__vg_m_count == 1);
2152 vg_assert((ThreadId)mutex->__vg_m_owner == tid);
sewardjf8f819e2002-04-17 23:21:37 +00002153
sewardj3b5d8862002-04-20 13:53:23 +00002154 /* Release at most one thread waiting on this mutex. */
2155 release_one_thread_waiting_on_mutex ( mutex, "pthread_mutex_lock" );
sewardje663cb92002-04-12 10:26:32 +00002156
sewardj3b5d8862002-04-20 13:53:23 +00002157 /* Our (tid's) pth_unlock() returns with 0 (success). */
njnd3040452003-05-19 15:04:06 +00002158 SET_PTHREQ_RETVAL(tid, 0); /* Success. */
sewardje663cb92002-04-12 10:26:32 +00002159}
2160
2161
sewardj6072c362002-04-19 14:40:57 +00002162/* -----------------------------------------------------------
2163 CONDITION VARIABLES
2164 -------------------------------------------------------- */
sewardje663cb92002-04-12 10:26:32 +00002165
rjwalsh7109a8c2004-09-02 00:31:02 +00002166/* The relevant type (vg_pthread_cond_t) is in core.h.
sewardj77e466c2002-04-14 02:29:29 +00002167
nethercote1f0173b2004-02-28 15:40:36 +00002168 We don't use any fields of vg_pthread_cond_t for anything at all.
2169 Only the identity of the CVs is important. (Actually, we initialise
2170 __vg_c_waiting in pthread_cond_init() to VG_INVALID_THREADID.)
sewardj6072c362002-04-19 14:40:57 +00002171
2172 Linux pthreads supports no attributes on condition variables, so we
sewardj3b5d8862002-04-20 13:53:23 +00002173 don't need to think too hard there. */
sewardj6072c362002-04-19 14:40:57 +00002174
sewardj77e466c2002-04-14 02:29:29 +00002175
sewardj5f07b662002-04-23 16:52:51 +00002176static
2177void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid )
2178{
2179 Char msg_buf[100];
nethercote1f0173b2004-02-28 15:40:36 +00002180 vg_pthread_mutex_t* mx;
2181 vg_pthread_cond_t* cv;
sewardj5f07b662002-04-23 16:52:51 +00002182
sewardjb48e5002002-05-13 00:16:03 +00002183 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002184 && VG_(threads)[tid].status == VgTs_WaitCV
2185 && VG_(threads)[tid].awaken_at != 0xFFFFFFFF);
2186 mx = VG_(threads)[tid].associated_mx;
sewardj5f07b662002-04-23 16:52:51 +00002187 vg_assert(mx != NULL);
sewardj018f7622002-05-15 21:13:39 +00002188 cv = VG_(threads)[tid].associated_cv;
sewardj5f07b662002-04-23 16:52:51 +00002189 vg_assert(cv != NULL);
2190
nethercote1f0173b2004-02-28 15:40:36 +00002191 if (mx->__vg_m_owner == VG_INVALID_THREADID) {
sewardj5f07b662002-04-23 16:52:51 +00002192 /* Currently unheld; hand it out to thread tid. */
nethercote1f0173b2004-02-28 15:40:36 +00002193 vg_assert(mx->__vg_m_count == 0);
sewardj018f7622002-05-15 21:13:39 +00002194 VG_(threads)[tid].status = VgTs_Runnable;
njnd3040452003-05-19 15:04:06 +00002195 SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
sewardj018f7622002-05-15 21:13:39 +00002196 VG_(threads)[tid].associated_cv = NULL;
2197 VG_(threads)[tid].associated_mx = NULL;
nethercote1f0173b2004-02-28 15:40:36 +00002198 mx->__vg_m_owner = (/*_pthread_descr*/void*)tid;
2199 mx->__vg_m_count = 1;
sewardj5f07b662002-04-23 16:52:51 +00002200
sewardj0af43bc2002-10-22 04:30:35 +00002201 VG_TRACK( post_mutex_lock, tid, mx );
2202
sewardj5f07b662002-04-23 16:52:51 +00002203 if (VG_(clo_trace_pthread_level) >= 1) {
sewardjc3bd5f52002-05-01 03:24:23 +00002204 VG_(sprintf)(msg_buf,
nethercote1f0173b2004-02-28 15:40:36 +00002205 "pthread_cond_timedwait cv %p: TIMEOUT with mx %p",
sewardjc3bd5f52002-05-01 03:24:23 +00002206 cv, mx );
sewardj5f07b662002-04-23 16:52:51 +00002207 print_pthread_event(tid, msg_buf);
2208 }
2209 } else {
2210 /* Currently held. Make thread tid be blocked on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002211 vg_assert(mx->__vg_m_count > 0);
sewardjdca84112002-11-13 22:29:34 +00002212 VG_TRACK( pre_mutex_lock, tid, mx );
2213
sewardj018f7622002-05-15 21:13:39 +00002214 VG_(threads)[tid].status = VgTs_WaitMX;
njnd3040452003-05-19 15:04:06 +00002215 SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
sewardj018f7622002-05-15 21:13:39 +00002216 VG_(threads)[tid].associated_cv = NULL;
2217 VG_(threads)[tid].associated_mx = mx;
sewardj5f07b662002-04-23 16:52:51 +00002218 if (VG_(clo_trace_pthread_level) >= 1) {
2219 VG_(sprintf)(msg_buf,
nethercote1f0173b2004-02-28 15:40:36 +00002220 "pthread_cond_timedwait cv %p: TIMEOUT -> BLOCK for mx %p",
sewardj5f07b662002-04-23 16:52:51 +00002221 cv, mx );
2222 print_pthread_event(tid, msg_buf);
2223 }
sewardj5f07b662002-04-23 16:52:51 +00002224 }
2225}
2226
2227
sewardj3b5d8862002-04-20 13:53:23 +00002228static
nethercote1f0173b2004-02-28 15:40:36 +00002229void release_N_threads_waiting_on_cond ( vg_pthread_cond_t* cond,
sewardj3b5d8862002-04-20 13:53:23 +00002230 Int n_to_release,
2231 Char* caller )
2232{
2233 Int i;
2234 Char msg_buf[100];
nethercote1f0173b2004-02-28 15:40:36 +00002235 vg_pthread_mutex_t* mx;
sewardj3b5d8862002-04-20 13:53:23 +00002236
2237 while (True) {
2238 if (n_to_release == 0)
2239 return;
2240
2241 /* Find a thread waiting on this CV. */
2242 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00002243 if (VG_(threads)[i].status == VgTs_Empty)
sewardj3b5d8862002-04-20 13:53:23 +00002244 continue;
sewardj018f7622002-05-15 21:13:39 +00002245 if (VG_(threads)[i].status == VgTs_WaitCV
2246 && VG_(threads)[i].associated_cv == cond)
sewardj3b5d8862002-04-20 13:53:23 +00002247 break;
2248 }
2249 vg_assert(i <= VG_N_THREADS);
2250
2251 if (i == VG_N_THREADS) {
2252 /* Nobody else is waiting on it. */
2253 return;
2254 }
2255
sewardj018f7622002-05-15 21:13:39 +00002256 mx = VG_(threads)[i].associated_mx;
sewardj3b5d8862002-04-20 13:53:23 +00002257 vg_assert(mx != NULL);
2258
sewardjdca84112002-11-13 22:29:34 +00002259 VG_TRACK( pre_mutex_lock, i, mx );
2260
nethercote1f0173b2004-02-28 15:40:36 +00002261 if (mx->__vg_m_owner == VG_INVALID_THREADID) {
sewardj3b5d8862002-04-20 13:53:23 +00002262 /* Currently unheld; hand it out to thread i. */
nethercote1f0173b2004-02-28 15:40:36 +00002263 vg_assert(mx->__vg_m_count == 0);
sewardj018f7622002-05-15 21:13:39 +00002264 VG_(threads)[i].status = VgTs_Runnable;
2265 VG_(threads)[i].associated_cv = NULL;
2266 VG_(threads)[i].associated_mx = NULL;
nethercote1f0173b2004-02-28 15:40:36 +00002267 mx->__vg_m_owner = (/*_pthread_descr*/void*)i;
2268 mx->__vg_m_count = 1;
sewardj5f07b662002-04-23 16:52:51 +00002269 /* .m_edx already holds pth_cond_wait success value (0) */
sewardj3b5d8862002-04-20 13:53:23 +00002270
sewardj0af43bc2002-10-22 04:30:35 +00002271 VG_TRACK( post_mutex_lock, i, mx );
2272
sewardj3b5d8862002-04-20 13:53:23 +00002273 if (VG_(clo_trace_pthread_level) >= 1) {
2274 VG_(sprintf)(msg_buf, "%s cv %p: RESUME with mx %p",
2275 caller, cond, mx );
2276 print_pthread_event(i, msg_buf);
2277 }
2278
2279 } else {
2280 /* Currently held. Make thread i be blocked on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002281 vg_assert(mx->__vg_m_count > 0);
sewardj018f7622002-05-15 21:13:39 +00002282 VG_(threads)[i].status = VgTs_WaitMX;
2283 VG_(threads)[i].associated_cv = NULL;
2284 VG_(threads)[i].associated_mx = mx;
njnd3040452003-05-19 15:04:06 +00002285 SET_PTHREQ_RETVAL(i, 0); /* pth_cond_wait success value */
sewardj3b5d8862002-04-20 13:53:23 +00002286
2287 if (VG_(clo_trace_pthread_level) >= 1) {
2288 VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
2289 caller, cond, mx );
2290 print_pthread_event(i, msg_buf);
2291 }
2292
2293 }
jsgf855d93d2003-10-13 22:26:55 +00002294
sewardj3b5d8862002-04-20 13:53:23 +00002295 n_to_release--;
2296 }
2297}


static
void do_pthread_cond_wait ( ThreadId tid,
                            vg_pthread_cond_t *cond,
                            vg_pthread_mutex_t *mutex,
                            UInt ms_end )
{
   Char msg_buf[100];

   /* If ms_end == 0xFFFFFFFF, wait forever (no timeout).  Otherwise,
      ms_end is the ending millisecond. */

   /* pre: mutex should be a valid mutex and owned by tid. */
   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf, "pthread_cond_wait cv %p, mx %p, end %d ...",
                            cond, mutex, ms_end );
      print_pthread_event(tid, msg_buf);
   }

   /* Paranoia ... */
   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   if (mutex == NULL) {
      VG_(record_pthread_error)( tid,
         "pthread_cond_wait/timedwait: mutex is NULL");
      SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
      return;
   }

   if (cond == NULL) {
      VG_(record_pthread_error)( tid,
         "pthread_cond_wait/timedwait: cond is NULL");
      SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
      return;
   }

   /* More paranoia ... */
   switch (mutex->__vg_m_kind) {
#     ifndef GLIBC_2_1
      case PTHREAD_MUTEX_TIMED_NP:
      case PTHREAD_MUTEX_ADAPTIVE_NP:
#     endif
#     ifdef GLIBC_2_1
      case PTHREAD_MUTEX_FAST_NP:
#     endif
      case PTHREAD_MUTEX_RECURSIVE_NP:
      case PTHREAD_MUTEX_ERRORCHECK_NP:
         if (mutex->__vg_m_count >= 0) break;
         /* else fall thru */
      default:
         VG_(record_pthread_error)( tid,
            "pthread_cond_wait/timedwait: mutex is invalid");
         SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
         return;
   }

   /* Barf if we don't currently hold the mutex. */
   if (mutex->__vg_m_count == 0 /* nobody holds it */) {
      VG_(record_pthread_error)( tid,
         "pthread_cond_wait/timedwait: mutex is unlocked");
      SET_PTHREQ_RETVAL(tid, VKI_EPERM);
      return;
   }

   if ((ThreadId)mutex->__vg_m_owner != tid /* we don't hold it */) {
      VG_(record_pthread_error)( tid,
         "pthread_cond_wait/timedwait: mutex is locked by another thread");
      SET_PTHREQ_RETVAL(tid, VKI_EPERM);
      return;
   }

   if (VG_(threads)[tid].cancel_pend != NULL &&
       VG_(threads)[tid].cancel_st) {
      make_thread_jump_to_cancelhdlr ( tid );
   } else {
      /* Queue ourselves on the condition. */
      VG_(threads)[tid].status        = VgTs_WaitCV;
      VG_(threads)[tid].associated_cv = cond;
      VG_(threads)[tid].associated_mx = mutex;
      VG_(threads)[tid].awaken_at     = ms_end;
      if (ms_end != 0xFFFFFFFF)
         add_timeout(tid, ms_end);

      if (VG_(clo_trace_pthread_level) >= 1) {
         VG_(sprintf)(msg_buf,
                      "pthread_cond_wait cv %p, mx %p: BLOCK",
                      cond, mutex );
         print_pthread_event(tid, msg_buf);
      }

      /* Release the mutex. */
      release_one_thread_waiting_on_mutex ( mutex, "pthread_cond_wait " );
   }
}
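
/* Client-side usage sketch (illustrative only; 'mx', 'cv' and 'ready'
   are hypothetical client variables, not names from this file).  The
   request above implements the standard condition-variable protocol:
   the caller must hold the mutex, and should re-check its predicate on
   wakeup:

      pthread_mutex_lock(&mx);
      while (!ready)                   // wakeups may be spurious
         pthread_cond_wait(&cv, &mx);  // atomically unlocks mx, blocks
      pthread_mutex_unlock(&mx);
*/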


static
void do_pthread_cond_signal_or_broadcast ( ThreadId tid,
                                           Bool broadcast,
                                           vg_pthread_cond_t *cond )
{
   Char  msg_buf[100];
   Char* caller
      = broadcast ? "pthread_cond_broadcast"
                  : "pthread_cond_signal ";

   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf, "%s cv %p ...",
                            caller, cond );
      print_pthread_event(tid, msg_buf);
   }

   /* Paranoia ... */
   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   if (cond == NULL) {
      VG_(record_pthread_error)( tid,
         "pthread_cond_signal/broadcast: cond is NULL");
      SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
      return;
   }

   release_N_threads_waiting_on_cond (
      cond,
      broadcast ? VG_N_THREADS : 1,
      caller
   );

   SET_PTHREQ_RETVAL(tid, 0); /* success */
}


/* -----------------------------------------------------------
   THREAD SPECIFIC DATA
   -------------------------------------------------------- */

static __inline__
Bool is_valid_key ( ThreadKey k )
{
   /* k unsigned; hence no < 0 check */
   if (k >= VG_N_THREAD_KEYS) return False;
   if (!vg_thread_keys[k].inuse) return False;
   return True;
}


/* Return in %EDX a value of 1 if the key is valid, else 0. */
static
void do_pthread_key_validate ( ThreadId tid,
                               pthread_key_t key )
{
   Char msg_buf[100];

   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf, "pthread_key_validate key %p",
                            key );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   if (is_valid_key((ThreadKey)key)) {
      SET_PTHREQ_RETVAL(tid, 1);
   } else {
      SET_PTHREQ_RETVAL(tid, 0);
   }
}


static
void do_pthread_key_create ( ThreadId tid,
                             pthread_key_t* key,
                             void (*destructor)(void*) )
{
   Int i;
   Char msg_buf[100];

   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf, "pthread_key_create *key %p, destr %p",
                            key, destructor );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   for (i = 0; i < VG_N_THREAD_KEYS; i++)
      if (!vg_thread_keys[i].inuse)
         break;

   if (i == VG_N_THREAD_KEYS) {
      VG_(message)(Vg_UserMsg, "pthread_key_create() asked for too many keys (more than %d): increase VG_N_THREAD_KEYS and recompile Valgrind.",
                   VG_N_THREAD_KEYS);
      SET_PTHREQ_RETVAL(tid, EAGAIN);
      return;
   }

   vg_thread_keys[i].inuse      = True;
   vg_thread_keys[i].destructor = destructor;

   /* check key for addressability */
   VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_key_create: key",
             (Addr)key, sizeof(pthread_key_t));
   *key = i;
   VG_TRACK( post_mem_write, Vg_CorePThread, tid,
             (Addr)key, sizeof(pthread_key_t) );

   SET_PTHREQ_RETVAL(tid, 0);
}
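
/* Client-side usage sketch (illustrative only; 'buf_key' and
   'free_buf' are hypothetical client names).  The destructor
   registered here is what do__get_key_destr_and_spec below hands back
   to the client-side library at thread exit:

      static pthread_key_t buf_key;
      static void free_buf ( void* p ) { free(p); }
      ...
      pthread_key_create(&buf_key, free_buf);
      pthread_setspecific(buf_key, malloc(100));
*/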


static
void do_pthread_key_delete ( ThreadId tid, pthread_key_t key )
{
   Char msg_buf[100];
   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf, "pthread_key_delete key %d",
                            key );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   if (!is_valid_key(key)) {
      VG_(record_pthread_error)( tid,
         "pthread_key_delete: key is invalid");
      SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
      return;
   }

   vg_thread_keys[key].inuse = False;
   vg_thread_keys[key].destructor = NULL;
   SET_PTHREQ_RETVAL(tid, 0);
}


/* Get the .specific_ptr for a thread.  Return 1 if the thread-slot
   isn't in use, so that client-space can scan all thread slots.  1
   cannot be confused with NULL or a legitimately-aligned specific_ptr
   value. */
static
void do_pthread_getspecific_ptr ( ThreadId tid )
{
   void** specifics_ptr;
   Char   msg_buf[100];

   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf, "pthread_getspecific_ptr" );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(is_valid_or_empty_tid(tid));

   if (VG_(threads)[tid].status == VgTs_Empty) {
      SET_PTHREQ_RETVAL(tid, 1);
      return;
   }

   specifics_ptr = VG_(threads)[tid].specifics_ptr;
   vg_assert(specifics_ptr == NULL || IS_ALIGNED4_ADDR(specifics_ptr));

   SET_PTHREQ_RETVAL(tid, (UWord)specifics_ptr);
}
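
/* Note on the "1" convention above (a sketch, not client API): since
   specifics_ptr is either NULL or aligned, a caller can classify the
   returned value directly ('ret' is a hypothetical variable holding
   the request's result):

      if (ret == 1) {
         // thread slot not in use -- skip it
      } else {
         void** specifics = (void**)ret;  // NULL or an aligned table
      }
*/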


static
void do_pthread_setspecific_ptr ( ThreadId tid, void** ptr )
{
   Char msg_buf[100];
   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf, "pthread_setspecific_ptr ptr %p",
                            ptr );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   VG_(threads)[tid].specifics_ptr = ptr;
   SET_PTHREQ_RETVAL(tid, 0);
}


/* Helper for calling destructors at thread exit.  If key is valid,
   copy the thread's specific value into cu->data.function.arg and put
   the *key*'s destructor fn address in cu->data.function.fn.  Then
   return 0 to the caller.  Otherwise return non-zero to the caller. */
static
void do__get_key_destr_and_spec ( ThreadId tid,
                                  pthread_key_t key,
                                  CleanupEntry* cu )
{
   Char msg_buf[100];
   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf,
         "get_key_destr_and_arg (key = %d)", key );
      print_pthread_event(tid, msg_buf);
   }
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(key >= 0 && key < VG_N_THREAD_KEYS);

   if (!vg_thread_keys[key].inuse) {
      SET_PTHREQ_RETVAL(tid, -1);
      return;
   }
   VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "get_key_destr_and_spec: cu",
             (Addr)cu, sizeof(CleanupEntry) );

   cu->type = VgCt_Function;
   cu->data.function.fn = vg_thread_keys[key].destructor;
   if (VG_(threads)[tid].specifics_ptr == NULL) {
      cu->data.function.arg = NULL;
   } else {
      VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
                "get_key_destr_and_spec: key",
                (Addr)(&VG_(threads)[tid].specifics_ptr[key]),
                sizeof(void*) );
      cu->data.function.arg = VG_(threads)[tid].specifics_ptr[key];
   }

   VG_TRACK( post_mem_write, Vg_CorePThread, tid,
             (Addr)cu, sizeof(CleanupEntry) );
   SET_PTHREQ_RETVAL(tid, 0);
}
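
/* Caller-side sketch (illustrative only; the real loop lives in the
   client-side pthread library, and 'get_key_destr_and_spec' stands
   for issuing the VG_USERREQ__GET_KEY_D_AND_S request -- a
   hypothetical wrapper, not a function in this file).  At thread exit
   the library fetches each key's destructor and argument and runs the
   destructor on non-NULL values:

      CleanupEntry cu;
      int key;
      for (key = 0; key < VG_N_THREAD_KEYS; key++) {
         if (get_key_destr_and_spec(key, &cu) != 0)
            continue;                  // key not in use
         if (cu.data.function.fn != NULL && cu.data.function.arg != NULL)
            cu.data.function.fn(cu.data.function.arg);
      }
*/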


/* ---------------------------------------------------
   SIGNALS
   ------------------------------------------------ */

/* See comment in vg_libpthread.c:pthread_sigmask() regarding
   deliberate confusion of types sigset_t and vki_sigset_t.  Return 0
   for OK and 1 for some kind of addressing error, which the
   vg_libpthread.c routine turns into return values 0 and EFAULT
   respectively. */
static
void do_pthread_sigmask ( ThreadId tid,
                          Int vki_how,
                          vki_sigset_t* newmask,
                          vki_sigset_t* oldmask )
{
   Char msg_buf[100];
   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf,
         "pthread_sigmask vki_how %d, newmask %p, oldmask %p",
         vki_how, newmask, oldmask );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   if (newmask)
      VG_TRACK( pre_mem_read, Vg_CorePThread, tid, "pthread_sigmask: newmask",
                (Addr)newmask, sizeof(vki_sigset_t));
   if (oldmask)
      VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_sigmask: oldmask",
                (Addr)oldmask, sizeof(vki_sigset_t));

   VG_(do_pthread_sigmask_SCSS_upd) ( tid, vki_how, newmask, oldmask );

   if (oldmask)
      VG_TRACK( post_mem_write, Vg_CorePThread, tid,
                (Addr)oldmask, sizeof(vki_sigset_t) );

   /* Success. */
   SET_PTHREQ_RETVAL(tid, 0);
}
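
/* Client-side usage sketch (illustrative only; standard POSIX calls,
   not names from this file).  Blocking SIGINT in the calling thread
   while saving the previous mask:

      sigset_t set, old;
      sigemptyset(&set);
      sigaddset(&set, SIGINT);
      pthread_sigmask(SIG_BLOCK, &set, &old);
*/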


static
void do_pthread_kill ( ThreadId tid, /* me */
                       ThreadId thread, /* thread to signal */
                       Int sig )
{
   ThreadState* tst;
   Char msg_buf[100];

   if (VG_(clo_trace_signals) || VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf,
         "pthread_kill thread %d, signo %d",
         thread, sig );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   if (!VG_(is_valid_tid)(thread)) {
      VG_(record_pthread_error)( tid,
         "pthread_kill: invalid target thread");
      SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
      return;
   }

   if (sig == 0) {
      /* OK, signal 0 is just for testing */
      SET_PTHREQ_RETVAL(tid, 0);
      return;
   }

   if (sig < 1 || sig > _VKI_NSIG) {
      SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
      return;
   }

   tst = VG_(get_ThreadState)(thread);
   vg_assert(NULL != tst->proxy);
   VG_(proxy_sendsig)(tid/*from*/, thread/*to*/, sig);
   SET_PTHREQ_RETVAL(tid, 0);
}
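
/* Client-side usage sketch (illustrative only; 'worker' is a
   hypothetical pthread_t).  Signal 0, special-cased above, checks
   thread validity without delivering anything:

      if (pthread_kill(worker, 0) == ESRCH) {
         // 'worker' no longer names a live thread
      }
*/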


/* -----------------------------------------------------------
   FORK HANDLERS.
   -------------------------------------------------------- */

static
void do__set_fhstack_used ( ThreadId tid, Int n )
{
   Char msg_buf[100];
   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf, "set_fhstack_used to %d", n );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   if (n >= 0 && n < VG_N_FORKHANDLERSTACK) {
      vg_fhstack_used = n;
      SET_PTHREQ_RETVAL(tid, 0);
   } else {
      SET_PTHREQ_RETVAL(tid, -1);
   }
}


static
void do__get_fhstack_used ( ThreadId tid )
{
   Int  n;
   Char msg_buf[100];
   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf, "get_fhstack_used" );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   n = vg_fhstack_used;
   vg_assert(n >= 0 && n < VG_N_FORKHANDLERSTACK);
   SET_PTHREQ_RETVAL(tid, n);
}

static
void do__set_fhstack_entry ( ThreadId tid, Int n, ForkHandlerEntry* fh )
{
   Char msg_buf[100];
   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf, "set_fhstack_entry %d to %p", n, fh );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);
   VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
             "pthread_atfork: prepare/parent/child",
             (Addr)fh, sizeof(ForkHandlerEntry));

   if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
      SET_PTHREQ_RETVAL(tid, -1);
      return;
   }

   vg_fhstack[n] = *fh;
   SET_PTHREQ_RETVAL(tid, 0);
}


static
void do__get_fhstack_entry ( ThreadId tid, Int n, /*OUT*/
                             ForkHandlerEntry* fh )
{
   Char msg_buf[100];
   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf, "get_fhstack_entry %d", n );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);
   VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "fork: prepare/parent/child",
             (Addr)fh, sizeof(ForkHandlerEntry));

   if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
      SET_PTHREQ_RETVAL(tid, -1);
      return;
   }

   *fh = vg_fhstack[n];
   SET_PTHREQ_RETVAL(tid, 0);

   VG_TRACK( post_mem_write, Vg_CorePThread, tid,
             (Addr)fh, sizeof(ForkHandlerEntry) );
}
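
/* Client-side usage sketch (illustrative only; 'before', 'in_parent'
   and 'in_child' are hypothetical handlers).  Each pthread_atfork()
   call in the client becomes one ForkHandlerEntry on the stack
   managed above:

      static void before(void)    { }  // e.g. grab locks
      static void in_parent(void) { }  // e.g. release locks
      static void in_child(void)  { }  // e.g. reset state
      ...
      pthread_atfork(before, in_parent, in_child);
*/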


static
void do__get_stack_info ( ThreadId tid, ThreadId which, StackInfo* si )
{
   Char msg_buf[100];

   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf, "get_stack_info for tid %d", which );
      print_pthread_event(tid, msg_buf);
   }

   if (!VG_(is_valid_tid)(which)) {
      SET_PTHREQ_RETVAL(tid, -1);
      return;
   }

   si->base = VG_(threads)[which].stack_base;
   si->size = VG_(threads)[which].stack_size
              - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
              - VG_(threads)[which].stack_guard_size;
   si->guardsize = VG_(threads)[which].stack_guard_size;

   SET_PTHREQ_RETVAL(tid, 0);
}

/* ---------------------------------------------------------------------
   Shadow register manipulations
   ------------------------------------------------------------------ */

void VG_(set_shadow_regs_area) ( ThreadId tid, OffT offset, SizeT size,
                                 const UChar* area )
{
   ThreadState* tst;

   vg_assert(VG_(is_valid_tid)(tid));
   tst = & VG_(threads)[tid];

   // Bounds check
   vg_assert(0 <= offset && offset < sizeof(VexGuestArchState));
   vg_assert(offset + size <= sizeof(VexGuestArchState));

   VG_(memcpy)( (void*)(((Addr)(&tst->arch.vex_shadow)) + offset), area, size);
}

void VG_(get_shadow_regs_area) ( ThreadId tid, OffT offset, SizeT size,
                                 UChar* area )
{
   ThreadState* tst;

   vg_assert(VG_(is_valid_tid)(tid));
   tst = & VG_(threads)[tid];

   // Bounds check
   vg_assert(0 <= offset && offset < sizeof(VexGuestArchState));
   vg_assert(offset + size <= sizeof(VexGuestArchState));

   VG_(memcpy)( area, (void*)(((Addr)&(tst->arch.vex_shadow)) + offset), size);
}
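
/* Tool-side usage sketch (illustrative; mirrors
   VG_(set_return_from_syscall_shadow) just below).  Reading the shadow
   of the syscall return register into a local:

      UWord shadow;
      VG_(get_shadow_regs_area)(tid, O_SYSCALL_RET, sizeof(UWord),
                                (UChar*)&shadow);
*/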


void VG_(set_return_from_syscall_shadow) ( ThreadId tid, UWord ret_shadow )
{
   VG_(set_shadow_regs_area)(tid, O_SYSCALL_RET, sizeof(UWord),
                             (UChar*)&ret_shadow);
}

UInt VG_(get_exit_status_shadow) ( ThreadId tid )
{
   UInt ret;
   VG_(get_shadow_regs_area)(tid, O_SYSCALL_ARG1, sizeof(UInt),
                             (UChar*)&ret);
   return ret;
}

void VG_(intercept_libc_freeres_wrapper)(Addr addr)
{
   __libc_freeres_wrapper = addr;
}

/* ---------------------------------------------------------------------
   Handle client requests.
   ------------------------------------------------------------------ */

/* Do a client request for the thread tid.  After the request, tid may
   or may not still be runnable; if not, the scheduler will have to
   choose a new thread to run.
*/
static
void do_client_request ( ThreadId tid, UWord* arg )
{
   UWord req_no = arg[0];

   if (0)
      VG_(printf)("req no = 0x%llx, arg = %p\n", (ULong)req_no, arg);
   switch (req_no) {

      case VG_USERREQ__CLIENT_CALL0: {
         UWord (*f)(ThreadId) = (void*)arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL0: func=%p\n", f);
         else
            SET_CLCALL_RETVAL(tid, f ( tid ), (Addr)f);
         break;
      }
      case VG_USERREQ__CLIENT_CALL1: {
         UWord (*f)(ThreadId, UWord) = (void*)arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL1: func=%p\n", f);
         else
            SET_CLCALL_RETVAL(tid, f ( tid, arg[2] ), (Addr)f );
         break;
      }
      case VG_USERREQ__CLIENT_CALL2: {
         UWord (*f)(ThreadId, UWord, UWord) = (void*)arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL2: func=%p\n", f);
         else
            SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3] ), (Addr)f );
         break;
      }
      case VG_USERREQ__CLIENT_CALL3: {
         UWord (*f)(ThreadId, UWord, UWord, UWord) = (void*)arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL3: func=%p\n", f);
         else
            SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3], arg[4] ), (Addr)f );
         break;
      }
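
      /* Client-side note for the CLIENT_CALL cases above (an
         assumption about the wrapper macros, which live in valgrind.h,
         not in this file): they are exposed roughly as
         VALGRIND_NON_SIMD_CALL1(fn, arg), where 'fn' receives the
         ThreadId as its first parameter, matching the function-pointer
         signatures used above. */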

      /* Note: for tools that replace malloc() et al, we want to call
         the replacement versions.  For those that don't, we want to call
         VG_(cli_malloc)() et al.  We do this by calling TL_(malloc)(), which
         malloc-replacing tools must replace, but have the default definition
         of TL_(malloc)() call VG_(cli_malloc)(). */

      /* Note: for MALLOC and FREE, must set the appropriate "lock"... see
         the comment in vg_defaults.c/TL_(malloc)() for why. */
      case VG_USERREQ__MALLOC:
         VG_(sk_malloc_called_by_scheduler) = True;
         SET_PTHREQ_RETVAL(
            tid, (Addr)TL_(malloc) ( tid, arg[1] )
         );
         VG_(sk_malloc_called_by_scheduler) = False;
         break;

      case VG_USERREQ__FREE:
         VG_(sk_malloc_called_by_scheduler) = True;
         TL_(free) ( tid, (void*)arg[1] );
         VG_(sk_malloc_called_by_scheduler) = False;
         SET_PTHREQ_RETVAL(tid, 0); /* irrelevant */
         break;

      case VG_USERREQ__PTHREAD_GET_THREADID:
         SET_PTHREQ_RETVAL(tid, tid);
         break;

      case VG_USERREQ__RUNNING_ON_VALGRIND:
         SET_CLREQ_RETVAL(tid, 1);
         break;

      case VG_USERREQ__GET_PTHREAD_TRACE_LEVEL:
         SET_PTHREQ_RETVAL(tid, VG_(clo_trace_pthread_level));
         break;

      case VG_USERREQ__READ_MILLISECOND_TIMER:
         SET_PTHREQ_RETVAL(tid, VG_(read_millisecond_timer)());
         break;

      /* Some of these may make thread tid non-runnable, but the
         scheduler checks for that on return from this function. */
      case VG_USERREQ__PTHREAD_MUTEX_LOCK:
         do_pthread_mutex_lock( tid, False, (void *)(arg[1]), 0xFFFFFFFF );
         break;

      case VG_USERREQ__PTHREAD_MUTEX_TIMEDLOCK:
         do_pthread_mutex_lock( tid, False, (void *)(arg[1]), arg[2] );
         break;

      case VG_USERREQ__PTHREAD_MUTEX_TRYLOCK:
         do_pthread_mutex_lock( tid, True, (void *)(arg[1]), 0xFFFFFFFF );
         break;

      case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
         do_pthread_mutex_unlock( tid, (void *)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_GETSPECIFIC_PTR:
         do_pthread_getspecific_ptr ( tid );
         break;

      case VG_USERREQ__SET_CANCELTYPE:
         do__set_canceltype ( tid, arg[1] );
         break;

      case VG_USERREQ__CLEANUP_PUSH:
         do__cleanup_push ( tid, (CleanupEntry*)(arg[1]) );
         break;

      case VG_USERREQ__CLEANUP_POP:
         do__cleanup_pop ( tid, (CleanupEntry*)(arg[1]) );
         break;

      case VG_USERREQ__TESTCANCEL:
         do__testcancel ( tid );
         break;

      case VG_USERREQ__PTHREAD_JOIN:
         do_pthread_join( tid, arg[1], (void**)(arg[2]) );
         break;

      case VG_USERREQ__PTHREAD_COND_WAIT:
         do_pthread_cond_wait( tid,
                               (vg_pthread_cond_t *)(arg[1]),
                               (vg_pthread_mutex_t *)(arg[2]),
                               0xFFFFFFFF /* no timeout */ );
         break;

      case VG_USERREQ__PTHREAD_COND_TIMEDWAIT:
         do_pthread_cond_wait( tid,
                               (vg_pthread_cond_t *)(arg[1]),
                               (vg_pthread_mutex_t *)(arg[2]),
                               arg[3] /* timeout millisecond point */ );
         break;

      case VG_USERREQ__PTHREAD_COND_SIGNAL:
         do_pthread_cond_signal_or_broadcast(
            tid,
            False, /* signal, not broadcast */
            (vg_pthread_cond_t *)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_COND_BROADCAST:
         do_pthread_cond_signal_or_broadcast(
            tid,
            True, /* broadcast, not signal */
            (vg_pthread_cond_t *)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_KEY_VALIDATE:
         do_pthread_key_validate ( tid,
                                   (pthread_key_t)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_KEY_CREATE:
         do_pthread_key_create ( tid,
                                 (pthread_key_t*)(arg[1]),
                                 (void(*)(void*))(arg[2]) );
         break;

      case VG_USERREQ__PTHREAD_KEY_DELETE:
         do_pthread_key_delete ( tid,
                                 (pthread_key_t)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_SETSPECIFIC_PTR:
         do_pthread_setspecific_ptr ( tid,
                                      (void**)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_SIGMASK:
         do_pthread_sigmask ( tid,
                              arg[1],
                              (vki_sigset_t*)(arg[2]),
                              (vki_sigset_t*)(arg[3]) );
         break;

      case VG_USERREQ__PTHREAD_KILL:
         do_pthread_kill ( tid, arg[1], arg[2] );
         break;

      case VG_USERREQ__PTHREAD_YIELD:
         do_pthread_yield ( tid );
         /* On return from do_client_request(), the scheduler will
            select a new thread to run. */
         break;

      case VG_USERREQ__SET_CANCELSTATE:
         do__set_cancelstate ( tid, arg[1] );
         break;

      case VG_USERREQ__SET_OR_GET_DETACH:
         do__set_or_get_detach ( tid, arg[1], arg[2] );
         break;

      case VG_USERREQ__SET_CANCELPEND:
         do__set_cancelpend ( tid, arg[1], (void(*)(void*))arg[2] );
         break;

      case VG_USERREQ__WAIT_JOINER:
         do__wait_joiner ( tid, (void*)arg[1] );
         break;

      case VG_USERREQ__QUIT:
         do__quit ( tid );
         break;

      case VG_USERREQ__APPLY_IN_NEW_THREAD:
         do__apply_in_new_thread ( tid, (void*(*)(void*))arg[1],
                                   (void*)arg[2], (StackInfo*)(arg[3]) );
         break;

      case VG_USERREQ__GET_KEY_D_AND_S:
         do__get_key_destr_and_spec ( tid,
                                      (pthread_key_t)arg[1],
                                      (CleanupEntry*)arg[2] );
         break;

      case VG_USERREQ__NUKE_OTHER_THREADS:
         VG_(nuke_all_threads_except) ( tid );
         SET_PTHREQ_RETVAL(tid, 0);
         break;

      case VG_USERREQ__PTHREAD_ERROR:
         VG_(record_pthread_error)( tid, (Char*)(arg[1]) );
         SET_PTHREQ_RETVAL(tid, 0);
         break;

      case VG_USERREQ__SET_FHSTACK_USED:
         do__set_fhstack_used( tid, (Int)(arg[1]) );
         break;

      case VG_USERREQ__GET_FHSTACK_USED:
         do__get_fhstack_used( tid );
         break;

      case VG_USERREQ__SET_FHSTACK_ENTRY:
         do__set_fhstack_entry( tid, (Int)(arg[1]),
                                (ForkHandlerEntry*)(arg[2]) );
         break;

      case VG_USERREQ__GET_FHSTACK_ENTRY:
         do__get_fhstack_entry( tid, (Int)(arg[1]),
                                (ForkHandlerEntry*)(arg[2]) );
         break;

      case VG_USERREQ__SIGNAL_RETURNS:
         handle_signal_return(tid);
         break;

      case VG_USERREQ__GET_STACK_INFO:
         do__get_stack_info( tid, (Int)(arg[1]), (StackInfo*)(arg[2]) );
         break;

      case VG_USERREQ__GET_SIGRT_MIN:
         SET_PTHREQ_RETVAL(tid, VG_(sig_rtmin));
         break;

      case VG_USERREQ__GET_SIGRT_MAX:
         SET_PTHREQ_RETVAL(tid, VG_(sig_rtmax));
         break;

      case VG_USERREQ__ALLOC_RTSIG:
         SET_PTHREQ_RETVAL(tid, VG_(sig_alloc_rtsig)((Int)arg[1]));
         break;

      case VG_USERREQ__PRINTF: {
         int count =
            VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] );
         SET_CLREQ_RETVAL( tid, count );
         break; }

      case VG_USERREQ__INTERNAL_PRINTF: {
         int count =
            VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (void*)arg[2] );
         SET_CLREQ_RETVAL( tid, count );
         break; }

      case VG_USERREQ__PRINTF_BACKTRACE: {
         ExeContext *e = VG_(get_ExeContext)( tid );
         int count =
            VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] );
         VG_(mini_stack_dump)(e->ips, VG_(clo_backtrace_size));
         SET_CLREQ_RETVAL( tid, count );
         break; }

      case VG_USERREQ__INTERNAL_PRINTF_BACKTRACE: {
         ExeContext *e = VG_(get_ExeContext)( tid );
         int count =
            VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (void*)arg[2] );
         VG_(mini_stack_dump)(e->ips, VG_(clo_backtrace_size));
         SET_CLREQ_RETVAL( tid, count );
         break; }
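
      /* Client-side usage sketch for the PRINTF requests above (an
         assumption about the client macro name, which lives in
         valgrind.h, not in this file; 'n' is a hypothetical variable):

            VALGRIND_PRINTF("processed %d items\n", n);
      */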

      case VG_USERREQ__GET_MALLOCFUNCS: {
         struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];

         info->sk_malloc               = (Addr)TL_(malloc);
         info->sk_calloc               = (Addr)TL_(calloc);
         info->sk_realloc              = (Addr)TL_(realloc);
         info->sk_memalign             = (Addr)TL_(memalign);
         info->sk___builtin_new        = (Addr)TL_(__builtin_new);
         info->sk___builtin_vec_new    = (Addr)TL_(__builtin_vec_new);
         info->sk_free                 = (Addr)TL_(free);
         info->sk___builtin_delete     = (Addr)TL_(__builtin_delete);
         info->sk___builtin_vec_delete = (Addr)TL_(__builtin_vec_delete);

         info->arena_payload_szB       = (Addr)VG_(arena_payload_szB);

         info->clo_sloppy_malloc       = VG_(clo_sloppy_malloc);
         info->clo_trace_malloc        = VG_(clo_trace_malloc);

         SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */

         break;
      }

      /* Requests from the client program */

      case VG_USERREQ__DISCARD_TRANSLATIONS:
         if (VG_(clo_verbosity) > 2)
            VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
                         " addr %p, len %d\n",
                         (void*)arg[1], arg[2] );

         VG_(invalidate_translations)( arg[1], arg[2], True );

         SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
         break;

      case VG_USERREQ__COUNT_ERRORS:
         SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() );
         break;

      default:
         if (VG_(needs).client_requests) {
            UWord ret;

            if (VG_(clo_verbosity) > 2)
               VG_(printf)("client request: code %x, addr %p, len %d\n",
                           arg[0], (void*)arg[1], arg[2] );

            if (TL_(handle_client_request) ( tid, arg, &ret ))
               SET_CLREQ_RETVAL(tid, ret);
         } else {
            static Bool whined = False;

            if (!whined) {
               // Allow for requests in core, but defined by tools, which
               // have 0 and 0 in their two high bytes.
               Char c1 = (arg[0] >> 24) & 0xff;
               Char c2 = (arg[0] >> 16) & 0xff;
               if (c1 == 0) c1 = '_';
               if (c2 == 0) c2 = '_';
               VG_(message)(Vg_UserMsg, "Warning:\n"
                  "  unhandled client request: 0x%x (%c%c+0x%x).  Perhaps\n"
                  "  VG_(needs).client_requests should be set?\n",
                  arg[0], c1, c2, arg[0] & 0xffff);
               whined = True;
            }
         }
         break;
   }
}


/* ---------------------------------------------------------------------
   Sanity checking.
   ------------------------------------------------------------------ */

/* Internal consistency checks on the sched/pthread structures. */
static
void scheduler_sanity ( void )
{
   vg_pthread_mutex_t* mx;
   vg_pthread_cond_t*  cv;
   Int                 i;
   struct timeout*     top;
   UInt                lasttime = 0;

   for (top = timeouts; top != NULL; top = top->next) {
      vg_assert(top->time >= lasttime);
      vg_assert(is_valid_or_empty_tid(top->tid));

#if 0
      /* assert timeout entry is either stale, or associated with a
         thread in the right state

         XXX disable for now - can be stale, but times happen to match
      */
      vg_assert(VG_(threads)[top->tid].awaken_at != top->time ||
                VG_(threads)[top->tid].status == VgTs_Sleeping ||
                VG_(threads)[top->tid].status == VgTs_WaitMX ||
                VG_(threads)[top->tid].status == VgTs_WaitCV);
#endif

      lasttime = top->time;
   }

   /* VG_(printf)("scheduler_sanity\n"); */
   for (i = 1; i < VG_N_THREADS; i++) {
      mx = VG_(threads)[i].associated_mx;
      cv = VG_(threads)[i].associated_cv;
      if (VG_(threads)[i].status == VgTs_WaitMX) {
         /* If we're waiting on a MX: (1) the mx is not null, (2, 3)
            it's actually held by someone, since otherwise this thread
            is deadlocked, (4) the mutex's owner is not us, since
            otherwise this thread is also deadlocked.  The logic in
            do_pthread_mutex_lock rejects attempts by a thread to lock
            a (non-recursive) mutex which it already owns.

            (2) has been seen to fail sometimes.  I don't know why.
            Possibly to do with signals. */
         vg_assert(cv == NULL);
         /* 1 */ vg_assert(mx != NULL);
         /* 2 */ vg_assert(mx->__vg_m_count > 0);
         /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__vg_m_owner));
         /* 4 */ vg_assert((UInt)i != (ThreadId)mx->__vg_m_owner ||
                           VG_(threads)[i].awaken_at != 0xFFFFFFFF);
      } else
      if (VG_(threads)[i].status == VgTs_WaitCV) {
         vg_assert(cv != NULL);
         vg_assert(mx != NULL);
      } else {
         vg_assert(cv == NULL);
         vg_assert(mx == NULL);
      }

      if (VG_(threads)[i].status != VgTs_Empty) {
         Int
            stack_used = (Addr)VG_(threads)[i].stack_highest_word
                         - (Addr)STACK_PTR(VG_(threads)[i].arch);
         Int
            stack_avail = VG_(threads)[i].stack_size
                          - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
                          - VG_(threads)[i].stack_guard_size;
         /* This test is a bit bogus - it doesn't take into account
            alternate signal stacks, for a start.  Also, if a thread
            has its stack pointer somewhere strange, killing Valgrind
            isn't the right answer. */
         if (0 && i > 1 /* not the root thread */
             && stack_used >= stack_avail) {
            VG_(message)(Vg_UserMsg,
               "Error: STACK OVERFLOW: "
               "thread %d: stack used %d, available %d",
               i, stack_used, stack_avail );
            VG_(message)(Vg_UserMsg,
               "Terminating Valgrind.  If thread(s) "
               "really need more stack, increase");
            VG_(message)(Vg_UserMsg,
               "VG_PTHREAD_STACK_SIZE in core.h and recompile.");
            VG_(exit)(1);
         }
      }
   }

   for (i = 0; i < VG_N_THREAD_KEYS; i++) {
      if (!vg_thread_keys[i].inuse)
         vg_assert(vg_thread_keys[i].destructor == NULL);
   }
}


/*--------------------------------------------------------------------*/
/*--- end                                           vg_scheduler.c ---*/
/*--------------------------------------------------------------------*/