blob: 4a0ec686c69dcc66feaa13dbe2245073116db6a2 [file] [log] [blame]
sewardje663cb92002-04-12 10:26:32 +00001
2/*--------------------------------------------------------------------*/
3/*--- A user-space pthreads implementation. vg_scheduler.c ---*/
4/*--------------------------------------------------------------------*/
5
6/*
njnc9539842002-10-02 13:26:35 +00007 This file is part of Valgrind, an extensible x86 protected-mode
8 emulator for monitoring program execution on x86-Unixes.
sewardje663cb92002-04-12 10:26:32 +00009
nethercotebb1c9912004-01-04 16:43:23 +000010 Copyright (C) 2000-2004 Julian Seward
sewardje663cb92002-04-12 10:26:32 +000011 jseward@acm.org
sewardje663cb92002-04-12 10:26:32 +000012
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
27
njn25e49d8e72002-09-23 09:36:25 +000028 The GNU General Public License is contained in the file COPYING.
sewardje663cb92002-04-12 10:26:32 +000029*/
30
njn25e49d8e72002-09-23 09:36:25 +000031#include "valgrind.h" /* for VG_USERREQ__RUNNING_ON_VALGRIND and
njn47363ab2003-04-21 13:24:40 +000032 VG_USERREQ__DISCARD_TRANSLATIONS, and others */
nethercote851b0f62003-11-13 23:02:16 +000033#include "vg_include.h"
sewardje663cb92002-04-12 10:26:32 +000034
sewardjb60c1ad2002-05-29 20:23:26 +000035/* BORKAGE/ISSUES as of 29 May 02
sewardje663cb92002-04-12 10:26:32 +000036
sewardj77e466c2002-04-14 02:29:29 +000037- Currently, when a signal is run, just the ThreadStatus.status fields
38 are saved in the signal frame, along with the CPU state. Question:
39 should I also save and restore:
40 ThreadStatus.joiner
41 ThreadStatus.waited_on_mid
42 ThreadStatus.awaken_at
43 ThreadStatus.retval
44 Currently unsure, and so am not doing so.
sewardje663cb92002-04-12 10:26:32 +000045
sewardj705d3cb2002-05-23 13:13:12 +000046- So, what's the deal with signals and mutexes? If a thread is
sewardj6072c362002-04-19 14:40:57 +000047 blocked on a mutex, or for a condition variable for that matter, can
48 signals still be delivered to it? This has serious consequences --
49 deadlocks, etc.
50
sewardjb60c1ad2002-05-29 20:23:26 +000051 TODO for valgrind-1.0:
52
sewardj055fbb82002-05-30 00:40:55 +000053- Update assertion checking in scheduler_sanity().
54
sewardjb60c1ad2002-05-29 20:23:26 +000055 TODO sometime:
56
57- Mutex scrubbing - clearup_after_thread_exit: look for threads
58 blocked on mutexes held by the exiting thread, and release them
59 appropriately. (??)
60
sewardje462e202002-04-13 04:09:07 +000061*/
sewardje663cb92002-04-12 10:26:32 +000062
63
64/* ---------------------------------------------------------------------
65 Types and globals for the scheduler.
66 ------------------------------------------------------------------ */
67
68/* type ThreadId is defined in vg_include.h. */
69
70/* struct ThreadState is defined in vg_include.h. */
71
sewardj018f7622002-05-15 21:13:39 +000072/* Globals. A statically allocated array of threads. NOTE: [0] is
73 never used, to simplify the simulation of initialisers for
sewardj6072c362002-04-19 14:40:57 +000074 LinuxThreads. */
sewardj018f7622002-05-15 21:13:39 +000075ThreadState VG_(threads)[VG_N_THREADS];
sewardje663cb92002-04-12 10:26:32 +000076
sewardj2cb00342002-06-28 01:46:26 +000077/* The process' fork-handler stack. */
78static Int vg_fhstack_used = 0;
79static ForkHandlerEntry vg_fhstack[VG_N_FORKHANDLERSTACK];
80
81
sewardj1e8cdc92002-04-18 11:37:52 +000082/* The tid of the thread currently in VG_(baseBlock). */
njn1be61612003-05-14 14:04:39 +000083static ThreadId vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;
sewardj1e8cdc92002-04-18 11:37:52 +000084
sewardjb52a1b02002-10-23 21:38:22 +000085/* The tid either currently in baseBlock, or the one that was in
 86 baseBlock before we saved it out; this is only updated when a new
 87 thread is loaded into the baseBlock */
njn1be61612003-05-14 14:04:39 +000088static ThreadId vg_tid_last_in_baseBlock = VG_INVALID_THREADID;
sewardje663cb92002-04-12 10:26:32 +000089
90/* vg_oursignalhandler() might longjmp(). Here's the jmp_buf. */
91jmp_buf VG_(scheduler_jmpbuf);
sewardj872051c2002-07-13 12:12:56 +000092/* This says whether scheduler_jmpbuf is actually valid. Needed so
93 that our signal handler doesn't longjmp when the buffer isn't
94 actually valid. */
95Bool VG_(scheduler_jmpbuf_valid) = False;
sewardje663cb92002-04-12 10:26:32 +000096/* ... and if so, here's the signal which caused it to do so. */
97Int VG_(longjmpd_on_signal);
jsgf855d93d2003-10-13 22:26:55 +000098/* If the current thread gets a synchronous unresumable signal, then
 99 its details are placed here by the signal handler, to be passed to
 100 the application's signal handler later on. */
101vki_ksiginfo_t VG_(unresumable_siginfo);
sewardje663cb92002-04-12 10:26:32 +0000102
jsgf855d93d2003-10-13 22:26:55 +0000103/* If != VG_INVALID_THREADID, this is the preferred tid to schedule */
104static ThreadId prefer_sched = VG_INVALID_THREADID;
sewardje663cb92002-04-12 10:26:32 +0000105
sewardj5f07b662002-04-23 16:52:51 +0000106/* Keeping track of keys. */
107typedef
108 struct {
109 /* Has this key been allocated ? */
110 Bool inuse;
111 /* If .inuse==True, records the address of the associated
112 destructor, or NULL if none. */
113 void (*destructor)(void*);
114 }
115 ThreadKeyState;
116
117/* And our array of thread keys. */
118static ThreadKeyState vg_thread_keys[VG_N_THREAD_KEYS];
119
120typedef UInt ThreadKey;
121
fitzhardinge98abfc72003-12-16 02:05:15 +0000122/* The scheduler does need to know the address of it so it can be
123 called at program exit. */
124static Addr VG_(__libc_freeres_wrapper);
125
sewardj5f07b662002-04-23 16:52:51 +0000126
njnd3040452003-05-19 15:04:06 +0000127UInt VG_(syscall_altered_shadow_reg);
128UInt VG_(signal_delivery_altered_shadow_reg);
129UInt VG_(pthread_op_altered_shadow_reg);
130UInt VG_(client_request_altered_shadow_reg);
njn25e49d8e72002-09-23 09:36:25 +0000131
sewardje663cb92002-04-12 10:26:32 +0000132/* Forwards */
sewardj124ca2a2002-06-20 10:19:38 +0000133static void do_client_request ( ThreadId tid );
sewardj6072c362002-04-19 14:40:57 +0000134static void scheduler_sanity ( void );
sewardj124ca2a2002-06-20 10:19:38 +0000135static void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid );
sewardjd140e442002-05-29 01:21:19 +0000136
sewardje663cb92002-04-12 10:26:32 +0000137/* ---------------------------------------------------------------------
138 Helper functions for the scheduler.
139 ------------------------------------------------------------------ */
140
sewardjb48e5002002-05-13 00:16:03 +0000141__inline__
142Bool VG_(is_valid_tid) ( ThreadId tid )
sewardj604ec3c2002-04-18 22:38:41 +0000143{
144 /* tid is unsigned, hence no < 0 test. */
sewardj6072c362002-04-19 14:40:57 +0000145 if (tid == 0) return False;
sewardj604ec3c2002-04-18 22:38:41 +0000146 if (tid >= VG_N_THREADS) return False;
sewardj018f7622002-05-15 21:13:39 +0000147 if (VG_(threads)[tid].status == VgTs_Empty) return False;
148 return True;
149}
150
151
152__inline__
153Bool VG_(is_valid_or_empty_tid) ( ThreadId tid )
154{
155 /* tid is unsigned, hence no < 0 test. */
156 if (tid == 0) return False;
157 if (tid >= VG_N_THREADS) return False;
sewardj604ec3c2002-04-18 22:38:41 +0000158 return True;
159}
160
161
sewardj1e8cdc92002-04-18 11:37:52 +0000162/* For constructing error messages only: try and identify a thread
njn25e49d8e72002-09-23 09:36:25 +0000163 whose stack satisfies the predicate p, or return VG_INVALID_THREADID
164 if none do. A small complication is dealing with any currently
165 VG_(baseBlock)-resident thread.
sewardj1e8cdc92002-04-18 11:37:52 +0000166*/
/* For constructing error messages only: find the first thread whose
   stack range [stack pointer .. stack_highest_word] satisfies the
   predicate p, or return VG_INVALID_THREADID if none do.

   Complication: if a thread is currently loaded into VG_(baseBlock),
   its saved m_esp in VG_(threads)[] is stale -- the live value is in
   VG_(baseBlock)[VGOFF_(m_esp)].  So that thread is tested first,
   using the live %esp, and then skipped in the main scan. */
ThreadId VG_(first_matching_thread_stack)
        ( Bool (*p) ( Addr stack_min, Addr stack_max ))
{
   ThreadId tid, tid_to_skip;

   tid_to_skip = VG_INVALID_THREADID;

   /* First check to see if there's a currently-loaded thread in
      VG_(baseBlock). */
   if (vg_tid_currently_in_baseBlock != VG_INVALID_THREADID) {
      tid = vg_tid_currently_in_baseBlock;
      /* Use the live %esp from baseBlock, not the stale saved copy. */
      if ( p ( VG_(baseBlock)[VGOFF_(m_esp)],
               VG_(threads)[tid].stack_highest_word) )
         return tid;
      else
         tid_to_skip = tid;
   }

   /* Scan all other slots (slot 0 is never used); skip empty slots
      and the already-tested resident thread. */
   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (VG_(threads)[tid].status == VgTs_Empty) continue;
      if (tid == tid_to_skip) continue;
      if ( p ( VG_(threads)[tid].m_esp,
               VG_(threads)[tid].stack_highest_word) )
         return tid;
   }
   return VG_INVALID_THREADID;
}
194
195
/* Print the scheduler status: for every occupied thread slot, its
   status (with the joinee tid for WaitJoinee), any associated mutex /
   condition variable, and a stack trace built from its saved
   registers.  Debug/diagnostic output only. */
void VG_(pp_sched_status) ( void )
{
   Int i;
   VG_(printf)("\nsched status:\n");
   /* Slot 0 is never used; start at 1. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty) continue;
      VG_(printf)("\nThread %d: status = ", i);
      switch (VG_(threads)[i].status) {
         case VgTs_Runnable:   VG_(printf)("Runnable"); break;
         case VgTs_WaitJoinee: VG_(printf)("WaitJoinee(%d)",
                                           VG_(threads)[i].joiner_jee_tid);
                               break;
         case VgTs_WaitJoiner: VG_(printf)("WaitJoiner"); break;
         case VgTs_Sleeping:   VG_(printf)("Sleeping"); break;
         case VgTs_WaitMX:     VG_(printf)("WaitMX"); break;
         case VgTs_WaitCV:     VG_(printf)("WaitCV"); break;
         case VgTs_WaitSys:    VG_(printf)("WaitSys"); break;
         /* Unknown status: print something rather than asserting, so
            this stays usable when state is corrupted. */
         default: VG_(printf)("???"); break;
      }
      VG_(printf)(", associated_mx = %p, associated_cv = %p\n",
                  VG_(threads)[i].associated_mx,
                  VG_(threads)[i].associated_cv );
      /* Stack trace from the thread's saved eip/ebp/esp. */
      VG_(pp_ExeContext)(
         VG_(get_ExeContext2)( VG_(threads)[i].m_eip, VG_(threads)[i].m_ebp,
                               VG_(threads)[i].m_esp,
                               VG_(threads)[i].stack_highest_word)
      );
   }
   VG_(printf)("\n");
}
227
sewardje663cb92002-04-12 10:26:32 +0000228
229
/* Trace helper: log a scheduler event for thread tid to the debug
   message stream. */
static
void print_sched_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, "  SCHED[%d]: %s", tid, what );
}
235
236
/* Trace helper: log a pthread-operation event for thread tid to the
   debug message stream. */
static
void print_pthread_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, "PTHREAD[%d]: %s", tid, what );
}
242
243
244static
245Char* name_of_sched_event ( UInt event )
246{
247 switch (event) {
sewardje663cb92002-04-12 10:26:32 +0000248 case VG_TRC_EBP_JMP_SYSCALL: return "SYSCALL";
249 case VG_TRC_EBP_JMP_CLIENTREQ: return "CLIENTREQ";
fitzhardingea02f8812003-12-18 09:06:09 +0000250 case VG_TRC_EBP_JMP_YIELD: return "YIELD";
sewardje663cb92002-04-12 10:26:32 +0000251 case VG_TRC_INNER_COUNTERZERO: return "COUNTERZERO";
252 case VG_TRC_INNER_FASTMISS: return "FASTMISS";
253 case VG_TRC_UNRESUMABLE_SIGNAL: return "FATALSIGNAL";
254 default: return "??UNKNOWN??";
255 }
256}
257
258
259/* Create a translation of the client basic block beginning at
260 orig_addr, and add it to the translation cache & translation table.
261 This probably doesn't really belong here, but, hey ...
262*/
/* Create a translation of the client basic block beginning at
   orig_addr, on behalf of thread tid, and add it to the translation
   cache & translation table.  The intermediate code buffer produced
   by the translator is freed before returning. */
static
void create_translation_for ( ThreadId tid, Addr orig_addr )
{
   Addr trans_addr;
   Int orig_size, trans_size;
   UShort jumps[VG_MAX_JUMPS];
   Int i;

   /* Mark all chained-jump slots as unused before translating. */
   for(i = 0; i < VG_MAX_JUMPS; i++)
      jumps[i] = (UShort)-1;

   /* Make a translation, into temporary storage. */
   VG_(translate)( tid, orig_addr, /* in */
                   &orig_size, &trans_addr, &trans_size, jumps ); /* out */

   /* Copy data at trans_addr into the translation cache. */
   /* Since the .orig_size and .trans_size fields are
      UShort, be paranoid. */
   vg_assert(orig_size > 0 && orig_size < 65536);
   vg_assert(trans_size > 0 && trans_size < 65536);

   VG_(add_to_trans_tab)( orig_addr, orig_size, trans_addr, trans_size, jumps );

   /* Free the intermediary -- was allocated by VG_(emit_code). */
   VG_(arena_free)( VG_AR_JITTER, (void*)trans_addr );
}
289
290
/* Allocate a completely empty ThreadState record.  Returns the index
   of the first empty slot; never returns 0, since slot 0 is reserved
   (see the comment on VG_(threads)).  Panics rather than returning
   failure if every slot is occupied. */
static
ThreadId vg_alloc_ThreadState ( void )
{
   Int i;
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty)
         return i;
   }
   VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
   VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
   VG_(core_panic)("VG_N_THREADS is too low");
   /*NOTREACHED*/
}
305
jsgf855d93d2003-10-13 22:26:55 +0000306ThreadState *VG_(get_ThreadState)(ThreadId tid)
307{
308 vg_assert(tid >= 0 && tid < VG_N_THREADS);
309 return &VG_(threads)[tid];
310}
311
njn72718642003-07-24 08:45:32 +0000312Bool VG_(is_running_thread)(ThreadId tid)
njn25e49d8e72002-09-23 09:36:25 +0000313{
njn72718642003-07-24 08:45:32 +0000314 ThreadId curr = VG_(get_current_tid)();
315 return (curr == tid && VG_INVALID_THREADID != tid);
njn25e49d8e72002-09-23 09:36:25 +0000316}
sewardje663cb92002-04-12 10:26:32 +0000317
sewardj1e8cdc92002-04-18 11:37:52 +0000318ThreadId VG_(get_current_tid) ( void )
319{
sewardjb52a1b02002-10-23 21:38:22 +0000320 if (!VG_(is_valid_tid)(vg_tid_currently_in_baseBlock))
321 return VG_INVALID_THREADID;
sewardj1e8cdc92002-04-18 11:37:52 +0000322 return vg_tid_currently_in_baseBlock;
323}
324
/* Return the tid currently in VG_(baseBlock), or, if none is resident
   right now, the last one that was.  Asserts that the "last" tid is
   valid, so this must not be called before any thread has been loaded
   into baseBlock. */
ThreadId VG_(get_current_or_recent_tid) ( void )
{
   /* Invariant: either a thread is resident and the two trackers
      agree, or nothing is resident and only "last" is meaningful. */
   vg_assert(vg_tid_currently_in_baseBlock == vg_tid_last_in_baseBlock ||
             vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);
   vg_assert(VG_(is_valid_tid)(vg_tid_last_in_baseBlock));

   return vg_tid_last_in_baseBlock;
}
333
/* Copy the saved state of a thread into VG_(baseBlock), ready for it
   to be run.  Requires that no other thread is currently resident;
   updates both the "currently in" and "last in" trackers. */
void VG_(load_thread_state) ( ThreadId tid )
{
   Int i;
   vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);

   /* LDT pointer and segment registers. */
   VG_(baseBlock)[VGOFF_(ldt)]  = (UInt)VG_(threads)[tid].ldt;
   VG_(baseBlock)[VGOFF_(m_cs)] = VG_(threads)[tid].m_cs;
   VG_(baseBlock)[VGOFF_(m_ss)] = VG_(threads)[tid].m_ss;
   VG_(baseBlock)[VGOFF_(m_ds)] = VG_(threads)[tid].m_ds;
   VG_(baseBlock)[VGOFF_(m_es)] = VG_(threads)[tid].m_es;
   VG_(baseBlock)[VGOFF_(m_fs)] = VG_(threads)[tid].m_fs;
   VG_(baseBlock)[VGOFF_(m_gs)] = VG_(threads)[tid].m_gs;

   /* General-purpose registers. */
   VG_(baseBlock)[VGOFF_(m_eax)] = VG_(threads)[tid].m_eax;
   VG_(baseBlock)[VGOFF_(m_ebx)] = VG_(threads)[tid].m_ebx;
   VG_(baseBlock)[VGOFF_(m_ecx)] = VG_(threads)[tid].m_ecx;
   VG_(baseBlock)[VGOFF_(m_edx)] = VG_(threads)[tid].m_edx;
   VG_(baseBlock)[VGOFF_(m_esi)] = VG_(threads)[tid].m_esi;
   VG_(baseBlock)[VGOFF_(m_edi)] = VG_(threads)[tid].m_edi;
   VG_(baseBlock)[VGOFF_(m_ebp)] = VG_(threads)[tid].m_ebp;
   VG_(baseBlock)[VGOFF_(m_esp)] = VG_(threads)[tid].m_esp;
   /* In baseBlock, EFLAGS is kept with the D flag stripped out; the
      direction flag lives separately in m_dflag. */
   VG_(baseBlock)[VGOFF_(m_eflags)]
      = VG_(threads)[tid].m_eflags & ~EFlagD;
   VG_(baseBlock)[VGOFF_(m_dflag)]
      = VG_(extractDflag)(VG_(threads)[tid].m_eflags);
   VG_(baseBlock)[VGOFF_(m_eip)] = VG_(threads)[tid].m_eip;

   /* FPU/SSE state, word by word. */
   for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
      VG_(baseBlock)[VGOFF_(m_ssestate) + i]
         = VG_(threads)[tid].m_sse[i];

   if (VG_(needs).shadow_regs) {
      /* Tool tracks shadow registers: load them too. */
      VG_(baseBlock)[VGOFF_(sh_eax)] = VG_(threads)[tid].sh_eax;
      VG_(baseBlock)[VGOFF_(sh_ebx)] = VG_(threads)[tid].sh_ebx;
      VG_(baseBlock)[VGOFF_(sh_ecx)] = VG_(threads)[tid].sh_ecx;
      VG_(baseBlock)[VGOFF_(sh_edx)] = VG_(threads)[tid].sh_edx;
      VG_(baseBlock)[VGOFF_(sh_esi)] = VG_(threads)[tid].sh_esi;
      VG_(baseBlock)[VGOFF_(sh_edi)] = VG_(threads)[tid].sh_edi;
      VG_(baseBlock)[VGOFF_(sh_ebp)] = VG_(threads)[tid].sh_ebp;
      VG_(baseBlock)[VGOFF_(sh_esp)] = VG_(threads)[tid].sh_esp;
      VG_(baseBlock)[VGOFF_(sh_eflags)] = VG_(threads)[tid].sh_eflags;
   } else {
      /* Fields shouldn't be used -- check their values haven't changed
         from the sentinel written by save_thread_state. */
      vg_assert(
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_eax &&
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_ebx &&
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_ecx &&
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_edx &&
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_esi &&
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_edi &&
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_ebp &&
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_esp &&
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_eflags);
   }

   vg_tid_currently_in_baseBlock = tid;
   vg_tid_last_in_baseBlock = tid;
}
394
395
/* Copy the state of a thread from VG_(baseBlock), presumably after it
   has been descheduled.  For sanity-check purposes, fill the vacated
   VG_(baseBlock) with garbage so as to make the system more likely to
   fail quickly if we erroneously continue to poke around inside
   VG_(baseBlock) without first doing a load_thread_state().
*/
void VG_(save_thread_state) ( ThreadId tid )
{
   Int i;
   const UInt junk = 0xDEADBEEF;

   vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);


   /* We don't copy out the LDT entry, because it can never be changed
      by the normal actions of the thread, only by the modify_ldt
      syscall, in which case we will correctly be updating
      VG_(threads)[tid].ldt.  This printf happens iff the following
      assertion fails. */
   if ((void*)VG_(threads)[tid].ldt != (void*)VG_(baseBlock)[VGOFF_(ldt)])
      VG_(printf)("VG_(threads)[%d].ldt=%p VG_(baseBlock)[VGOFF_(ldt)]=%p\n",
                  tid, (void*)VG_(threads)[tid].ldt,
                  (void*)VG_(baseBlock)[VGOFF_(ldt)]);

   vg_assert((void*)VG_(threads)[tid].ldt
             == (void*)VG_(baseBlock)[VGOFF_(ldt)]);

   /* Segment registers. */
   VG_(threads)[tid].m_cs = VG_(baseBlock)[VGOFF_(m_cs)];
   VG_(threads)[tid].m_ss = VG_(baseBlock)[VGOFF_(m_ss)];
   VG_(threads)[tid].m_ds = VG_(baseBlock)[VGOFF_(m_ds)];
   VG_(threads)[tid].m_es = VG_(baseBlock)[VGOFF_(m_es)];
   VG_(threads)[tid].m_fs = VG_(baseBlock)[VGOFF_(m_fs)];
   VG_(threads)[tid].m_gs = VG_(baseBlock)[VGOFF_(m_gs)];

   /* General-purpose registers. */
   VG_(threads)[tid].m_eax = VG_(baseBlock)[VGOFF_(m_eax)];
   VG_(threads)[tid].m_ebx = VG_(baseBlock)[VGOFF_(m_ebx)];
   VG_(threads)[tid].m_ecx = VG_(baseBlock)[VGOFF_(m_ecx)];
   VG_(threads)[tid].m_edx = VG_(baseBlock)[VGOFF_(m_edx)];
   VG_(threads)[tid].m_esi = VG_(baseBlock)[VGOFF_(m_esi)];
   VG_(threads)[tid].m_edi = VG_(baseBlock)[VGOFF_(m_edi)];
   VG_(threads)[tid].m_ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
   VG_(threads)[tid].m_esp = VG_(baseBlock)[VGOFF_(m_esp)];
   /* Recombine the D flag (kept separately in m_dflag while running)
      back into the saved EFLAGS. */
   VG_(threads)[tid].m_eflags
      = VG_(insertDflag)(VG_(baseBlock)[VGOFF_(m_eflags)],
                         VG_(baseBlock)[VGOFF_(m_dflag)]);
   VG_(threads)[tid].m_eip = VG_(baseBlock)[VGOFF_(m_eip)];

   /* FPU/SSE state, word by word. */
   for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
      VG_(threads)[tid].m_sse[i]
         = VG_(baseBlock)[VGOFF_(m_ssestate) + i];

   if (VG_(needs).shadow_regs) {
      /* Tool tracks shadow registers: save them too. */
      VG_(threads)[tid].sh_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
      VG_(threads)[tid].sh_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
      VG_(threads)[tid].sh_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
      VG_(threads)[tid].sh_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
      VG_(threads)[tid].sh_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
      VG_(threads)[tid].sh_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
      VG_(threads)[tid].sh_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
      VG_(threads)[tid].sh_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
      VG_(threads)[tid].sh_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
   } else {
      /* Fill with recognisable junk (checked on the next load). */
      VG_(threads)[tid].sh_eax =
      VG_(threads)[tid].sh_ebx =
      VG_(threads)[tid].sh_ecx =
      VG_(threads)[tid].sh_edx =
      VG_(threads)[tid].sh_esi =
      VG_(threads)[tid].sh_edi =
      VG_(threads)[tid].sh_ebp =
      VG_(threads)[tid].sh_esp =
      VG_(threads)[tid].sh_eflags = VG_UNUSED_SHADOW_REG_VALUE;
   }

   /* Fill it up with junk. */
   VG_(baseBlock)[VGOFF_(ldt)] = junk;
   VG_(baseBlock)[VGOFF_(m_cs)] = junk;
   VG_(baseBlock)[VGOFF_(m_ss)] = junk;
   VG_(baseBlock)[VGOFF_(m_ds)] = junk;
   VG_(baseBlock)[VGOFF_(m_es)] = junk;
   VG_(baseBlock)[VGOFF_(m_fs)] = junk;
   VG_(baseBlock)[VGOFF_(m_gs)] = junk;

   VG_(baseBlock)[VGOFF_(m_eax)] = junk;
   VG_(baseBlock)[VGOFF_(m_ebx)] = junk;
   VG_(baseBlock)[VGOFF_(m_ecx)] = junk;
   VG_(baseBlock)[VGOFF_(m_edx)] = junk;
   VG_(baseBlock)[VGOFF_(m_esi)] = junk;
   VG_(baseBlock)[VGOFF_(m_edi)] = junk;
   VG_(baseBlock)[VGOFF_(m_ebp)] = junk;
   VG_(baseBlock)[VGOFF_(m_esp)] = junk;
   VG_(baseBlock)[VGOFF_(m_eflags)] = junk;
   VG_(baseBlock)[VGOFF_(m_eip)] = junk;

   for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
      VG_(baseBlock)[VGOFF_(m_ssestate) + i] = junk;

   vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;
}
495
496
/* Run the thread tid for a while, and return a VG_TRC_* value to the
   scheduler indicating what happened.  The thread must be Runnable.
   If the client takes an unresumable fault, our signal handler
   longjmps back here and we return VG_TRC_UNRESUMABLE_SIGNAL. */
static
UInt run_thread_for_a_while ( ThreadId tid )
{
   /* volatile: trc is written between setjmp and a possible longjmp,
      so it must not be cached in a register across the setjmp. */
   volatile UInt trc = 0;
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   vg_assert(VG_(bbs_to_go) > 0);
   vg_assert(!VG_(scheduler_jmpbuf_valid));

   VGP_PUSHCC(VgpRun);
   VG_(load_thread_state) ( tid );

   /* there should be no undealt-with signals */
   vg_assert(VG_(unresumable_siginfo).si_signo == 0);

   if (__builtin_setjmp(VG_(scheduler_jmpbuf)) == 0) {
      /* try this ... */
      VG_(scheduler_jmpbuf_valid) = True;
      trc = VG_(run_innerloop)();
      VG_(scheduler_jmpbuf_valid) = False;
      /* We get here if the client didn't take a fault. */
   } else {
      /* We get here if the client took a fault, which caused our
         signal handler to longjmp. */
      VG_(scheduler_jmpbuf_valid) = False;
      vg_assert(trc == 0);
      trc = VG_TRC_UNRESUMABLE_SIGNAL;
   }

   vg_assert(!VG_(scheduler_jmpbuf_valid));

   VG_(save_thread_state) ( tid );
   VGP_POPCC(VgpRun);
   return trc;
}
534
535
sewardj20917d82002-05-28 01:36:45 +0000536static
537void mostly_clear_thread_record ( ThreadId tid )
538{
sewardj20917d82002-05-28 01:36:45 +0000539 vg_assert(tid >= 0 && tid < VG_N_THREADS);
sewardj92a59562002-09-30 00:53:10 +0000540 VG_(threads)[tid].ldt = NULL;
sewardj20917d82002-05-28 01:36:45 +0000541 VG_(threads)[tid].tid = tid;
542 VG_(threads)[tid].status = VgTs_Empty;
543 VG_(threads)[tid].associated_mx = NULL;
544 VG_(threads)[tid].associated_cv = NULL;
545 VG_(threads)[tid].awaken_at = 0;
546 VG_(threads)[tid].joinee_retval = NULL;
547 VG_(threads)[tid].joiner_thread_return = NULL;
548 VG_(threads)[tid].joiner_jee_tid = VG_INVALID_THREADID;
sewardj8ad94e12002-05-29 00:10:20 +0000549 VG_(threads)[tid].detached = False;
sewardj20917d82002-05-28 01:36:45 +0000550 VG_(threads)[tid].cancel_st = True; /* PTHREAD_CANCEL_ENABLE */
551 VG_(threads)[tid].cancel_ty = True; /* PTHREAD_CANCEL_DEFERRED */
552 VG_(threads)[tid].cancel_pend = NULL; /* not pending */
sewardj8ad94e12002-05-29 00:10:20 +0000553 VG_(threads)[tid].custack_used = 0;
sewardj20917d82002-05-28 01:36:45 +0000554 VG_(ksigemptyset)(&VG_(threads)[tid].sig_mask);
jsgf855d93d2003-10-13 22:26:55 +0000555 VG_(ksigfillset)(&VG_(threads)[tid].eff_sig_mask);
sewardj00a66b12002-10-12 16:42:35 +0000556 VG_(threads)[tid].specifics_ptr = NULL;
jsgf855d93d2003-10-13 22:26:55 +0000557
558 VG_(threads)[tid].syscallno = -1;
559 VG_(threads)[tid].sys_pre_res = NULL;
560
561 VG_(threads)[tid].proxy = NULL;
sewardj20917d82002-05-28 01:36:45 +0000562}
563
564
jsgf855d93d2003-10-13 22:26:55 +0000565
/* Initialise the scheduler.  Create a single "main" thread ready to
   run, with special ThreadId of one.  This is called at startup; the
   caller takes care that the client's state is parked in
   VG_(baseBlock) beforehand.
*/
void VG_(scheduler_init) ( void )
{
   Int i;
   ThreadId tid_main;

   /* Clear every slot, including the never-used slot 0, so the whole
      table starts in a known state. */
   for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
      mostly_clear_thread_record(i);
      VG_(threads)[i].stack_size = 0;
      VG_(threads)[i].stack_base = (Addr)NULL;
      VG_(threads)[i].stack_highest_word = (Addr)NULL;
   }

   /* No thread-specific-data keys allocated yet. */
   for (i = 0; i < VG_N_THREAD_KEYS; i++) {
      vg_thread_keys[i].inuse      = False;
      vg_thread_keys[i].destructor = NULL;
   }

   /* Empty fork-handler stack. */
   vg_fhstack_used = 0;

   /* Assert this is thread zero, which has certain magic
      properties. */
   tid_main = vg_alloc_ThreadState();
   vg_assert(tid_main == 1);
   VG_(threads)[tid_main].status = VgTs_Runnable;

   /* Copy VG_(baseBlock) state to tid_main's slot. */
   vg_tid_currently_in_baseBlock = tid_main;
   vg_tid_last_in_baseBlock = tid_main;
   VG_(save_thread_state) ( tid_main );

   /* Record the client stack extent for the main thread. */
   VG_(threads)[tid_main].stack_highest_word
      = VG_(clstk_end) - 4;
   VG_(threads)[tid_main].stack_base = VG_(clstk_base);
   VG_(threads)[tid_main].stack_size = VG_(clstk_end) - VG_(clstk_base);

   /* So now ... (save_thread_state vacated baseBlock) */
   vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);

   /* Not running client code right now. */
   VG_(scheduler_jmpbuf_valid) = False;

   /* Proxy for main thread */
   VG_(proxy_create)(tid_main);
}
615
616
sewardj3947e622002-05-23 16:52:11 +0000617
sewardje663cb92002-04-12 10:26:32 +0000618
619
sewardj6072c362002-04-19 14:40:57 +0000620/* vthread tid is returning from a signal handler; modify its
621 stack/regs accordingly. */
sewardj1ffa8da2002-04-26 22:47:57 +0000622
/* Thread tid is returning from a signal handler; modify its
   stack/regs accordingly.  The only special case handled here is an
   interrupted nanosleep(), which is made to return -EINTR with an
   (approximate) remaining-time value. */
static
void handle_signal_return ( ThreadId tid )
{
   Bool restart_blocked_syscalls;
   struct vki_timespec * rem;

   vg_assert(VG_(is_valid_tid)(tid));

   /* Ask the signal machinery whether the interrupted syscall should
      be restarted (SA_RESTART-style semantics). */
   restart_blocked_syscalls = VG_(signal_returns)(tid);

   if (restart_blocked_syscalls)
      /* Easy; we don't have to do anything. */
      return;

   if (VG_(threads)[tid].status == VgTs_Sleeping
       && VG_(threads)[tid].m_eax == __NR_nanosleep) {
      /* We interrupted a nanosleep().  The right thing to do is to
         write the unused time to nanosleep's second param, but that's
         too much effort ... we just say that 1 nanosecond was not
         used, and return EINTR. */
      rem = (struct vki_timespec *)VG_(threads)[tid].m_ecx; /* arg2 */
      if (rem != NULL) {
         rem->tv_sec = 0;
         rem->tv_nsec = 1;
      }
      SET_SYSCALL_RETVAL(tid, -VKI_EINTR);
      VG_(threads)[tid].status = VgTs_Runnable;
      return;
   }

   /* All other cases?  Just return. */
}
655
656
sewardje663cb92002-04-12 10:26:32 +0000657static
658void sched_do_syscall ( ThreadId tid )
659{
jsgf855d93d2003-10-13 22:26:55 +0000660 Int syscall_no;
njn25e49d8e72002-09-23 09:36:25 +0000661 Char msg_buf[100];
sewardje663cb92002-04-12 10:26:32 +0000662
sewardjb48e5002002-05-13 00:16:03 +0000663 vg_assert(VG_(is_valid_tid)(tid));
sewardj018f7622002-05-15 21:13:39 +0000664 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +0000665
sewardj018f7622002-05-15 21:13:39 +0000666 syscall_no = VG_(threads)[tid].m_eax; /* syscall number */
sewardje663cb92002-04-12 10:26:32 +0000667
jsgf855d93d2003-10-13 22:26:55 +0000668 /* Special-case nanosleep because we can. But should we?
669
670 XXX not doing so for now, because it doesn't seem to work
671 properly, and we can use the syscall nanosleep just as easily.
672 */
673 if (0 && syscall_no == __NR_nanosleep) {
sewardj5f07b662002-04-23 16:52:51 +0000674 UInt t_now, t_awaken;
sewardje663cb92002-04-12 10:26:32 +0000675 struct vki_timespec* req;
sewardj018f7622002-05-15 21:13:39 +0000676 req = (struct vki_timespec*)VG_(threads)[tid].m_ebx; /* arg1 */
jsgf855d93d2003-10-13 22:26:55 +0000677
678 if (req->tv_sec < 0 || req->tv_nsec < 0 || req->tv_nsec >= 1000000000) {
679 SET_SYSCALL_RETVAL(tid, -VKI_EINVAL);
680 return;
681 }
682
sewardj5f07b662002-04-23 16:52:51 +0000683 t_now = VG_(read_millisecond_timer)();
sewardje663cb92002-04-12 10:26:32 +0000684 t_awaken
685 = t_now
sewardj5f07b662002-04-23 16:52:51 +0000686 + (UInt)1000ULL * (UInt)(req->tv_sec)
687 + (UInt)(req->tv_nsec) / 1000000;
sewardj018f7622002-05-15 21:13:39 +0000688 VG_(threads)[tid].status = VgTs_Sleeping;
689 VG_(threads)[tid].awaken_at = t_awaken;
sewardj8937c812002-04-12 20:12:20 +0000690 if (VG_(clo_trace_sched)) {
sewardj5f07b662002-04-23 16:52:51 +0000691 VG_(sprintf)(msg_buf, "at %d: nanosleep for %d",
sewardje663cb92002-04-12 10:26:32 +0000692 t_now, t_awaken-t_now);
693 print_sched_event(tid, msg_buf);
694 }
jsgf855d93d2003-10-13 22:26:55 +0000695 VG_(add_timeout)(tid, t_awaken);
sewardje663cb92002-04-12 10:26:32 +0000696 /* Force the scheduler to run something else for a while. */
697 return;
698 }
699
jsgf855d93d2003-10-13 22:26:55 +0000700 /* If pre_syscall returns true, then we're done immediately */
701 if (VG_(pre_syscall)(tid)) {
702 VG_(post_syscall(tid));
sewardj3947e622002-05-23 16:52:11 +0000703 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +0000704 } else {
jsgf855d93d2003-10-13 22:26:55 +0000705 vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
sewardje663cb92002-04-12 10:26:32 +0000706 }
707}
708
709
sewardje663cb92002-04-12 10:26:32 +0000710
/* One pending wakeup.  Entries live on the `timeouts` list below,
   which VG_(add_timeout) keeps sorted by ascending wakeup time and
   idle() mops up as the times pass. */
struct timeout {
   UInt		time;		/* time we should awaken, in mS as
				   measured by VG_(read_millisecond_timer) */
   ThreadId	tid;		/* thread which cares about this timeout */
   struct timeout *next;	/* next-later entry, or NULL */
};

/* Head of the time-ordered singly-linked list of pending timeouts. */
static struct timeout *timeouts;
718
719void VG_(add_timeout)(ThreadId tid, UInt time)
sewardje663cb92002-04-12 10:26:32 +0000720{
jsgf855d93d2003-10-13 22:26:55 +0000721 struct timeout *t = VG_(arena_malloc)(VG_AR_CORE, sizeof(*t));
722 struct timeout **prev, *tp;
sewardje663cb92002-04-12 10:26:32 +0000723
jsgf855d93d2003-10-13 22:26:55 +0000724 t->time = time;
725 t->tid = tid;
sewardje462e202002-04-13 04:09:07 +0000726
jsgf855d93d2003-10-13 22:26:55 +0000727 if (VG_(clo_trace_sched)) {
728 Char msg_buf[100];
729 VG_(sprintf)(msg_buf, "add_timeout: now=%u adding timeout at %u",
730 VG_(read_millisecond_timer)(), time);
731 print_sched_event(tid, msg_buf);
732 }
sewardj6072c362002-04-19 14:40:57 +0000733
jsgf855d93d2003-10-13 22:26:55 +0000734 for(tp = timeouts, prev = &timeouts;
735 tp != NULL && tp->time < time;
736 prev = &tp->next, tp = tp->next)
737 ;
738 t->next = tp;
739 *prev = t;
740}
741
/* Sleep for a while, but be willing to be woken early -- either by a
   proxy LWP reporting activity on its result fd, or by the nearest
   pending timeout falling due. */
static
void idle ( void )
{
   struct vki_pollfd pollfd[1];
   Int delta = -1;		   /* poll timeout in mS; -1 = wait forever */
   Int fd = VG_(proxy_resfd)();	   /* proxy LWPs' result fd; -1 if none */

   pollfd[0].fd = fd;
   pollfd[0].events = VKI_POLLIN;

   /* Look though the nearest timeouts, looking for the next future
      one (there may be stale past timeouts).  They'll all be mopped
      up below when the poll() finishes. */
   if (timeouts != NULL) {
      struct timeout *tp;
      Bool wicked = False;
      UInt now = VG_(read_millisecond_timer)();

      for(tp = timeouts; tp != NULL && tp->time < now; tp = tp->next) {
	 /* If a thread is still sleeping in the past, make it runnable */
	 ThreadState *tst = VG_(get_ThreadState)(tp->tid);
	 if (tst->status == VgTs_Sleeping)
	    tst->status = VgTs_Runnable;
	 wicked = True;		/* no sleep for the wicked */
      }

      /* tp is now the first not-yet-due timeout (if any); sleep no
	 longer than it takes to reach it. */
      if (tp != NULL) {
	 delta = tp->time - now;
	 vg_assert(delta >= 0);
      }
      /* A past-due timeout was found above, so don't block at all. */
      if (wicked)
	 delta = 0;
   }

   /* gotta wake up for something! */
   vg_assert(fd != -1 || delta != -1);

   /* If we need to do signal routing, then poll for pending signals
      every VG_(clo_signal_polltime) mS */
   if (VG_(do_signal_routing) && (delta > VG_(clo_signal_polltime) || delta == -1))
      delta = VG_(clo_signal_polltime);

   if (VG_(clo_trace_sched)) {
      Char msg_buf[100];
      VG_(sprintf)(msg_buf, "idle: waiting for %dms and fd %d",
		   delta, fd);
      print_sched_event(0, msg_buf);
   }

   /* Block until the proxy fd becomes readable or `delta` mS pass
      (nfds is 0 when there is no proxy fd to watch). */
   VG_(poll)(pollfd, fd != -1 ? 1 : 0, delta);

   /* See if there's anything on the timeout list which needs
      waking, and mop up anything in the past. */
   {
      UInt now = VG_(read_millisecond_timer)();
      struct timeout *tp;

      tp = timeouts;

      /* The list is time-ordered, so everything due is at the front. */
      while(tp && tp->time <= now) {
	 struct timeout *dead;
	 ThreadState *tst;

	 tst = VG_(get_ThreadState)(tp->tid);

	 if (VG_(clo_trace_sched)) {
	    Char msg_buf[100];
	    VG_(sprintf)(msg_buf, "idle: now=%u removing timeout at %u",
			 now, tp->time);
	    print_sched_event(tp->tid, msg_buf);
	 }

	 /* If awaken_at != tp->time then it means the timeout is
	    stale and we should just ignore it. */
	 if(tst->awaken_at == tp->time) {
	    switch(tst->status) {
	    case VgTs_Sleeping:
	       /* 0xFFFFFFFF marks "no timeout pending". */
	       tst->awaken_at = 0xFFFFFFFF;
	       tst->status = VgTs_Runnable;
	       break;

	    case VgTs_WaitCV:
	       /* pthread_cond_timedwait() whose deadline passed. */
	       do_pthread_cond_timedwait_TIMEOUT(tst->tid);
	       break;

	    default:
	       /* This is a bit odd but OK; if a thread had a timeout
		  but woke for some other reason (signal, condvar
		  wakeup), then it will still be on the list. */
	       if (0)
		  VG_(printf)("idle(): unexpected status tp->tid=%d tst->status = %d\n",
			      tp->tid, tst->status);
	       break;
	    }
	 }

	 /* Unlink and free the processed entry. */
	 dead = tp;
	 tp = tp->next;

	 VG_(arena_free)(VG_AR_CORE, dead);
      }

      /* tp is the first still-future entry; it becomes the new head. */
      timeouts = tp;
   }
}
848
849
sewardje663cb92002-04-12 10:26:32 +0000850/* ---------------------------------------------------------------------
851 The scheduler proper.
852 ------------------------------------------------------------------ */
853
854/* Run user-space threads until either
855 * Deadlock occurs
856 * One thread asks to shutdown Valgrind
857 * The specified number of basic blocks has gone by.
858*/
/* The top-level scheduler: repeatedly (Phase 0) sanity-check, (Phase
   1) route signals / collect proxy results and pick a runnable
   thread, (Phase 2) run it for up to a quantum, handling trivial
   events inline, and (Phase 3) handle whatever non-trivial event
   stopped it.  Returns a VgSrc_* code saying why the whole run
   ended. */
VgSchedReturnCode VG_(scheduler) ( void )
{
   ThreadId tid, tid_next;
   UInt     trc;                  /* thread return code from the dispatcher */
   UInt     dispatch_ctr_SAVED;   /* dispatch counter at quantum start */
   Int      done_this_time, n_in_bounded_wait;
   Int      n_exists, n_waiting_for_reaper;
   Addr     trans_addr;

   /* Start with the root thread.  tid in general indicates the
      currently runnable/just-finished-running thread. */
   VG_(last_run_tid) = tid = 1;

   /* This is the top level scheduler loop.  It falls into three
      phases. */
   while (True) {

      /* ======================= Phase 0 of 3 =======================
	 Be paranoid.  Always a good idea. */
     stage1:
      scheduler_sanity();
      VG_(do_sanity_checks)( False );

      /* ======================= Phase 1 of 3 =======================
	 Handle I/O completions and signals.  This may change the
	 status of various threads.  Then select a new thread to run,
	 or declare deadlock, or sleep if there are no runnable
	 threads but some are blocked on I/O.  */

      /* Was a debug-stop requested? */
      if (VG_(bbs_to_go) == 0)
	 goto debug_stop;

      /* Do the following loop until a runnable thread is found, or
	 deadlock is detected. */
      while (True) {

	 /* For stats purposes only. */
	 VG_(num_scheduling_events_MAJOR) ++;

	 /* Route signals to their proper places */
	 VG_(route_signals)();

	 /* See if any of the proxy LWPs report any activity: either a
	    syscall completing or a signal arriving. */
	 VG_(proxy_results)();

	 /* Try and find a thread (tid) to run.  Round-robin scan
	    starting just after the last-run tid, unless need_resched
	    nominated a preferred thread. */
	 tid_next = tid;
	 if (prefer_sched != VG_INVALID_THREADID) {
	    tid_next = prefer_sched-1;
	    prefer_sched = VG_INVALID_THREADID;
	 }
	 n_in_bounded_wait = 0;
	 n_exists = 0;
	 n_waiting_for_reaper = 0;
	 while (True) {
	    tid_next++;
	    if (tid_next >= VG_N_THREADS) tid_next = 1;
	    /* Count threads whose wait is bounded in time (sleep,
	       syscall-in-progress, or timed condvar wait) -- their
	       existence rules out a deadlock verdict below. */
	    if (VG_(threads)[tid_next].status == VgTs_Sleeping
		|| VG_(threads)[tid_next].status == VgTs_WaitSys
		|| (VG_(threads)[tid_next].status == VgTs_WaitCV
		    && VG_(threads)[tid_next].awaken_at != 0xFFFFFFFF))
	       n_in_bounded_wait ++;
	    if (VG_(threads)[tid_next].status != VgTs_Empty)
	       n_exists++;
	    if (VG_(threads)[tid_next].status == VgTs_WaitJoiner)
	       n_waiting_for_reaper++;
	    if (VG_(threads)[tid_next].status == VgTs_Runnable)
	       break; /* We can run this one. */
	    if (tid_next == tid)
	       break; /* been all the way round */
	 }
	 tid = tid_next;

	 if (VG_(threads)[tid].status == VgTs_Runnable) {
	    /* Found a suitable candidate.  Fall out of this loop, so
	       we can advance to stage 2 of the scheduler: actually
	       running the thread. */
	    break;
	 }

	 /* All threads have exited - pretend someone called exit() */
	 if (n_waiting_for_reaper == n_exists) {
	    VG_(exitcode) = 0;	/* ? */
	    return VgSrc_ExitSyscall;
	 }

	 /* We didn't find a runnable thread.  Now what? */
	 if (n_in_bounded_wait == 0) {
	    /* No runnable threads and no prospect of any appearing
	       even if we wait for an arbitrary length of time.  In
	       short, we have a deadlock. */
	    VG_(pp_sched_status)();
	    return VgSrc_Deadlock;
	 }

	 /* Nothing needs doing, so sit in idle until either a timeout
	    happens or a thread's syscall completes. */
	 idle();
	 /* pp_sched_status(); */
	 /* VG_(printf)("."); */
      }


      /* ======================= Phase 2 of 3 =======================
	 Wahey! We've finally decided that thread tid is runnable, so
	 we now do that.  Run it for as much of a quanta as possible.
	 Trivial requests are handled and the thread continues.  The
	 aim is not to do too many of Phase 1 since it is expensive.  */

      if (0)
	 VG_(printf)("SCHED: tid %d\n", tid);

      /* Tell the instrumentation skin which thread is now running. */
      VG_TRACK( thread_run, tid );

      /* Figure out how many bbs to ask vg_run_innerloop to do.  Note
	 that it decrements the counter before testing it for zero, so
	 that if VG_(dispatch_ctr) is set to N you get at most N-1
	 iterations.  Also this means that VG_(dispatch_ctr) must
	 exceed zero before entering the innerloop.  Also also, the
	 decrement is done before the bb is actually run, so you
	 always get at least one decrement even if nothing happens.
      */
      if (VG_(bbs_to_go) >= VG_SCHEDULING_QUANTUM)
	 VG_(dispatch_ctr) = VG_SCHEDULING_QUANTUM + 1;
      else
	 VG_(dispatch_ctr) = (UInt)VG_(bbs_to_go) + 1;

      /* ... and remember what we asked for. */
      dispatch_ctr_SAVED = VG_(dispatch_ctr);

      /* paranoia ... */
      vg_assert(VG_(threads)[tid].tid == tid);

      /* Actually run thread tid. */
      while (True) {

	 VG_(last_run_tid) = tid;

	 /* For stats purposes only. */
	 VG_(num_scheduling_events_MINOR) ++;

	 if (0)
	    VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs",
			 tid, VG_(dispatch_ctr) - 1 );
#        if 0
         if (VG_(bbs_done) > 31700000 + 0) {
            dispatch_ctr_SAVED = VG_(dispatch_ctr) = 2;
            VG_(translate)(&VG_(threads)[tid], VG_(threads)[tid].m_eip,
                           NULL,NULL,NULL);
         }
         vg_assert(VG_(threads)[tid].m_eip != 0);
#        endif

	 trc = run_thread_for_a_while ( tid );

#        if 0
         if (0 == VG_(threads)[tid].m_eip) {
            VG_(printf)("tid = %d,  dc = %llu\n", tid, VG_(bbs_done));
            vg_assert(0 != VG_(threads)[tid].m_eip);
         }
#        endif

	 /* Deal quickly with trivial scheduling events, and resume the
	    thread. */

	 if (trc == VG_TRC_INNER_FASTMISS) {
	    vg_assert(VG_(dispatch_ctr) > 0);

	    /* Trivial event.  Miss in the fast-cache.  Do a full
	       lookup for it. */
	    trans_addr
	       = VG_(search_transtab) ( VG_(threads)[tid].m_eip );
	    if (trans_addr == (Addr)0) {
	       /* Not found; we need to request a translation. */
	       create_translation_for(
		  tid, VG_(threads)[tid].m_eip );
	       trans_addr = VG_(search_transtab) ( VG_(threads)[tid].m_eip );
	       if (trans_addr == (Addr)0)
		  VG_(core_panic)("VG_TRC_INNER_FASTMISS: missing tt_fast entry");
	    }
	    continue; /* with this thread */
	 }

	 if (trc == VG_TRC_EBP_JMP_CLIENTREQ) {
	    /* Client request: the request number sits at *m_eax. */
	    UInt reqno = *(UInt*)(VG_(threads)[tid].m_eax);
	    /* VG_(printf)("request 0x%x\n", reqno); */

	    /* Are we really absolutely totally quitting? */
	    if (reqno == VG_USERREQ__LIBC_FREERES_DONE) {
	       if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
		  VG_(message)(Vg_DebugMsg,
			       "__libc_freeres() done; really quitting!");
	       }
	       return VgSrc_ExitSyscall;
	    }

	    do_client_request(tid);
	    /* Following the request, we try and continue with the
	       same thread if still runnable.  If not, go back to
	       Stage 1 to select a new thread to run. */
	    if (VG_(threads)[tid].status == VgTs_Runnable
		&& reqno != VG_USERREQ__PTHREAD_YIELD)
	       continue; /* with this thread */
	    else
	       goto stage1;
	 }

	 if (trc == VG_TRC_EBP_JMP_SYSCALL) {
	    /* Do a syscall for the vthread tid.  This could cause it
	       to become non-runnable.  One special case: spot the
	       client doing calls to exit() and take this as the cue
	       to exit. */
#           if 0
	    { UInt* esp; Int i;
	      esp=(UInt*)VG_(threads)[tid].m_esp;
	      VG_(printf)("\nBEFORE\n");
	      for (i = 10; i >= -10; i--)
		 VG_(printf)("%2d  %p  =  0x%x\n", i, &esp[i], esp[i]);
	    }
#           endif

	    /* Deal with calling __libc_freeres() at exit.  When the
	       client does __NR_exit, it's exiting for good.  So we
	       then run VG_(__libc_freeres_wrapper).  That quits by
	       doing VG_USERREQ__LIBC_FREERES_DONE, and at that point
	       we really exit.  To be safe we nuke all other threads
	       currently running.

	       If not valgrinding (cachegrinding, etc) don't do this.
	       __libc_freeres does some invalid frees which crash
	       the unprotected malloc/free system. */

	    if (VG_(threads)[tid].m_eax == __NR_exit
		|| VG_(threads)[tid].m_eax == __NR_exit_group
	       ) {

	       /* If __NR_exit, remember the supplied argument. */
	       VG_(exitcode) = VG_(threads)[tid].m_ebx; /* syscall arg1 */

	       /* Only run __libc_freeres if the skin says it's ok and
		  it hasn't been overridden with --run-libc-freeres=no
		  on the command line. */

	       if (VG_(needs).libc_freeres &&
		   VG_(clo_run_libc_freeres) &&
		   VG_(__libc_freeres_wrapper) != 0) {
		  if (VG_(clo_verbosity) > 2
		      || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
		     VG_(message)(Vg_DebugMsg,
			"Caught __NR_exit; running __libc_freeres()");
		  }
		  VG_(nuke_all_threads_except) ( tid );
		  /* Redirect this thread straight into the wrapper. */
		  VG_(threads)[tid].m_eip = (UInt)VG_(__libc_freeres_wrapper);
		  vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
		  goto stage1; /* party on, dudes (but not for much longer :) */

	       } else {
		  /* We won't run __libc_freeres; just exit now. */
		  if (VG_(clo_verbosity) > 2
		      || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
		     VG_(message)(Vg_DebugMsg,
			"Caught __NR_exit; quitting");
		  }
		  return VgSrc_ExitSyscall;
	       }

	    }

	    /* We've dealt with __NR_exit at this point. */
	    vg_assert(VG_(threads)[tid].m_eax != __NR_exit &&
		      VG_(threads)[tid].m_eax != __NR_exit_group);

	    /* Trap syscalls to __NR_sched_yield and just have this
	       thread yield instead.  Not essential, just an
	       optimisation. */
	    if (VG_(threads)[tid].m_eax == __NR_sched_yield) {
	       SET_SYSCALL_RETVAL(tid, 0); /* syscall returns with success */
	       goto stage1; /* find a new thread to run */
	    }

	    sched_do_syscall(tid);

#           if 0
	    { UInt* esp; Int i;
	      esp=(UInt*)VG_(threads)[tid].m_esp;
	      VG_(printf)("AFTER\n");
	      for (i = 10; i >= -10; i--)
		 VG_(printf)("%2d  %p  =  0x%x\n", i, &esp[i], esp[i]);
	    }
#           endif

	    if (VG_(threads)[tid].status == VgTs_Runnable) {
	       continue; /* with this thread */
	    } else {
	       goto stage1;
	    }
	 }

	 /* It's an event we can't quickly deal with.  Give up running
	    this thread and handle things the expensive way. */
	 break;
      }

      /* ======================= Phase 3 of 3 =======================
	 Handle non-trivial thread requests, mostly pthread stuff. */

      /* Ok, we've fallen out of the dispatcher for a
	 non-completely-trivial reason. First, update basic-block
	 counters. */

      /* The extra -1 compensates for the pre-decrement the inner loop
	 does before testing the counter (see the Phase 2 comment). */
      done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 1;
      vg_assert(done_this_time >= 0);
      VG_(bbs_to_go)   -= (ULong)done_this_time;
      VG_(bbs_done)    += (ULong)done_this_time;

      if (0 && trc != VG_TRC_INNER_FASTMISS)
	 VG_(message)(Vg_DebugMsg, "thread %d:   completed %d bbs, trc %d",
		      tid, done_this_time, (Int)trc );

      if (0 && trc != VG_TRC_INNER_FASTMISS)
	 VG_(message)(Vg_DebugMsg, "thread %d:  %llu bbs, event %s",
		      tid, VG_(bbs_done),
		      name_of_sched_event(trc) );

      /* Examine the thread's return code to figure out why it
	 stopped. */

      switch (trc) {

	 case VG_TRC_EBP_JMP_YIELD:
	    /* Explicit yield.  Let a new thread be scheduled,
	       simply by doing nothing, causing us to arrive back at
	       Phase 1. */
	    if (VG_(bbs_to_go) == 0) {
	       goto debug_stop;
	    }
	    break;

	 case VG_TRC_INNER_COUNTERZERO:
	    /* Timeslice is out.  Let a new thread be scheduled,
	       simply by doing nothing, causing us to arrive back at
	       Phase 1. */
	    if (VG_(bbs_to_go) == 0) {
	       goto debug_stop;
	    }
	    vg_assert(VG_(dispatch_ctr) == 0);
	    break;

	 case VG_TRC_UNRESUMABLE_SIGNAL:
	    /* It got a SIGSEGV/SIGBUS/SIGILL/SIGFPE, which we need to
	       deliver right away.  */
	    vg_assert(VG_(unresumable_siginfo).si_signo == VKI_SIGSEGV ||
		      VG_(unresumable_siginfo).si_signo == VKI_SIGBUS ||
		      VG_(unresumable_siginfo).si_signo == VKI_SIGILL ||
		      VG_(unresumable_siginfo).si_signo == VKI_SIGFPE);
	    vg_assert(VG_(longjmpd_on_signal) == VG_(unresumable_siginfo).si_signo);

	    /* make sure we've unblocked the signals which the handler blocked */
	    VG_(unblock_host_signal)(VG_(longjmpd_on_signal));

	    VG_(deliver_signal)(tid, &VG_(unresumable_siginfo), False);
	    VG_(unresumable_siginfo).si_signo = 0; /* done */
	    break;

	 default:
	    VG_(printf)("\ntrc = %d\n", trc);
	    VG_(core_panic)("VG_(scheduler), phase 3: "
			    "unexpected thread return code");
	    /* NOTREACHED */
	    break;

      } /* switch (trc) */

      /* That completes Phase 3 of 3.  Return now to the top of the
	 main scheduler loop, to Phase 1 of 3. */

   } /* top-level scheduler loop */


   /* NOTREACHED */
   VG_(core_panic)("scheduler: post-main-loop ?!");
   /* NOTREACHED */

  debug_stop:
   /* If we exited because of a debug stop, print the translation
      of the last block executed -- by translating it again, and
      throwing away the result. */
   VG_(printf)(
      "======vvvvvvvv====== LAST TRANSLATION ======vvvvvvvv======\n");
   VG_(translate)( tid,
                   VG_(threads)[tid].m_eip, NULL, NULL, NULL, NULL );
   VG_(printf)("\n");
   VG_(printf)(
      "======^^^^^^^^====== LAST TRANSLATION ======^^^^^^^^======\n");

   return VgSrc_BbsDone;
}
1258
jsgf855d93d2003-10-13 22:26:55 +00001259void VG_(need_resched) ( ThreadId prefer )
1260{
1261 /* Tell the scheduler now might be a good time to find a new
1262 runnable thread, because something happened which woke a thread
1263 up.
1264
1265 NB: This can be called unsynchronized from either a signal
1266 handler, or from another LWP (ie, real kernel thread).
1267
1268 In principle this could simply be a matter of setting
1269 VG_(dispatch_ctr) to a small value (say, 2), which would make
1270 any running code come back to the scheduler fairly quickly.
1271
1272 However, since the scheduler implements a strict round-robin
1273 policy with only one priority level, there are, by definition,
1274 no better threads to be running than the current thread anyway,
1275 so we may as well ignore this hint. For processes with a
1276 mixture of compute and I/O bound threads, this means the compute
1277 threads could introduce longish latencies before the I/O threads
1278 run. For programs with only I/O bound threads, need_resched
1279 won't have any effect anyway.
1280
1281 OK, so I've added command-line switches to enable low-latency
1282 syscalls and signals. The prefer_sched variable is in effect
1283 the ID of a single thread which has higher priority than all the
1284 others. If set, the scheduler will prefer to schedule that
1285 thread over all others. Naturally, this could lead to
1286 starvation or other unfairness.
1287 */
1288
1289 if (VG_(dispatch_ctr) > 10)
1290 VG_(dispatch_ctr) = 2;
1291 prefer_sched = prefer;
1292}
1293
sewardje663cb92002-04-12 10:26:32 +00001294
1295/* ---------------------------------------------------------------------
1296 The pthread implementation.
1297 ------------------------------------------------------------------ */
1298
1299#include <pthread.h>
1300#include <errno.h>
1301
sewardjbf290b92002-05-01 02:28:01 +00001302#define VG_PTHREAD_STACK_MIN \
sewardjc3bd5f52002-05-01 03:24:23 +00001303 (VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB)
sewardje663cb92002-04-12 10:26:32 +00001304
1305/* /usr/include/bits/pthreadtypes.h:
1306 typedef unsigned long int pthread_t;
1307*/
1308
sewardje663cb92002-04-12 10:26:32 +00001309
sewardj604ec3c2002-04-18 22:38:41 +00001310/* -----------------------------------------------------------
sewardj20917d82002-05-28 01:36:45 +00001311 Thread CREATION, JOINAGE and CANCELLATION: HELPER FNS
sewardj604ec3c2002-04-18 22:38:41 +00001312 -------------------------------------------------------- */
1313
sewardj20917d82002-05-28 01:36:45 +00001314/* We've decided to action a cancellation on tid. Make it jump to
1315 thread_exit_wrapper() in vg_libpthread.c, passing PTHREAD_CANCELED
1316 as the arg. */
static
void make_thread_jump_to_cancelhdlr ( ThreadId tid )
{
   /* We've decided to action a cancellation on tid: fake up a call
      frame so that, when resumed, it runs its cancellation handler
      (thread_exit_wrapper in vg_libpthread.c) with PTHREAD_CANCELED
      as the argument. */
   Char msg_buf[100];
   vg_assert(VG_(is_valid_tid)(tid));

   /* Push PTHREAD_CANCELED on the stack and jump to the cancellation
      handler -- which is really thread_exit_wrapper() in
      vg_libpthread.c. */
   vg_assert(VG_(threads)[tid].cancel_pend != NULL);

   /* Push a suitable arg, and mark it as readable. */
   SET_PTHREQ_ESP(tid, VG_(threads)[tid].m_esp - 4);
   * (UInt*)(VG_(threads)[tid].m_esp) = (UInt)PTHREAD_CANCELED;
   VG_TRACK( post_mem_write, VG_(threads)[tid].m_esp, sizeof(void*) );

   /* Push a bogus return address.  It will not return, but we still
      need to have it so that the arg is at the correct stack offset.
      Don't mark as readable; any attempt to read this is an internal
      valgrind bug since thread_exit_wrapper should not return. */
   SET_PTHREQ_ESP(tid, VG_(threads)[tid].m_esp - 4);
   * (UInt*)(VG_(threads)[tid].m_esp) = 0xBEADDEEF;

   /* .cancel_pend will hold &thread_exit_wrapper */
   VG_(threads)[tid].m_eip = (UInt)VG_(threads)[tid].cancel_pend;

   /* If the thread was blocked in a syscall, get its proxy LWP out of
      that syscall so the thread can actually run the handler. */
   VG_(proxy_abort_syscall)(tid);

   VG_(threads)[tid].status = VgTs_Runnable;

   /* Make sure we aren't cancelled again whilst handling this
      cancellation. */
   VG_(threads)[tid].cancel_st = False;
   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf,
         "jump to cancellation handler (hdlr = %p)",
         VG_(threads)[tid].cancel_pend);
      print_sched_event(tid, msg_buf);
   }
}
1357
1358
1359
sewardjb48e5002002-05-13 00:16:03 +00001360/* Release resources and generally clean up once a thread has finally
1361 disappeared. */
static
void cleanup_after_thread_exited ( ThreadId tid, Bool forcekill )
{
   /* Release resources and generally clean up once thread tid has
      finally disappeared.  `forcekill` is passed through to the proxy
      deletion -- presumably whether to kill the proxy LWP outright
      rather than let it wind down; TODO confirm against
      VG_(proxy_delete)'s definition. */
   vg_assert(VG_(is_valid_or_empty_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Empty);
   /* Its stack is now off-limits */
   VG_TRACK( die_mem_stack, VG_(threads)[tid].stack_base,
                            VG_(threads)[tid].stack_size );

   /* Deallocate its LDT, if it ever had one. */
   VG_(deallocate_LDT_for_thread)( VG_(threads)[tid].ldt );
   VG_(threads)[tid].ldt = NULL;

   /* Not interested in the timeout anymore (0xFFFFFFFF means "no
      timeout pending") */
   VG_(threads)[tid].awaken_at = 0xFFFFFFFF;

   /* Delete proxy LWP */
   VG_(proxy_delete)(tid, forcekill);
}
1381
1382
/* Look for matching pairs of threads waiting for joiners and threads
   waiting for joinees.  For each such pair copy the return value of
   the joinee into the joiner, let the joiner resume and discard the
   joinee.

   Called from several places (join, detach, quit, wait_joiner) as a
   "flush" operation: it is idempotent and scans all thread slots, so
   calling it opportunistically is always safe. */
static
void maybe_rendezvous_joiners_and_joinees ( void )
{
   Char     msg_buf[100];
   void**   thread_return;
   ThreadId jnr, jee;

   /* For every thread blocked in pthread_join (VgTs_WaitJoinee) ... */
   for (jnr = 1; jnr < VG_N_THREADS; jnr++) {
      if (VG_(threads)[jnr].status != VgTs_WaitJoinee)
         continue;
      jee = VG_(threads)[jnr].joiner_jee_tid;
      if (jee == VG_INVALID_THREADID)
         continue;
      vg_assert(VG_(is_valid_tid)(jee));
      if (VG_(threads)[jee].status != VgTs_WaitJoiner) {
         /* if joinee has become detached, then make join fail with
            EINVAL */
         if (VG_(threads)[jee].detached) {
            VG_(threads)[jnr].status = VgTs_Runnable;
            VG_(threads)[jnr].joiner_jee_tid = VG_INVALID_THREADID;
            SET_PTHREQ_RETVAL(jnr, VKI_EINVAL);
         }
         continue;
      }
      /* ok!  jnr is waiting to join with jee, and jee is waiting to be
         joined by ... well, any thread.  So let's do it! */

      /* Copy return value to where joiner wants it. */
      thread_return = VG_(threads)[jnr].joiner_thread_return;
      if (thread_return != NULL) {
         /* CHECK thread_return writable */
         VG_TRACK( pre_mem_write, Vg_CorePThread, jnr,
                                  "pthread_join: thread_return",
                                  (Addr)thread_return, sizeof(void*));

         *thread_return = VG_(threads)[jee].joinee_retval;
         /* Not really right, since it makes the thread's return value
            appear to be defined even if it isn't. */
         VG_TRACK( post_mem_write, (Addr)thread_return, sizeof(void*) );
      }

      /* Joinee is discarded: mark its slot empty, then release its
         stack/LDT/proxy (non-forced -- the thread exited normally). */
      VG_(threads)[jee].status = VgTs_Empty; /* bye! */
      cleanup_after_thread_exited ( jee, False );
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,
                      "rendezvous with joinee %d.  %d resumes, %d exits.",
                      jee, jnr, jee );
         print_sched_event(jnr, msg_buf);
      }

      VG_TRACK( post_thread_join, jnr, jee );

      /* joiner returns with success */
      VG_(threads)[jnr].status = VgTs_Runnable;
      SET_PTHREQ_RETVAL(jnr, 0);
   }
}
1445
1446
sewardjccef2e62002-05-29 19:26:32 +00001447/* Nuke all threads other than tid. POSIX specifies that this should
1448 happen in __NR_exec, and after a __NR_fork() when I am the child,
jsgf855d93d2003-10-13 22:26:55 +00001449 as POSIX requires. Also used at process exit time with
1450 me==VG_INVALID_THREADID */
sewardjccef2e62002-05-29 19:26:32 +00001451void VG_(nuke_all_threads_except) ( ThreadId me )
1452{
1453 ThreadId tid;
1454 for (tid = 1; tid < VG_N_THREADS; tid++) {
1455 if (tid == me
jsgf855d93d2003-10-13 22:26:55 +00001456 || VG_(threads)[tid].status == VgTs_Empty)
sewardjccef2e62002-05-29 19:26:32 +00001457 continue;
sewardjef037c72002-05-30 00:40:03 +00001458 if (0)
1459 VG_(printf)(
1460 "VG_(nuke_all_threads_except): nuking tid %d\n", tid);
jsgf855d93d2003-10-13 22:26:55 +00001461 VG_(proxy_delete)(tid, True);
sewardjccef2e62002-05-29 19:26:32 +00001462 VG_(threads)[tid].status = VgTs_Empty;
jsgf855d93d2003-10-13 22:26:55 +00001463 cleanup_after_thread_exited( tid, True );
sewardjccef2e62002-05-29 19:26:32 +00001464 }
1465}
1466
1467
sewardj20917d82002-05-28 01:36:45 +00001468/* -----------------------------------------------------------
1469 Thread CREATION, JOINAGE and CANCELLATION: REQUESTS
1470 -------------------------------------------------------- */
1471
sewardje663cb92002-04-12 10:26:32 +00001472static
sewardj8ad94e12002-05-29 00:10:20 +00001473void do__cleanup_push ( ThreadId tid, CleanupEntry* cu )
1474{
1475 Int sp;
1476 Char msg_buf[100];
1477 vg_assert(VG_(is_valid_tid)(tid));
1478 sp = VG_(threads)[tid].custack_used;
1479 if (VG_(clo_trace_sched)) {
1480 VG_(sprintf)(msg_buf,
1481 "cleanup_push (fn %p, arg %p) -> slot %d",
1482 cu->fn, cu->arg, sp);
1483 print_sched_event(tid, msg_buf);
1484 }
1485 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1486 if (sp == VG_N_CLEANUPSTACK)
njne427a662002-10-02 11:08:25 +00001487 VG_(core_panic)("do__cleanup_push: VG_N_CLEANUPSTACK is too small."
sewardj8ad94e12002-05-29 00:10:20 +00001488 " Increase and recompile.");
1489 VG_(threads)[tid].custack[sp] = *cu;
1490 sp++;
1491 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001492 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001493}
1494
1495
1496static
1497void do__cleanup_pop ( ThreadId tid, CleanupEntry* cu )
1498{
1499 Int sp;
1500 Char msg_buf[100];
1501 vg_assert(VG_(is_valid_tid)(tid));
1502 sp = VG_(threads)[tid].custack_used;
1503 if (VG_(clo_trace_sched)) {
njn36650922002-10-04 09:18:09 +00001504 VG_(sprintf)(msg_buf, "cleanup_pop from slot %d", sp-1);
sewardj8ad94e12002-05-29 00:10:20 +00001505 print_sched_event(tid, msg_buf);
1506 }
1507 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1508 if (sp == 0) {
njnd3040452003-05-19 15:04:06 +00001509 SET_PTHREQ_RETVAL(tid, -1);
sewardj8ad94e12002-05-29 00:10:20 +00001510 return;
1511 }
1512 sp--;
njn72718642003-07-24 08:45:32 +00001513 VG_TRACK( pre_mem_write, Vg_CorePThread, tid,
njn36650922002-10-04 09:18:09 +00001514 "cleanup pop", (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001515 *cu = VG_(threads)[tid].custack[sp];
njn25e49d8e72002-09-23 09:36:25 +00001516 VG_TRACK( post_mem_write, (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001517 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001518 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001519}
1520
1521
1522static
sewardjff42d1d2002-05-22 13:17:31 +00001523void do_pthread_yield ( ThreadId tid )
1524{
1525 Char msg_buf[100];
1526 vg_assert(VG_(is_valid_tid)(tid));
sewardjff42d1d2002-05-22 13:17:31 +00001527 if (VG_(clo_trace_sched)) {
1528 VG_(sprintf)(msg_buf, "yield");
1529 print_sched_event(tid, msg_buf);
1530 }
njnd3040452003-05-19 15:04:06 +00001531 SET_PTHREQ_RETVAL(tid, 0);
sewardjff42d1d2002-05-22 13:17:31 +00001532}
1533
1534
1535static
sewardj20917d82002-05-28 01:36:45 +00001536void do__testcancel ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00001537{
sewardj7989d0c2002-05-28 11:00:01 +00001538 Char msg_buf[100];
sewardjb48e5002002-05-13 00:16:03 +00001539 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001540 if (VG_(clo_trace_sched)) {
1541 VG_(sprintf)(msg_buf, "testcancel");
1542 print_sched_event(tid, msg_buf);
1543 }
sewardj20917d82002-05-28 01:36:45 +00001544 if (/* is there a cancellation pending on this thread? */
1545 VG_(threads)[tid].cancel_pend != NULL
1546 && /* is this thread accepting cancellations? */
1547 VG_(threads)[tid].cancel_st) {
1548 /* Ok, let's do the cancellation. */
1549 make_thread_jump_to_cancelhdlr ( tid );
sewardje663cb92002-04-12 10:26:32 +00001550 } else {
sewardj20917d82002-05-28 01:36:45 +00001551 /* No, we keep going. */
njnd3040452003-05-19 15:04:06 +00001552 SET_PTHREQ_RETVAL(tid, 0);
sewardje663cb92002-04-12 10:26:32 +00001553 }
sewardje663cb92002-04-12 10:26:32 +00001554}
1555
1556
1557static
sewardj20917d82002-05-28 01:36:45 +00001558void do__set_cancelstate ( ThreadId tid, Int state )
1559{
1560 Bool old_st;
sewardj7989d0c2002-05-28 11:00:01 +00001561 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001562 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001563 if (VG_(clo_trace_sched)) {
1564 VG_(sprintf)(msg_buf, "set_cancelstate to %d (%s)", state,
1565 state==PTHREAD_CANCEL_ENABLE
1566 ? "ENABLE"
1567 : (state==PTHREAD_CANCEL_DISABLE ? "DISABLE" : "???"));
1568 print_sched_event(tid, msg_buf);
1569 }
sewardj20917d82002-05-28 01:36:45 +00001570 old_st = VG_(threads)[tid].cancel_st;
1571 if (state == PTHREAD_CANCEL_ENABLE) {
1572 VG_(threads)[tid].cancel_st = True;
1573 } else
1574 if (state == PTHREAD_CANCEL_DISABLE) {
1575 VG_(threads)[tid].cancel_st = False;
1576 } else {
njne427a662002-10-02 11:08:25 +00001577 VG_(core_panic)("do__set_cancelstate");
sewardj20917d82002-05-28 01:36:45 +00001578 }
njnd3040452003-05-19 15:04:06 +00001579 SET_PTHREQ_RETVAL(tid, old_st ? PTHREAD_CANCEL_ENABLE
1580 : PTHREAD_CANCEL_DISABLE);
sewardj20917d82002-05-28 01:36:45 +00001581}
1582
1583
1584static
1585void do__set_canceltype ( ThreadId tid, Int type )
1586{
1587 Bool old_ty;
sewardj7989d0c2002-05-28 11:00:01 +00001588 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001589 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001590 if (VG_(clo_trace_sched)) {
1591 VG_(sprintf)(msg_buf, "set_canceltype to %d (%s)", type,
1592 type==PTHREAD_CANCEL_ASYNCHRONOUS
1593 ? "ASYNCHRONOUS"
1594 : (type==PTHREAD_CANCEL_DEFERRED ? "DEFERRED" : "???"));
1595 print_sched_event(tid, msg_buf);
1596 }
sewardj20917d82002-05-28 01:36:45 +00001597 old_ty = VG_(threads)[tid].cancel_ty;
1598 if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
1599 VG_(threads)[tid].cancel_ty = False;
1600 } else
1601 if (type == PTHREAD_CANCEL_DEFERRED) {
sewardjaf00b6d2002-05-29 23:30:28 +00001602 VG_(threads)[tid].cancel_ty = True;
sewardj20917d82002-05-28 01:36:45 +00001603 } else {
njne427a662002-10-02 11:08:25 +00001604 VG_(core_panic)("do__set_canceltype");
sewardj20917d82002-05-28 01:36:45 +00001605 }
njnd3040452003-05-19 15:04:06 +00001606 SET_PTHREQ_RETVAL(tid, old_ty ? PTHREAD_CANCEL_DEFERRED
sewardj20917d82002-05-28 01:36:45 +00001607 : PTHREAD_CANCEL_ASYNCHRONOUS);
1608}
1609
1610
sewardj7989d0c2002-05-28 11:00:01 +00001611/* Set or get the detach state for thread det. */
sewardj20917d82002-05-28 01:36:45 +00001612static
sewardj7989d0c2002-05-28 11:00:01 +00001613void do__set_or_get_detach ( ThreadId tid,
1614 Int what, ThreadId det )
sewardj20917d82002-05-28 01:36:45 +00001615{
sewardj7989d0c2002-05-28 11:00:01 +00001616 Char msg_buf[100];
1617 /* VG_(printf)("do__set_or_get_detach tid %d what %d det %d\n",
1618 tid, what, det); */
sewardj20917d82002-05-28 01:36:45 +00001619 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001620 if (VG_(clo_trace_sched)) {
1621 VG_(sprintf)(msg_buf, "set_or_get_detach %d (%s) for tid %d", what,
1622 what==0 ? "not-detached" : (
1623 what==1 ? "detached" : (
1624 what==2 ? "fetch old value" : "???")),
1625 det );
1626 print_sched_event(tid, msg_buf);
1627 }
1628
1629 if (!VG_(is_valid_tid)(det)) {
njnd3040452003-05-19 15:04:06 +00001630 SET_PTHREQ_RETVAL(tid, -1);
sewardj7989d0c2002-05-28 11:00:01 +00001631 return;
1632 }
1633
sewardj20917d82002-05-28 01:36:45 +00001634 switch (what) {
1635 case 2: /* get */
njnd3040452003-05-19 15:04:06 +00001636 SET_PTHREQ_RETVAL(tid, VG_(threads)[det].detached ? 1 : 0);
sewardj20917d82002-05-28 01:36:45 +00001637 return;
jsgf855d93d2003-10-13 22:26:55 +00001638 case 1:
sewardj7989d0c2002-05-28 11:00:01 +00001639 VG_(threads)[det].detached = True;
njnd3040452003-05-19 15:04:06 +00001640 SET_PTHREQ_RETVAL(tid, 0);
jsgf855d93d2003-10-13 22:26:55 +00001641 /* wake anyone who was joining on us */
1642 maybe_rendezvous_joiners_and_joinees();
sewardj20917d82002-05-28 01:36:45 +00001643 return;
1644 case 0: /* set not detached */
sewardj7989d0c2002-05-28 11:00:01 +00001645 VG_(threads)[det].detached = False;
njnd3040452003-05-19 15:04:06 +00001646 SET_PTHREQ_RETVAL(tid, 0);
sewardj20917d82002-05-28 01:36:45 +00001647 return;
1648 default:
njne427a662002-10-02 11:08:25 +00001649 VG_(core_panic)("do__set_or_get_detach");
sewardj20917d82002-05-28 01:36:45 +00001650 }
1651}
1652
1653
/* pthread_cancel: thread tid requests cancellation of thread cee.
   Records cancelpend_hdlr as cee's pending cancellation handler,
   interrupts any syscall cee is blocked in, and -- if cee is in
   asynchronous-cancellation mode -- cancels it immediately via
   do__testcancel.  Returns 0 to tid on success, ESRCH if cee is not
   a live thread. */
static
void do__set_cancelpend ( ThreadId tid,
                          ThreadId cee,
                          void (*cancelpend_hdlr)(void*) )
{
   Char msg_buf[100];

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   /* Cancelling a nonexistent thread: client error, ESRCH. */
   if (!VG_(is_valid_tid)(cee)) {
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,
            "set_cancelpend for invalid tid %d", cee);
         print_sched_event(tid, msg_buf);
      }
      VG_(record_pthread_error)( tid,
         "pthread_cancel: target thread does not exist, or invalid");
      SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
      return;
   }

   VG_(threads)[cee].cancel_pend = cancelpend_hdlr;

   /* interrupt a pending syscall, so the cancellation is noticed
      promptly rather than after the syscall completes */
   VG_(proxy_abort_syscall)(cee);

   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf,
         "set_cancelpend (hdlr = %p, set by tid %d)",
         cancelpend_hdlr, tid);
      print_sched_event(cee, msg_buf);
   }

   /* Thread doing the cancelling returns with success. */
   SET_PTHREQ_RETVAL(tid, 0);

   /* Perhaps we can nuke the cancellee right now?  cancel_ty ==
      False means PTHREAD_CANCEL_ASYNCHRONOUS. */
   if (!VG_(threads)[cee].cancel_ty) /* if PTHREAD_CANCEL_ASYNCHRONOUS */
      do__testcancel(cee);
}
1695
1696
/* pthread_join: thread tid waits for thread jee to terminate; jee's
   return value is copied to *thread_return (if non-NULL).  Errors
   returned to the client: EDEADLK for self-join, EINVAL for an
   invalid/detached target or when another thread is already joining
   on jee. */
static
void do_pthread_join ( ThreadId tid,
                       ThreadId jee, void** thread_return )
{
   Char     msg_buf[100];
   ThreadId i;
   /* jee, the joinee, is the thread specified as an arg in thread
      tid's call to pthread_join.  So tid is the join-er. */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   /* Joining with yourself would deadlock forever; fail at once. */
   if (jee == tid) {
      VG_(record_pthread_error)( tid,
         "pthread_join: attempt to join to self");
      SET_PTHREQ_RETVAL(tid, EDEADLK); /* libc constant, not a kernel one */
      vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
      return;
   }

   /* Flush any completed pairs, so as to make sure what we're looking
      at is up-to-date. */
   maybe_rendezvous_joiners_and_joinees();

   /* Is this a sane request? */
   if ( ! VG_(is_valid_tid)(jee) ||
        VG_(threads)[jee].detached) {
      /* Invalid thread to join to. */
      VG_(record_pthread_error)( tid,
          "pthread_join: target thread does not exist, invalid, or detached");
      SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
      return;
   }

   /* Is anyone else already in a join-wait for jee?  At most one
      joiner per joinee is permitted. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (i == tid) continue;
      if (VG_(threads)[i].status == VgTs_WaitJoinee
          && VG_(threads)[i].joiner_jee_tid == jee) {
         /* Someone already did join on this thread */
         VG_(record_pthread_error)( tid,
            "pthread_join: another thread already "
            "in join-wait for target thread");
         SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
         vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
         return;
      }
   }

   /* Mark this thread as waiting for the joinee. */
   VG_(threads)[tid].status = VgTs_WaitJoinee;
   VG_(threads)[tid].joiner_thread_return = thread_return;
   VG_(threads)[tid].joiner_jee_tid = jee;

   /* Look for matching joiners and joinees and do the right thing. */
   maybe_rendezvous_joiners_and_joinees();

   /* Return value is irrelevant since this thread becomes
      non-runnable.  maybe_rendezvous_joiners_and_joinees() will cause
      it to return the right value when it resumes. */

   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf,
         "wait for joinee %d (may already be ready)", jee);
      print_sched_event(tid, msg_buf);
   }
}
1763
1764
sewardj20917d82002-05-28 01:36:45 +00001765/* ( void* ): calling thread waits for joiner and returns the void* to
1766 it. This is one of two ways in which a thread can finally exit --
1767 the other is do__quit. */
sewardje663cb92002-04-12 10:26:32 +00001768static
sewardj20917d82002-05-28 01:36:45 +00001769void do__wait_joiner ( ThreadId tid, void* retval )
sewardje663cb92002-04-12 10:26:32 +00001770{
sewardj20917d82002-05-28 01:36:45 +00001771 Char msg_buf[100];
1772 vg_assert(VG_(is_valid_tid)(tid));
1773 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1774 if (VG_(clo_trace_sched)) {
1775 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001776 "do__wait_joiner(retval = %p) (non-detached thread exit)", retval);
sewardj20917d82002-05-28 01:36:45 +00001777 print_sched_event(tid, msg_buf);
1778 }
1779 VG_(threads)[tid].status = VgTs_WaitJoiner;
1780 VG_(threads)[tid].joinee_retval = retval;
1781 maybe_rendezvous_joiners_and_joinees();
1782}
1783
1784
/* ( no-args ): calling thread disappears from the system forever.
   Reclaim resources.  This is the detached-thread exit path; the
   non-detached path is do__wait_joiner. */
static
void do__quit ( ThreadId tid )
{
   Char msg_buf[100];
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   /* Slot must be marked Empty before cleanup_after_thread_exited,
      which asserts that status. */
   VG_(threads)[tid].status = VgTs_Empty; /* bye! */
   cleanup_after_thread_exited ( tid, False );
   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf, "do__quit (detached thread exit)");
      print_sched_event(tid, msg_buf);
   }
   /* Presumably this lets any thread erroneously join-waiting on us
      be resumed with EINVAL via the detached-joinee path -- confirm
      against maybe_rendezvous_joiners_and_joinees. */
   maybe_rendezvous_joiners_and_joinees();
   /* Return value is irrelevant; this thread will not get
      rescheduled. */
}
1803
1804
/* Should never be entered.  If it is, will be on the simulated
   CPU.  Its address is pushed by do__apply_in_new_thread as the fake
   return address for a new thread's root function, which must exit
   via __QUIT or __WAIT_JOINER rather than returning. */
static
void do__apply_in_new_thread_bogusRA ( void )
{
   VG_(core_panic)("do__apply_in_new_thread_bogusRA");
}
1812
/* (Fn, Arg): Create a new thread and run Fn applied to Arg in it.  Fn
   MUST NOT return -- ever.  Eventually it will do either __QUIT or
   __WAIT_JOINER.  Return the child tid to the parent.

   Steps: allocate a thread slot, clone the parent's CPU state and
   LDT, give the child a stack with a redzone, push Arg and a bogus
   return address, point %eip at Fn, inherit the parent's signal
   mask, and create a proxy LWP for the child. */
static
void do__apply_in_new_thread ( ThreadId parent_tid,
                               void* (*fn)(void *),
                               void* arg )
{
   Addr     new_stack;
   UInt     new_stk_szb;
   ThreadId tid;
   Char     msg_buf[100];

   /* Paranoia ... */
   vg_assert(sizeof(pthread_t) == sizeof(UInt));

   vg_assert(VG_(threads)[parent_tid].status != VgTs_Empty);

   tid = vg_alloc_ThreadState();

   /* If we've created the main thread's tid, we're in deep trouble :) */
   vg_assert(tid != 1);
   vg_assert(VG_(is_valid_or_empty_tid)(tid));

   /* do this early, before the child gets any memory writes */
   VG_TRACK ( post_thread_create, parent_tid, tid );

   /* Create new thread with default attrs:
      deferred cancellation, not detached
   */
   mostly_clear_thread_record(tid);
   VG_(threads)[tid].status = VgTs_Runnable;

   /* Copy the parent's CPU state into the child's, in a roundabout
      way (via baseBlock). */
   VG_(load_thread_state)(parent_tid);

   /* We inherit our parent's LDT. */
   if (VG_(threads)[parent_tid].ldt == NULL) {
      /* We hope this is the common case. */
      VG_(baseBlock)[VGOFF_(ldt)] = 0;
   } else {
      /* No luck .. we have to take a copy of the parent's. */
      VG_(threads)[tid].ldt
        = VG_(allocate_LDT_for_thread)( VG_(threads)[parent_tid].ldt );
      VG_(baseBlock)[VGOFF_(ldt)] = (UInt)VG_(threads)[tid].ldt;
   }

   VG_(save_thread_state)(tid);
   vg_tid_last_in_baseBlock = tid;

   /* Consider allocating the child a stack, if the one it already has
      is inadequate. */
   new_stk_szb = VG_PTHREAD_STACK_MIN;

   if (new_stk_szb > VG_(threads)[tid].stack_size) {
      /* Again, for good measure :) We definitely don't want to be
         allocating a stack for the main thread. */
      vg_assert(tid != 1);
      /* for now, we don't handle the case of anything other than
         assigning it for the first time. */
      vg_assert(VG_(threads)[tid].stack_size == 0);
      vg_assert(VG_(threads)[tid].stack_base == (Addr)NULL);
      new_stack = VG_(client_alloc)(0, new_stk_szb,
                                    VKI_PROT_READ | VKI_PROT_WRITE | VKI_PROT_EXEC,
                                    SF_STACK);
      VG_(threads)[tid].stack_base = new_stack;
      VG_(threads)[tid].stack_size = new_stk_szb;
      VG_(threads)[tid].stack_highest_word
         = new_stack + new_stk_szb
                     - VG_AR_CLIENT_STACKBASE_REDZONE_SZB; /* -4  ??? */;
   }

   /* Having got memory to hold the thread's stack:
      - set %esp as base + size
      - mark everything below %esp inaccessible
      - mark redzone at stack end inaccessible
    */
   SET_PTHREQ_ESP(tid, VG_(threads)[tid].stack_base
                       + VG_(threads)[tid].stack_size
                       - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);

   VG_TRACK ( die_mem_stack, VG_(threads)[tid].stack_base,
                             + new_stk_szb - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
   VG_TRACK ( ban_mem_stack, VG_(threads)[tid].m_esp,
                             VG_AR_CLIENT_STACKBASE_REDZONE_SZB );

   /* push two args (arg word + bogus return address, 4 bytes each) */
   SET_PTHREQ_ESP(tid, VG_(threads)[tid].m_esp - 8);

   VG_TRACK ( new_mem_stack, (Addr)VG_(threads)[tid].m_esp, 2 * 4 );
   VG_TRACK ( pre_mem_write, Vg_CorePThread, tid, "new thread: stack",
                             (Addr)VG_(threads)[tid].m_esp, 2 * 4 );

   /* push arg and (bogus) return address; fn must never actually
      return through it (it would hit do__apply_in_new_thread_bogusRA,
      which panics) */
   * (UInt*)(VG_(threads)[tid].m_esp+4) = (UInt)arg;
   * (UInt*)(VG_(threads)[tid].m_esp)
      = (UInt)&do__apply_in_new_thread_bogusRA;

   VG_TRACK ( post_mem_write, VG_(threads)[tid].m_esp, 2 * 4 );

   /* this is where we start */
   VG_(threads)[tid].m_eip = (UInt)fn;

   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf, "new thread, created by %d", parent_tid );
      print_sched_event(tid, msg_buf);
   }

   /* We inherit our parent's signal mask. */
   VG_(threads)[tid].sig_mask = VG_(threads)[parent_tid].sig_mask;

   /* Now that the signal mask is set up, create a proxy LWP for this thread */
   VG_(proxy_create)(tid);

   /* Set the proxy's signal mask */
   VG_(proxy_setsigmask)(tid);

   /* return child's tid to parent */
   SET_PTHREQ_RETVAL(parent_tid, tid); /* success */
}
1934
1935
sewardj604ec3c2002-04-18 22:38:41 +00001936/* -----------------------------------------------------------
1937 MUTEXes
1938 -------------------------------------------------------- */
1939
sewardj604ec3c2002-04-18 22:38:41 +00001940/* pthread_mutex_t is a struct with at 5 words:
sewardje663cb92002-04-12 10:26:32 +00001941 typedef struct
1942 {
1943 int __m_reserved; -- Reserved for future use
1944 int __m_count; -- Depth of recursive locking
1945 _pthread_descr __m_owner; -- Owner thread (if recursive or errcheck)
1946 int __m_kind; -- Mutex kind: fast, recursive or errcheck
1947 struct _pthread_fastlock __m_lock; -- Underlying fast lock
1948 } pthread_mutex_t;
sewardj604ec3c2002-04-18 22:38:41 +00001949
sewardj6072c362002-04-19 14:40:57 +00001950 #define PTHREAD_MUTEX_INITIALIZER \
1951 {0, 0, 0, PTHREAD_MUTEX_TIMED_NP, __LOCK_INITIALIZER}
1952 # define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \
1953 {0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, __LOCK_INITIALIZER}
1954 # define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP \
1955 {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, __LOCK_INITIALIZER}
1956 # define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
1957 {0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, __LOCK_INITIALIZER}
sewardj604ec3c2002-04-18 22:38:41 +00001958
sewardj6072c362002-04-19 14:40:57 +00001959 How we use it:
sewardj604ec3c2002-04-18 22:38:41 +00001960
sewardj6072c362002-04-19 14:40:57 +00001961 __m_kind never changes and indicates whether or not it is recursive.
1962
1963 __m_count indicates the lock count; if 0, the mutex is not owned by
1964 anybody.
1965
1966 __m_owner has a ThreadId value stuffed into it. We carefully arrange
1967 that ThreadId == 0 is invalid (VG_INVALID_THREADID), so that
1968 statically initialised mutexes correctly appear
1969 to belong to nobody.
1970
1971 In summary, a not-in-use mutex is distinguised by having __m_owner
1972 == 0 (VG_INVALID_THREADID) and __m_count == 0 too. If one of those
1973 conditions holds, the other should too.
1974
1975 There is no linked list of threads waiting for this mutex. Instead
1976 a thread in WaitMX state points at the mutex with its waited_on_mx
1977 field. This makes _unlock() inefficient, but simple to implement the
   right semantics vis-a-vis signals.
sewardje663cb92002-04-12 10:26:32 +00001979
sewardj604ec3c2002-04-18 22:38:41 +00001980 We don't have to deal with mutex initialisation; the client side
sewardj6072c362002-04-19 14:40:57 +00001981 deals with that for us.
1982*/
sewardje663cb92002-04-12 10:26:32 +00001983
/* Helper fns ... */

/* Unlock 'mutex': hand ownership to an arbitrary thread currently
   blocked on it (making that thread runnable with a 0 return from its
   pthread_mutex_lock), or mark the mutex free if nobody is waiting.
   'caller' is only used to label trace output. */
static
void release_one_thread_waiting_on_mutex ( pthread_mutex_t* mutex,
                                           Char* caller )
{
   Int  i;
   Char msg_buf[100];

   /* Find some arbitrary thread waiting on this mutex, and make it
      runnable.  If none are waiting, mark the mutex as not held. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty)
         continue;
      if (VG_(threads)[i].status == VgTs_WaitMX
          && VG_(threads)[i].associated_mx == mutex)
         break;
   }

   /* Tell tools the current owner has released, before ownership is
      reassigned below. */
   VG_TRACK( post_mutex_unlock, (ThreadId)mutex->__m_owner, mutex );

   vg_assert(i <= VG_N_THREADS);
   if (i == VG_N_THREADS) {
      /* Nobody else is waiting on it. */
      mutex->__m_count = 0;
      mutex->__m_owner = VG_INVALID_THREADID;
   } else {
      /* Notionally transfer the hold to thread i, whose
         pthread_mutex_lock() call now returns with 0 (success). */
      /* The .count is already == 1. */
      vg_assert(VG_(threads)[i].associated_mx == mutex);
      mutex->__m_owner = (_pthread_descr)i;
      VG_(threads)[i].status        = VgTs_Runnable;
      VG_(threads)[i].associated_mx = NULL;
      /* m_edx already holds pth_mx_lock() success (0) */

      VG_TRACK( post_mutex_lock, (ThreadId)i, mutex);

      if (VG_(clo_trace_pthread_level) >= 1) {
         VG_(sprintf)(msg_buf, "%s mx %p: RESUME",
                               caller, mutex );
         print_pthread_event(i, msg_buf);
      }
   }
}
2028
sewardje663cb92002-04-12 10:26:32 +00002029
/* Scheduler-side implementation of pthread_mutex_lock / pthread_mutex_trylock
   for thread 'tid'.  The client's return value is delivered with
   SET_PTHREQ_RETVAL; a thread that must wait is parked in VgTs_WaitMX
   (recording the mutex in .associated_mx) rather than blocking for real.
   NB: this manipulates glibc's (LinuxThreads) mutex fields __m_kind,
   __m_count and __m_owner directly. */
static
void do_pthread_mutex_lock( ThreadId tid,
                            Bool is_trylock,
                            pthread_mutex_t* mutex )
{
   Char  msg_buf[100];
   /* Name used only in trace messages; trailing spaces keep columns
      aligned with the longer "trylock" variant. */
   Char* caller
      = is_trylock ? "pthread_mutex_trylock"
                   : "pthread_mutex_lock   ";

   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf, "%s       mx %p ...", caller, mutex );
      print_pthread_event(tid, msg_buf);
   }

   /* Paranoia ... the requesting thread must be valid and currently
      runnable (it just made a client request). */
   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   /* POSIX doesn't mandate this, but for sanity ... */
   if (mutex == NULL) {
      VG_(record_pthread_error)( tid,
         "pthread_mutex_lock/trylock: mutex is NULL");
      SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
      return;
   }

   /* More paranoia ... reject mutexes whose kind field doesn't match a
      known LinuxThreads kind, or whose count has gone negative. */
   switch (mutex->__m_kind) {
#     ifndef GLIBC_2_1
      case PTHREAD_MUTEX_TIMED_NP:
      case PTHREAD_MUTEX_ADAPTIVE_NP:
#     endif
#     ifdef GLIBC_2_1
      case PTHREAD_MUTEX_FAST_NP:
#     endif
      case PTHREAD_MUTEX_RECURSIVE_NP:
      case PTHREAD_MUTEX_ERRORCHECK_NP:
         if (mutex->__m_count >= 0) break;
         /* else fall thru */
      default:
         VG_(record_pthread_error)( tid,
            "pthread_mutex_lock/trylock: mutex is invalid");
         SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
         return;
   }

   if (mutex->__m_count > 0) {

      vg_assert(VG_(is_valid_tid)((ThreadId)mutex->__m_owner));

      /* Someone has it already. */
      if ((ThreadId)mutex->__m_owner == tid) {
         /* It's locked -- by me! */
         if (mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
            /* Recursive mutex: bump the count and return 0 (success). */
            mutex->__m_count++;
            SET_PTHREQ_RETVAL(tid, 0);
            if (0)
               VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n",
                           tid, mutex, mutex->__m_count);
            return;
         } else {
            /* Non-recursive relock by the owner: EBUSY for trylock,
               EDEADLK for a blocking lock. */
            if (is_trylock)
               SET_PTHREQ_RETVAL(tid, EBUSY);
            else
               SET_PTHREQ_RETVAL(tid, EDEADLK);
            return;
         }
      } else {
         /* Someone else has it; we have to wait.  Mark ourselves
            thusly. */
         /* GUARD: __m_count > 0 && __m_owner is valid */
         if (is_trylock) {
            /* caller is polling; so return immediately. */
            SET_PTHREQ_RETVAL(tid, EBUSY);
         } else {
            VG_TRACK ( pre_mutex_lock, tid, mutex );

            VG_(threads)[tid].status        = VgTs_WaitMX;
            VG_(threads)[tid].associated_mx = mutex;
            /* Pre-load the success value now; it is what the thread will
               see when the unlocker eventually resumes it. */
            SET_PTHREQ_RETVAL(tid, 0); /* pth_mx_lock success value */
            if (VG_(clo_trace_pthread_level) >= 1) {
               VG_(sprintf)(msg_buf, "%s    mx %p: BLOCK",
                                     caller, mutex );
               print_pthread_event(tid, msg_buf);
            }
         }
         return;
      }

   } else {
      /* Nobody owns it.  Sanity check ... */
      vg_assert(mutex->__m_owner == VG_INVALID_THREADID);

      VG_TRACK ( pre_mutex_lock, tid, mutex );

      /* We get it! [for the first time]. */
      mutex->__m_count = 1;
      mutex->__m_owner = (_pthread_descr)tid;

      /* return 0 (success). */
      SET_PTHREQ_RETVAL(tid, 0);

      VG_TRACK( post_mutex_lock, tid, mutex);
   }
}
2137
2138
/* Scheduler-side implementation of pthread_mutex_unlock for thread 'tid'.
   Validates ownership, handles recursive lock counts, and hands the mutex
   to at most one blocked waiter via release_one_thread_waiting_on_mutex.
   The client's return value is delivered with SET_PTHREQ_RETVAL. */
static
void do_pthread_mutex_unlock ( ThreadId tid,
                               pthread_mutex_t* mutex )
{
   Char msg_buf[100];

   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf, "pthread_mutex_unlock     mx %p ...", mutex );
      print_pthread_event(tid, msg_buf);
   }

   /* Paranoia ... */
   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   if (mutex == NULL) {
      VG_(record_pthread_error)( tid,
         "pthread_mutex_unlock: mutex is NULL");
      SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
      return;
   }

   /* If this was locked before the dawn of time, pretend it was
      locked now so that it balances with unlocks */
   if (mutex->__m_kind & VG_PTHREAD_PREHISTORY) {
      mutex->__m_kind &= ~VG_PTHREAD_PREHISTORY;
      /* Synthesise the lock events the tool never saw, attributed to
         the recorded owner. */
      VG_TRACK( pre_mutex_lock, (ThreadId)mutex->__m_owner, mutex );
      VG_TRACK( post_mutex_lock, (ThreadId)mutex->__m_owner, mutex );
   }

   /* More paranoia ... same kind/count validity check as in
      do_pthread_mutex_lock. */
   switch (mutex->__m_kind) {
#     ifndef GLIBC_2_1
      case PTHREAD_MUTEX_TIMED_NP:
      case PTHREAD_MUTEX_ADAPTIVE_NP:
#     endif
#     ifdef GLIBC_2_1
      case PTHREAD_MUTEX_FAST_NP:
#     endif
      case PTHREAD_MUTEX_RECURSIVE_NP:
      case PTHREAD_MUTEX_ERRORCHECK_NP:
         if (mutex->__m_count >= 0) break;
         /* else fall thru */
      default:
         VG_(record_pthread_error)( tid,
            "pthread_mutex_unlock: mutex is invalid");
         SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
         return;
   }

   /* Barf if we don't currently hold the mutex. */
   if (mutex->__m_count == 0) {
      /* nobody holds it */
      VG_(record_pthread_error)( tid,
         "pthread_mutex_unlock: mutex is not locked");
      SET_PTHREQ_RETVAL(tid, EPERM);
      return;
   }

   if ((ThreadId)mutex->__m_owner != tid) {
      /* we don't hold it */
      VG_(record_pthread_error)( tid,
         "pthread_mutex_unlock: mutex is locked by a different thread");
      SET_PTHREQ_RETVAL(tid, EPERM);
      return;
   }

   /* If it's a multiply-locked recursive mutex, just decrement the
      lock count and return. */
   if (mutex->__m_count > 1) {
      vg_assert(mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
      mutex->__m_count --;
      SET_PTHREQ_RETVAL(tid, 0); /* success */
      return;
   }

   /* Now we're sure it is locked exactly once, and by the thread who
      is now doing an unlock on it. */
   vg_assert(mutex->__m_count == 1);
   vg_assert((ThreadId)mutex->__m_owner == tid);

   /* Release at max one thread waiting on this mutex.  The caller
      string names the operation the *resumed* thread will appear to
      return from, hence "pthread_mutex_lock". */
   release_one_thread_waiting_on_mutex ( mutex, "pthread_mutex_lock" );

   /* Our (tid's) pth_unlock() returns with 0 (success). */
   SET_PTHREQ_RETVAL(tid, 0); /* Success. */
}
2226
2227
sewardj6072c362002-04-19 14:40:57 +00002228/* -----------------------------------------------------------
2229 CONDITION VARIABLES
2230 -------------------------------------------------------- */
sewardje663cb92002-04-12 10:26:32 +00002231
sewardj6072c362002-04-19 14:40:57 +00002232/* The relevant native types are as follows:
2233 (copied from /usr/include/bits/pthreadtypes.h)
sewardj77e466c2002-04-14 02:29:29 +00002234
   -- Conditions (not abstract because of PTHREAD_COND_INITIALIZER)
2236 typedef struct
2237 {
2238 struct _pthread_fastlock __c_lock; -- Protect against concurrent access
2239 _pthread_descr __c_waiting; -- Threads waiting on this condition
2240 } pthread_cond_t;
sewardj77e466c2002-04-14 02:29:29 +00002241
   -- Attribute for condition variables.
2243 typedef struct
2244 {
2245 int __dummy;
2246 } pthread_condattr_t;
sewardj77e466c2002-04-14 02:29:29 +00002247
sewardj6072c362002-04-19 14:40:57 +00002248 #define PTHREAD_COND_INITIALIZER {__LOCK_INITIALIZER, 0}
sewardj77e466c2002-04-14 02:29:29 +00002249
sewardj3b5d8862002-04-20 13:53:23 +00002250 We don't use any fields of pthread_cond_t for anything at all.
2251 Only the identity of the CVs is important.
sewardj6072c362002-04-19 14:40:57 +00002252
2253 Linux pthreads supports no attributes on condition variables, so we
sewardj3b5d8862002-04-20 13:53:23 +00002254 don't need to think too hard there. */
sewardj6072c362002-04-19 14:40:57 +00002255
sewardj77e466c2002-04-14 02:29:29 +00002256
/* Called when thread 'tid', blocked in pthread_cond_timedwait, hits its
   timeout.  Before cond_wait can return the thread must reacquire the
   associated mutex: if the mutex is free we hand it over and make the
   thread Runnable; otherwise we move it to VgTs_WaitMX on that mutex.
   Either way the eventual client return value is ETIMEDOUT. */
static
void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid )
{
   Char             msg_buf[100];
   pthread_mutex_t* mx;
   pthread_cond_t*  cv;

   /* The thread must really be in a timed CV wait (awaken_at ==
      0xFFFFFFFF denotes an untimed wait). */
   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_WaitCV
             && VG_(threads)[tid].awaken_at != 0xFFFFFFFF);
   mx = VG_(threads)[tid].associated_mx;
   vg_assert(mx != NULL);
   cv = VG_(threads)[tid].associated_cv;
   vg_assert(cv != NULL);

   if (mx->__m_owner == VG_INVALID_THREADID) {
      /* Currently unheld; hand it out to thread tid. */
      vg_assert(mx->__m_count == 0);
      VG_(threads)[tid].status        = VgTs_Runnable;
      SET_PTHREQ_RETVAL(tid, ETIMEDOUT);  /* pthread_cond_wait return value */
      VG_(threads)[tid].associated_cv = NULL;
      VG_(threads)[tid].associated_mx = NULL;
      mx->__m_owner = (_pthread_descr)tid;
      mx->__m_count = 1;

      VG_TRACK( post_mutex_lock, tid, mx );

      if (VG_(clo_trace_pthread_level) >= 1) {
         VG_(sprintf)(msg_buf,
            "pthread_cond_timedwai cv %p: TIMEOUT with mx %p",
            cv, mx );
         print_pthread_event(tid, msg_buf);
      }
   } else {
      /* Currently held.  Make thread tid be blocked on it. */
      vg_assert(mx->__m_count > 0);
      VG_TRACK( pre_mutex_lock, tid, mx );

      VG_(threads)[tid].status        = VgTs_WaitMX;
      SET_PTHREQ_RETVAL(tid, ETIMEDOUT);  /* pthread_cond_wait return value */
      VG_(threads)[tid].associated_cv = NULL;
      VG_(threads)[tid].associated_mx = mx;
      if (VG_(clo_trace_pthread_level) >= 1) {
         VG_(sprintf)(msg_buf,
            "pthread_cond_timedwai cv %p: TIMEOUT -> BLOCK for mx %p",
            cv, mx );
         print_pthread_event(tid, msg_buf);
      }
   }
}
2307
2308
/* Wake up to 'n_to_release' threads waiting on condition variable 'cond'
   (n_to_release == VG_N_THREADS effects a broadcast).  For each woken
   thread: if its associated mutex is free, grant it and make the thread
   Runnable; otherwise transfer the thread to VgTs_WaitMX on that mutex.
   'caller' is used only in trace messages.  NOTE(review): threads are
   scanned in slot order, so wakeup order is by thread id, not FIFO. */
static
void release_N_threads_waiting_on_cond ( pthread_cond_t* cond,
                                         Int n_to_release,
                                         Char* caller )
{
   Int              i;
   Char             msg_buf[100];
   pthread_mutex_t* mx;

   while (True) {
      if (n_to_release == 0)
         return;

      /* Find a thread waiting on this CV.  Slot 0 is never used. */
      for (i = 1; i < VG_N_THREADS; i++) {
         if (VG_(threads)[i].status == VgTs_Empty)
            continue;
         if (VG_(threads)[i].status == VgTs_WaitCV
             && VG_(threads)[i].associated_cv == cond)
            break;
      }
      vg_assert(i <= VG_N_THREADS);

      if (i == VG_N_THREADS) {
         /* Nobody else is waiting on it. */
         return;
      }

      mx = VG_(threads)[i].associated_mx;
      vg_assert(mx != NULL);

      VG_TRACK( pre_mutex_lock, i, mx );

      if (mx->__m_owner == VG_INVALID_THREADID) {
         /* Currently unheld; hand it out to thread i. */
         vg_assert(mx->__m_count == 0);
         VG_(threads)[i].status        = VgTs_Runnable;
         VG_(threads)[i].associated_cv = NULL;
         VG_(threads)[i].associated_mx = NULL;
         mx->__m_owner = (_pthread_descr)i;
         mx->__m_count = 1;
         /* .m_edx already holds pth_cond_wait success value (0) */

         VG_TRACK( post_mutex_lock, i, mx );

         if (VG_(clo_trace_pthread_level) >= 1) {
            VG_(sprintf)(msg_buf, "%s   cv %p: RESUME with mx %p",
                                  caller, cond, mx );
            print_pthread_event(i, msg_buf);
         }

      } else {
         /* Currently held.  Make thread i be blocked on it. */
         vg_assert(mx->__m_count > 0);
         VG_(threads)[i].status        = VgTs_WaitMX;
         VG_(threads)[i].associated_cv = NULL;
         VG_(threads)[i].associated_mx = mx;
         SET_PTHREQ_RETVAL(i, 0); /* pth_cond_wait success value */

         if (VG_(clo_trace_pthread_level) >= 1) {
            VG_(sprintf)(msg_buf, "%s   cv %p: BLOCK for mx %p",
                                  caller, cond, mx );
            print_pthread_event(i, msg_buf);
         }

      }

      n_to_release--;
   }
}
2379
2380
/* Scheduler-side implementation of pthread_cond_wait / _timedwait for
   thread 'tid'.  Validates cond/mutex and ownership, parks the thread in
   VgTs_WaitCV (recording both the CV and the mutex), registers a timeout
   if one was requested, and then releases the mutex -- the atomic
   "unlock and wait" required by POSIX. */
static
void do_pthread_cond_wait ( ThreadId tid,
                            pthread_cond_t *cond,
                            pthread_mutex_t *mutex,
                            UInt ms_end )
{
   Char msg_buf[100];

   /* If ms_end == 0xFFFFFFFF, wait forever (no timeout).  Otherwise,
      ms_end is the ending millisecond. */

   /* pre: mutex should be a valid mutex and owned by tid. */
   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf, "pthread_cond_wait        cv %p, mx %p, end %d ...",
                            cond, mutex, ms_end );
      print_pthread_event(tid, msg_buf);
   }

   /* Paranoia ... */
   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   if (mutex == NULL || cond == NULL) {
      VG_(record_pthread_error)( tid,
         "pthread_cond_wait/timedwait: cond or mutex is NULL");
      SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
      return;
   }

   /* More paranoia ... same mutex-kind check as do_pthread_mutex_lock. */
   switch (mutex->__m_kind) {
#     ifndef GLIBC_2_1
      case PTHREAD_MUTEX_TIMED_NP:
      case PTHREAD_MUTEX_ADAPTIVE_NP:
#     endif
#     ifdef GLIBC_2_1
      case PTHREAD_MUTEX_FAST_NP:
#     endif
      case PTHREAD_MUTEX_RECURSIVE_NP:
      case PTHREAD_MUTEX_ERRORCHECK_NP:
         if (mutex->__m_count >= 0) break;
         /* else fall thru */
      default:
         VG_(record_pthread_error)( tid,
            "pthread_cond_wait/timedwait: mutex is invalid");
         SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
         return;
   }

   /* Barf if we don't currently hold the mutex. */
   if (mutex->__m_count == 0 /* nobody holds it */
       || (ThreadId)mutex->__m_owner != tid /* we don't hold it */) {
      VG_(record_pthread_error)( tid,
         "pthread_cond_wait/timedwait: mutex is unlocked "
         "or is locked but not owned by thread");
      SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
      return;
   }

   /* Queue ourselves on the condition. */
   VG_(threads)[tid].status        = VgTs_WaitCV;
   VG_(threads)[tid].associated_cv = cond;
   VG_(threads)[tid].associated_mx = mutex;
   VG_(threads)[tid].awaken_at     = ms_end;
   if (ms_end != 0xFFFFFFFF)
      VG_(add_timeout)(tid, ms_end);

   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf,
                   "pthread_cond_wait        cv %p, mx %p: BLOCK",
                   cond, mutex );
      print_pthread_event(tid, msg_buf);
   }

   /* Release the mutex.  This may immediately resume some other thread
      that was blocked in pthread_mutex_lock on it. */
   release_one_thread_waiting_on_mutex ( mutex, "pthread_cond_wait    " );
}
2458
2459
2460static
2461void do_pthread_cond_signal_or_broadcast ( ThreadId tid,
2462 Bool broadcast,
2463 pthread_cond_t *cond )
2464{
2465 Char msg_buf[100];
2466 Char* caller
2467 = broadcast ? "pthread_cond_broadcast"
2468 : "pthread_cond_signal ";
2469
2470 if (VG_(clo_trace_pthread_level) >= 2) {
2471 VG_(sprintf)(msg_buf, "%s cv %p ...",
2472 caller, cond );
2473 print_pthread_event(tid, msg_buf);
2474 }
2475
2476 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002477 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002478 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj3b5d8862002-04-20 13:53:23 +00002479
2480 if (cond == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002481 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002482 "pthread_cond_signal/broadcast: cond is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002483 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002484 return;
2485 }
2486
2487 release_N_threads_waiting_on_cond (
2488 cond,
2489 broadcast ? VG_N_THREADS : 1,
2490 caller
2491 );
2492
njnd3040452003-05-19 15:04:06 +00002493 SET_PTHREQ_RETVAL(tid, 0); /* success */
sewardj3b5d8862002-04-20 13:53:23 +00002494}
2495
sewardj77e466c2002-04-14 02:29:29 +00002496
sewardj5f07b662002-04-23 16:52:51 +00002497/* -----------------------------------------------------------
2498 THREAD SPECIFIC DATA
2499 -------------------------------------------------------- */
2500
2501static __inline__
2502Bool is_valid_key ( ThreadKey k )
2503{
2504 /* k unsigned; hence no < 0 check */
2505 if (k >= VG_N_THREAD_KEYS) return False;
2506 if (!vg_thread_keys[k].inuse) return False;
2507 return True;
2508}
2509
sewardj00a66b12002-10-12 16:42:35 +00002510
2511/* Return in %EDX a value of 1 if the key is valid, else 0. */
2512static
2513void do_pthread_key_validate ( ThreadId tid,
2514 pthread_key_t key )
2515{
2516 Char msg_buf[100];
2517
2518 if (VG_(clo_trace_pthread_level) >= 1) {
2519 VG_(sprintf)(msg_buf, "pthread_key_validate key %p",
2520 key );
2521 print_pthread_event(tid, msg_buf);
2522 }
2523
2524 vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
2525 vg_assert(VG_(is_valid_tid)(tid)
2526 && VG_(threads)[tid].status == VgTs_Runnable);
2527
2528 if (is_valid_key((ThreadKey)key)) {
njnd3040452003-05-19 15:04:06 +00002529 SET_PTHREQ_RETVAL(tid, 1);
sewardj00a66b12002-10-12 16:42:35 +00002530 } else {
njnd3040452003-05-19 15:04:06 +00002531 SET_PTHREQ_RETVAL(tid, 0);
sewardj00a66b12002-10-12 16:42:35 +00002532 }
2533}
2534
2535
/* Scheduler-side implementation of pthread_key_create for thread 'tid'.
   Allocates the lowest-numbered free slot in vg_thread_keys[], records
   the destructor, writes the slot index through *key (with tool
   read/write tracking), and returns 0; returns EAGAIN if the fixed-size
   table is full. */
static
void do_pthread_key_create ( ThreadId tid,
                             pthread_key_t* key,
                             void (*destructor)(void*) )
{
   Int  i;
   Char msg_buf[100];

   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf, "pthread_key_create      *key %p, destr %p",
                            key, destructor );
      print_pthread_event(tid, msg_buf);
   }

   /* The *key store below relies on these being the same size. */
   vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   /* Find the first unused key slot. */
   for (i = 0; i < VG_N_THREAD_KEYS; i++)
      if (!vg_thread_keys[i].inuse)
         break;

   if (i == VG_N_THREAD_KEYS) {
      VG_(message)(Vg_UserMsg, "pthread_key_create() asked for too many keys (more than %d): increase VG_N_THREAD_KEYS and recompile Valgrind.",
                   VG_N_THREAD_KEYS);
      SET_PTHREQ_RETVAL(tid, EAGAIN);
      return;
   }

   vg_thread_keys[i].inuse      = True;
   vg_thread_keys[i].destructor = destructor;

   /* check key for addressibility */
   VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_key_create: key",
             (Addr)key, sizeof(pthread_key_t));
   *key = i;
   VG_TRACK( post_mem_write, (Addr)key, sizeof(pthread_key_t) );

   SET_PTHREQ_RETVAL(tid, 0);
}
2576
2577
/* Scheduler-side implementation of pthread_key_delete for thread 'tid'.
   Marks the key slot free and clears its destructor.  NOTE(review):
   per-thread values previously stored under this key are not cleared
   here; a later key_create reusing the slot would see stale values --
   appears to be a known limitation of this implementation. */
static
void do_pthread_key_delete ( ThreadId tid, pthread_key_t key )
{
   Char msg_buf[100];
   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf, "pthread_key_delete      key %d",
                            key );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   if (!is_valid_key(key)) {
      VG_(record_pthread_error)( tid,
         "pthread_key_delete: key is invalid");
      SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
      return;
   }

   vg_thread_keys[key].inuse      = False;
   vg_thread_keys[key].destructor = NULL;
   SET_PTHREQ_RETVAL(tid, 0);
}
2602
2603
/* Get the .specific_ptr for a thread.  Return 1 if the thread-slot
   isn't in use, so that client-space can scan all thread slots.  1
   cannot be confused with NULL or a legitimately-aligned specific_ptr
   value. */
static
void do_pthread_getspecific_ptr ( ThreadId tid )
{
   void** specifics_ptr;
   Char   msg_buf[100];

   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf, "pthread_getspecific_ptr" );
      print_pthread_event(tid, msg_buf);
   }

   /* Empty slots are allowed here, per the scanning use-case above. */
   vg_assert(VG_(is_valid_or_empty_tid)(tid));

   if (VG_(threads)[tid].status == VgTs_Empty) {
      SET_PTHREQ_RETVAL(tid, 1);
      return;
   }

   specifics_ptr = VG_(threads)[tid].specifics_ptr;
   /* The pointer is either unset or word-aligned; anything else would
      indicate corruption. */
   vg_assert(specifics_ptr == NULL
             || IS_ALIGNED4_ADDR(specifics_ptr));

   SET_PTHREQ_RETVAL(tid, (UInt)specifics_ptr);
}
2632
2633
2634static
sewardj00a66b12002-10-12 16:42:35 +00002635void do_pthread_setspecific_ptr ( ThreadId tid, void** ptr )
sewardj5f07b662002-04-23 16:52:51 +00002636{
2637 Char msg_buf[100];
2638 if (VG_(clo_trace_pthread_level) >= 1) {
sewardj00a66b12002-10-12 16:42:35 +00002639 VG_(sprintf)(msg_buf, "pthread_setspecific_ptr ptr %p",
2640 ptr );
sewardj5f07b662002-04-23 16:52:51 +00002641 print_pthread_event(tid, msg_buf);
2642 }
2643
sewardjb48e5002002-05-13 00:16:03 +00002644 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002645 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002646
sewardj00a66b12002-10-12 16:42:35 +00002647 VG_(threads)[tid].specifics_ptr = ptr;
njnd3040452003-05-19 15:04:06 +00002648 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002649}
2650
2651
/* Helper for calling destructors at thread exit.  If key is valid,
   copy the thread's specific value into cu->arg and put the *key*'s
   destructor fn address in cu->fn.  Then return 0 to the caller.
   Otherwise return non-zero to the caller. */
static
void do__get_key_destr_and_spec ( ThreadId tid,
                                  pthread_key_t key,
                                  CleanupEntry* cu )
{
   Char msg_buf[100];
   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf,
         "get_key_destr_and_arg (key = %d)", key );
      print_pthread_event(tid, msg_buf);
   }
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(key >= 0 && key < VG_N_THREAD_KEYS);

   /* Unused key: signal the caller to skip it. */
   if (!vg_thread_keys[key].inuse) {
      SET_PTHREQ_RETVAL(tid, -1);
      return;
   }
   /* cu lives in client space, so track the write for the tool. */
   VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "get_key_destr_and_spec: cu",
             (Addr)cu, sizeof(CleanupEntry) );

   cu->fn = vg_thread_keys[key].destructor;
   if (VG_(threads)[tid].specifics_ptr == NULL) {
      /* Thread never set any specifics; destructor gets NULL. */
      cu->arg = NULL;
   } else {
      VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
                "get_key_destr_and_spec: key",
                (Addr)(&VG_(threads)[tid].specifics_ptr[key]),
                sizeof(void*) );
      cu->arg = VG_(threads)[tid].specifics_ptr[key];
   }

   VG_TRACK( post_mem_write, (Addr)cu, sizeof(CleanupEntry) );
   SET_PTHREQ_RETVAL(tid, 0);
}
2691
2692
sewardjb48e5002002-05-13 00:16:03 +00002693/* ---------------------------------------------------
2694 SIGNALS
2695 ------------------------------------------------ */
2696
/* See comment in vg_libthread.c:pthread_sigmask() regarding
   deliberate confusion of types sigset_t and vki_sigset_t.  Return 0
   for OK and 1 for some kind of addressing error, which the
   vg_libpthread.c routine turns into return values 0 and EFAULT
   respectively. */
static
void do_pthread_sigmask ( ThreadId tid,
                          Int vki_how,
                          vki_ksigset_t* newmask,
                          vki_ksigset_t* oldmask )
{
   Char msg_buf[100];
   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf,
         "pthread_sigmask          vki_how %d, newmask %p, oldmask %p",
         vki_how, newmask, oldmask );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   /* Both mask pointers are optional; tell the tool about the accesses
      we are about to make on the client's behalf. */
   if (newmask)
      VG_TRACK( pre_mem_read, Vg_CorePThread, tid, "pthread_sigmask: newmask",
                (Addr)newmask, sizeof(vki_ksigset_t));
   if (oldmask)
      VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_sigmask: oldmask",
                (Addr)oldmask, sizeof(vki_ksigset_t));

   /* Delegate the actual mask bookkeeping to the signal machinery. */
   VG_(do_pthread_sigmask_SCSS_upd) ( tid, vki_how, newmask, oldmask );

   if (oldmask)
      VG_TRACK( post_mem_write, (Addr)oldmask, sizeof(vki_ksigset_t) );

   /* Success. */
   SET_PTHREQ_RETVAL(tid, 0);
}
2734
2735
2736static
sewardj018f7622002-05-15 21:13:39 +00002737void do_pthread_kill ( ThreadId tid, /* me */
2738 ThreadId thread, /* thread to signal */
2739 Int sig )
2740{
2741 Char msg_buf[100];
2742
2743 if (VG_(clo_trace_signals) || VG_(clo_trace_pthread_level) >= 1) {
2744 VG_(sprintf)(msg_buf,
2745 "pthread_kill thread %d, signo %d",
2746 thread, sig );
2747 print_pthread_event(tid, msg_buf);
2748 }
2749
2750 vg_assert(VG_(is_valid_tid)(tid)
2751 && VG_(threads)[tid].status == VgTs_Runnable);
2752
sewardj4dced352002-06-04 22:54:20 +00002753 if (!VG_(is_valid_tid)(thread)) {
njn25e49d8e72002-09-23 09:36:25 +00002754 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002755 "pthread_kill: invalid target thread");
jsgf855d93d2003-10-13 22:26:55 +00002756 SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
2757 return;
2758 }
2759
2760 if (sig == 0) {
2761 /* OK, signal 0 is just for testing */
2762 SET_PTHREQ_RETVAL(tid, 0);
sewardj018f7622002-05-15 21:13:39 +00002763 return;
2764 }
2765
2766 if (sig < 1 || sig > VKI_KNSIG) {
jsgf855d93d2003-10-13 22:26:55 +00002767 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj018f7622002-05-15 21:13:39 +00002768 return;
2769 }
2770
2771 VG_(send_signal_to_thread)( thread, sig );
njnd3040452003-05-19 15:04:06 +00002772 SET_PTHREQ_RETVAL(tid, 0);
sewardjb48e5002002-05-13 00:16:03 +00002773}
2774
2775
sewardj2cb00342002-06-28 01:46:26 +00002776/* -----------------------------------------------------------
2777 FORK HANDLERS.
2778 -------------------------------------------------------- */
2779
2780static
2781void do__set_fhstack_used ( ThreadId tid, Int n )
2782{
2783 Char msg_buf[100];
2784 if (VG_(clo_trace_sched)) {
2785 VG_(sprintf)(msg_buf, "set_fhstack_used to %d", n );
2786 print_pthread_event(tid, msg_buf);
2787 }
2788
2789 vg_assert(VG_(is_valid_tid)(tid)
2790 && VG_(threads)[tid].status == VgTs_Runnable);
2791
2792 if (n >= 0 && n < VG_N_FORKHANDLERSTACK) {
2793 vg_fhstack_used = n;
njnd3040452003-05-19 15:04:06 +00002794 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002795 } else {
njnd3040452003-05-19 15:04:06 +00002796 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002797 }
2798}
2799
2800
2801static
2802void do__get_fhstack_used ( ThreadId tid )
2803{
2804 Int n;
2805 Char msg_buf[100];
2806 if (VG_(clo_trace_sched)) {
2807 VG_(sprintf)(msg_buf, "get_fhstack_used" );
2808 print_pthread_event(tid, msg_buf);
2809 }
2810
2811 vg_assert(VG_(is_valid_tid)(tid)
2812 && VG_(threads)[tid].status == VgTs_Runnable);
2813
2814 n = vg_fhstack_used;
2815 vg_assert(n >= 0 && n < VG_N_FORKHANDLERSTACK);
njnd3040452003-05-19 15:04:06 +00002816 SET_PTHREQ_RETVAL(tid, n);
sewardj2cb00342002-06-28 01:46:26 +00002817}
2818
2819static
2820void do__set_fhstack_entry ( ThreadId tid, Int n, ForkHandlerEntry* fh )
2821{
2822 Char msg_buf[100];
2823 if (VG_(clo_trace_sched)) {
2824 VG_(sprintf)(msg_buf, "set_fhstack_entry %d to %p", n, fh );
2825 print_pthread_event(tid, msg_buf);
2826 }
2827
2828 vg_assert(VG_(is_valid_tid)(tid)
2829 && VG_(threads)[tid].status == VgTs_Runnable);
njn72718642003-07-24 08:45:32 +00002830 VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
njn25e49d8e72002-09-23 09:36:25 +00002831 "pthread_atfork: prepare/parent/child",
2832 (Addr)fh, sizeof(ForkHandlerEntry));
sewardj2cb00342002-06-28 01:46:26 +00002833
njn25e49d8e72002-09-23 09:36:25 +00002834 if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
njnd3040452003-05-19 15:04:06 +00002835 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002836 return;
2837 }
2838
2839 vg_fhstack[n] = *fh;
njnd3040452003-05-19 15:04:06 +00002840 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002841}
2842
2843
2844static
2845void do__get_fhstack_entry ( ThreadId tid, Int n, /*OUT*/
2846 ForkHandlerEntry* fh )
2847{
2848 Char msg_buf[100];
2849 if (VG_(clo_trace_sched)) {
2850 VG_(sprintf)(msg_buf, "get_fhstack_entry %d", n );
2851 print_pthread_event(tid, msg_buf);
2852 }
2853
2854 vg_assert(VG_(is_valid_tid)(tid)
2855 && VG_(threads)[tid].status == VgTs_Runnable);
njn72718642003-07-24 08:45:32 +00002856 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "fork: prepare/parent/child",
njn25e49d8e72002-09-23 09:36:25 +00002857 (Addr)fh, sizeof(ForkHandlerEntry));
sewardj2cb00342002-06-28 01:46:26 +00002858
njn25e49d8e72002-09-23 09:36:25 +00002859 if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
njnd3040452003-05-19 15:04:06 +00002860 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002861 return;
2862 }
2863
2864 *fh = vg_fhstack[n];
njnd3040452003-05-19 15:04:06 +00002865 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002866
njn25e49d8e72002-09-23 09:36:25 +00002867 VG_TRACK( post_mem_write, (Addr)fh, sizeof(ForkHandlerEntry) );
sewardj2cb00342002-06-28 01:46:26 +00002868}
2869
njnd3040452003-05-19 15:04:06 +00002870/* ---------------------------------------------------------------------
2871 Specifying shadow register values
2872 ------------------------------------------------------------------ */
2873
/* Write ret_shadow into thread tid's shadow copy of %eax — the
   register a syscall's result is returned in — so skins can track
   the definedness/state of syscall return values. */
void VG_(set_return_from_syscall_shadow) ( ThreadId tid, UInt ret_shadow )
{
   VG_(set_thread_shadow_archreg)(tid, R_EAX, ret_shadow);
}
2878
/* Return the shadow value of %ebx for the current thread.  Per the
   function's name this is intended to be the shadow of the exit
   status (on x86 Linux the first syscall argument — exit()'s status
   — travels in %ebx). */
UInt VG_(get_exit_status_shadow) ( void )
{
   return VG_(get_shadow_archreg)(R_EBX);
}
2883
sewardj2cb00342002-06-28 01:46:26 +00002884
sewardje663cb92002-04-12 10:26:32 +00002885/* ---------------------------------------------------------------------
sewardj124ca2a2002-06-20 10:19:38 +00002886 Handle client requests.
sewardje663cb92002-04-12 10:26:32 +00002887 ------------------------------------------------------------------ */
2888
sewardj124ca2a2002-06-20 10:19:38 +00002889/* Do a client request for the thread tid. After the request, tid may
2890 or may not still be runnable; if not, the scheduler will have to
2891 choose a new thread to run.
2892*/
static
void do_client_request ( ThreadId tid )
{
   /* The client-side request macros leave a pointer to the argument
      block in the thread's %eax: arg[0] is the request code,
      arg[1..4] are its operands. */
   UInt* arg    = (UInt*)(VG_(threads)[tid].m_eax);
   UInt  req_no = arg[0];

   if (0)
      VG_(printf)("req no = 0x%x\n", req_no);
   switch (req_no) {

      /* CLIENT_CALL0..3: call the client-supplied function arg[1]
         with 0..3 word-sized arguments and reply with its result.
         A NULL function pointer is reported rather than called. */
      case VG_USERREQ__CLIENT_CALL0: {
         UInt (*f)(void) = (void*)arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL: func=%p\n", f);
         else
            SET_CLCALL_RETVAL(tid, f ( ), (Addr)f);
         break;
      }
      case VG_USERREQ__CLIENT_CALL1: {
         UInt (*f)(UInt) = (void*)arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL: func=%p\n", f);
         else
            SET_CLCALL_RETVAL(tid, f ( arg[2] ), (Addr)f );
         break;
      }
      case VG_USERREQ__CLIENT_CALL2: {
         UInt (*f)(UInt, UInt) = (void*)arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL: func=%p\n", f);
         else
            SET_CLCALL_RETVAL(tid, f ( arg[2], arg[3] ), (Addr)f );
         break;
      }
      case VG_USERREQ__CLIENT_CALL3: {
         UInt (*f)(UInt, UInt, UInt) = (void*)arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL: func=%p\n", f);
         else
            SET_CLCALL_RETVAL(tid, f ( arg[2], arg[3], arg[4] ), (Addr)f );
         break;
      }

      /* Note: for skins that replace malloc() et al, we want to call
         the replacement versions.  For those that don't, we want to call
         VG_(cli_malloc)() et al.  We do this by calling SK_(malloc)(), which
         malloc-replacing skins must replace, but have its default definition
         call */

      /* Note: for MALLOC and FREE, must set the appropriate "lock"... see
         the comment in vg_defaults.c/SK_(malloc)() for why. */
      case VG_USERREQ__MALLOC:
         VG_(sk_malloc_called_by_scheduler) = True;
         SET_PTHREQ_RETVAL(
            tid, (UInt)SK_(malloc) ( arg[1] )
         );
         VG_(sk_malloc_called_by_scheduler) = False;
         break;

      case VG_USERREQ__FREE:
         VG_(sk_malloc_called_by_scheduler) = True;
         SK_(free) ( (void*)arg[1] );
         VG_(sk_malloc_called_by_scheduler) = False;
         SET_PTHREQ_RETVAL(tid, 0); /* irrelevant */
         break;

      /* Simple query requests, answered immediately. */
      case VG_USERREQ__PTHREAD_GET_THREADID:
         SET_PTHREQ_RETVAL(tid, tid);
         break;

      case VG_USERREQ__RUNNING_ON_VALGRIND:
         SET_CLREQ_RETVAL(tid, 1);
         break;

      case VG_USERREQ__GET_PTHREAD_TRACE_LEVEL:
         SET_PTHREQ_RETVAL(tid, VG_(clo_trace_pthread_level));
         break;

      case VG_USERREQ__READ_MILLISECOND_TIMER:
         SET_PTHREQ_RETVAL(tid, VG_(read_millisecond_timer)());
         break;

      /* Some of these may make thread tid non-runnable, but the
         scheduler checks for that on return from this function. */
      case VG_USERREQ__PTHREAD_MUTEX_LOCK:
         do_pthread_mutex_lock( tid, False, (void *)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_MUTEX_TRYLOCK:
         do_pthread_mutex_lock( tid, True, (void *)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
         do_pthread_mutex_unlock( tid, (void *)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_GETSPECIFIC_PTR:
         do_pthread_getspecific_ptr ( tid );
         break;

      case VG_USERREQ__SET_CANCELTYPE:
         do__set_canceltype ( tid, arg[1] );
         break;

      case VG_USERREQ__CLEANUP_PUSH:
         do__cleanup_push ( tid, (CleanupEntry*)(arg[1]) );
         break;

      case VG_USERREQ__CLEANUP_POP:
         do__cleanup_pop ( tid, (CleanupEntry*)(arg[1]) );
         break;

      case VG_USERREQ__TESTCANCEL:
         do__testcancel ( tid );
         break;

      case VG_USERREQ__PTHREAD_JOIN:
         do_pthread_join( tid, arg[1], (void**)(arg[2]) );
         break;

      /* COND_WAIT is the timed variant with an "infinite" timeout. */
      case VG_USERREQ__PTHREAD_COND_WAIT:
         do_pthread_cond_wait( tid,
                               (pthread_cond_t *)(arg[1]),
                               (pthread_mutex_t *)(arg[2]),
                               0xFFFFFFFF /* no timeout */ );
         break;

      case VG_USERREQ__PTHREAD_COND_TIMEDWAIT:
         do_pthread_cond_wait( tid,
                               (pthread_cond_t *)(arg[1]),
                               (pthread_mutex_t *)(arg[2]),
                               arg[3] /* timeout millisecond point */ );
         break;

      case VG_USERREQ__PTHREAD_COND_SIGNAL:
         do_pthread_cond_signal_or_broadcast(
            tid,
            False, /* signal, not broadcast */
            (pthread_cond_t *)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_COND_BROADCAST:
         do_pthread_cond_signal_or_broadcast(
            tid,
            True, /* broadcast, not signal */
            (pthread_cond_t *)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_KEY_VALIDATE:
         do_pthread_key_validate ( tid,
                                   (pthread_key_t)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_KEY_CREATE:
         do_pthread_key_create ( tid,
                                 (pthread_key_t*)(arg[1]),
                                 (void(*)(void*))(arg[2]) );
         break;

      case VG_USERREQ__PTHREAD_KEY_DELETE:
         do_pthread_key_delete ( tid,
                                 (pthread_key_t)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_SETSPECIFIC_PTR:
         do_pthread_setspecific_ptr ( tid,
                                      (void**)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_SIGMASK:
         do_pthread_sigmask ( tid,
                              arg[1],
                              (vki_ksigset_t*)(arg[2]),
                              (vki_ksigset_t*)(arg[3]) );
         break;

      case VG_USERREQ__PTHREAD_KILL:
         do_pthread_kill ( tid, arg[1], arg[2] );
         break;

      case VG_USERREQ__PTHREAD_YIELD:
         do_pthread_yield ( tid );
         /* On return from do_client_request(), the scheduler will
            select a new thread to run. */
         break;

      case VG_USERREQ__SET_CANCELSTATE:
         do__set_cancelstate ( tid, arg[1] );
         break;

      case VG_USERREQ__SET_OR_GET_DETACH:
         do__set_or_get_detach ( tid, arg[1], arg[2] );
         break;

      case VG_USERREQ__SET_CANCELPEND:
         do__set_cancelpend ( tid, arg[1], (void(*)(void*))arg[2] );
         break;

      case VG_USERREQ__WAIT_JOINER:
         do__wait_joiner ( tid, (void*)arg[1] );
         break;

      case VG_USERREQ__QUIT:
         do__quit ( tid );
         break;

      case VG_USERREQ__APPLY_IN_NEW_THREAD:
         do__apply_in_new_thread ( tid, (void*(*)(void*))arg[1],
                                   (void*)arg[2] );
         break;

      case VG_USERREQ__GET_KEY_D_AND_S:
         do__get_key_destr_and_spec ( tid,
                                      (pthread_key_t)arg[1],
                                      (CleanupEntry*)arg[2] );
         break;

      case VG_USERREQ__NUKE_OTHER_THREADS:
         VG_(nuke_all_threads_except) ( tid );
         SET_PTHREQ_RETVAL(tid, 0);
         break;

      case VG_USERREQ__PTHREAD_ERROR:
         VG_(record_pthread_error)( tid, (Char*)(arg[1]) );
         SET_PTHREQ_RETVAL(tid, 0);
         break;

      /* Fork-handler (pthread_atfork) stack maintenance. */
      case VG_USERREQ__SET_FHSTACK_USED:
         do__set_fhstack_used( tid, (Int)(arg[1]) );
         break;

      case VG_USERREQ__GET_FHSTACK_USED:
         do__get_fhstack_used( tid );
         break;

      case VG_USERREQ__SET_FHSTACK_ENTRY:
         do__set_fhstack_entry( tid, (Int)(arg[1]),
                                     (ForkHandlerEntry*)(arg[2]) );
         break;

      case VG_USERREQ__GET_FHSTACK_ENTRY:
         do__get_fhstack_entry( tid, (Int)(arg[1]),
                                     (ForkHandlerEntry*)(arg[2]) );
         break;

      case VG_USERREQ__SIGNAL_RETURNS:
         handle_signal_return(tid);
         break;


      /* Real-time signal range queries/allocation. */
      case VG_USERREQ__GET_SIGRT_MIN:
         SET_PTHREQ_RETVAL(tid, VG_(sig_rtmin));
         break;

      case VG_USERREQ__GET_SIGRT_MAX:
         SET_PTHREQ_RETVAL(tid, VG_(sig_rtmax));
         break;

      case VG_USERREQ__ALLOC_RTSIG:
         SET_PTHREQ_RETVAL(tid, VG_(sig_alloc_rtsig)((Int)arg[1]));
         break;

      /* printf-style message requests: arg[1] is the format string,
         arg[2] the va_list; the reply is the character count. */
      case VG_USERREQ__PRINTF: {
         int count =
            VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (va_list)arg[2] );
            SET_CLREQ_RETVAL( tid, count );
         break; }


      case VG_USERREQ__INTERNAL_PRINTF: {
         int count =
            VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (va_list)arg[2] );
            SET_CLREQ_RETVAL( tid, count );
         break; }

      case VG_USERREQ__PRINTF_BACKTRACE: {
         ExeContext *e = VG_(get_ExeContext)( tid );
         int count =
            VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (va_list)arg[2] );
            VG_(mini_stack_dump)(e->eips, VG_(clo_backtrace_size));
            SET_CLREQ_RETVAL( tid, count );
         break; }

      case VG_USERREQ__INTERNAL_PRINTF_BACKTRACE: {
         ExeContext *e = VG_(get_ExeContext)( tid );
         int count =
            VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (va_list)arg[2] );
            VG_(mini_stack_dump)(e->eips, VG_(clo_backtrace_size));
            SET_CLREQ_RETVAL( tid, count );
         break; }

      case VG_USERREQ__REGISTER_LIBC_FREERES:
         VG_(__libc_freeres_wrapper) = arg[1];
         SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
         break;

      /* Hand the client-side malloc wrappers the addresses of the
         skin's allocator entry points plus related settings. */
      case VG_USERREQ__GET_MALLOCFUNCS: {
         struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];

         info->sk_malloc = (Addr)SK_(malloc);
         info->sk_calloc = (Addr)SK_(calloc);
         info->sk_realloc = (Addr)SK_(realloc);
         info->sk_memalign = (Addr)SK_(memalign);
         info->sk___builtin_new = (Addr)SK_(__builtin_new);
         info->sk___builtin_vec_new = (Addr)SK_(__builtin_vec_new);
         info->sk_free = (Addr)SK_(free);
         info->sk___builtin_delete = (Addr)SK_(__builtin_delete);
         info->sk___builtin_vec_delete = (Addr)SK_(__builtin_vec_delete);

         info->arena_payload_szB = (Addr)VG_(arena_payload_szB);

         info->clo_sloppy_malloc = VG_(clo_sloppy_malloc);
         info->clo_trace_malloc = VG_(clo_trace_malloc);

         SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */

         break;
      }

      case VG_USERREQ__REGISTER_REDIRECT_SYM: {
         VG_(add_redirect_sym)((const Char *)arg[1], (const Char *)arg[2],
                               (const Char *)arg[3], (const Char *)arg[4]);
         break;
      }

      case VG_USERREQ__REGISTER_REDIRECT_ADDR: {
         VG_(add_redirect_addr)((const Char *)arg[1], (const Char *)arg[2],
                                (Addr)arg[3]);
         break;
      }

      /* Requests from the client program */

      case VG_USERREQ__DISCARD_TRANSLATIONS:
         if (VG_(clo_verbosity) > 2)
            VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
                         " addr %p, len %d\n",
                         (void*)arg[1], arg[2] );

         VG_(invalidate_translations)( arg[1], arg[2], True );

         SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
         break;

      case VG_USERREQ__COUNT_ERRORS:
         SET_CLREQ_RETVAL( tid, VG_(n_errs_found) );
         break;

      /* Anything else is forwarded to the skin, if it handles client
         requests; otherwise warn once and ignore. */
      default:
         if (VG_(needs).client_requests) {
            UInt ret;

            if (VG_(clo_verbosity) > 2)
               VG_(printf)("client request: code %x, addr %p, len %d\n",
                           arg[0], (void*)arg[1], arg[2] );

            if (SK_(handle_client_request) ( tid, arg, &ret ))
               SET_CLREQ_RETVAL(tid, ret);
         } else {
            static Bool whined = False;

            if (!whined) {
               // Allow for requests in core, but defined by skins, which
               // have 0 and 0 in their two high bytes.
               Char c1 = (arg[0] >> 24) & 0xff;
               Char c2 = (arg[0] >> 16) & 0xff;
               if (c1 == 0) c1 = '_';
               if (c2 == 0) c2 = '_';
               VG_(message)(Vg_UserMsg, "Warning:\n"
                   " unhandled client request: 0x%x (%c%c+0x%x). Perhaps\n"
                   " VG_(needs).client_requests should be set?\n",
                            arg[0], c1, c2, arg[0] & 0xffff);
               whined = True;
            }
         }
         break;
   }
}
3271
3272
sewardj6072c362002-04-19 14:40:57 +00003273/* ---------------------------------------------------------------------
3274 Sanity checking.
3275 ------------------------------------------------------------------ */
3276
3277/* Internal consistency checks on the sched/pthread structures. */
/* Internal consistency checks: walk the timeout list and the thread
   table, asserting the scheduler/pthread invariants, and abort the
   run on stack overflow of any non-root thread. */
static
void scheduler_sanity ( void )
{
   pthread_mutex_t* mx;
   pthread_cond_t* cv;
   Int i;
   struct timeout* top;
   UInt lasttime = 0;

   /* The timeout list must be sorted by expiry time, and each entry
      must name a valid (or empty) thread slot. */
   for(top = timeouts; top != NULL; top = top->next) {
      vg_assert(top->time >= lasttime);
      vg_assert(VG_(is_valid_or_empty_tid)(top->tid));

#if 0
      /* assert timeout entry is either stale, or associated with a
         thread in the right state

         XXX disable for now - can be stale, but times happen to match
      */
      vg_assert(VG_(threads)[top->tid].awaken_at != top->time ||
                VG_(threads)[top->tid].status == VgTs_Sleeping ||
                VG_(threads)[top->tid].status == VgTs_WaitCV);
#endif

      lasttime = top->time;
   }

   /* VG_(printf)("scheduler_sanity\n"); */
   /* Slot 0 is skipped; thread ids start at 1. */
   for (i = 1; i < VG_N_THREADS; i++) {
      mx = VG_(threads)[i].associated_mx;
      cv = VG_(threads)[i].associated_cv;
      if (VG_(threads)[i].status == VgTs_WaitMX) {
         /* If we're waiting on a MX: (1) the mx is not null, (2, 3)
            it's actually held by someone, since otherwise this thread
            is deadlocked, (4) the mutex's owner is not us, since
            otherwise this thread is also deadlocked. The logic in
            do_pthread_mutex_lock rejects attempts by a thread to lock
            a (non-recursive) mutex which it already owns.

            (2) has been seen to fail sometimes. I don't know why.
            Possibly to do with signals. */
         vg_assert(cv == NULL);
         /* 1 */ vg_assert(mx != NULL);
         /* 2 */ vg_assert(mx->__m_count > 0);
         /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__m_owner));
         /* 4 */ vg_assert((UInt)i != (ThreadId)mx->__m_owner);
      } else
      if (VG_(threads)[i].status == VgTs_WaitCV) {
         /* Waiting on a condvar requires both the condvar and the
            mutex to re-acquire afterwards. */
         vg_assert(cv != NULL);
         vg_assert(mx != NULL);
      } else {
         /* Unfortunately these don't hold true when a sighandler is
            running. To be fixed. */
         /* vg_assert(cv == NULL); */
         /* vg_assert(mx == NULL); */
      }

      /* Stack usage check: distance from the stack's top word down to
         the current %esp must stay within the per-thread budget
         (with a little paranoia slack).  The root thread (i == 1) is
         exempt. */
      if (VG_(threads)[i].status != VgTs_Empty) {
         Int
         stack_used = (Addr)VG_(threads)[i].stack_highest_word
                      - (Addr)VG_(threads)[i].m_esp;
         if (i > 1 /* not the root thread */
             && stack_used
                >= (VG_PTHREAD_STACK_MIN - 1000 /* paranoia */)) {
            VG_(message)(Vg_UserMsg,
               "Error: STACK OVERFLOW: "
               "thread %d: stack used %d, available %d",
               i, stack_used, VG_PTHREAD_STACK_MIN );
            VG_(message)(Vg_UserMsg,
               "Terminating Valgrind. If thread(s) "
               "really need more stack, increase");
            VG_(message)(Vg_UserMsg,
               "VG_PTHREAD_STACK_SIZE in vg_include.h and recompile.");
            VG_(exit)(1);
         }
      }
   }

   /* Unused thread-specific-data keys must carry no destructor. */
   for (i = 0; i < VG_N_THREAD_KEYS; i++) {
      if (!vg_thread_keys[i].inuse)
         vg_assert(vg_thread_keys[i].destructor == NULL);
   }
}
3361
3362
sewardje663cb92002-04-12 10:26:32 +00003363/*--------------------------------------------------------------------*/
3364/*--- end vg_scheduler.c ---*/
3365/*--------------------------------------------------------------------*/