blob: 8e9a67726171aa65a9bf78c63d0a332be4e04c91 [file] [log] [blame]
sewardje663cb92002-04-12 10:26:32 +00001
2/*--------------------------------------------------------------------*/
3/*--- A user-space pthreads implementation. vg_scheduler.c ---*/
4/*--------------------------------------------------------------------*/
5
6/*
njnc9539842002-10-02 13:26:35 +00007 This file is part of Valgrind, an extensible x86 protected-mode
8 emulator for monitoring program execution on x86-Unixes.
sewardje663cb92002-04-12 10:26:32 +00009
nethercotebb1c9912004-01-04 16:43:23 +000010 Copyright (C) 2000-2004 Julian Seward
sewardje663cb92002-04-12 10:26:32 +000011 jseward@acm.org
sewardje663cb92002-04-12 10:26:32 +000012
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
27
njn25e49d8e72002-09-23 09:36:25 +000028 The GNU General Public License is contained in the file COPYING.
sewardje663cb92002-04-12 10:26:32 +000029*/
30
njn25e49d8e72002-09-23 09:36:25 +000031#include "valgrind.h" /* for VG_USERREQ__RUNNING_ON_VALGRIND and
njn47363ab2003-04-21 13:24:40 +000032 VG_USERREQ__DISCARD_TRANSLATIONS, and others */
nethercote851b0f62003-11-13 23:02:16 +000033#include "vg_include.h"
sewardje663cb92002-04-12 10:26:32 +000034
sewardjb60c1ad2002-05-29 20:23:26 +000035/* BORKAGE/ISSUES as of 29 May 02
sewardje663cb92002-04-12 10:26:32 +000036
sewardj77e466c2002-04-14 02:29:29 +000037- Currently, when a signal is run, just the ThreadStatus.status fields
38 are saved in the signal frame, along with the CPU state. Question:
39 should I also save and restore:
40 ThreadStatus.joiner
41 ThreadStatus.waited_on_mid
42 ThreadStatus.awaken_at
43 ThreadStatus.retval
44 Currently unsure, and so am not doing so.
sewardje663cb92002-04-12 10:26:32 +000045
sewardj705d3cb2002-05-23 13:13:12 +000046- So, what's the deal with signals and mutexes? If a thread is
sewardj6072c362002-04-19 14:40:57 +000047 blocked on a mutex, or for a condition variable for that matter, can
48 signals still be delivered to it? This has serious consequences --
49 deadlocks, etc.
50
sewardjb60c1ad2002-05-29 20:23:26 +000051 TODO for valgrind-1.0:
52
sewardj055fbb82002-05-30 00:40:55 +000053- Update assertion checking in scheduler_sanity().
54
sewardjb60c1ad2002-05-29 20:23:26 +000055 TODO sometime:
56
57- Mutex scrubbing - clearup_after_thread_exit: look for threads
58 blocked on mutexes held by the exiting thread, and release them
59 appropriately. (??)
60
sewardje462e202002-04-13 04:09:07 +000061*/
sewardje663cb92002-04-12 10:26:32 +000062
63
64/* ---------------------------------------------------------------------
65 Types and globals for the scheduler.
66 ------------------------------------------------------------------ */
67
68/* type ThreadId is defined in vg_include.h. */
69
70/* struct ThreadState is defined in vg_include.h. */
71
sewardj018f7622002-05-15 21:13:39 +000072/* Globals. A statically allocated array of threads. NOTE: [0] is
73 never used, to simplify the simulation of initialisers for
sewardj6072c362002-04-19 14:40:57 +000074 LinuxThreads. */
sewardj018f7622002-05-15 21:13:39 +000075ThreadState VG_(threads)[VG_N_THREADS];
sewardje663cb92002-04-12 10:26:32 +000076
sewardj2cb00342002-06-28 01:46:26 +000077/* The process' fork-handler stack. */
78static Int vg_fhstack_used = 0;
79static ForkHandlerEntry vg_fhstack[VG_N_FORKHANDLERSTACK];
80
81
sewardj1e8cdc92002-04-18 11:37:52 +000082/* The tid of the thread currently in VG_(baseBlock). */
njn1be61612003-05-14 14:04:39 +000083static ThreadId vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;
sewardj1e8cdc92002-04-18 11:37:52 +000084
sewardjb52a1b02002-10-23 21:38:22 +000085/* The tid either currently in baseBlock, or was in baseBlock before
86 was saved it out; this is only updated when a new thread is loaded
87 into the baseBlock */
njn1be61612003-05-14 14:04:39 +000088static ThreadId vg_tid_last_in_baseBlock = VG_INVALID_THREADID;
sewardje663cb92002-04-12 10:26:32 +000089
90/* vg_oursignalhandler() might longjmp(). Here's the jmp_buf. */
91jmp_buf VG_(scheduler_jmpbuf);
sewardj872051c2002-07-13 12:12:56 +000092/* This says whether scheduler_jmpbuf is actually valid. Needed so
93 that our signal handler doesn't longjmp when the buffer isn't
94 actually valid. */
95Bool VG_(scheduler_jmpbuf_valid) = False;
sewardje663cb92002-04-12 10:26:32 +000096/* ... and if so, here's the signal which caused it to do so. */
97Int VG_(longjmpd_on_signal);
jsgf855d93d2003-10-13 22:26:55 +000098/* If the current thread gets a syncronous unresumable signal, then
99 its details are placed here by the signal handler, to be passed to
100 the applications signal handler later on. */
101vki_ksiginfo_t VG_(unresumable_siginfo);
sewardje663cb92002-04-12 10:26:32 +0000102
jsgf855d93d2003-10-13 22:26:55 +0000103/* If != VG_INVALID_THREADID, this is the preferred tid to schedule */
104static ThreadId prefer_sched = VG_INVALID_THREADID;
sewardje663cb92002-04-12 10:26:32 +0000105
sewardj5f07b662002-04-23 16:52:51 +0000106/* Keeping track of keys. */
107typedef
108 struct {
109 /* Has this key been allocated ? */
110 Bool inuse;
111 /* If .inuse==True, records the address of the associated
112 destructor, or NULL if none. */
113 void (*destructor)(void*);
114 }
115 ThreadKeyState;
116
117/* And our array of thread keys. */
118static ThreadKeyState vg_thread_keys[VG_N_THREAD_KEYS];
119
120typedef UInt ThreadKey;
121
fitzhardinge98abfc72003-12-16 02:05:15 +0000122/* The scheduler does need to know the address of it so it can be
123 called at program exit. */
124static Addr VG_(__libc_freeres_wrapper);
125
sewardj5f07b662002-04-23 16:52:51 +0000126
njnd3040452003-05-19 15:04:06 +0000127UInt VG_(syscall_altered_shadow_reg);
128UInt VG_(signal_delivery_altered_shadow_reg);
129UInt VG_(pthread_op_altered_shadow_reg);
130UInt VG_(client_request_altered_shadow_reg);
njn25e49d8e72002-09-23 09:36:25 +0000131
sewardje663cb92002-04-12 10:26:32 +0000132/* Forwards */
sewardj124ca2a2002-06-20 10:19:38 +0000133static void do_client_request ( ThreadId tid );
sewardj6072c362002-04-19 14:40:57 +0000134static void scheduler_sanity ( void );
sewardj124ca2a2002-06-20 10:19:38 +0000135static void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid );
sewardjd140e442002-05-29 01:21:19 +0000136
sewardje663cb92002-04-12 10:26:32 +0000137/* ---------------------------------------------------------------------
138 Helper functions for the scheduler.
139 ------------------------------------------------------------------ */
140
sewardjb48e5002002-05-13 00:16:03 +0000141__inline__
142Bool VG_(is_valid_tid) ( ThreadId tid )
sewardj604ec3c2002-04-18 22:38:41 +0000143{
144 /* tid is unsigned, hence no < 0 test. */
sewardj6072c362002-04-19 14:40:57 +0000145 if (tid == 0) return False;
sewardj604ec3c2002-04-18 22:38:41 +0000146 if (tid >= VG_N_THREADS) return False;
sewardj018f7622002-05-15 21:13:39 +0000147 if (VG_(threads)[tid].status == VgTs_Empty) return False;
148 return True;
149}
150
151
152__inline__
153Bool VG_(is_valid_or_empty_tid) ( ThreadId tid )
154{
155 /* tid is unsigned, hence no < 0 test. */
156 if (tid == 0) return False;
157 if (tid >= VG_N_THREADS) return False;
sewardj604ec3c2002-04-18 22:38:41 +0000158 return True;
159}
160
161
sewardj1e8cdc92002-04-18 11:37:52 +0000162/* For constructing error messages only: try and identify a thread
njn25e49d8e72002-09-23 09:36:25 +0000163 whose stack satisfies the predicate p, or return VG_INVALID_THREADID
164 if none do. A small complication is dealing with any currently
165 VG_(baseBlock)-resident thread.
sewardj1e8cdc92002-04-18 11:37:52 +0000166*/
njn43c799e2003-04-08 00:08:52 +0000167ThreadId VG_(first_matching_thread_stack)
njn25e49d8e72002-09-23 09:36:25 +0000168 ( Bool (*p) ( Addr stack_min, Addr stack_max ))
sewardj1e8cdc92002-04-18 11:37:52 +0000169{
170 ThreadId tid, tid_to_skip;
171
172 tid_to_skip = VG_INVALID_THREADID;
173
174 /* First check to see if there's a currently-loaded thread in
175 VG_(baseBlock). */
176 if (vg_tid_currently_in_baseBlock != VG_INVALID_THREADID) {
177 tid = vg_tid_currently_in_baseBlock;
njn25e49d8e72002-09-23 09:36:25 +0000178 if ( p ( VG_(baseBlock)[VGOFF_(m_esp)],
179 VG_(threads)[tid].stack_highest_word) )
sewardj1e8cdc92002-04-18 11:37:52 +0000180 return tid;
181 else
182 tid_to_skip = tid;
183 }
184
sewardj6072c362002-04-19 14:40:57 +0000185 for (tid = 1; tid < VG_N_THREADS; tid++) {
sewardj018f7622002-05-15 21:13:39 +0000186 if (VG_(threads)[tid].status == VgTs_Empty) continue;
sewardj1e8cdc92002-04-18 11:37:52 +0000187 if (tid == tid_to_skip) continue;
njn25e49d8e72002-09-23 09:36:25 +0000188 if ( p ( VG_(threads)[tid].m_esp,
189 VG_(threads)[tid].stack_highest_word) )
sewardj1e8cdc92002-04-18 11:37:52 +0000190 return tid;
191 }
192 return VG_INVALID_THREADID;
193}
194
195
sewardj15a43e12002-04-17 19:35:12 +0000196/* Print the scheduler status. */
197void VG_(pp_sched_status) ( void )
sewardje663cb92002-04-12 10:26:32 +0000198{
199 Int i;
200 VG_(printf)("\nsched status:\n");
sewardj6072c362002-04-19 14:40:57 +0000201 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +0000202 if (VG_(threads)[i].status == VgTs_Empty) continue;
sewardj15a43e12002-04-17 19:35:12 +0000203 VG_(printf)("\nThread %d: status = ", i);
sewardj018f7622002-05-15 21:13:39 +0000204 switch (VG_(threads)[i].status) {
sewardj6072c362002-04-19 14:40:57 +0000205 case VgTs_Runnable: VG_(printf)("Runnable"); break;
sewardj20917d82002-05-28 01:36:45 +0000206 case VgTs_WaitJoinee: VG_(printf)("WaitJoinee(%d)",
207 VG_(threads)[i].joiner_jee_tid);
208 break;
209 case VgTs_WaitJoiner: VG_(printf)("WaitJoiner"); break;
sewardj6072c362002-04-19 14:40:57 +0000210 case VgTs_Sleeping: VG_(printf)("Sleeping"); break;
211 case VgTs_WaitMX: VG_(printf)("WaitMX"); break;
sewardj3b5d8862002-04-20 13:53:23 +0000212 case VgTs_WaitCV: VG_(printf)("WaitCV"); break;
jsgf855d93d2003-10-13 22:26:55 +0000213 case VgTs_WaitSys: VG_(printf)("WaitSys"); break;
sewardje663cb92002-04-12 10:26:32 +0000214 default: VG_(printf)("???"); break;
215 }
sewardj3b5d8862002-04-20 13:53:23 +0000216 VG_(printf)(", associated_mx = %p, associated_cv = %p\n",
sewardj018f7622002-05-15 21:13:39 +0000217 VG_(threads)[i].associated_mx,
218 VG_(threads)[i].associated_cv );
sewardj15a43e12002-04-17 19:35:12 +0000219 VG_(pp_ExeContext)(
njn25e49d8e72002-09-23 09:36:25 +0000220 VG_(get_ExeContext2)( VG_(threads)[i].m_eip, VG_(threads)[i].m_ebp,
221 VG_(threads)[i].m_esp,
222 VG_(threads)[i].stack_highest_word)
223 );
sewardje663cb92002-04-12 10:26:32 +0000224 }
225 VG_(printf)("\n");
226}
227
sewardje663cb92002-04-12 10:26:32 +0000228
229
230static
231void print_sched_event ( ThreadId tid, Char* what )
232{
sewardj45b4b372002-04-16 22:50:32 +0000233 VG_(message)(Vg_DebugMsg, " SCHED[%d]: %s", tid, what );
sewardj8937c812002-04-12 20:12:20 +0000234}
235
236
237static
238void print_pthread_event ( ThreadId tid, Char* what )
239{
240 VG_(message)(Vg_DebugMsg, "PTHREAD[%d]: %s", tid, what );
sewardje663cb92002-04-12 10:26:32 +0000241}
242
243
244static
245Char* name_of_sched_event ( UInt event )
246{
247 switch (event) {
sewardje663cb92002-04-12 10:26:32 +0000248 case VG_TRC_EBP_JMP_SYSCALL: return "SYSCALL";
249 case VG_TRC_EBP_JMP_CLIENTREQ: return "CLIENTREQ";
fitzhardingea02f8812003-12-18 09:06:09 +0000250 case VG_TRC_EBP_JMP_YIELD: return "YIELD";
sewardje663cb92002-04-12 10:26:32 +0000251 case VG_TRC_INNER_COUNTERZERO: return "COUNTERZERO";
252 case VG_TRC_INNER_FASTMISS: return "FASTMISS";
253 case VG_TRC_UNRESUMABLE_SIGNAL: return "FATALSIGNAL";
254 default: return "??UNKNOWN??";
255 }
256}
257
258
259/* Create a translation of the client basic block beginning at
260 orig_addr, and add it to the translation cache & translation table.
261 This probably doesn't really belong here, but, hey ...
262*/
sewardj1e8cdc92002-04-18 11:37:52 +0000263static
264void create_translation_for ( ThreadId tid, Addr orig_addr )
sewardje663cb92002-04-12 10:26:32 +0000265{
sewardj22854b92002-11-30 14:00:47 +0000266 Addr trans_addr;
267 Int orig_size, trans_size;
268 UShort jumps[VG_MAX_JUMPS];
269 Int i;
270
271 for(i = 0; i < VG_MAX_JUMPS; i++)
272 jumps[i] = (UShort)-1;
sewardj6c3769f2002-11-29 01:02:45 +0000273
274 /* Make a translation, into temporary storage. */
njn72718642003-07-24 08:45:32 +0000275 VG_(translate)( tid, orig_addr, /* in */
276 &orig_size, &trans_addr, &trans_size, jumps ); /* out */
sewardj6c3769f2002-11-29 01:02:45 +0000277
278 /* Copy data at trans_addr into the translation cache. */
sewardje663cb92002-04-12 10:26:32 +0000279 /* Since the .orig_size and .trans_size fields are
280 UShort, be paranoid. */
281 vg_assert(orig_size > 0 && orig_size < 65536);
282 vg_assert(trans_size > 0 && trans_size < 65536);
sewardj6c3769f2002-11-29 01:02:45 +0000283
sewardj22854b92002-11-30 14:00:47 +0000284 VG_(add_to_trans_tab)( orig_addr, orig_size, trans_addr, trans_size, jumps );
sewardj6c3769f2002-11-29 01:02:45 +0000285
sewardje663cb92002-04-12 10:26:32 +0000286 /* Free the intermediary -- was allocated by VG_(emit_code). */
njn25e49d8e72002-09-23 09:36:25 +0000287 VG_(arena_free)( VG_AR_JITTER, (void*)trans_addr );
sewardje663cb92002-04-12 10:26:32 +0000288}
289
290
291/* Allocate a completely empty ThreadState record. */
292static
293ThreadId vg_alloc_ThreadState ( void )
294{
295 Int i;
sewardj6072c362002-04-19 14:40:57 +0000296 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +0000297 if (VG_(threads)[i].status == VgTs_Empty)
sewardje663cb92002-04-12 10:26:32 +0000298 return i;
299 }
300 VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
301 VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
njne427a662002-10-02 11:08:25 +0000302 VG_(core_panic)("VG_N_THREADS is too low");
sewardje663cb92002-04-12 10:26:32 +0000303 /*NOTREACHED*/
304}
305
jsgf855d93d2003-10-13 22:26:55 +0000306ThreadState *VG_(get_ThreadState)(ThreadId tid)
307{
308 vg_assert(tid >= 0 && tid < VG_N_THREADS);
309 return &VG_(threads)[tid];
310}
311
njn72718642003-07-24 08:45:32 +0000312Bool VG_(is_running_thread)(ThreadId tid)
njn25e49d8e72002-09-23 09:36:25 +0000313{
njn72718642003-07-24 08:45:32 +0000314 ThreadId curr = VG_(get_current_tid)();
315 return (curr == tid && VG_INVALID_THREADID != tid);
njn25e49d8e72002-09-23 09:36:25 +0000316}
sewardje663cb92002-04-12 10:26:32 +0000317
sewardj1e8cdc92002-04-18 11:37:52 +0000318ThreadId VG_(get_current_tid) ( void )
319{
sewardjb52a1b02002-10-23 21:38:22 +0000320 if (!VG_(is_valid_tid)(vg_tid_currently_in_baseBlock))
321 return VG_INVALID_THREADID;
sewardj1e8cdc92002-04-18 11:37:52 +0000322 return vg_tid_currently_in_baseBlock;
323}
324
sewardjb52a1b02002-10-23 21:38:22 +0000325ThreadId VG_(get_current_or_recent_tid) ( void )
njn25e49d8e72002-09-23 09:36:25 +0000326{
sewardjb52a1b02002-10-23 21:38:22 +0000327 vg_assert(vg_tid_currently_in_baseBlock == vg_tid_last_in_baseBlock ||
328 vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);
329 vg_assert(VG_(is_valid_tid)(vg_tid_last_in_baseBlock));
330
331 return vg_tid_last_in_baseBlock;
njn25e49d8e72002-09-23 09:36:25 +0000332}
333
sewardje663cb92002-04-12 10:26:32 +0000334/* Copy the saved state of a thread into VG_(baseBlock), ready for it
335 to be run. */
sewardje663cb92002-04-12 10:26:32 +0000336void VG_(load_thread_state) ( ThreadId tid )
337{
338 Int i;
sewardj1e8cdc92002-04-18 11:37:52 +0000339 vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);
340
sewardj92a59562002-09-30 00:53:10 +0000341 VG_(baseBlock)[VGOFF_(ldt)] = (UInt)VG_(threads)[tid].ldt;
fitzhardinge47735af2004-01-21 01:27:27 +0000342 VG_(baseBlock)[VGOFF_(tls)] = (UInt)VG_(threads)[tid].tls;
sewardj92a59562002-09-30 00:53:10 +0000343 VG_(baseBlock)[VGOFF_(m_cs)] = VG_(threads)[tid].m_cs;
344 VG_(baseBlock)[VGOFF_(m_ss)] = VG_(threads)[tid].m_ss;
345 VG_(baseBlock)[VGOFF_(m_ds)] = VG_(threads)[tid].m_ds;
346 VG_(baseBlock)[VGOFF_(m_es)] = VG_(threads)[tid].m_es;
347 VG_(baseBlock)[VGOFF_(m_fs)] = VG_(threads)[tid].m_fs;
348 VG_(baseBlock)[VGOFF_(m_gs)] = VG_(threads)[tid].m_gs;
349
sewardj018f7622002-05-15 21:13:39 +0000350 VG_(baseBlock)[VGOFF_(m_eax)] = VG_(threads)[tid].m_eax;
351 VG_(baseBlock)[VGOFF_(m_ebx)] = VG_(threads)[tid].m_ebx;
352 VG_(baseBlock)[VGOFF_(m_ecx)] = VG_(threads)[tid].m_ecx;
353 VG_(baseBlock)[VGOFF_(m_edx)] = VG_(threads)[tid].m_edx;
354 VG_(baseBlock)[VGOFF_(m_esi)] = VG_(threads)[tid].m_esi;
355 VG_(baseBlock)[VGOFF_(m_edi)] = VG_(threads)[tid].m_edi;
356 VG_(baseBlock)[VGOFF_(m_ebp)] = VG_(threads)[tid].m_ebp;
357 VG_(baseBlock)[VGOFF_(m_esp)] = VG_(threads)[tid].m_esp;
sewardjb91ae7f2003-04-29 23:50:00 +0000358 VG_(baseBlock)[VGOFF_(m_eflags)]
359 = VG_(threads)[tid].m_eflags & ~EFlagD;
360 VG_(baseBlock)[VGOFF_(m_dflag)]
361 = VG_(extractDflag)(VG_(threads)[tid].m_eflags);
sewardj018f7622002-05-15 21:13:39 +0000362 VG_(baseBlock)[VGOFF_(m_eip)] = VG_(threads)[tid].m_eip;
sewardje663cb92002-04-12 10:26:32 +0000363
sewardjb91ae7f2003-04-29 23:50:00 +0000364 for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
365 VG_(baseBlock)[VGOFF_(m_ssestate) + i]
366 = VG_(threads)[tid].m_sse[i];
sewardje663cb92002-04-12 10:26:32 +0000367
njn25e49d8e72002-09-23 09:36:25 +0000368 if (VG_(needs).shadow_regs) {
369 VG_(baseBlock)[VGOFF_(sh_eax)] = VG_(threads)[tid].sh_eax;
370 VG_(baseBlock)[VGOFF_(sh_ebx)] = VG_(threads)[tid].sh_ebx;
371 VG_(baseBlock)[VGOFF_(sh_ecx)] = VG_(threads)[tid].sh_ecx;
372 VG_(baseBlock)[VGOFF_(sh_edx)] = VG_(threads)[tid].sh_edx;
373 VG_(baseBlock)[VGOFF_(sh_esi)] = VG_(threads)[tid].sh_esi;
374 VG_(baseBlock)[VGOFF_(sh_edi)] = VG_(threads)[tid].sh_edi;
375 VG_(baseBlock)[VGOFF_(sh_ebp)] = VG_(threads)[tid].sh_ebp;
376 VG_(baseBlock)[VGOFF_(sh_esp)] = VG_(threads)[tid].sh_esp;
377 VG_(baseBlock)[VGOFF_(sh_eflags)] = VG_(threads)[tid].sh_eflags;
378 } else {
379 /* Fields shouldn't be used -- check their values haven't changed. */
njn25e49d8e72002-09-23 09:36:25 +0000380 vg_assert(
381 VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_eax &&
382 VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_ebx &&
383 VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_ecx &&
384 VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_edx &&
385 VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_esi &&
386 VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_edi &&
387 VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_ebp &&
388 VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_esp &&
389 VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_eflags);
390 }
sewardj1e8cdc92002-04-18 11:37:52 +0000391
392 vg_tid_currently_in_baseBlock = tid;
sewardjb52a1b02002-10-23 21:38:22 +0000393 vg_tid_last_in_baseBlock = tid;
sewardje663cb92002-04-12 10:26:32 +0000394}
395
396
397/* Copy the state of a thread from VG_(baseBlock), presumably after it
398 has been descheduled. For sanity-check purposes, fill the vacated
399 VG_(baseBlock) with garbage so as to make the system more likely to
400 fail quickly if we erroneously continue to poke around inside
401 VG_(baseBlock) without first doing a load_thread_state().
402*/
sewardje663cb92002-04-12 10:26:32 +0000403void VG_(save_thread_state) ( ThreadId tid )
404{
405 Int i;
406 const UInt junk = 0xDEADBEEF;
407
sewardj1e8cdc92002-04-18 11:37:52 +0000408 vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);
409
sewardj92a59562002-09-30 00:53:10 +0000410
411 /* We don't copy out the LDT entry, because it can never be changed
412 by the normal actions of the thread, only by the modify_ldt
413 syscall, in which case we will correctly be updating
sewardjfb5e5272002-12-08 23:27:21 +0000414 VG_(threads)[tid].ldt. This printf happens iff the following
415 assertion fails. */
sewardjca340b32002-12-08 22:14:11 +0000416 if ((void*)VG_(threads)[tid].ldt != (void*)VG_(baseBlock)[VGOFF_(ldt)])
417 VG_(printf)("VG_(threads)[%d].ldt=%p VG_(baseBlock)[VGOFF_(ldt)]=%p\n",
sewardjfb5e5272002-12-08 23:27:21 +0000418 tid, (void*)VG_(threads)[tid].ldt,
419 (void*)VG_(baseBlock)[VGOFF_(ldt)]);
sewardjca340b32002-12-08 22:14:11 +0000420
sewardj92a59562002-09-30 00:53:10 +0000421 vg_assert((void*)VG_(threads)[tid].ldt
422 == (void*)VG_(baseBlock)[VGOFF_(ldt)]);
423
fitzhardinge47735af2004-01-21 01:27:27 +0000424 /* We don't copy out the TLS entry, because it can never be changed
425 by the normal actions of the thread, only by the set_thread_area
426 syscall, in which case we will correctly be updating
427 VG_(threads)[tid].tls. This printf happens iff the following
428 assertion fails. */
429 if ((void*)VG_(threads)[tid].tls != (void*)VG_(baseBlock)[VGOFF_(tls)])
430 VG_(printf)("VG_(threads)[%d].tls=%p VG_(baseBlock)[VGOFF_(tls)]=%p\n",
431 tid, (void*)VG_(threads)[tid].tls,
432 (void*)VG_(baseBlock)[VGOFF_(tls)]);
433
434 vg_assert((void*)VG_(threads)[tid].tls
435 == (void*)VG_(baseBlock)[VGOFF_(tls)]);
436
sewardj92a59562002-09-30 00:53:10 +0000437 VG_(threads)[tid].m_cs = VG_(baseBlock)[VGOFF_(m_cs)];
438 VG_(threads)[tid].m_ss = VG_(baseBlock)[VGOFF_(m_ss)];
439 VG_(threads)[tid].m_ds = VG_(baseBlock)[VGOFF_(m_ds)];
440 VG_(threads)[tid].m_es = VG_(baseBlock)[VGOFF_(m_es)];
441 VG_(threads)[tid].m_fs = VG_(baseBlock)[VGOFF_(m_fs)];
442 VG_(threads)[tid].m_gs = VG_(baseBlock)[VGOFF_(m_gs)];
443
sewardj018f7622002-05-15 21:13:39 +0000444 VG_(threads)[tid].m_eax = VG_(baseBlock)[VGOFF_(m_eax)];
445 VG_(threads)[tid].m_ebx = VG_(baseBlock)[VGOFF_(m_ebx)];
446 VG_(threads)[tid].m_ecx = VG_(baseBlock)[VGOFF_(m_ecx)];
447 VG_(threads)[tid].m_edx = VG_(baseBlock)[VGOFF_(m_edx)];
448 VG_(threads)[tid].m_esi = VG_(baseBlock)[VGOFF_(m_esi)];
449 VG_(threads)[tid].m_edi = VG_(baseBlock)[VGOFF_(m_edi)];
450 VG_(threads)[tid].m_ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
451 VG_(threads)[tid].m_esp = VG_(baseBlock)[VGOFF_(m_esp)];
sewardjb91ae7f2003-04-29 23:50:00 +0000452 VG_(threads)[tid].m_eflags
453 = VG_(insertDflag)(VG_(baseBlock)[VGOFF_(m_eflags)],
454 VG_(baseBlock)[VGOFF_(m_dflag)]);
sewardj018f7622002-05-15 21:13:39 +0000455 VG_(threads)[tid].m_eip = VG_(baseBlock)[VGOFF_(m_eip)];
sewardje663cb92002-04-12 10:26:32 +0000456
sewardjb91ae7f2003-04-29 23:50:00 +0000457 for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
458 VG_(threads)[tid].m_sse[i]
459 = VG_(baseBlock)[VGOFF_(m_ssestate) + i];
sewardje663cb92002-04-12 10:26:32 +0000460
njn25e49d8e72002-09-23 09:36:25 +0000461 if (VG_(needs).shadow_regs) {
462 VG_(threads)[tid].sh_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
463 VG_(threads)[tid].sh_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
464 VG_(threads)[tid].sh_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
465 VG_(threads)[tid].sh_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
466 VG_(threads)[tid].sh_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
467 VG_(threads)[tid].sh_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
468 VG_(threads)[tid].sh_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
469 VG_(threads)[tid].sh_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
470 VG_(threads)[tid].sh_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
471 } else {
472 /* Fill with recognisable junk */
473 VG_(threads)[tid].sh_eax =
474 VG_(threads)[tid].sh_ebx =
475 VG_(threads)[tid].sh_ecx =
476 VG_(threads)[tid].sh_edx =
477 VG_(threads)[tid].sh_esi =
478 VG_(threads)[tid].sh_edi =
479 VG_(threads)[tid].sh_ebp =
480 VG_(threads)[tid].sh_esp =
481 VG_(threads)[tid].sh_eflags = VG_UNUSED_SHADOW_REG_VALUE;
482 }
sewardje663cb92002-04-12 10:26:32 +0000483
484 /* Fill it up with junk. */
sewardj92a59562002-09-30 00:53:10 +0000485 VG_(baseBlock)[VGOFF_(ldt)] = junk;
fitzhardinge47735af2004-01-21 01:27:27 +0000486 VG_(baseBlock)[VGOFF_(tls)] = junk;
sewardj92a59562002-09-30 00:53:10 +0000487 VG_(baseBlock)[VGOFF_(m_cs)] = junk;
488 VG_(baseBlock)[VGOFF_(m_ss)] = junk;
489 VG_(baseBlock)[VGOFF_(m_ds)] = junk;
490 VG_(baseBlock)[VGOFF_(m_es)] = junk;
491 VG_(baseBlock)[VGOFF_(m_fs)] = junk;
492 VG_(baseBlock)[VGOFF_(m_gs)] = junk;
493
sewardje663cb92002-04-12 10:26:32 +0000494 VG_(baseBlock)[VGOFF_(m_eax)] = junk;
495 VG_(baseBlock)[VGOFF_(m_ebx)] = junk;
496 VG_(baseBlock)[VGOFF_(m_ecx)] = junk;
497 VG_(baseBlock)[VGOFF_(m_edx)] = junk;
498 VG_(baseBlock)[VGOFF_(m_esi)] = junk;
499 VG_(baseBlock)[VGOFF_(m_edi)] = junk;
500 VG_(baseBlock)[VGOFF_(m_ebp)] = junk;
501 VG_(baseBlock)[VGOFF_(m_esp)] = junk;
502 VG_(baseBlock)[VGOFF_(m_eflags)] = junk;
503 VG_(baseBlock)[VGOFF_(m_eip)] = junk;
504
sewardjb91ae7f2003-04-29 23:50:00 +0000505 for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
506 VG_(baseBlock)[VGOFF_(m_ssestate) + i] = junk;
sewardj1e8cdc92002-04-18 11:37:52 +0000507
508 vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;
sewardje663cb92002-04-12 10:26:32 +0000509}
510
511
512/* Run the thread tid for a while, and return a VG_TRC_* value to the
513 scheduler indicating what happened. */
sewardj6072c362002-04-19 14:40:57 +0000514static
sewardje663cb92002-04-12 10:26:32 +0000515UInt run_thread_for_a_while ( ThreadId tid )
516{
sewardj7ccc5c22002-04-24 21:39:11 +0000517 volatile UInt trc = 0;
sewardjb48e5002002-05-13 00:16:03 +0000518 vg_assert(VG_(is_valid_tid)(tid));
sewardj018f7622002-05-15 21:13:39 +0000519 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +0000520 vg_assert(VG_(bbs_to_go) > 0);
sewardj872051c2002-07-13 12:12:56 +0000521 vg_assert(!VG_(scheduler_jmpbuf_valid));
sewardje663cb92002-04-12 10:26:32 +0000522
sewardj671ff542002-05-07 09:25:30 +0000523 VGP_PUSHCC(VgpRun);
sewardje663cb92002-04-12 10:26:32 +0000524 VG_(load_thread_state) ( tid );
jsgf855d93d2003-10-13 22:26:55 +0000525
526 /* there should be no undealt-with signals */
527 vg_assert(VG_(unresumable_siginfo).si_signo == 0);
528
sewardje663cb92002-04-12 10:26:32 +0000529 if (__builtin_setjmp(VG_(scheduler_jmpbuf)) == 0) {
530 /* try this ... */
sewardj872051c2002-07-13 12:12:56 +0000531 VG_(scheduler_jmpbuf_valid) = True;
sewardje663cb92002-04-12 10:26:32 +0000532 trc = VG_(run_innerloop)();
sewardj872051c2002-07-13 12:12:56 +0000533 VG_(scheduler_jmpbuf_valid) = False;
sewardje663cb92002-04-12 10:26:32 +0000534 /* We get here if the client didn't take a fault. */
535 } else {
536 /* We get here if the client took a fault, which caused our
537 signal handler to longjmp. */
sewardj872051c2002-07-13 12:12:56 +0000538 VG_(scheduler_jmpbuf_valid) = False;
sewardje663cb92002-04-12 10:26:32 +0000539 vg_assert(trc == 0);
540 trc = VG_TRC_UNRESUMABLE_SIGNAL;
541 }
sewardj872051c2002-07-13 12:12:56 +0000542
543 vg_assert(!VG_(scheduler_jmpbuf_valid));
544
sewardje663cb92002-04-12 10:26:32 +0000545 VG_(save_thread_state) ( tid );
njn25e49d8e72002-09-23 09:36:25 +0000546 VGP_POPCC(VgpRun);
sewardje663cb92002-04-12 10:26:32 +0000547 return trc;
548}
549
550
sewardj20917d82002-05-28 01:36:45 +0000551static
552void mostly_clear_thread_record ( ThreadId tid )
553{
sewardj20917d82002-05-28 01:36:45 +0000554 vg_assert(tid >= 0 && tid < VG_N_THREADS);
sewardj92a59562002-09-30 00:53:10 +0000555 VG_(threads)[tid].ldt = NULL;
fitzhardinge47735af2004-01-21 01:27:27 +0000556 VG_(clear_TLS_for_thread)(VG_(threads)[tid].tls);
sewardj20917d82002-05-28 01:36:45 +0000557 VG_(threads)[tid].tid = tid;
558 VG_(threads)[tid].status = VgTs_Empty;
559 VG_(threads)[tid].associated_mx = NULL;
560 VG_(threads)[tid].associated_cv = NULL;
561 VG_(threads)[tid].awaken_at = 0;
562 VG_(threads)[tid].joinee_retval = NULL;
563 VG_(threads)[tid].joiner_thread_return = NULL;
564 VG_(threads)[tid].joiner_jee_tid = VG_INVALID_THREADID;
sewardj8ad94e12002-05-29 00:10:20 +0000565 VG_(threads)[tid].detached = False;
sewardj20917d82002-05-28 01:36:45 +0000566 VG_(threads)[tid].cancel_st = True; /* PTHREAD_CANCEL_ENABLE */
567 VG_(threads)[tid].cancel_ty = True; /* PTHREAD_CANCEL_DEFERRED */
568 VG_(threads)[tid].cancel_pend = NULL; /* not pending */
sewardj8ad94e12002-05-29 00:10:20 +0000569 VG_(threads)[tid].custack_used = 0;
sewardj20917d82002-05-28 01:36:45 +0000570 VG_(ksigemptyset)(&VG_(threads)[tid].sig_mask);
jsgf855d93d2003-10-13 22:26:55 +0000571 VG_(ksigfillset)(&VG_(threads)[tid].eff_sig_mask);
sewardj00a66b12002-10-12 16:42:35 +0000572 VG_(threads)[tid].specifics_ptr = NULL;
jsgf855d93d2003-10-13 22:26:55 +0000573
574 VG_(threads)[tid].syscallno = -1;
575 VG_(threads)[tid].sys_pre_res = NULL;
576
577 VG_(threads)[tid].proxy = NULL;
sewardj20917d82002-05-28 01:36:45 +0000578}
579
580
jsgf855d93d2003-10-13 22:26:55 +0000581
sewardje663cb92002-04-12 10:26:32 +0000582/* Initialise the scheduler. Create a single "main" thread ready to
sewardj6072c362002-04-19 14:40:57 +0000583 run, with special ThreadId of one. This is called at startup; the
nethercote71980f02004-01-24 18:18:54 +0000584 caller takes care to park the client's state in VG_(baseBlock).
sewardje663cb92002-04-12 10:26:32 +0000585*/
586void VG_(scheduler_init) ( void )
587{
588 Int i;
sewardje663cb92002-04-12 10:26:32 +0000589 ThreadId tid_main;
590
sewardj6072c362002-04-19 14:40:57 +0000591 for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
sewardj20917d82002-05-28 01:36:45 +0000592 mostly_clear_thread_record(i);
593 VG_(threads)[i].stack_size = 0;
594 VG_(threads)[i].stack_base = (Addr)NULL;
595 VG_(threads)[i].stack_highest_word = (Addr)NULL;
sewardje663cb92002-04-12 10:26:32 +0000596 }
597
sewardj5f07b662002-04-23 16:52:51 +0000598 for (i = 0; i < VG_N_THREAD_KEYS; i++) {
599 vg_thread_keys[i].inuse = False;
600 vg_thread_keys[i].destructor = NULL;
601 }
602
sewardj2cb00342002-06-28 01:46:26 +0000603 vg_fhstack_used = 0;
604
sewardje663cb92002-04-12 10:26:32 +0000605 /* Assert this is thread zero, which has certain magic
606 properties. */
607 tid_main = vg_alloc_ThreadState();
sewardj6072c362002-04-19 14:40:57 +0000608 vg_assert(tid_main == 1);
sewardj20917d82002-05-28 01:36:45 +0000609 VG_(threads)[tid_main].status = VgTs_Runnable;
sewardje663cb92002-04-12 10:26:32 +0000610
611 /* Copy VG_(baseBlock) state to tid_main's slot. */
sewardj1e8cdc92002-04-18 11:37:52 +0000612 vg_tid_currently_in_baseBlock = tid_main;
sewardjb52a1b02002-10-23 21:38:22 +0000613 vg_tid_last_in_baseBlock = tid_main;
fitzhardinge47735af2004-01-21 01:27:27 +0000614 VG_(baseBlock)[VGOFF_(tls)] = (UInt)VG_(threads)[tid_main].tls;
sewardje663cb92002-04-12 10:26:32 +0000615 VG_(save_thread_state) ( tid_main );
sewardj1e8cdc92002-04-18 11:37:52 +0000616
sewardj018f7622002-05-15 21:13:39 +0000617 VG_(threads)[tid_main].stack_highest_word
fitzhardinge98abfc72003-12-16 02:05:15 +0000618 = VG_(clstk_end) - 4;
619 VG_(threads)[tid_main].stack_base = VG_(clstk_base);
620 VG_(threads)[tid_main].stack_size = VG_(clstk_end) - VG_(clstk_base);
sewardjbf290b92002-05-01 02:28:01 +0000621
sewardj1e8cdc92002-04-18 11:37:52 +0000622 /* So now ... */
623 vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);
sewardj872051c2002-07-13 12:12:56 +0000624
625 /* Not running client code right now. */
626 VG_(scheduler_jmpbuf_valid) = False;
jsgf855d93d2003-10-13 22:26:55 +0000627
628 /* Proxy for main thread */
629 VG_(proxy_create)(tid_main);
sewardje663cb92002-04-12 10:26:32 +0000630}
631
632
sewardj3947e622002-05-23 16:52:11 +0000633
sewardje663cb92002-04-12 10:26:32 +0000634
635
sewardj6072c362002-04-19 14:40:57 +0000636/* vthread tid is returning from a signal handler; modify its
637 stack/regs accordingly. */
sewardj1ffa8da2002-04-26 22:47:57 +0000638
sewardj6072c362002-04-19 14:40:57 +0000639static
640void handle_signal_return ( ThreadId tid )
641{
sewardj6072c362002-04-19 14:40:57 +0000642 Bool restart_blocked_syscalls;
sewardj645030e2002-06-06 01:27:39 +0000643 struct vki_timespec * rem;
sewardj6072c362002-04-19 14:40:57 +0000644
sewardjb48e5002002-05-13 00:16:03 +0000645 vg_assert(VG_(is_valid_tid)(tid));
sewardj6072c362002-04-19 14:40:57 +0000646
647 restart_blocked_syscalls = VG_(signal_returns)(tid);
648
649 if (restart_blocked_syscalls)
650 /* Easy; we don't have to do anything. */
651 return;
652
sewardj645030e2002-06-06 01:27:39 +0000653 if (VG_(threads)[tid].status == VgTs_Sleeping
sewardj018f7622002-05-15 21:13:39 +0000654 && VG_(threads)[tid].m_eax == __NR_nanosleep) {
sewardj6072c362002-04-19 14:40:57 +0000655 /* We interrupted a nanosleep(). The right thing to do is to
sewardj645030e2002-06-06 01:27:39 +0000656 write the unused time to nanosleep's second param, but that's
657 too much effort ... we just say that 1 nanosecond was not
658 used, and return EINTR. */
659 rem = (struct vki_timespec *)VG_(threads)[tid].m_ecx; /* arg2 */
660 if (rem != NULL) {
661 rem->tv_sec = 0;
662 rem->tv_nsec = 1;
663 }
njnd3040452003-05-19 15:04:06 +0000664 SET_SYSCALL_RETVAL(tid, -VKI_EINTR);
sewardj645030e2002-06-06 01:27:39 +0000665 VG_(threads)[tid].status = VgTs_Runnable;
sewardj6072c362002-04-19 14:40:57 +0000666 return;
667 }
668
669 /* All other cases? Just return. */
670}
671
672
sewardje663cb92002-04-12 10:26:32 +0000673static
674void sched_do_syscall ( ThreadId tid )
675{
jsgf855d93d2003-10-13 22:26:55 +0000676 Int syscall_no;
njn25e49d8e72002-09-23 09:36:25 +0000677 Char msg_buf[100];
sewardje663cb92002-04-12 10:26:32 +0000678
sewardjb48e5002002-05-13 00:16:03 +0000679 vg_assert(VG_(is_valid_tid)(tid));
sewardj018f7622002-05-15 21:13:39 +0000680 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +0000681
sewardj018f7622002-05-15 21:13:39 +0000682 syscall_no = VG_(threads)[tid].m_eax; /* syscall number */
sewardje663cb92002-04-12 10:26:32 +0000683
jsgf855d93d2003-10-13 22:26:55 +0000684 /* Special-case nanosleep because we can. But should we?
685
686 XXX not doing so for now, because it doesn't seem to work
687 properly, and we can use the syscall nanosleep just as easily.
688 */
689 if (0 && syscall_no == __NR_nanosleep) {
sewardj5f07b662002-04-23 16:52:51 +0000690 UInt t_now, t_awaken;
sewardje663cb92002-04-12 10:26:32 +0000691 struct vki_timespec* req;
sewardj018f7622002-05-15 21:13:39 +0000692 req = (struct vki_timespec*)VG_(threads)[tid].m_ebx; /* arg1 */
jsgf855d93d2003-10-13 22:26:55 +0000693
694 if (req->tv_sec < 0 || req->tv_nsec < 0 || req->tv_nsec >= 1000000000) {
695 SET_SYSCALL_RETVAL(tid, -VKI_EINVAL);
696 return;
697 }
698
sewardj5f07b662002-04-23 16:52:51 +0000699 t_now = VG_(read_millisecond_timer)();
sewardje663cb92002-04-12 10:26:32 +0000700 t_awaken
701 = t_now
sewardj5f07b662002-04-23 16:52:51 +0000702 + (UInt)1000ULL * (UInt)(req->tv_sec)
703 + (UInt)(req->tv_nsec) / 1000000;
sewardj018f7622002-05-15 21:13:39 +0000704 VG_(threads)[tid].status = VgTs_Sleeping;
705 VG_(threads)[tid].awaken_at = t_awaken;
sewardj8937c812002-04-12 20:12:20 +0000706 if (VG_(clo_trace_sched)) {
sewardj5f07b662002-04-23 16:52:51 +0000707 VG_(sprintf)(msg_buf, "at %d: nanosleep for %d",
sewardje663cb92002-04-12 10:26:32 +0000708 t_now, t_awaken-t_now);
709 print_sched_event(tid, msg_buf);
710 }
jsgf855d93d2003-10-13 22:26:55 +0000711 VG_(add_timeout)(tid, t_awaken);
sewardje663cb92002-04-12 10:26:32 +0000712 /* Force the scheduler to run something else for a while. */
713 return;
714 }
715
jsgf855d93d2003-10-13 22:26:55 +0000716 /* If pre_syscall returns true, then we're done immediately */
717 if (VG_(pre_syscall)(tid)) {
fitzhardinge31ba9052004-01-16 02:15:23 +0000718 VG_(post_syscall(tid, True));
sewardj3947e622002-05-23 16:52:11 +0000719 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +0000720 } else {
jsgf855d93d2003-10-13 22:26:55 +0000721 vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
sewardje663cb92002-04-12 10:26:32 +0000722 }
723}
724
725
sewardje663cb92002-04-12 10:26:32 +0000726
jsgf855d93d2003-10-13 22:26:55 +0000727struct timeout {
728 UInt time; /* time we should awaken */
729 ThreadId tid; /* thread which cares about this timeout */
730 struct timeout *next;
731};
sewardje663cb92002-04-12 10:26:32 +0000732
jsgf855d93d2003-10-13 22:26:55 +0000733static struct timeout *timeouts;
734
735void VG_(add_timeout)(ThreadId tid, UInt time)
sewardje663cb92002-04-12 10:26:32 +0000736{
jsgf855d93d2003-10-13 22:26:55 +0000737 struct timeout *t = VG_(arena_malloc)(VG_AR_CORE, sizeof(*t));
738 struct timeout **prev, *tp;
sewardje663cb92002-04-12 10:26:32 +0000739
jsgf855d93d2003-10-13 22:26:55 +0000740 t->time = time;
741 t->tid = tid;
sewardje462e202002-04-13 04:09:07 +0000742
jsgf855d93d2003-10-13 22:26:55 +0000743 if (VG_(clo_trace_sched)) {
744 Char msg_buf[100];
745 VG_(sprintf)(msg_buf, "add_timeout: now=%u adding timeout at %u",
746 VG_(read_millisecond_timer)(), time);
747 print_sched_event(tid, msg_buf);
748 }
sewardj6072c362002-04-19 14:40:57 +0000749
jsgf855d93d2003-10-13 22:26:55 +0000750 for(tp = timeouts, prev = &timeouts;
751 tp != NULL && tp->time < time;
752 prev = &tp->next, tp = tp->next)
753 ;
754 t->next = tp;
755 *prev = t;
756}
757
758/* Sleep for a while, but be willing to be woken. */
759static
760void idle ( void )
761{
762 struct vki_pollfd pollfd[1];
763 Int delta = -1;
764 Int fd = VG_(proxy_resfd)();
765
766 pollfd[0].fd = fd;
767 pollfd[0].events = VKI_POLLIN;
768
769 /* Look though the nearest timeouts, looking for the next future
770 one (there may be stale past timeouts). They'll all be mopped
771 below up when the poll() finishes. */
772 if (timeouts != NULL) {
773 struct timeout *tp;
774 Bool wicked = False;
775 UInt now = VG_(read_millisecond_timer)();
776
777 for(tp = timeouts; tp != NULL && tp->time < now; tp = tp->next) {
778 /* If a thread is still sleeping in the past, make it runnable */
779 ThreadState *tst = VG_(get_ThreadState)(tp->tid);
780 if (tst->status == VgTs_Sleeping)
781 tst->status = VgTs_Runnable;
782 wicked = True; /* no sleep for the wicked */
sewardje663cb92002-04-12 10:26:32 +0000783 }
sewardje663cb92002-04-12 10:26:32 +0000784
jsgf855d93d2003-10-13 22:26:55 +0000785 if (tp != NULL) {
786 delta = tp->time - now;
787 vg_assert(delta >= 0);
sewardje663cb92002-04-12 10:26:32 +0000788 }
jsgf855d93d2003-10-13 22:26:55 +0000789 if (wicked)
790 delta = 0;
sewardje663cb92002-04-12 10:26:32 +0000791 }
792
jsgf855d93d2003-10-13 22:26:55 +0000793 /* gotta wake up for something! */
794 vg_assert(fd != -1 || delta != -1);
sewardje462e202002-04-13 04:09:07 +0000795
jsgf855d93d2003-10-13 22:26:55 +0000796 /* If we need to do signal routing, then poll for pending signals
797 every VG_(clo_signal_polltime) mS */
798 if (VG_(do_signal_routing) && (delta > VG_(clo_signal_polltime) || delta == -1))
799 delta = VG_(clo_signal_polltime);
sewardje663cb92002-04-12 10:26:32 +0000800
jsgf855d93d2003-10-13 22:26:55 +0000801 if (VG_(clo_trace_sched)) {
802 Char msg_buf[100];
803 VG_(sprintf)(msg_buf, "idle: waiting for %dms and fd %d",
804 delta, fd);
805 print_sched_event(0, msg_buf);
sewardje663cb92002-04-12 10:26:32 +0000806 }
sewardje663cb92002-04-12 10:26:32 +0000807
jsgf855d93d2003-10-13 22:26:55 +0000808 VG_(poll)(pollfd, fd != -1 ? 1 : 0, delta);
sewardje663cb92002-04-12 10:26:32 +0000809
jsgf855d93d2003-10-13 22:26:55 +0000810 /* See if there's anything on the timeout list which needs
811 waking, and mop up anything in the past. */
812 {
813 UInt now = VG_(read_millisecond_timer)();
814 struct timeout *tp;
sewardje663cb92002-04-12 10:26:32 +0000815
jsgf855d93d2003-10-13 22:26:55 +0000816 tp = timeouts;
sewardje663cb92002-04-12 10:26:32 +0000817
jsgf855d93d2003-10-13 22:26:55 +0000818 while(tp && tp->time <= now) {
819 struct timeout *dead;
820 ThreadState *tst;
821
822 tst = VG_(get_ThreadState)(tp->tid);
823
824 if (VG_(clo_trace_sched)) {
825 Char msg_buf[100];
826 VG_(sprintf)(msg_buf, "idle: now=%u removing timeout at %u",
827 now, tp->time);
828 print_sched_event(tp->tid, msg_buf);
829 }
sewardje663cb92002-04-12 10:26:32 +0000830
jsgf855d93d2003-10-13 22:26:55 +0000831 /* If awaken_at != tp->time then it means the timeout is
832 stale and we should just ignore it. */
833 if(tst->awaken_at == tp->time) {
834 switch(tst->status) {
835 case VgTs_Sleeping:
836 tst->awaken_at = 0xFFFFFFFF;
837 tst->status = VgTs_Runnable;
838 break;
sewardje663cb92002-04-12 10:26:32 +0000839
jsgf855d93d2003-10-13 22:26:55 +0000840 case VgTs_WaitCV:
841 do_pthread_cond_timedwait_TIMEOUT(tst->tid);
842 break;
sewardje663cb92002-04-12 10:26:32 +0000843
jsgf855d93d2003-10-13 22:26:55 +0000844 default:
845 /* This is a bit odd but OK; if a thread had a timeout
846 but woke for some other reason (signal, condvar
847 wakeup), then it will still be on the list. */
848 if (0)
849 VG_(printf)("idle(): unexpected status tp->tid=%d tst->status = %d\n",
850 tp->tid, tst->status);
851 break;
852 }
853 }
sewardjbc7d8782002-06-30 12:44:54 +0000854
jsgf855d93d2003-10-13 22:26:55 +0000855 dead = tp;
856 tp = tp->next;
njn25e49d8e72002-09-23 09:36:25 +0000857
jsgf855d93d2003-10-13 22:26:55 +0000858 VG_(arena_free)(VG_AR_CORE, dead);
sewardjbc7d8782002-06-30 12:44:54 +0000859 }
860
jsgf855d93d2003-10-13 22:26:55 +0000861 timeouts = tp;
sewardje663cb92002-04-12 10:26:32 +0000862 }
863}
864
865
sewardje663cb92002-04-12 10:26:32 +0000866/* ---------------------------------------------------------------------
867 The scheduler proper.
868 ------------------------------------------------------------------ */
869
870/* Run user-space threads until either
871 * Deadlock occurs
872 * One thread asks to shutdown Valgrind
873 * The specified number of basic blocks has gone by.
874*/
875VgSchedReturnCode VG_(scheduler) ( void )
876{
877 ThreadId tid, tid_next;
878 UInt trc;
879 UInt dispatch_ctr_SAVED;
sewardj124ca2a2002-06-20 10:19:38 +0000880 Int done_this_time, n_in_bounded_wait;
jsgf855d93d2003-10-13 22:26:55 +0000881 Int n_exists, n_waiting_for_reaper;
sewardje663cb92002-04-12 10:26:32 +0000882 Addr trans_addr;
883
sewardje663cb92002-04-12 10:26:32 +0000884 /* Start with the root thread. tid in general indicates the
885 currently runnable/just-finished-running thread. */
sewardj7e87e382002-05-03 19:09:05 +0000886 VG_(last_run_tid) = tid = 1;
sewardje663cb92002-04-12 10:26:32 +0000887
888 /* This is the top level scheduler loop. It falls into three
889 phases. */
890 while (True) {
891
sewardj6072c362002-04-19 14:40:57 +0000892 /* ======================= Phase 0 of 3 =======================
893 Be paranoid. Always a good idea. */
sewardjd7fd4d22002-04-24 01:57:27 +0000894 stage1:
sewardj6072c362002-04-19 14:40:57 +0000895 scheduler_sanity();
sewardj0c3b53f2002-05-01 01:58:35 +0000896 VG_(do_sanity_checks)( False );
sewardj6072c362002-04-19 14:40:57 +0000897
sewardje663cb92002-04-12 10:26:32 +0000898 /* ======================= Phase 1 of 3 =======================
899 Handle I/O completions and signals. This may change the
900 status of various threads. Then select a new thread to run,
901 or declare deadlock, or sleep if there are no runnable
902 threads but some are blocked on I/O. */
903
sewardje663cb92002-04-12 10:26:32 +0000904 /* Was a debug-stop requested? */
905 if (VG_(bbs_to_go) == 0)
906 goto debug_stop;
907
908 /* Do the following loop until a runnable thread is found, or
909 deadlock is detected. */
910 while (True) {
911
912 /* For stats purposes only. */
913 VG_(num_scheduling_events_MAJOR) ++;
914
jsgf855d93d2003-10-13 22:26:55 +0000915 /* Route signals to their proper places */
916 VG_(route_signals)();
sewardje663cb92002-04-12 10:26:32 +0000917
jsgf855d93d2003-10-13 22:26:55 +0000918 /* See if any of the proxy LWPs report any activity: either a
919 syscall completing or a signal arriving. */
920 VG_(proxy_results)();
sewardje663cb92002-04-12 10:26:32 +0000921
922 /* Try and find a thread (tid) to run. */
923 tid_next = tid;
jsgf855d93d2003-10-13 22:26:55 +0000924 if (prefer_sched != VG_INVALID_THREADID) {
925 tid_next = prefer_sched-1;
926 prefer_sched = VG_INVALID_THREADID;
927 }
sewardj51c0aaf2002-04-25 01:32:10 +0000928 n_in_bounded_wait = 0;
jsgf855d93d2003-10-13 22:26:55 +0000929 n_exists = 0;
930 n_waiting_for_reaper = 0;
sewardje663cb92002-04-12 10:26:32 +0000931 while (True) {
932 tid_next++;
sewardj6072c362002-04-19 14:40:57 +0000933 if (tid_next >= VG_N_THREADS) tid_next = 1;
jsgf855d93d2003-10-13 22:26:55 +0000934 if (VG_(threads)[tid_next].status == VgTs_Sleeping
935 || VG_(threads)[tid_next].status == VgTs_WaitSys
sewardj018f7622002-05-15 21:13:39 +0000936 || (VG_(threads)[tid_next].status == VgTs_WaitCV
937 && VG_(threads)[tid_next].awaken_at != 0xFFFFFFFF))
sewardj51c0aaf2002-04-25 01:32:10 +0000938 n_in_bounded_wait ++;
jsgf855d93d2003-10-13 22:26:55 +0000939 if (VG_(threads)[tid_next].status != VgTs_Empty)
940 n_exists++;
941 if (VG_(threads)[tid_next].status == VgTs_WaitJoiner)
942 n_waiting_for_reaper++;
sewardj018f7622002-05-15 21:13:39 +0000943 if (VG_(threads)[tid_next].status == VgTs_Runnable)
sewardje663cb92002-04-12 10:26:32 +0000944 break; /* We can run this one. */
945 if (tid_next == tid)
946 break; /* been all the way round */
947 }
948 tid = tid_next;
949
sewardj018f7622002-05-15 21:13:39 +0000950 if (VG_(threads)[tid].status == VgTs_Runnable) {
sewardje663cb92002-04-12 10:26:32 +0000951 /* Found a suitable candidate. Fall out of this loop, so
952 we can advance to stage 2 of the scheduler: actually
953 running the thread. */
954 break;
955 }
956
jsgf855d93d2003-10-13 22:26:55 +0000957 /* All threads have exited - pretend someone called exit() */
958 if (n_waiting_for_reaper == n_exists) {
959 VG_(exitcode) = 0; /* ? */
960 return VgSrc_ExitSyscall;
961 }
962
sewardje663cb92002-04-12 10:26:32 +0000963 /* We didn't find a runnable thread. Now what? */
sewardj51c0aaf2002-04-25 01:32:10 +0000964 if (n_in_bounded_wait == 0) {
sewardj54cacf02002-04-12 23:24:59 +0000965 /* No runnable threads and no prospect of any appearing
966 even if we wait for an arbitrary length of time. In
967 short, we have a deadlock. */
sewardj15a43e12002-04-17 19:35:12 +0000968 VG_(pp_sched_status)();
sewardje663cb92002-04-12 10:26:32 +0000969 return VgSrc_Deadlock;
970 }
971
jsgf855d93d2003-10-13 22:26:55 +0000972 /* Nothing needs doing, so sit in idle until either a timeout
973 happens or a thread's syscall completes. */
974 idle();
sewardj7e87e382002-05-03 19:09:05 +0000975 /* pp_sched_status(); */
sewardjb48e5002002-05-13 00:16:03 +0000976 /* VG_(printf)("."); */
sewardje663cb92002-04-12 10:26:32 +0000977 }
978
979
980 /* ======================= Phase 2 of 3 =======================
981 Wahey! We've finally decided that thread tid is runnable, so
982 we now do that. Run it for as much of a quanta as possible.
983 Trivial requests are handled and the thread continues. The
984 aim is not to do too many of Phase 1 since it is expensive. */
985
986 if (0)
sewardj3b5d8862002-04-20 13:53:23 +0000987 VG_(printf)("SCHED: tid %d\n", tid);
sewardje663cb92002-04-12 10:26:32 +0000988
njn25e49d8e72002-09-23 09:36:25 +0000989 VG_TRACK( thread_run, tid );
990
sewardje663cb92002-04-12 10:26:32 +0000991 /* Figure out how many bbs to ask vg_run_innerloop to do. Note
992 that it decrements the counter before testing it for zero, so
993 that if VG_(dispatch_ctr) is set to N you get at most N-1
994 iterations. Also this means that VG_(dispatch_ctr) must
995 exceed zero before entering the innerloop. Also also, the
996 decrement is done before the bb is actually run, so you
997 always get at least one decrement even if nothing happens.
998 */
999 if (VG_(bbs_to_go) >= VG_SCHEDULING_QUANTUM)
1000 VG_(dispatch_ctr) = VG_SCHEDULING_QUANTUM + 1;
1001 else
1002 VG_(dispatch_ctr) = (UInt)VG_(bbs_to_go) + 1;
1003
1004 /* ... and remember what we asked for. */
1005 dispatch_ctr_SAVED = VG_(dispatch_ctr);
1006
sewardj1e8cdc92002-04-18 11:37:52 +00001007 /* paranoia ... */
sewardj018f7622002-05-15 21:13:39 +00001008 vg_assert(VG_(threads)[tid].tid == tid);
sewardj1e8cdc92002-04-18 11:37:52 +00001009
sewardje663cb92002-04-12 10:26:32 +00001010 /* Actually run thread tid. */
1011 while (True) {
1012
sewardj7e87e382002-05-03 19:09:05 +00001013 VG_(last_run_tid) = tid;
1014
sewardje663cb92002-04-12 10:26:32 +00001015 /* For stats purposes only. */
1016 VG_(num_scheduling_events_MINOR) ++;
1017
1018 if (0)
1019 VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs",
1020 tid, VG_(dispatch_ctr) - 1 );
sewardjb3eef6b2002-05-01 00:05:27 +00001021# if 0
1022 if (VG_(bbs_done) > 31700000 + 0) {
1023 dispatch_ctr_SAVED = VG_(dispatch_ctr) = 2;
sewardj018f7622002-05-15 21:13:39 +00001024 VG_(translate)(&VG_(threads)[tid], VG_(threads)[tid].m_eip,
sewardjb3eef6b2002-05-01 00:05:27 +00001025 NULL,NULL,NULL);
1026 }
sewardj018f7622002-05-15 21:13:39 +00001027 vg_assert(VG_(threads)[tid].m_eip != 0);
sewardjb3eef6b2002-05-01 00:05:27 +00001028# endif
sewardje663cb92002-04-12 10:26:32 +00001029
1030 trc = run_thread_for_a_while ( tid );
1031
sewardjb3eef6b2002-05-01 00:05:27 +00001032# if 0
sewardj018f7622002-05-15 21:13:39 +00001033 if (0 == VG_(threads)[tid].m_eip) {
sewardjb3eef6b2002-05-01 00:05:27 +00001034 VG_(printf)("tid = %d, dc = %llu\n", tid, VG_(bbs_done));
sewardj018f7622002-05-15 21:13:39 +00001035 vg_assert(0 != VG_(threads)[tid].m_eip);
sewardjb3eef6b2002-05-01 00:05:27 +00001036 }
1037# endif
1038
sewardje663cb92002-04-12 10:26:32 +00001039 /* Deal quickly with trivial scheduling events, and resume the
1040 thread. */
1041
1042 if (trc == VG_TRC_INNER_FASTMISS) {
1043 vg_assert(VG_(dispatch_ctr) > 0);
1044
1045 /* Trivial event. Miss in the fast-cache. Do a full
1046 lookup for it. */
1047 trans_addr
sewardj018f7622002-05-15 21:13:39 +00001048 = VG_(search_transtab) ( VG_(threads)[tid].m_eip );
sewardje663cb92002-04-12 10:26:32 +00001049 if (trans_addr == (Addr)0) {
1050 /* Not found; we need to request a translation. */
njn25e49d8e72002-09-23 09:36:25 +00001051 create_translation_for(
1052 tid, VG_(threads)[tid].m_eip );
sewardj018f7622002-05-15 21:13:39 +00001053 trans_addr = VG_(search_transtab) ( VG_(threads)[tid].m_eip );
sewardje663cb92002-04-12 10:26:32 +00001054 if (trans_addr == (Addr)0)
njne427a662002-10-02 11:08:25 +00001055 VG_(core_panic)("VG_TRC_INNER_FASTMISS: missing tt_fast entry");
sewardje663cb92002-04-12 10:26:32 +00001056 }
1057 continue; /* with this thread */
1058 }
1059
1060 if (trc == VG_TRC_EBP_JMP_CLIENTREQ) {
sewardj18a62ff2002-07-12 22:30:51 +00001061 UInt reqno = *(UInt*)(VG_(threads)[tid].m_eax);
1062 /* VG_(printf)("request 0x%x\n", reqno); */
sewardj1fe7b002002-07-16 01:43:15 +00001063
1064 /* Are we really absolutely totally quitting? */
1065 if (reqno == VG_USERREQ__LIBC_FREERES_DONE) {
1066 if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
1067 VG_(message)(Vg_DebugMsg,
1068 "__libc_freeres() done; really quitting!");
1069 }
1070 return VgSrc_ExitSyscall;
1071 }
1072
sewardj124ca2a2002-06-20 10:19:38 +00001073 do_client_request(tid);
1074 /* Following the request, we try and continue with the
1075 same thread if still runnable. If not, go back to
1076 Stage 1 to select a new thread to run. */
sewardj18a62ff2002-07-12 22:30:51 +00001077 if (VG_(threads)[tid].status == VgTs_Runnable
1078 && reqno != VG_USERREQ__PTHREAD_YIELD)
sewardj124ca2a2002-06-20 10:19:38 +00001079 continue; /* with this thread */
1080 else
1081 goto stage1;
sewardje663cb92002-04-12 10:26:32 +00001082 }
1083
sewardj51c0aaf2002-04-25 01:32:10 +00001084 if (trc == VG_TRC_EBP_JMP_SYSCALL) {
1085 /* Do a syscall for the vthread tid. This could cause it
sewardj7e87e382002-05-03 19:09:05 +00001086 to become non-runnable. One special case: spot the
1087 client doing calls to exit() and take this as the cue
1088 to exit. */
sewardjb3eef6b2002-05-01 00:05:27 +00001089# if 0
1090 { UInt* esp; Int i;
sewardj018f7622002-05-15 21:13:39 +00001091 esp=(UInt*)VG_(threads)[tid].m_esp;
sewardjb3eef6b2002-05-01 00:05:27 +00001092 VG_(printf)("\nBEFORE\n");
1093 for (i = 10; i >= -10; i--)
1094 VG_(printf)("%2d %p = 0x%x\n", i, &esp[i], esp[i]);
1095 }
1096# endif
1097
sewardj1fe7b002002-07-16 01:43:15 +00001098 /* Deal with calling __libc_freeres() at exit. When the
1099 client does __NR_exit, it's exiting for good. So we
1100 then run VG_(__libc_freeres_wrapper). That quits by
1101 doing VG_USERREQ__LIBC_FREERES_DONE, and at that point
1102 we really exit. To be safe we nuke all other threads
sewardjade9d0d2002-07-26 10:52:48 +00001103 currently running.
1104
1105 If not valgrinding (cachegrinding, etc) don't do this.
1106 __libc_freeres does some invalid frees which crash
1107 the unprotected malloc/free system. */
njn25e49d8e72002-09-23 09:36:25 +00001108
sewardjf3fb92d2003-02-23 03:26:08 +00001109 if (VG_(threads)[tid].m_eax == __NR_exit
sewardjf3fb92d2003-02-23 03:26:08 +00001110 || VG_(threads)[tid].m_eax == __NR_exit_group
sewardjf3fb92d2003-02-23 03:26:08 +00001111 ) {
sewardj858964b2002-10-05 14:15:43 +00001112
1113 /* If __NR_exit, remember the supplied argument. */
njn25e49d8e72002-09-23 09:36:25 +00001114 VG_(exitcode) = VG_(threads)[tid].m_ebx; /* syscall arg1 */
1115
nethercote7cc9c232004-01-21 15:08:04 +00001116 /* Only run __libc_freeres if the tool says it's ok and
sewardj858964b2002-10-05 14:15:43 +00001117 it hasn't been overridden with --run-libc-freeres=no
1118 on the command line. */
1119
fitzhardinge98abfc72003-12-16 02:05:15 +00001120 if (VG_(needs).libc_freeres &&
1121 VG_(clo_run_libc_freeres) &&
1122 VG_(__libc_freeres_wrapper) != 0) {
sewardj00631892002-10-05 15:34:38 +00001123 if (VG_(clo_verbosity) > 2
sewardj858964b2002-10-05 14:15:43 +00001124 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
1125 VG_(message)(Vg_DebugMsg,
1126 "Caught __NR_exit; running __libc_freeres()");
1127 }
1128 VG_(nuke_all_threads_except) ( tid );
fitzhardinge98abfc72003-12-16 02:05:15 +00001129 VG_(threads)[tid].m_eip = (UInt)VG_(__libc_freeres_wrapper);
sewardj858964b2002-10-05 14:15:43 +00001130 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1131 goto stage1; /* party on, dudes (but not for much longer :) */
1132
1133 } else {
1134 /* We won't run __libc_freeres; just exit now. */
sewardj00631892002-10-05 15:34:38 +00001135 if (VG_(clo_verbosity) > 2
sewardj858964b2002-10-05 14:15:43 +00001136 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
1137 VG_(message)(Vg_DebugMsg,
1138 "Caught __NR_exit; quitting");
1139 }
1140 return VgSrc_ExitSyscall;
1141 }
1142
sewardjade9d0d2002-07-26 10:52:48 +00001143 }
1144
sewardj858964b2002-10-05 14:15:43 +00001145 /* We've dealt with __NR_exit at this point. */
jsgf855d93d2003-10-13 22:26:55 +00001146 vg_assert(VG_(threads)[tid].m_eax != __NR_exit &&
1147 VG_(threads)[tid].m_eax != __NR_exit_group);
sewardj7e87e382002-05-03 19:09:05 +00001148
sewardj83798bf2002-05-24 00:11:16 +00001149 /* Trap syscalls to __NR_sched_yield and just have this
1150 thread yield instead. Not essential, just an
1151 optimisation. */
1152 if (VG_(threads)[tid].m_eax == __NR_sched_yield) {
njnd3040452003-05-19 15:04:06 +00001153 SET_SYSCALL_RETVAL(tid, 0); /* syscall returns with success */
sewardj83798bf2002-05-24 00:11:16 +00001154 goto stage1; /* find a new thread to run */
1155 }
1156
sewardj51c0aaf2002-04-25 01:32:10 +00001157 sched_do_syscall(tid);
sewardjb3eef6b2002-05-01 00:05:27 +00001158
1159# if 0
1160 { UInt* esp; Int i;
sewardj018f7622002-05-15 21:13:39 +00001161 esp=(UInt*)VG_(threads)[tid].m_esp;
sewardjb3eef6b2002-05-01 00:05:27 +00001162 VG_(printf)("AFTER\n");
1163 for (i = 10; i >= -10; i--)
1164 VG_(printf)("%2d %p = 0x%x\n", i, &esp[i], esp[i]);
1165 }
1166# endif
1167
sewardj77f0fc12002-07-12 01:23:03 +00001168 if (VG_(threads)[tid].status == VgTs_Runnable) {
sewardj51c0aaf2002-04-25 01:32:10 +00001169 continue; /* with this thread */
sewardj77f0fc12002-07-12 01:23:03 +00001170 } else {
1171 goto stage1;
1172 }
sewardj51c0aaf2002-04-25 01:32:10 +00001173 }
1174
sewardjd7fd4d22002-04-24 01:57:27 +00001175 /* It's an event we can't quickly deal with. Give up running
1176 this thread and handle things the expensive way. */
sewardje663cb92002-04-12 10:26:32 +00001177 break;
1178 }
1179
1180 /* ======================= Phase 3 of 3 =======================
1181 Handle non-trivial thread requests, mostly pthread stuff. */
1182
1183 /* Ok, we've fallen out of the dispatcher for a
1184 non-completely-trivial reason. First, update basic-block
1185 counters. */
1186
1187 done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 1;
1188 vg_assert(done_this_time >= 0);
1189 VG_(bbs_to_go) -= (ULong)done_this_time;
1190 VG_(bbs_done) += (ULong)done_this_time;
1191
1192 if (0 && trc != VG_TRC_INNER_FASTMISS)
1193 VG_(message)(Vg_DebugMsg, "thread %d: completed %d bbs, trc %d",
1194 tid, done_this_time, (Int)trc );
1195
1196 if (0 && trc != VG_TRC_INNER_FASTMISS)
njne0205ff2003-04-08 00:56:14 +00001197 VG_(message)(Vg_DebugMsg, "thread %d: %llu bbs, event %s",
sewardje663cb92002-04-12 10:26:32 +00001198 tid, VG_(bbs_done),
1199 name_of_sched_event(trc) );
sewardj9d1b5d32002-04-17 19:40:49 +00001200
sewardje663cb92002-04-12 10:26:32 +00001201 /* Examine the thread's return code to figure out why it
sewardj124ca2a2002-06-20 10:19:38 +00001202 stopped. */
sewardje663cb92002-04-12 10:26:32 +00001203
1204 switch (trc) {
1205
fitzhardingea02f8812003-12-18 09:06:09 +00001206 case VG_TRC_EBP_JMP_YIELD:
1207 /* Explicit yield. Let a new thread be scheduled,
1208 simply by doing nothing, causing us to arrive back at
1209 Phase 1. */
1210 if (VG_(bbs_to_go) == 0) {
1211 goto debug_stop;
1212 }
1213 break;
1214
sewardje663cb92002-04-12 10:26:32 +00001215 case VG_TRC_INNER_COUNTERZERO:
1216 /* Timeslice is out. Let a new thread be scheduled,
1217 simply by doing nothing, causing us to arrive back at
1218 Phase 1. */
1219 if (VG_(bbs_to_go) == 0) {
1220 goto debug_stop;
1221 }
1222 vg_assert(VG_(dispatch_ctr) == 0);
1223 break;
1224
1225 case VG_TRC_UNRESUMABLE_SIGNAL:
jsgf855d93d2003-10-13 22:26:55 +00001226 /* It got a SIGSEGV/SIGBUS/SIGILL/SIGFPE, which we need to
1227 deliver right away. */
1228 vg_assert(VG_(unresumable_siginfo).si_signo == VKI_SIGSEGV ||
1229 VG_(unresumable_siginfo).si_signo == VKI_SIGBUS ||
1230 VG_(unresumable_siginfo).si_signo == VKI_SIGILL ||
1231 VG_(unresumable_siginfo).si_signo == VKI_SIGFPE);
1232 vg_assert(VG_(longjmpd_on_signal) == VG_(unresumable_siginfo).si_signo);
1233
1234 /* make sure we've unblocked the signals which the handler blocked */
1235 VG_(unblock_host_signal)(VG_(longjmpd_on_signal));
1236
1237 VG_(deliver_signal)(tid, &VG_(unresumable_siginfo), False);
1238 VG_(unresumable_siginfo).si_signo = 0; /* done */
sewardje663cb92002-04-12 10:26:32 +00001239 break;
1240
sewardje663cb92002-04-12 10:26:32 +00001241 default:
1242 VG_(printf)("\ntrc = %d\n", trc);
njne427a662002-10-02 11:08:25 +00001243 VG_(core_panic)("VG_(scheduler), phase 3: "
1244 "unexpected thread return code");
sewardje663cb92002-04-12 10:26:32 +00001245 /* NOTREACHED */
1246 break;
1247
1248 } /* switch (trc) */
1249
1250 /* That completes Phase 3 of 3. Return now to the top of the
1251 main scheduler loop, to Phase 1 of 3. */
1252
1253 } /* top-level scheduler loop */
1254
1255
1256 /* NOTREACHED */
njne427a662002-10-02 11:08:25 +00001257 VG_(core_panic)("scheduler: post-main-loop ?!");
sewardje663cb92002-04-12 10:26:32 +00001258 /* NOTREACHED */
1259
1260 debug_stop:
1261 /* If we exited because of a debug stop, print the translation
1262 of the last block executed -- by translating it again, and
1263 throwing away the result. */
1264 VG_(printf)(
1265 "======vvvvvvvv====== LAST TRANSLATION ======vvvvvvvv======\n");
njn72718642003-07-24 08:45:32 +00001266 VG_(translate)( tid,
sewardj22854b92002-11-30 14:00:47 +00001267 VG_(threads)[tid].m_eip, NULL, NULL, NULL, NULL );
sewardje663cb92002-04-12 10:26:32 +00001268 VG_(printf)("\n");
1269 VG_(printf)(
1270 "======^^^^^^^^====== LAST TRANSLATION ======^^^^^^^^======\n");
1271
1272 return VgSrc_BbsDone;
1273}
1274
jsgf855d93d2003-10-13 22:26:55 +00001275void VG_(need_resched) ( ThreadId prefer )
1276{
1277 /* Tell the scheduler now might be a good time to find a new
1278 runnable thread, because something happened which woke a thread
1279 up.
1280
1281 NB: This can be called unsynchronized from either a signal
1282 handler, or from another LWP (ie, real kernel thread).
1283
1284 In principle this could simply be a matter of setting
1285 VG_(dispatch_ctr) to a small value (say, 2), which would make
1286 any running code come back to the scheduler fairly quickly.
1287
1288 However, since the scheduler implements a strict round-robin
1289 policy with only one priority level, there are, by definition,
1290 no better threads to be running than the current thread anyway,
1291 so we may as well ignore this hint. For processes with a
1292 mixture of compute and I/O bound threads, this means the compute
1293 threads could introduce longish latencies before the I/O threads
1294 run. For programs with only I/O bound threads, need_resched
1295 won't have any effect anyway.
1296
1297 OK, so I've added command-line switches to enable low-latency
1298 syscalls and signals. The prefer_sched variable is in effect
1299 the ID of a single thread which has higher priority than all the
1300 others. If set, the scheduler will prefer to schedule that
1301 thread over all others. Naturally, this could lead to
1302 starvation or other unfairness.
1303 */
1304
1305 if (VG_(dispatch_ctr) > 10)
1306 VG_(dispatch_ctr) = 2;
1307 prefer_sched = prefer;
1308}
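
#if 0
/* Purely illustrative sketch (not real code in this file; the helper
   name is made up): a hypothetical waker, running in a signal handler
   or on another LWP, which has just made thread 'woken' runnable and
   wants the scheduler to notice soon. */
static void example_wakeup_hint_SKETCH ( ThreadId woken )
{
   VG_(need_resched)(woken);  /* shrinks VG_(dispatch_ctr), sets prefer_sched */
}
#endif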
1309
sewardje663cb92002-04-12 10:26:32 +00001310
1311/* ---------------------------------------------------------------------
1312 The pthread implementation.
1313 ------------------------------------------------------------------ */
1314
1315#include <pthread.h>
1316#include <errno.h>
1317
sewardjbf290b92002-05-01 02:28:01 +00001318#define VG_PTHREAD_STACK_MIN \
sewardjc3bd5f52002-05-01 03:24:23 +00001319 (VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB)
sewardje663cb92002-04-12 10:26:32 +00001320
1321/* /usr/include/bits/pthreadtypes.h:
1322 typedef unsigned long int pthread_t;
1323*/
1324
sewardje663cb92002-04-12 10:26:32 +00001325
sewardj604ec3c2002-04-18 22:38:41 +00001326/* -----------------------------------------------------------
sewardj20917d82002-05-28 01:36:45 +00001327 Thread CREATION, JOINAGE and CANCELLATION: HELPER FNS
sewardj604ec3c2002-04-18 22:38:41 +00001328 -------------------------------------------------------- */
1329
sewardj20917d82002-05-28 01:36:45 +00001330/* We've decided to action a cancellation on tid. Make it jump to
1331 thread_exit_wrapper() in vg_libpthread.c, passing PTHREAD_CANCELED
1332 as the arg. */
1333static
1334void make_thread_jump_to_cancelhdlr ( ThreadId tid )
1335{
1336 Char msg_buf[100];
1337 vg_assert(VG_(is_valid_tid)(tid));
sewardjdadc8d02002-12-08 23:24:18 +00001338
sewardj20917d82002-05-28 01:36:45 +00001339 /* Push PTHREAD_CANCELED on the stack and jump to the cancellation
1340 handler -- which is really thread_exit_wrapper() in
1341 vg_libpthread.c. */
1342 vg_assert(VG_(threads)[tid].cancel_pend != NULL);
sewardj4bdd9962002-12-26 11:51:50 +00001343
1344 /* Push a suitable arg, and mark it as readable. */
njnd3040452003-05-19 15:04:06 +00001345 SET_PTHREQ_ESP(tid, VG_(threads)[tid].m_esp - 4);
sewardj20917d82002-05-28 01:36:45 +00001346 * (UInt*)(VG_(threads)[tid].m_esp) = (UInt)PTHREAD_CANCELED;
sewardj4bdd9962002-12-26 11:51:50 +00001347 VG_TRACK( post_mem_write, VG_(threads)[tid].m_esp, sizeof(void*) );
1348
1349 /* Push a bogus return address. It will not return, but we still
1350 need to have it so that the arg is at the correct stack offset.
1351 Don't mark as readable; any attempt to read this is an internal
1352 valgrind bug since thread_exit_wrapper should not return. */
njnd3040452003-05-19 15:04:06 +00001353 SET_PTHREQ_ESP(tid, VG_(threads)[tid].m_esp - 4);
sewardj4bdd9962002-12-26 11:51:50 +00001354 * (UInt*)(VG_(threads)[tid].m_esp) = 0xBEADDEEF;
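   /* The simulated stack now looks like a normal C call frame on entry
      to thread_exit_wrapper:
          %esp   -> 0xBEADDEEF        (bogus return address)
          %esp+4 -> PTHREAD_CANCELED  (the handler's single argument)  */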
1355
1356 /* .cancel_pend will hold &thread_exit_wrapper */
sewardj20917d82002-05-28 01:36:45 +00001357 VG_(threads)[tid].m_eip = (UInt)VG_(threads)[tid].cancel_pend;
sewardjdadc8d02002-12-08 23:24:18 +00001358
jsgf855d93d2003-10-13 22:26:55 +00001359 VG_(proxy_abort_syscall)(tid);
sewardjdadc8d02002-12-08 23:24:18 +00001360
sewardj20917d82002-05-28 01:36:45 +00001361 VG_(threads)[tid].status = VgTs_Runnable;
sewardjdadc8d02002-12-08 23:24:18 +00001362
sewardj20917d82002-05-28 01:36:45 +00001363 /* Make sure we aren't cancelled again whilst handling this
1364 cancellation. */
1365 VG_(threads)[tid].cancel_st = False;
1366 if (VG_(clo_trace_sched)) {
1367 VG_(sprintf)(msg_buf,
1368 "jump to cancellation handler (hdlr = %p)",
1369 VG_(threads)[tid].cancel_pend);
1370 print_sched_event(tid, msg_buf);
1371 }
1372}
1373
1374
1375
sewardjb48e5002002-05-13 00:16:03 +00001376/* Release resources and generally clean up once a thread has finally
1377 disappeared. */
1378static
jsgf855d93d2003-10-13 22:26:55 +00001379void cleanup_after_thread_exited ( ThreadId tid, Bool forcekill )
sewardjb48e5002002-05-13 00:16:03 +00001380{
sewardj018f7622002-05-15 21:13:39 +00001381 vg_assert(VG_(is_valid_or_empty_tid)(tid));
1382 vg_assert(VG_(threads)[tid].status == VgTs_Empty);
njn25e49d8e72002-09-23 09:36:25 +00001383 /* Its stack is now off-limits */
1384 VG_TRACK( die_mem_stack, VG_(threads)[tid].stack_base,
1385 VG_(threads)[tid].stack_size );
1386
sewardj92a59562002-09-30 00:53:10 +00001387 /* Deallocate its LDT, if it ever had one. */
1388 VG_(deallocate_LDT_for_thread)( VG_(threads)[tid].ldt );
1389 VG_(threads)[tid].ldt = NULL;
jsgf855d93d2003-10-13 22:26:55 +00001390
fitzhardinge47735af2004-01-21 01:27:27 +00001391 /* Clear its TLS array. */
1392 VG_(clear_TLS_for_thread)( VG_(threads)[tid].tls );
1393
jsgf855d93d2003-10-13 22:26:55 +00001394 /* Not interested in the timeout anymore */
1395 VG_(threads)[tid].awaken_at = 0xFFFFFFFF;
1396
1397 /* Delete proxy LWP */
1398 VG_(proxy_delete)(tid, forcekill);
sewardjb48e5002002-05-13 00:16:03 +00001399}
1400
1401
sewardj20917d82002-05-28 01:36:45 +00001402/* Look for matching pairs of threads waiting for joiners and threads
1403 waiting for joinees. For each such pair copy the return value of
1404 the joinee into the joiner, let the joiner resume and discard the
1405 joinee. */
1406static
1407void maybe_rendezvous_joiners_and_joinees ( void )
1408{
1409 Char msg_buf[100];
1410 void** thread_return;
1411 ThreadId jnr, jee;
1412
1413 for (jnr = 1; jnr < VG_N_THREADS; jnr++) {
1414 if (VG_(threads)[jnr].status != VgTs_WaitJoinee)
1415 continue;
1416 jee = VG_(threads)[jnr].joiner_jee_tid;
1417 if (jee == VG_INVALID_THREADID)
1418 continue;
1419 vg_assert(VG_(is_valid_tid)(jee));
jsgf855d93d2003-10-13 22:26:55 +00001420 if (VG_(threads)[jee].status != VgTs_WaitJoiner) {
1421 /* if joinee has become detached, then make join fail with
1422 EINVAL */
1423 if (VG_(threads)[jee].detached) {
1424 VG_(threads)[jnr].status = VgTs_Runnable;
1425 VG_(threads)[jnr].joiner_jee_tid = VG_INVALID_THREADID;
1426 SET_PTHREQ_RETVAL(jnr, VKI_EINVAL);
1427 }
sewardj20917d82002-05-28 01:36:45 +00001428 continue;
jsgf855d93d2003-10-13 22:26:55 +00001429 }
sewardj20917d82002-05-28 01:36:45 +00001430 /* ok! jnr is waiting to join with jee, and jee is waiting to be
1431 joined by ... well, any thread. So let's do it! */
1432
1433 /* Copy return value to where joiner wants it. */
1434 thread_return = VG_(threads)[jnr].joiner_thread_return;
1435 if (thread_return != NULL) {
1436 /* CHECK thread_return writable */
njn72718642003-07-24 08:45:32 +00001437 VG_TRACK( pre_mem_write, Vg_CorePThread, jnr,
njn25e49d8e72002-09-23 09:36:25 +00001438 "pthread_join: thread_return",
1439 (Addr)thread_return, sizeof(void*));
sewardj5a3798b2002-06-04 23:24:22 +00001440
sewardj20917d82002-05-28 01:36:45 +00001441 *thread_return = VG_(threads)[jee].joinee_retval;
1442 /* Not really right, since it makes the thread's return value
1443 appear to be defined even if it isn't. */
njn25e49d8e72002-09-23 09:36:25 +00001444 VG_TRACK( post_mem_write, (Addr)thread_return, sizeof(void*) );
sewardj20917d82002-05-28 01:36:45 +00001445 }
1446
1447 /* Joinee is discarded */
1448 VG_(threads)[jee].status = VgTs_Empty; /* bye! */
jsgf855d93d2003-10-13 22:26:55 +00001449 cleanup_after_thread_exited ( jee, False );
sewardjc4a810d2002-11-13 22:25:51 +00001450 if (VG_(clo_trace_sched)) {
1451 VG_(sprintf)(msg_buf,
1452 "rendezvous with joinee %d. %d resumes, %d exits.",
1453 jee, jnr, jee );
sewardj20917d82002-05-28 01:36:45 +00001454 print_sched_event(jnr, msg_buf);
1455 }
sewardjc4a810d2002-11-13 22:25:51 +00001456
1457 VG_TRACK( post_thread_join, jnr, jee );
sewardj20917d82002-05-28 01:36:45 +00001458
1459 /* joiner returns with success */
1460 VG_(threads)[jnr].status = VgTs_Runnable;
njnd3040452003-05-19 15:04:06 +00001461 SET_PTHREQ_RETVAL(jnr, 0);
sewardj20917d82002-05-28 01:36:45 +00001462 }
1463}
1464
1465
sewardjccef2e62002-05-29 19:26:32 +00001466/* Nuke all threads other than me.  POSIX requires this to happen
1467 on exec (__NR_execve), and after a __NR_fork() when I am the child.
jsgf855d93d2003-10-13 22:26:55 +00001468 Also used at process exit time with
1469 me==VG_INVALID_THREADID */
sewardjccef2e62002-05-29 19:26:32 +00001470void VG_(nuke_all_threads_except) ( ThreadId me )
1471{
1472 ThreadId tid;
1473 for (tid = 1; tid < VG_N_THREADS; tid++) {
1474 if (tid == me
jsgf855d93d2003-10-13 22:26:55 +00001475 || VG_(threads)[tid].status == VgTs_Empty)
sewardjccef2e62002-05-29 19:26:32 +00001476 continue;
sewardjef037c72002-05-30 00:40:03 +00001477 if (0)
1478 VG_(printf)(
1479 "VG_(nuke_all_threads_except): nuking tid %d\n", tid);
jsgf855d93d2003-10-13 22:26:55 +00001480 VG_(proxy_delete)(tid, True);
sewardjccef2e62002-05-29 19:26:32 +00001481 VG_(threads)[tid].status = VgTs_Empty;
jsgf855d93d2003-10-13 22:26:55 +00001482 cleanup_after_thread_exited( tid, True );
sewardjccef2e62002-05-29 19:26:32 +00001483 }
1484}
1485
1486
sewardj20917d82002-05-28 01:36:45 +00001487/* -----------------------------------------------------------
1488 Thread CREATION, JOINAGE and CANCELLATION: REQUESTS
1489 -------------------------------------------------------- */
1490
sewardje663cb92002-04-12 10:26:32 +00001491static
sewardj8ad94e12002-05-29 00:10:20 +00001492void do__cleanup_push ( ThreadId tid, CleanupEntry* cu )
1493{
1494 Int sp;
1495 Char msg_buf[100];
1496 vg_assert(VG_(is_valid_tid)(tid));
1497 sp = VG_(threads)[tid].custack_used;
1498 if (VG_(clo_trace_sched)) {
1499 VG_(sprintf)(msg_buf,
1500 "cleanup_push (fn %p, arg %p) -> slot %d",
1501 cu->fn, cu->arg, sp);
1502 print_sched_event(tid, msg_buf);
1503 }
1504 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1505 if (sp == VG_N_CLEANUPSTACK)
njne427a662002-10-02 11:08:25 +00001506 VG_(core_panic)("do__cleanup_push: VG_N_CLEANUPSTACK is too small."
sewardj8ad94e12002-05-29 00:10:20 +00001507 " Increase and recompile.");
1508 VG_(threads)[tid].custack[sp] = *cu;
1509 sp++;
1510 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001511 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001512}
1513
1514
1515static
1516void do__cleanup_pop ( ThreadId tid, CleanupEntry* cu )
1517{
1518 Int sp;
1519 Char msg_buf[100];
1520 vg_assert(VG_(is_valid_tid)(tid));
1521 sp = VG_(threads)[tid].custack_used;
1522 if (VG_(clo_trace_sched)) {
njn36650922002-10-04 09:18:09 +00001523 VG_(sprintf)(msg_buf, "cleanup_pop from slot %d", sp-1);
sewardj8ad94e12002-05-29 00:10:20 +00001524 print_sched_event(tid, msg_buf);
1525 }
1526 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1527 if (sp == 0) {
njnd3040452003-05-19 15:04:06 +00001528 SET_PTHREQ_RETVAL(tid, -1);
sewardj8ad94e12002-05-29 00:10:20 +00001529 return;
1530 }
1531 sp--;
njn72718642003-07-24 08:45:32 +00001532 VG_TRACK( pre_mem_write, Vg_CorePThread, tid,
njn36650922002-10-04 09:18:09 +00001533 "cleanup pop", (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001534 *cu = VG_(threads)[tid].custack[sp];
njn25e49d8e72002-09-23 09:36:25 +00001535 VG_TRACK( post_mem_write, (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001536 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001537 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001538}
1539
1540
1541static
sewardjff42d1d2002-05-22 13:17:31 +00001542void do_pthread_yield ( ThreadId tid )
1543{
1544 Char msg_buf[100];
1545 vg_assert(VG_(is_valid_tid)(tid));
sewardjff42d1d2002-05-22 13:17:31 +00001546 if (VG_(clo_trace_sched)) {
1547 VG_(sprintf)(msg_buf, "yield");
1548 print_sched_event(tid, msg_buf);
1549 }
njnd3040452003-05-19 15:04:06 +00001550 SET_PTHREQ_RETVAL(tid, 0);
sewardjff42d1d2002-05-22 13:17:31 +00001551}
1552
1553
1554static
sewardj20917d82002-05-28 01:36:45 +00001555void do__testcancel ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00001556{
sewardj7989d0c2002-05-28 11:00:01 +00001557 Char msg_buf[100];
sewardjb48e5002002-05-13 00:16:03 +00001558 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001559 if (VG_(clo_trace_sched)) {
1560 VG_(sprintf)(msg_buf, "testcancel");
1561 print_sched_event(tid, msg_buf);
1562 }
sewardj20917d82002-05-28 01:36:45 +00001563 if (/* is there a cancellation pending on this thread? */
1564 VG_(threads)[tid].cancel_pend != NULL
1565 && /* is this thread accepting cancellations? */
1566 VG_(threads)[tid].cancel_st) {
1567 /* Ok, let's do the cancellation. */
1568 make_thread_jump_to_cancelhdlr ( tid );
sewardje663cb92002-04-12 10:26:32 +00001569 } else {
sewardj20917d82002-05-28 01:36:45 +00001570 /* No, we keep going. */
njnd3040452003-05-19 15:04:06 +00001571 SET_PTHREQ_RETVAL(tid, 0);
sewardje663cb92002-04-12 10:26:32 +00001572 }
sewardje663cb92002-04-12 10:26:32 +00001573}
1574
1575
1576static
sewardj20917d82002-05-28 01:36:45 +00001577void do__set_cancelstate ( ThreadId tid, Int state )
1578{
1579 Bool old_st;
sewardj7989d0c2002-05-28 11:00:01 +00001580 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001581 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001582 if (VG_(clo_trace_sched)) {
1583 VG_(sprintf)(msg_buf, "set_cancelstate to %d (%s)", state,
1584 state==PTHREAD_CANCEL_ENABLE
1585 ? "ENABLE"
1586 : (state==PTHREAD_CANCEL_DISABLE ? "DISABLE" : "???"));
1587 print_sched_event(tid, msg_buf);
1588 }
sewardj20917d82002-05-28 01:36:45 +00001589 old_st = VG_(threads)[tid].cancel_st;
1590 if (state == PTHREAD_CANCEL_ENABLE) {
1591 VG_(threads)[tid].cancel_st = True;
1592 } else
1593 if (state == PTHREAD_CANCEL_DISABLE) {
1594 VG_(threads)[tid].cancel_st = False;
1595 } else {
njne427a662002-10-02 11:08:25 +00001596 VG_(core_panic)("do__set_cancelstate");
sewardj20917d82002-05-28 01:36:45 +00001597 }
njnd3040452003-05-19 15:04:06 +00001598 SET_PTHREQ_RETVAL(tid, old_st ? PTHREAD_CANCEL_ENABLE
1599 : PTHREAD_CANCEL_DISABLE);
sewardj20917d82002-05-28 01:36:45 +00001600}
1601
1602
1603static
1604void do__set_canceltype ( ThreadId tid, Int type )
1605{
1606 Bool old_ty;
sewardj7989d0c2002-05-28 11:00:01 +00001607 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001608 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001609 if (VG_(clo_trace_sched)) {
1610 VG_(sprintf)(msg_buf, "set_canceltype to %d (%s)", type,
1611 type==PTHREAD_CANCEL_ASYNCHRONOUS
1612 ? "ASYNCHRONOUS"
1613 : (type==PTHREAD_CANCEL_DEFERRED ? "DEFERRED" : "???"));
1614 print_sched_event(tid, msg_buf);
1615 }
sewardj20917d82002-05-28 01:36:45 +00001616 old_ty = VG_(threads)[tid].cancel_ty;
1617 if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
1618 VG_(threads)[tid].cancel_ty = False;
1619 } else
1620 if (type == PTHREAD_CANCEL_DEFERRED) {
sewardjaf00b6d2002-05-29 23:30:28 +00001621 VG_(threads)[tid].cancel_ty = True;
sewardj20917d82002-05-28 01:36:45 +00001622 } else {
njne427a662002-10-02 11:08:25 +00001623 VG_(core_panic)("do__set_canceltype");
sewardj20917d82002-05-28 01:36:45 +00001624 }
njnd3040452003-05-19 15:04:06 +00001625 SET_PTHREQ_RETVAL(tid, old_ty ? PTHREAD_CANCEL_DEFERRED
sewardj20917d82002-05-28 01:36:45 +00001626 : PTHREAD_CANCEL_ASYNCHRONOUS);
1627}
1628
1629
sewardj7989d0c2002-05-28 11:00:01 +00001630/* Set or get the detach state for thread det. */
sewardj20917d82002-05-28 01:36:45 +00001631static
sewardj7989d0c2002-05-28 11:00:01 +00001632void do__set_or_get_detach ( ThreadId tid,
1633 Int what, ThreadId det )
sewardj20917d82002-05-28 01:36:45 +00001634{
sewardj7989d0c2002-05-28 11:00:01 +00001635 Char msg_buf[100];
1636 /* VG_(printf)("do__set_or_get_detach tid %d what %d det %d\n",
1637 tid, what, det); */
sewardj20917d82002-05-28 01:36:45 +00001638 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001639 if (VG_(clo_trace_sched)) {
1640 VG_(sprintf)(msg_buf, "set_or_get_detach %d (%s) for tid %d", what,
1641 what==0 ? "not-detached" : (
1642 what==1 ? "detached" : (
1643 what==2 ? "fetch old value" : "???")),
1644 det );
1645 print_sched_event(tid, msg_buf);
1646 }
1647
1648 if (!VG_(is_valid_tid)(det)) {
njnd3040452003-05-19 15:04:06 +00001649 SET_PTHREQ_RETVAL(tid, -1);
sewardj7989d0c2002-05-28 11:00:01 +00001650 return;
1651 }
1652
sewardj20917d82002-05-28 01:36:45 +00001653 switch (what) {
1654 case 2: /* get */
njnd3040452003-05-19 15:04:06 +00001655 SET_PTHREQ_RETVAL(tid, VG_(threads)[det].detached ? 1 : 0);
sewardj20917d82002-05-28 01:36:45 +00001656 return;
jsgf855d93d2003-10-13 22:26:55 +00001657 case 1: /* set detached */
sewardj7989d0c2002-05-28 11:00:01 +00001658 VG_(threads)[det].detached = True;
njnd3040452003-05-19 15:04:06 +00001659 SET_PTHREQ_RETVAL(tid, 0);
jsgf855d93d2003-10-13 22:26:55 +00001660 /* wake anyone who was joining on us */
1661 maybe_rendezvous_joiners_and_joinees();
sewardj20917d82002-05-28 01:36:45 +00001662 return;
1663 case 0: /* set not detached */
sewardj7989d0c2002-05-28 11:00:01 +00001664 VG_(threads)[det].detached = False;
njnd3040452003-05-19 15:04:06 +00001665 SET_PTHREQ_RETVAL(tid, 0);
sewardj20917d82002-05-28 01:36:45 +00001666 return;
1667 default:
njne427a662002-10-02 11:08:25 +00001668 VG_(core_panic)("do__set_or_get_detach");
sewardj20917d82002-05-28 01:36:45 +00001669 }
1670}
1671
1672
1673static
1674void do__set_cancelpend ( ThreadId tid,
1675 ThreadId cee,
1676 void (*cancelpend_hdlr)(void*) )
sewardje663cb92002-04-12 10:26:32 +00001677{
1678 Char msg_buf[100];
1679
sewardj20917d82002-05-28 01:36:45 +00001680 vg_assert(VG_(is_valid_tid)(tid));
1681 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1682
sewardj7989d0c2002-05-28 11:00:01 +00001683 if (!VG_(is_valid_tid)(cee)) {
1684 if (VG_(clo_trace_sched)) {
1685 VG_(sprintf)(msg_buf,
1686 "set_cancelpend for invalid tid %d", cee);
1687 print_sched_event(tid, msg_buf);
1688 }
njn25e49d8e72002-09-23 09:36:25 +00001689 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001690 "pthread_cancel: target thread does not exist, or invalid");
jsgf855d93d2003-10-13 22:26:55 +00001691 SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
sewardj7989d0c2002-05-28 11:00:01 +00001692 return;
1693 }
sewardj20917d82002-05-28 01:36:45 +00001694
1695 VG_(threads)[cee].cancel_pend = cancelpend_hdlr;
1696
jsgf855d93d2003-10-13 22:26:55 +00001697 /* interrupt a pending syscall */
1698 VG_(proxy_abort_syscall)(cee);
1699
sewardj20917d82002-05-28 01:36:45 +00001700 if (VG_(clo_trace_sched)) {
1701 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001702 "set_cancelpend (hdlr = %p, set by tid %d)",
sewardj20917d82002-05-28 01:36:45 +00001703 cancelpend_hdlr, tid);
1704 print_sched_event(cee, msg_buf);
1705 }
1706
1707 /* Thread doing the cancelling returns with success. */
njnd3040452003-05-19 15:04:06 +00001708 SET_PTHREQ_RETVAL(tid, 0);
sewardj20917d82002-05-28 01:36:45 +00001709
1710 /* Perhaps we can nuke the cancellee right now? */
jsgf855d93d2003-10-13 22:26:55 +00001711 if (!VG_(threads)[cee].cancel_ty) /* if PTHREAD_CANCEL_ASYNCHRONOUS */
1712 do__testcancel(cee);
sewardj20917d82002-05-28 01:36:45 +00001713}
1714
1715
1716static
1717void do_pthread_join ( ThreadId tid,
1718 ThreadId jee, void** thread_return )
1719{
1720 Char msg_buf[100];
1721 ThreadId i;
sewardje663cb92002-04-12 10:26:32 +00001722 /* jee, the joinee, is the thread specified as an arg in thread
1723 tid's call to pthread_join. So tid is the join-er. */
sewardjb48e5002002-05-13 00:16:03 +00001724 vg_assert(VG_(is_valid_tid)(tid));
sewardj018f7622002-05-15 21:13:39 +00001725 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001726
1727 if (jee == tid) {
njn25e49d8e72002-09-23 09:36:25 +00001728 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001729 "pthread_join: attempt to join to self");
njnd3040452003-05-19 15:04:06 +00001730 SET_PTHREQ_RETVAL(tid, EDEADLK); /* libc constant, not a kernel one */
jsgf855d93d2003-10-13 22:26:55 +00001731 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001732 return;
1733 }
1734
sewardj20917d82002-05-28 01:36:45 +00001735 /* Flush any completed pairs, so as to make sure what we're looking
1736 at is up-to-date. */
1737 maybe_rendezvous_joiners_and_joinees();
1738
1739 /* Is this a sane request? */
jsgf855d93d2003-10-13 22:26:55 +00001740 if ( ! VG_(is_valid_tid)(jee) ||
1741 VG_(threads)[jee].detached) {
sewardje663cb92002-04-12 10:26:32 +00001742 /* Invalid thread to join to. */
njn25e49d8e72002-09-23 09:36:25 +00001743 VG_(record_pthread_error)( tid,
jsgf855d93d2003-10-13 22:26:55 +00001744 "pthread_join: target thread does not exist, invalid, or detached");
1745 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardje663cb92002-04-12 10:26:32 +00001746 return;
1747 }
1748
sewardj20917d82002-05-28 01:36:45 +00001749 /* Is anyone else already in a join-wait for jee? */
1750 for (i = 1; i < VG_N_THREADS; i++) {
1751 if (i == tid) continue;
1752 if (VG_(threads)[i].status == VgTs_WaitJoinee
1753 && VG_(threads)[i].joiner_jee_tid == jee) {
1754 /* Someone already did join on this thread */
njn25e49d8e72002-09-23 09:36:25 +00001755 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001756 "pthread_join: another thread already "
1757 "in join-wait for target thread");
jsgf855d93d2003-10-13 22:26:55 +00001758 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
1759 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardj20917d82002-05-28 01:36:45 +00001760 return;
1761 }
sewardje663cb92002-04-12 10:26:32 +00001762 }
1763
sewardj20917d82002-05-28 01:36:45 +00001764 /* Mark this thread as waiting for the joinee. */
sewardj018f7622002-05-15 21:13:39 +00001765 VG_(threads)[tid].status = VgTs_WaitJoinee;
sewardj20917d82002-05-28 01:36:45 +00001766 VG_(threads)[tid].joiner_thread_return = thread_return;
1767 VG_(threads)[tid].joiner_jee_tid = jee;
1768
1769 /* Look for matching joiners and joinees and do the right thing. */
1770 maybe_rendezvous_joiners_and_joinees();
1771
1772 /* Return value is irrelevant since this thread becomes
1773 non-runnable. maybe_rendezvous_joiners_and_joinees() will cause it
1774 to return the right value when it resumes. */
1775
sewardj8937c812002-04-12 20:12:20 +00001776 if (VG_(clo_trace_sched)) {
sewardj20917d82002-05-28 01:36:45 +00001777 VG_(sprintf)(msg_buf,
1778 "wait for joinee %d (may already be ready)", jee);
sewardje663cb92002-04-12 10:26:32 +00001779 print_sched_event(tid, msg_buf);
1780 }
sewardje663cb92002-04-12 10:26:32 +00001781}
1782
1783
sewardj20917d82002-05-28 01:36:45 +00001784/* ( void* ): calling thread waits for joiner and returns the void* to
1785 it. This is one of two ways in which a thread can finally exit --
1786 the other is do__quit. */
sewardje663cb92002-04-12 10:26:32 +00001787static
sewardj20917d82002-05-28 01:36:45 +00001788void do__wait_joiner ( ThreadId tid, void* retval )
sewardje663cb92002-04-12 10:26:32 +00001789{
sewardj20917d82002-05-28 01:36:45 +00001790 Char msg_buf[100];
1791 vg_assert(VG_(is_valid_tid)(tid));
1792 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1793 if (VG_(clo_trace_sched)) {
1794 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001795 "do__wait_joiner(retval = %p) (non-detached thread exit)", retval);
sewardj20917d82002-05-28 01:36:45 +00001796 print_sched_event(tid, msg_buf);
1797 }
1798 VG_(threads)[tid].status = VgTs_WaitJoiner;
1799 VG_(threads)[tid].joinee_retval = retval;
1800 maybe_rendezvous_joiners_and_joinees();
1801}
1802
1803
1804/* ( no-args ): calling thread disappears from the system forever.
1805 Reclaim resources. */
1806static
1807void do__quit ( ThreadId tid )
1808{
1809 Char msg_buf[100];
1810 vg_assert(VG_(is_valid_tid)(tid));
1811 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1812 VG_(threads)[tid].status = VgTs_Empty; /* bye! */
jsgf855d93d2003-10-13 22:26:55 +00001813 cleanup_after_thread_exited ( tid, False );
sewardj20917d82002-05-28 01:36:45 +00001814 if (VG_(clo_trace_sched)) {
sewardj7989d0c2002-05-28 11:00:01 +00001815 VG_(sprintf)(msg_buf, "do__quit (detached thread exit)");
sewardj20917d82002-05-28 01:36:45 +00001816 print_sched_event(tid, msg_buf);
1817 }
jsgf855d93d2003-10-13 22:26:55 +00001818 maybe_rendezvous_joiners_and_joinees();
sewardj20917d82002-05-28 01:36:45 +00001819 /* Return value is irrelevant; this thread will not get
1820 rescheduled. */
1821}
1822
1823
1824/* Should never be entered. If it is, will be on the simulated
1825 CPU. */
1826static
1827void do__apply_in_new_thread_bogusRA ( void )
1828{
njne427a662002-10-02 11:08:25 +00001829 VG_(core_panic)("do__apply_in_new_thread_bogusRA");
sewardj20917d82002-05-28 01:36:45 +00001830}
1831
1832/* (Fn, Arg): Create a new thread and run Fn applied to Arg in it. Fn
1833 MUST NOT return -- ever. Eventually it will do either __QUIT or
1834 __WAIT_JOINER. Return the child tid to the parent. */
1835static
1836void do__apply_in_new_thread ( ThreadId parent_tid,
1837 void* (*fn)(void *),
1838 void* arg )
1839{
sewardje663cb92002-04-12 10:26:32 +00001840 Addr new_stack;
1841 UInt new_stk_szb;
1842 ThreadId tid;
1843 Char msg_buf[100];
1844
1845 /* Paranoia ... */
1846 vg_assert(sizeof(pthread_t) == sizeof(UInt));
1847
sewardj018f7622002-05-15 21:13:39 +00001848 vg_assert(VG_(threads)[parent_tid].status != VgTs_Empty);
sewardje663cb92002-04-12 10:26:32 +00001849
sewardj1e8cdc92002-04-18 11:37:52 +00001850 tid = vg_alloc_ThreadState();
sewardje663cb92002-04-12 10:26:32 +00001851
1852 /* If we've created the main thread's tid, we're in deep trouble :) */
sewardj6072c362002-04-19 14:40:57 +00001853 vg_assert(tid != 1);
sewardj018f7622002-05-15 21:13:39 +00001854 vg_assert(VG_(is_valid_or_empty_tid)(tid));
sewardje663cb92002-04-12 10:26:32 +00001855
sewardjc4a810d2002-11-13 22:25:51 +00001856 /* do this early, before the child gets any memory writes */
1857 VG_TRACK ( post_thread_create, parent_tid, tid );
1858
sewardjf6374322002-11-13 22:35:55 +00001859 /* Create new thread with default attrs:
1860 deferred cancellation, not detached
1861 */
1862 mostly_clear_thread_record(tid);
1863 VG_(threads)[tid].status = VgTs_Runnable;
1864
sewardje663cb92002-04-12 10:26:32 +00001865 /* Copy the parent's CPU state into the child's, in a roundabout
1866 way (via baseBlock). */
1867 VG_(load_thread_state)(parent_tid);
sewardjca340b32002-12-08 22:14:11 +00001868
1869 /* We inherit our parent's LDT. */
1870 if (VG_(threads)[parent_tid].ldt == NULL) {
1871 /* We hope this is the common case. */
1872 VG_(baseBlock)[VGOFF_(ldt)] = 0;
1873 } else {
1874 /* No luck .. we have to take a copy of the parent's. */
1875 VG_(threads)[tid].ldt
1876 = VG_(allocate_LDT_for_thread)( VG_(threads)[parent_tid].ldt );
1877 VG_(baseBlock)[VGOFF_(ldt)] = (UInt)VG_(threads)[tid].ldt;
1878 }
1879
fitzhardinge47735af2004-01-21 01:27:27 +00001880 /* Initialise the thread's TLS array */
1881 VG_(clear_TLS_for_thread)( VG_(threads)[tid].tls );
1882 VG_(baseBlock)[VGOFF_(tls)] = (UInt)VG_(threads)[tid].tls;
1883
sewardje663cb92002-04-12 10:26:32 +00001884 VG_(save_thread_state)(tid);
sewardjf6374322002-11-13 22:35:55 +00001885 vg_tid_last_in_baseBlock = tid;
sewardje663cb92002-04-12 10:26:32 +00001886
1887 /* Consider allocating the child a stack, if the one it already has
1888 is inadequate. */
sewardjbf290b92002-05-01 02:28:01 +00001889 new_stk_szb = VG_PTHREAD_STACK_MIN;
sewardje663cb92002-04-12 10:26:32 +00001890
sewardj018f7622002-05-15 21:13:39 +00001891 if (new_stk_szb > VG_(threads)[tid].stack_size) {
sewardje663cb92002-04-12 10:26:32 +00001892 /* Again, for good measure :) We definitely don't want to be
1893 allocating a stack for the main thread. */
sewardj6072c362002-04-19 14:40:57 +00001894 vg_assert(tid != 1);
sewardje663cb92002-04-12 10:26:32 +00001895 /* for now, we don't handle the case of anything other than
1896 assigning it for the first time. */
sewardj018f7622002-05-15 21:13:39 +00001897 vg_assert(VG_(threads)[tid].stack_size == 0);
1898 vg_assert(VG_(threads)[tid].stack_base == (Addr)NULL);
fitzhardinge98abfc72003-12-16 02:05:15 +00001899 new_stack = VG_(client_alloc)(0, new_stk_szb,
1900 VKI_PROT_READ | VKI_PROT_WRITE | VKI_PROT_EXEC,
1901 SF_STACK);
sewardj018f7622002-05-15 21:13:39 +00001902 VG_(threads)[tid].stack_base = new_stack;
1903 VG_(threads)[tid].stack_size = new_stk_szb;
1904 VG_(threads)[tid].stack_highest_word
sewardje663cb92002-04-12 10:26:32 +00001905 = new_stack + new_stk_szb
sewardj1e8cdc92002-04-18 11:37:52 +00001906 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB; /* -4 ??? */;
sewardje663cb92002-04-12 10:26:32 +00001907 }
sewardj1e8cdc92002-04-18 11:37:52 +00001908
njn25e49d8e72002-09-23 09:36:25 +00001909 /* Having got memory to hold the thread's stack:
1910 - set %esp as base + size
1911 - mark everything below %esp inaccessible
1912 - mark redzone at stack end inaccessible
1913 */
njnd3040452003-05-19 15:04:06 +00001914 SET_PTHREQ_ESP(tid, VG_(threads)[tid].stack_base
1915 + VG_(threads)[tid].stack_size
1916 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
sewardj1e8cdc92002-04-18 11:37:52 +00001917
njn25e49d8e72002-09-23 09:36:25 +00001918 VG_TRACK ( die_mem_stack, VG_(threads)[tid].stack_base,
1919 + new_stk_szb - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
1920 VG_TRACK ( ban_mem_stack, VG_(threads)[tid].m_esp,
1921 VG_AR_CLIENT_STACKBASE_REDZONE_SZB );
sewardje663cb92002-04-12 10:26:32 +00001922
njn25e49d8e72002-09-23 09:36:25 +00001923 /* push two args */
njnd3040452003-05-19 15:04:06 +00001924 SET_PTHREQ_ESP(tid, VG_(threads)[tid].m_esp - 8);
1925
njn25e49d8e72002-09-23 09:36:25 +00001926 VG_TRACK ( new_mem_stack, (Addr)VG_(threads)[tid].m_esp, 2 * 4 );
njn72718642003-07-24 08:45:32 +00001927 VG_TRACK ( pre_mem_write, Vg_CorePThread, tid, "new thread: stack",
njn25e49d8e72002-09-23 09:36:25 +00001928 (Addr)VG_(threads)[tid].m_esp, 2 * 4 );
1929
1930 /* push arg and (bogus) return address */
1931 * (UInt*)(VG_(threads)[tid].m_esp+4) = (UInt)arg;
sewardj20917d82002-05-28 01:36:45 +00001932 * (UInt*)(VG_(threads)[tid].m_esp)
1933 = (UInt)&do__apply_in_new_thread_bogusRA;
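   /* The child's simulated stack now looks like a normal C call frame
      on entry to fn:
          %esp   -> &do__apply_in_new_thread_bogusRA  (fake return address)
          %esp+4 -> arg                               (fn's single argument)  */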
sewardje663cb92002-04-12 10:26:32 +00001934
njn25e49d8e72002-09-23 09:36:25 +00001935 VG_TRACK ( post_mem_write, VG_(threads)[tid].m_esp, 2 * 4 );
sewardje663cb92002-04-12 10:26:32 +00001936
1937 /* this is where we start */
sewardj20917d82002-05-28 01:36:45 +00001938 VG_(threads)[tid].m_eip = (UInt)fn;
sewardje663cb92002-04-12 10:26:32 +00001939
sewardj8937c812002-04-12 20:12:20 +00001940 if (VG_(clo_trace_sched)) {
njn25e49d8e72002-09-23 09:36:25 +00001941 VG_(sprintf)(msg_buf, "new thread, created by %d", parent_tid );
sewardje663cb92002-04-12 10:26:32 +00001942 print_sched_event(tid, msg_buf);
1943 }
1944
sewardj018f7622002-05-15 21:13:39 +00001945 /* We inherit our parent's signal mask. */
1946 VG_(threads)[tid].sig_mask = VG_(threads)[parent_tid].sig_mask;
jsgf855d93d2003-10-13 22:26:55 +00001947
1948 /* Now that the signal mask is set up, create a proxy LWP for this thread */
1949 VG_(proxy_create)(tid);
1950
1951 /* Set the proxy's signal mask */
1952 VG_(proxy_setsigmask)(tid);
sewardjb48e5002002-05-13 00:16:03 +00001953
sewardj20917d82002-05-28 01:36:45 +00001954 /* return child's tid to parent */
njnd3040452003-05-19 15:04:06 +00001955 SET_PTHREQ_RETVAL(parent_tid, tid); /* success */
sewardje663cb92002-04-12 10:26:32 +00001956}
1957
1958
sewardj604ec3c2002-04-18 22:38:41 +00001959/* -----------------------------------------------------------
1960 MUTEXes
1961 -------------------------------------------------------- */
1962
sewardj604ec3c2002-04-18 22:38:41 +00001963/* pthread_mutex_t is a struct with at least 5 words:
sewardje663cb92002-04-12 10:26:32 +00001964 typedef struct
1965 {
1966 int __m_reserved; -- Reserved for future use
1967 int __m_count; -- Depth of recursive locking
1968 _pthread_descr __m_owner; -- Owner thread (if recursive or errcheck)
1969 int __m_kind; -- Mutex kind: fast, recursive or errcheck
1970 struct _pthread_fastlock __m_lock; -- Underlying fast lock
1971 } pthread_mutex_t;
sewardj604ec3c2002-04-18 22:38:41 +00001972
sewardj6072c362002-04-19 14:40:57 +00001973 #define PTHREAD_MUTEX_INITIALIZER \
1974 {0, 0, 0, PTHREAD_MUTEX_TIMED_NP, __LOCK_INITIALIZER}
1975 # define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \
1976 {0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, __LOCK_INITIALIZER}
1977 # define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP \
1978 {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, __LOCK_INITIALIZER}
1979 # define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
1980 {0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, __LOCK_INITIALIZER}
sewardj604ec3c2002-04-18 22:38:41 +00001981
sewardj6072c362002-04-19 14:40:57 +00001982 How we use it:
sewardj604ec3c2002-04-18 22:38:41 +00001983
sewardj6072c362002-04-19 14:40:57 +00001984 __m_kind never changes and indicates whether or not it is recursive.
1985
1986 __m_count indicates the lock count; if 0, the mutex is not owned by
1987 anybody.
1988
1989 __m_owner has a ThreadId value stuffed into it. We carefully arrange
1990 that ThreadId == 0 is invalid (VG_INVALID_THREADID), so that
1991 statically initialised mutexes correctly appear
1992 to belong to nobody.
1993
1994 In summary, a not-in-use mutex is distinguished by having __m_owner
1995 == 0 (VG_INVALID_THREADID) and __m_count == 0 too. If one of those
1996 conditions holds, the other should too.
1997
1998 There is no linked list of threads waiting for this mutex. Instead
1999 a thread in WaitMX state points at the mutex with its associated_mx
2000 field. This makes _unlock() inefficient, but keeps it simple to
2001 implement the right semantics vis-a-vis signals.
sewardje663cb92002-04-12 10:26:32 +00002002
sewardj604ec3c2002-04-18 22:38:41 +00002003 We don't have to deal with mutex initialisation; the client side
sewardj6072c362002-04-19 14:40:57 +00002004 deals with that for us.
2005*/
sewardje663cb92002-04-12 10:26:32 +00002006
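#if 0
/* Illustrative sketch only (never called; the helper name is made up):
   the not-in-use invariant described above, written as a predicate.
   Both conditions hold together for a free mutex. */
static Bool mutex_is_unheld_SKETCH ( pthread_mutex_t* mutex )
{
   return (ThreadId)mutex->__m_owner == VG_INVALID_THREADID
          && mutex->__m_count == 0;
}
#endif
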
sewardj3b5d8862002-04-20 13:53:23 +00002007/* Helper fns ... */
2008static
2009void release_one_thread_waiting_on_mutex ( pthread_mutex_t* mutex,
2010 Char* caller )
2011{
2012 Int i;
2013 Char msg_buf[100];
2014
2015 /* Find some arbitrary thread waiting on this mutex, and make it
2016 runnable. If none are waiting, mark the mutex as not held. */
2017 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00002018 if (VG_(threads)[i].status == VgTs_Empty)
sewardj3b5d8862002-04-20 13:53:23 +00002019 continue;
sewardj018f7622002-05-15 21:13:39 +00002020 if (VG_(threads)[i].status == VgTs_WaitMX
2021 && VG_(threads)[i].associated_mx == mutex)
sewardj3b5d8862002-04-20 13:53:23 +00002022 break;
2023 }
2024
sewardj0af43bc2002-10-22 04:30:35 +00002025 VG_TRACK( post_mutex_unlock, (ThreadId)mutex->__m_owner, mutex );
2026
sewardj3b5d8862002-04-20 13:53:23 +00002027 vg_assert(i <= VG_N_THREADS);
2028 if (i == VG_N_THREADS) {
2029 /* Nobody else is waiting on it. */
2030 mutex->__m_count = 0;
2031 mutex->__m_owner = VG_INVALID_THREADID;
2032 } else {
2033 /* Notionally transfer the hold to thread i, whose
2034 pthread_mutex_lock() call now returns with 0 (success). */
2035 /* The .count is already == 1. */
sewardj018f7622002-05-15 21:13:39 +00002036 vg_assert(VG_(threads)[i].associated_mx == mutex);
sewardj3b5d8862002-04-20 13:53:23 +00002037 mutex->__m_owner = (_pthread_descr)i;
sewardj018f7622002-05-15 21:13:39 +00002038 VG_(threads)[i].status = VgTs_Runnable;
2039 VG_(threads)[i].associated_mx = NULL;
sewardj5f07b662002-04-23 16:52:51 +00002040 /* m_edx already holds pth_mx_lock() success (0) */
sewardj3b5d8862002-04-20 13:53:23 +00002041
sewardj0af43bc2002-10-22 04:30:35 +00002042 VG_TRACK( post_mutex_lock, (ThreadId)i, mutex);
2043
sewardj3b5d8862002-04-20 13:53:23 +00002044 if (VG_(clo_trace_pthread_level) >= 1) {
2045 VG_(sprintf)(msg_buf, "%s mx %p: RESUME",
2046 caller, mutex );
2047 print_pthread_event(i, msg_buf);
2048 }
2049 }
2050}
2051
sewardje663cb92002-04-12 10:26:32 +00002052
2053static
sewardj30671ff2002-04-21 00:13:57 +00002054void do_pthread_mutex_lock( ThreadId tid,
2055 Bool is_trylock,
sewardj124ca2a2002-06-20 10:19:38 +00002056 pthread_mutex_t* mutex )
sewardje663cb92002-04-12 10:26:32 +00002057{
sewardj30671ff2002-04-21 00:13:57 +00002058 Char msg_buf[100];
2059 Char* caller
sewardj8ccc2be2002-05-10 20:26:37 +00002060 = is_trylock ? "pthread_mutex_trylock"
2061 : "pthread_mutex_lock ";
sewardje663cb92002-04-12 10:26:32 +00002062
sewardj604ec3c2002-04-18 22:38:41 +00002063 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj30671ff2002-04-21 00:13:57 +00002064 VG_(sprintf)(msg_buf, "%s mx %p ...", caller, mutex );
sewardj604ec3c2002-04-18 22:38:41 +00002065 print_pthread_event(tid, msg_buf);
2066 }
2067
2068 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002069 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002070 && VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00002071
2072 /* POSIX doesn't mandate this, but for sanity ... */
2073 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002074 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002075 "pthread_mutex_lock/trylock: mutex is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002076 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardje663cb92002-04-12 10:26:32 +00002077 return;
2078 }
2079
sewardj604ec3c2002-04-18 22:38:41 +00002080 /* More paranoia ... */
2081 switch (mutex->__m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002082# ifndef GLIBC_2_1
sewardj604ec3c2002-04-18 22:38:41 +00002083 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002084 case PTHREAD_MUTEX_ADAPTIVE_NP:
2085# endif
sewardja1679dd2002-05-10 22:31:40 +00002086# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002087 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002088# endif
sewardj604ec3c2002-04-18 22:38:41 +00002089 case PTHREAD_MUTEX_RECURSIVE_NP:
2090 case PTHREAD_MUTEX_ERRORCHECK_NP:
sewardj604ec3c2002-04-18 22:38:41 +00002091 if (mutex->__m_count >= 0) break;
2092 /* else fall thru */
2093 default:
njn25e49d8e72002-09-23 09:36:25 +00002094 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002095 "pthread_mutex_lock/trylock: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002096 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002097 return;
sewardje663cb92002-04-12 10:26:32 +00002098 }
2099
sewardj604ec3c2002-04-18 22:38:41 +00002100 if (mutex->__m_count > 0) {
fitzhardinge47735af2004-01-21 01:27:27 +00002101 if (!VG_(is_valid_tid)((ThreadId)mutex->__m_owner)) {
2102 VG_(record_pthread_error)( tid,
2103 "pthread_mutex_lock/trylock: mutex has invalid owner");
2104 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
2105 return;
2106 }
sewardjf8f819e2002-04-17 23:21:37 +00002107
2108 /* Someone has it already. */
sewardj604ec3c2002-04-18 22:38:41 +00002109 if ((ThreadId)mutex->__m_owner == tid) {
sewardjf8f819e2002-04-17 23:21:37 +00002110 /* It's locked -- by me! */
sewardj604ec3c2002-04-18 22:38:41 +00002111 if (mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
sewardjf8f819e2002-04-17 23:21:37 +00002112 /* return 0 (success). */
sewardj604ec3c2002-04-18 22:38:41 +00002113 mutex->__m_count++;
njnd3040452003-05-19 15:04:06 +00002114 SET_PTHREQ_RETVAL(tid, 0);
sewardj853f55d2002-04-26 00:27:53 +00002115 if (0)
2116 VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n",
2117 tid, mutex, mutex->__m_count);
sewardjf8f819e2002-04-17 23:21:37 +00002118 return;
2119 } else {
sewardj30671ff2002-04-21 00:13:57 +00002120 if (is_trylock)
njnd3040452003-05-19 15:04:06 +00002121 SET_PTHREQ_RETVAL(tid, EBUSY);
sewardj30671ff2002-04-21 00:13:57 +00002122 else
njnd3040452003-05-19 15:04:06 +00002123 SET_PTHREQ_RETVAL(tid, EDEADLK);
sewardjf8f819e2002-04-17 23:21:37 +00002124 return;
2125 }
2126 } else {
sewardj6072c362002-04-19 14:40:57 +00002127 /* Someone else has it; we have to wait. Mark ourselves
2128 thusly. */
sewardj05553872002-04-20 20:53:17 +00002129 /* GUARD: __m_count > 0 && __m_owner is valid */
sewardj30671ff2002-04-21 00:13:57 +00002130 if (is_trylock) {
2131 /* caller is polling; so return immediately. */
njnd3040452003-05-19 15:04:06 +00002132 SET_PTHREQ_RETVAL(tid, EBUSY);
sewardj30671ff2002-04-21 00:13:57 +00002133 } else {
sewardjdca84112002-11-13 22:29:34 +00002134 VG_TRACK ( pre_mutex_lock, tid, mutex );
2135
sewardj018f7622002-05-15 21:13:39 +00002136 VG_(threads)[tid].status = VgTs_WaitMX;
2137 VG_(threads)[tid].associated_mx = mutex;
njnd3040452003-05-19 15:04:06 +00002138 SET_PTHREQ_RETVAL(tid, 0); /* pth_mx_lock success value */
sewardj30671ff2002-04-21 00:13:57 +00002139 if (VG_(clo_trace_pthread_level) >= 1) {
2140 VG_(sprintf)(msg_buf, "%s mx %p: BLOCK",
2141 caller, mutex );
2142 print_pthread_event(tid, msg_buf);
2143 }
2144 }
sewardje663cb92002-04-12 10:26:32 +00002145 return;
2146 }
sewardjf8f819e2002-04-17 23:21:37 +00002147
sewardje663cb92002-04-12 10:26:32 +00002148 } else {
sewardj6072c362002-04-19 14:40:57 +00002149 /* Nobody owns it. Sanity check ... */
2150 vg_assert(mutex->__m_owner == VG_INVALID_THREADID);
sewardjdca84112002-11-13 22:29:34 +00002151
2152 VG_TRACK ( pre_mutex_lock, tid, mutex );
2153
sewardjf8f819e2002-04-17 23:21:37 +00002154 /* We get it! [for the first time]. */
sewardj604ec3c2002-04-18 22:38:41 +00002155 mutex->__m_count = 1;
2156 mutex->__m_owner = (_pthread_descr)tid;
njn25e49d8e72002-09-23 09:36:25 +00002157
sewardje663cb92002-04-12 10:26:32 +00002158 /* return 0 (success). */
njnd3040452003-05-19 15:04:06 +00002159 SET_PTHREQ_RETVAL(tid, 0);
sewardjf8f819e2002-04-17 23:21:37 +00002160
njnd3040452003-05-19 15:04:06 +00002161 VG_TRACK( post_mutex_lock, tid, mutex);
2162 }
sewardje663cb92002-04-12 10:26:32 +00002163}
2164
2165
2166static
2167void do_pthread_mutex_unlock ( ThreadId tid,
sewardj124ca2a2002-06-20 10:19:38 +00002168 pthread_mutex_t* mutex )
sewardje663cb92002-04-12 10:26:32 +00002169{
sewardj3b5d8862002-04-20 13:53:23 +00002170 Char msg_buf[100];
sewardje663cb92002-04-12 10:26:32 +00002171
sewardj45b4b372002-04-16 22:50:32 +00002172 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj3b5d8862002-04-20 13:53:23 +00002173 VG_(sprintf)(msg_buf, "pthread_mutex_unlock mx %p ...", mutex );
sewardj8937c812002-04-12 20:12:20 +00002174 print_pthread_event(tid, msg_buf);
2175 }
2176
sewardj604ec3c2002-04-18 22:38:41 +00002177 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002178 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002179 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj604ec3c2002-04-18 22:38:41 +00002180
2181 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002182 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002183 "pthread_mutex_unlock: mutex is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002184 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002185 return;
2186 }
2187
sewardjd8acdf22002-11-13 21:57:52 +00002188 /* If this was locked before the dawn of time, pretend it was
2189 locked now so that it balances with unlocks */
2190 if (mutex->__m_kind & VG_PTHREAD_PREHISTORY) {
2191 mutex->__m_kind &= ~VG_PTHREAD_PREHISTORY;
sewardjdca84112002-11-13 22:29:34 +00002192 VG_TRACK( pre_mutex_lock, (ThreadId)mutex->__m_owner, mutex );
sewardjd8acdf22002-11-13 21:57:52 +00002193 VG_TRACK( post_mutex_lock, (ThreadId)mutex->__m_owner, mutex );
2194 }
2195
sewardj604ec3c2002-04-18 22:38:41 +00002196 /* More paranoia ... */
2197 switch (mutex->__m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002198# ifndef GLIBC_2_1
sewardj604ec3c2002-04-18 22:38:41 +00002199 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002200 case PTHREAD_MUTEX_ADAPTIVE_NP:
2201# endif
sewardja1679dd2002-05-10 22:31:40 +00002202# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002203 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002204# endif
sewardj604ec3c2002-04-18 22:38:41 +00002205 case PTHREAD_MUTEX_RECURSIVE_NP:
2206 case PTHREAD_MUTEX_ERRORCHECK_NP:
sewardj604ec3c2002-04-18 22:38:41 +00002207 if (mutex->__m_count >= 0) break;
2208 /* else fall thru */
2209 default:
njn25e49d8e72002-09-23 09:36:25 +00002210 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002211 "pthread_mutex_unlock: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002212 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002213 return;
2214 }
sewardje663cb92002-04-12 10:26:32 +00002215
2216 /* Barf if we don't currently hold the mutex. */
sewardj4dced352002-06-04 22:54:20 +00002217 if (mutex->__m_count == 0) {
2218 /* nobody holds it */
njn25e49d8e72002-09-23 09:36:25 +00002219 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002220 "pthread_mutex_unlock: mutex is not locked");
njnd3040452003-05-19 15:04:06 +00002221 SET_PTHREQ_RETVAL(tid, EPERM);
sewardj4dced352002-06-04 22:54:20 +00002222 return;
2223 }
2224
2225 if ((ThreadId)mutex->__m_owner != tid) {
2226 /* we don't hold it */
njn25e49d8e72002-09-23 09:36:25 +00002227 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002228 "pthread_mutex_unlock: mutex is locked by a different thread");
njnd3040452003-05-19 15:04:06 +00002229 SET_PTHREQ_RETVAL(tid, EPERM);
sewardje663cb92002-04-12 10:26:32 +00002230 return;
2231 }
2232
sewardjf8f819e2002-04-17 23:21:37 +00002233 /* If it's a multiply-locked recursive mutex, just decrement the
2234 lock count and return. */
sewardj604ec3c2002-04-18 22:38:41 +00002235 if (mutex->__m_count > 1) {
2236 vg_assert(mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
2237 mutex->__m_count --;
njnd3040452003-05-19 15:04:06 +00002238 SET_PTHREQ_RETVAL(tid, 0); /* success */
sewardjf8f819e2002-04-17 23:21:37 +00002239 return;
2240 }
2241
sewardj604ec3c2002-04-18 22:38:41 +00002242 /* Now we're sure it is locked exactly once, and by the thread who
sewardjf8f819e2002-04-17 23:21:37 +00002243 is now doing an unlock on it. */
sewardj604ec3c2002-04-18 22:38:41 +00002244 vg_assert(mutex->__m_count == 1);
sewardj6072c362002-04-19 14:40:57 +00002245 vg_assert((ThreadId)mutex->__m_owner == tid);
sewardjf8f819e2002-04-17 23:21:37 +00002246
sewardj3b5d8862002-04-20 13:53:23 +00002247 /* Release at most one thread waiting on this mutex. */
2248 release_one_thread_waiting_on_mutex ( mutex, "pthread_mutex_unlock" );
sewardje663cb92002-04-12 10:26:32 +00002249
sewardj3b5d8862002-04-20 13:53:23 +00002250 /* Our (tid's) pth_unlock() returns with 0 (success). */
njnd3040452003-05-19 15:04:06 +00002251 SET_PTHREQ_RETVAL(tid, 0); /* Success. */
sewardje663cb92002-04-12 10:26:32 +00002252}
2253
2254
sewardj6072c362002-04-19 14:40:57 +00002255/* -----------------------------------------------------------
2256 CONDITION VARIABLES
2257 -------------------------------------------------------- */
sewardje663cb92002-04-12 10:26:32 +00002258
sewardj6072c362002-04-19 14:40:57 +00002259/* The relevant native types are as follows:
2260 (copied from /usr/include/bits/pthreadtypes.h)
sewardj77e466c2002-04-14 02:29:29 +00002261
sewardj6072c362002-04-19 14:40:57 +00002262 -- Conditions (not abstract because of PTHREAD_COND_INITIALIZER)
2263 typedef struct
2264 {
2265 struct _pthread_fastlock __c_lock; -- Protect against concurrent access
2266 _pthread_descr __c_waiting; -- Threads waiting on this condition
2267 } pthread_cond_t;
sewardj77e466c2002-04-14 02:29:29 +00002268
sewardj6072c362002-04-19 14:40:57 +00002269 -- Attribute for conditionally variables.
2270 typedef struct
2271 {
2272 int __dummy;
2273 } pthread_condattr_t;
sewardj77e466c2002-04-14 02:29:29 +00002274
sewardj6072c362002-04-19 14:40:57 +00002275 #define PTHREAD_COND_INITIALIZER {__LOCK_INITIALIZER, 0}
sewardj77e466c2002-04-14 02:29:29 +00002276
sewardj3b5d8862002-04-20 13:53:23 +00002277 We don't use any fields of pthread_cond_t for anything at all.
2278 Only the identity of the CVs is important.
sewardj6072c362002-04-19 14:40:57 +00002279
2280 Linux pthreads supports no attributes on condition variables, so we
sewardj3b5d8862002-04-20 13:53:23 +00002281 don't need to think too hard there. */
sewardj6072c362002-04-19 14:40:57 +00002282
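#if 0
/* Illustrative sketch only (never called; the helper name is made up):
   in this scheme "thread i is blocked in pthread_cond_wait on cond" is
   recorded purely in the per-thread state -- the pthread_cond_t itself
   stores nothing.  This is exactly the test the release code below uses
   to find waiters. */
static Bool thread_is_waiting_on_cv_SKETCH ( ThreadId i, pthread_cond_t* cond )
{
   return VG_(threads)[i].status == VgTs_WaitCV
          && VG_(threads)[i].associated_cv == cond;
}
#endif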
sewardj77e466c2002-04-14 02:29:29 +00002283
sewardj5f07b662002-04-23 16:52:51 +00002284static
2285void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid )
2286{
2287 Char msg_buf[100];
2288 pthread_mutex_t* mx;
2289 pthread_cond_t* cv;
2290
sewardjb48e5002002-05-13 00:16:03 +00002291 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002292 && VG_(threads)[tid].status == VgTs_WaitCV
2293 && VG_(threads)[tid].awaken_at != 0xFFFFFFFF);
2294 mx = VG_(threads)[tid].associated_mx;
sewardj5f07b662002-04-23 16:52:51 +00002295 vg_assert(mx != NULL);
sewardj018f7622002-05-15 21:13:39 +00002296 cv = VG_(threads)[tid].associated_cv;
sewardj5f07b662002-04-23 16:52:51 +00002297 vg_assert(cv != NULL);
2298
2299 if (mx->__m_owner == VG_INVALID_THREADID) {
2300 /* Currently unheld; hand it out to thread tid. */
2301 vg_assert(mx->__m_count == 0);
sewardj018f7622002-05-15 21:13:39 +00002302 VG_(threads)[tid].status = VgTs_Runnable;
njnd3040452003-05-19 15:04:06 +00002303 SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
sewardj018f7622002-05-15 21:13:39 +00002304 VG_(threads)[tid].associated_cv = NULL;
2305 VG_(threads)[tid].associated_mx = NULL;
sewardj5f07b662002-04-23 16:52:51 +00002306 mx->__m_owner = (_pthread_descr)tid;
2307 mx->__m_count = 1;
2308
sewardj0af43bc2002-10-22 04:30:35 +00002309 VG_TRACK( post_mutex_lock, tid, mx );
2310
sewardj5f07b662002-04-23 16:52:51 +00002311 if (VG_(clo_trace_pthread_level) >= 1) {
sewardjc3bd5f52002-05-01 03:24:23 +00002312 VG_(sprintf)(msg_buf,
2313 "pthread_cond_timedwai cv %p: TIMEOUT with mx %p",
2314 cv, mx );
sewardj5f07b662002-04-23 16:52:51 +00002315 print_pthread_event(tid, msg_buf);
2316 }
2317 } else {
2318 /* Currently held. Make thread tid be blocked on it. */
2319 vg_assert(mx->__m_count > 0);
sewardjdca84112002-11-13 22:29:34 +00002320 VG_TRACK( pre_mutex_lock, tid, mx );
2321
sewardj018f7622002-05-15 21:13:39 +00002322 VG_(threads)[tid].status = VgTs_WaitMX;
njnd3040452003-05-19 15:04:06 +00002323 SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
sewardj018f7622002-05-15 21:13:39 +00002324 VG_(threads)[tid].associated_cv = NULL;
2325 VG_(threads)[tid].associated_mx = mx;
sewardj5f07b662002-04-23 16:52:51 +00002326 if (VG_(clo_trace_pthread_level) >= 1) {
2327 VG_(sprintf)(msg_buf,
2328 "pthread_cond_timedwai cv %p: TIMEOUT -> BLOCK for mx %p",
2329 cv, mx );
2330 print_pthread_event(tid, msg_buf);
2331 }
sewardj5f07b662002-04-23 16:52:51 +00002332 }
2333}
2334
2335
sewardj3b5d8862002-04-20 13:53:23 +00002336static
2337void release_N_threads_waiting_on_cond ( pthread_cond_t* cond,
2338 Int n_to_release,
2339 Char* caller )
2340{
2341 Int i;
2342 Char msg_buf[100];
2343 pthread_mutex_t* mx;
2344
2345 while (True) {
2346 if (n_to_release == 0)
2347 return;
2348
2349 /* Find a thread waiting on this CV. */
2350 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00002351 if (VG_(threads)[i].status == VgTs_Empty)
sewardj3b5d8862002-04-20 13:53:23 +00002352 continue;
sewardj018f7622002-05-15 21:13:39 +00002353 if (VG_(threads)[i].status == VgTs_WaitCV
2354 && VG_(threads)[i].associated_cv == cond)
sewardj3b5d8862002-04-20 13:53:23 +00002355 break;
2356 }
2357 vg_assert(i <= VG_N_THREADS);
2358
2359 if (i == VG_N_THREADS) {
2360 /* Nobody else is waiting on it. */
2361 return;
2362 }
2363
sewardj018f7622002-05-15 21:13:39 +00002364 mx = VG_(threads)[i].associated_mx;
sewardj3b5d8862002-04-20 13:53:23 +00002365 vg_assert(mx != NULL);
2366
sewardjdca84112002-11-13 22:29:34 +00002367 VG_TRACK( pre_mutex_lock, i, mx );
2368
sewardj3b5d8862002-04-20 13:53:23 +00002369 if (mx->__m_owner == VG_INVALID_THREADID) {
2370 /* Currently unheld; hand it out to thread i. */
2371 vg_assert(mx->__m_count == 0);
sewardj018f7622002-05-15 21:13:39 +00002372 VG_(threads)[i].status = VgTs_Runnable;
2373 VG_(threads)[i].associated_cv = NULL;
2374 VG_(threads)[i].associated_mx = NULL;
sewardj3b5d8862002-04-20 13:53:23 +00002375 mx->__m_owner = (_pthread_descr)i;
2376 mx->__m_count = 1;
sewardj5f07b662002-04-23 16:52:51 +00002377 /* .m_edx already holds pth_cond_wait success value (0) */
sewardj3b5d8862002-04-20 13:53:23 +00002378
sewardj0af43bc2002-10-22 04:30:35 +00002379 VG_TRACK( post_mutex_lock, i, mx );
2380
sewardj3b5d8862002-04-20 13:53:23 +00002381 if (VG_(clo_trace_pthread_level) >= 1) {
2382 VG_(sprintf)(msg_buf, "%s cv %p: RESUME with mx %p",
2383 caller, cond, mx );
2384 print_pthread_event(i, msg_buf);
2385 }
2386
2387 } else {
2388 /* Currently held. Make thread i be blocked on it. */
sewardj5f07b662002-04-23 16:52:51 +00002389 vg_assert(mx->__m_count > 0);
sewardj018f7622002-05-15 21:13:39 +00002390 VG_(threads)[i].status = VgTs_WaitMX;
2391 VG_(threads)[i].associated_cv = NULL;
2392 VG_(threads)[i].associated_mx = mx;
njnd3040452003-05-19 15:04:06 +00002393 SET_PTHREQ_RETVAL(i, 0); /* pth_cond_wait success value */
sewardj3b5d8862002-04-20 13:53:23 +00002394
2395 if (VG_(clo_trace_pthread_level) >= 1) {
2396 VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
2397 caller, cond, mx );
2398 print_pthread_event(i, msg_buf);
2399 }
2400
2401 }
jsgf855d93d2003-10-13 22:26:55 +00002402
sewardj3b5d8862002-04-20 13:53:23 +00002403 n_to_release--;
2404 }
2405}
2406
2407
2408static
2409void do_pthread_cond_wait ( ThreadId tid,
2410 pthread_cond_t *cond,
sewardj5f07b662002-04-23 16:52:51 +00002411 pthread_mutex_t *mutex,
2412 UInt ms_end )
sewardj3b5d8862002-04-20 13:53:23 +00002413{
2414 Char msg_buf[100];
2415
sewardj5f07b662002-04-23 16:52:51 +00002416 /* If ms_end == 0xFFFFFFFF, wait forever (no timeout). Otherwise,
2417      ms_end is the absolute time, on the millisecond timer, at which to give up. */
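   /* (For reference: the client-request dispatcher below passes
      0xFFFFFFFF for VG_USERREQ__PTHREAD_COND_WAIT and the client's
      millisecond deadline, arg[3], for
      VG_USERREQ__PTHREAD_COND_TIMEDWAIT.) */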
2418
sewardj3b5d8862002-04-20 13:53:23 +00002419 /* pre: mutex should be a valid mutex and owned by tid. */
2420 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj5f07b662002-04-23 16:52:51 +00002421 VG_(sprintf)(msg_buf, "pthread_cond_wait cv %p, mx %p, end %d ...",
2422 cond, mutex, ms_end );
sewardj3b5d8862002-04-20 13:53:23 +00002423 print_pthread_event(tid, msg_buf);
2424 }
2425
2426 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002427 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002428 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj3b5d8862002-04-20 13:53:23 +00002429
2430 if (mutex == NULL || cond == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002431 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002432 "pthread_cond_wait/timedwait: cond or mutex is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002433 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002434 return;
2435 }
2436
2437 /* More paranoia ... */
2438 switch (mutex->__m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002439# ifndef GLIBC_2_1
sewardj3b5d8862002-04-20 13:53:23 +00002440 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002441 case PTHREAD_MUTEX_ADAPTIVE_NP:
2442# endif
sewardja1679dd2002-05-10 22:31:40 +00002443# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002444 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002445# endif
sewardj3b5d8862002-04-20 13:53:23 +00002446 case PTHREAD_MUTEX_RECURSIVE_NP:
2447 case PTHREAD_MUTEX_ERRORCHECK_NP:
sewardj3b5d8862002-04-20 13:53:23 +00002448 if (mutex->__m_count >= 0) break;
2449 /* else fall thru */
2450 default:
njn25e49d8e72002-09-23 09:36:25 +00002451 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002452 "pthread_cond_wait/timedwait: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002453 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002454 return;
2455 }
2456
2457 /* Barf if we don't currently hold the mutex. */
2458 if (mutex->__m_count == 0 /* nobody holds it */
2459 || (ThreadId)mutex->__m_owner != tid /* we don't hold it */) {
njn25e49d8e72002-09-23 09:36:25 +00002460 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002461 "pthread_cond_wait/timedwait: mutex is unlocked "
2462 "or is locked but not owned by thread");
jsgf855d93d2003-10-13 22:26:55 +00002463 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002464 return;
2465 }
2466
2467 /* Queue ourselves on the condition. */
sewardj018f7622002-05-15 21:13:39 +00002468 VG_(threads)[tid].status = VgTs_WaitCV;
2469 VG_(threads)[tid].associated_cv = cond;
2470 VG_(threads)[tid].associated_mx = mutex;
2471 VG_(threads)[tid].awaken_at = ms_end;
jsgf855d93d2003-10-13 22:26:55 +00002472 if (ms_end != 0xFFFFFFFF)
2473 VG_(add_timeout)(tid, ms_end);
sewardj3b5d8862002-04-20 13:53:23 +00002474
2475 if (VG_(clo_trace_pthread_level) >= 1) {
2476 VG_(sprintf)(msg_buf,
2477 "pthread_cond_wait cv %p, mx %p: BLOCK",
2478 cond, mutex );
2479 print_pthread_event(tid, msg_buf);
2480 }
2481
2482 /* Release the mutex. */
2483 release_one_thread_waiting_on_mutex ( mutex, "pthread_cond_wait " );
2484}
2485
2486
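/* Implements both pthread_cond_signal and pthread_cond_broadcast:
   release either one waiter or (up to) all VG_N_THREADS waiters on
   'cond', depending on 'broadcast'. */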
2487static
2488void do_pthread_cond_signal_or_broadcast ( ThreadId tid,
2489 Bool broadcast,
2490 pthread_cond_t *cond )
2491{
2492 Char msg_buf[100];
2493 Char* caller
2494 = broadcast ? "pthread_cond_broadcast"
2495 : "pthread_cond_signal ";
2496
2497 if (VG_(clo_trace_pthread_level) >= 2) {
2498 VG_(sprintf)(msg_buf, "%s cv %p ...",
2499 caller, cond );
2500 print_pthread_event(tid, msg_buf);
2501 }
2502
2503 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002504 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002505 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj3b5d8862002-04-20 13:53:23 +00002506
2507 if (cond == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002508 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002509 "pthread_cond_signal/broadcast: cond is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002510 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002511 return;
2512 }
2513
2514 release_N_threads_waiting_on_cond (
2515 cond,
2516 broadcast ? VG_N_THREADS : 1,
2517 caller
2518 );
2519
njnd3040452003-05-19 15:04:06 +00002520 SET_PTHREQ_RETVAL(tid, 0); /* success */
sewardj3b5d8862002-04-20 13:53:23 +00002521}
2522
sewardj77e466c2002-04-14 02:29:29 +00002523
sewardj5f07b662002-04-23 16:52:51 +00002524/* -----------------------------------------------------------
2525 THREAD SPECIFIC DATA
2526 -------------------------------------------------------- */
2527
2528static __inline__
2529Bool is_valid_key ( ThreadKey k )
2530{
2531 /* k unsigned; hence no < 0 check */
2532 if (k >= VG_N_THREAD_KEYS) return False;
2533 if (!vg_thread_keys[k].inuse) return False;
2534 return True;
2535}
2536
sewardj00a66b12002-10-12 16:42:35 +00002537
2538/* Return in %EDX a value of 1 if the key is valid, else 0. */
2539static
2540void do_pthread_key_validate ( ThreadId tid,
2541 pthread_key_t key )
2542{
2543 Char msg_buf[100];
2544
2545 if (VG_(clo_trace_pthread_level) >= 1) {
2546 VG_(sprintf)(msg_buf, "pthread_key_validate key %p",
2547 key );
2548 print_pthread_event(tid, msg_buf);
2549 }
2550
2551 vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
2552 vg_assert(VG_(is_valid_tid)(tid)
2553 && VG_(threads)[tid].status == VgTs_Runnable);
2554
2555 if (is_valid_key((ThreadKey)key)) {
njnd3040452003-05-19 15:04:06 +00002556 SET_PTHREQ_RETVAL(tid, 1);
sewardj00a66b12002-10-12 16:42:35 +00002557 } else {
njnd3040452003-05-19 15:04:06 +00002558 SET_PTHREQ_RETVAL(tid, 0);
sewardj00a66b12002-10-12 16:42:35 +00002559 }
2560}
2561
2562
sewardj5f07b662002-04-23 16:52:51 +00002563static
2564void do_pthread_key_create ( ThreadId tid,
2565 pthread_key_t* key,
2566 void (*destructor)(void*) )
2567{
2568 Int i;
2569 Char msg_buf[100];
2570
2571 if (VG_(clo_trace_pthread_level) >= 1) {
2572 VG_(sprintf)(msg_buf, "pthread_key_create *key %p, destr %p",
2573 key, destructor );
2574 print_pthread_event(tid, msg_buf);
2575 }
2576
2577 vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
sewardjb48e5002002-05-13 00:16:03 +00002578 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002579 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002580
2581 for (i = 0; i < VG_N_THREAD_KEYS; i++)
2582 if (!vg_thread_keys[i].inuse)
2583 break;
2584
2585 if (i == VG_N_THREAD_KEYS) {
jsgf855d93d2003-10-13 22:26:55 +00002586 VG_(message)(Vg_UserMsg, "pthread_key_create() asked for too many keys (more than %d): increase VG_N_THREAD_KEYS and recompile Valgrind.",
2587 VG_N_THREAD_KEYS);
2588 SET_PTHREQ_RETVAL(tid, EAGAIN);
2589 return;
sewardj5f07b662002-04-23 16:52:51 +00002590 }
2591
sewardj870497a2002-05-29 01:06:47 +00002592 vg_thread_keys[i].inuse = True;
2593 vg_thread_keys[i].destructor = destructor;
sewardjc3bd5f52002-05-01 03:24:23 +00002594
sewardj5a3798b2002-06-04 23:24:22 +00002595    /* check key for addressability */
njn72718642003-07-24 08:45:32 +00002596 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_key_create: key",
njn25e49d8e72002-09-23 09:36:25 +00002597 (Addr)key, sizeof(pthread_key_t));
sewardj5f07b662002-04-23 16:52:51 +00002598 *key = i;
njn25e49d8e72002-09-23 09:36:25 +00002599 VG_TRACK( post_mem_write, (Addr)key, sizeof(pthread_key_t) );
sewardjc3bd5f52002-05-01 03:24:23 +00002600
njnd3040452003-05-19 15:04:06 +00002601 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002602}
2603
2604
2605static
2606void do_pthread_key_delete ( ThreadId tid, pthread_key_t key )
2607{
2608 Char msg_buf[100];
2609 if (VG_(clo_trace_pthread_level) >= 1) {
2610 VG_(sprintf)(msg_buf, "pthread_key_delete key %d",
2611 key );
2612 print_pthread_event(tid, msg_buf);
2613 }
2614
sewardjb48e5002002-05-13 00:16:03 +00002615 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002616 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002617
2618 if (!is_valid_key(key)) {
njn25e49d8e72002-09-23 09:36:25 +00002619 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002620 "pthread_key_delete: key is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002621 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj5f07b662002-04-23 16:52:51 +00002622 return;
2623 }
2624
2625 vg_thread_keys[key].inuse = False;
sewardj648b3152002-12-09 19:07:59 +00002626 vg_thread_keys[key].destructor = NULL;
njnd3040452003-05-19 15:04:06 +00002627 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002628}
2629
2630
sewardj00a66b12002-10-12 16:42:35 +00002631/* Get the .specific_ptr for a thread. Return 1 if the thread-slot
2632 isn't in use, so that client-space can scan all thread slots. 1
2633 cannot be confused with NULL or a legitimately-aligned specific_ptr
2634 value. */
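/* Hence the value handed back below is one of:
      1                      -- thread slot not in use (skip it);
      NULL                   -- thread exists but has no specifics block;
      a word-aligned pointer -- the thread's specifics block. */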
sewardj5f07b662002-04-23 16:52:51 +00002635static
sewardj00a66b12002-10-12 16:42:35 +00002636void do_pthread_getspecific_ptr ( ThreadId tid )
sewardj5f07b662002-04-23 16:52:51 +00002637{
sewardj00a66b12002-10-12 16:42:35 +00002638 void** specifics_ptr;
2639 Char msg_buf[100];
2640
jsgf855d93d2003-10-13 22:26:55 +00002641 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj00a66b12002-10-12 16:42:35 +00002642 VG_(sprintf)(msg_buf, "pthread_getspecific_ptr" );
sewardj5f07b662002-04-23 16:52:51 +00002643 print_pthread_event(tid, msg_buf);
2644 }
2645
sewardj00a66b12002-10-12 16:42:35 +00002646 vg_assert(VG_(is_valid_or_empty_tid)(tid));
sewardj5f07b662002-04-23 16:52:51 +00002647
sewardj00a66b12002-10-12 16:42:35 +00002648 if (VG_(threads)[tid].status == VgTs_Empty) {
njnd3040452003-05-19 15:04:06 +00002649 SET_PTHREQ_RETVAL(tid, 1);
sewardj5f07b662002-04-23 16:52:51 +00002650 return;
2651 }
2652
sewardj00a66b12002-10-12 16:42:35 +00002653 specifics_ptr = VG_(threads)[tid].specifics_ptr;
2654 vg_assert(specifics_ptr == NULL
2655 || IS_ALIGNED4_ADDR(specifics_ptr));
2656
njnd3040452003-05-19 15:04:06 +00002657 SET_PTHREQ_RETVAL(tid, (UInt)specifics_ptr);
sewardj5f07b662002-04-23 16:52:51 +00002658}
2659
2660
2661static
sewardj00a66b12002-10-12 16:42:35 +00002662void do_pthread_setspecific_ptr ( ThreadId tid, void** ptr )
sewardj5f07b662002-04-23 16:52:51 +00002663{
2664 Char msg_buf[100];
2665 if (VG_(clo_trace_pthread_level) >= 1) {
sewardj00a66b12002-10-12 16:42:35 +00002666 VG_(sprintf)(msg_buf, "pthread_setspecific_ptr ptr %p",
2667 ptr );
sewardj5f07b662002-04-23 16:52:51 +00002668 print_pthread_event(tid, msg_buf);
2669 }
2670
sewardjb48e5002002-05-13 00:16:03 +00002671 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002672 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002673
sewardj00a66b12002-10-12 16:42:35 +00002674 VG_(threads)[tid].specifics_ptr = ptr;
njnd3040452003-05-19 15:04:06 +00002675 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002676}
2677
2678
sewardj870497a2002-05-29 01:06:47 +00002679/* Helper for calling destructors at thread exit. If key is valid,
2680 copy the thread's specific value into cu->arg and put the *key*'s
2681 destructor fn address in cu->fn. Then return 0 to the caller.
2682 Otherwise return non-zero to the caller. */
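/* A rough sketch of the intended client-side use (not the actual
   library code): at thread exit, for each key k, issue this request
   with a local CleanupEntry cu; if it returns 0 and cu.fn and cu.arg
   are both non-NULL, call cu.fn(cu.arg), as POSIX requires for
   thread-specific-data destructors. */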
2683static
2684void do__get_key_destr_and_spec ( ThreadId tid,
2685 pthread_key_t key,
2686 CleanupEntry* cu )
2687{
2688 Char msg_buf[100];
jsgf855d93d2003-10-13 22:26:55 +00002689 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj870497a2002-05-29 01:06:47 +00002690 VG_(sprintf)(msg_buf,
2691 "get_key_destr_and_arg (key = %d)", key );
2692 print_pthread_event(tid, msg_buf);
2693 }
2694 vg_assert(VG_(is_valid_tid)(tid));
2695 vg_assert(key >= 0 && key < VG_N_THREAD_KEYS);
njn25e49d8e72002-09-23 09:36:25 +00002696
sewardj870497a2002-05-29 01:06:47 +00002697 if (!vg_thread_keys[key].inuse) {
njnd3040452003-05-19 15:04:06 +00002698 SET_PTHREQ_RETVAL(tid, -1);
sewardj870497a2002-05-29 01:06:47 +00002699 return;
2700 }
njn72718642003-07-24 08:45:32 +00002701 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "get_key_destr_and_spec: cu",
2702 (Addr)cu, sizeof(CleanupEntry) );
sewardj00a66b12002-10-12 16:42:35 +00002703
sewardj870497a2002-05-29 01:06:47 +00002704 cu->fn = vg_thread_keys[key].destructor;
sewardj00a66b12002-10-12 16:42:35 +00002705 if (VG_(threads)[tid].specifics_ptr == NULL) {
2706 cu->arg = NULL;
2707 } else {
njn72718642003-07-24 08:45:32 +00002708 VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
sewardj00a66b12002-10-12 16:42:35 +00002709 "get_key_destr_and_spec: key",
2710 (Addr)(&VG_(threads)[tid].specifics_ptr[key]),
2711 sizeof(void*) );
2712 cu->arg = VG_(threads)[tid].specifics_ptr[key];
2713 }
2714
njn25e49d8e72002-09-23 09:36:25 +00002715 VG_TRACK( post_mem_write, (Addr)cu, sizeof(CleanupEntry) );
njnd3040452003-05-19 15:04:06 +00002716 SET_PTHREQ_RETVAL(tid, 0);
sewardj870497a2002-05-29 01:06:47 +00002717}
2718
2719
sewardjb48e5002002-05-13 00:16:03 +00002720/* ---------------------------------------------------
2721 SIGNALS
2722 ------------------------------------------------ */
2723
2724/* See comment in vg_libpthread.c:pthread_sigmask() regarding
sewardj018f7622002-05-15 21:13:39 +00002725 deliberate confusion of types sigset_t and vki_sigset_t. Return 0
2726 for OK and 1 for some kind of addressing error, which the
2727 vg_libpthread.c routine turns into return values 0 and EFAULT
2728 respectively. */
sewardjb48e5002002-05-13 00:16:03 +00002729static
2730void do_pthread_sigmask ( ThreadId tid,
sewardj018f7622002-05-15 21:13:39 +00002731 Int vki_how,
sewardjb48e5002002-05-13 00:16:03 +00002732 vki_ksigset_t* newmask,
2733 vki_ksigset_t* oldmask )
2734{
2735 Char msg_buf[100];
2736 if (VG_(clo_trace_pthread_level) >= 1) {
2737 VG_(sprintf)(msg_buf,
sewardj018f7622002-05-15 21:13:39 +00002738 "pthread_sigmask vki_how %d, newmask %p, oldmask %p",
2739 vki_how, newmask, oldmask );
sewardjb48e5002002-05-13 00:16:03 +00002740 print_pthread_event(tid, msg_buf);
2741 }
2742
2743 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002744 && VG_(threads)[tid].status == VgTs_Runnable);
sewardjb48e5002002-05-13 00:16:03 +00002745
njn25e49d8e72002-09-23 09:36:25 +00002746 if (newmask)
njn72718642003-07-24 08:45:32 +00002747 VG_TRACK( pre_mem_read, Vg_CorePThread, tid, "pthread_sigmask: newmask",
njn25e49d8e72002-09-23 09:36:25 +00002748 (Addr)newmask, sizeof(vki_ksigset_t));
2749 if (oldmask)
njn72718642003-07-24 08:45:32 +00002750 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_sigmask: oldmask",
njn25e49d8e72002-09-23 09:36:25 +00002751 (Addr)oldmask, sizeof(vki_ksigset_t));
sewardjb48e5002002-05-13 00:16:03 +00002752
sewardj018f7622002-05-15 21:13:39 +00002753 VG_(do_pthread_sigmask_SCSS_upd) ( tid, vki_how, newmask, oldmask );
sewardjb48e5002002-05-13 00:16:03 +00002754
njn25e49d8e72002-09-23 09:36:25 +00002755 if (oldmask)
2756 VG_TRACK( post_mem_write, (Addr)oldmask, sizeof(vki_ksigset_t) );
sewardj3a951cf2002-05-15 22:25:47 +00002757
sewardj018f7622002-05-15 21:13:39 +00002758 /* Success. */
njnd3040452003-05-19 15:04:06 +00002759 SET_PTHREQ_RETVAL(tid, 0);
sewardjb48e5002002-05-13 00:16:03 +00002760}
2761
2762
2763static
sewardj018f7622002-05-15 21:13:39 +00002764void do_pthread_kill ( ThreadId tid, /* me */
2765 ThreadId thread, /* thread to signal */
2766 Int sig )
2767{
2768 Char msg_buf[100];
2769
2770 if (VG_(clo_trace_signals) || VG_(clo_trace_pthread_level) >= 1) {
2771 VG_(sprintf)(msg_buf,
2772 "pthread_kill thread %d, signo %d",
2773 thread, sig );
2774 print_pthread_event(tid, msg_buf);
2775 }
2776
2777 vg_assert(VG_(is_valid_tid)(tid)
2778 && VG_(threads)[tid].status == VgTs_Runnable);
2779
sewardj4dced352002-06-04 22:54:20 +00002780 if (!VG_(is_valid_tid)(thread)) {
njn25e49d8e72002-09-23 09:36:25 +00002781 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002782 "pthread_kill: invalid target thread");
jsgf855d93d2003-10-13 22:26:55 +00002783 SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
2784 return;
2785 }
2786
2787 if (sig == 0) {
2788      /* Signal 0 just tests for the target thread's existence; no signal is actually delivered. */
2789 SET_PTHREQ_RETVAL(tid, 0);
sewardj018f7622002-05-15 21:13:39 +00002790 return;
2791 }
2792
2793 if (sig < 1 || sig > VKI_KNSIG) {
jsgf855d93d2003-10-13 22:26:55 +00002794 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj018f7622002-05-15 21:13:39 +00002795 return;
2796 }
2797
2798 VG_(send_signal_to_thread)( thread, sig );
njnd3040452003-05-19 15:04:06 +00002799 SET_PTHREQ_RETVAL(tid, 0);
sewardjb48e5002002-05-13 00:16:03 +00002800}
2801
2802
sewardj2cb00342002-06-28 01:46:26 +00002803/* -----------------------------------------------------------
2804 FORK HANDLERS.
2805 -------------------------------------------------------- */
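/* The fork-handler stack is a fixed-size array, vg_fhstack[], of
   ForkHandlerEntry records; vg_fhstack_used says how many entries are
   in use.  The four requests below let the client library read and
   write both the count and individual entries, which (judging from the
   trace strings) is how pthread_atfork() prepare/parent/child handlers
   are stored and retrieved. */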
2806
2807static
2808void do__set_fhstack_used ( ThreadId tid, Int n )
2809{
2810 Char msg_buf[100];
2811 if (VG_(clo_trace_sched)) {
2812 VG_(sprintf)(msg_buf, "set_fhstack_used to %d", n );
2813 print_pthread_event(tid, msg_buf);
2814 }
2815
2816 vg_assert(VG_(is_valid_tid)(tid)
2817 && VG_(threads)[tid].status == VgTs_Runnable);
2818
2819 if (n >= 0 && n < VG_N_FORKHANDLERSTACK) {
2820 vg_fhstack_used = n;
njnd3040452003-05-19 15:04:06 +00002821 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002822 } else {
njnd3040452003-05-19 15:04:06 +00002823 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002824 }
2825}
2826
2827
2828static
2829void do__get_fhstack_used ( ThreadId tid )
2830{
2831 Int n;
2832 Char msg_buf[100];
2833 if (VG_(clo_trace_sched)) {
2834 VG_(sprintf)(msg_buf, "get_fhstack_used" );
2835 print_pthread_event(tid, msg_buf);
2836 }
2837
2838 vg_assert(VG_(is_valid_tid)(tid)
2839 && VG_(threads)[tid].status == VgTs_Runnable);
2840
2841 n = vg_fhstack_used;
2842 vg_assert(n >= 0 && n < VG_N_FORKHANDLERSTACK);
njnd3040452003-05-19 15:04:06 +00002843 SET_PTHREQ_RETVAL(tid, n);
sewardj2cb00342002-06-28 01:46:26 +00002844}
2845
2846static
2847void do__set_fhstack_entry ( ThreadId tid, Int n, ForkHandlerEntry* fh )
2848{
2849 Char msg_buf[100];
2850 if (VG_(clo_trace_sched)) {
2851 VG_(sprintf)(msg_buf, "set_fhstack_entry %d to %p", n, fh );
2852 print_pthread_event(tid, msg_buf);
2853 }
2854
2855 vg_assert(VG_(is_valid_tid)(tid)
2856 && VG_(threads)[tid].status == VgTs_Runnable);
njn72718642003-07-24 08:45:32 +00002857 VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
njn25e49d8e72002-09-23 09:36:25 +00002858 "pthread_atfork: prepare/parent/child",
2859 (Addr)fh, sizeof(ForkHandlerEntry));
sewardj2cb00342002-06-28 01:46:26 +00002860
njn25e49d8e72002-09-23 09:36:25 +00002861 if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
njnd3040452003-05-19 15:04:06 +00002862 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002863 return;
2864 }
2865
2866 vg_fhstack[n] = *fh;
njnd3040452003-05-19 15:04:06 +00002867 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002868}
2869
2870
2871static
2872void do__get_fhstack_entry ( ThreadId tid, Int n, /*OUT*/
2873 ForkHandlerEntry* fh )
2874{
2875 Char msg_buf[100];
2876 if (VG_(clo_trace_sched)) {
2877 VG_(sprintf)(msg_buf, "get_fhstack_entry %d", n );
2878 print_pthread_event(tid, msg_buf);
2879 }
2880
2881 vg_assert(VG_(is_valid_tid)(tid)
2882 && VG_(threads)[tid].status == VgTs_Runnable);
njn72718642003-07-24 08:45:32 +00002883 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "fork: prepare/parent/child",
njn25e49d8e72002-09-23 09:36:25 +00002884 (Addr)fh, sizeof(ForkHandlerEntry));
sewardj2cb00342002-06-28 01:46:26 +00002885
njn25e49d8e72002-09-23 09:36:25 +00002886 if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
njnd3040452003-05-19 15:04:06 +00002887 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002888 return;
2889 }
2890
2891 *fh = vg_fhstack[n];
njnd3040452003-05-19 15:04:06 +00002892 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002893
njn25e49d8e72002-09-23 09:36:25 +00002894 VG_TRACK( post_mem_write, (Addr)fh, sizeof(ForkHandlerEntry) );
sewardj2cb00342002-06-28 01:46:26 +00002895}
2896
njnd3040452003-05-19 15:04:06 +00002897/* ---------------------------------------------------------------------
2898 Specifying shadow register values
2899 ------------------------------------------------------------------ */
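/* On x86/Linux a syscall's result is returned in %EAX, and the first
   syscall argument (e.g. the status passed to exit()) is in %EBX;
   hence the choice of shadow registers below. */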
2900
2901void VG_(set_return_from_syscall_shadow) ( ThreadId tid, UInt ret_shadow )
2902{
2903 VG_(set_thread_shadow_archreg)(tid, R_EAX, ret_shadow);
2904}
2905
2906UInt VG_(get_exit_status_shadow) ( void )
2907{
2908 return VG_(get_shadow_archreg)(R_EBX);
2909}
2910
sewardj2cb00342002-06-28 01:46:26 +00002911
sewardje663cb92002-04-12 10:26:32 +00002912/* ---------------------------------------------------------------------
sewardj124ca2a2002-06-20 10:19:38 +00002913 Handle client requests.
sewardje663cb92002-04-12 10:26:32 +00002914 ------------------------------------------------------------------ */
2915
sewardj124ca2a2002-06-20 10:19:38 +00002916/* Do a client request for the thread tid. After the request, tid may
2917 or may not still be runnable; if not, the scheduler will have to
2918 choose a new thread to run.
2919*/
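/* The client places a pointer to an argument block in %EAX: arg[0] is
   the request number and arg[1..4] are its parameters.  Results go
   back via the SET_*_RETVAL macros (into %EDX, per the comment on
   do_pthread_key_validate above). */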
sewardje663cb92002-04-12 10:26:32 +00002920static
sewardj124ca2a2002-06-20 10:19:38 +00002921void do_client_request ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00002922{
sewardj124ca2a2002-06-20 10:19:38 +00002923 UInt* arg = (UInt*)(VG_(threads)[tid].m_eax);
2924 UInt req_no = arg[0];
2925
fitzhardinge98abfc72003-12-16 02:05:15 +00002926 if (0)
2927 VG_(printf)("req no = 0x%x\n", req_no);
sewardje663cb92002-04-12 10:26:32 +00002928 switch (req_no) {
2929
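      /* CLIENT_CALL[0123]: call a client-supplied function taking 0..3
         word-sized arguments and return its result to the client. */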
njn3e884182003-04-15 13:03:23 +00002930 case VG_USERREQ__CLIENT_CALL0: {
2931 UInt (*f)(void) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002932 if (f == NULL)
2933 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL: func=%p\n", f);
2934 else
2935 SET_CLCALL_RETVAL(tid, f ( ), (Addr)f);
njn3e884182003-04-15 13:03:23 +00002936 break;
2937 }
2938 case VG_USERREQ__CLIENT_CALL1: {
2939 UInt (*f)(UInt) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002940 if (f == NULL)
2941 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL: func=%p\n", f);
2942 else
2943 SET_CLCALL_RETVAL(tid, f ( arg[2] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002944 break;
2945 }
2946 case VG_USERREQ__CLIENT_CALL2: {
2947 UInt (*f)(UInt, UInt) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002948 if (f == NULL)
2949 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL: func=%p\n", f);
2950 else
2951 SET_CLCALL_RETVAL(tid, f ( arg[2], arg[3] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002952 break;
2953 }
2954 case VG_USERREQ__CLIENT_CALL3: {
2955 UInt (*f)(UInt, UInt, UInt) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002956 if (f == NULL)
2957 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL: func=%p\n", f);
2958 else
2959 SET_CLCALL_RETVAL(tid, f ( arg[2], arg[3], arg[4] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002960 break;
2961 }
2962
nethercote7cc9c232004-01-21 15:08:04 +00002963 /* Note: for tools that replace malloc() et al, we want to call
njn3e884182003-04-15 13:03:23 +00002964 the replacement versions. For those that don't, we want to call
2965 VG_(cli_malloc)() et al. We do this by calling SK_(malloc)(), which
nethercote3ced0e32004-01-26 14:50:45 +00002966      malloc-replacing tools must override; the default definition of
2967      SK_(malloc)() simply calls VG_(cli_malloc)(). */
njn3e884182003-04-15 13:03:23 +00002968
2969 /* Note: for MALLOC and FREE, must set the appropriate "lock"... see
2970 the comment in vg_defaults.c/SK_(malloc)() for why. */
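      /* (Presumably the "lock" referred to is the
         VG_(sk_malloc_called_by_scheduler) flag set and cleared around
         the calls below.) */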
sewardj124ca2a2002-06-20 10:19:38 +00002971 case VG_USERREQ__MALLOC:
njn3e884182003-04-15 13:03:23 +00002972 VG_(sk_malloc_called_by_scheduler) = True;
njnd3040452003-05-19 15:04:06 +00002973 SET_PTHREQ_RETVAL(
njn72718642003-07-24 08:45:32 +00002974 tid, (UInt)SK_(malloc) ( arg[1] )
sewardj124ca2a2002-06-20 10:19:38 +00002975 );
njn3e884182003-04-15 13:03:23 +00002976 VG_(sk_malloc_called_by_scheduler) = False;
sewardj124ca2a2002-06-20 10:19:38 +00002977 break;
2978
2979 case VG_USERREQ__FREE:
njn3e884182003-04-15 13:03:23 +00002980 VG_(sk_malloc_called_by_scheduler) = True;
njn72718642003-07-24 08:45:32 +00002981 SK_(free) ( (void*)arg[1] );
njn3e884182003-04-15 13:03:23 +00002982 VG_(sk_malloc_called_by_scheduler) = False;
njnd3040452003-05-19 15:04:06 +00002983 SET_PTHREQ_RETVAL(tid, 0); /* irrelevant */
sewardj124ca2a2002-06-20 10:19:38 +00002984 break;
2985
sewardj124ca2a2002-06-20 10:19:38 +00002986 case VG_USERREQ__PTHREAD_GET_THREADID:
njnd3040452003-05-19 15:04:06 +00002987 SET_PTHREQ_RETVAL(tid, tid);
sewardj124ca2a2002-06-20 10:19:38 +00002988 break;
2989
2990 case VG_USERREQ__RUNNING_ON_VALGRIND:
njnd3040452003-05-19 15:04:06 +00002991 SET_CLREQ_RETVAL(tid, 1);
sewardj124ca2a2002-06-20 10:19:38 +00002992 break;
2993
2994 case VG_USERREQ__GET_PTHREAD_TRACE_LEVEL:
njnd3040452003-05-19 15:04:06 +00002995 SET_PTHREQ_RETVAL(tid, VG_(clo_trace_pthread_level));
sewardj124ca2a2002-06-20 10:19:38 +00002996 break;
2997
2998 case VG_USERREQ__READ_MILLISECOND_TIMER:
njnd3040452003-05-19 15:04:06 +00002999 SET_PTHREQ_RETVAL(tid, VG_(read_millisecond_timer)());
sewardj124ca2a2002-06-20 10:19:38 +00003000 break;
3001
3002 /* Some of these may make thread tid non-runnable, but the
3003 scheduler checks for that on return from this function. */
3004 case VG_USERREQ__PTHREAD_MUTEX_LOCK:
3005 do_pthread_mutex_lock( tid, False, (void *)(arg[1]) );
3006 break;
3007
3008 case VG_USERREQ__PTHREAD_MUTEX_TRYLOCK:
3009 do_pthread_mutex_lock( tid, True, (void *)(arg[1]) );
3010 break;
3011
3012 case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
3013 do_pthread_mutex_unlock( tid, (void *)(arg[1]) );
3014 break;
3015
sewardj00a66b12002-10-12 16:42:35 +00003016 case VG_USERREQ__PTHREAD_GETSPECIFIC_PTR:
3017 do_pthread_getspecific_ptr ( tid );
sewardj124ca2a2002-06-20 10:19:38 +00003018 break;
3019
3020 case VG_USERREQ__SET_CANCELTYPE:
3021 do__set_canceltype ( tid, arg[1] );
3022 break;
3023
3024 case VG_USERREQ__CLEANUP_PUSH:
3025 do__cleanup_push ( tid, (CleanupEntry*)(arg[1]) );
3026 break;
3027
3028 case VG_USERREQ__CLEANUP_POP:
3029 do__cleanup_pop ( tid, (CleanupEntry*)(arg[1]) );
3030 break;
3031
3032 case VG_USERREQ__TESTCANCEL:
3033 do__testcancel ( tid );
3034 break;
3035
sewardje663cb92002-04-12 10:26:32 +00003036 case VG_USERREQ__PTHREAD_JOIN:
3037 do_pthread_join( tid, arg[1], (void**)(arg[2]) );
3038 break;
3039
sewardj3b5d8862002-04-20 13:53:23 +00003040 case VG_USERREQ__PTHREAD_COND_WAIT:
3041 do_pthread_cond_wait( tid,
3042 (pthread_cond_t *)(arg[1]),
sewardj5f07b662002-04-23 16:52:51 +00003043 (pthread_mutex_t *)(arg[2]),
3044 0xFFFFFFFF /* no timeout */ );
3045 break;
3046
3047 case VG_USERREQ__PTHREAD_COND_TIMEDWAIT:
3048 do_pthread_cond_wait( tid,
3049 (pthread_cond_t *)(arg[1]),
3050 (pthread_mutex_t *)(arg[2]),
3051 arg[3] /* timeout millisecond point */ );
sewardj3b5d8862002-04-20 13:53:23 +00003052 break;
3053
3054 case VG_USERREQ__PTHREAD_COND_SIGNAL:
3055 do_pthread_cond_signal_or_broadcast(
3056 tid,
3057 False, /* signal, not broadcast */
3058 (pthread_cond_t *)(arg[1]) );
3059 break;
3060
3061 case VG_USERREQ__PTHREAD_COND_BROADCAST:
3062 do_pthread_cond_signal_or_broadcast(
3063 tid,
3064 True, /* broadcast, not signal */
3065 (pthread_cond_t *)(arg[1]) );
3066 break;
3067
sewardj00a66b12002-10-12 16:42:35 +00003068 case VG_USERREQ__PTHREAD_KEY_VALIDATE:
3069 do_pthread_key_validate ( tid,
3070 (pthread_key_t)(arg[1]) );
3071 break;
3072
sewardj5f07b662002-04-23 16:52:51 +00003073 case VG_USERREQ__PTHREAD_KEY_CREATE:
3074 do_pthread_key_create ( tid,
3075 (pthread_key_t*)(arg[1]),
3076 (void(*)(void*))(arg[2]) );
3077 break;
3078
3079 case VG_USERREQ__PTHREAD_KEY_DELETE:
3080 do_pthread_key_delete ( tid,
3081 (pthread_key_t)(arg[1]) );
3082 break;
3083
sewardj00a66b12002-10-12 16:42:35 +00003084 case VG_USERREQ__PTHREAD_SETSPECIFIC_PTR:
3085 do_pthread_setspecific_ptr ( tid,
3086 (void**)(arg[1]) );
sewardj5f07b662002-04-23 16:52:51 +00003087 break;
3088
sewardjb48e5002002-05-13 00:16:03 +00003089 case VG_USERREQ__PTHREAD_SIGMASK:
3090 do_pthread_sigmask ( tid,
3091 arg[1],
3092 (vki_ksigset_t*)(arg[2]),
3093 (vki_ksigset_t*)(arg[3]) );
3094 break;
3095
sewardj018f7622002-05-15 21:13:39 +00003096 case VG_USERREQ__PTHREAD_KILL:
3097 do_pthread_kill ( tid, arg[1], arg[2] );
3098 break;
3099
sewardjff42d1d2002-05-22 13:17:31 +00003100 case VG_USERREQ__PTHREAD_YIELD:
3101 do_pthread_yield ( tid );
sewardj18a62ff2002-07-12 22:30:51 +00003102 /* On return from do_client_request(), the scheduler will
3103 select a new thread to run. */
sewardjff42d1d2002-05-22 13:17:31 +00003104 break;
sewardj018f7622002-05-15 21:13:39 +00003105
sewardj7989d0c2002-05-28 11:00:01 +00003106 case VG_USERREQ__SET_CANCELSTATE:
3107 do__set_cancelstate ( tid, arg[1] );
3108 break;
3109
sewardj7989d0c2002-05-28 11:00:01 +00003110 case VG_USERREQ__SET_OR_GET_DETACH:
3111 do__set_or_get_detach ( tid, arg[1], arg[2] );
3112 break;
3113
3114 case VG_USERREQ__SET_CANCELPEND:
3115 do__set_cancelpend ( tid, arg[1], (void(*)(void*))arg[2] );
3116 break;
3117
3118 case VG_USERREQ__WAIT_JOINER:
3119 do__wait_joiner ( tid, (void*)arg[1] );
3120 break;
3121
3122 case VG_USERREQ__QUIT:
3123 do__quit ( tid );
3124 break;
3125
3126 case VG_USERREQ__APPLY_IN_NEW_THREAD:
3127 do__apply_in_new_thread ( tid, (void*(*)(void*))arg[1],
3128 (void*)arg[2] );
3129 break;
3130
sewardj870497a2002-05-29 01:06:47 +00003131 case VG_USERREQ__GET_KEY_D_AND_S:
3132 do__get_key_destr_and_spec ( tid,
3133 (pthread_key_t)arg[1],
3134 (CleanupEntry*)arg[2] );
3135 break;
3136
sewardjef037c72002-05-30 00:40:03 +00003137 case VG_USERREQ__NUKE_OTHER_THREADS:
3138 VG_(nuke_all_threads_except) ( tid );
njnd3040452003-05-19 15:04:06 +00003139 SET_PTHREQ_RETVAL(tid, 0);
sewardjef037c72002-05-30 00:40:03 +00003140 break;
3141
sewardj4dced352002-06-04 22:54:20 +00003142 case VG_USERREQ__PTHREAD_ERROR:
njn25e49d8e72002-09-23 09:36:25 +00003143 VG_(record_pthread_error)( tid, (Char*)(arg[1]) );
njnd3040452003-05-19 15:04:06 +00003144 SET_PTHREQ_RETVAL(tid, 0);
sewardj4dced352002-06-04 22:54:20 +00003145 break;
3146
sewardj2cb00342002-06-28 01:46:26 +00003147 case VG_USERREQ__SET_FHSTACK_USED:
3148 do__set_fhstack_used( tid, (Int)(arg[1]) );
3149 break;
3150
3151 case VG_USERREQ__GET_FHSTACK_USED:
3152 do__get_fhstack_used( tid );
3153 break;
3154
3155 case VG_USERREQ__SET_FHSTACK_ENTRY:
3156 do__set_fhstack_entry( tid, (Int)(arg[1]),
3157 (ForkHandlerEntry*)(arg[2]) );
3158 break;
3159
3160 case VG_USERREQ__GET_FHSTACK_ENTRY:
3161 do__get_fhstack_entry( tid, (Int)(arg[1]),
3162 (ForkHandlerEntry*)(arg[2]) );
3163 break;
3164
sewardj77e466c2002-04-14 02:29:29 +00003165 case VG_USERREQ__SIGNAL_RETURNS:
3166 handle_signal_return(tid);
3167 break;
fitzhardinge98abfc72003-12-16 02:05:15 +00003168
3169
3170 case VG_USERREQ__GET_SIGRT_MIN:
3171 SET_PTHREQ_RETVAL(tid, VG_(sig_rtmin));
3172 break;
3173
3174 case VG_USERREQ__GET_SIGRT_MAX:
3175 SET_PTHREQ_RETVAL(tid, VG_(sig_rtmax));
3176 break;
3177
3178 case VG_USERREQ__ALLOC_RTSIG:
3179 SET_PTHREQ_RETVAL(tid, VG_(sig_alloc_rtsig)((Int)arg[1]));
3180 break;
3181
fitzhardinge39de4b42003-10-31 07:12:21 +00003182 case VG_USERREQ__PRINTF: {
3183 int count =
3184 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (va_list)arg[2] );
3185 SET_CLREQ_RETVAL( tid, count );
3186 break; }
3187
fitzhardinge98abfc72003-12-16 02:05:15 +00003188
fitzhardinge39de4b42003-10-31 07:12:21 +00003189 case VG_USERREQ__INTERNAL_PRINTF: {
3190 int count =
3191 VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (va_list)arg[2] );
3192 SET_CLREQ_RETVAL( tid, count );
3193 break; }
3194
3195 case VG_USERREQ__PRINTF_BACKTRACE: {
3196 ExeContext *e = VG_(get_ExeContext)( tid );
3197 int count =
3198 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (va_list)arg[2] );
3199 VG_(mini_stack_dump)(e->eips, VG_(clo_backtrace_size));
3200 SET_CLREQ_RETVAL( tid, count );
3201 break; }
3202
3203 case VG_USERREQ__INTERNAL_PRINTF_BACKTRACE: {
3204 ExeContext *e = VG_(get_ExeContext)( tid );
3205 int count =
3206 VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (va_list)arg[2] );
3207 VG_(mini_stack_dump)(e->eips, VG_(clo_backtrace_size));
3208 SET_CLREQ_RETVAL( tid, count );
3209 break; }
3210
fitzhardinge98abfc72003-12-16 02:05:15 +00003211 case VG_USERREQ__REGISTER_LIBC_FREERES:
3212 VG_(__libc_freeres_wrapper) = arg[1];
3213 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
3214 break;
3215
3216 case VG_USERREQ__GET_MALLOCFUNCS: {
3217 struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];
3218
3219 info->sk_malloc = (Addr)SK_(malloc);
3220 info->sk_calloc = (Addr)SK_(calloc);
3221 info->sk_realloc = (Addr)SK_(realloc);
3222 info->sk_memalign = (Addr)SK_(memalign);
3223 info->sk___builtin_new = (Addr)SK_(__builtin_new);
3224 info->sk___builtin_vec_new = (Addr)SK_(__builtin_vec_new);
3225 info->sk_free = (Addr)SK_(free);
3226 info->sk___builtin_delete = (Addr)SK_(__builtin_delete);
3227 info->sk___builtin_vec_delete = (Addr)SK_(__builtin_vec_delete);
3228
3229 info->arena_payload_szB = (Addr)VG_(arena_payload_szB);
3230
3231 info->clo_sloppy_malloc = VG_(clo_sloppy_malloc);
3232 info->clo_trace_malloc = VG_(clo_trace_malloc);
3233
3234 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
3235
3236 break;
3237 }
3238
3239 case VG_USERREQ__REGISTER_REDIRECT_SYM: {
3240 VG_(add_redirect_sym)((const Char *)arg[1], (const Char *)arg[2],
3241 (const Char *)arg[3], (const Char *)arg[4]);
3242 break;
3243 }
3244
3245 case VG_USERREQ__REGISTER_REDIRECT_ADDR: {
3246 VG_(add_redirect_addr)((const Char *)arg[1], (const Char *)arg[2],
3247 (Addr)arg[3]);
3248 break;
3249 }
3250
njn25e49d8e72002-09-23 09:36:25 +00003251 /* Requests from the client program */
3252
3253 case VG_USERREQ__DISCARD_TRANSLATIONS:
3254 if (VG_(clo_verbosity) > 2)
3255 VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
3256 " addr %p, len %d\n",
3257 (void*)arg[1], arg[2] );
3258
sewardj97ad5522003-05-04 12:32:56 +00003259 VG_(invalidate_translations)( arg[1], arg[2], True );
njn25e49d8e72002-09-23 09:36:25 +00003260
njnd3040452003-05-19 15:04:06 +00003261 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
njn25e49d8e72002-09-23 09:36:25 +00003262 break;
3263
njn47363ab2003-04-21 13:24:40 +00003264 case VG_USERREQ__COUNT_ERRORS:
njnd3040452003-05-19 15:04:06 +00003265 SET_CLREQ_RETVAL( tid, VG_(n_errs_found) );
njn47363ab2003-04-21 13:24:40 +00003266 break;
3267
sewardje663cb92002-04-12 10:26:32 +00003268 default:
njn25e49d8e72002-09-23 09:36:25 +00003269 if (VG_(needs).client_requests) {
sewardj34042512002-10-22 04:14:35 +00003270 UInt ret;
3271
njn25e49d8e72002-09-23 09:36:25 +00003272 if (VG_(clo_verbosity) > 2)
fitzhardinge98abfc72003-12-16 02:05:15 +00003273 VG_(printf)("client request: code %x, addr %p, len %d\n",
njn25e49d8e72002-09-23 09:36:25 +00003274 arg[0], (void*)arg[1], arg[2] );
3275
njn72718642003-07-24 08:45:32 +00003276 if (SK_(handle_client_request) ( tid, arg, &ret ))
njnd3040452003-05-19 15:04:06 +00003277 SET_CLREQ_RETVAL(tid, ret);
njn25e49d8e72002-09-23 09:36:25 +00003278 } else {
sewardj34042512002-10-22 04:14:35 +00003279 static Bool whined = False;
3280
3281 if (!whined) {
nethercote7cc9c232004-01-21 15:08:04 +00003282            // Allow for requests handled in the core but defined by tools;
njnd7994182003-10-02 13:44:04 +00003283            //   such requests have 0 in both of their two high bytes.
3284 Char c1 = (arg[0] >> 24) & 0xff;
3285 Char c2 = (arg[0] >> 16) & 0xff;
3286 if (c1 == 0) c1 = '_';
3287 if (c2 == 0) c2 = '_';
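            /* Illustrative example: a request code of
               ('M'<<24) + ('C'<<16) + 0x101 would be reported below as
               "MC+0x101". */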
sewardj34042512002-10-22 04:14:35 +00003288 VG_(message)(Vg_UserMsg, "Warning:\n"
njnd7994182003-10-02 13:44:04 +00003289 " unhandled client request: 0x%x (%c%c+0x%x). Perhaps\n"
3290 " VG_(needs).client_requests should be set?\n",
3291 arg[0], c1, c2, arg[0] & 0xffff);
sewardj34042512002-10-22 04:14:35 +00003292 whined = True;
3293 }
njn25e49d8e72002-09-23 09:36:25 +00003294 }
sewardje663cb92002-04-12 10:26:32 +00003295 break;
3296 }
3297}
3298
3299
sewardj6072c362002-04-19 14:40:57 +00003300/* ---------------------------------------------------------------------
3301 Sanity checking.
3302 ------------------------------------------------------------------ */
3303
3304/* Internal consistency checks on the sched/pthread structures. */
3305static
3306void scheduler_sanity ( void )
3307{
sewardj3b5d8862002-04-20 13:53:23 +00003308 pthread_mutex_t* mx;
3309 pthread_cond_t* cv;
sewardj6072c362002-04-19 14:40:57 +00003310 Int i;
jsgf855d93d2003-10-13 22:26:55 +00003311 struct timeout* top;
3312 UInt lasttime = 0;
3313
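   /* Check that the timeout list is sorted by non-decreasing wakeup
      time and refers only to plausible thread slots. */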
3314 for(top = timeouts; top != NULL; top = top->next) {
3315 vg_assert(top->time >= lasttime);
3316 vg_assert(VG_(is_valid_or_empty_tid)(top->tid));
3317
3318#if 0
3319 /* assert timeout entry is either stale, or associated with a
3320 thread in the right state
3321
3322 XXX disable for now - can be stale, but times happen to match
3323 */
3324 vg_assert(VG_(threads)[top->tid].awaken_at != top->time ||
3325 VG_(threads)[top->tid].status == VgTs_Sleeping ||
3326 VG_(threads)[top->tid].status == VgTs_WaitCV);
3327#endif
3328
3329 lasttime = top->time;
3330 }
sewardj5f07b662002-04-23 16:52:51 +00003331
sewardj6072c362002-04-19 14:40:57 +00003332 /* VG_(printf)("scheduler_sanity\n"); */
3333 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00003334 mx = VG_(threads)[i].associated_mx;
3335 cv = VG_(threads)[i].associated_cv;
3336 if (VG_(threads)[i].status == VgTs_WaitMX) {
sewardjbf290b92002-05-01 02:28:01 +00003337 /* If we're waiting on a MX: (1) the mx is not null, (2, 3)
3338 it's actually held by someone, since otherwise this thread
3339 is deadlocked, (4) the mutex's owner is not us, since
3340 otherwise this thread is also deadlocked. The logic in
3341 do_pthread_mutex_lock rejects attempts by a thread to lock
3342 a (non-recursive) mutex which it already owns.
sewardj05553872002-04-20 20:53:17 +00003343
sewardjbf290b92002-05-01 02:28:01 +00003344 (2) has been seen to fail sometimes. I don't know why.
3345 Possibly to do with signals. */
sewardj3b5d8862002-04-20 13:53:23 +00003346 vg_assert(cv == NULL);
sewardj05553872002-04-20 20:53:17 +00003347 /* 1 */ vg_assert(mx != NULL);
3348 /* 2 */ vg_assert(mx->__m_count > 0);
sewardjb48e5002002-05-13 00:16:03 +00003349 /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__m_owner));
sewardj05bcdcb2003-05-18 10:05:38 +00003350 /* 4 */ vg_assert((UInt)i != (ThreadId)mx->__m_owner);
sewardj3b5d8862002-04-20 13:53:23 +00003351 } else
sewardj018f7622002-05-15 21:13:39 +00003352 if (VG_(threads)[i].status == VgTs_WaitCV) {
sewardj3b5d8862002-04-20 13:53:23 +00003353 vg_assert(cv != NULL);
3354 vg_assert(mx != NULL);
sewardj6072c362002-04-19 14:40:57 +00003355 } else {
sewardj05553872002-04-20 20:53:17 +00003356 /* Unfortunately these don't hold true when a sighandler is
3357 running. To be fixed. */
3358 /* vg_assert(cv == NULL); */
3359 /* vg_assert(mx == NULL); */
sewardj6072c362002-04-19 14:40:57 +00003360 }
sewardjbf290b92002-05-01 02:28:01 +00003361
sewardj018f7622002-05-15 21:13:39 +00003362 if (VG_(threads)[i].status != VgTs_Empty) {
sewardjbf290b92002-05-01 02:28:01 +00003363 Int
sewardj018f7622002-05-15 21:13:39 +00003364 stack_used = (Addr)VG_(threads)[i].stack_highest_word
3365 - (Addr)VG_(threads)[i].m_esp;
sewardjbf290b92002-05-01 02:28:01 +00003366 if (i > 1 /* not the root thread */
3367 && stack_used
3368 >= (VG_PTHREAD_STACK_MIN - 1000 /* paranoia */)) {
3369 VG_(message)(Vg_UserMsg,
njn25e49d8e72002-09-23 09:36:25 +00003370 "Error: STACK OVERFLOW: "
sewardjbf290b92002-05-01 02:28:01 +00003371 "thread %d: stack used %d, available %d",
3372 i, stack_used, VG_PTHREAD_STACK_MIN );
3373 VG_(message)(Vg_UserMsg,
3374 "Terminating Valgrind. If thread(s) "
3375 "really need more stack, increase");
3376 VG_(message)(Vg_UserMsg,
3377 "VG_PTHREAD_STACK_SIZE in vg_include.h and recompile.");
3378 VG_(exit)(1);
3379 }
3380 }
sewardj6072c362002-04-19 14:40:57 +00003381 }
sewardj5f07b662002-04-23 16:52:51 +00003382
3383 for (i = 0; i < VG_N_THREAD_KEYS; i++) {
3384 if (!vg_thread_keys[i].inuse)
3385 vg_assert(vg_thread_keys[i].destructor == NULL);
3386 }
sewardj6072c362002-04-19 14:40:57 +00003387}
3388
3389
sewardje663cb92002-04-12 10:26:32 +00003390/*--------------------------------------------------------------------*/
3391/*--- end vg_scheduler.c ---*/
3392/*--------------------------------------------------------------------*/