/*--------------------------------------------------------------------*/
/*--- A user-space pthreads implementation.         vg_scheduler.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an extensible x86 protected-mode
   emulator for monitoring program execution on x86-Unixes.

   Copyright (C) 2000-2004 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "valgrind.h"   /* for VG_USERREQ__RUNNING_ON_VALGRIND and
                           VG_USERREQ__DISCARD_TRANSLATIONS, and others */
#include "vg_include.h"

/* BORKAGE/ISSUES as of 29 May 02

- Currently, when a signal is run, just the ThreadStatus.status fields
  are saved in the signal frame, along with the CPU state.  Question:
  should I also save and restore:
     ThreadStatus.joiner
     ThreadStatus.waited_on_mid
     ThreadStatus.awaken_at
     ThreadStatus.retval
  Currently unsure, and so am not doing so.

- So, what's the deal with signals and mutexes?  If a thread is
  blocked on a mutex, or for a condition variable for that matter, can
  signals still be delivered to it?  This has serious consequences --
  deadlocks, etc.

  TODO for valgrind-1.0:

- Update assertion checking in scheduler_sanity().

  TODO sometime:

- Mutex scrubbing - clearup_after_thread_exit: look for threads
  blocked on mutexes held by the exiting thread, and release them
  appropriately. (??)

*/

/* ---------------------------------------------------------------------
   Types and globals for the scheduler.
   ------------------------------------------------------------------ */

/* type ThreadId is defined in vg_include.h. */

/* struct ThreadState is defined in vg_include.h. */

/* Globals.  A statically allocated array of threads.  NOTE: [0] is
   never used, to simplify the simulation of initialisers for
   LinuxThreads. */
ThreadState VG_(threads)[VG_N_THREADS];

/* The process' fork-handler stack. */
static Int              vg_fhstack_used = 0;
static ForkHandlerEntry vg_fhstack[VG_N_FORKHANDLERSTACK];


/* The tid of the thread currently in VG_(baseBlock). */
static ThreadId vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;

/* The tid either currently in baseBlock, or which was in baseBlock
   before it was saved out; this is only updated when a new thread is
   loaded into the baseBlock. */
static ThreadId vg_tid_last_in_baseBlock = VG_INVALID_THREADID;

/* vg_oursignalhandler() might longjmp().  Here's the jmp_buf. */
jmp_buf VG_(scheduler_jmpbuf);
/* This says whether scheduler_jmpbuf is actually valid.  Needed so
   that our signal handler doesn't longjmp when the buffer isn't
   actually valid. */
Bool    VG_(scheduler_jmpbuf_valid) = False;
/* ... and if so, here's the signal which caused it to do so. */
Int     VG_(longjmpd_on_signal);
/* If the current thread gets a synchronous unresumable signal, then
   its details are placed here by the signal handler, to be passed to
   the application's signal handler later on. */
vki_ksiginfo_t VG_(unresumable_siginfo);

/* If != VG_INVALID_THREADID, this is the preferred tid to schedule */
static ThreadId prefer_sched = VG_INVALID_THREADID;

/* Keeping track of keys. */
typedef
   struct {
      /* Has this key been allocated ? */
      Bool inuse;
      /* If .inuse==True, records the address of the associated
         destructor, or NULL if none. */
      void (*destructor)(void*);
   }
   ThreadKeyState;

/* And our array of thread keys. */
static ThreadKeyState vg_thread_keys[VG_N_THREAD_KEYS];

typedef UInt ThreadKey;

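/* Illustrative sketch only -- not part of the real implementation,
   which lives in the pthread-emulation request handlers elsewhere.
   It shows roughly how a slot in vg_thread_keys[] would be claimed
   and later released; the helper names are hypothetical. */
#if 0
static Int example_alloc_key ( void (*dtor)(void*) )
{
   Int k;
   for (k = 0; k < VG_N_THREAD_KEYS; k++) {
      if (!vg_thread_keys[k].inuse) {
         vg_thread_keys[k].inuse      = True;
         vg_thread_keys[k].destructor = dtor;  /* may be NULL */
         return k;                             /* used as a ThreadKey */
      }
   }
   return -1; /* no free keys; caller would report EAGAIN */
}

static void example_free_key ( Int k )
{
   vg_assert(k >= 0 && k < VG_N_THREAD_KEYS && vg_thread_keys[k].inuse);
   vg_thread_keys[k].inuse      = False;
   vg_thread_keys[k].destructor = NULL;
}
#endif
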
/* The scheduler needs to know the address of __libc_freeres_wrapper so
   that it can be called at program exit. */
static Addr VG_(__libc_freeres_wrapper);


UInt VG_(syscall_altered_shadow_reg);
UInt VG_(signal_delivery_altered_shadow_reg);
UInt VG_(pthread_op_altered_shadow_reg);
UInt VG_(client_request_altered_shadow_reg);

/* Forwards */
static void do_client_request ( ThreadId tid );
static void scheduler_sanity ( void );
static void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid );

/* ---------------------------------------------------------------------
   Helper functions for the scheduler.
   ------------------------------------------------------------------ */

__inline__
Bool VG_(is_valid_tid) ( ThreadId tid )
{
   /* tid is unsigned, hence no < 0 test. */
   if (tid == 0) return False;
   if (tid >= VG_N_THREADS) return False;
   if (VG_(threads)[tid].status == VgTs_Empty) return False;
   return True;
}


__inline__
Bool VG_(is_valid_or_empty_tid) ( ThreadId tid )
{
   /* tid is unsigned, hence no < 0 test. */
   if (tid == 0) return False;
   if (tid >= VG_N_THREADS) return False;
   return True;
}

/* For constructing error messages only: try and identify a thread
   whose stack satisfies the predicate p, or return VG_INVALID_THREADID
   if none do.  A small complication is dealing with any currently
   VG_(baseBlock)-resident thread.
*/
ThreadId VG_(first_matching_thread_stack)
         ( Bool (*p) ( Addr stack_min, Addr stack_max ))
{
   ThreadId tid, tid_to_skip;

   tid_to_skip = VG_INVALID_THREADID;

   /* First check to see if there's a currently-loaded thread in
      VG_(baseBlock). */
   if (vg_tid_currently_in_baseBlock != VG_INVALID_THREADID) {
      tid = vg_tid_currently_in_baseBlock;
      if ( p ( VG_(baseBlock)[VGOFF_(m_esp)],
               VG_(threads)[tid].stack_highest_word) )
         return tid;
      else
         tid_to_skip = tid;
   }

   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (VG_(threads)[tid].status == VgTs_Empty) continue;
      if (tid == tid_to_skip) continue;
      if ( p ( VG_(threads)[tid].m_esp,
               VG_(threads)[tid].stack_highest_word) )
         return tid;
   }
   return VG_INVALID_THREADID;
}

/* Print the scheduler status. */
void VG_(pp_sched_status) ( void )
{
   Int i;
   VG_(printf)("\nsched status:\n");
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty) continue;
      VG_(printf)("\nThread %d: status = ", i);
      switch (VG_(threads)[i].status) {
         case VgTs_Runnable:   VG_(printf)("Runnable"); break;
         case VgTs_WaitJoinee: VG_(printf)("WaitJoinee(%d)",
                                           VG_(threads)[i].joiner_jee_tid);
                               break;
         case VgTs_WaitJoiner: VG_(printf)("WaitJoiner"); break;
         case VgTs_Sleeping:   VG_(printf)("Sleeping"); break;
         case VgTs_WaitMX:     VG_(printf)("WaitMX"); break;
         case VgTs_WaitCV:     VG_(printf)("WaitCV"); break;
         case VgTs_WaitSys:    VG_(printf)("WaitSys"); break;
         default: VG_(printf)("???"); break;
      }
      VG_(printf)(", associated_mx = %p, associated_cv = %p\n",
                  VG_(threads)[i].associated_mx,
                  VG_(threads)[i].associated_cv );
      VG_(pp_ExeContext)(
         VG_(get_ExeContext2)( VG_(threads)[i].m_eip, VG_(threads)[i].m_ebp,
                               VG_(threads)[i].m_esp,
                               VG_(threads)[i].stack_highest_word)
      );
   }
   VG_(printf)("\n");
}

static
void print_sched_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, "  SCHED[%d]: %s", tid, what );
}


static
void print_pthread_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, "PTHREAD[%d]: %s", tid, what );
}


static
Char* name_of_sched_event ( UInt event )
{
   switch (event) {
      case VG_TRC_EBP_JMP_SYSCALL:    return "SYSCALL";
      case VG_TRC_EBP_JMP_CLIENTREQ:  return "CLIENTREQ";
      case VG_TRC_EBP_JMP_YIELD:      return "YIELD";
      case VG_TRC_INNER_COUNTERZERO:  return "COUNTERZERO";
      case VG_TRC_INNER_FASTMISS:     return "FASTMISS";
      case VG_TRC_UNRESUMABLE_SIGNAL: return "FATALSIGNAL";
      default:                        return "??UNKNOWN??";
   }
}

259/* Create a translation of the client basic block beginning at
260 orig_addr, and add it to the translation cache & translation table.
261 This probably doesn't really belong here, but, hey ...
262*/
sewardj1e8cdc92002-04-18 11:37:52 +0000263static
264void create_translation_for ( ThreadId tid, Addr orig_addr )
sewardje663cb92002-04-12 10:26:32 +0000265{
sewardj22854b92002-11-30 14:00:47 +0000266 Addr trans_addr;
267 Int orig_size, trans_size;
268 UShort jumps[VG_MAX_JUMPS];
269 Int i;
270
271 for(i = 0; i < VG_MAX_JUMPS; i++)
272 jumps[i] = (UShort)-1;
sewardj6c3769f2002-11-29 01:02:45 +0000273
274 /* Make a translation, into temporary storage. */
njn72718642003-07-24 08:45:32 +0000275 VG_(translate)( tid, orig_addr, /* in */
276 &orig_size, &trans_addr, &trans_size, jumps ); /* out */
sewardj6c3769f2002-11-29 01:02:45 +0000277
278 /* Copy data at trans_addr into the translation cache. */
sewardje663cb92002-04-12 10:26:32 +0000279 /* Since the .orig_size and .trans_size fields are
280 UShort, be paranoid. */
281 vg_assert(orig_size > 0 && orig_size < 65536);
282 vg_assert(trans_size > 0 && trans_size < 65536);
sewardj6c3769f2002-11-29 01:02:45 +0000283
sewardj22854b92002-11-30 14:00:47 +0000284 VG_(add_to_trans_tab)( orig_addr, orig_size, trans_addr, trans_size, jumps );
sewardj6c3769f2002-11-29 01:02:45 +0000285
sewardje663cb92002-04-12 10:26:32 +0000286 /* Free the intermediary -- was allocated by VG_(emit_code). */
njn25e49d8e72002-09-23 09:36:25 +0000287 VG_(arena_free)( VG_AR_JITTER, (void*)trans_addr );
sewardje663cb92002-04-12 10:26:32 +0000288}
289
290
/* Allocate a completely empty ThreadState record. */
static
ThreadId vg_alloc_ThreadState ( void )
{
   Int i;
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty)
         return i;
   }
   VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
   VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
   VG_(core_panic)("VG_N_THREADS is too low");
   /*NOTREACHED*/
}

ThreadState *VG_(get_ThreadState)(ThreadId tid)
{
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   return &VG_(threads)[tid];
}

Bool VG_(is_running_thread)(ThreadId tid)
{
   ThreadId curr = VG_(get_current_tid)();
   return (curr == tid && VG_INVALID_THREADID != tid);
}

ThreadId VG_(get_current_tid) ( void )
{
   if (!VG_(is_valid_tid)(vg_tid_currently_in_baseBlock))
      return VG_INVALID_THREADID;
   return vg_tid_currently_in_baseBlock;
}

ThreadId VG_(get_current_or_recent_tid) ( void )
{
   vg_assert(vg_tid_currently_in_baseBlock == vg_tid_last_in_baseBlock ||
             vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);
   vg_assert(VG_(is_valid_tid)(vg_tid_last_in_baseBlock));

   return vg_tid_last_in_baseBlock;
}

/* Copy the saved state of a thread into VG_(baseBlock), ready for it
   to be run. */
void VG_(load_thread_state) ( ThreadId tid )
{
   Int i;
   vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);

   VG_(baseBlock)[VGOFF_(ldt)]  = (UInt)VG_(threads)[tid].ldt;
   VG_(baseBlock)[VGOFF_(tls)]  = (UInt)VG_(threads)[tid].tls;
   VG_(baseBlock)[VGOFF_(m_cs)] = VG_(threads)[tid].m_cs;
   VG_(baseBlock)[VGOFF_(m_ss)] = VG_(threads)[tid].m_ss;
   VG_(baseBlock)[VGOFF_(m_ds)] = VG_(threads)[tid].m_ds;
   VG_(baseBlock)[VGOFF_(m_es)] = VG_(threads)[tid].m_es;
   VG_(baseBlock)[VGOFF_(m_fs)] = VG_(threads)[tid].m_fs;
   VG_(baseBlock)[VGOFF_(m_gs)] = VG_(threads)[tid].m_gs;

   VG_(baseBlock)[VGOFF_(m_eax)] = VG_(threads)[tid].m_eax;
   VG_(baseBlock)[VGOFF_(m_ebx)] = VG_(threads)[tid].m_ebx;
   VG_(baseBlock)[VGOFF_(m_ecx)] = VG_(threads)[tid].m_ecx;
   VG_(baseBlock)[VGOFF_(m_edx)] = VG_(threads)[tid].m_edx;
   VG_(baseBlock)[VGOFF_(m_esi)] = VG_(threads)[tid].m_esi;
   VG_(baseBlock)[VGOFF_(m_edi)] = VG_(threads)[tid].m_edi;
   VG_(baseBlock)[VGOFF_(m_ebp)] = VG_(threads)[tid].m_ebp;
   VG_(baseBlock)[VGOFF_(m_esp)] = VG_(threads)[tid].m_esp;
   VG_(baseBlock)[VGOFF_(m_eflags)]
      = VG_(threads)[tid].m_eflags & ~EFlagD;
   VG_(baseBlock)[VGOFF_(m_dflag)]
      = VG_(extractDflag)(VG_(threads)[tid].m_eflags);
   VG_(baseBlock)[VGOFF_(m_eip)] = VG_(threads)[tid].m_eip;

   for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
      VG_(baseBlock)[VGOFF_(m_ssestate) + i]
         = VG_(threads)[tid].m_sse[i];

   if (VG_(needs).shadow_regs) {
      VG_(baseBlock)[VGOFF_(sh_eax)] = VG_(threads)[tid].sh_eax;
      VG_(baseBlock)[VGOFF_(sh_ebx)] = VG_(threads)[tid].sh_ebx;
      VG_(baseBlock)[VGOFF_(sh_ecx)] = VG_(threads)[tid].sh_ecx;
      VG_(baseBlock)[VGOFF_(sh_edx)] = VG_(threads)[tid].sh_edx;
      VG_(baseBlock)[VGOFF_(sh_esi)] = VG_(threads)[tid].sh_esi;
      VG_(baseBlock)[VGOFF_(sh_edi)] = VG_(threads)[tid].sh_edi;
      VG_(baseBlock)[VGOFF_(sh_ebp)] = VG_(threads)[tid].sh_ebp;
      VG_(baseBlock)[VGOFF_(sh_esp)] = VG_(threads)[tid].sh_esp;
      VG_(baseBlock)[VGOFF_(sh_eflags)] = VG_(threads)[tid].sh_eflags;
   } else {
      /* Fields shouldn't be used -- check their values haven't changed. */
      vg_assert(
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_eax &&
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_ebx &&
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_ecx &&
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_edx &&
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_esi &&
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_edi &&
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_ebp &&
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_esp &&
         VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_eflags);
   }

   vg_tid_currently_in_baseBlock = tid;
   vg_tid_last_in_baseBlock = tid;
}

/* Copy the state of a thread from VG_(baseBlock), presumably after it
   has been descheduled.  For sanity-check purposes, fill the vacated
   VG_(baseBlock) with garbage so as to make the system more likely to
   fail quickly if we erroneously continue to poke around inside
   VG_(baseBlock) without first doing a load_thread_state().
*/
void VG_(save_thread_state) ( ThreadId tid )
{
   Int i;
   const UInt junk = 0xDEADBEEF;

   vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);

   /* We don't copy out the LDT entry, because it can never be changed
      by the normal actions of the thread, only by the modify_ldt
      syscall, in which case we will correctly be updating
      VG_(threads)[tid].ldt.  This printf happens iff the following
      assertion fails. */
   if ((void*)VG_(threads)[tid].ldt != (void*)VG_(baseBlock)[VGOFF_(ldt)])
      VG_(printf)("VG_(threads)[%d].ldt=%p  VG_(baseBlock)[VGOFF_(ldt)]=%p\n",
                  tid, (void*)VG_(threads)[tid].ldt,
                  (void*)VG_(baseBlock)[VGOFF_(ldt)]);

   vg_assert((void*)VG_(threads)[tid].ldt
             == (void*)VG_(baseBlock)[VGOFF_(ldt)]);

   /* We don't copy out the TLS entry, because it can never be changed
      by the normal actions of the thread, only by the set_thread_area
      syscall, in which case we will correctly be updating
      VG_(threads)[tid].tls.  This printf happens iff the following
      assertion fails. */
   if ((void*)VG_(threads)[tid].tls != (void*)VG_(baseBlock)[VGOFF_(tls)])
      VG_(printf)("VG_(threads)[%d].tls=%p  VG_(baseBlock)[VGOFF_(tls)]=%p\n",
                  tid, (void*)VG_(threads)[tid].tls,
                  (void*)VG_(baseBlock)[VGOFF_(tls)]);

   vg_assert((void*)VG_(threads)[tid].tls
             == (void*)VG_(baseBlock)[VGOFF_(tls)]);

   VG_(threads)[tid].m_cs = VG_(baseBlock)[VGOFF_(m_cs)];
   VG_(threads)[tid].m_ss = VG_(baseBlock)[VGOFF_(m_ss)];
   VG_(threads)[tid].m_ds = VG_(baseBlock)[VGOFF_(m_ds)];
   VG_(threads)[tid].m_es = VG_(baseBlock)[VGOFF_(m_es)];
   VG_(threads)[tid].m_fs = VG_(baseBlock)[VGOFF_(m_fs)];
   VG_(threads)[tid].m_gs = VG_(baseBlock)[VGOFF_(m_gs)];

   VG_(threads)[tid].m_eax = VG_(baseBlock)[VGOFF_(m_eax)];
   VG_(threads)[tid].m_ebx = VG_(baseBlock)[VGOFF_(m_ebx)];
   VG_(threads)[tid].m_ecx = VG_(baseBlock)[VGOFF_(m_ecx)];
   VG_(threads)[tid].m_edx = VG_(baseBlock)[VGOFF_(m_edx)];
   VG_(threads)[tid].m_esi = VG_(baseBlock)[VGOFF_(m_esi)];
   VG_(threads)[tid].m_edi = VG_(baseBlock)[VGOFF_(m_edi)];
   VG_(threads)[tid].m_ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
   VG_(threads)[tid].m_esp = VG_(baseBlock)[VGOFF_(m_esp)];
   VG_(threads)[tid].m_eflags
      = VG_(insertDflag)(VG_(baseBlock)[VGOFF_(m_eflags)],
                         VG_(baseBlock)[VGOFF_(m_dflag)]);
   VG_(threads)[tid].m_eip = VG_(baseBlock)[VGOFF_(m_eip)];

   for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
      VG_(threads)[tid].m_sse[i]
         = VG_(baseBlock)[VGOFF_(m_ssestate) + i];

   if (VG_(needs).shadow_regs) {
      VG_(threads)[tid].sh_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
      VG_(threads)[tid].sh_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
      VG_(threads)[tid].sh_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
      VG_(threads)[tid].sh_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
      VG_(threads)[tid].sh_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
      VG_(threads)[tid].sh_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
      VG_(threads)[tid].sh_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
      VG_(threads)[tid].sh_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
      VG_(threads)[tid].sh_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
   } else {
      /* Fill with recognisable junk */
      VG_(threads)[tid].sh_eax =
      VG_(threads)[tid].sh_ebx =
      VG_(threads)[tid].sh_ecx =
      VG_(threads)[tid].sh_edx =
      VG_(threads)[tid].sh_esi =
      VG_(threads)[tid].sh_edi =
      VG_(threads)[tid].sh_ebp =
      VG_(threads)[tid].sh_esp =
      VG_(threads)[tid].sh_eflags = VG_UNUSED_SHADOW_REG_VALUE;
   }

   /* Fill it up with junk. */
   VG_(baseBlock)[VGOFF_(ldt)]  = junk;
   VG_(baseBlock)[VGOFF_(tls)]  = junk;
   VG_(baseBlock)[VGOFF_(m_cs)] = junk;
   VG_(baseBlock)[VGOFF_(m_ss)] = junk;
   VG_(baseBlock)[VGOFF_(m_ds)] = junk;
   VG_(baseBlock)[VGOFF_(m_es)] = junk;
   VG_(baseBlock)[VGOFF_(m_fs)] = junk;
   VG_(baseBlock)[VGOFF_(m_gs)] = junk;

   VG_(baseBlock)[VGOFF_(m_eax)] = junk;
   VG_(baseBlock)[VGOFF_(m_ebx)] = junk;
   VG_(baseBlock)[VGOFF_(m_ecx)] = junk;
   VG_(baseBlock)[VGOFF_(m_edx)] = junk;
   VG_(baseBlock)[VGOFF_(m_esi)] = junk;
   VG_(baseBlock)[VGOFF_(m_edi)] = junk;
   VG_(baseBlock)[VGOFF_(m_ebp)] = junk;
   VG_(baseBlock)[VGOFF_(m_esp)] = junk;
   VG_(baseBlock)[VGOFF_(m_eflags)] = junk;
   VG_(baseBlock)[VGOFF_(m_eip)] = junk;

   for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
      VG_(baseBlock)[VGOFF_(m_ssestate) + i] = junk;

   vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;
}

/* Run the thread tid for a while, and return a VG_TRC_* value to the
   scheduler indicating what happened. */
static
UInt run_thread_for_a_while ( ThreadId tid )
{
   volatile UInt trc = 0;
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   vg_assert(!VG_(scheduler_jmpbuf_valid));

   VGP_PUSHCC(VgpRun);
   VG_(load_thread_state) ( tid );

   /* there should be no undealt-with signals */
   vg_assert(VG_(unresumable_siginfo).si_signo == 0);

   if (__builtin_setjmp(VG_(scheduler_jmpbuf)) == 0) {
      /* try this ... */
      VG_(scheduler_jmpbuf_valid) = True;
      trc = VG_(run_innerloop)();
      VG_(scheduler_jmpbuf_valid) = False;
      /* We get here if the client didn't take a fault. */
   } else {
      /* We get here if the client took a fault, which caused our
         signal handler to longjmp. */
      VG_(scheduler_jmpbuf_valid) = False;
      vg_assert(trc == 0);
      trc = VG_TRC_UNRESUMABLE_SIGNAL;
   }

   vg_assert(!VG_(scheduler_jmpbuf_valid));

   VG_(save_thread_state) ( tid );
   VGP_POPCC(VgpRun);
   return trc;
}

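/* For reference, a sketch only (the real handler lives elsewhere in the
   core, not in this file) of how the fault path above gets triggered:
   the host signal handler only longjmps back into
   run_thread_for_a_while() while VG_(scheduler_jmpbuf_valid) is True,
   i.e. while client code is actually running inside VG_(run_innerloop)(). */
#if 0
static void example_fault_handler ( Int sigNo )
{
   if (VG_(scheduler_jmpbuf_valid)) {
      VG_(longjmpd_on_signal) = sigNo;
      __builtin_longjmp(VG_(scheduler_jmpbuf), 1);
   }
   /* otherwise the fault happened inside Valgrind itself -- panic. */
}
#endif
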
static
void mostly_clear_thread_record ( ThreadId tid )
{
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   VG_(threads)[tid].ldt                  = NULL;
   VG_(clear_TLS_for_thread)(VG_(threads)[tid].tls);
   VG_(threads)[tid].tid                  = tid;
   VG_(threads)[tid].status               = VgTs_Empty;
   VG_(threads)[tid].associated_mx        = NULL;
   VG_(threads)[tid].associated_cv        = NULL;
   VG_(threads)[tid].awaken_at            = 0;
   VG_(threads)[tid].joinee_retval        = NULL;
   VG_(threads)[tid].joiner_thread_return = NULL;
   VG_(threads)[tid].joiner_jee_tid       = VG_INVALID_THREADID;
   VG_(threads)[tid].detached             = False;
   VG_(threads)[tid].cancel_st   = True;  /* PTHREAD_CANCEL_ENABLE */
   VG_(threads)[tid].cancel_ty   = True;  /* PTHREAD_CANCEL_DEFERRED */
   VG_(threads)[tid].cancel_pend = NULL;  /* not pending */
   VG_(threads)[tid].custack_used = 0;
   VG_(ksigemptyset)(&VG_(threads)[tid].sig_mask);
   VG_(ksigfillset)(&VG_(threads)[tid].eff_sig_mask);
   VG_(threads)[tid].specifics_ptr = NULL;

   VG_(threads)[tid].syscallno = -1;
   VG_(threads)[tid].sys_pre_res = NULL;

   VG_(threads)[tid].proxy = NULL;
}

/* Initialise the scheduler.  Create a single "main" thread ready to
   run, with special ThreadId of one.  This is called at startup; the
   caller takes care to park the client's state in VG_(baseBlock).
*/
void VG_(scheduler_init) ( void )
{
   Int      i;
   ThreadId tid_main;

   for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
      mostly_clear_thread_record(i);
      VG_(threads)[i].stack_size         = 0;
      VG_(threads)[i].stack_base         = (Addr)NULL;
      VG_(threads)[i].stack_highest_word = (Addr)NULL;
   }

   for (i = 0; i < VG_N_THREAD_KEYS; i++) {
      vg_thread_keys[i].inuse      = False;
      vg_thread_keys[i].destructor = NULL;
   }

   vg_fhstack_used = 0;

   /* Assert this is thread zero, which has certain magic
      properties. */
   tid_main = vg_alloc_ThreadState();
   vg_assert(tid_main == 1);
   VG_(threads)[tid_main].status = VgTs_Runnable;

   /* Copy VG_(baseBlock) state to tid_main's slot. */
   vg_tid_currently_in_baseBlock = tid_main;
   vg_tid_last_in_baseBlock = tid_main;
   VG_(baseBlock)[VGOFF_(tls)] = (UInt)VG_(threads)[tid_main].tls;
   VG_(save_thread_state) ( tid_main );

   VG_(threads)[tid_main].stack_highest_word
      = VG_(clstk_end) - 4;
   VG_(threads)[tid_main].stack_base = VG_(clstk_base);
   VG_(threads)[tid_main].stack_size = VG_(clstk_end) - VG_(clstk_base);

   /* So now ... */
   vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);

   /* Not running client code right now. */
   VG_(scheduler_jmpbuf_valid) = False;

   /* Proxy for main thread */
   VG_(proxy_create)(tid_main);
}

/* vthread tid is returning from a signal handler; modify its
   stack/regs accordingly. */

static
void handle_signal_return ( ThreadId tid )
{
   Bool restart_blocked_syscalls;
   struct vki_timespec * rem;

   vg_assert(VG_(is_valid_tid)(tid));

   restart_blocked_syscalls = VG_(signal_returns)(tid);

   if (restart_blocked_syscalls)
      /* Easy; we don't have to do anything. */
      return;

   if (VG_(threads)[tid].status == VgTs_Sleeping
       && VG_(threads)[tid].m_eax == __NR_nanosleep) {
      /* We interrupted a nanosleep().  The right thing to do is to
         write the unused time to nanosleep's second param, but that's
         too much effort ... we just say that 1 nanosecond was not
         used, and return EINTR. */
      rem = (struct vki_timespec *)VG_(threads)[tid].m_ecx; /* arg2 */
      if (rem != NULL) {
         rem->tv_sec = 0;
         rem->tv_nsec = 1;
      }
      SET_SYSCALL_RETVAL(tid, -VKI_EINTR);
      VG_(threads)[tid].status = VgTs_Runnable;
      return;
   }

   /* All other cases?  Just return. */
}

static
void sched_do_syscall ( ThreadId tid )
{
   Int   syscall_no;
   Char  msg_buf[100];

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   syscall_no = VG_(threads)[tid].m_eax; /* syscall number */

   /* Special-case nanosleep because we can.  But should we?

      XXX not doing so for now, because it doesn't seem to work
      properly, and we can use the syscall nanosleep just as easily.
   */
   if (0 && syscall_no == __NR_nanosleep) {
      UInt t_now, t_awaken;
      struct vki_timespec* req;
      req = (struct vki_timespec*)VG_(threads)[tid].m_ebx; /* arg1 */

      if (req->tv_sec < 0 || req->tv_nsec < 0 || req->tv_nsec >= 1000000000) {
         SET_SYSCALL_RETVAL(tid, -VKI_EINVAL);
         return;
      }

      t_now = VG_(read_millisecond_timer)();
      t_awaken
         = t_now
           + (UInt)1000ULL * (UInt)(req->tv_sec)
           + (UInt)(req->tv_nsec) / 1000000;
      VG_(threads)[tid].status    = VgTs_Sleeping;
      VG_(threads)[tid].awaken_at = t_awaken;
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf, "at %d: nanosleep for %d",
                               t_now, t_awaken-t_now);
         print_sched_event(tid, msg_buf);
      }
      VG_(add_timeout)(tid, t_awaken);
      /* Force the scheduler to run something else for a while. */
      return;
   }

   /* If pre_syscall returns true, then we're done immediately */
   if (VG_(pre_syscall)(tid)) {
      VG_(post_syscall)(tid, True);
      vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   } else {
      vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
   }
}

struct timeout {
   UInt        time;            /* time we should awaken */
   ThreadId    tid;             /* thread which cares about this timeout */
   struct timeout *next;
};

static struct timeout *timeouts;

void VG_(add_timeout)(ThreadId tid, UInt time)
{
   struct timeout *t = VG_(arena_malloc)(VG_AR_CORE, sizeof(*t));
   struct timeout **prev, *tp;

   t->time = time;
   t->tid = tid;

   if (VG_(clo_trace_sched)) {
      Char msg_buf[100];
      VG_(sprintf)(msg_buf, "add_timeout: now=%u adding timeout at %u",
                   VG_(read_millisecond_timer)(), time);
      print_sched_event(tid, msg_buf);
   }

   for(tp = timeouts, prev = &timeouts;
       tp != NULL && tp->time < time;
       prev = &tp->next, tp = tp->next)
      ;
   t->next = tp;
   *prev = t;
}

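/* Illustrative sketch only: the loop above inserts each new entry so that
   the list stays sorted by ascending wakeup time, which is what idle()
   below relies on.  A debugging walk over it (hypothetical helper, not
   used anywhere) would check that invariant like this. */
#if 0
static void example_check_timeouts ( void )
{
   struct timeout *tp;
   for (tp = timeouts; tp != NULL && tp->next != NULL; tp = tp->next)
      vg_assert(tp->time <= tp->next->time);  /* sorted-insert invariant */
}
#endif
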
/* Sleep for a while, but be willing to be woken. */
static
void idle ( void )
{
   struct vki_pollfd pollfd[1];
   Int delta = -1;
   Int fd = VG_(proxy_resfd)();

   pollfd[0].fd = fd;
   pollfd[0].events = VKI_POLLIN;

   /* Look through the nearest timeouts, looking for the next future
      one (there may be stale past timeouts).  They'll all be mopped
      up below when the poll() finishes. */
   if (timeouts != NULL) {
      struct timeout *tp;
      Bool wicked = False;
      UInt now = VG_(read_millisecond_timer)();

      for(tp = timeouts; tp != NULL && tp->time < now; tp = tp->next) {
         /* If a thread is still sleeping in the past, make it runnable */
         ThreadState *tst = VG_(get_ThreadState)(tp->tid);
         if (tst->status == VgTs_Sleeping)
            tst->status = VgTs_Runnable;
         wicked = True;         /* no sleep for the wicked */
      }

      if (tp != NULL) {
         delta = tp->time - now;
         vg_assert(delta >= 0);
      }
      if (wicked)
         delta = 0;
   }

   /* gotta wake up for something! */
   vg_assert(fd != -1 || delta != -1);

   /* If we need to do signal routing, then poll for pending signals
      every VG_(clo_signal_polltime) mS */
   if (VG_(do_signal_routing) && (delta > VG_(clo_signal_polltime) || delta == -1))
      delta = VG_(clo_signal_polltime);

   if (VG_(clo_trace_sched)) {
      Char msg_buf[100];
      VG_(sprintf)(msg_buf, "idle: waiting for %dms and fd %d",
                   delta, fd);
      print_sched_event(0, msg_buf);
   }

   VG_(poll)(pollfd, fd != -1 ? 1 : 0, delta);

   /* See if there's anything on the timeout list which needs
      waking, and mop up anything in the past. */
   {
      UInt now = VG_(read_millisecond_timer)();
      struct timeout *tp;

      tp = timeouts;

      while(tp && tp->time <= now) {
         struct timeout *dead;
         ThreadState *tst;

         tst = VG_(get_ThreadState)(tp->tid);

         if (VG_(clo_trace_sched)) {
            Char msg_buf[100];
            VG_(sprintf)(msg_buf, "idle: now=%u removing timeout at %u",
                         now, tp->time);
            print_sched_event(tp->tid, msg_buf);
         }

         /* If awaken_at != tp->time then it means the timeout is
            stale and we should just ignore it. */
         if(tst->awaken_at == tp->time) {
            switch(tst->status) {
            case VgTs_Sleeping:
               tst->awaken_at = 0xFFFFFFFF;
               tst->status = VgTs_Runnable;
               break;

            case VgTs_WaitCV:
               do_pthread_cond_timedwait_TIMEOUT(tst->tid);
               break;

            default:
               /* This is a bit odd but OK; if a thread had a timeout
                  but woke for some other reason (signal, condvar
                  wakeup), then it will still be on the list. */
               if (0)
                  VG_(printf)("idle(): unexpected status tp->tid=%d tst->status = %d\n",
                              tp->tid, tst->status);
               break;
            }
         }

         dead = tp;
         tp = tp->next;

         VG_(arena_free)(VG_AR_CORE, dead);
      }

      timeouts = tp;
   }
}

/* ---------------------------------------------------------------------
   The scheduler proper.
   ------------------------------------------------------------------ */

/* Run user-space threads until either
   * Deadlock occurs
   * One thread asks to shutdown Valgrind
   * The specified number of basic blocks has gone by.
*/
VgSchedReturnCode VG_(scheduler) ( void )
{
   ThreadId tid, tid_next;
   UInt     trc;
   UInt     dispatch_ctr_SAVED;
   Int      done_this_time, n_in_bounded_wait;
   Int      n_exists, n_waiting_for_reaper;
   Addr     trans_addr;

   /* Start with the root thread.  tid in general indicates the
      currently runnable/just-finished-running thread. */
   VG_(last_run_tid) = tid = 1;

   /* This is the top level scheduler loop.  It falls into three
      phases. */
   while (True) {

      /* ======================= Phase 0 of 3 =======================
         Be paranoid.  Always a good idea. */
     stage1:
      scheduler_sanity();
      VG_(do_sanity_checks)( False );

      /* ======================= Phase 1 of 3 =======================
         Handle I/O completions and signals.  This may change the
         status of various threads.  Then select a new thread to run,
         or declare deadlock, or sleep if there are no runnable
         threads but some are blocked on I/O. */

      /* Do the following loop until a runnable thread is found, or
         deadlock is detected. */
      while (True) {

         /* For stats purposes only. */
         VG_(num_scheduling_events_MAJOR) ++;

         /* Route signals to their proper places */
         VG_(route_signals)();

         /* See if any of the proxy LWPs report any activity: either a
            syscall completing or a signal arriving. */
         VG_(proxy_results)();

         /* Try and find a thread (tid) to run. */
         tid_next = tid;
         if (prefer_sched != VG_INVALID_THREADID) {
            tid_next = prefer_sched-1;
            prefer_sched = VG_INVALID_THREADID;
         }
         n_in_bounded_wait    = 0;
         n_exists             = 0;
         n_waiting_for_reaper = 0;
         while (True) {
            tid_next++;
            if (tid_next >= VG_N_THREADS) tid_next = 1;
            if (VG_(threads)[tid_next].status == VgTs_Sleeping
                || VG_(threads)[tid_next].status == VgTs_WaitSys
                || (VG_(threads)[tid_next].status == VgTs_WaitCV
                    && VG_(threads)[tid_next].awaken_at != 0xFFFFFFFF))
               n_in_bounded_wait ++;
            if (VG_(threads)[tid_next].status != VgTs_Empty)
               n_exists++;
            if (VG_(threads)[tid_next].status == VgTs_WaitJoiner)
               n_waiting_for_reaper++;
            if (VG_(threads)[tid_next].status == VgTs_Runnable)
               break; /* We can run this one. */
            if (tid_next == tid)
               break; /* been all the way round */
         }
         tid = tid_next;

         if (VG_(threads)[tid].status == VgTs_Runnable) {
            /* Found a suitable candidate.  Fall out of this loop, so
               we can advance to stage 2 of the scheduler: actually
               running the thread. */
            break;
         }

         /* All threads have exited - pretend someone called exit() */
         if (n_waiting_for_reaper == n_exists) {
            VG_(exitcode) = 0; /* ? */
            return VgSrc_ExitSyscall;
         }

         /* We didn't find a runnable thread.  Now what? */
         if (n_in_bounded_wait == 0) {
            /* No runnable threads and no prospect of any appearing
               even if we wait for an arbitrary length of time.  In
               short, we have a deadlock. */
            VG_(pp_sched_status)();
            return VgSrc_Deadlock;
         }

         /* Nothing needs doing, so sit in idle until either a timeout
            happens or a thread's syscall completes. */
         idle();
         /* pp_sched_status(); */
         /* VG_(printf)("."); */
      }

      /* ======================= Phase 2 of 3 =======================
         Wahey!  We've finally decided that thread tid is runnable, so
         we now do that.  Run it for as much of a quanta as possible.
         Trivial requests are handled and the thread continues.  The
         aim is not to do too many of Phase 1 since it is expensive. */

      if (0)
         VG_(printf)("SCHED: tid %d\n", tid);

      VG_TRACK( thread_run, tid );

      /* Figure out how many bbs to ask vg_run_innerloop to do.  Note
         that it decrements the counter before testing it for zero, so
         that if VG_(dispatch_ctr) is set to N you get at most N-1
         iterations.  Also this means that VG_(dispatch_ctr) must
         exceed zero before entering the innerloop.  Also also, the
         decrement is done before the bb is actually run, so you
         always get at least one decrement even if nothing happens.
      */
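      /* For example: if VG_SCHEDULING_QUANTUM were 50000, the counter
         below would start at 50001, so at most 50000 bbs run before the
         dispatcher returns VG_TRC_INNER_COUNTERZERO. */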
      VG_(dispatch_ctr) = VG_SCHEDULING_QUANTUM + 1;

      /* ... and remember what we asked for. */
      dispatch_ctr_SAVED = VG_(dispatch_ctr);

      /* paranoia ... */
      vg_assert(VG_(threads)[tid].tid == tid);

      /* Actually run thread tid. */
      while (True) {

         VG_(last_run_tid) = tid;

         /* For stats purposes only. */
         VG_(num_scheduling_events_MINOR) ++;

         if (0)
            VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs",
                                      tid, VG_(dispatch_ctr) - 1 );
#        if 0
         if (VG_(bbs_done) > 31700000 + 0) {
            dispatch_ctr_SAVED = VG_(dispatch_ctr) = 2;
            VG_(translate)(&VG_(threads)[tid], VG_(threads)[tid].m_eip,
                           NULL,NULL,NULL);
         }
         vg_assert(VG_(threads)[tid].m_eip != 0);
#        endif

         trc = run_thread_for_a_while ( tid );

#        if 0
         if (0 == VG_(threads)[tid].m_eip) {
            VG_(printf)("tid = %d,  dc = %llu\n", tid, VG_(bbs_done));
            vg_assert(0 != VG_(threads)[tid].m_eip);
         }
#        endif

         /* Deal quickly with trivial scheduling events, and resume the
            thread. */

         if (trc == VG_TRC_INNER_FASTMISS) {
            vg_assert(VG_(dispatch_ctr) > 0);

            /* Trivial event.  Miss in the fast-cache.  Do a full
               lookup for it. */
            trans_addr
               = VG_(search_transtab) ( VG_(threads)[tid].m_eip );
            if (trans_addr == (Addr)0) {
               /* Not found; we need to request a translation. */
               create_translation_for(
                  tid, VG_(threads)[tid].m_eip );
               trans_addr = VG_(search_transtab) ( VG_(threads)[tid].m_eip );
               if (trans_addr == (Addr)0)
                  VG_(core_panic)("VG_TRC_INNER_FASTMISS: missing tt_fast entry");
            }
            continue; /* with this thread */
         }

         if (trc == VG_TRC_EBP_JMP_CLIENTREQ) {
            UInt reqno = *(UInt*)(VG_(threads)[tid].m_eax);
            /* VG_(printf)("request 0x%x\n", reqno); */

            /* Are we really absolutely totally quitting? */
            if (reqno == VG_USERREQ__LIBC_FREERES_DONE) {
               if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                  VG_(message)(Vg_DebugMsg,
                     "__libc_freeres() done; really quitting!");
               }
               return VgSrc_ExitSyscall;
            }

            do_client_request(tid);
            /* Following the request, we try and continue with the
               same thread if still runnable.  If not, go back to
               Stage 1 to select a new thread to run. */
            if (VG_(threads)[tid].status == VgTs_Runnable
                && reqno != VG_USERREQ__PTHREAD_YIELD)
               continue; /* with this thread */
            else
               goto stage1;
         }

         if (trc == VG_TRC_EBP_JMP_SYSCALL) {
            /* Do a syscall for the vthread tid.  This could cause it
               to become non-runnable.  One special case: spot the
               client doing calls to exit() and take this as the cue
               to exit. */
#           if 0
            { UInt* esp; Int i;
              esp=(UInt*)VG_(threads)[tid].m_esp;
              VG_(printf)("\nBEFORE\n");
              for (i = 10; i >= -10; i--)
                 VG_(printf)("%2d  %p  =  0x%x\n", i, &esp[i], esp[i]);
            }
#           endif

            /* Deal with calling __libc_freeres() at exit.  When the
               client does __NR_exit, it's exiting for good.  So we
               then run VG_(__libc_freeres_wrapper).  That quits by
               doing VG_USERREQ__LIBC_FREERES_DONE, and at that point
               we really exit.  To be safe we nuke all other threads
               currently running.

               If not valgrinding (cachegrinding, etc) don't do this.
               __libc_freeres does some invalid frees which crash
               the unprotected malloc/free system. */

            if (VG_(threads)[tid].m_eax == __NR_exit
                || VG_(threads)[tid].m_eax == __NR_exit_group
               ) {

               /* If __NR_exit, remember the supplied argument. */
               VG_(exitcode) = VG_(threads)[tid].m_ebx; /* syscall arg1 */

               /* Only run __libc_freeres if the tool says it's ok and
                  it hasn't been overridden with --run-libc-freeres=no
                  on the command line. */

               if (VG_(needs).libc_freeres &&
                   VG_(clo_run_libc_freeres) &&
                   VG_(__libc_freeres_wrapper) != 0) {
                  if (VG_(clo_verbosity) > 2
                      || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                     VG_(message)(Vg_DebugMsg,
                        "Caught __NR_exit; running __libc_freeres()");
                  }
                  VG_(nuke_all_threads_except) ( tid );
                  VG_(threads)[tid].m_eip = (UInt)VG_(__libc_freeres_wrapper);
                  vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
                  goto stage1; /* party on, dudes (but not for much longer :) */

               } else {
                  /* We won't run __libc_freeres; just exit now. */
                  if (VG_(clo_verbosity) > 2
                      || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) {
                     VG_(message)(Vg_DebugMsg,
                        "Caught __NR_exit; quitting");
                  }
                  return VgSrc_ExitSyscall;
               }

            }

            /* We've dealt with __NR_exit at this point. */
            vg_assert(VG_(threads)[tid].m_eax != __NR_exit &&
                      VG_(threads)[tid].m_eax != __NR_exit_group);

            /* Trap syscalls to __NR_sched_yield and just have this
               thread yield instead.  Not essential, just an
               optimisation. */
            if (VG_(threads)[tid].m_eax == __NR_sched_yield) {
               SET_SYSCALL_RETVAL(tid, 0); /* syscall returns with success */
               goto stage1; /* find a new thread to run */
            }

            sched_do_syscall(tid);

#           if 0
            { UInt* esp; Int i;
              esp=(UInt*)VG_(threads)[tid].m_esp;
              VG_(printf)("AFTER\n");
              for (i = 10; i >= -10; i--)
                 VG_(printf)("%2d  %p  =  0x%x\n", i, &esp[i], esp[i]);
            }
#           endif

            if (VG_(threads)[tid].status == VgTs_Runnable) {
               continue; /* with this thread */
            } else {
               goto stage1;
            }
         }

         /* It's an event we can't quickly deal with.  Give up running
            this thread and handle things the expensive way. */
         break;
      }

      /* ======================= Phase 3 of 3 =======================
         Handle non-trivial thread requests, mostly pthread stuff. */

      /* Ok, we've fallen out of the dispatcher for a
         non-completely-trivial reason.  First, update basic-block
         counters. */

      done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 1;
      vg_assert(done_this_time >= 0);
      VG_(bbs_done) += (ULong)done_this_time;

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d:   completed %d bbs, trc %d",
                                   tid, done_this_time, (Int)trc );

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d:  %llu bbs, event %s",
                                   tid, VG_(bbs_done),
                                   name_of_sched_event(trc) );

      /* Examine the thread's return code to figure out why it
         stopped. */

      switch (trc) {

         case VG_TRC_EBP_JMP_YIELD:
            /* Explicit yield.  Let a new thread be scheduled,
               simply by doing nothing, causing us to arrive back at
               Phase 1. */
            break;

         case VG_TRC_INNER_COUNTERZERO:
            /* Timeslice is out.  Let a new thread be scheduled,
               simply by doing nothing, causing us to arrive back at
               Phase 1. */
            vg_assert(VG_(dispatch_ctr) == 0);
            break;

         case VG_TRC_UNRESUMABLE_SIGNAL:
            /* It got a SIGSEGV/SIGBUS/SIGILL/SIGFPE, which we need to
               deliver right away. */
            vg_assert(VG_(unresumable_siginfo).si_signo == VKI_SIGSEGV ||
                      VG_(unresumable_siginfo).si_signo == VKI_SIGBUS ||
                      VG_(unresumable_siginfo).si_signo == VKI_SIGILL ||
                      VG_(unresumable_siginfo).si_signo == VKI_SIGFPE);
            vg_assert(VG_(longjmpd_on_signal) == VG_(unresumable_siginfo).si_signo);

            /* make sure we've unblocked the signals which the handler blocked */
            VG_(unblock_host_signal)(VG_(longjmpd_on_signal));

            VG_(deliver_signal)(tid, &VG_(unresumable_siginfo), False);
            VG_(unresumable_siginfo).si_signo = 0; /* done */
            break;

         default:
            VG_(printf)("\ntrc = %d\n", trc);
            VG_(core_panic)("VG_(scheduler), phase 3: "
                            "unexpected thread return code");
            /* NOTREACHED */
            break;

      } /* switch (trc) */

      /* That completes Phase 3 of 3.  Return now to the top of the
         main scheduler loop, to Phase 1 of 3. */

   } /* top-level scheduler loop */


   /* NOTREACHED */
   VG_(core_panic)("scheduler: post-main-loop ?!");
   /* NOTREACHED */
}

1245
jsgf855d93d2003-10-13 22:26:55 +00001246void VG_(need_resched) ( ThreadId prefer )
1247{
1248 /* Tell the scheduler now might be a good time to find a new
1249 runnable thread, because something happened which woke a thread
1250 up.
1251
1252 NB: This can be called unsynchronized from either a signal
1253 handler, or from another LWP (ie, real kernel thread).
1254
1255 In principle this could simply be a matter of setting
1256 VG_(dispatch_ctr) to a small value (say, 2), which would make
1257 any running code come back to the scheduler fairly quickly.
1258
1259 However, since the scheduler implements a strict round-robin
1260 policy with only one priority level, there are, by definition,
1261 no better threads to be running than the current thread anyway,
1262 so we may as well ignore this hint. For processes with a
1263 mixture of compute and I/O bound threads, this means the compute
1264 threads could introduce longish latencies before the I/O threads
1265 run. For programs with only I/O bound threads, need_resched
1266 won't have any effect anyway.
1267
1268 OK, so I've added command-line switches to enable low-latency
1269 syscalls and signals. The prefer_sched variable is in effect
1270 the ID of a single thread which has higher priority than all the
1271 others. If set, the scheduler will prefer to schedule that
1272 thread over all others. Naturally, this could lead to
1273 starvation or other unfairness.
1274 */
1275
1276 if (VG_(dispatch_ctr) > 10)
1277 VG_(dispatch_ctr) = 2;
1278 prefer_sched = prefer;
1279}
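
/* Illustrative sketch only (kept under "#if 0", never compiled): a
   hypothetical wake-up path showing roughly how a caller might use
   VG_(need_resched).  The helper name note_thread_woken is made up for
   illustration and does not exist elsewhere. */
#if 0
static void note_thread_woken ( ThreadId woken )
{
   /* The thread has become runnable again (eg, its blocking operation
      has completed) ... */
   VG_(threads)[woken].status = VgTs_Runnable;
   /* ... so hint to the scheduler that it should come back around
      soon and prefer to schedule 'woken'. */
   VG_(need_resched)(woken);
}
#endif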
1280
sewardje663cb92002-04-12 10:26:32 +00001281
1282/* ---------------------------------------------------------------------
1283 The pthread implementation.
1284 ------------------------------------------------------------------ */
1285
1286#include <pthread.h>
1287#include <errno.h>
1288
sewardjbf290b92002-05-01 02:28:01 +00001289#define VG_PTHREAD_STACK_MIN \
sewardjc3bd5f52002-05-01 03:24:23 +00001290 (VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB)
sewardje663cb92002-04-12 10:26:32 +00001291
1292/* /usr/include/bits/pthreadtypes.h:
1293 typedef unsigned long int pthread_t;
1294*/
1295
sewardje663cb92002-04-12 10:26:32 +00001296
sewardj604ec3c2002-04-18 22:38:41 +00001297/* -----------------------------------------------------------
sewardj20917d82002-05-28 01:36:45 +00001298 Thread CREATION, JOINAGE and CANCELLATION: HELPER FNS
sewardj604ec3c2002-04-18 22:38:41 +00001299 -------------------------------------------------------- */
1300
sewardj20917d82002-05-28 01:36:45 +00001301/* We've decided to action a cancellation on tid. Make it jump to
1302 thread_exit_wrapper() in vg_libpthread.c, passing PTHREAD_CANCELED
1303 as the arg. */
1304static
1305void make_thread_jump_to_cancelhdlr ( ThreadId tid )
1306{
1307 Char msg_buf[100];
1308 vg_assert(VG_(is_valid_tid)(tid));
sewardjdadc8d02002-12-08 23:24:18 +00001309
sewardj20917d82002-05-28 01:36:45 +00001310 /* Push PTHREAD_CANCELED on the stack and jump to the cancellation
1311 handler -- which is really thread_exit_wrapper() in
1312 vg_libpthread.c. */
1313 vg_assert(VG_(threads)[tid].cancel_pend != NULL);
sewardj4bdd9962002-12-26 11:51:50 +00001314
1315 /* Push a suitable arg, and mark it as readable. */
njnd3040452003-05-19 15:04:06 +00001316 SET_PTHREQ_ESP(tid, VG_(threads)[tid].m_esp - 4);
sewardj20917d82002-05-28 01:36:45 +00001317 * (UInt*)(VG_(threads)[tid].m_esp) = (UInt)PTHREAD_CANCELED;
sewardj4bdd9962002-12-26 11:51:50 +00001318 VG_TRACK( post_mem_write, VG_(threads)[tid].m_esp, sizeof(void*) );
1319
1320 /* Push a bogus return address. It will not return, but we still
1321 need to have it so that the arg is at the correct stack offset.
1322      Don't mark as readable; any attempt to read this is an internal
1323 valgrind bug since thread_exit_wrapper should not return. */
njnd3040452003-05-19 15:04:06 +00001324 SET_PTHREQ_ESP(tid, VG_(threads)[tid].m_esp - 4);
sewardj4bdd9962002-12-26 11:51:50 +00001325 * (UInt*)(VG_(threads)[tid].m_esp) = 0xBEADDEEF;
1326
1327 /* .cancel_pend will hold &thread_exit_wrapper */
sewardj20917d82002-05-28 01:36:45 +00001328 VG_(threads)[tid].m_eip = (UInt)VG_(threads)[tid].cancel_pend;
sewardjdadc8d02002-12-08 23:24:18 +00001329
jsgf855d93d2003-10-13 22:26:55 +00001330 VG_(proxy_abort_syscall)(tid);
sewardjdadc8d02002-12-08 23:24:18 +00001331
sewardj20917d82002-05-28 01:36:45 +00001332 VG_(threads)[tid].status = VgTs_Runnable;
sewardjdadc8d02002-12-08 23:24:18 +00001333
sewardj20917d82002-05-28 01:36:45 +00001334 /* Make sure we aren't cancelled again whilst handling this
1335 cancellation. */
1336 VG_(threads)[tid].cancel_st = False;
1337 if (VG_(clo_trace_sched)) {
1338 VG_(sprintf)(msg_buf,
1339 "jump to cancellation handler (hdlr = %p)",
1340 VG_(threads)[tid].cancel_pend);
1341 print_sched_event(tid, msg_buf);
1342 }
1343}
1344
1345
1346
sewardjb48e5002002-05-13 00:16:03 +00001347/* Release resources and generally clean up once a thread has finally
1348 disappeared. */
1349static
jsgf855d93d2003-10-13 22:26:55 +00001350void cleanup_after_thread_exited ( ThreadId tid, Bool forcekill )
sewardjb48e5002002-05-13 00:16:03 +00001351{
sewardj018f7622002-05-15 21:13:39 +00001352 vg_assert(VG_(is_valid_or_empty_tid)(tid));
1353 vg_assert(VG_(threads)[tid].status == VgTs_Empty);
njn25e49d8e72002-09-23 09:36:25 +00001354 /* Its stack is now off-limits */
1355 VG_TRACK( die_mem_stack, VG_(threads)[tid].stack_base,
1356 VG_(threads)[tid].stack_size );
1357
sewardj92a59562002-09-30 00:53:10 +00001358 /* Deallocate its LDT, if it ever had one. */
1359 VG_(deallocate_LDT_for_thread)( VG_(threads)[tid].ldt );
1360 VG_(threads)[tid].ldt = NULL;
jsgf855d93d2003-10-13 22:26:55 +00001361
fitzhardinge47735af2004-01-21 01:27:27 +00001362 /* Clear its TLS array. */
1363 VG_(clear_TLS_for_thread)( VG_(threads)[tid].tls );
1364
jsgf855d93d2003-10-13 22:26:55 +00001365 /* Not interested in the timeout anymore */
1366 VG_(threads)[tid].awaken_at = 0xFFFFFFFF;
1367
1368 /* Delete proxy LWP */
1369 VG_(proxy_delete)(tid, forcekill);
sewardjb48e5002002-05-13 00:16:03 +00001370}
1371
1372
sewardj20917d82002-05-28 01:36:45 +00001373/* Look for matching pairs of threads waiting for joiners and threads
1374 waiting for joinees. For each such pair copy the return value of
1375 the joinee into the joiner, let the joiner resume and discard the
1376 joinee. */
1377static
1378void maybe_rendezvous_joiners_and_joinees ( void )
1379{
1380 Char msg_buf[100];
1381 void** thread_return;
1382 ThreadId jnr, jee;
1383
1384 for (jnr = 1; jnr < VG_N_THREADS; jnr++) {
1385 if (VG_(threads)[jnr].status != VgTs_WaitJoinee)
1386 continue;
1387 jee = VG_(threads)[jnr].joiner_jee_tid;
1388 if (jee == VG_INVALID_THREADID)
1389 continue;
1390 vg_assert(VG_(is_valid_tid)(jee));
jsgf855d93d2003-10-13 22:26:55 +00001391 if (VG_(threads)[jee].status != VgTs_WaitJoiner) {
1392 /* if joinee has become detached, then make join fail with
1393 EINVAL */
1394 if (VG_(threads)[jee].detached) {
1395 VG_(threads)[jnr].status = VgTs_Runnable;
1396 VG_(threads)[jnr].joiner_jee_tid = VG_INVALID_THREADID;
1397 SET_PTHREQ_RETVAL(jnr, VKI_EINVAL);
1398 }
sewardj20917d82002-05-28 01:36:45 +00001399 continue;
jsgf855d93d2003-10-13 22:26:55 +00001400 }
sewardj20917d82002-05-28 01:36:45 +00001401 /* ok! jnr is waiting to join with jee, and jee is waiting to be
1402 joined by ... well, any thread. So let's do it! */
1403
1404 /* Copy return value to where joiner wants it. */
1405 thread_return = VG_(threads)[jnr].joiner_thread_return;
1406 if (thread_return != NULL) {
1407 /* CHECK thread_return writable */
njn72718642003-07-24 08:45:32 +00001408 VG_TRACK( pre_mem_write, Vg_CorePThread, jnr,
njn25e49d8e72002-09-23 09:36:25 +00001409 "pthread_join: thread_return",
1410 (Addr)thread_return, sizeof(void*));
sewardj5a3798b2002-06-04 23:24:22 +00001411
sewardj20917d82002-05-28 01:36:45 +00001412 *thread_return = VG_(threads)[jee].joinee_retval;
1413 /* Not really right, since it makes the thread's return value
1414 appear to be defined even if it isn't. */
njn25e49d8e72002-09-23 09:36:25 +00001415 VG_TRACK( post_mem_write, (Addr)thread_return, sizeof(void*) );
sewardj20917d82002-05-28 01:36:45 +00001416 }
1417
1418 /* Joinee is discarded */
1419 VG_(threads)[jee].status = VgTs_Empty; /* bye! */
jsgf855d93d2003-10-13 22:26:55 +00001420 cleanup_after_thread_exited ( jee, False );
sewardjc4a810d2002-11-13 22:25:51 +00001421 if (VG_(clo_trace_sched)) {
1422 VG_(sprintf)(msg_buf,
1423 "rendezvous with joinee %d. %d resumes, %d exits.",
1424 jee, jnr, jee );
sewardj20917d82002-05-28 01:36:45 +00001425 print_sched_event(jnr, msg_buf);
1426 }
sewardjc4a810d2002-11-13 22:25:51 +00001427
1428 VG_TRACK( post_thread_join, jnr, jee );
sewardj20917d82002-05-28 01:36:45 +00001429
1430 /* joiner returns with success */
1431 VG_(threads)[jnr].status = VgTs_Runnable;
njnd3040452003-05-19 15:04:06 +00001432 SET_PTHREQ_RETVAL(jnr, 0);
sewardj20917d82002-05-28 01:36:45 +00001433 }
1434}
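
/* Illustrative sketch (under "#if 0", never compiled): hypothetical
   client code whose join is completed by the rendezvous above.
   Whichever side arrives first -- the exiting thread (do__wait_joiner)
   or the joining thread (do_pthread_join) -- the pairing happens here
   and the joiner receives the joinee's return value. */
#if 0
#include <pthread.h>
#include <stdio.h>

static void* worker ( void* arg )
{
   return (void*)42;                 /* becomes joinee_retval */
}

int main ( void )
{
   pthread_t th;
   void*     ret = NULL;
   pthread_create(&th, NULL, worker, NULL);
   pthread_join(th, &ret);           /* *thread_return := joinee_retval */
   printf("joinee returned %ld\n", (long)ret);
   return 0;
}
#endif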
1435
1436
sewardjccef2e62002-05-29 19:26:32 +00001437/* Nuke all threads other than tid. POSIX requires this to happen
1438   on __NR_exec, and after a __NR_fork() when we are the child.  Also
jsgf855d93d2003-10-13 22:26:55 +00001439   used at process exit time with
1440   me==VG_INVALID_THREADID */
sewardjccef2e62002-05-29 19:26:32 +00001441void VG_(nuke_all_threads_except) ( ThreadId me )
1442{
1443 ThreadId tid;
1444 for (tid = 1; tid < VG_N_THREADS; tid++) {
1445 if (tid == me
jsgf855d93d2003-10-13 22:26:55 +00001446 || VG_(threads)[tid].status == VgTs_Empty)
sewardjccef2e62002-05-29 19:26:32 +00001447 continue;
sewardjef037c72002-05-30 00:40:03 +00001448 if (0)
1449 VG_(printf)(
1450 "VG_(nuke_all_threads_except): nuking tid %d\n", tid);
jsgf855d93d2003-10-13 22:26:55 +00001451 VG_(proxy_delete)(tid, True);
sewardjccef2e62002-05-29 19:26:32 +00001452 VG_(threads)[tid].status = VgTs_Empty;
jsgf855d93d2003-10-13 22:26:55 +00001453 cleanup_after_thread_exited( tid, True );
sewardjccef2e62002-05-29 19:26:32 +00001454 }
1455}
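
/* Illustrative sketch (under "#if 0", never compiled): hypothetical
   client program showing the POSIX behaviour this implements -- after
   fork() in a multithreaded program only the forking thread exists in
   the child, so all the other thread slots get nuked. */
#if 0
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

static void* spinner ( void* arg ) { for (;;) pause(); return NULL; }

int main ( void )
{
   pthread_t th;
   pid_t     pid;
   pthread_create(&th, NULL, spinner, NULL);
   pid = fork();            /* child: VG_(nuke_all_threads_except)(me) */
   if (pid == 0) {
      printf("child: only one thread now\n");
      _exit(0);
   }
   waitpid(pid, NULL, 0);
   return 0;
}
#endif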
1456
1457
sewardj20917d82002-05-28 01:36:45 +00001458/* -----------------------------------------------------------
1459 Thread CREATION, JOINAGE and CANCELLATION: REQUESTS
1460 -------------------------------------------------------- */
1461
sewardje663cb92002-04-12 10:26:32 +00001462static
sewardj8ad94e12002-05-29 00:10:20 +00001463void do__cleanup_push ( ThreadId tid, CleanupEntry* cu )
1464{
1465 Int sp;
1466 Char msg_buf[100];
1467 vg_assert(VG_(is_valid_tid)(tid));
1468 sp = VG_(threads)[tid].custack_used;
1469 if (VG_(clo_trace_sched)) {
1470 VG_(sprintf)(msg_buf,
1471 "cleanup_push (fn %p, arg %p) -> slot %d",
1472 cu->fn, cu->arg, sp);
1473 print_sched_event(tid, msg_buf);
1474 }
1475 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1476 if (sp == VG_N_CLEANUPSTACK)
njne427a662002-10-02 11:08:25 +00001477 VG_(core_panic)("do__cleanup_push: VG_N_CLEANUPSTACK is too small."
sewardj8ad94e12002-05-29 00:10:20 +00001478 " Increase and recompile.");
1479 VG_(threads)[tid].custack[sp] = *cu;
1480 sp++;
1481 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001482 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001483}
1484
1485
1486static
1487void do__cleanup_pop ( ThreadId tid, CleanupEntry* cu )
1488{
1489 Int sp;
1490 Char msg_buf[100];
1491 vg_assert(VG_(is_valid_tid)(tid));
1492 sp = VG_(threads)[tid].custack_used;
1493 if (VG_(clo_trace_sched)) {
njn36650922002-10-04 09:18:09 +00001494 VG_(sprintf)(msg_buf, "cleanup_pop from slot %d", sp-1);
sewardj8ad94e12002-05-29 00:10:20 +00001495 print_sched_event(tid, msg_buf);
1496 }
1497 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1498 if (sp == 0) {
njnd3040452003-05-19 15:04:06 +00001499 SET_PTHREQ_RETVAL(tid, -1);
sewardj8ad94e12002-05-29 00:10:20 +00001500 return;
1501 }
1502 sp--;
njn72718642003-07-24 08:45:32 +00001503 VG_TRACK( pre_mem_write, Vg_CorePThread, tid,
njn36650922002-10-04 09:18:09 +00001504 "cleanup pop", (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001505 *cu = VG_(threads)[tid].custack[sp];
njn25e49d8e72002-09-23 09:36:25 +00001506 VG_TRACK( post_mem_write, (Addr)cu, sizeof(CleanupEntry) );
sewardj8ad94e12002-05-29 00:10:20 +00001507 VG_(threads)[tid].custack_used = sp;
njnd3040452003-05-19 15:04:06 +00001508 SET_PTHREQ_RETVAL(tid, 0);
sewardj8ad94e12002-05-29 00:10:20 +00001509}
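
/* Illustrative sketch (under "#if 0", never compiled): hypothetical
   client code which drives the two handlers above.  In the client
   library, pthread_cleanup_push() turns into a do__cleanup_push
   request, and pthread_cleanup_pop(1) into a do__cleanup_pop followed
   by a call of the popped handler. */
#if 0
#include <pthread.h>

static void unlock_it ( void* arg )
{
   pthread_mutex_unlock( (pthread_mutex_t*)arg );
}

static void* worker ( void* arg )
{
   pthread_mutex_t* mx = (pthread_mutex_t*)arg;
   pthread_mutex_lock(mx);
   pthread_cleanup_push(unlock_it, mx);   /* -> do__cleanup_push */
   /* ... code containing cancellation points ... */
   pthread_cleanup_pop(1);                /* -> do__cleanup_pop; runs unlock_it */
   return NULL;
}
#endif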
1510
1511
1512static
sewardjff42d1d2002-05-22 13:17:31 +00001513void do_pthread_yield ( ThreadId tid )
1514{
1515 Char msg_buf[100];
1516 vg_assert(VG_(is_valid_tid)(tid));
sewardjff42d1d2002-05-22 13:17:31 +00001517 if (VG_(clo_trace_sched)) {
1518 VG_(sprintf)(msg_buf, "yield");
1519 print_sched_event(tid, msg_buf);
1520 }
njnd3040452003-05-19 15:04:06 +00001521 SET_PTHREQ_RETVAL(tid, 0);
sewardjff42d1d2002-05-22 13:17:31 +00001522}
1523
1524
1525static
sewardj20917d82002-05-28 01:36:45 +00001526void do__testcancel ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00001527{
sewardj7989d0c2002-05-28 11:00:01 +00001528 Char msg_buf[100];
sewardjb48e5002002-05-13 00:16:03 +00001529 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001530 if (VG_(clo_trace_sched)) {
1531 VG_(sprintf)(msg_buf, "testcancel");
1532 print_sched_event(tid, msg_buf);
1533 }
sewardj20917d82002-05-28 01:36:45 +00001534 if (/* is there a cancellation pending on this thread? */
1535 VG_(threads)[tid].cancel_pend != NULL
1536 && /* is this thread accepting cancellations? */
1537 VG_(threads)[tid].cancel_st) {
1538 /* Ok, let's do the cancellation. */
1539 make_thread_jump_to_cancelhdlr ( tid );
sewardje663cb92002-04-12 10:26:32 +00001540 } else {
sewardj20917d82002-05-28 01:36:45 +00001541 /* No, we keep going. */
njnd3040452003-05-19 15:04:06 +00001542 SET_PTHREQ_RETVAL(tid, 0);
sewardje663cb92002-04-12 10:26:32 +00001543 }
sewardje663cb92002-04-12 10:26:32 +00001544}
1545
1546
1547static
sewardj20917d82002-05-28 01:36:45 +00001548void do__set_cancelstate ( ThreadId tid, Int state )
1549{
1550 Bool old_st;
sewardj7989d0c2002-05-28 11:00:01 +00001551 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001552 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001553 if (VG_(clo_trace_sched)) {
1554 VG_(sprintf)(msg_buf, "set_cancelstate to %d (%s)", state,
1555 state==PTHREAD_CANCEL_ENABLE
1556 ? "ENABLE"
1557 : (state==PTHREAD_CANCEL_DISABLE ? "DISABLE" : "???"));
1558 print_sched_event(tid, msg_buf);
1559 }
sewardj20917d82002-05-28 01:36:45 +00001560 old_st = VG_(threads)[tid].cancel_st;
1561 if (state == PTHREAD_CANCEL_ENABLE) {
1562 VG_(threads)[tid].cancel_st = True;
1563 } else
1564 if (state == PTHREAD_CANCEL_DISABLE) {
1565 VG_(threads)[tid].cancel_st = False;
1566 } else {
njne427a662002-10-02 11:08:25 +00001567 VG_(core_panic)("do__set_cancelstate");
sewardj20917d82002-05-28 01:36:45 +00001568 }
njnd3040452003-05-19 15:04:06 +00001569 SET_PTHREQ_RETVAL(tid, old_st ? PTHREAD_CANCEL_ENABLE
1570 : PTHREAD_CANCEL_DISABLE);
sewardj20917d82002-05-28 01:36:45 +00001571}
1572
1573
1574static
1575void do__set_canceltype ( ThreadId tid, Int type )
1576{
1577 Bool old_ty;
sewardj7989d0c2002-05-28 11:00:01 +00001578 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001579 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001580 if (VG_(clo_trace_sched)) {
1581 VG_(sprintf)(msg_buf, "set_canceltype to %d (%s)", type,
1582 type==PTHREAD_CANCEL_ASYNCHRONOUS
1583 ? "ASYNCHRONOUS"
1584 : (type==PTHREAD_CANCEL_DEFERRED ? "DEFERRED" : "???"));
1585 print_sched_event(tid, msg_buf);
1586 }
sewardj20917d82002-05-28 01:36:45 +00001587 old_ty = VG_(threads)[tid].cancel_ty;
1588 if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
1589 VG_(threads)[tid].cancel_ty = False;
1590 } else
1591 if (type == PTHREAD_CANCEL_DEFERRED) {
sewardjaf00b6d2002-05-29 23:30:28 +00001592 VG_(threads)[tid].cancel_ty = True;
sewardj20917d82002-05-28 01:36:45 +00001593 } else {
njne427a662002-10-02 11:08:25 +00001594 VG_(core_panic)("do__set_canceltype");
sewardj20917d82002-05-28 01:36:45 +00001595 }
njnd3040452003-05-19 15:04:06 +00001596 SET_PTHREQ_RETVAL(tid, old_ty ? PTHREAD_CANCEL_DEFERRED
sewardj20917d82002-05-28 01:36:45 +00001597 : PTHREAD_CANCEL_ASYNCHRONOUS);
1598}
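
/* Illustrative sketch (under "#if 0", never compiled): hypothetical
   client use of the two requests above.  pthread_setcancelstate() ends
   up in do__set_cancelstate and pthread_setcanceltype() in
   do__set_canceltype; both hand back the previous setting, as
   implemented above. */
#if 0
#include <pthread.h>

static void do_uninterruptible_work ( void )
{
   int old_state;
   pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old_state);
   /* ... work which must not be abandoned half-way ... */
   pthread_setcancelstate(old_state, &old_state);
   pthread_testcancel();   /* -> do__testcancel: act on any cancellation
                              which arrived while we were disabled */
}
#endif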
1599
1600
sewardj7989d0c2002-05-28 11:00:01 +00001601/* Set or get the detach state for thread det. */
sewardj20917d82002-05-28 01:36:45 +00001602static
sewardj7989d0c2002-05-28 11:00:01 +00001603void do__set_or_get_detach ( ThreadId tid,
1604 Int what, ThreadId det )
sewardj20917d82002-05-28 01:36:45 +00001605{
sewardj7989d0c2002-05-28 11:00:01 +00001606 Char msg_buf[100];
1607 /* VG_(printf)("do__set_or_get_detach tid %d what %d det %d\n",
1608 tid, what, det); */
sewardj20917d82002-05-28 01:36:45 +00001609 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001610 if (VG_(clo_trace_sched)) {
1611 VG_(sprintf)(msg_buf, "set_or_get_detach %d (%s) for tid %d", what,
1612 what==0 ? "not-detached" : (
1613 what==1 ? "detached" : (
1614 what==2 ? "fetch old value" : "???")),
1615 det );
1616 print_sched_event(tid, msg_buf);
1617 }
1618
1619 if (!VG_(is_valid_tid)(det)) {
njnd3040452003-05-19 15:04:06 +00001620 SET_PTHREQ_RETVAL(tid, -1);
sewardj7989d0c2002-05-28 11:00:01 +00001621 return;
1622 }
1623
sewardj20917d82002-05-28 01:36:45 +00001624 switch (what) {
1625 case 2: /* get */
njnd3040452003-05-19 15:04:06 +00001626 SET_PTHREQ_RETVAL(tid, VG_(threads)[det].detached ? 1 : 0);
sewardj20917d82002-05-28 01:36:45 +00001627 return;
jsgf855d93d2003-10-13 22:26:55 +00001628 case 1:
sewardj7989d0c2002-05-28 11:00:01 +00001629 VG_(threads)[det].detached = True;
njnd3040452003-05-19 15:04:06 +00001630 SET_PTHREQ_RETVAL(tid, 0);
jsgf855d93d2003-10-13 22:26:55 +00001631 /* wake anyone who was joining on us */
1632 maybe_rendezvous_joiners_and_joinees();
sewardj20917d82002-05-28 01:36:45 +00001633 return;
1634 case 0: /* set not detached */
sewardj7989d0c2002-05-28 11:00:01 +00001635 VG_(threads)[det].detached = False;
njnd3040452003-05-19 15:04:06 +00001636 SET_PTHREQ_RETVAL(tid, 0);
sewardj20917d82002-05-28 01:36:45 +00001637 return;
1638 default:
njne427a662002-10-02 11:08:25 +00001639 VG_(core_panic)("do__set_or_get_detach");
sewardj20917d82002-05-28 01:36:45 +00001640 }
1641}
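
/* Illustrative sketch (under "#if 0", never compiled): hypothetical
   client code for the what==1 (detach) case above.  Once a thread is
   detached, a later pthread_join() on it is failed with EINVAL (see
   do_pthread_join below). */
#if 0
#include <pthread.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static void* worker ( void* arg ) { sleep(1); return NULL; }

int main ( void )
{
   pthread_t th;
   int r;
   pthread_create(&th, NULL, worker, NULL);
   pthread_detach(th);        /* -> do__set_or_get_detach(me, 1, th) */
   r = pthread_join(th, NULL);
   printf("join after detach -> %s\n", r == EINVAL ? "EINVAL" : "other");
   sleep(2);                  /* give the detached thread time to finish */
   return 0;
}
#endif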
1642
1643
1644static
1645void do__set_cancelpend ( ThreadId tid,
1646 ThreadId cee,
1647 void (*cancelpend_hdlr)(void*) )
sewardje663cb92002-04-12 10:26:32 +00001648{
1649 Char msg_buf[100];
1650
sewardj20917d82002-05-28 01:36:45 +00001651 vg_assert(VG_(is_valid_tid)(tid));
1652 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1653
sewardj7989d0c2002-05-28 11:00:01 +00001654 if (!VG_(is_valid_tid)(cee)) {
1655 if (VG_(clo_trace_sched)) {
1656 VG_(sprintf)(msg_buf,
1657 "set_cancelpend for invalid tid %d", cee);
1658 print_sched_event(tid, msg_buf);
1659 }
njn25e49d8e72002-09-23 09:36:25 +00001660 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001661 "pthread_cancel: target thread does not exist, or invalid");
jsgf855d93d2003-10-13 22:26:55 +00001662 SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
sewardj7989d0c2002-05-28 11:00:01 +00001663 return;
1664 }
sewardj20917d82002-05-28 01:36:45 +00001665
1666 VG_(threads)[cee].cancel_pend = cancelpend_hdlr;
1667
jsgf855d93d2003-10-13 22:26:55 +00001668 /* interrupt a pending syscall */
1669 VG_(proxy_abort_syscall)(cee);
1670
sewardj20917d82002-05-28 01:36:45 +00001671 if (VG_(clo_trace_sched)) {
1672 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001673 "set_cancelpend (hdlr = %p, set by tid %d)",
sewardj20917d82002-05-28 01:36:45 +00001674 cancelpend_hdlr, tid);
1675 print_sched_event(cee, msg_buf);
1676 }
1677
1678 /* Thread doing the cancelling returns with success. */
njnd3040452003-05-19 15:04:06 +00001679 SET_PTHREQ_RETVAL(tid, 0);
sewardj20917d82002-05-28 01:36:45 +00001680
1681 /* Perhaps we can nuke the cancellee right now? */
jsgf855d93d2003-10-13 22:26:55 +00001682 if (!VG_(threads)[cee].cancel_ty) /* if PTHREAD_CANCEL_ASYNCHRONOUS */
1683 do__testcancel(cee);
sewardj20917d82002-05-28 01:36:45 +00001684}
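
/* Illustrative sketch (under "#if 0", never compiled): hypothetical
   client code which exercises the request above.  pthread_cancel(th)
   names th as the cancellee; once the cancellation is acted on, th
   jumps to its cancellation handler via make_thread_jump_to_cancelhdlr
   and the joiner sees PTHREAD_CANCELED. */
#if 0
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void* sleeper ( void* arg )
{
   for (;;)
      sleep(1);               /* sleep() is a cancellation point */
   return NULL;
}

int main ( void )
{
   pthread_t th;
   void*     res = NULL;
   pthread_create(&th, NULL, sleeper, NULL);
   sleep(1);
   pthread_cancel(th);        /* -> do__set_cancelpend(me, th, hdlr) */
   pthread_join(th, &res);
   printf("cancelled: %s\n", res == PTHREAD_CANCELED ? "yes" : "no");
   return 0;
}
#endif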
1685
1686
1687static
1688void do_pthread_join ( ThreadId tid,
1689 ThreadId jee, void** thread_return )
1690{
1691 Char msg_buf[100];
1692 ThreadId i;
sewardje663cb92002-04-12 10:26:32 +00001693 /* jee, the joinee, is the thread specified as an arg in thread
1694 tid's call to pthread_join. So tid is the join-er. */
sewardjb48e5002002-05-13 00:16:03 +00001695 vg_assert(VG_(is_valid_tid)(tid));
sewardj018f7622002-05-15 21:13:39 +00001696 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001697
1698 if (jee == tid) {
njn25e49d8e72002-09-23 09:36:25 +00001699 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001700 "pthread_join: attempt to join to self");
njnd3040452003-05-19 15:04:06 +00001701 SET_PTHREQ_RETVAL(tid, EDEADLK); /* libc constant, not a kernel one */
jsgf855d93d2003-10-13 22:26:55 +00001702 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00001703 return;
1704 }
1705
sewardj20917d82002-05-28 01:36:45 +00001706 /* Flush any completed pairs, so as to make sure what we're looking
1707 at is up-to-date. */
1708 maybe_rendezvous_joiners_and_joinees();
1709
1710 /* Is this a sane request? */
jsgf855d93d2003-10-13 22:26:55 +00001711 if ( ! VG_(is_valid_tid)(jee) ||
1712 VG_(threads)[jee].detached) {
sewardje663cb92002-04-12 10:26:32 +00001713 /* Invalid thread to join to. */
njn25e49d8e72002-09-23 09:36:25 +00001714 VG_(record_pthread_error)( tid,
jsgf855d93d2003-10-13 22:26:55 +00001715 "pthread_join: target thread does not exist, invalid, or detached");
1716 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardje663cb92002-04-12 10:26:32 +00001717 return;
1718 }
1719
sewardj20917d82002-05-28 01:36:45 +00001720 /* Is anyone else already in a join-wait for jee? */
1721 for (i = 1; i < VG_N_THREADS; i++) {
1722 if (i == tid) continue;
1723 if (VG_(threads)[i].status == VgTs_WaitJoinee
1724 && VG_(threads)[i].joiner_jee_tid == jee) {
1725 /* Someone already did join on this thread */
njn25e49d8e72002-09-23 09:36:25 +00001726 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00001727 "pthread_join: another thread already "
1728 "in join-wait for target thread");
jsgf855d93d2003-10-13 22:26:55 +00001729 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
1730 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
sewardj20917d82002-05-28 01:36:45 +00001731 return;
1732 }
sewardje663cb92002-04-12 10:26:32 +00001733 }
1734
sewardj20917d82002-05-28 01:36:45 +00001735 /* Mark this thread as waiting for the joinee. */
sewardj018f7622002-05-15 21:13:39 +00001736 VG_(threads)[tid].status = VgTs_WaitJoinee;
sewardj20917d82002-05-28 01:36:45 +00001737 VG_(threads)[tid].joiner_thread_return = thread_return;
1738 VG_(threads)[tid].joiner_jee_tid = jee;
1739
1740 /* Look for matching joiners and joinees and do the right thing. */
1741 maybe_rendezvous_joiners_and_joinees();
1742
1743   /* Return value is irrelevant since this thread becomes
1744      non-runnable. maybe_rendezvous_joiners_and_joinees() will cause it
1745      to return the right value when it resumes. */
1746
sewardj8937c812002-04-12 20:12:20 +00001747 if (VG_(clo_trace_sched)) {
sewardj20917d82002-05-28 01:36:45 +00001748 VG_(sprintf)(msg_buf,
1749 "wait for joinee %d (may already be ready)", jee);
sewardje663cb92002-04-12 10:26:32 +00001750 print_sched_event(tid, msg_buf);
1751 }
sewardje663cb92002-04-12 10:26:32 +00001752}
1753
1754
sewardj20917d82002-05-28 01:36:45 +00001755/* ( void* ): calling thread waits for joiner and returns the void* to
1756 it. This is one of two ways in which a thread can finally exit --
1757 the other is do__quit. */
sewardje663cb92002-04-12 10:26:32 +00001758static
sewardj20917d82002-05-28 01:36:45 +00001759void do__wait_joiner ( ThreadId tid, void* retval )
sewardje663cb92002-04-12 10:26:32 +00001760{
sewardj20917d82002-05-28 01:36:45 +00001761 Char msg_buf[100];
1762 vg_assert(VG_(is_valid_tid)(tid));
1763 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1764 if (VG_(clo_trace_sched)) {
1765 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001766 "do__wait_joiner(retval = %p) (non-detached thread exit)", retval);
sewardj20917d82002-05-28 01:36:45 +00001767 print_sched_event(tid, msg_buf);
1768 }
1769 VG_(threads)[tid].status = VgTs_WaitJoiner;
1770 VG_(threads)[tid].joinee_retval = retval;
1771 maybe_rendezvous_joiners_and_joinees();
1772}
1773
1774
1775/* ( no-args ): calling thread disappears from the system forever.
1776 Reclaim resources. */
1777static
1778void do__quit ( ThreadId tid )
1779{
1780 Char msg_buf[100];
1781 vg_assert(VG_(is_valid_tid)(tid));
1782 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1783 VG_(threads)[tid].status = VgTs_Empty; /* bye! */
jsgf855d93d2003-10-13 22:26:55 +00001784 cleanup_after_thread_exited ( tid, False );
sewardj20917d82002-05-28 01:36:45 +00001785 if (VG_(clo_trace_sched)) {
sewardj7989d0c2002-05-28 11:00:01 +00001786 VG_(sprintf)(msg_buf, "do__quit (detached thread exit)");
sewardj20917d82002-05-28 01:36:45 +00001787 print_sched_event(tid, msg_buf);
1788 }
jsgf855d93d2003-10-13 22:26:55 +00001789 maybe_rendezvous_joiners_and_joinees();
sewardj20917d82002-05-28 01:36:45 +00001790 /* Return value is irrelevant; this thread will not get
1791 rescheduled. */
1792}
1793
1794
1795/* Should never be entered. If it is, it will be on the simulated
1796   CPU. */
1797static
1798void do__apply_in_new_thread_bogusRA ( void )
1799{
njne427a662002-10-02 11:08:25 +00001800 VG_(core_panic)("do__apply_in_new_thread_bogusRA");
sewardj20917d82002-05-28 01:36:45 +00001801}
1802
1803/* (Fn, Arg): Create a new thread and run Fn applied to Arg in it. Fn
1804 MUST NOT return -- ever. Eventually it will do either __QUIT or
1805 __WAIT_JOINER. Return the child tid to the parent. */
1806static
1807void do__apply_in_new_thread ( ThreadId parent_tid,
1808 void* (*fn)(void *),
1809 void* arg )
1810{
sewardje663cb92002-04-12 10:26:32 +00001811 Addr new_stack;
1812 UInt new_stk_szb;
1813 ThreadId tid;
1814 Char msg_buf[100];
1815
1816 /* Paranoia ... */
1817 vg_assert(sizeof(pthread_t) == sizeof(UInt));
1818
sewardj018f7622002-05-15 21:13:39 +00001819 vg_assert(VG_(threads)[parent_tid].status != VgTs_Empty);
sewardje663cb92002-04-12 10:26:32 +00001820
sewardj1e8cdc92002-04-18 11:37:52 +00001821 tid = vg_alloc_ThreadState();
sewardje663cb92002-04-12 10:26:32 +00001822
1823 /* If we've created the main thread's tid, we're in deep trouble :) */
sewardj6072c362002-04-19 14:40:57 +00001824 vg_assert(tid != 1);
sewardj018f7622002-05-15 21:13:39 +00001825 vg_assert(VG_(is_valid_or_empty_tid)(tid));
sewardje663cb92002-04-12 10:26:32 +00001826
sewardjc4a810d2002-11-13 22:25:51 +00001827 /* do this early, before the child gets any memory writes */
1828 VG_TRACK ( post_thread_create, parent_tid, tid );
1829
sewardjf6374322002-11-13 22:35:55 +00001830 /* Create new thread with default attrs:
1831 deferred cancellation, not detached
1832 */
1833 mostly_clear_thread_record(tid);
1834 VG_(threads)[tid].status = VgTs_Runnable;
1835
sewardje663cb92002-04-12 10:26:32 +00001836 /* Copy the parent's CPU state into the child's, in a roundabout
1837 way (via baseBlock). */
1838 VG_(load_thread_state)(parent_tid);
sewardjca340b32002-12-08 22:14:11 +00001839
1840 /* We inherit our parent's LDT. */
1841 if (VG_(threads)[parent_tid].ldt == NULL) {
1842 /* We hope this is the common case. */
1843 VG_(baseBlock)[VGOFF_(ldt)] = 0;
1844 } else {
1845 /* No luck .. we have to take a copy of the parent's. */
1846 VG_(threads)[tid].ldt
1847 = VG_(allocate_LDT_for_thread)( VG_(threads)[parent_tid].ldt );
1848 VG_(baseBlock)[VGOFF_(ldt)] = (UInt)VG_(threads)[tid].ldt;
1849 }
1850
fitzhardinge47735af2004-01-21 01:27:27 +00001851 /* Initialise the thread's TLS array */
1852 VG_(clear_TLS_for_thread)( VG_(threads)[tid].tls );
1853 VG_(baseBlock)[VGOFF_(tls)] = (UInt)VG_(threads)[tid].tls;
1854
sewardje663cb92002-04-12 10:26:32 +00001855 VG_(save_thread_state)(tid);
sewardjf6374322002-11-13 22:35:55 +00001856 vg_tid_last_in_baseBlock = tid;
sewardje663cb92002-04-12 10:26:32 +00001857
1858 /* Consider allocating the child a stack, if the one it already has
1859 is inadequate. */
sewardjbf290b92002-05-01 02:28:01 +00001860 new_stk_szb = VG_PTHREAD_STACK_MIN;
sewardje663cb92002-04-12 10:26:32 +00001861
sewardj018f7622002-05-15 21:13:39 +00001862 if (new_stk_szb > VG_(threads)[tid].stack_size) {
sewardje663cb92002-04-12 10:26:32 +00001863 /* Again, for good measure :) We definitely don't want to be
1864 allocating a stack for the main thread. */
sewardj6072c362002-04-19 14:40:57 +00001865 vg_assert(tid != 1);
sewardje663cb92002-04-12 10:26:32 +00001866 /* for now, we don't handle the case of anything other than
1867 assigning it for the first time. */
sewardj018f7622002-05-15 21:13:39 +00001868 vg_assert(VG_(threads)[tid].stack_size == 0);
1869 vg_assert(VG_(threads)[tid].stack_base == (Addr)NULL);
fitzhardinge98abfc72003-12-16 02:05:15 +00001870 new_stack = VG_(client_alloc)(0, new_stk_szb,
1871 VKI_PROT_READ | VKI_PROT_WRITE | VKI_PROT_EXEC,
1872 SF_STACK);
sewardj018f7622002-05-15 21:13:39 +00001873 VG_(threads)[tid].stack_base = new_stack;
1874 VG_(threads)[tid].stack_size = new_stk_szb;
1875 VG_(threads)[tid].stack_highest_word
sewardje663cb92002-04-12 10:26:32 +00001876 = new_stack + new_stk_szb
sewardj1e8cdc92002-04-18 11:37:52 +00001877           - VG_AR_CLIENT_STACKBASE_REDZONE_SZB; /* -4 ??? */
sewardje663cb92002-04-12 10:26:32 +00001878 }
sewardj1e8cdc92002-04-18 11:37:52 +00001879
njn25e49d8e72002-09-23 09:36:25 +00001880 /* Having got memory to hold the thread's stack:
1881 - set %esp as base + size
1882 - mark everything below %esp inaccessible
1883 - mark redzone at stack end inaccessible
1884 */
njnd3040452003-05-19 15:04:06 +00001885 SET_PTHREQ_ESP(tid, VG_(threads)[tid].stack_base
1886 + VG_(threads)[tid].stack_size
1887 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
sewardj1e8cdc92002-04-18 11:37:52 +00001888
njn25e49d8e72002-09-23 09:36:25 +00001889 VG_TRACK ( die_mem_stack, VG_(threads)[tid].stack_base,
1890 + new_stk_szb - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
1891 VG_TRACK ( ban_mem_stack, VG_(threads)[tid].m_esp,
1892 VG_AR_CLIENT_STACKBASE_REDZONE_SZB );
sewardje663cb92002-04-12 10:26:32 +00001893
njn25e49d8e72002-09-23 09:36:25 +00001894 /* push two args */
njnd3040452003-05-19 15:04:06 +00001895 SET_PTHREQ_ESP(tid, VG_(threads)[tid].m_esp - 8);
1896
njn25e49d8e72002-09-23 09:36:25 +00001897 VG_TRACK ( new_mem_stack, (Addr)VG_(threads)[tid].m_esp, 2 * 4 );
njn72718642003-07-24 08:45:32 +00001898 VG_TRACK ( pre_mem_write, Vg_CorePThread, tid, "new thread: stack",
njn25e49d8e72002-09-23 09:36:25 +00001899 (Addr)VG_(threads)[tid].m_esp, 2 * 4 );
1900
1901 /* push arg and (bogus) return address */
1902 * (UInt*)(VG_(threads)[tid].m_esp+4) = (UInt)arg;
sewardj20917d82002-05-28 01:36:45 +00001903 * (UInt*)(VG_(threads)[tid].m_esp)
1904 = (UInt)&do__apply_in_new_thread_bogusRA;
sewardje663cb92002-04-12 10:26:32 +00001905
njn25e49d8e72002-09-23 09:36:25 +00001906 VG_TRACK ( post_mem_write, VG_(threads)[tid].m_esp, 2 * 4 );
sewardje663cb92002-04-12 10:26:32 +00001907
1908 /* this is where we start */
sewardj20917d82002-05-28 01:36:45 +00001909 VG_(threads)[tid].m_eip = (UInt)fn;
sewardje663cb92002-04-12 10:26:32 +00001910
sewardj8937c812002-04-12 20:12:20 +00001911 if (VG_(clo_trace_sched)) {
njn25e49d8e72002-09-23 09:36:25 +00001912 VG_(sprintf)(msg_buf, "new thread, created by %d", parent_tid );
sewardje663cb92002-04-12 10:26:32 +00001913 print_sched_event(tid, msg_buf);
1914 }
1915
sewardj018f7622002-05-15 21:13:39 +00001916 /* We inherit our parent's signal mask. */
1917 VG_(threads)[tid].sig_mask = VG_(threads)[parent_tid].sig_mask;
jsgf855d93d2003-10-13 22:26:55 +00001918
1919 /* Now that the signal mask is set up, create a proxy LWP for this thread */
1920 VG_(proxy_create)(tid);
1921
1922 /* Set the proxy's signal mask */
1923 VG_(proxy_setsigmask)(tid);
sewardjb48e5002002-05-13 00:16:03 +00001924
sewardj20917d82002-05-28 01:36:45 +00001925 /* return child's tid to parent */
njnd3040452003-05-19 15:04:06 +00001926 SET_PTHREQ_RETVAL(parent_tid, tid); /* success */
sewardje663cb92002-04-12 10:26:32 +00001927}
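
/* Rough C rendering (under "#if 0", never compiled) of the child state
   set up above, assuming it helps to see the stack layout as code: fn
   is entered with arg at [esp+4] and a bogus return address at [esp],
   so a thread function which (illegally) returned would land in
   do__apply_in_new_thread_bogusRA() rather than in any real caller. */
#if 0
static void child_initial_state_as_C ( void* (*fn)(void*), void* arg )
{
   /* m_eip := fn; the two words pushed above behave like this call: */
   fn(arg);
   /* Normally unreachable: fn must finish via __QUIT or __WAIT_JOINER. */
   do__apply_in_new_thread_bogusRA();
}
#endif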
1928
1929
sewardj604ec3c2002-04-18 22:38:41 +00001930/* -----------------------------------------------------------
1931 MUTEXes
1932 -------------------------------------------------------- */
1933
nethercote1f0173b2004-02-28 15:40:36 +00001934/* vg_pthread_mutex_t is defined in vg_include.h.
sewardj604ec3c2002-04-18 22:38:41 +00001935
nethercote1f0173b2004-02-28 15:40:36 +00001936 The initializers zero everything, except possibly the fourth word,
1937 which in vg_pthread_mutex_t is the __vg_m_kind field. It gets set to one
1938 of PTHREAD_MUTEX_{TIMED,RECURSIVE,ERRORCHECK,ADAPTIVE}_NP
sewardj604ec3c2002-04-18 22:38:41 +00001939
sewardj6072c362002-04-19 14:40:57 +00001940 How we use it:
sewardj604ec3c2002-04-18 22:38:41 +00001941
nethercote1f0173b2004-02-28 15:40:36 +00001942 __vg_m_kind never changes and indicates whether or not it is recursive.
sewardj6072c362002-04-19 14:40:57 +00001943
nethercote1f0173b2004-02-28 15:40:36 +00001944 __vg_m_count indicates the lock count; if 0, the mutex is not owned by
sewardj6072c362002-04-19 14:40:57 +00001945 anybody.
1946
nethercote1f0173b2004-02-28 15:40:36 +00001947 __vg_m_owner has a ThreadId value stuffed into it. We carefully arrange
sewardj6072c362002-04-19 14:40:57 +00001948 that ThreadId == 0 is invalid (VG_INVALID_THREADID), so that
1949 statically initialised mutexes correctly appear
1950 to belong to nobody.
1951
nethercote1f0173b2004-02-28 15:40:36 +00001952   In summary, a not-in-use mutex is distinguished by having __vg_m_owner
1953 == 0 (VG_INVALID_THREADID) and __vg_m_count == 0 too. If one of those
sewardj6072c362002-04-19 14:40:57 +00001954 conditions holds, the other should too.
1955
1956 There is no linked list of threads waiting for this mutex. Instead
1957   a thread in WaitMX state points at the mutex with its associated_mx
1958   field. This makes _unlock() inefficient, but keeps it simple to
1959   implement the right semantics vis-a-vis signals.
sewardje663cb92002-04-12 10:26:32 +00001960
sewardj604ec3c2002-04-18 22:38:41 +00001961 We don't have to deal with mutex initialisation; the client side
sewardj6072c362002-04-19 14:40:57 +00001962 deals with that for us.
1963*/
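
/* A minimal sketch (under "#if 0", never compiled) of the not-in-use
   invariant described above; mutex_is_unheld is a made-up helper, not
   used anywhere else. */
#if 0
static Bool mutex_is_unheld ( vg_pthread_mutex_t* mutex )
{
   if (mutex->__vg_m_owner == VG_INVALID_THREADID) {
      vg_assert(mutex->__vg_m_count == 0);  /* both halves hold together */
      return True;
   }
   vg_assert(mutex->__vg_m_count > 0);
   return False;
}
#endif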
sewardje663cb92002-04-12 10:26:32 +00001964
sewardj3b5d8862002-04-20 13:53:23 +00001965/* Helper fns ... */
1966static
nethercote1f0173b2004-02-28 15:40:36 +00001967void release_one_thread_waiting_on_mutex ( vg_pthread_mutex_t* mutex,
sewardj3b5d8862002-04-20 13:53:23 +00001968 Char* caller )
1969{
1970 Int i;
1971 Char msg_buf[100];
1972
1973 /* Find some arbitrary thread waiting on this mutex, and make it
1974 runnable. If none are waiting, mark the mutex as not held. */
1975 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00001976 if (VG_(threads)[i].status == VgTs_Empty)
sewardj3b5d8862002-04-20 13:53:23 +00001977 continue;
sewardj018f7622002-05-15 21:13:39 +00001978 if (VG_(threads)[i].status == VgTs_WaitMX
1979 && VG_(threads)[i].associated_mx == mutex)
sewardj3b5d8862002-04-20 13:53:23 +00001980 break;
1981 }
1982
nethercote1f0173b2004-02-28 15:40:36 +00001983 VG_TRACK( post_mutex_unlock, (ThreadId)mutex->__vg_m_owner, mutex );
sewardj0af43bc2002-10-22 04:30:35 +00001984
sewardj3b5d8862002-04-20 13:53:23 +00001985 vg_assert(i <= VG_N_THREADS);
1986 if (i == VG_N_THREADS) {
1987 /* Nobody else is waiting on it. */
nethercote1f0173b2004-02-28 15:40:36 +00001988 mutex->__vg_m_count = 0;
1989 mutex->__vg_m_owner = VG_INVALID_THREADID;
sewardj3b5d8862002-04-20 13:53:23 +00001990 } else {
1991 /* Notionally transfer the hold to thread i, whose
1992 pthread_mutex_lock() call now returns with 0 (success). */
1993 /* The .count is already == 1. */
sewardj018f7622002-05-15 21:13:39 +00001994 vg_assert(VG_(threads)[i].associated_mx == mutex);
nethercote1f0173b2004-02-28 15:40:36 +00001995 mutex->__vg_m_owner = (/*_pthread_descr*/void*)i;
sewardj018f7622002-05-15 21:13:39 +00001996 VG_(threads)[i].status = VgTs_Runnable;
1997 VG_(threads)[i].associated_mx = NULL;
sewardj5f07b662002-04-23 16:52:51 +00001998 /* m_edx already holds pth_mx_lock() success (0) */
sewardj3b5d8862002-04-20 13:53:23 +00001999
sewardj0af43bc2002-10-22 04:30:35 +00002000 VG_TRACK( post_mutex_lock, (ThreadId)i, mutex);
2001
sewardj3b5d8862002-04-20 13:53:23 +00002002 if (VG_(clo_trace_pthread_level) >= 1) {
2003 VG_(sprintf)(msg_buf, "%s mx %p: RESUME",
2004 caller, mutex );
2005 print_pthread_event(i, msg_buf);
2006 }
2007 }
2008}
2009
sewardje663cb92002-04-12 10:26:32 +00002010
2011static
sewardj30671ff2002-04-21 00:13:57 +00002012void do_pthread_mutex_lock( ThreadId tid,
2013 Bool is_trylock,
nethercote1f0173b2004-02-28 15:40:36 +00002014 vg_pthread_mutex_t* mutex )
sewardje663cb92002-04-12 10:26:32 +00002015{
sewardj30671ff2002-04-21 00:13:57 +00002016 Char msg_buf[100];
2017 Char* caller
sewardj8ccc2be2002-05-10 20:26:37 +00002018 = is_trylock ? "pthread_mutex_trylock"
2019 : "pthread_mutex_lock ";
sewardje663cb92002-04-12 10:26:32 +00002020
sewardj604ec3c2002-04-18 22:38:41 +00002021 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj30671ff2002-04-21 00:13:57 +00002022 VG_(sprintf)(msg_buf, "%s mx %p ...", caller, mutex );
sewardj604ec3c2002-04-18 22:38:41 +00002023 print_pthread_event(tid, msg_buf);
2024 }
2025
2026 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002027 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002028 && VG_(threads)[tid].status == VgTs_Runnable);
sewardje663cb92002-04-12 10:26:32 +00002029
2030 /* POSIX doesn't mandate this, but for sanity ... */
2031 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002032 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002033 "pthread_mutex_lock/trylock: mutex is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002034 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardje663cb92002-04-12 10:26:32 +00002035 return;
2036 }
2037
sewardj604ec3c2002-04-18 22:38:41 +00002038 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00002039 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002040# ifndef GLIBC_2_1
sewardj604ec3c2002-04-18 22:38:41 +00002041 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002042 case PTHREAD_MUTEX_ADAPTIVE_NP:
2043# endif
sewardja1679dd2002-05-10 22:31:40 +00002044# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002045 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002046# endif
sewardj604ec3c2002-04-18 22:38:41 +00002047 case PTHREAD_MUTEX_RECURSIVE_NP:
2048 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002049 if (mutex->__vg_m_count >= 0) break;
sewardj604ec3c2002-04-18 22:38:41 +00002050 /* else fall thru */
2051 default:
njn25e49d8e72002-09-23 09:36:25 +00002052 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002053 "pthread_mutex_lock/trylock: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002054 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002055 return;
sewardje663cb92002-04-12 10:26:32 +00002056 }
2057
nethercote1f0173b2004-02-28 15:40:36 +00002058 if (mutex->__vg_m_count > 0) {
2059 if (!VG_(is_valid_tid)((ThreadId)mutex->__vg_m_owner)) {
fitzhardinge47735af2004-01-21 01:27:27 +00002060 VG_(record_pthread_error)( tid,
2061 "pthread_mutex_lock/trylock: mutex has invalid owner");
2062 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
2063 return;
2064 }
sewardjf8f819e2002-04-17 23:21:37 +00002065
2066 /* Someone has it already. */
nethercote1f0173b2004-02-28 15:40:36 +00002067 if ((ThreadId)mutex->__vg_m_owner == tid) {
sewardjf8f819e2002-04-17 23:21:37 +00002068 /* It's locked -- by me! */
nethercote1f0173b2004-02-28 15:40:36 +00002069 if (mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
sewardjf8f819e2002-04-17 23:21:37 +00002070 /* return 0 (success). */
nethercote1f0173b2004-02-28 15:40:36 +00002071 mutex->__vg_m_count++;
njnd3040452003-05-19 15:04:06 +00002072 SET_PTHREQ_RETVAL(tid, 0);
sewardj853f55d2002-04-26 00:27:53 +00002073 if (0)
2074 VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n",
nethercote1f0173b2004-02-28 15:40:36 +00002075 tid, mutex, mutex->__vg_m_count);
sewardjf8f819e2002-04-17 23:21:37 +00002076 return;
2077 } else {
sewardj30671ff2002-04-21 00:13:57 +00002078 if (is_trylock)
njnd3040452003-05-19 15:04:06 +00002079 SET_PTHREQ_RETVAL(tid, EBUSY);
sewardj30671ff2002-04-21 00:13:57 +00002080 else
njnd3040452003-05-19 15:04:06 +00002081 SET_PTHREQ_RETVAL(tid, EDEADLK);
sewardjf8f819e2002-04-17 23:21:37 +00002082 return;
2083 }
2084 } else {
sewardj6072c362002-04-19 14:40:57 +00002085 /* Someone else has it; we have to wait. Mark ourselves
2086 thusly. */
nethercote1f0173b2004-02-28 15:40:36 +00002087 /* GUARD: __vg_m_count > 0 && __vg_m_owner is valid */
sewardj30671ff2002-04-21 00:13:57 +00002088 if (is_trylock) {
2089 /* caller is polling; so return immediately. */
njnd3040452003-05-19 15:04:06 +00002090 SET_PTHREQ_RETVAL(tid, EBUSY);
sewardj30671ff2002-04-21 00:13:57 +00002091 } else {
sewardjdca84112002-11-13 22:29:34 +00002092 VG_TRACK ( pre_mutex_lock, tid, mutex );
2093
sewardj018f7622002-05-15 21:13:39 +00002094 VG_(threads)[tid].status = VgTs_WaitMX;
2095 VG_(threads)[tid].associated_mx = mutex;
njnd3040452003-05-19 15:04:06 +00002096 SET_PTHREQ_RETVAL(tid, 0); /* pth_mx_lock success value */
sewardj30671ff2002-04-21 00:13:57 +00002097 if (VG_(clo_trace_pthread_level) >= 1) {
2098 VG_(sprintf)(msg_buf, "%s mx %p: BLOCK",
2099 caller, mutex );
2100 print_pthread_event(tid, msg_buf);
2101 }
2102 }
sewardje663cb92002-04-12 10:26:32 +00002103 return;
2104 }
sewardjf8f819e2002-04-17 23:21:37 +00002105
sewardje663cb92002-04-12 10:26:32 +00002106 } else {
sewardj6072c362002-04-19 14:40:57 +00002107 /* Nobody owns it. Sanity check ... */
nethercote1f0173b2004-02-28 15:40:36 +00002108 vg_assert(mutex->__vg_m_owner == VG_INVALID_THREADID);
sewardjdca84112002-11-13 22:29:34 +00002109
2110 VG_TRACK ( pre_mutex_lock, tid, mutex );
2111
sewardjf8f819e2002-04-17 23:21:37 +00002112 /* We get it! [for the first time]. */
nethercote1f0173b2004-02-28 15:40:36 +00002113 mutex->__vg_m_count = 1;
2114 mutex->__vg_m_owner = (/*_pthread_descr*/void*)tid;
njn25e49d8e72002-09-23 09:36:25 +00002115
sewardje663cb92002-04-12 10:26:32 +00002116 /* return 0 (success). */
njnd3040452003-05-19 15:04:06 +00002117 SET_PTHREQ_RETVAL(tid, 0);
sewardjf8f819e2002-04-17 23:21:37 +00002118
njnd3040452003-05-19 15:04:06 +00002119 VG_TRACK( post_mutex_lock, tid, mutex);
2120 }
sewardje663cb92002-04-12 10:26:32 +00002121}
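
/* Illustrative sketch (under "#if 0", never compiled): hypothetical
   client code for two of the cases handled above -- an owner
   re-locking a recursive mutex (the count is just bumped) and a
   trylock of an already-held non-recursive mutex (EBUSY, without
   blocking). */
#if 0
#define _GNU_SOURCE
#include <pthread.h>
#include <errno.h>
#include <stdio.h>

int main ( void )
{
   pthread_mutex_t rec   = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
   pthread_mutex_t plain = PTHREAD_MUTEX_INITIALIZER;
   int r;

   pthread_mutex_lock(&rec);
   r = pthread_mutex_lock(&rec);      /* owner again: __vg_m_count -> 2 */
   printf("recursive relock -> %d\n", r);
   pthread_mutex_unlock(&rec);
   pthread_mutex_unlock(&rec);

   pthread_mutex_lock(&plain);
   r = pthread_mutex_trylock(&plain); /* is_trylock path */
   printf("trylock of held mutex -> %s\n", r == EBUSY ? "EBUSY" : "other");
   pthread_mutex_unlock(&plain);
   return 0;
}
#endif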
2122
2123
2124static
2125void do_pthread_mutex_unlock ( ThreadId tid,
nethercote1f0173b2004-02-28 15:40:36 +00002126 vg_pthread_mutex_t* mutex )
sewardje663cb92002-04-12 10:26:32 +00002127{
sewardj3b5d8862002-04-20 13:53:23 +00002128 Char msg_buf[100];
sewardje663cb92002-04-12 10:26:32 +00002129
sewardj45b4b372002-04-16 22:50:32 +00002130 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj3b5d8862002-04-20 13:53:23 +00002131 VG_(sprintf)(msg_buf, "pthread_mutex_unlock mx %p ...", mutex );
sewardj8937c812002-04-12 20:12:20 +00002132 print_pthread_event(tid, msg_buf);
2133 }
2134
sewardj604ec3c2002-04-18 22:38:41 +00002135 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002136 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002137 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj604ec3c2002-04-18 22:38:41 +00002138
2139 if (mutex == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002140 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002141 "pthread_mutex_unlock: mutex is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002142 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002143 return;
2144 }
2145
sewardjd8acdf22002-11-13 21:57:52 +00002146 /* If this was locked before the dawn of time, pretend it was
2147 locked now so that it balances with unlocks */
nethercote1f0173b2004-02-28 15:40:36 +00002148 if (mutex->__vg_m_kind & VG_PTHREAD_PREHISTORY) {
2149 mutex->__vg_m_kind &= ~VG_PTHREAD_PREHISTORY;
2150 VG_TRACK( pre_mutex_lock, (ThreadId)mutex->__vg_m_owner, mutex );
2151 VG_TRACK( post_mutex_lock, (ThreadId)mutex->__vg_m_owner, mutex );
sewardjd8acdf22002-11-13 21:57:52 +00002152 }
2153
sewardj604ec3c2002-04-18 22:38:41 +00002154 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00002155 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002156# ifndef GLIBC_2_1
sewardj604ec3c2002-04-18 22:38:41 +00002157 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002158 case PTHREAD_MUTEX_ADAPTIVE_NP:
2159# endif
sewardja1679dd2002-05-10 22:31:40 +00002160# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002161 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002162# endif
sewardj604ec3c2002-04-18 22:38:41 +00002163 case PTHREAD_MUTEX_RECURSIVE_NP:
2164 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002165 if (mutex->__vg_m_count >= 0) break;
sewardj604ec3c2002-04-18 22:38:41 +00002166 /* else fall thru */
2167 default:
njn25e49d8e72002-09-23 09:36:25 +00002168 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002169 "pthread_mutex_unlock: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002170 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj604ec3c2002-04-18 22:38:41 +00002171 return;
2172 }
sewardje663cb92002-04-12 10:26:32 +00002173
2174 /* Barf if we don't currently hold the mutex. */
nethercote1f0173b2004-02-28 15:40:36 +00002175 if (mutex->__vg_m_count == 0) {
sewardj4dced352002-06-04 22:54:20 +00002176 /* nobody holds it */
njn25e49d8e72002-09-23 09:36:25 +00002177 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002178 "pthread_mutex_unlock: mutex is not locked");
njnd3040452003-05-19 15:04:06 +00002179 SET_PTHREQ_RETVAL(tid, EPERM);
sewardj4dced352002-06-04 22:54:20 +00002180 return;
2181 }
2182
nethercote1f0173b2004-02-28 15:40:36 +00002183 if ((ThreadId)mutex->__vg_m_owner != tid) {
sewardj4dced352002-06-04 22:54:20 +00002184 /* we don't hold it */
njn25e49d8e72002-09-23 09:36:25 +00002185 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002186 "pthread_mutex_unlock: mutex is locked by a different thread");
njnd3040452003-05-19 15:04:06 +00002187 SET_PTHREQ_RETVAL(tid, EPERM);
sewardje663cb92002-04-12 10:26:32 +00002188 return;
2189 }
2190
sewardjf8f819e2002-04-17 23:21:37 +00002191 /* If it's a multiply-locked recursive mutex, just decrement the
2192 lock count and return. */
nethercote1f0173b2004-02-28 15:40:36 +00002193 if (mutex->__vg_m_count > 1) {
2194 vg_assert(mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
2195 mutex->__vg_m_count --;
njnd3040452003-05-19 15:04:06 +00002196 SET_PTHREQ_RETVAL(tid, 0); /* success */
sewardjf8f819e2002-04-17 23:21:37 +00002197 return;
2198 }
2199
sewardj604ec3c2002-04-18 22:38:41 +00002200 /* Now we're sure it is locked exactly once, and by the thread who
sewardjf8f819e2002-04-17 23:21:37 +00002201 is now doing an unlock on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002202 vg_assert(mutex->__vg_m_count == 1);
2203 vg_assert((ThreadId)mutex->__vg_m_owner == tid);
sewardjf8f819e2002-04-17 23:21:37 +00002204
sewardj3b5d8862002-04-20 13:53:23 +00002205 /* Release at max one thread waiting on this mutex. */
2206   release_one_thread_waiting_on_mutex ( mutex, "pthread_mutex_unlock" );
sewardje663cb92002-04-12 10:26:32 +00002207
sewardj3b5d8862002-04-20 13:53:23 +00002208 /* Our (tid's) pth_unlock() returns with 0 (success). */
njnd3040452003-05-19 15:04:06 +00002209 SET_PTHREQ_RETVAL(tid, 0); /* Success. */
sewardje663cb92002-04-12 10:26:32 +00002210}
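
/* Illustrative sketch (under "#if 0", never compiled): hypothetical
   client error which the checks above are there to catch -- unlocking
   a mutex the calling thread does not hold draws EPERM and a pthread
   error report here. */
#if 0
#include <pthread.h>
#include <errno.h>
#include <stdio.h>

int main ( void )
{
   pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
   int r = pthread_mutex_unlock(&mx);   /* never locked: "mutex is not locked" */
   printf("unlock of unheld mutex -> %s\n", r == EPERM ? "EPERM" : "other");
   return 0;
}
#endif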
2211
2212
sewardj6072c362002-04-19 14:40:57 +00002213/* -----------------------------------------------------------
2214 CONDITION VARIABLES
2215 -------------------------------------------------------- */
sewardje663cb92002-04-12 10:26:32 +00002216
nethercote1f0173b2004-02-28 15:40:36 +00002217/* The relevant type (vg_pthread_cond_t) is in vg_include.h.
sewardj77e466c2002-04-14 02:29:29 +00002218
nethercote1f0173b2004-02-28 15:40:36 +00002219 We don't use any fields of vg_pthread_cond_t for anything at all.
2220 Only the identity of the CVs is important. (Actually, we initialise
2221 __vg_c_waiting in pthread_cond_init() to VG_INVALID_THREADID.)
sewardj6072c362002-04-19 14:40:57 +00002222
2223 Linux pthreads supports no attributes on condition variables, so we
sewardj3b5d8862002-04-20 13:53:23 +00002224 don't need to think too hard there. */
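
/* Illustrative sketch (under "#if 0", never compiled): the hypothetical
   client pattern behind the machinery below.  pthread_cond_wait()
   queues the caller on the CV (do_pthread_cond_wait, further down),
   and a later signal releases a waiter via
   release_N_threads_waiting_on_cond(), handing it the mutex back
   before it resumes. */
#if 0
#include <pthread.h>

static pthread_mutex_t mx    = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv    = PTHREAD_COND_INITIALIZER;
static int             ready = 0;

static void wait_for_ready ( void )
{
   pthread_mutex_lock(&mx);
   while (!ready)
      pthread_cond_wait(&cv, &mx);   /* mx is released while we wait */
   /* on wakeup we own mx again */
   pthread_mutex_unlock(&mx);
}

static void announce_ready ( void )
{
   pthread_mutex_lock(&mx);
   ready = 1;
   pthread_cond_signal(&cv);         /* releases (at most) one waiter */
   pthread_mutex_unlock(&mx);
}
#endif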
sewardj6072c362002-04-19 14:40:57 +00002225
sewardj77e466c2002-04-14 02:29:29 +00002226
sewardj5f07b662002-04-23 16:52:51 +00002227static
2228void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid )
2229{
2230 Char msg_buf[100];
nethercote1f0173b2004-02-28 15:40:36 +00002231 vg_pthread_mutex_t* mx;
2232 vg_pthread_cond_t* cv;
sewardj5f07b662002-04-23 16:52:51 +00002233
sewardjb48e5002002-05-13 00:16:03 +00002234 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002235 && VG_(threads)[tid].status == VgTs_WaitCV
2236 && VG_(threads)[tid].awaken_at != 0xFFFFFFFF);
2237 mx = VG_(threads)[tid].associated_mx;
sewardj5f07b662002-04-23 16:52:51 +00002238 vg_assert(mx != NULL);
sewardj018f7622002-05-15 21:13:39 +00002239 cv = VG_(threads)[tid].associated_cv;
sewardj5f07b662002-04-23 16:52:51 +00002240 vg_assert(cv != NULL);
2241
nethercote1f0173b2004-02-28 15:40:36 +00002242 if (mx->__vg_m_owner == VG_INVALID_THREADID) {
sewardj5f07b662002-04-23 16:52:51 +00002243 /* Currently unheld; hand it out to thread tid. */
nethercote1f0173b2004-02-28 15:40:36 +00002244 vg_assert(mx->__vg_m_count == 0);
sewardj018f7622002-05-15 21:13:39 +00002245 VG_(threads)[tid].status = VgTs_Runnable;
njnd3040452003-05-19 15:04:06 +00002246 SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
sewardj018f7622002-05-15 21:13:39 +00002247 VG_(threads)[tid].associated_cv = NULL;
2248 VG_(threads)[tid].associated_mx = NULL;
nethercote1f0173b2004-02-28 15:40:36 +00002249 mx->__vg_m_owner = (/*_pthread_descr*/void*)tid;
2250 mx->__vg_m_count = 1;
sewardj5f07b662002-04-23 16:52:51 +00002251
sewardj0af43bc2002-10-22 04:30:35 +00002252 VG_TRACK( post_mutex_lock, tid, mx );
2253
sewardj5f07b662002-04-23 16:52:51 +00002254 if (VG_(clo_trace_pthread_level) >= 1) {
sewardjc3bd5f52002-05-01 03:24:23 +00002255 VG_(sprintf)(msg_buf,
nethercote1f0173b2004-02-28 15:40:36 +00002256 "pthread_cond_timedwait cv %p: TIMEOUT with mx %p",
sewardjc3bd5f52002-05-01 03:24:23 +00002257 cv, mx );
sewardj5f07b662002-04-23 16:52:51 +00002258 print_pthread_event(tid, msg_buf);
2259 }
2260 } else {
2261 /* Currently held. Make thread tid be blocked on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002262 vg_assert(mx->__vg_m_count > 0);
sewardjdca84112002-11-13 22:29:34 +00002263 VG_TRACK( pre_mutex_lock, tid, mx );
2264
sewardj018f7622002-05-15 21:13:39 +00002265 VG_(threads)[tid].status = VgTs_WaitMX;
njnd3040452003-05-19 15:04:06 +00002266 SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
sewardj018f7622002-05-15 21:13:39 +00002267 VG_(threads)[tid].associated_cv = NULL;
2268 VG_(threads)[tid].associated_mx = mx;
sewardj5f07b662002-04-23 16:52:51 +00002269 if (VG_(clo_trace_pthread_level) >= 1) {
2270 VG_(sprintf)(msg_buf,
nethercote1f0173b2004-02-28 15:40:36 +00002271 "pthread_cond_timedwait cv %p: TIMEOUT -> BLOCK for mx %p",
sewardj5f07b662002-04-23 16:52:51 +00002272 cv, mx );
2273 print_pthread_event(tid, msg_buf);
2274 }
sewardj5f07b662002-04-23 16:52:51 +00002275 }
2276}
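
/* Illustrative sketch (under "#if 0", never compiled): hypothetical
   client code which ends up in the timeout handler above when nobody
   signals before the deadline -- the waiter gets ETIMEDOUT and must
   reacquire the mutex (immediately if it is unheld, otherwise by
   blocking in VgTs_WaitMX). */
#if 0
#include <pthread.h>
#include <errno.h>
#include <stdio.h>
#include <time.h>

int main ( void )
{
   pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
   pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
   struct timespec deadline;
   int r;

   clock_gettime(CLOCK_REALTIME, &deadline);
   deadline.tv_sec += 1;                     /* one second from now */

   pthread_mutex_lock(&mx);
   r = pthread_cond_timedwait(&cv, &mx, &deadline);  /* nobody signals */
   pthread_mutex_unlock(&mx);
   printf("timedwait -> %s\n", r == ETIMEDOUT ? "ETIMEDOUT" : "other");
   return 0;
}
#endif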
2277
2278
sewardj3b5d8862002-04-20 13:53:23 +00002279static
nethercote1f0173b2004-02-28 15:40:36 +00002280void release_N_threads_waiting_on_cond ( vg_pthread_cond_t* cond,
sewardj3b5d8862002-04-20 13:53:23 +00002281 Int n_to_release,
2282 Char* caller )
2283{
2284 Int i;
2285 Char msg_buf[100];
nethercote1f0173b2004-02-28 15:40:36 +00002286 vg_pthread_mutex_t* mx;
sewardj3b5d8862002-04-20 13:53:23 +00002287
2288 while (True) {
2289 if (n_to_release == 0)
2290 return;
2291
2292 /* Find a thread waiting on this CV. */
2293 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00002294 if (VG_(threads)[i].status == VgTs_Empty)
sewardj3b5d8862002-04-20 13:53:23 +00002295 continue;
sewardj018f7622002-05-15 21:13:39 +00002296 if (VG_(threads)[i].status == VgTs_WaitCV
2297 && VG_(threads)[i].associated_cv == cond)
sewardj3b5d8862002-04-20 13:53:23 +00002298 break;
2299 }
2300 vg_assert(i <= VG_N_THREADS);
2301
2302 if (i == VG_N_THREADS) {
2303 /* Nobody else is waiting on it. */
2304 return;
2305 }
2306
sewardj018f7622002-05-15 21:13:39 +00002307 mx = VG_(threads)[i].associated_mx;
sewardj3b5d8862002-04-20 13:53:23 +00002308 vg_assert(mx != NULL);
2309
sewardjdca84112002-11-13 22:29:34 +00002310 VG_TRACK( pre_mutex_lock, i, mx );
2311
nethercote1f0173b2004-02-28 15:40:36 +00002312 if (mx->__vg_m_owner == VG_INVALID_THREADID) {
sewardj3b5d8862002-04-20 13:53:23 +00002313 /* Currently unheld; hand it out to thread i. */
nethercote1f0173b2004-02-28 15:40:36 +00002314 vg_assert(mx->__vg_m_count == 0);
sewardj018f7622002-05-15 21:13:39 +00002315 VG_(threads)[i].status = VgTs_Runnable;
2316 VG_(threads)[i].associated_cv = NULL;
2317 VG_(threads)[i].associated_mx = NULL;
nethercote1f0173b2004-02-28 15:40:36 +00002318 mx->__vg_m_owner = (/*_pthread_descr*/void*)i;
2319 mx->__vg_m_count = 1;
sewardj5f07b662002-04-23 16:52:51 +00002320 /* .m_edx already holds pth_cond_wait success value (0) */
sewardj3b5d8862002-04-20 13:53:23 +00002321
sewardj0af43bc2002-10-22 04:30:35 +00002322 VG_TRACK( post_mutex_lock, i, mx );
2323
sewardj3b5d8862002-04-20 13:53:23 +00002324 if (VG_(clo_trace_pthread_level) >= 1) {
2325 VG_(sprintf)(msg_buf, "%s cv %p: RESUME with mx %p",
2326 caller, cond, mx );
2327 print_pthread_event(i, msg_buf);
2328 }
2329
2330 } else {
2331 /* Currently held. Make thread i be blocked on it. */
nethercote1f0173b2004-02-28 15:40:36 +00002332 vg_assert(mx->__vg_m_count > 0);
sewardj018f7622002-05-15 21:13:39 +00002333 VG_(threads)[i].status = VgTs_WaitMX;
2334 VG_(threads)[i].associated_cv = NULL;
2335 VG_(threads)[i].associated_mx = mx;
njnd3040452003-05-19 15:04:06 +00002336 SET_PTHREQ_RETVAL(i, 0); /* pth_cond_wait success value */
sewardj3b5d8862002-04-20 13:53:23 +00002337
2338 if (VG_(clo_trace_pthread_level) >= 1) {
2339 VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
2340 caller, cond, mx );
2341 print_pthread_event(i, msg_buf);
2342 }
2343
2344 }
jsgf855d93d2003-10-13 22:26:55 +00002345
sewardj3b5d8862002-04-20 13:53:23 +00002346 n_to_release--;
2347 }
2348}
2349
2350
2351static
2352void do_pthread_cond_wait ( ThreadId tid,
nethercote1f0173b2004-02-28 15:40:36 +00002353 vg_pthread_cond_t *cond,
2354 vg_pthread_mutex_t *mutex,
sewardj5f07b662002-04-23 16:52:51 +00002355 UInt ms_end )
sewardj3b5d8862002-04-20 13:53:23 +00002356{
2357 Char msg_buf[100];
2358
sewardj5f07b662002-04-23 16:52:51 +00002359 /* If ms_end == 0xFFFFFFFF, wait forever (no timeout). Otherwise,
2360 ms_end is the ending millisecond. */
2361
sewardj3b5d8862002-04-20 13:53:23 +00002362 /* pre: mutex should be a valid mutex and owned by tid. */
2363 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj5f07b662002-04-23 16:52:51 +00002364 VG_(sprintf)(msg_buf, "pthread_cond_wait cv %p, mx %p, end %d ...",
2365 cond, mutex, ms_end );
sewardj3b5d8862002-04-20 13:53:23 +00002366 print_pthread_event(tid, msg_buf);
2367 }
2368
2369 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002370 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002371 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj3b5d8862002-04-20 13:53:23 +00002372
2373 if (mutex == NULL || cond == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002374 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002375 "pthread_cond_wait/timedwait: cond or mutex is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002376 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002377 return;
2378 }
2379
2380 /* More paranoia ... */
nethercote1f0173b2004-02-28 15:40:36 +00002381 switch (mutex->__vg_m_kind) {
sewardj2a1dcce2002-04-22 12:45:25 +00002382# ifndef GLIBC_2_1
sewardj3b5d8862002-04-20 13:53:23 +00002383 case PTHREAD_MUTEX_TIMED_NP:
sewardj2a1dcce2002-04-22 12:45:25 +00002384 case PTHREAD_MUTEX_ADAPTIVE_NP:
2385# endif
sewardja1679dd2002-05-10 22:31:40 +00002386# ifdef GLIBC_2_1
sewardj8e651d72002-05-10 21:00:19 +00002387 case PTHREAD_MUTEX_FAST_NP:
sewardja1679dd2002-05-10 22:31:40 +00002388# endif
sewardj3b5d8862002-04-20 13:53:23 +00002389 case PTHREAD_MUTEX_RECURSIVE_NP:
2390 case PTHREAD_MUTEX_ERRORCHECK_NP:
nethercote1f0173b2004-02-28 15:40:36 +00002391 if (mutex->__vg_m_count >= 0) break;
sewardj3b5d8862002-04-20 13:53:23 +00002392 /* else fall thru */
2393 default:
njn25e49d8e72002-09-23 09:36:25 +00002394 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002395 "pthread_cond_wait/timedwait: mutex is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002396 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002397 return;
2398 }
2399
2400 /* Barf if we don't currently hold the mutex. */
nethercote1f0173b2004-02-28 15:40:36 +00002401 if (mutex->__vg_m_count == 0 /* nobody holds it */
2402 || (ThreadId)mutex->__vg_m_owner != tid /* we don't hold it */) {
njn25e49d8e72002-09-23 09:36:25 +00002403 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002404 "pthread_cond_wait/timedwait: mutex is unlocked "
2405 "or is locked but not owned by thread");
jsgf855d93d2003-10-13 22:26:55 +00002406 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002407 return;
2408 }
2409
2410 /* Queue ourselves on the condition. */
sewardj018f7622002-05-15 21:13:39 +00002411 VG_(threads)[tid].status = VgTs_WaitCV;
2412 VG_(threads)[tid].associated_cv = cond;
2413 VG_(threads)[tid].associated_mx = mutex;
2414 VG_(threads)[tid].awaken_at = ms_end;
jsgf855d93d2003-10-13 22:26:55 +00002415 if (ms_end != 0xFFFFFFFF)
2416 VG_(add_timeout)(tid, ms_end);
sewardj3b5d8862002-04-20 13:53:23 +00002417
2418 if (VG_(clo_trace_pthread_level) >= 1) {
2419 VG_(sprintf)(msg_buf,
2420 "pthread_cond_wait cv %p, mx %p: BLOCK",
2421 cond, mutex );
2422 print_pthread_event(tid, msg_buf);
2423 }
2424
2425 /* Release the mutex. */
2426 release_one_thread_waiting_on_mutex ( mutex, "pthread_cond_wait " );
2427}
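/* Illustrative sketch (not the actual vg_libpthread.c wrapper, which may
   differ): one plausible way a client-side pthread_cond_timedwait()
   wrapper could derive the ms_end value used above, given the current
   wall-clock time and the scheduler's millisecond timer (obtainable via
   the VG_USERREQ__READ_MILLISECOND_TIMER request handled later in this
   file).  0xFFFFFFFF is reserved to mean "no timeout".

      #include <time.h>

      // now    -- current wall-clock time (hypothetical source)
      // now_ms -- scheduler's millisecond timer at the same instant
      static unsigned int abstime_to_ms_end ( const struct timespec* abstime,
                                              const struct timespec* now,
                                              unsigned int now_ms )
      {
         long sec  = abstime->tv_sec  - now->tv_sec;
         long nsec = abstime->tv_nsec - now->tv_nsec;
         long remaining_ms = sec * 1000 + nsec / 1000000;
         if (remaining_ms < 0) remaining_ms = 0;   // deadline already passed
         return now_ms + (unsigned int)remaining_ms;
      }
*/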
2428
2429
2430static
2431void do_pthread_cond_signal_or_broadcast ( ThreadId tid,
2432 Bool broadcast,
nethercote1f0173b2004-02-28 15:40:36 +00002433 vg_pthread_cond_t *cond )
sewardj3b5d8862002-04-20 13:53:23 +00002434{
2435 Char msg_buf[100];
2436 Char* caller
2437 = broadcast ? "pthread_cond_broadcast"
2438 : "pthread_cond_signal ";
2439
2440 if (VG_(clo_trace_pthread_level) >= 2) {
2441 VG_(sprintf)(msg_buf, "%s cv %p ...",
2442 caller, cond );
2443 print_pthread_event(tid, msg_buf);
2444 }
2445
2446 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002447 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002448 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj3b5d8862002-04-20 13:53:23 +00002449
2450 if (cond == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002451 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002452 "pthread_cond_signal/broadcast: cond is NULL");
jsgf855d93d2003-10-13 22:26:55 +00002453 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002454 return;
2455 }
2456
2457 release_N_threads_waiting_on_cond (
2458 cond,
2459 broadcast ? VG_N_THREADS : 1,
2460 caller
2461 );
2462
njnd3040452003-05-19 15:04:06 +00002463 SET_PTHREQ_RETVAL(tid, 0); /* success */
sewardj3b5d8862002-04-20 13:53:23 +00002464}
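/* Illustrative client-side usage (ordinary pthreads code, not part of the
   scheduler): whichever path release_N_threads_waiting_on_cond() takes --
   handing the mutex straight to the woken thread, or making it block on
   the mutex -- the waiter always returns from pthread_cond_wait() holding
   the mutex, so the usual predicate loop works unchanged under Valgrind.

      #include <pthread.h>

      static pthread_mutex_t mu    = PTHREAD_MUTEX_INITIALIZER;
      static pthread_cond_t  cond  = PTHREAD_COND_INITIALIZER;
      static int             ready = 0;

      void* waiter ( void* arg )
      {
         pthread_mutex_lock(&mu);
         while (!ready)                      // re-check: wakeups may be spurious
            pthread_cond_wait(&cond, &mu);   // returns with mu held
         pthread_mutex_unlock(&mu);
         return arg;
      }

      void wake_everyone ( void )
      {
         pthread_mutex_lock(&mu);
         ready = 1;
         pthread_cond_broadcast(&cond);      // scheduler releases all waiters
         pthread_mutex_unlock(&mu);
      }
*/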
2465
sewardj77e466c2002-04-14 02:29:29 +00002466
sewardj5f07b662002-04-23 16:52:51 +00002467/* -----------------------------------------------------------
2468 THREAD SPECIFIC DATA
2469 -------------------------------------------------------- */
2470
2471static __inline__
2472Bool is_valid_key ( ThreadKey k )
2473{
2474 /* k unsigned; hence no < 0 check */
2475 if (k >= VG_N_THREAD_KEYS) return False;
2476 if (!vg_thread_keys[k].inuse) return False;
2477 return True;
2478}
2479
sewardj00a66b12002-10-12 16:42:35 +00002480
2481/* Return in %EDX a value of 1 if the key is valid, else 0. */
2482static
2483void do_pthread_key_validate ( ThreadId tid,
2484 pthread_key_t key )
2485{
2486 Char msg_buf[100];
2487
2488 if (VG_(clo_trace_pthread_level) >= 1) {
2489 VG_(sprintf)(msg_buf, "pthread_key_validate key %p",
2490 key );
2491 print_pthread_event(tid, msg_buf);
2492 }
2493
2494 vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
2495 vg_assert(VG_(is_valid_tid)(tid)
2496 && VG_(threads)[tid].status == VgTs_Runnable);
2497
2498 if (is_valid_key((ThreadKey)key)) {
njnd3040452003-05-19 15:04:06 +00002499 SET_PTHREQ_RETVAL(tid, 1);
sewardj00a66b12002-10-12 16:42:35 +00002500 } else {
njnd3040452003-05-19 15:04:06 +00002501 SET_PTHREQ_RETVAL(tid, 0);
sewardj00a66b12002-10-12 16:42:35 +00002502 }
2503}
2504
2505
sewardj5f07b662002-04-23 16:52:51 +00002506static
2507void do_pthread_key_create ( ThreadId tid,
2508 pthread_key_t* key,
2509 void (*destructor)(void*) )
2510{
2511 Int i;
2512 Char msg_buf[100];
2513
2514 if (VG_(clo_trace_pthread_level) >= 1) {
2515 VG_(sprintf)(msg_buf, "pthread_key_create *key %p, destr %p",
2516 key, destructor );
2517 print_pthread_event(tid, msg_buf);
2518 }
2519
2520 vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
sewardjb48e5002002-05-13 00:16:03 +00002521 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002522 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002523
2524 for (i = 0; i < VG_N_THREAD_KEYS; i++)
2525 if (!vg_thread_keys[i].inuse)
2526 break;
2527
2528 if (i == VG_N_THREAD_KEYS) {
jsgf855d93d2003-10-13 22:26:55 +00002529 VG_(message)(Vg_UserMsg, "pthread_key_create() asked for too many keys (more than %d): increase VG_N_THREAD_KEYS and recompile Valgrind.",
2530 VG_N_THREAD_KEYS);
2531 SET_PTHREQ_RETVAL(tid, EAGAIN);
2532 return;
sewardj5f07b662002-04-23 16:52:51 +00002533 }
2534
sewardj870497a2002-05-29 01:06:47 +00002535 vg_thread_keys[i].inuse = True;
2536 vg_thread_keys[i].destructor = destructor;
sewardjc3bd5f52002-05-01 03:24:23 +00002537
sewardj5a3798b2002-06-04 23:24:22 +00002538 /* check key for addressability */
njn72718642003-07-24 08:45:32 +00002539 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_key_create: key",
njn25e49d8e72002-09-23 09:36:25 +00002540 (Addr)key, sizeof(pthread_key_t));
sewardj5f07b662002-04-23 16:52:51 +00002541 *key = i;
njn25e49d8e72002-09-23 09:36:25 +00002542 VG_TRACK( post_mem_write, (Addr)key, sizeof(pthread_key_t) );
sewardjc3bd5f52002-05-01 03:24:23 +00002543
njnd3040452003-05-19 15:04:06 +00002544 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002545}
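/* Illustrative client-side usage (plain pthreads, not scheduler code):
   the key table managed above backs the ordinary thread-specific-data
   API.  The destructor registered here is the function address that
   do__get_key_destr_and_spec() later hands back for use at thread exit.

      #include <pthread.h>
      #include <stdlib.h>

      static pthread_key_t buf_key;

      static void free_buf ( void* p ) { free(p); }

      static void init_key ( void )
      {
         pthread_key_create(&buf_key, free_buf);   // may fail with EAGAIN
      }

      static char* get_thread_buf ( void )
      {
         char* buf = pthread_getspecific(buf_key);
         if (buf == NULL) {
            buf = malloc(256);
            pthread_setspecific(buf_key, buf);     // freed by free_buf at exit
         }
         return buf;
      }
*/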
2546
2547
2548static
2549void do_pthread_key_delete ( ThreadId tid, pthread_key_t key )
2550{
2551 Char msg_buf[100];
2552 if (VG_(clo_trace_pthread_level) >= 1) {
2553 VG_(sprintf)(msg_buf, "pthread_key_delete key %d",
2554 key );
2555 print_pthread_event(tid, msg_buf);
2556 }
2557
sewardjb48e5002002-05-13 00:16:03 +00002558 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002559 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002560
2561 if (!is_valid_key(key)) {
njn25e49d8e72002-09-23 09:36:25 +00002562 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002563 "pthread_key_delete: key is invalid");
jsgf855d93d2003-10-13 22:26:55 +00002564 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj5f07b662002-04-23 16:52:51 +00002565 return;
2566 }
2567
2568 vg_thread_keys[key].inuse = False;
sewardj648b3152002-12-09 19:07:59 +00002569 vg_thread_keys[key].destructor = NULL;
njnd3040452003-05-19 15:04:06 +00002570 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002571}
2572
2573
sewardj00a66b12002-10-12 16:42:35 +00002574/* Get the .specifics_ptr for a thread. Return 1 if the thread-slot
2575 isn't in use, so that client-space can scan all thread slots. 1
2576 cannot be confused with NULL or a legitimately-aligned specifics_ptr
2577 value. */
sewardj5f07b662002-04-23 16:52:51 +00002578static
sewardj00a66b12002-10-12 16:42:35 +00002579void do_pthread_getspecific_ptr ( ThreadId tid )
sewardj5f07b662002-04-23 16:52:51 +00002580{
sewardj00a66b12002-10-12 16:42:35 +00002581 void** specifics_ptr;
2582 Char msg_buf[100];
2583
jsgf855d93d2003-10-13 22:26:55 +00002584 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj00a66b12002-10-12 16:42:35 +00002585 VG_(sprintf)(msg_buf, "pthread_getspecific_ptr" );
sewardj5f07b662002-04-23 16:52:51 +00002586 print_pthread_event(tid, msg_buf);
2587 }
2588
sewardj00a66b12002-10-12 16:42:35 +00002589 vg_assert(VG_(is_valid_or_empty_tid)(tid));
sewardj5f07b662002-04-23 16:52:51 +00002590
sewardj00a66b12002-10-12 16:42:35 +00002591 if (VG_(threads)[tid].status == VgTs_Empty) {
njnd3040452003-05-19 15:04:06 +00002592 SET_PTHREQ_RETVAL(tid, 1);
sewardj5f07b662002-04-23 16:52:51 +00002593 return;
2594 }
2595
sewardj00a66b12002-10-12 16:42:35 +00002596 specifics_ptr = VG_(threads)[tid].specifics_ptr;
2597 vg_assert(specifics_ptr == NULL
2598 || IS_ALIGNED4_ADDR(specifics_ptr));
2599
njnd3040452003-05-19 15:04:06 +00002600 SET_PTHREQ_RETVAL(tid, (UInt)specifics_ptr);
sewardj5f07b662002-04-23 16:52:51 +00002601}
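/* Illustrative sketch of the convention above (hypothetical wrapper name,
   not the actual vg_libpthread.c code): the three possible results of the
   GETSPECIFIC_PTR request are easy to tell apart, because a genuine
   specifics_ptr is either NULL or word-aligned and so can never equal 1.

      extern void** my_getspecific_ptr ( void );   // hypothetical wrapper
                                                   // around the request

      void inspect_specifics ( void )
      {
         void** sp = my_getspecific_ptr();
         if (sp == (void**)1) {
            // thread slot not in use
         } else if (sp == NULL) {
            // thread exists but has no specifics array yet
         } else {
            // sp points at the thread's specifics array
         }
      }
*/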
2602
2603
2604static
sewardj00a66b12002-10-12 16:42:35 +00002605void do_pthread_setspecific_ptr ( ThreadId tid, void** ptr )
sewardj5f07b662002-04-23 16:52:51 +00002606{
2607 Char msg_buf[100];
2608 if (VG_(clo_trace_pthread_level) >= 1) {
sewardj00a66b12002-10-12 16:42:35 +00002609 VG_(sprintf)(msg_buf, "pthread_setspecific_ptr ptr %p",
2610 ptr );
sewardj5f07b662002-04-23 16:52:51 +00002611 print_pthread_event(tid, msg_buf);
2612 }
2613
sewardjb48e5002002-05-13 00:16:03 +00002614 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002615 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002616
sewardj00a66b12002-10-12 16:42:35 +00002617 VG_(threads)[tid].specifics_ptr = ptr;
njnd3040452003-05-19 15:04:06 +00002618 SET_PTHREQ_RETVAL(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002619}
2620
2621
sewardj870497a2002-05-29 01:06:47 +00002622/* Helper for calling destructors at thread exit. If key is valid,
2623 copy the thread's specific value into cu->arg and put the *key*'s
2624 destructor fn address in cu->fn. Then return 0 to the caller.
2625 Otherwise return non-zero to the caller. */
2626static
2627void do__get_key_destr_and_spec ( ThreadId tid,
2628 pthread_key_t key,
2629 CleanupEntry* cu )
2630{
2631 Char msg_buf[100];
jsgf855d93d2003-10-13 22:26:55 +00002632 if (VG_(clo_trace_pthread_level) >= 2) {
sewardj870497a2002-05-29 01:06:47 +00002633 VG_(sprintf)(msg_buf,
2634 "get_key_destr_and_arg (key = %d)", key );
2635 print_pthread_event(tid, msg_buf);
2636 }
2637 vg_assert(VG_(is_valid_tid)(tid));
2638 vg_assert(key >= 0 && key < VG_N_THREAD_KEYS);
njn25e49d8e72002-09-23 09:36:25 +00002639
sewardj870497a2002-05-29 01:06:47 +00002640 if (!vg_thread_keys[key].inuse) {
njnd3040452003-05-19 15:04:06 +00002641 SET_PTHREQ_RETVAL(tid, -1);
sewardj870497a2002-05-29 01:06:47 +00002642 return;
2643 }
njn72718642003-07-24 08:45:32 +00002644 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "get_key_destr_and_spec: cu",
2645 (Addr)cu, sizeof(CleanupEntry) );
sewardj00a66b12002-10-12 16:42:35 +00002646
sewardj870497a2002-05-29 01:06:47 +00002647 cu->fn = vg_thread_keys[key].destructor;
sewardj00a66b12002-10-12 16:42:35 +00002648 if (VG_(threads)[tid].specifics_ptr == NULL) {
2649 cu->arg = NULL;
2650 } else {
njn72718642003-07-24 08:45:32 +00002651 VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
sewardj00a66b12002-10-12 16:42:35 +00002652 "get_key_destr_and_spec: key",
2653 (Addr)(&VG_(threads)[tid].specifics_ptr[key]),
2654 sizeof(void*) );
2655 cu->arg = VG_(threads)[tid].specifics_ptr[key];
2656 }
2657
njn25e49d8e72002-09-23 09:36:25 +00002658 VG_TRACK( post_mem_write, (Addr)cu, sizeof(CleanupEntry) );
njnd3040452003-05-19 15:04:06 +00002659 SET_PTHREQ_RETVAL(tid, 0);
sewardj870497a2002-05-29 01:06:47 +00002660}
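/* Illustrative sketch of how a thread-exit path could drive this helper
   (hypothetical wrapper and type names; the real logic lives in
   vg_libpthread.c and may differ): for every key, fetch the key's
   destructor and this thread's value, and run the destructor on non-NULL
   values, as POSIX requires.

      typedef struct { void (*fn)(void*); void* arg; } MyCleanupEntry;
                                   // sketch of CleanupEntry's fn/arg fields

      extern int get_key_destr_and_spec ( unsigned key, MyCleanupEntry* cu );
                                   // hypothetical wrapper; 0 == key in use

      #define MY_N_THREAD_KEYS 50  // assumed stand-in for VG_N_THREAD_KEYS

      void run_tsd_destructors_at_exit ( void )
      {
         unsigned key;
         MyCleanupEntry cu;
         for (key = 0; key < MY_N_THREAD_KEYS; key++) {
            if (get_key_destr_and_spec(key, &cu) != 0)
               continue;                  // key not in use
            if (cu.fn != NULL && cu.arg != NULL)
               cu.fn(cu.arg);             // run the key's destructor
         }
      }
*/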
2661
2662
sewardjb48e5002002-05-13 00:16:03 +00002663/* ---------------------------------------------------
2664 SIGNALS
2665 ------------------------------------------------ */
2666
2667/* See comment in vg_libthread.c:pthread_sigmask() regarding
sewardj018f7622002-05-15 21:13:39 +00002668 deliberate confusion of types sigset_t and vki_sigset_t. Return 0
2669 for OK and 1 for some kind of addressing error, which the
2670 vg_libpthread.c routine turns into return values 0 and EFAULT
2671 respectively. */
sewardjb48e5002002-05-13 00:16:03 +00002672static
2673void do_pthread_sigmask ( ThreadId tid,
sewardj018f7622002-05-15 21:13:39 +00002674 Int vki_how,
sewardjb48e5002002-05-13 00:16:03 +00002675 vki_ksigset_t* newmask,
2676 vki_ksigset_t* oldmask )
2677{
2678 Char msg_buf[100];
2679 if (VG_(clo_trace_pthread_level) >= 1) {
2680 VG_(sprintf)(msg_buf,
sewardj018f7622002-05-15 21:13:39 +00002681 "pthread_sigmask vki_how %d, newmask %p, oldmask %p",
2682 vki_how, newmask, oldmask );
sewardjb48e5002002-05-13 00:16:03 +00002683 print_pthread_event(tid, msg_buf);
2684 }
2685
2686 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002687 && VG_(threads)[tid].status == VgTs_Runnable);
sewardjb48e5002002-05-13 00:16:03 +00002688
njn25e49d8e72002-09-23 09:36:25 +00002689 if (newmask)
njn72718642003-07-24 08:45:32 +00002690 VG_TRACK( pre_mem_read, Vg_CorePThread, tid, "pthread_sigmask: newmask",
njn25e49d8e72002-09-23 09:36:25 +00002691 (Addr)newmask, sizeof(vki_ksigset_t));
2692 if (oldmask)
njn72718642003-07-24 08:45:32 +00002693 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_sigmask: oldmask",
njn25e49d8e72002-09-23 09:36:25 +00002694 (Addr)oldmask, sizeof(vki_ksigset_t));
sewardjb48e5002002-05-13 00:16:03 +00002695
sewardj018f7622002-05-15 21:13:39 +00002696 VG_(do_pthread_sigmask_SCSS_upd) ( tid, vki_how, newmask, oldmask );
sewardjb48e5002002-05-13 00:16:03 +00002697
njn25e49d8e72002-09-23 09:36:25 +00002698 if (oldmask)
2699 VG_TRACK( post_mem_write, (Addr)oldmask, sizeof(vki_ksigset_t) );
sewardj3a951cf2002-05-15 22:25:47 +00002700
sewardj018f7622002-05-15 21:13:39 +00002701 /* Success. */
njnd3040452003-05-19 15:04:06 +00002702 SET_PTHREQ_RETVAL(tid, 0);
sewardjb48e5002002-05-13 00:16:03 +00002703}
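/* Illustrative sketch of the client-side half described above (hypothetical
   wrapper names; the real one is pthread_sigmask() in vg_libpthread.c):
   the sets are passed through unchanged, relying on sigset_t and
   vki_ksigset_t having compatible layouts, and a result of 1 is mapped to
   EFAULT as the comment before do_pthread_sigmask() describes.

      #include <errno.h>
      #include <signal.h>

      extern int do_sigmask_request ( int how, const sigset_t* newmask,
                                      sigset_t* oldmask );
                                    // hypothetical stand-in for issuing
                                    // VG_USERREQ__PTHREAD_SIGMASK

      int my_pthread_sigmask ( int how, const sigset_t* newmask,
                               sigset_t* oldmask )
      {
         int res = do_sigmask_request(how, newmask, oldmask);
         if (res == 0) return 0;      // success
         return EFAULT;               // 1 => addressing error
      }
*/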
2704
2705
2706static
sewardj018f7622002-05-15 21:13:39 +00002707void do_pthread_kill ( ThreadId tid, /* me */
2708 ThreadId thread, /* thread to signal */
2709 Int sig )
2710{
2711 Char msg_buf[100];
2712
2713 if (VG_(clo_trace_signals) || VG_(clo_trace_pthread_level) >= 1) {
2714 VG_(sprintf)(msg_buf,
2715 "pthread_kill thread %d, signo %d",
2716 thread, sig );
2717 print_pthread_event(tid, msg_buf);
2718 }
2719
2720 vg_assert(VG_(is_valid_tid)(tid)
2721 && VG_(threads)[tid].status == VgTs_Runnable);
2722
sewardj4dced352002-06-04 22:54:20 +00002723 if (!VG_(is_valid_tid)(thread)) {
njn25e49d8e72002-09-23 09:36:25 +00002724 VG_(record_pthread_error)( tid,
sewardj4dced352002-06-04 22:54:20 +00002725 "pthread_kill: invalid target thread");
jsgf855d93d2003-10-13 22:26:55 +00002726 SET_PTHREQ_RETVAL(tid, VKI_ESRCH);
2727 return;
2728 }
2729
2730 if (sig == 0) {
2731 /* OK, signal 0 is just for testing */
2732 SET_PTHREQ_RETVAL(tid, 0);
sewardj018f7622002-05-15 21:13:39 +00002733 return;
2734 }
2735
2736 if (sig < 1 || sig > VKI_KNSIG) {
jsgf855d93d2003-10-13 22:26:55 +00002737 SET_PTHREQ_RETVAL(tid, VKI_EINVAL);
sewardj018f7622002-05-15 21:13:39 +00002738 return;
2739 }
2740
2741 VG_(send_signal_to_thread)( thread, sig );
njnd3040452003-05-19 15:04:06 +00002742 SET_PTHREQ_RETVAL(tid, 0);
sewardjb48e5002002-05-13 00:16:03 +00002743}
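/* Illustrative client-side usage (ordinary pthreads code): signal number 0
   performs only the validity checks above without delivering anything, so
   it can be used to probe whether a target thread id is still valid.

      #include <pthread.h>
      #include <signal.h>

      int thread_is_alive ( pthread_t thr )
      {
         int res = pthread_kill(thr, 0);   // no signal actually sent
         return res == 0;                  // ESRCH => no such thread
      }
*/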
2744
2745
sewardj2cb00342002-06-28 01:46:26 +00002746/* -----------------------------------------------------------
2747 FORK HANDLERS.
2748 -------------------------------------------------------- */
2749
2750static
2751void do__set_fhstack_used ( ThreadId tid, Int n )
2752{
2753 Char msg_buf[100];
2754 if (VG_(clo_trace_sched)) {
2755 VG_(sprintf)(msg_buf, "set_fhstack_used to %d", n );
2756 print_pthread_event(tid, msg_buf);
2757 }
2758
2759 vg_assert(VG_(is_valid_tid)(tid)
2760 && VG_(threads)[tid].status == VgTs_Runnable);
2761
2762 if (n >= 0 && n < VG_N_FORKHANDLERSTACK) {
2763 vg_fhstack_used = n;
njnd3040452003-05-19 15:04:06 +00002764 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002765 } else {
njnd3040452003-05-19 15:04:06 +00002766 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002767 }
2768}
2769
2770
2771static
2772void do__get_fhstack_used ( ThreadId tid )
2773{
2774 Int n;
2775 Char msg_buf[100];
2776 if (VG_(clo_trace_sched)) {
2777 VG_(sprintf)(msg_buf, "get_fhstack_used" );
2778 print_pthread_event(tid, msg_buf);
2779 }
2780
2781 vg_assert(VG_(is_valid_tid)(tid)
2782 && VG_(threads)[tid].status == VgTs_Runnable);
2783
2784 n = vg_fhstack_used;
2785 vg_assert(n >= 0 && n < VG_N_FORKHANDLERSTACK);
njnd3040452003-05-19 15:04:06 +00002786 SET_PTHREQ_RETVAL(tid, n);
sewardj2cb00342002-06-28 01:46:26 +00002787}
2788
2789static
2790void do__set_fhstack_entry ( ThreadId tid, Int n, ForkHandlerEntry* fh )
2791{
2792 Char msg_buf[100];
2793 if (VG_(clo_trace_sched)) {
2794 VG_(sprintf)(msg_buf, "set_fhstack_entry %d to %p", n, fh );
2795 print_pthread_event(tid, msg_buf);
2796 }
2797
2798 vg_assert(VG_(is_valid_tid)(tid)
2799 && VG_(threads)[tid].status == VgTs_Runnable);
njn72718642003-07-24 08:45:32 +00002800 VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
njn25e49d8e72002-09-23 09:36:25 +00002801 "pthread_atfork: prepare/parent/child",
2802 (Addr)fh, sizeof(ForkHandlerEntry));
sewardj2cb00342002-06-28 01:46:26 +00002803
njn25e49d8e72002-09-23 09:36:25 +00002804 if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
njnd3040452003-05-19 15:04:06 +00002805 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002806 return;
2807 }
2808
2809 vg_fhstack[n] = *fh;
njnd3040452003-05-19 15:04:06 +00002810 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002811}
2812
2813
2814static
2815void do__get_fhstack_entry ( ThreadId tid, Int n, /*OUT*/
2816 ForkHandlerEntry* fh )
2817{
2818 Char msg_buf[100];
2819 if (VG_(clo_trace_sched)) {
2820 VG_(sprintf)(msg_buf, "get_fhstack_entry %d", n );
2821 print_pthread_event(tid, msg_buf);
2822 }
2823
2824 vg_assert(VG_(is_valid_tid)(tid)
2825 && VG_(threads)[tid].status == VgTs_Runnable);
njn72718642003-07-24 08:45:32 +00002826 VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "fork: prepare/parent/child",
njn25e49d8e72002-09-23 09:36:25 +00002827 (Addr)fh, sizeof(ForkHandlerEntry));
sewardj2cb00342002-06-28 01:46:26 +00002828
njn25e49d8e72002-09-23 09:36:25 +00002829 if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
njnd3040452003-05-19 15:04:06 +00002830 SET_PTHREQ_RETVAL(tid, -1);
sewardj2cb00342002-06-28 01:46:26 +00002831 return;
2832 }
2833
2834 *fh = vg_fhstack[n];
njnd3040452003-05-19 15:04:06 +00002835 SET_PTHREQ_RETVAL(tid, 0);
sewardj2cb00342002-06-28 01:46:26 +00002836
njn25e49d8e72002-09-23 09:36:25 +00002837 VG_TRACK( post_mem_write, (Addr)fh, sizeof(ForkHandlerEntry) );
sewardj2cb00342002-06-28 01:46:26 +00002838}
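/* Illustrative sketch of how a client-side pthread_atfork() could use the
   four requests above (hypothetical wrapper and type names; the real code
   is in vg_libpthread.c and may differ), assuming ForkHandlerEntry simply
   carries the three pthread_atfork callbacks.

      typedef struct {
         void (*prepare)(void);
         void (*parent)(void);
         void (*child)(void);
      } MyForkHandlerEntry;            // assumed layout of ForkHandlerEntry

      extern int my_get_fhstack_used  ( void );                 // hypothetical
      extern int my_set_fhstack_used  ( int n );                // wrappers for
      extern int my_set_fhstack_entry ( int n,                  // the requests
                                        MyForkHandlerEntry* fh );

      int my_pthread_atfork ( void (*prepare)(void),
                              void (*parent)(void),
                              void (*child)(void) )
      {
         MyForkHandlerEntry fh;
         int n = my_get_fhstack_used();
         fh.prepare = prepare;
         fh.parent  = parent;
         fh.child   = child;
         if (my_set_fhstack_entry(n, &fh) != 0)   // -1 => stack full
            return -1;
         my_set_fhstack_used(n + 1);
         return 0;
      }
*/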
2839
njnd3040452003-05-19 15:04:06 +00002840/* ---------------------------------------------------------------------
2841 Specifying shadow register values
2842 ------------------------------------------------------------------ */
2843
2844void VG_(set_return_from_syscall_shadow) ( ThreadId tid, UInt ret_shadow )
2845{
2846 VG_(set_thread_shadow_archreg)(tid, R_EAX, ret_shadow);
2847}
2848
2849UInt VG_(get_exit_status_shadow) ( void )
2850{
2851 return VG_(get_shadow_archreg)(R_EBX);
2852}
2853
sewardj2cb00342002-06-28 01:46:26 +00002854
sewardje663cb92002-04-12 10:26:32 +00002855/* ---------------------------------------------------------------------
sewardj124ca2a2002-06-20 10:19:38 +00002856 Handle client requests.
sewardje663cb92002-04-12 10:26:32 +00002857 ------------------------------------------------------------------ */
2858
sewardj124ca2a2002-06-20 10:19:38 +00002859/* Do a client request for the thread tid. After the request, tid may
2860 or may not still be runnable; if not, the scheduler will have to
2861 choose a new thread to run.
2862*/
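/* Illustrative client-side view of this mechanism (ordinary client code
   using macros from valgrind.h): a client request is just a magic
   instruction sequence whose arguments land in arg[0..] below; when the
   program runs natively, the macros are harmless and yield a default
   result.

      #include "valgrind.h"

      void report_environment ( void )
      {
         if (RUNNING_ON_VALGRIND)           // handled below: returns 1
            VALGRIND_PRINTF("running under Valgrind\n");
      }
*/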
sewardje663cb92002-04-12 10:26:32 +00002863static
sewardj124ca2a2002-06-20 10:19:38 +00002864void do_client_request ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00002865{
sewardj124ca2a2002-06-20 10:19:38 +00002866 UInt* arg = (UInt*)(VG_(threads)[tid].m_eax);
2867 UInt req_no = arg[0];
2868
fitzhardinge98abfc72003-12-16 02:05:15 +00002869 if (0)
2870 VG_(printf)("req no = 0x%x\n", req_no);
sewardje663cb92002-04-12 10:26:32 +00002871 switch (req_no) {
2872
njn3e884182003-04-15 13:03:23 +00002873 case VG_USERREQ__CLIENT_CALL0: {
2874 UInt (*f)(void) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002875 if (f == NULL)
2876 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL: func=%p\n", f);
2877 else
2878 SET_CLCALL_RETVAL(tid, f ( ), (Addr)f);
njn3e884182003-04-15 13:03:23 +00002879 break;
2880 }
2881 case VG_USERREQ__CLIENT_CALL1: {
2882 UInt (*f)(UInt) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002883 if (f == NULL)
2884 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL: func=%p\n", f);
2885 else
2886 SET_CLCALL_RETVAL(tid, f ( arg[2] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002887 break;
2888 }
2889 case VG_USERREQ__CLIENT_CALL2: {
2890 UInt (*f)(UInt, UInt) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002891 if (f == NULL)
2892 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL: func=%p\n", f);
2893 else
2894 SET_CLCALL_RETVAL(tid, f ( arg[2], arg[3] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002895 break;
2896 }
2897 case VG_USERREQ__CLIENT_CALL3: {
2898 UInt (*f)(UInt, UInt, UInt) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00002899 if (f == NULL)
2900 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL: func=%p\n", f);
2901 else
2902 SET_CLCALL_RETVAL(tid, f ( arg[2], arg[3], arg[4] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00002903 break;
2904 }
2905
nethercote7cc9c232004-01-21 15:08:04 +00002906 /* Note: for tools that replace malloc() et al, we want to call
njn3e884182003-04-15 13:03:23 +00002907 the replacement versions. For those that don't, we want to call
2908 VG_(cli_malloc)() et al. We do this by calling SK_(malloc)(), which
nethercote3ced0e32004-01-26 14:50:45 +00002909 malloc-replacing tools must replace, but have the default definition
2910 of SK_(malloc)() call VG_(cli_malloc)(). */
njn3e884182003-04-15 13:03:23 +00002911
2912 /* Note: for MALLOC and FREE, must set the appropriate "lock"... see
2913 the comment in vg_defaults.c/SK_(malloc)() for why. */
sewardj124ca2a2002-06-20 10:19:38 +00002914 case VG_USERREQ__MALLOC:
njn3e884182003-04-15 13:03:23 +00002915 VG_(sk_malloc_called_by_scheduler) = True;
njnd3040452003-05-19 15:04:06 +00002916 SET_PTHREQ_RETVAL(
njn72718642003-07-24 08:45:32 +00002917 tid, (UInt)SK_(malloc) ( arg[1] )
sewardj124ca2a2002-06-20 10:19:38 +00002918 );
njn3e884182003-04-15 13:03:23 +00002919 VG_(sk_malloc_called_by_scheduler) = False;
sewardj124ca2a2002-06-20 10:19:38 +00002920 break;
2921
2922 case VG_USERREQ__FREE:
njn3e884182003-04-15 13:03:23 +00002923 VG_(sk_malloc_called_by_scheduler) = True;
njn72718642003-07-24 08:45:32 +00002924 SK_(free) ( (void*)arg[1] );
njn3e884182003-04-15 13:03:23 +00002925 VG_(sk_malloc_called_by_scheduler) = False;
njnd3040452003-05-19 15:04:06 +00002926 SET_PTHREQ_RETVAL(tid, 0); /* irrelevant */
sewardj124ca2a2002-06-20 10:19:38 +00002927 break;
2928
sewardj124ca2a2002-06-20 10:19:38 +00002929 case VG_USERREQ__PTHREAD_GET_THREADID:
njnd3040452003-05-19 15:04:06 +00002930 SET_PTHREQ_RETVAL(tid, tid);
sewardj124ca2a2002-06-20 10:19:38 +00002931 break;
2932
2933 case VG_USERREQ__RUNNING_ON_VALGRIND:
njnd3040452003-05-19 15:04:06 +00002934 SET_CLREQ_RETVAL(tid, 1);
sewardj124ca2a2002-06-20 10:19:38 +00002935 break;
2936
2937 case VG_USERREQ__GET_PTHREAD_TRACE_LEVEL:
njnd3040452003-05-19 15:04:06 +00002938 SET_PTHREQ_RETVAL(tid, VG_(clo_trace_pthread_level));
sewardj124ca2a2002-06-20 10:19:38 +00002939 break;
2940
2941 case VG_USERREQ__READ_MILLISECOND_TIMER:
njnd3040452003-05-19 15:04:06 +00002942 SET_PTHREQ_RETVAL(tid, VG_(read_millisecond_timer)());
sewardj124ca2a2002-06-20 10:19:38 +00002943 break;
2944
2945 /* Some of these may make thread tid non-runnable, but the
2946 scheduler checks for that on return from this function. */
2947 case VG_USERREQ__PTHREAD_MUTEX_LOCK:
2948 do_pthread_mutex_lock( tid, False, (void *)(arg[1]) );
2949 break;
2950
2951 case VG_USERREQ__PTHREAD_MUTEX_TRYLOCK:
2952 do_pthread_mutex_lock( tid, True, (void *)(arg[1]) );
2953 break;
2954
2955 case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
2956 do_pthread_mutex_unlock( tid, (void *)(arg[1]) );
2957 break;
2958
sewardj00a66b12002-10-12 16:42:35 +00002959 case VG_USERREQ__PTHREAD_GETSPECIFIC_PTR:
2960 do_pthread_getspecific_ptr ( tid );
sewardj124ca2a2002-06-20 10:19:38 +00002961 break;
2962
2963 case VG_USERREQ__SET_CANCELTYPE:
2964 do__set_canceltype ( tid, arg[1] );
2965 break;
2966
2967 case VG_USERREQ__CLEANUP_PUSH:
2968 do__cleanup_push ( tid, (CleanupEntry*)(arg[1]) );
2969 break;
2970
2971 case VG_USERREQ__CLEANUP_POP:
2972 do__cleanup_pop ( tid, (CleanupEntry*)(arg[1]) );
2973 break;
2974
2975 case VG_USERREQ__TESTCANCEL:
2976 do__testcancel ( tid );
2977 break;
2978
sewardje663cb92002-04-12 10:26:32 +00002979 case VG_USERREQ__PTHREAD_JOIN:
2980 do_pthread_join( tid, arg[1], (void**)(arg[2]) );
2981 break;
2982
sewardj3b5d8862002-04-20 13:53:23 +00002983 case VG_USERREQ__PTHREAD_COND_WAIT:
2984 do_pthread_cond_wait( tid,
nethercote1f0173b2004-02-28 15:40:36 +00002985 (vg_pthread_cond_t *)(arg[1]),
2986 (vg_pthread_mutex_t *)(arg[2]),
sewardj5f07b662002-04-23 16:52:51 +00002987 0xFFFFFFFF /* no timeout */ );
2988 break;
2989
2990 case VG_USERREQ__PTHREAD_COND_TIMEDWAIT:
2991 do_pthread_cond_wait( tid,
nethercote1f0173b2004-02-28 15:40:36 +00002992 (vg_pthread_cond_t *)(arg[1]),
2993 (vg_pthread_mutex_t *)(arg[2]),
sewardj5f07b662002-04-23 16:52:51 +00002994 arg[3] /* timeout millisecond point */ );
sewardj3b5d8862002-04-20 13:53:23 +00002995 break;
2996
2997 case VG_USERREQ__PTHREAD_COND_SIGNAL:
2998 do_pthread_cond_signal_or_broadcast(
2999 tid,
3000 False, /* signal, not broadcast */
nethercote1f0173b2004-02-28 15:40:36 +00003001 (vg_pthread_cond_t *)(arg[1]) );
sewardj3b5d8862002-04-20 13:53:23 +00003002 break;
3003
3004 case VG_USERREQ__PTHREAD_COND_BROADCAST:
3005 do_pthread_cond_signal_or_broadcast(
3006 tid,
3007 True, /* broadcast, not signal */
nethercote1f0173b2004-02-28 15:40:36 +00003008 (vg_pthread_cond_t *)(arg[1]) );
sewardj3b5d8862002-04-20 13:53:23 +00003009 break;
3010
sewardj00a66b12002-10-12 16:42:35 +00003011 case VG_USERREQ__PTHREAD_KEY_VALIDATE:
3012 do_pthread_key_validate ( tid,
3013 (pthread_key_t)(arg[1]) );
3014 break;
3015
sewardj5f07b662002-04-23 16:52:51 +00003016 case VG_USERREQ__PTHREAD_KEY_CREATE:
3017 do_pthread_key_create ( tid,
3018 (pthread_key_t*)(arg[1]),
3019 (void(*)(void*))(arg[2]) );
3020 break;
3021
3022 case VG_USERREQ__PTHREAD_KEY_DELETE:
3023 do_pthread_key_delete ( tid,
3024 (pthread_key_t)(arg[1]) );
3025 break;
3026
sewardj00a66b12002-10-12 16:42:35 +00003027 case VG_USERREQ__PTHREAD_SETSPECIFIC_PTR:
3028 do_pthread_setspecific_ptr ( tid,
3029 (void**)(arg[1]) );
sewardj5f07b662002-04-23 16:52:51 +00003030 break;
3031
sewardjb48e5002002-05-13 00:16:03 +00003032 case VG_USERREQ__PTHREAD_SIGMASK:
3033 do_pthread_sigmask ( tid,
3034 arg[1],
3035 (vki_ksigset_t*)(arg[2]),
3036 (vki_ksigset_t*)(arg[3]) );
3037 break;
3038
sewardj018f7622002-05-15 21:13:39 +00003039 case VG_USERREQ__PTHREAD_KILL:
3040 do_pthread_kill ( tid, arg[1], arg[2] );
3041 break;
3042
sewardjff42d1d2002-05-22 13:17:31 +00003043 case VG_USERREQ__PTHREAD_YIELD:
3044 do_pthread_yield ( tid );
sewardj18a62ff2002-07-12 22:30:51 +00003045 /* On return from do_client_request(), the scheduler will
3046 select a new thread to run. */
sewardjff42d1d2002-05-22 13:17:31 +00003047 break;
sewardj018f7622002-05-15 21:13:39 +00003048
sewardj7989d0c2002-05-28 11:00:01 +00003049 case VG_USERREQ__SET_CANCELSTATE:
3050 do__set_cancelstate ( tid, arg[1] );
3051 break;
3052
sewardj7989d0c2002-05-28 11:00:01 +00003053 case VG_USERREQ__SET_OR_GET_DETACH:
3054 do__set_or_get_detach ( tid, arg[1], arg[2] );
3055 break;
3056
3057 case VG_USERREQ__SET_CANCELPEND:
3058 do__set_cancelpend ( tid, arg[1], (void(*)(void*))arg[2] );
3059 break;
3060
3061 case VG_USERREQ__WAIT_JOINER:
3062 do__wait_joiner ( tid, (void*)arg[1] );
3063 break;
3064
3065 case VG_USERREQ__QUIT:
3066 do__quit ( tid );
3067 break;
3068
3069 case VG_USERREQ__APPLY_IN_NEW_THREAD:
3070 do__apply_in_new_thread ( tid, (void*(*)(void*))arg[1],
3071 (void*)arg[2] );
3072 break;
3073
sewardj870497a2002-05-29 01:06:47 +00003074 case VG_USERREQ__GET_KEY_D_AND_S:
3075 do__get_key_destr_and_spec ( tid,
3076 (pthread_key_t)arg[1],
3077 (CleanupEntry*)arg[2] );
3078 break;
3079
sewardjef037c72002-05-30 00:40:03 +00003080 case VG_USERREQ__NUKE_OTHER_THREADS:
3081 VG_(nuke_all_threads_except) ( tid );
njnd3040452003-05-19 15:04:06 +00003082 SET_PTHREQ_RETVAL(tid, 0);
sewardjef037c72002-05-30 00:40:03 +00003083 break;
3084
sewardj4dced352002-06-04 22:54:20 +00003085 case VG_USERREQ__PTHREAD_ERROR:
njn25e49d8e72002-09-23 09:36:25 +00003086 VG_(record_pthread_error)( tid, (Char*)(arg[1]) );
njnd3040452003-05-19 15:04:06 +00003087 SET_PTHREQ_RETVAL(tid, 0);
sewardj4dced352002-06-04 22:54:20 +00003088 break;
3089
sewardj2cb00342002-06-28 01:46:26 +00003090 case VG_USERREQ__SET_FHSTACK_USED:
3091 do__set_fhstack_used( tid, (Int)(arg[1]) );
3092 break;
3093
3094 case VG_USERREQ__GET_FHSTACK_USED:
3095 do__get_fhstack_used( tid );
3096 break;
3097
3098 case VG_USERREQ__SET_FHSTACK_ENTRY:
3099 do__set_fhstack_entry( tid, (Int)(arg[1]),
3100 (ForkHandlerEntry*)(arg[2]) );
3101 break;
3102
3103 case VG_USERREQ__GET_FHSTACK_ENTRY:
3104 do__get_fhstack_entry( tid, (Int)(arg[1]),
3105 (ForkHandlerEntry*)(arg[2]) );
3106 break;
3107
sewardj77e466c2002-04-14 02:29:29 +00003108 case VG_USERREQ__SIGNAL_RETURNS:
3109 handle_signal_return(tid);
3110 break;
fitzhardinge98abfc72003-12-16 02:05:15 +00003111
3112
3113 case VG_USERREQ__GET_SIGRT_MIN:
3114 SET_PTHREQ_RETVAL(tid, VG_(sig_rtmin));
3115 break;
3116
3117 case VG_USERREQ__GET_SIGRT_MAX:
3118 SET_PTHREQ_RETVAL(tid, VG_(sig_rtmax));
3119 break;
3120
3121 case VG_USERREQ__ALLOC_RTSIG:
3122 SET_PTHREQ_RETVAL(tid, VG_(sig_alloc_rtsig)((Int)arg[1]));
3123 break;
3124
fitzhardinge39de4b42003-10-31 07:12:21 +00003125 case VG_USERREQ__PRINTF: {
3126 int count =
3127 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (va_list)arg[2] );
3128 SET_CLREQ_RETVAL( tid, count );
3129 break; }
3130
fitzhardinge98abfc72003-12-16 02:05:15 +00003131
fitzhardinge39de4b42003-10-31 07:12:21 +00003132 case VG_USERREQ__INTERNAL_PRINTF: {
3133 int count =
3134 VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (va_list)arg[2] );
3135 SET_CLREQ_RETVAL( tid, count );
3136 break; }
3137
3138 case VG_USERREQ__PRINTF_BACKTRACE: {
3139 ExeContext *e = VG_(get_ExeContext)( tid );
3140 int count =
3141 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (va_list)arg[2] );
3142 VG_(mini_stack_dump)(e->eips, VG_(clo_backtrace_size));
3143 SET_CLREQ_RETVAL( tid, count );
3144 break; }
3145
3146 case VG_USERREQ__INTERNAL_PRINTF_BACKTRACE: {
3147 ExeContext *e = VG_(get_ExeContext)( tid );
3148 int count =
3149 VG_(vmessage)( Vg_UserMsg, (char *)arg[1], (va_list)arg[2] );
3150 VG_(mini_stack_dump)(e->eips, VG_(clo_backtrace_size));
3151 SET_CLREQ_RETVAL( tid, count );
3152 break; }
3153
fitzhardinge98abfc72003-12-16 02:05:15 +00003154 case VG_USERREQ__REGISTER_LIBC_FREERES:
3155 VG_(__libc_freeres_wrapper) = arg[1];
3156 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
3157 break;
3158
3159 case VG_USERREQ__GET_MALLOCFUNCS: {
3160 struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];
3161
3162 info->sk_malloc = (Addr)SK_(malloc);
3163 info->sk_calloc = (Addr)SK_(calloc);
3164 info->sk_realloc = (Addr)SK_(realloc);
3165 info->sk_memalign = (Addr)SK_(memalign);
3166 info->sk___builtin_new = (Addr)SK_(__builtin_new);
3167 info->sk___builtin_vec_new = (Addr)SK_(__builtin_vec_new);
3168 info->sk_free = (Addr)SK_(free);
3169 info->sk___builtin_delete = (Addr)SK_(__builtin_delete);
3170 info->sk___builtin_vec_delete = (Addr)SK_(__builtin_vec_delete);
3171
3172 info->arena_payload_szB = (Addr)VG_(arena_payload_szB);
3173
3174 info->clo_sloppy_malloc = VG_(clo_sloppy_malloc);
3175 info->clo_trace_malloc = VG_(clo_trace_malloc);
3176
3177 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
3178
3179 break;
3180 }
3181
3182 case VG_USERREQ__REGISTER_REDIRECT_SYM: {
3183 VG_(add_redirect_sym)((const Char *)arg[1], (const Char *)arg[2],
3184 (const Char *)arg[3], (const Char *)arg[4]);
3185 break;
3186 }
3187
3188 case VG_USERREQ__REGISTER_REDIRECT_ADDR: {
3189 VG_(add_redirect_addr)((const Char *)arg[1], (const Char *)arg[2],
3190 (Addr)arg[3]);
3191 break;
3192 }
3193
njn25e49d8e72002-09-23 09:36:25 +00003194 /* Requests from the client program */
3195
3196 case VG_USERREQ__DISCARD_TRANSLATIONS:
3197 if (VG_(clo_verbosity) > 2)
3198 VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
3199 " addr %p, len %d\n",
3200 (void*)arg[1], arg[2] );
3201
sewardj97ad5522003-05-04 12:32:56 +00003202 VG_(invalidate_translations)( arg[1], arg[2], True );
njn25e49d8e72002-09-23 09:36:25 +00003203
njnd3040452003-05-19 15:04:06 +00003204 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
njn25e49d8e72002-09-23 09:36:25 +00003205 break;
3206
njn47363ab2003-04-21 13:24:40 +00003207 case VG_USERREQ__COUNT_ERRORS:
njnd3040452003-05-19 15:04:06 +00003208 SET_CLREQ_RETVAL( tid, VG_(n_errs_found) );
njn47363ab2003-04-21 13:24:40 +00003209 break;
3210
sewardje663cb92002-04-12 10:26:32 +00003211 default:
njn25e49d8e72002-09-23 09:36:25 +00003212 if (VG_(needs).client_requests) {
sewardj34042512002-10-22 04:14:35 +00003213 UInt ret;
3214
njn25e49d8e72002-09-23 09:36:25 +00003215 if (VG_(clo_verbosity) > 2)
fitzhardinge98abfc72003-12-16 02:05:15 +00003216 VG_(printf)("client request: code %x, addr %p, len %d\n",
njn25e49d8e72002-09-23 09:36:25 +00003217 arg[0], (void*)arg[1], arg[2] );
3218
njn72718642003-07-24 08:45:32 +00003219 if (SK_(handle_client_request) ( tid, arg, &ret ))
njnd3040452003-05-19 15:04:06 +00003220 SET_CLREQ_RETVAL(tid, ret);
njn25e49d8e72002-09-23 09:36:25 +00003221 } else {
sewardj34042512002-10-22 04:14:35 +00003222 static Bool whined = False;
3223
3224 if (!whined) {
nethercote7cc9c232004-01-21 15:08:04 +00003225 // Allow for requests in core, but defined by tools, which
njnd7994182003-10-02 13:44:04 +00003226 // have 0 and 0 in their two high bytes.
3227 Char c1 = (arg[0] >> 24) & 0xff;
3228 Char c2 = (arg[0] >> 16) & 0xff;
3229 if (c1 == 0) c1 = '_';
3230 if (c2 == 0) c2 = '_';
sewardj34042512002-10-22 04:14:35 +00003231 VG_(message)(Vg_UserMsg, "Warning:\n"
njnd7994182003-10-02 13:44:04 +00003232 " unhandled client request: 0x%x (%c%c+0x%x). Perhaps\n"
3233 " VG_(needs).client_requests should be set?\n",
3234 arg[0], c1, c2, arg[0] & 0xffff);
sewardj34042512002-10-22 04:14:35 +00003235 whined = True;
3236 }
njn25e49d8e72002-09-23 09:36:25 +00003237 }
sewardje663cb92002-04-12 10:26:32 +00003238 break;
3239 }
3240}
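/* Illustrative client-side usage of one of the requests handled above
   (macro from valgrind.h): a program that rewrites code in place should
   tell Valgrind to drop any translations it has cached for that address
   range, which ends up in the VG_USERREQ__DISCARD_TRANSLATIONS case.

      #include "valgrind.h"

      void patch_code ( unsigned char* code, unsigned len )
      {
         // ... overwrite the instructions at [code, code+len) ...
         VALGRIND_DISCARD_TRANSLATIONS(code, len);
      }
*/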
3241
3242
sewardj6072c362002-04-19 14:40:57 +00003243/* ---------------------------------------------------------------------
3244 Sanity checking.
3245 ------------------------------------------------------------------ */
3246
3247/* Internal consistency checks on the sched/pthread structures. */
3248static
3249void scheduler_sanity ( void )
3250{
nethercote1f0173b2004-02-28 15:40:36 +00003251 vg_pthread_mutex_t* mx;
3252 vg_pthread_cond_t* cv;
sewardj6072c362002-04-19 14:40:57 +00003253 Int i;
jsgf855d93d2003-10-13 22:26:55 +00003254 struct timeout* top;
3255 UInt lasttime = 0;
3256
3257 for(top = timeouts; top != NULL; top = top->next) {
3258 vg_assert(top->time >= lasttime);
3259 vg_assert(VG_(is_valid_or_empty_tid)(top->tid));
3260
3261#if 0
3262 /* assert timeout entry is either stale, or associated with a
3263 thread in the right state
3264
3265 XXX disable for now - can be stale, but times happen to match
3266 */
3267 vg_assert(VG_(threads)[top->tid].awaken_at != top->time ||
3268 VG_(threads)[top->tid].status == VgTs_Sleeping ||
3269 VG_(threads)[top->tid].status == VgTs_WaitCV);
3270#endif
3271
3272 lasttime = top->time;
3273 }
sewardj5f07b662002-04-23 16:52:51 +00003274
sewardj6072c362002-04-19 14:40:57 +00003275 /* VG_(printf)("scheduler_sanity\n"); */
3276 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00003277 mx = VG_(threads)[i].associated_mx;
3278 cv = VG_(threads)[i].associated_cv;
3279 if (VG_(threads)[i].status == VgTs_WaitMX) {
sewardjbf290b92002-05-01 02:28:01 +00003280 /* If we're waiting on a MX: (1) the mx is not null, (2, 3)
3281 it's actually held by someone, since otherwise this thread
3282 is deadlocked, (4) the mutex's owner is not us, since
3283 otherwise this thread is also deadlocked. The logic in
3284 do_pthread_mutex_lock rejects attempts by a thread to lock
3285 a (non-recursive) mutex which it already owns.
sewardj05553872002-04-20 20:53:17 +00003286
sewardjbf290b92002-05-01 02:28:01 +00003287 (2) has been seen to fail sometimes. I don't know why.
3288 Possibly to do with signals. */
sewardj3b5d8862002-04-20 13:53:23 +00003289 vg_assert(cv == NULL);
sewardj05553872002-04-20 20:53:17 +00003290 /* 1 */ vg_assert(mx != NULL);
nethercote1f0173b2004-02-28 15:40:36 +00003291 /* 2 */ vg_assert(mx->__vg_m_count > 0);
3292 /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__vg_m_owner));
3293 /* 4 */ vg_assert((UInt)i != (ThreadId)mx->__vg_m_owner);
sewardj3b5d8862002-04-20 13:53:23 +00003294 } else
sewardj018f7622002-05-15 21:13:39 +00003295 if (VG_(threads)[i].status == VgTs_WaitCV) {
sewardj3b5d8862002-04-20 13:53:23 +00003296 vg_assert(cv != NULL);
3297 vg_assert(mx != NULL);
sewardj6072c362002-04-19 14:40:57 +00003298 } else {
sewardj05553872002-04-20 20:53:17 +00003299 /* Unfortunately these don't hold true when a sighandler is
3300 running. To be fixed. */
3301 /* vg_assert(cv == NULL); */
3302 /* vg_assert(mx == NULL); */
sewardj6072c362002-04-19 14:40:57 +00003303 }
sewardjbf290b92002-05-01 02:28:01 +00003304
sewardj018f7622002-05-15 21:13:39 +00003305 if (VG_(threads)[i].status != VgTs_Empty) {
sewardjbf290b92002-05-01 02:28:01 +00003306 Int
sewardj018f7622002-05-15 21:13:39 +00003307 stack_used = (Addr)VG_(threads)[i].stack_highest_word
3308 - (Addr)VG_(threads)[i].m_esp;
fitzhardinge98c4dc02004-03-16 08:27:29 +00003309
3310 /* This test is a bit bogus - it doesn't take into account
3311 alternate signal stacks, for a start. Also, if a thread
3312 has its stack pointer somewhere strange, killing Valgrind
3313 isn't the right answer. */
3314 if (0 && i > 1 /* not the root thread */
sewardjbf290b92002-05-01 02:28:01 +00003315 && stack_used
3316 >= (VG_PTHREAD_STACK_MIN - 1000 /* paranoia */)) {
3317 VG_(message)(Vg_UserMsg,
njn25e49d8e72002-09-23 09:36:25 +00003318 "Error: STACK OVERFLOW: "
sewardjbf290b92002-05-01 02:28:01 +00003319 "thread %d: stack used %d, available %d",
3320 i, stack_used, VG_PTHREAD_STACK_MIN );
3321 VG_(message)(Vg_UserMsg,
3322 "Terminating Valgrind. If thread(s) "
3323 "really need more stack, increase");
3324 VG_(message)(Vg_UserMsg,
3325 "VG_PTHREAD_STACK_SIZE in vg_include.h and recompile.");
3326 VG_(exit)(1);
3327 }
3328 }
sewardj6072c362002-04-19 14:40:57 +00003329 }
sewardj5f07b662002-04-23 16:52:51 +00003330
3331 for (i = 0; i < VG_N_THREAD_KEYS; i++) {
3332 if (!vg_thread_keys[i].inuse)
3333 vg_assert(vg_thread_keys[i].destructor == NULL);
3334 }
sewardj6072c362002-04-19 14:40:57 +00003335}
3336
3337
sewardje663cb92002-04-12 10:26:32 +00003338/*--------------------------------------------------------------------*/
3339/*--- end vg_scheduler.c ---*/
3340/*--------------------------------------------------------------------*/