
/*--------------------------------------------------------------------*/
/*--- A user-space pthreads implementation.         vg_scheduler.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an x86 protected-mode emulator
   designed for debugging and profiling binaries on x86-Unixes.

   Copyright (C) 2000-2002 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file LICENSE.
*/

#include "vg_include.h"
#include "vg_constants.h"

#include "valgrind.h"   /* for VG_USERREQ__MAKE_NOACCESS and
                           VG_USERREQ__DO_LEAK_CHECK */

/* BORKAGE/ISSUES as of 14 Apr 02

   Note!  This pthreads implementation is so poor as to not be
   suitable for use by anyone at all!

   - Currently, when a signal handler is run, just the
     ThreadStatus.status fields are saved in the signal frame, along
     with the CPU state.  Question: should I also save and restore:
        ThreadStatus.joiner
        ThreadStatus.waited_on_mid
        ThreadStatus.awaken_at
        ThreadStatus.retval
     Currently unsure, and so am not doing so.

   - Signals interrupting read/write and nanosleep: SA_RESTART settings.
     Read/write correctly return with EINTR when SA_RESTART isn't
     specified and they are interrupted by a signal.  nanosleep just
     pretends signals don't exist -- should be fixed.

   - Read/write syscall starts: don't crap out when the initial
     nonblocking read/write returns an error.

   - Get rid of restrictions re use of sigaltstack; they are no longer
     needed.

   - Fix signals properly, so that each thread has its own blocking
     mask.  Currently this isn't done, and (worse?) signals are
     delivered to Thread 1 (the root thread) regardless.

     So, what's the deal with signals and mutexes?  If a thread is
     blocked on a mutex, or waiting on a condition variable for that
     matter, can signals still be delivered to it?  This has serious
     consequences -- deadlocks, etc.
*/


/* ---------------------------------------------------------------------
   Types and globals for the scheduler.
   ------------------------------------------------------------------ */

/* type ThreadId is defined in vg_include.h. */

/* struct ThreadState is defined in vg_include.h. */

/* Private globals.  A statically allocated array of threads.  NOTE:
   [0] is never used, to simplify the simulation of initialisers for
   LinuxThreads. */
static ThreadState vg_threads[VG_N_THREADS];

/* The tid of the thread currently in VG_(baseBlock). */
static Int vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;


/* vg_oursignalhandler() might longjmp().  Here's the jmp_buf. */
jmp_buf VG_(scheduler_jmpbuf);
/* ... and if so, here's the signal which caused it to do so. */
Int VG_(longjmpd_on_signal);


/* Machinery to keep track of which threads are waiting on which
   fds. */
typedef
   struct {
      /* The thread which made the request. */
      ThreadId tid;

      /* The next two fields describe the request. */
      /* File descriptor waited for.  -1 means this slot is not in use */
      Int fd;
      /* The syscall number the fd is used in. */
      Int syscall_no;

      /* False => still waiting for select to tell us the fd is ready
         to go.  True => the fd is ready, but the results have not yet
         been delivered back to the calling thread.  Once the latter
         happens, this entire record is marked as no longer in use, by
         making the fd field be -1. */
      Bool ready;
   }
   VgWaitedOnFd;

static VgWaitedOnFd vg_waiting_fds[VG_N_WAITING_FDS];

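/* Note on the lifecycle of a vg_waiting_fds slot, pieced together
   from the functions below: a slot is free when .fd == -1; it is
   claimed by add_waiting_fd() when a client read/write would block;
   poll_for_ready_fds() sets .ready once select() reports the fd; and
   complete_blocked_syscalls() delivers the result and frees the slot
   (fd = -1) again -- but only once the requesting thread is back in
   the VgTs_WaitFD state. */
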
/* Keeping track of keys. */
typedef
   struct {
      /* Has this key been allocated ? */
      Bool inuse;
      /* If .inuse==True, records the address of the associated
         destructor, or NULL if none. */
      void (*destructor)(void*);
   }
   ThreadKeyState;

/* And our array of thread keys. */
static ThreadKeyState vg_thread_keys[VG_N_THREAD_KEYS];

typedef UInt ThreadKey;


/* Forwards */
static void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid );

static void do_nontrivial_clientreq ( ThreadId tid );

static void scheduler_sanity ( void );

static void do_pthread_mutex_unlock ( ThreadId,
                                      void* /* pthread_mutex_t* */ );
static void do_pthread_mutex_lock ( ThreadId, Bool,
                                    void* /* pthread_mutex_t* */ );


/* ---------------------------------------------------------------------
   Helper functions for the scheduler.
   ------------------------------------------------------------------ */

static __inline__
Bool is_valid_tid ( ThreadId tid )
{
   /* tid is unsigned, hence no < 0 test. */
   if (tid == 0) return False;
   if (tid >= VG_N_THREADS) return False;
   return True;
}


/* For constructing error messages only: try and identify a thread
   whose stack this address currently falls within, or return
   VG_INVALID_THREADID if it doesn't.  A small complication is dealing
   with any currently VG_(baseBlock)-resident thread.
*/
ThreadId VG_(identify_stack_addr)( Addr a )
{
   ThreadId tid, tid_to_skip;

   tid_to_skip = VG_INVALID_THREADID;

   /* First check to see if there's a currently-loaded thread in
      VG_(baseBlock). */
   if (vg_tid_currently_in_baseBlock != VG_INVALID_THREADID) {
      tid = vg_tid_currently_in_baseBlock;
      if (VG_(baseBlock)[VGOFF_(m_esp)] <= a
          && a <= vg_threads[tid].stack_highest_word)
         return tid;
      else
         tid_to_skip = tid;
   }

   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (vg_threads[tid].status == VgTs_Empty) continue;
      if (tid == tid_to_skip) continue;
      if (vg_threads[tid].m_esp <= a
          && a <= vg_threads[tid].stack_highest_word)
         return tid;
   }
   return VG_INVALID_THREADID;
}


/* Print the scheduler status. */
void VG_(pp_sched_status) ( void )
{
   Int i;
   VG_(printf)("\nsched status:\n");
   for (i = 1; i < VG_N_THREADS; i++) {
      if (vg_threads[i].status == VgTs_Empty) continue;
      VG_(printf)("\nThread %d: status = ", i);
      switch (vg_threads[i].status) {
         case VgTs_Runnable:   VG_(printf)("Runnable"); break;
         case VgTs_WaitFD:     VG_(printf)("WaitFD"); break;
         case VgTs_WaitJoiner: VG_(printf)("WaitJoiner(%d)",
                                           vg_threads[i].joiner); break;
         case VgTs_WaitJoinee: VG_(printf)("WaitJoinee"); break;
         case VgTs_Sleeping:   VG_(printf)("Sleeping"); break;
         case VgTs_WaitMX:     VG_(printf)("WaitMX"); break;
         case VgTs_WaitCV:     VG_(printf)("WaitCV"); break;
         default: VG_(printf)("???"); break;
      }
      VG_(printf)(", associated_mx = %p, associated_cv = %p\n",
                  vg_threads[i].associated_mx,
                  vg_threads[i].associated_cv );
      VG_(pp_ExeContext)(
         VG_(get_ExeContext)( False, vg_threads[i].m_eip,
                              vg_threads[i].m_ebp ));
   }
   VG_(printf)("\n");
}

static
void add_waiting_fd ( ThreadId tid, Int fd, Int syscall_no )
{
   Int i;

   vg_assert(fd != -1); /* avoid total chaos */

   for (i = 0; i < VG_N_WAITING_FDS; i++)
      if (vg_waiting_fds[i].fd == -1)
         break;

   if (i == VG_N_WAITING_FDS)
      VG_(panic)("add_waiting_fd: VG_N_WAITING_FDS is too low");
   /*
   VG_(printf)("add_waiting_fd: add (tid %d, fd %d) at slot %d\n",
               tid, fd, i);
   */
   vg_waiting_fds[i].fd         = fd;
   vg_waiting_fds[i].tid        = tid;
   vg_waiting_fds[i].ready      = False;
   vg_waiting_fds[i].syscall_no = syscall_no;
}


static
void print_sched_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, "  SCHED[%d]: %s", tid, what );
}


static
void print_pthread_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, "PTHREAD[%d]: %s", tid, what );
}


static
Char* name_of_sched_event ( UInt event )
{
   switch (event) {
      case VG_TRC_EBP_JMP_SYSCALL:    return "SYSCALL";
      case VG_TRC_EBP_JMP_CLIENTREQ:  return "CLIENTREQ";
      case VG_TRC_INNER_COUNTERZERO:  return "COUNTERZERO";
      case VG_TRC_INNER_FASTMISS:     return "FASTMISS";
      case VG_TRC_UNRESUMABLE_SIGNAL: return "FATALSIGNAL";
      default:                        return "??UNKNOWN??";
   }
}


/* Create a translation of the client basic block beginning at
   orig_addr, and add it to the translation cache & translation table.
   This probably doesn't really belong here, but, hey ...
*/
static
void create_translation_for ( ThreadId tid, Addr orig_addr )
{
   Addr    trans_addr;
   TTEntry tte;
   Int     orig_size, trans_size;
   /* Ensure there is space to hold a translation. */
   VG_(maybe_do_lru_pass)();
   VG_(translate)( &vg_threads[tid],
                   orig_addr, &orig_size, &trans_addr, &trans_size );
   /* Copy data at trans_addr into the translation cache.
      Returned pointer is to the code, not to the 4-byte
      header. */
   /* Since the .orig_size and .trans_size fields are
      UShort, be paranoid. */
   vg_assert(orig_size > 0 && orig_size < 65536);
   vg_assert(trans_size > 0 && trans_size < 65536);
   tte.orig_size  = orig_size;
   tte.orig_addr  = orig_addr;
   tte.trans_size = trans_size;
   tte.trans_addr = VG_(copy_to_transcache)
                       ( trans_addr, trans_size );
   tte.mru_epoch  = VG_(current_epoch);
   /* Free the intermediary -- was allocated by VG_(emit_code). */
   VG_(jitfree)( (void*)trans_addr );
   /* Add to trans tab and set back pointer. */
   VG_(add_to_trans_tab) ( &tte );
   /* Update stats. */
   VG_(this_epoch_in_count) ++;
   VG_(this_epoch_in_osize) += orig_size;
   VG_(this_epoch_in_tsize) += trans_size;
   VG_(overall_in_count) ++;
   VG_(overall_in_osize) += orig_size;
   VG_(overall_in_tsize) += trans_size;
   /* Record translated area for SMC detection. */
   VG_(smc_mark_original) ( orig_addr, orig_size );
}


/* Allocate a completely empty ThreadState record. */
static
ThreadId vg_alloc_ThreadState ( void )
{
   Int i;
   for (i = 1; i < VG_N_THREADS; i++) {
      if (vg_threads[i].status == VgTs_Empty)
         return i;
   }
   VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
   VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
   VG_(panic)("VG_N_THREADS is too low");
   /*NOTREACHED*/
}


ThreadState* VG_(get_thread_state) ( ThreadId tid )
{
   vg_assert(is_valid_tid(tid));
   vg_assert(vg_threads[tid].status != VgTs_Empty);
   return & vg_threads[tid];
}


ThreadState* VG_(get_current_thread_state) ( void )
{
   vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);
   return VG_(get_thread_state) ( vg_tid_currently_in_baseBlock );
}


ThreadId VG_(get_current_tid) ( void )
{
   vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);
   return vg_tid_currently_in_baseBlock;
}


/* Copy the saved state of a thread into VG_(baseBlock), ready for it
   to be run. */
__inline__
void VG_(load_thread_state) ( ThreadId tid )
{
   Int i;
   vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);

   VG_(baseBlock)[VGOFF_(m_eax)] = vg_threads[tid].m_eax;
   VG_(baseBlock)[VGOFF_(m_ebx)] = vg_threads[tid].m_ebx;
   VG_(baseBlock)[VGOFF_(m_ecx)] = vg_threads[tid].m_ecx;
   VG_(baseBlock)[VGOFF_(m_edx)] = vg_threads[tid].m_edx;
   VG_(baseBlock)[VGOFF_(m_esi)] = vg_threads[tid].m_esi;
   VG_(baseBlock)[VGOFF_(m_edi)] = vg_threads[tid].m_edi;
   VG_(baseBlock)[VGOFF_(m_ebp)] = vg_threads[tid].m_ebp;
   VG_(baseBlock)[VGOFF_(m_esp)] = vg_threads[tid].m_esp;
   VG_(baseBlock)[VGOFF_(m_eflags)] = vg_threads[tid].m_eflags;
   VG_(baseBlock)[VGOFF_(m_eip)] = vg_threads[tid].m_eip;

   for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
      VG_(baseBlock)[VGOFF_(m_fpustate) + i] = vg_threads[tid].m_fpu[i];

   VG_(baseBlock)[VGOFF_(sh_eax)] = vg_threads[tid].sh_eax;
   VG_(baseBlock)[VGOFF_(sh_ebx)] = vg_threads[tid].sh_ebx;
   VG_(baseBlock)[VGOFF_(sh_ecx)] = vg_threads[tid].sh_ecx;
   VG_(baseBlock)[VGOFF_(sh_edx)] = vg_threads[tid].sh_edx;
   VG_(baseBlock)[VGOFF_(sh_esi)] = vg_threads[tid].sh_esi;
   VG_(baseBlock)[VGOFF_(sh_edi)] = vg_threads[tid].sh_edi;
   VG_(baseBlock)[VGOFF_(sh_ebp)] = vg_threads[tid].sh_ebp;
   VG_(baseBlock)[VGOFF_(sh_esp)] = vg_threads[tid].sh_esp;
   VG_(baseBlock)[VGOFF_(sh_eflags)] = vg_threads[tid].sh_eflags;

   vg_tid_currently_in_baseBlock = tid;
}


/* Copy the state of a thread from VG_(baseBlock), presumably after it
   has been descheduled.  For sanity-check purposes, fill the vacated
   VG_(baseBlock) with garbage so as to make the system more likely to
   fail quickly if we erroneously continue to poke around inside
   VG_(baseBlock) without first doing a load_thread_state().
*/
__inline__
void VG_(save_thread_state) ( ThreadId tid )
{
   Int i;
   const UInt junk = 0xDEADBEEF;

   vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);

   vg_threads[tid].m_eax = VG_(baseBlock)[VGOFF_(m_eax)];
   vg_threads[tid].m_ebx = VG_(baseBlock)[VGOFF_(m_ebx)];
   vg_threads[tid].m_ecx = VG_(baseBlock)[VGOFF_(m_ecx)];
   vg_threads[tid].m_edx = VG_(baseBlock)[VGOFF_(m_edx)];
   vg_threads[tid].m_esi = VG_(baseBlock)[VGOFF_(m_esi)];
   vg_threads[tid].m_edi = VG_(baseBlock)[VGOFF_(m_edi)];
   vg_threads[tid].m_ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
   vg_threads[tid].m_esp = VG_(baseBlock)[VGOFF_(m_esp)];
   vg_threads[tid].m_eflags = VG_(baseBlock)[VGOFF_(m_eflags)];
   vg_threads[tid].m_eip = VG_(baseBlock)[VGOFF_(m_eip)];

   for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
      vg_threads[tid].m_fpu[i] = VG_(baseBlock)[VGOFF_(m_fpustate) + i];

   vg_threads[tid].sh_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
   vg_threads[tid].sh_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
   vg_threads[tid].sh_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
   vg_threads[tid].sh_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
   vg_threads[tid].sh_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
   vg_threads[tid].sh_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
   vg_threads[tid].sh_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
   vg_threads[tid].sh_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
   vg_threads[tid].sh_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];

   /* Fill it up with junk. */
   VG_(baseBlock)[VGOFF_(m_eax)] = junk;
   VG_(baseBlock)[VGOFF_(m_ebx)] = junk;
   VG_(baseBlock)[VGOFF_(m_ecx)] = junk;
   VG_(baseBlock)[VGOFF_(m_edx)] = junk;
   VG_(baseBlock)[VGOFF_(m_esi)] = junk;
   VG_(baseBlock)[VGOFF_(m_edi)] = junk;
   VG_(baseBlock)[VGOFF_(m_ebp)] = junk;
   VG_(baseBlock)[VGOFF_(m_esp)] = junk;
   VG_(baseBlock)[VGOFF_(m_eflags)] = junk;
   VG_(baseBlock)[VGOFF_(m_eip)] = junk;

   for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
      VG_(baseBlock)[VGOFF_(m_fpustate) + i] = junk;

   vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;
}

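
/* Note: the two functions above maintain the invariant that at most
   one thread's state is in VG_(baseBlock) at any moment, with
   vg_tid_currently_in_baseBlock == VG_INVALID_THREADID exactly when
   no thread is loaded; both assert this at entry. */
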

/* Run the thread tid for a while, and return a VG_TRC_* value to the
   scheduler indicating what happened. */
static
UInt run_thread_for_a_while ( ThreadId tid )
{
   volatile UInt trc = 0;
   vg_assert(is_valid_tid(tid));
   vg_assert(vg_threads[tid].status == VgTs_Runnable);
   vg_assert(VG_(bbs_to_go) > 0);

   VG_(load_thread_state) ( tid );
   if (__builtin_setjmp(VG_(scheduler_jmpbuf)) == 0) {
      /* try this ... */
      trc = VG_(run_innerloop)();
      /* We get here if the client didn't take a fault. */
   } else {
      /* We get here if the client took a fault, which caused our
         signal handler to longjmp. */
      vg_assert(trc == 0);
      trc = VG_TRC_UNRESUMABLE_SIGNAL;
   }
   VG_(save_thread_state) ( tid );
   return trc;
}

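/* Note on the __builtin_setjmp above: vg_oursignalhandler() longjmps
   through VG_(scheduler_jmpbuf) when the client takes a fault it
   cannot be resumed from, recording the signal number in
   VG_(longjmpd_on_signal); run_thread_for_a_while() then reports this
   to the scheduler as VG_TRC_UNRESUMABLE_SIGNAL. */
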

/* Increment the LRU epoch counter. */
static
void increment_epoch ( void )
{
   VG_(current_epoch)++;
   if (VG_(clo_verbosity) > 2) {
      UInt tt_used, tc_used;
      VG_(get_tt_tc_used) ( &tt_used, &tc_used );
      VG_(message)(Vg_UserMsg,
         "%lu bbs, in: %d (%d -> %d), out %d (%d -> %d), TT %d, TC %d",
         VG_(bbs_done),
         VG_(this_epoch_in_count),
         VG_(this_epoch_in_osize),
         VG_(this_epoch_in_tsize),
         VG_(this_epoch_out_count),
         VG_(this_epoch_out_osize),
         VG_(this_epoch_out_tsize),
         tt_used, tc_used
      );
   }
   VG_(this_epoch_in_count) = 0;
   VG_(this_epoch_in_osize) = 0;
   VG_(this_epoch_in_tsize) = 0;
   VG_(this_epoch_out_count) = 0;
   VG_(this_epoch_out_osize) = 0;
   VG_(this_epoch_out_tsize) = 0;
}


/* Initialise the scheduler.  Create a single "main" thread ready to
   run, with special ThreadId of one.  This is called at startup; the
   caller takes care to park the client's state in VG_(baseBlock).
*/
void VG_(scheduler_init) ( void )
{
   Int      i;
   Addr     startup_esp;
   ThreadId tid_main;

   startup_esp = VG_(baseBlock)[VGOFF_(m_esp)];
   if ((startup_esp & VG_STARTUP_STACK_MASK) != VG_STARTUP_STACK_MASK) {
      VG_(printf)("%%esp at startup = %p is not near %p; aborting\n",
                  (void*)startup_esp, (void*)VG_STARTUP_STACK_MASK);
      VG_(panic)("unexpected %esp at startup");
   }

   for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
      vg_threads[i].status     = VgTs_Empty;
      vg_threads[i].stack_size = 0;
      vg_threads[i].stack_base = (Addr)NULL;
      vg_threads[i].tid        = i;
   }

   for (i = 0; i < VG_N_WAITING_FDS; i++)
      vg_waiting_fds[i].fd = -1; /* not in use */

   for (i = 0; i < VG_N_THREAD_KEYS; i++) {
      vg_thread_keys[i].inuse      = False;
      vg_thread_keys[i].destructor = NULL;
   }

   /* Assert this is thread one, which has certain magic
      properties. */
   tid_main = vg_alloc_ThreadState();
   vg_assert(tid_main == 1);

   vg_threads[tid_main].status        = VgTs_Runnable;
   vg_threads[tid_main].joiner        = VG_INVALID_THREADID;
   vg_threads[tid_main].associated_mx = NULL;
   vg_threads[tid_main].associated_cv = NULL;
   vg_threads[tid_main].retval        = NULL; /* not important */
   vg_threads[tid_main].stack_highest_word
      = vg_threads[tid_main].m_esp /* -4  ??? */;
   for (i = 0; i < VG_N_THREAD_KEYS; i++)
      vg_threads[tid_main].specifics[i] = NULL;

   /* Copy VG_(baseBlock) state to tid_main's slot. */
   vg_tid_currently_in_baseBlock = tid_main;
   VG_(save_thread_state) ( tid_main );

   /* So now ... */
   vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);
}


/* What if fd isn't a valid fd? */
static
void set_fd_nonblocking ( Int fd )
{
   Int res = VG_(fcntl)( fd, VKI_F_GETFL, 0 );
   vg_assert(!VG_(is_kerror)(res));
   res |= VKI_O_NONBLOCK;
   res = VG_(fcntl)( fd, VKI_F_SETFL, res );
   vg_assert(!VG_(is_kerror)(res));
}

static
void set_fd_blocking ( Int fd )
{
   Int res = VG_(fcntl)( fd, VKI_F_GETFL, 0 );
   vg_assert(!VG_(is_kerror)(res));
   res &= ~VKI_O_NONBLOCK;
   res = VG_(fcntl)( fd, VKI_F_SETFL, res );
   vg_assert(!VG_(is_kerror)(res));
}

static
Bool fd_is_blockful ( Int fd )
{
   Int res = VG_(fcntl)( fd, VKI_F_GETFL, 0 );
   vg_assert(!VG_(is_kerror)(res));
   return (res & VKI_O_NONBLOCK) ? False : True;
}


/* Possibly do a trivial client request for tid.  Return values are:

   True = request done.  Thread may or may not be still runnable;
   caller must check.  If it is still runnable, the result will be in
   the thread's %EDX as expected.

   False = request not done.  A more capable but slower mechanism will
   deal with it.
*/
static
Bool maybe_do_trivial_clientreq ( ThreadId tid )
{
#  define SIMPLE_RETURN(vvv)             \
       { tst->m_edx = (vvv);             \
         return True;                    \
       }

   ThreadState* tst = &vg_threads[tid];
   UInt* arg    = (UInt*)(tst->m_eax);
   UInt  req_no = arg[0];

   switch (req_no) {
      case VG_USERREQ__MALLOC:
         SIMPLE_RETURN(
            (UInt)VG_(client_malloc) ( tst, arg[1], Vg_AllocMalloc )
         );
      case VG_USERREQ__BUILTIN_NEW:
         SIMPLE_RETURN(
            (UInt)VG_(client_malloc) ( tst, arg[1], Vg_AllocNew )
         );
      case VG_USERREQ__BUILTIN_VEC_NEW:
         SIMPLE_RETURN(
            (UInt)VG_(client_malloc) ( tst, arg[1], Vg_AllocNewVec )
         );
      case VG_USERREQ__FREE:
         VG_(client_free) ( tst, (void*)arg[1], Vg_AllocMalloc );
         SIMPLE_RETURN(0); /* irrelevant */
      case VG_USERREQ__BUILTIN_DELETE:
         VG_(client_free) ( tst, (void*)arg[1], Vg_AllocNew );
         SIMPLE_RETURN(0); /* irrelevant */
      case VG_USERREQ__BUILTIN_VEC_DELETE:
         VG_(client_free) ( tst, (void*)arg[1], Vg_AllocNewVec );
         SIMPLE_RETURN(0); /* irrelevant */
      case VG_USERREQ__CALLOC:
         SIMPLE_RETURN(
            (UInt)VG_(client_calloc) ( tst, arg[1], arg[2] )
         );
      case VG_USERREQ__REALLOC:
         SIMPLE_RETURN(
            (UInt)VG_(client_realloc) ( tst, (void*)arg[1], arg[2] )
         );
      case VG_USERREQ__MEMALIGN:
         SIMPLE_RETURN(
            (UInt)VG_(client_memalign) ( tst, arg[1], arg[2] )
         );

      /* These are heavily used -- or at least we want them to be
         cheap. */
      case VG_USERREQ__PTHREAD_GET_THREADID:
         SIMPLE_RETURN(tid);
      case VG_USERREQ__RUNNING_ON_VALGRIND:
         SIMPLE_RETURN(1);
      case VG_USERREQ__GET_PTHREAD_TRACE_LEVEL:
         SIMPLE_RETURN(VG_(clo_trace_pthread_level));
      case VG_USERREQ__READ_MILLISECOND_TIMER:
         SIMPLE_RETURN(VG_(read_millisecond_timer)());

      case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
         do_pthread_mutex_unlock( tid, (void *)(arg[1]) );
         return True;

      /* This may make thread tid non-runnable, but the scheduler
         checks for that on return from this function. */
      case VG_USERREQ__PTHREAD_MUTEX_LOCK:
         do_pthread_mutex_lock( tid, False, (void *)(arg[1]) );
         return True;

      case VG_USERREQ__PTHREAD_MUTEX_TRYLOCK:
         do_pthread_mutex_lock( tid, True, (void *)(arg[1]) );
         return True;

      default:
         /* Too hard; wimp out. */
         return False;
   }
#  undef SIMPLE_RETURN
}

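
/* Sketch of the client-request convention the above relies on: the
   client-side stub (presumably in the client-side pthread/valgrind
   library, not in this file) builds a small argument block with the
   request code in word 0 and operands following, puts the block's
   address in %EAX, and traps into the scheduler.  Roughly, for a
   mutex lock:

      UInt arg[4];
      arg[0] = VG_USERREQ__PTHREAD_MUTEX_LOCK;
      arg[1] = (UInt)mutex;
      .. place &arg[0] in %EAX, trap, read any result from %EDX ..

   which is exactly the layout unpicked by maybe_do_trivial_clientreq()
   and do_nontrivial_clientreq(). */
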

/* vthread tid is returning from a signal handler; modify its
   stack/regs accordingly. */
static
void handle_signal_return ( ThreadId tid )
{
   Char msg_buf[100];
   Bool restart_blocked_syscalls;

   vg_assert(is_valid_tid(tid));

   restart_blocked_syscalls = VG_(signal_returns)(tid);

   if (restart_blocked_syscalls)
      /* Easy; we don't have to do anything. */
      return;

   if (vg_threads[tid].status == VgTs_WaitFD) {
      vg_assert(vg_threads[tid].m_eax == __NR_read
                || vg_threads[tid].m_eax == __NR_write);
      /* read() or write() interrupted.  Force a return with EINTR. */
      vg_threads[tid].m_eax = -VKI_EINTR;
      vg_threads[tid].status = VgTs_Runnable;
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,
            "read() / write() interrupted by signal; return EINTR" );
         print_sched_event(tid, msg_buf);
      }
      return;
   }

   /* Was VgTs_WaitFD in the original, but that case is handled (and
      returned from) above; the assert shows VgTs_Sleeping was meant. */
   if (vg_threads[tid].status == VgTs_Sleeping) {
      vg_assert(vg_threads[tid].m_eax == __NR_nanosleep);
      /* We interrupted a nanosleep().  The right thing to do is to
         write the unused time to nanosleep's second param and return
         EINTR, but I'm too lazy for that. */
      return;
   }

   /* All other cases?  Just return. */
}

static
void sched_do_syscall ( ThreadId tid )
{
   UInt saved_eax;
   UInt res, syscall_no;
   UInt fd;
   Bool might_block, assumed_nonblocking;
   Bool orig_fd_blockness;
   Char msg_buf[100];

   vg_assert(is_valid_tid(tid));
   vg_assert(vg_threads[tid].status == VgTs_Runnable);

   syscall_no = vg_threads[tid].m_eax; /* syscall number */

   if (syscall_no == __NR_nanosleep) {
      UInt t_now, t_awaken;
      struct vki_timespec* req;
      req = (struct vki_timespec*)vg_threads[tid].m_ebx; /* arg1 */
      t_now = VG_(read_millisecond_timer)();
      t_awaken
         = t_now
           + (UInt)1000ULL * (UInt)(req->tv_sec)
           + (UInt)(req->tv_nsec) / 1000000;
      vg_threads[tid].status    = VgTs_Sleeping;
      vg_threads[tid].awaken_at = t_awaken;
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf, "at %d: nanosleep for %d",
                               t_now, t_awaken-t_now);
         print_sched_event(tid, msg_buf);
      }
      /* Force the scheduler to run something else for a while. */
      return;
   }

   switch (syscall_no) {
      case __NR_read:
      case __NR_write:
         assumed_nonblocking
            = False;
         might_block
            = fd_is_blockful(vg_threads[tid].m_ebx /* arg1 */);
         break;
      default:
         might_block = False;
         assumed_nonblocking = True;
   }

   if (assumed_nonblocking) {
      /* We think it's non-blocking.  Just do it in the normal way. */
      VG_(perform_assumed_nonblocking_syscall)(tid);
      /* The thread is still runnable. */
      return;
   }

   /* It might block.  Take evasive action. */
   switch (syscall_no) {
      case __NR_read:
      case __NR_write:
         fd = vg_threads[tid].m_ebx; break;
      default:
         vg_assert(3+3 == 7); /* cannot happen; only read/write get here */
   }

   /* Set the fd to nonblocking, and do the syscall, which will return
      immediately, in order to lodge a request with the Linux kernel.
      We later poll for I/O completion using select(). */

   orig_fd_blockness = fd_is_blockful(fd);
   set_fd_nonblocking(fd);
   vg_assert(!fd_is_blockful(fd));
   VG_(check_known_blocking_syscall)(tid, syscall_no, NULL /* PRE */);

   /* This trashes the thread's %eax; we have to preserve it. */
   saved_eax = vg_threads[tid].m_eax;
   KERNEL_DO_SYSCALL(tid,res);

   /* Restore original blockfulness of the fd. */
   if (orig_fd_blockness)
      set_fd_blocking(fd);
   else
      set_fd_nonblocking(fd);

   if (res != -VKI_EWOULDBLOCK) {
      /* It didn't block; it went through immediately.  So finish off
         in the normal way.  Don't restore %EAX, since that now
         (correctly) holds the result of the call. */
      VG_(check_known_blocking_syscall)(tid, syscall_no, &res /* POST */);
      /* We're still runnable. */
      vg_assert(vg_threads[tid].status == VgTs_Runnable);

   } else {

      /* It would have blocked.  First, restore %EAX to what it was
         before our speculative call. */
      vg_threads[tid].m_eax = saved_eax;
      /* Put this fd in a table of fds on which we are waiting for
         completion.  The arguments for select() later are constructed
         from this table. */
      add_waiting_fd(tid, fd, saved_eax /* which holds the syscall # */);
      /* Deschedule thread until an I/O completion happens. */
      vg_threads[tid].status = VgTs_WaitFD;
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,"block until I/O ready on fd %d", fd);
         print_sched_event(tid, msg_buf);
      }

   }
}

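
/* Summary of the dance in sched_do_syscall() for a read/write that
   might block:
     (1) remember the fd's blockingness, then set it non-blocking;
     (2) issue the syscall once, so the request is lodged with the
         kernel;
     (3) restore the fd's original blockingness;
     (4) if the call completed, deliver the result now; otherwise
         restore the saved %EAX, park the thread in VgTs_WaitFD, and
         leave poll_for_ready_fds()/complete_blocked_syscalls() to
         finish the job later. */
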

/* Find out which of the fds in vg_waiting_fds are now ready to go, by
   making enquiries with select(), and mark them as ready.  We have to
   wait for the requesting threads to fall into the WaitFD state
   before we can actually finally deliver the results, so this
   procedure doesn't do that; complete_blocked_syscalls() does it.

   It might seem odd that a thread which has done a blocking syscall
   is not in WaitFD state; the way this can happen is if it initially
   becomes WaitFD, but then a signal is delivered to it, so it becomes
   Runnable for a while.  In this case we have to wait for the
   sighandler to return, whereupon the WaitFD state is resumed, and
   only at that point can the I/O result be delivered to it.  However,
   this point may be long after the fd is actually ready.

   So, poll_for_ready_fds() merely detects fds which are ready.
   complete_blocked_syscalls() does the second half of the trick,
   possibly much later: it delivers the results from ready fds to
   threads in WaitFD state.
*/
static
void poll_for_ready_fds ( void )
{
   vki_ksigset_t      saved_procmask;
   vki_fd_set         readfds;
   vki_fd_set         writefds;
   vki_fd_set         exceptfds;
   struct vki_timeval timeout;
   Int                fd, fd_max, i, n_ready, syscall_no, n_ok;
   ThreadId           tid;
   Bool               rd_ok, wr_ok, ex_ok;
   Char               msg_buf[100];

   struct vki_timespec* rem;
   UInt                 t_now;

   /* Awaken any sleeping threads whose sleep has expired. */
   for (tid = 1; tid < VG_N_THREADS; tid++)
      if (vg_threads[tid].status == VgTs_Sleeping)
         break;

   /* Avoid pointless calls to VG_(read_millisecond_timer). */
   if (tid < VG_N_THREADS) {
      t_now = VG_(read_millisecond_timer)();
      for (tid = 1; tid < VG_N_THREADS; tid++) {
         if (vg_threads[tid].status != VgTs_Sleeping)
            continue;
         if (t_now >= vg_threads[tid].awaken_at) {
            /* Resume this thread.  Set to zero the remaining-time
               (second) arg of nanosleep, since it's used up all its
               time. */
            vg_assert(vg_threads[tid].m_eax == __NR_nanosleep);
            rem = (struct vki_timespec *)vg_threads[tid].m_ecx; /* arg2 */
            if (rem != NULL) {
               rem->tv_sec = 0;
               rem->tv_nsec = 0;
            }
            /* Make the syscall return 0 (success). */
            vg_threads[tid].m_eax = 0;
            /* Reschedule this thread. */
            vg_threads[tid].status = VgTs_Runnable;
            if (VG_(clo_trace_sched)) {
               VG_(sprintf)(msg_buf, "at %d: nanosleep done",
                                     t_now);
               print_sched_event(tid, msg_buf);
            }
         }
      }
   }

   /* And look for threads waiting on file descriptors which are now
      ready for I/O. */
   timeout.tv_sec = 0;
   timeout.tv_usec = 0;

   VKI_FD_ZERO(&readfds);
   VKI_FD_ZERO(&writefds);
   VKI_FD_ZERO(&exceptfds);
   fd_max = -1;
   for (i = 0; i < VG_N_WAITING_FDS; i++) {
      if (vg_waiting_fds[i].fd == -1 /* not in use */)
         continue;
      if (vg_waiting_fds[i].ready /* already ready? */)
         continue;
      fd = vg_waiting_fds[i].fd;
      /* VG_(printf)("adding QUERY for fd %d\n", fd); */
      vg_assert(fd >= 0);
      if (fd > fd_max)
         fd_max = fd;
      tid = vg_waiting_fds[i].tid;
      vg_assert(is_valid_tid(tid));
      syscall_no = vg_waiting_fds[i].syscall_no;
      switch (syscall_no) {
         case __NR_read:
            VKI_FD_SET(fd, &readfds); break;
         case __NR_write:
            VKI_FD_SET(fd, &writefds); break;
         default:
            VG_(panic)("poll_for_ready_fds: unexpected syscall");
            /*NOTREACHED*/
            break;
      }
   }

   /* Short cut: if no fds are waiting, give up now. */
   if (fd_max == -1)
      return;

   /* BLOCK ALL SIGNALS.  We don't want the complication of select()
      getting interrupted. */
   VG_(block_all_host_signals)( &saved_procmask );

   n_ready = VG_(select)
                ( fd_max+1, &readfds, &writefds, &exceptfds, &timeout);
   if (VG_(is_kerror)(n_ready)) {
      VG_(printf)("poll_for_ready_fds: select returned %d\n", n_ready);
      VG_(panic)("poll_for_ready_fds: select failed?!");
      /*NOTREACHED*/
   }

   /* UNBLOCK ALL SIGNALS */
   VG_(restore_host_signals)( &saved_procmask );

   /* VG_(printf)("poll_for_io_completions: %d fds ready\n", n_ready); */

   if (n_ready == 0)
      return;

   /* Inspect all the fds we know about, and handle any completions that
      have happened. */
   /*
   VG_(printf)("\n\n");
   for (fd = 0; fd < 100; fd++)
     if (VKI_FD_ISSET(fd, &writefds) || VKI_FD_ISSET(fd, &readfds)) {
       VG_(printf)("X"); } else { VG_(printf)("."); };
   VG_(printf)("\n\nfd_max = %d\n", fd_max);
   */

   for (fd = 0; fd <= fd_max; fd++) {
      rd_ok = VKI_FD_ISSET(fd, &readfds);
      wr_ok = VKI_FD_ISSET(fd, &writefds);
      ex_ok = VKI_FD_ISSET(fd, &exceptfds);

      n_ok = (rd_ok ? 1 : 0) + (wr_ok ? 1 : 0) + (ex_ok ? 1 : 0);
      if (n_ok == 0)
         continue;
      if (n_ok > 1) {
         VG_(printf)("offending fd = %d\n", fd);
         VG_(panic)("poll_for_ready_fds: multiple events on fd");
      }

      /* An I/O event completed for fd.  Find the thread which
         requested this. */
      for (i = 0; i < VG_N_WAITING_FDS; i++) {
         if (vg_waiting_fds[i].fd == -1 /* not in use */)
            continue;
         if (vg_waiting_fds[i].fd == fd)
            break;
      }

      /* And a bit more paranoia ... */
      vg_assert(i >= 0 && i < VG_N_WAITING_FDS);

      /* Mark the fd as ready. */
      vg_assert(! vg_waiting_fds[i].ready);
      vg_waiting_fds[i].ready = True;
   }
}


/* See comment attached to poll_for_ready_fds() for explanation. */
static
void complete_blocked_syscalls ( void )
{
   Int      fd, i, res, syscall_no;
   ThreadId tid;
   Char     msg_buf[100];

   /* Inspect all the outstanding fds we know about. */

   for (i = 0; i < VG_N_WAITING_FDS; i++) {
      if (vg_waiting_fds[i].fd == -1 /* not in use */)
         continue;
      if (! vg_waiting_fds[i].ready)
         continue;

      fd  = vg_waiting_fds[i].fd;
      tid = vg_waiting_fds[i].tid;
      vg_assert(is_valid_tid(tid));

      /* The thread actually has to be waiting for the I/O event it
         requested before we can deliver the result! */
      if (vg_threads[tid].status != VgTs_WaitFD)
         continue;

      /* Ok, actually do it!  We can safely use %EAX as the syscall
         number, because the speculative call made by
         sched_do_syscall() doesn't change %EAX in the case where the
         call would have blocked. */

      syscall_no = vg_waiting_fds[i].syscall_no;
      vg_assert(syscall_no == vg_threads[tid].m_eax);
      KERNEL_DO_SYSCALL(tid,res);
      VG_(check_known_blocking_syscall)(tid, syscall_no, &res /* POST */);

      /* Reschedule. */
      vg_threads[tid].status = VgTs_Runnable;
      /* Mark slot as no longer in use. */
      vg_waiting_fds[i].fd = -1;
      /* pp_sched_status(); */
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,"resume due to I/O completion on fd %d", fd);
         print_sched_event(tid, msg_buf);
      }
   }
}


static
void check_for_pthread_cond_timedwait ( void )
{
   Int i;
   for (i = 1; i < VG_N_THREADS; i++) {
      if (vg_threads[i].status != VgTs_WaitCV)
         continue;
      if (vg_threads[i].awaken_at == 0xFFFFFFFF /* no timeout */)
         continue;
      if (VG_(read_millisecond_timer)() >= vg_threads[i].awaken_at)
         do_pthread_cond_timedwait_TIMEOUT(i);
   }
}

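/* Note: .awaken_at == 0xFFFFFFFF encodes "no timeout", so threads in
   a plain pthread_cond_wait() are never woken by the check above;
   only pthread_cond_timedwait() waiters carry a real awaken_at. */
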

static
void nanosleep_for_a_while ( void )
{
   Int res;
   struct vki_timespec req;
   struct vki_timespec rem;
   req.tv_sec = 0;
   req.tv_nsec = 50 * 1000 * 1000;
   res = VG_(nanosleep)( &req, &rem );
   vg_assert(res == 0 /* ok */ || res == 1 /* interrupted by signal */);
}

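/* Note: 50 * 1000 * 1000 ns is 50ms, so when nothing is runnable the
   scheduler re-polls at roughly 20Hz; expired sleeps and
   pthread_cond_timedwait() timeouts are therefore noticed at worst
   around 50ms late. */
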

/* ---------------------------------------------------------------------
   The scheduler proper.
   ------------------------------------------------------------------ */

/* Run user-space threads until either
   * Deadlock occurs
   * One thread asks to shut down Valgrind
   * The specified number of basic blocks has gone by.
*/
VgSchedReturnCode VG_(scheduler) ( void )
{
   ThreadId tid, tid_next;
   UInt     trc;
   UInt     dispatch_ctr_SAVED;
   Int      request_code, done_this_time, n_in_fdwait_or_sleep;
   Char     msg_buf[100];
   Addr     trans_addr;
   Bool     sigs_delivered;

   /* For the LRU structures, records when the epoch began. */
   ULong lru_epoch_started_at = 0;

   /* Start with the root thread.  tid in general indicates the
      currently runnable/just-finished-running thread. */
   tid = 1;

   /* This is the top level scheduler loop.  It falls into three
      phases. */
   while (True) {

      /* ======================= Phase 0 of 3 =======================
         Be paranoid.  Always a good idea. */
     stage1:
      scheduler_sanity();

      /* ======================= Phase 1 of 3 =======================
         Handle I/O completions and signals.  This may change the
         status of various threads.  Then select a new thread to run,
         or declare deadlock, or sleep if there are no runnable
         threads but some are blocked on I/O. */

      /* Age the LRU structures if an epoch has been completed. */
      if (VG_(bbs_done) - lru_epoch_started_at >= VG_BBS_PER_EPOCH) {
         lru_epoch_started_at = VG_(bbs_done);
         increment_epoch();
      }

      /* Was a debug-stop requested? */
      if (VG_(bbs_to_go) == 0)
         goto debug_stop;

      /* Do the following loop until a runnable thread is found, or
         deadlock is detected. */
      while (True) {

         /* For stats purposes only. */
         VG_(num_scheduling_events_MAJOR) ++;

         /* See if any I/O operations which we were waiting for have
            completed, and, if so, make runnable the relevant waiting
            threads. */
         poll_for_ready_fds();
         complete_blocked_syscalls();
         check_for_pthread_cond_timedwait();

         /* See if there are any signals which need to be delivered.  If
            so, choose thread(s) to deliver them to, and build signal
            delivery frames on those thread(s) stacks. */

         /* Be careful about delivering signals to a thread waiting
            for a mutex.  In particular, when the handler is running,
            that thread is temporarily apparently-not-waiting for the
            mutex, so if it is unlocked by another thread whilst the
            handler is running, this thread is not informed.  When the
            handler returns, the thread resumes waiting on the mutex,
            even if, as a result, it has missed the unlocking of it.
            Potential deadlock.  This sounds all very strange, but the
            POSIX standard appears to require this behaviour. */
         sigs_delivered = VG_(deliver_signals)( 1 /*HACK*/ );
         if (sigs_delivered)
            VG_(do_sanity_checks)( 1 /*HACK*/, False );

         /* Try and find a thread (tid) to run. */
         tid_next = tid;
         n_in_fdwait_or_sleep = 0;
         while (True) {
            tid_next++;
            if (tid_next >= VG_N_THREADS) tid_next = 1;
            if (vg_threads[tid_next].status == VgTs_WaitFD
                || vg_threads[tid_next].status == VgTs_Sleeping)
               n_in_fdwait_or_sleep ++;
            if (vg_threads[tid_next].status == VgTs_Runnable)
               break; /* We can run this one. */
            if (tid_next == tid)
               break; /* been all the way round */
         }
         tid = tid_next;

         if (vg_threads[tid].status == VgTs_Runnable) {
            /* Found a suitable candidate.  Fall out of this loop, so
               we can advance to stage 2 of the scheduler: actually
               running the thread. */
            break;
         }

         /* We didn't find a runnable thread.  Now what? */
         if (n_in_fdwait_or_sleep == 0) {
            /* No runnable threads and no prospect of any appearing
               even if we wait for an arbitrary length of time.  In
               short, we have a deadlock. */
            VG_(pp_sched_status)();
            return VgSrc_Deadlock;
         }

         /* At least one thread is in a fd-wait state.  Delay for a
            while, and go round again, in the hope that eventually a
            thread becomes runnable. */
         nanosleep_for_a_while();
         // pp_sched_status();
         // VG_(printf)(".\n");
      }


      /* ======================= Phase 2 of 3 =======================
         Wahey!  We've finally decided that thread tid is runnable, so
         we now do that.  Run it for as much of a quantum as possible.
         Trivial requests are handled and the thread continues.  The
         aim is not to do Phase 1 too often, since it is expensive. */

      if (0)
         VG_(printf)("SCHED: tid %d\n", tid);

      /* Figure out how many bbs to ask vg_run_innerloop to do.  Note
         that it decrements the counter before testing it for zero, so
         that if VG_(dispatch_ctr) is set to N you get at most N-1
         iterations.  Also this means that VG_(dispatch_ctr) must
         exceed zero before entering the innerloop.  Also also, the
         decrement is done before the bb is actually run, so you
         always get at least one decrement even if nothing happens.
      */
      if (VG_(bbs_to_go) >= VG_SCHEDULING_QUANTUM)
         VG_(dispatch_ctr) = VG_SCHEDULING_QUANTUM + 1;
      else
         VG_(dispatch_ctr) = (UInt)VG_(bbs_to_go) + 1;
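
      /* Note: given the decrement-before-test behaviour described
         above, setting VG_(dispatch_ctr) to Q+1 means at most Q basic
         blocks run before the dispatcher returns with
         VG_TRC_INNER_COUNTERZERO. */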

      /* ... and remember what we asked for. */
      dispatch_ctr_SAVED = VG_(dispatch_ctr);

      /* paranoia ... */
      vg_assert(vg_threads[tid].tid == tid);

      /* Actually run thread tid. */
      while (True) {

         /* For stats purposes only. */
         VG_(num_scheduling_events_MINOR) ++;

         if (0)
            VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs",
                                      tid, VG_(dispatch_ctr) - 1 );

         trc = run_thread_for_a_while ( tid );

         /* Deal quickly with trivial scheduling events, and resume the
            thread. */

         if (trc == VG_TRC_INNER_FASTMISS) {
            vg_assert(VG_(dispatch_ctr) > 0);

            /* Trivial event.  Miss in the fast-cache.  Do a full
               lookup for it. */
            trans_addr
               = VG_(search_transtab) ( vg_threads[tid].m_eip );
            if (trans_addr == (Addr)0) {
               /* Not found; we need to request a translation. */
               create_translation_for( tid, vg_threads[tid].m_eip );
               trans_addr = VG_(search_transtab) ( vg_threads[tid].m_eip );
               if (trans_addr == (Addr)0)
                  VG_(panic)("VG_TRC_INNER_FASTMISS: missing tt_fast entry");
            }
            continue; /* with this thread */
         }

         if (trc == VG_TRC_EBP_JMP_CLIENTREQ) {
            Bool done = maybe_do_trivial_clientreq(tid);
            if (done) {
               /* The request is done.  We try and continue with the
                  same thread if still runnable.  If not, go back to
                  Stage 1 to select a new thread to run. */
               if (vg_threads[tid].status == VgTs_Runnable)
                  continue; /* with this thread */
               else
                  goto stage1;
            }
         }

         /* It's an event we can't quickly deal with.  Give up running
            this thread and handle things the expensive way. */
         break;
      }

      /* ======================= Phase 3 of 3 =======================
         Handle non-trivial thread requests, mostly pthread stuff. */

      /* Ok, we've fallen out of the dispatcher for a
         non-completely-trivial reason.  First, update basic-block
         counters. */

      done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 1;
      vg_assert(done_this_time >= 0);
      VG_(bbs_to_go) -= (ULong)done_this_time;
      VG_(bbs_done) += (ULong)done_this_time;

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d: completed %d bbs, trc %d",
                                   tid, done_this_time, (Int)trc );

      if (0 && trc != VG_TRC_INNER_FASTMISS)
         VG_(message)(Vg_DebugMsg, "thread %d: %ld bbs, event %s",
                                   tid, VG_(bbs_done),
                                   name_of_sched_event(trc) );

      /* Examine the thread's return code to figure out why it
         stopped, and handle requests. */

      switch (trc) {

         case VG_TRC_INNER_FASTMISS:
            VG_(panic)("VG_(scheduler): VG_TRC_INNER_FASTMISS");
            /*NOTREACHED*/
            break;

         case VG_TRC_INNER_COUNTERZERO:
            /* Timeslice is out.  Let a new thread be scheduled,
               simply by doing nothing, causing us to arrive back at
               Phase 1. */
            if (VG_(bbs_to_go) == 0) {
               goto debug_stop;
            }
            vg_assert(VG_(dispatch_ctr) == 0);
            break;

         case VG_TRC_UNRESUMABLE_SIGNAL:
            /* It got a SIGSEGV/SIGBUS, which we need to deliver right
               away.  Again, do nothing, so we wind up back at Phase
               1, whereupon the signal will be "delivered". */
            break;

         case VG_TRC_EBP_JMP_SYSCALL:
            /* Do a syscall for the vthread tid.  This could cause it
               to become non-runnable. */
            sched_do_syscall(tid);
            break;

         case VG_TRC_EBP_JMP_CLIENTREQ:
            /* Do a client request for the vthread tid.  Note that
               some requests will have been handled by
               maybe_do_trivial_clientreq(), so we don't expect to see
               those here.
            */
            /* The thread's %EAX points at an arg block, the first
               word of which is the request code. */
            request_code = ((UInt*)(vg_threads[tid].m_eax))[0];
            if (0) {
               VG_(sprintf)(msg_buf, "request 0x%x", request_code );
               print_sched_event(tid, msg_buf);
            }
            /* Do a non-trivial client request for thread tid.  tid's
               %EAX points to a short vector of argument words, the
               first of which is the request code.  The result of the
               request is put in tid's %EDX.  Alternatively, perhaps
               the request causes tid to become non-runnable and/or
               other blocked threads become runnable.  In general we
               can and often do mess with the state of arbitrary
               threads at this point. */
            if (request_code == VG_USERREQ__SHUTDOWN_VALGRIND) {
               return VgSrc_Shutdown;
            } else {
               do_nontrivial_clientreq(tid);
            }
            break;

         default:
            VG_(printf)("\ntrc = %d\n", trc);
            VG_(panic)("VG_(scheduler), phase 3: "
                       "unexpected thread return code");
            /* NOTREACHED */
            break;

      } /* switch (trc) */

      /* That completes Phase 3 of 3.  Return now to the top of the
         main scheduler loop, to Phase 1 of 3. */

   } /* top-level scheduler loop */


   /* NOTREACHED */
   VG_(panic)("scheduler: post-main-loop ?!");
   /* NOTREACHED */

  debug_stop:
   /* If we exited because of a debug stop, print the translation
      of the last block executed -- by translating it again, and
      throwing away the result. */
   VG_(printf)(
      "======vvvvvvvv====== LAST TRANSLATION ======vvvvvvvv======\n");
   VG_(translate)( &vg_threads[tid], vg_threads[tid].m_eip, NULL, NULL, NULL );
   VG_(printf)("\n");
   VG_(printf)(
      "======^^^^^^^^====== LAST TRANSLATION ======^^^^^^^^======\n");

   return VgSrc_BbsDone;
}


/* ---------------------------------------------------------------------
   The pthread implementation.
   ------------------------------------------------------------------ */

#include <pthread.h>
#include <errno.h>

#if !defined(PTHREAD_STACK_MIN)
#  define PTHREAD_STACK_MIN (16384 - VG_AR_CLIENT_STACKBASE_REDZONE_SZB)
#endif

/* /usr/include/bits/pthreadtypes.h:
   typedef unsigned long int pthread_t;
*/


/* -----------------------------------------------------------
   Thread CREATION, JOINAGE and CANCELLATION.
   -------------------------------------------------------- */

static
void do_pthread_cancel ( ThreadId  tid_canceller,
                         pthread_t tid_cancellee )
{
   Char msg_buf[100];
   /* We want to make it appear that this thread has returned to
      VG_(pthreadreturn_bogusRA) with PTHREAD_CANCELED as the return
      value.  So: simple: put PTHREAD_CANCELED into %EAX and
      &VG_(pthreadreturn_bogusRA) into %EIP and keep going! */
   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf, "cancelled by %d", tid_canceller);
      print_sched_event(tid_cancellee, msg_buf);
   }
   vg_threads[tid_cancellee].m_eax  = (UInt)PTHREAD_CANCELED;
   vg_threads[tid_cancellee].m_eip  = (UInt)&VG_(pthreadreturn_bogusRA);
   vg_threads[tid_cancellee].status = VgTs_Runnable;
}

1439
static
void do_pthread_exit ( ThreadId tid, void* retval )
{
   Char msg_buf[100];
   /* We want to make it appear that this thread has returned from
      its root function, with retval as the return value.  So:
      simply put retval into %EAX and &VG_(pthreadreturn_bogusRA)
      into %EIP and keep going! */
   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf, "exiting with %p", retval);
      print_sched_event(tid, msg_buf);
   }
   vg_threads[tid].m_eax  = (UInt)retval;
   vg_threads[tid].m_eip  = (UInt)&VG_(pthreadreturn_bogusRA);
   vg_threads[tid].status = VgTs_Runnable;
}


/* Thread tid is exiting, by returning from the function it was
   created with.  Or possibly due to pthread_exit or cancellation.
   The main complication here is to resume any thread waiting to join
   with this one. */
static
void handle_pthread_return ( ThreadId tid, void* retval )
{
   ThreadId jnr; /* joiner, the thread calling pthread_join. */
   UInt*    jnr_args;
   void**   jnr_thread_return;
   Char     msg_buf[100];

   /* Mark it as not in use.  Leave the stack in place so the next
      user of this slot doesn't reallocate it. */
   vg_assert(is_valid_tid(tid));
   vg_assert(vg_threads[tid].status != VgTs_Empty);

   vg_threads[tid].retval = retval;

   if (vg_threads[tid].joiner == VG_INVALID_THREADID) {
      /* No one has yet done a join on me */
      vg_threads[tid].status = VgTs_WaitJoiner;
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,
            "root fn returns, waiting for a call to pthread_join(%d)",
            tid);
         print_sched_event(tid, msg_buf);
      }
   } else {
      /* Someone is waiting; make their join call return with success,
         putting my exit code in the place specified by the caller's
         thread_return param.  This is all very horrible, since we
         need to consult the joiner's arg block -- pointed to by its
         %EAX -- in order to extract the 2nd param of its pthread_join
         call.  TODO: free properly the slot (also below).
      */
      jnr = vg_threads[tid].joiner;
      vg_assert(is_valid_tid(jnr));
      vg_assert(vg_threads[jnr].status == VgTs_WaitJoinee);
      jnr_args = (UInt*)vg_threads[jnr].m_eax;
      jnr_thread_return = (void**)(jnr_args[2]);
      if (jnr_thread_return != NULL)
         *jnr_thread_return = vg_threads[tid].retval;
      vg_threads[jnr].m_edx = 0; /* success */
      vg_threads[jnr].status = VgTs_Runnable;
      vg_threads[tid].status = VgTs_Empty; /* bye! */
      if (VG_(clo_instrument) && tid != 0)
         VGM_(make_noaccess)( vg_threads[tid].stack_base,
                              vg_threads[tid].stack_size );
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,
            "root fn returns, to find a waiting pthread_join(%d)", tid);
         print_sched_event(tid, msg_buf);
         VG_(sprintf)(msg_buf,
            "my pthread_join(%d) returned; resuming", tid);
         print_sched_event(jnr, msg_buf);
      }
   }

   /* Return value is irrelevant; this thread will not get
      rescheduled. */
}


static
void do_pthread_join ( ThreadId tid, ThreadId jee, void** thread_return )
{
   Char msg_buf[100];

   /* jee, the joinee, is the thread specified as an arg in thread
      tid's call to pthread_join.  So tid is the join-er. */
   vg_assert(is_valid_tid(tid));
   vg_assert(vg_threads[tid].status == VgTs_Runnable);

   if (jee == tid) {
      vg_threads[tid].m_edx = EDEADLK; /* libc constant, not a kernel one */
      vg_threads[tid].status = VgTs_Runnable;
      return;
   }

   if (jee < 0
       || jee >= VG_N_THREADS
       || vg_threads[jee].status == VgTs_Empty) {
      /* Invalid thread to join to. */
      vg_threads[tid].m_edx = EINVAL;
      vg_threads[tid].status = VgTs_Runnable;
      return;
   }

   if (vg_threads[jee].joiner != VG_INVALID_THREADID) {
      /* Someone already did join on this thread */
      vg_threads[tid].m_edx = EINVAL;
      vg_threads[tid].status = VgTs_Runnable;
      return;
   }

   /* if (vg_threads[jee].detached) ... */

   /* Perhaps the joinee has already finished?  If so return
      immediately with its return code, and free up the slot.  TODO:
      free it properly (also above). */
   if (vg_threads[jee].status == VgTs_WaitJoiner) {
      vg_assert(vg_threads[jee].joiner == VG_INVALID_THREADID);
      vg_threads[tid].m_edx = 0; /* success */
      if (thread_return != NULL)
         *thread_return = vg_threads[jee].retval;
      vg_threads[tid].status = VgTs_Runnable;
      vg_threads[jee].status = VgTs_Empty; /* bye! */
      if (VG_(clo_instrument) && jee != 0)
         VGM_(make_noaccess)( vg_threads[jee].stack_base,
                              vg_threads[jee].stack_size );
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,
            "someone called pthread_join() on me; bye!");
         print_sched_event(jee, msg_buf);
         VG_(sprintf)(msg_buf,
            "my pthread_join(%d) returned immediately",
            jee );
         print_sched_event(tid, msg_buf);
      }
      return;
   }

   /* Ok, so we'll have to wait on jee. */
   vg_threads[jee].joiner = tid;
   vg_threads[tid].status = VgTs_WaitJoinee;
   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf,
         "blocking on call of pthread_join(%d)", jee );
      print_sched_event(tid, msg_buf);
   }
   /* So tid's join call does not return just now. */
}

static
void do_pthread_create ( ThreadId parent_tid,
                         pthread_t* thread,
                         pthread_attr_t* attr,
                         void* (*start_routine)(void *),
                         void* arg )
{
   Int      i;
   Addr     new_stack;
   UInt     new_stk_szb;
   ThreadId tid;
   Char     msg_buf[100];

   /* Paranoia ... */
   vg_assert(sizeof(pthread_t) == sizeof(UInt));

   vg_assert(vg_threads[parent_tid].status != VgTs_Empty);

   tid = vg_alloc_ThreadState();

   /* If we've created the main thread's tid, we're in deep trouble :) */
   vg_assert(tid != 1);
   vg_assert(is_valid_tid(tid));

   /* Copy the parent's CPU state into the child's, in a roundabout
      way (via baseBlock). */
   VG_(load_thread_state)(parent_tid);
   VG_(save_thread_state)(tid);

   /* Consider allocating the child a stack, if the one it already has
      is inadequate. */
   new_stk_szb = PTHREAD_STACK_MIN;

   if (new_stk_szb > vg_threads[tid].stack_size) {
      /* Again, for good measure :) We definitely don't want to be
         allocating a stack for the main thread. */
      vg_assert(tid != 1);
      /* for now, we don't handle the case of anything other than
         assigning it for the first time. */
      vg_assert(vg_threads[tid].stack_size == 0);
      vg_assert(vg_threads[tid].stack_base == (Addr)NULL);
      new_stack = (Addr)VG_(get_memory_from_mmap)( new_stk_szb );
      vg_threads[tid].stack_base = new_stack;
      vg_threads[tid].stack_size = new_stk_szb;
      vg_threads[tid].stack_highest_word
         = new_stack + new_stk_szb
                     - VG_AR_CLIENT_STACKBASE_REDZONE_SZB; /* -4  ??? */
   }

   vg_threads[tid].m_esp
      = vg_threads[tid].stack_base
        + vg_threads[tid].stack_size
        - VG_AR_CLIENT_STACKBASE_REDZONE_SZB;

   if (VG_(clo_instrument))
      VGM_(make_noaccess)( vg_threads[tid].m_esp,
                           VG_AR_CLIENT_STACKBASE_REDZONE_SZB );

   /* push arg */
   vg_threads[tid].m_esp -= 4;
   * (UInt*)(vg_threads[tid].m_esp) = (UInt)arg;

   /* push (magical) return address */
   vg_threads[tid].m_esp -= 4;
   * (UInt*)(vg_threads[tid].m_esp) = (UInt)VG_(pthreadreturn_bogusRA);

   if (VG_(clo_instrument))
      VGM_(make_readable)( vg_threads[tid].m_esp, 2 * 4 );

   /* this is where we start */
   vg_threads[tid].m_eip = (UInt)start_routine;

   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf,
         "new thread, created by %d", parent_tid );
      print_sched_event(tid, msg_buf);
   }

   /* store the thread id in *thread. */
   //   if (VG_(clo_instrument))
   // ***** CHECK *thread is writable
   *thread = (pthread_t)tid;

   vg_threads[tid].associated_mx = NULL;
   vg_threads[tid].associated_cv = NULL;
   vg_threads[tid].joiner        = VG_INVALID_THREADID;
   vg_threads[tid].status        = VgTs_Runnable;

   for (i = 0; i < VG_N_THREAD_KEYS; i++)
      vg_threads[tid].specifics[i] = NULL;

   /* return zero */
   vg_threads[tid].m_edx = 0; /* success */
}


/* -----------------------------------------------------------
   MUTEXes
   -------------------------------------------------------- */

/* pthread_mutex_t is a struct with 5 words:
      typedef struct
      {
        int __m_reserved;         -- Reserved for future use
        int __m_count;            -- Depth of recursive locking
        _pthread_descr __m_owner; -- Owner thread (if recursive or errcheck)
        int __m_kind;             -- Mutex kind: fast, recursive or errcheck
        struct _pthread_fastlock __m_lock;  -- Underlying fast lock
      } pthread_mutex_t;

   #define PTHREAD_MUTEX_INITIALIZER \
     {0, 0, 0, PTHREAD_MUTEX_TIMED_NP, __LOCK_INITIALIZER}
   # define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \
     {0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, __LOCK_INITIALIZER}
   # define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP \
     {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, __LOCK_INITIALIZER}
   # define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
     {0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, __LOCK_INITIALIZER}

   How we use it:

   __m_kind never changes and indicates whether or not it is recursive.

   __m_count indicates the lock count; if 0, the mutex is not owned by
   anybody.

   __m_owner has a ThreadId value stuffed into it.  We carefully arrange
   that ThreadId == 0 is invalid (VG_INVALID_THREADID), so that
   statically initialised mutexes correctly appear to belong to nobody.

   In summary, a not-in-use mutex is distinguished by having __m_owner
   == 0 (VG_INVALID_THREADID) and __m_count == 0 too.  If one of those
   conditions holds, the other should too.

   There is no linked list of threads waiting for this mutex.  Instead
   a thread in WaitMX state points at the mutex with its associated_mx
   field.  This makes _unlock() inefficient, but simple to implement
   the right semantics vis-a-vis signals.

   We don't have to deal with mutex initialisation; the client side
   deals with that for us.
*/

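/* A minimal sketch of the not-in-use invariant described above, for
   illustration only -- this hypothetical helper is not called from
   anywhere else in this file. */
static __inline__
Bool mutex_is_unheld_SKETCH ( pthread_mutex_t* mutex )
{
   Bool owner_free = ((ThreadId)mutex->__m_owner == VG_INVALID_THREADID);
   Bool count_free = (mutex->__m_count == 0);
   /* Per the comment above, if one condition holds, so should the
      other. */
   vg_assert(owner_free == count_free);
   return owner_free;
}
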
/* Helper fns ... */
static
void release_one_thread_waiting_on_mutex ( pthread_mutex_t* mutex,
                                           Char* caller )
{
   Int  i;
   Char msg_buf[100];

   /* Find some arbitrary thread waiting on this mutex, and make it
      runnable.  If none are waiting, mark the mutex as not held. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (vg_threads[i].status == VgTs_Empty)
         continue;
      if (vg_threads[i].status == VgTs_WaitMX
          && vg_threads[i].associated_mx == mutex)
         break;
   }

   vg_assert(i <= VG_N_THREADS);
   if (i == VG_N_THREADS) {
      /* Nobody else is waiting on it. */
      mutex->__m_count = 0;
      mutex->__m_owner = VG_INVALID_THREADID;
   } else {
      /* Notionally transfer the hold to thread i, whose
         pthread_mutex_lock() call now returns with 0 (success). */
      /* The .count is already == 1. */
      vg_assert(vg_threads[i].associated_mx == mutex);
      mutex->__m_owner = (_pthread_descr)i;
      vg_threads[i].status        = VgTs_Runnable;
      vg_threads[i].associated_mx = NULL;
      /* m_edx already holds pth_mx_lock() success (0) */

      if (VG_(clo_trace_pthread_level) >= 1) {
         VG_(sprintf)(msg_buf, "%s mx %p: RESUME",
                               caller, mutex );
         print_pthread_event(i, msg_buf);
      }
   }
}


static
void do_pthread_mutex_lock( ThreadId tid,
                            Bool is_trylock,
                            void* /* pthread_mutex_t* */ mutexV )
{
   Char  msg_buf[100];
   Char* caller
      = is_trylock ? "pthread_mutex_trylock"
                   : "pthread_mutex_lock ";

   pthread_mutex_t* mutex = (pthread_mutex_t*)mutexV;

   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf, "%s mx %p ...", caller, mutex );
      print_pthread_event(tid, msg_buf);
   }

   /* Paranoia ... */
   vg_assert(is_valid_tid(tid)
             && vg_threads[tid].status == VgTs_Runnable);

   /* POSIX doesn't mandate this, but for sanity ... */
   if (mutex == NULL) {
      vg_threads[tid].m_edx = EINVAL;
      return;
   }

   /* More paranoia ... */
   switch (mutex->__m_kind) {
#     ifndef GLIBC_2_1
      case PTHREAD_MUTEX_TIMED_NP:
      case PTHREAD_MUTEX_ADAPTIVE_NP:
#     endif
      case PTHREAD_MUTEX_RECURSIVE_NP:
      case PTHREAD_MUTEX_ERRORCHECK_NP:
         if (mutex->__m_count >= 0) break;
         /* else fall thru */
      default:
         vg_threads[tid].m_edx = EINVAL;
         return;
   }

   if (mutex->__m_count > 0) {

      vg_assert(is_valid_tid((ThreadId)mutex->__m_owner));

      /* Someone has it already. */
      if ((ThreadId)mutex->__m_owner == tid) {
         /* It's locked -- by me! */
         if (mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
            /* return 0 (success). */
            mutex->__m_count++;
            vg_threads[tid].m_edx = 0;
            if (VG_(clo_trace_pthread_level) >= 2)
               VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n",
                           tid, mutex, mutex->__m_count);
            return;
         } else {
            if (is_trylock)
               vg_threads[tid].m_edx = EBUSY;
            else
               vg_threads[tid].m_edx = EDEADLK;
            return;
         }
      } else {
         /* Someone else has it; we have to wait.  Mark ourselves
            thusly. */
         /* GUARD: __m_count > 0 && __m_owner is valid */
         if (is_trylock) {
            /* caller is polling; so return immediately. */
            vg_threads[tid].m_edx = EBUSY;
         } else {
            vg_threads[tid].status        = VgTs_WaitMX;
            vg_threads[tid].associated_mx = mutex;
            vg_threads[tid].m_edx         = 0; /* pth_mx_lock success value */
            if (VG_(clo_trace_pthread_level) >= 1) {
               VG_(sprintf)(msg_buf, "%s mx %p: BLOCK",
                                     caller, mutex );
               print_pthread_event(tid, msg_buf);
            }
         }
         return;
      }

   } else {
      /* Nobody owns it.  Sanity check ... */
      vg_assert(mutex->__m_owner == VG_INVALID_THREADID);
      /* We get it! [for the first time]. */
      mutex->__m_count = 1;
      mutex->__m_owner = (_pthread_descr)tid;
      vg_assert(vg_threads[tid].associated_mx == NULL);
      /* return 0 (success). */
      vg_threads[tid].m_edx = 0;
   }

}


static
void do_pthread_mutex_unlock ( ThreadId tid,
                               void* /* pthread_mutex_t* */ mutexV )
{
   Char msg_buf[100];
   pthread_mutex_t* mutex = (pthread_mutex_t*)mutexV;

   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf, "pthread_mutex_unlock mx %p ...", mutex );
      print_pthread_event(tid, msg_buf);
   }

   /* Paranoia ... */
   vg_assert(is_valid_tid(tid)
             && vg_threads[tid].status == VgTs_Runnable);

   if (mutex == NULL) {
      vg_threads[tid].m_edx = EINVAL;
      return;
   }

   /* More paranoia ... */
   switch (mutex->__m_kind) {
#     ifndef GLIBC_2_1
      case PTHREAD_MUTEX_TIMED_NP:
      case PTHREAD_MUTEX_ADAPTIVE_NP:
#     endif
      case PTHREAD_MUTEX_RECURSIVE_NP:
      case PTHREAD_MUTEX_ERRORCHECK_NP:
         if (mutex->__m_count >= 0) break;
         /* else fall thru */
      default:
         vg_threads[tid].m_edx = EINVAL;
         return;
   }

   /* Barf if we don't currently hold the mutex. */
   if (mutex->__m_count == 0 /* nobody holds it */
       || (ThreadId)mutex->__m_owner != tid /* we don't hold it */) {
      vg_threads[tid].m_edx = EPERM;
      return;
   }

   /* If it's a multiply-locked recursive mutex, just decrement the
      lock count and return. */
   if (mutex->__m_count > 1) {
      vg_assert(mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
      mutex->__m_count --;
      vg_threads[tid].m_edx = 0; /* success */
      return;
   }

   /* Now we're sure it is locked exactly once, and by the thread who
      is now doing an unlock on it. */
   vg_assert(mutex->__m_count == 1);
   vg_assert((ThreadId)mutex->__m_owner == tid);

   /* Release at max one thread waiting on this mutex. */
   release_one_thread_waiting_on_mutex ( mutex, "pthread_mutex_lock" );

   /* Our (tid's) pth_unlock() returns with 0 (success). */
   vg_threads[tid].m_edx = 0; /* Success. */
}


/* -----------------------------------------------------------
   CONDITION VARIABLES
   -------------------------------------------------------- */

/* The relevant native types are as follows:
   (copied from /usr/include/bits/pthreadtypes.h)

   -- Conditions (not abstract because of PTHREAD_COND_INITIALIZER)
   typedef struct
   {
     struct _pthread_fastlock __c_lock; -- Protect against concurrent access
     _pthread_descr __c_waiting;        -- Threads waiting on this condition
   } pthread_cond_t;

   -- Attribute for condition variables.
   typedef struct
   {
     int __dummy;
   } pthread_condattr_t;

   #define PTHREAD_COND_INITIALIZER {__LOCK_INITIALIZER, 0}

   We don't use any fields of pthread_cond_t for anything at all.
   Only the identity of the CVs is important.

   Linux pthreads supports no attributes on condition variables, so we
   don't need to think too hard there.  */


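/* Sketch of the timeout convention used below, for illustration only
   (a hypothetical helper, not called from anywhere else in this
   file): ms_end == 0xFFFFFFFF means "wait forever", and any other
   value is the millisecond time at which the wait times out. */
static __inline__
Bool cv_wait_has_timeout_SKETCH ( UInt ms_end )
{
   return (ms_end != 0xFFFFFFFF) ? True : False;
}
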
static
void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid )
{
   Char             msg_buf[100];
   pthread_mutex_t* mx;
   pthread_cond_t*  cv;

   vg_assert(is_valid_tid(tid)
             && vg_threads[tid].status == VgTs_WaitCV
             && vg_threads[tid].awaken_at != 0xFFFFFFFF);
   mx = vg_threads[tid].associated_mx;
   vg_assert(mx != NULL);
   cv = vg_threads[tid].associated_cv;
   vg_assert(cv != NULL);

   if (mx->__m_owner == VG_INVALID_THREADID) {
      /* Currently unheld; hand it out to thread tid. */
      vg_assert(mx->__m_count == 0);
      vg_threads[tid].status        = VgTs_Runnable;
      vg_threads[tid].m_edx         = ETIMEDOUT;
                                      /* pthread_cond_wait return value */
      vg_threads[tid].associated_cv = NULL;
      vg_threads[tid].associated_mx = NULL;
      mx->__m_owner = (_pthread_descr)tid;
      mx->__m_count = 1;

      if (VG_(clo_trace_pthread_level) >= 1) {
         VG_(sprintf)(msg_buf,
            "pthread_cond_timedwait cv %p: TIMEOUT with mx %p",
            cv, mx );
         print_pthread_event(tid, msg_buf);
      }
   } else {
      /* Currently held.  Make thread tid be blocked on it. */
      vg_assert(mx->__m_count > 0);
      vg_threads[tid].status        = VgTs_WaitMX;
      vg_threads[tid].m_edx         = ETIMEDOUT;
                                      /* pthread_cond_wait return value */
      vg_threads[tid].associated_cv = NULL;
      vg_threads[tid].associated_mx = mx;
      if (VG_(clo_trace_pthread_level) >= 1) {
         VG_(sprintf)(msg_buf,
            "pthread_cond_timedwait cv %p: TIMEOUT -> BLOCK for mx %p",
            cv, mx );
         print_pthread_event(tid, msg_buf);
      }

   }
}


static
void release_N_threads_waiting_on_cond ( pthread_cond_t* cond,
                                         Int n_to_release,
                                         Char* caller )
{
   Int              i;
   Char             msg_buf[100];
   pthread_mutex_t* mx;

   while (True) {
      if (n_to_release == 0)
         return;

      /* Find a thread waiting on this CV. */
      for (i = 1; i < VG_N_THREADS; i++) {
         if (vg_threads[i].status == VgTs_Empty)
            continue;
         if (vg_threads[i].status == VgTs_WaitCV
             && vg_threads[i].associated_cv == cond)
            break;
      }
      vg_assert(i <= VG_N_THREADS);

      if (i == VG_N_THREADS) {
         /* Nobody else is waiting on it. */
         return;
      }

      mx = vg_threads[i].associated_mx;
      vg_assert(mx != NULL);

      if (mx->__m_owner == VG_INVALID_THREADID) {
         /* Currently unheld; hand it out to thread i. */
         vg_assert(mx->__m_count == 0);
         vg_threads[i].status        = VgTs_Runnable;
         vg_threads[i].associated_cv = NULL;
         vg_threads[i].associated_mx = NULL;
         mx->__m_owner = (_pthread_descr)i;
         mx->__m_count = 1;
         /* .m_edx already holds pth_cond_wait success value (0) */

         if (VG_(clo_trace_pthread_level) >= 1) {
            VG_(sprintf)(msg_buf, "%s cv %p: RESUME with mx %p",
                                  caller, cond, mx );
            print_pthread_event(i, msg_buf);
         }

      } else {
         /* Currently held.  Make thread i be blocked on it. */
         vg_assert(mx->__m_count > 0);
         vg_threads[i].status        = VgTs_WaitMX;
         vg_threads[i].associated_cv = NULL;
         vg_threads[i].associated_mx = mx;
         vg_threads[i].m_edx         = 0; /* pth_cond_wait success value */

         if (VG_(clo_trace_pthread_level) >= 1) {
            VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
                                  caller, cond, mx );
            print_pthread_event(i, msg_buf);
         }

      }

      n_to_release--;
   }
}


static
void do_pthread_cond_wait ( ThreadId tid,
                            pthread_cond_t *cond,
                            pthread_mutex_t *mutex,
                            UInt ms_end )
{
   Char msg_buf[100];

   /* If ms_end == 0xFFFFFFFF, wait forever (no timeout).  Otherwise,
      ms_end is the ending millisecond. */

   /* pre: mutex should be a valid mutex and owned by tid. */
   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf, "pthread_cond_wait cv %p, mx %p, end %d ...",
                            cond, mutex, ms_end );
      print_pthread_event(tid, msg_buf);
   }

   /* Paranoia ... */
   vg_assert(is_valid_tid(tid)
             && vg_threads[tid].status == VgTs_Runnable);

   if (mutex == NULL || cond == NULL) {
      vg_threads[tid].m_edx = EINVAL;
      return;
   }

   /* More paranoia ... */
   switch (mutex->__m_kind) {
#     ifndef GLIBC_2_1
      case PTHREAD_MUTEX_TIMED_NP:
      case PTHREAD_MUTEX_ADAPTIVE_NP:
#     endif
      case PTHREAD_MUTEX_RECURSIVE_NP:
      case PTHREAD_MUTEX_ERRORCHECK_NP:
         if (mutex->__m_count >= 0) break;
         /* else fall thru */
      default:
         vg_threads[tid].m_edx = EINVAL;
         return;
   }

   /* Barf if we don't currently hold the mutex. */
   if (mutex->__m_count == 0 /* nobody holds it */
       || (ThreadId)mutex->__m_owner != tid /* we don't hold it */) {
      vg_threads[tid].m_edx = EINVAL;
      return;
   }

   /* Queue ourselves on the condition. */
   vg_threads[tid].status        = VgTs_WaitCV;
   vg_threads[tid].associated_cv = cond;
   vg_threads[tid].associated_mx = mutex;
   vg_threads[tid].awaken_at     = ms_end;

   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf,
                   "pthread_cond_wait cv %p, mx %p: BLOCK",
                   cond, mutex );
      print_pthread_event(tid, msg_buf);
   }

   /* Release the mutex. */
   release_one_thread_waiting_on_mutex ( mutex, "pthread_cond_wait " );
}


static
void do_pthread_cond_signal_or_broadcast ( ThreadId tid,
                                           Bool broadcast,
                                           pthread_cond_t *cond )
{
   Char  msg_buf[100];
   Char* caller
      = broadcast ? "pthread_cond_broadcast"
                  : "pthread_cond_signal ";

   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf, "%s cv %p ...",
                            caller, cond );
      print_pthread_event(tid, msg_buf);
   }

   /* Paranoia ... */
   vg_assert(is_valid_tid(tid)
             && vg_threads[tid].status == VgTs_Runnable);

   if (cond == NULL) {
      vg_threads[tid].m_edx = EINVAL;
      return;
   }

   release_N_threads_waiting_on_cond (
      cond,
      broadcast ? VG_N_THREADS : 1,
      caller
   );

   vg_threads[tid].m_edx = 0; /* success */
}


/* -----------------------------------------------------------
   THREAD SPECIFIC DATA
   -------------------------------------------------------- */

static __inline__
Bool is_valid_key ( ThreadKey k )
{
   /* k unsigned; hence no < 0 check */
   if (k >= VG_N_THREAD_KEYS) return False;
   if (!vg_thread_keys[k].inuse) return False;
   return True;
}

static
void do_pthread_key_create ( ThreadId tid,
                             pthread_key_t* key,
                             void (*destructor)(void*) )
{
   Int  i;
   Char msg_buf[100];

   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf, "pthread_key_create *key %p, destr %p",
                            key, destructor );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
   vg_assert(is_valid_tid(tid)
             && vg_threads[tid].status == VgTs_Runnable);

   for (i = 0; i < VG_N_THREAD_KEYS; i++)
      if (!vg_thread_keys[i].inuse)
         break;

   if (i == VG_N_THREAD_KEYS) {
      /* vg_threads[tid].m_edx = EAGAIN;
         return;
      */
      VG_(panic)("pthread_key_create: VG_N_THREAD_KEYS is too low;"
                 " increase and recompile");
   }

   vg_thread_keys[i].inuse = True;
   /* TODO: check key for addressability */
   *key = i;
   vg_threads[tid].m_edx = 0;
}


static
void do_pthread_key_delete ( ThreadId tid, pthread_key_t key )
{
   Char msg_buf[100];
   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf, "pthread_key_delete key %d",
                            key );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(is_valid_tid(tid)
             && vg_threads[tid].status == VgTs_Runnable);

   if (!is_valid_key(key)) {
      vg_threads[tid].m_edx = EINVAL;
      return;
   }

   vg_thread_keys[key].inuse = False;

   /* Optional.  We're not required to do this, although it shouldn't
      make any difference to programs which use the key/specifics
      functions correctly. */
   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (vg_threads[tid].status != VgTs_Empty)
         vg_threads[tid].specifics[key] = NULL;
   }
}


static
void do_pthread_getspecific ( ThreadId tid, pthread_key_t key )
{
   Char msg_buf[100];
   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf, "pthread_getspecific key %d",
                            key );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(is_valid_tid(tid)
             && vg_threads[tid].status == VgTs_Runnable);

   if (!is_valid_key(key)) {
      vg_threads[tid].m_edx = (UInt)NULL;
      return;
   }

   vg_threads[tid].m_edx = (UInt)vg_threads[tid].specifics[key];
}


static
void do_pthread_setspecific ( ThreadId tid,
                              pthread_key_t key,
                              void *pointer )
{
   Char msg_buf[100];
   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf, "pthread_setspecific key %d, ptr %p",
                            key, pointer );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(is_valid_tid(tid)
             && vg_threads[tid].status == VgTs_Runnable);

   if (!is_valid_key(key)) {
      vg_threads[tid].m_edx = EINVAL;
      return;
   }

   vg_threads[tid].specifics[key] = pointer;
   vg_threads[tid].m_edx = 0;
}


/* ---------------------------------------------------------------------
   Handle non-trivial client requests.
   ------------------------------------------------------------------ */

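/* Client-side view of the request block, for illustration only: %EAX
   points at a word array whose first element is the request code and
   whose remaining elements are the arguments, which is how arg[] is
   unpacked in do_nontrivial_clientreq below.  This packing helper is
   hypothetical and not used elsewhere in this file. */
static __inline__
void pack_client_request_SKETCH ( UInt* arg, UInt req_no,
                                  UInt arg1, UInt arg2,
                                  UInt arg3, UInt arg4 )
{
   arg[0] = req_no;   /* e.g. VG_USERREQ__PTHREAD_CREATE */
   arg[1] = arg1;
   arg[2] = arg2;
   arg[3] = arg3;
   arg[4] = arg4;
}
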
static
void do_nontrivial_clientreq ( ThreadId tid )
{
   UInt* arg    = (UInt*)(vg_threads[tid].m_eax);
   UInt  req_no = arg[0];
   switch (req_no) {

      case VG_USERREQ__PTHREAD_CREATE:
         do_pthread_create( tid,
                            (pthread_t*)arg[1],
                            (pthread_attr_t*)arg[2],
                            (void*(*)(void*))arg[3],
                            (void*)arg[4] );
         break;

      case VG_USERREQ__PTHREAD_RETURNS:
         handle_pthread_return( tid, (void*)arg[1] );
         break;

      case VG_USERREQ__PTHREAD_JOIN:
         do_pthread_join( tid, arg[1], (void**)(arg[2]) );
         break;

      case VG_USERREQ__PTHREAD_CANCEL:
         do_pthread_cancel( tid, (pthread_t)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_EXIT:
         do_pthread_exit( tid, (void*)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_COND_WAIT:
         do_pthread_cond_wait( tid,
                               (pthread_cond_t *)(arg[1]),
                               (pthread_mutex_t *)(arg[2]),
                               0xFFFFFFFF /* no timeout */ );
         break;

      case VG_USERREQ__PTHREAD_COND_TIMEDWAIT:
         do_pthread_cond_wait( tid,
                               (pthread_cond_t *)(arg[1]),
                               (pthread_mutex_t *)(arg[2]),
                               arg[3] /* timeout millisecond point */ );
         break;

      case VG_USERREQ__PTHREAD_COND_SIGNAL:
         do_pthread_cond_signal_or_broadcast(
            tid,
            False, /* signal, not broadcast */
            (pthread_cond_t *)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_COND_BROADCAST:
         do_pthread_cond_signal_or_broadcast(
            tid,
            True, /* broadcast, not signal */
            (pthread_cond_t *)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_KEY_CREATE:
         do_pthread_key_create ( tid,
                                 (pthread_key_t*)(arg[1]),
                                 (void(*)(void*))(arg[2]) );
         break;

      case VG_USERREQ__PTHREAD_KEY_DELETE:
         do_pthread_key_delete ( tid,
                                 (pthread_key_t)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_GETSPECIFIC:
         do_pthread_getspecific ( tid,
                                  (pthread_key_t)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_SETSPECIFIC:
         do_pthread_setspecific ( tid,
                                  (pthread_key_t)(arg[1]),
                                  (void*)(arg[2]) );
         break;

      case VG_USERREQ__MAKE_NOACCESS:
      case VG_USERREQ__MAKE_WRITABLE:
      case VG_USERREQ__MAKE_READABLE:
      case VG_USERREQ__DISCARD:
      case VG_USERREQ__CHECK_WRITABLE:
      case VG_USERREQ__CHECK_READABLE:
      case VG_USERREQ__MAKE_NOACCESS_STACK:
      case VG_USERREQ__RUNNING_ON_VALGRIND:
      case VG_USERREQ__DO_LEAK_CHECK:
         vg_threads[tid].m_edx
            = VG_(handle_client_request) ( &vg_threads[tid], arg );
         break;

      case VG_USERREQ__SIGNAL_RETURNS:
         handle_signal_return(tid);
         break;

      default:
         VG_(printf)("panic'd on private request = 0x%x\n", arg[0] );
         VG_(panic)("handle_private_client_pthread_request: "
                    "unknown request");
         /*NOTREACHED*/
         break;
   }
}


/* ---------------------------------------------------------------------
   Sanity checking.
   ------------------------------------------------------------------ */

/* Internal consistency checks on the sched/pthread structures. */
static
void scheduler_sanity ( void )
{
   pthread_mutex_t* mx;
   pthread_cond_t*  cv;
   Int              i;

   /* VG_(printf)("scheduler_sanity\n"); */
   for (i = 1; i < VG_N_THREADS; i++) {
      mx = vg_threads[i].associated_mx;
      cv = vg_threads[i].associated_cv;
      if (vg_threads[i].status == VgTs_WaitMX) {
         /* If we're waiting on a MX: (1) the mx is not null, (2, 3)
            it's actually held by someone, since otherwise this thread
            is deadlocked, (4) the mutex's owner is not us, since
            otherwise this thread is also deadlocked.  The logic in
            do_pthread_mutex_lock rejects attempts by a thread to lock
            a (non-recursive) mutex which it already owns.

            (2) has been seen to fail sometimes.  I don't know why.
            Possibly to do with signals. */
         vg_assert(cv == NULL);
         /* 1 */ vg_assert(mx != NULL);
         /* 2 */ vg_assert(mx->__m_count > 0);
         /* 3 */ vg_assert(is_valid_tid((ThreadId)mx->__m_owner));
         /* 4 */ vg_assert(i != (ThreadId)mx->__m_owner);
      } else
      if (vg_threads[i].status == VgTs_WaitCV) {
         vg_assert(cv != NULL);
         vg_assert(mx != NULL);
      } else {
         /* Unfortunately these don't hold true when a sighandler is
            running.  To be fixed. */
         /* vg_assert(cv == NULL); */
         /* vg_assert(mx == NULL); */
      }
   }

   for (i = 0; i < VG_N_THREAD_KEYS; i++) {
      if (!vg_thread_keys[i].inuse)
         vg_assert(vg_thread_keys[i].destructor == NULL);
   }
}


/*--------------------------------------------------------------------*/
/*--- end                                           vg_scheduler.c ---*/
/*--------------------------------------------------------------------*/