blob: 7eeee5181c16a4a78e17df067bb507eaf2541d5c [file] [log] [blame]
sewardje663cb92002-04-12 10:26:32 +00001
2/*--------------------------------------------------------------------*/
3/*--- A user-space pthreads implementation. vg_scheduler.c ---*/
4/*--------------------------------------------------------------------*/
5
6/*
7 This file is part of Valgrind, an x86 protected-mode emulator
8 designed for debugging and profiling binaries on x86-Unixes.
9
10 Copyright (C) 2000-2002 Julian Seward
11 jseward@acm.org
sewardje663cb92002-04-12 10:26:32 +000012
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
27
28 The GNU General Public License is contained in the file LICENSE.
29*/
30
31#include "vg_include.h"
32#include "vg_constants.h"
sewardje663cb92002-04-12 10:26:32 +000033#include "valgrind.h" /* for VG_USERREQ__MAKE_NOACCESS and
34 VG_USERREQ__DO_LEAK_CHECK */
35
sewardj705d3cb2002-05-23 13:13:12 +000036/* BORKAGE/ISSUES as of 23 May 02
sewardje663cb92002-04-12 10:26:32 +000037
sewardj77e466c2002-04-14 02:29:29 +000038- Currently, when a signal is run, just the ThreadStatus.status fields
39 are saved in the signal frame, along with the CPU state. Question:
40 should I also save and restore:
41 ThreadStatus.joiner
42 ThreadStatus.waited_on_mid
43 ThreadStatus.awaken_at
44 ThreadStatus.retval
45 Currently unsure, and so am not doing so.
sewardje663cb92002-04-12 10:26:32 +000046
sewardj77e466c2002-04-14 02:29:29 +000047- Signals interrupting read/write and nanosleep: SA_RESTART settings.
48 Read/write correctly return with EINTR when SA_RESTART isn't
49 specified and they are interrupted by a signal. nanosleep just
50 pretends signals don't exist -- should be fixed.
sewardje663cb92002-04-12 10:26:32 +000051
sewardj75fe1892002-04-14 02:46:33 +000052- Read/write syscall starts: don't crap out when the initial
53 nonblocking read/write returns an error.
sewardj8937c812002-04-12 20:12:20 +000054
sewardj705d3cb2002-05-23 13:13:12 +000055- So, what's the deal with signals and mutexes? If a thread is
sewardj6072c362002-04-19 14:40:57 +000056 blocked on a mutex, or for a condition variable for that matter, can
57 signals still be delivered to it? This has serious consequences --
58 deadlocks, etc.
59
sewardj705d3cb2002-05-23 13:13:12 +000060- Signals still not really right. Each thread should have its
61 own pending-set, but there is just one process-wide pending set.
62
sewardje462e202002-04-13 04:09:07 +000063*/
sewardje663cb92002-04-12 10:26:32 +000064
65
66/* ---------------------------------------------------------------------
67 Types and globals for the scheduler.
68 ------------------------------------------------------------------ */
69
70/* type ThreadId is defined in vg_include.h. */
71
72/* struct ThreadState is defined in vg_include.h. */
73
sewardj018f7622002-05-15 21:13:39 +000074/* Globals. A statically allocated array of threads. NOTE: [0] is
75 never used, to simplify the simulation of initialisers for
sewardj6072c362002-04-19 14:40:57 +000076 LinuxThreads. */
sewardj018f7622002-05-15 21:13:39 +000077ThreadState VG_(threads)[VG_N_THREADS];
sewardje663cb92002-04-12 10:26:32 +000078
sewardj1e8cdc92002-04-18 11:37:52 +000079/* The tid of the thread currently in VG_(baseBlock). */
80static Int vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;
81
sewardje663cb92002-04-12 10:26:32 +000082
83/* vg_oursignalhandler() might longjmp(). Here's the jmp_buf. */
84jmp_buf VG_(scheduler_jmpbuf);
85/* ... and if so, here's the signal which caused it to do so. */
86Int VG_(longjmpd_on_signal);
87
88
89/* Machinery to keep track of which threads are waiting on which
90 fds. */
91typedef
92 struct {
93 /* The thread which made the request. */
94 ThreadId tid;
95
96 /* The next two fields describe the request. */
97 /* File descriptor waited for. -1 means this slot is not in use */
98 Int fd;
99 /* The syscall number the fd is used in. */
100 Int syscall_no;
101
102 /* False => still waiting for select to tell us the fd is ready
103 to go. True => the fd is ready, but the results have not yet
104 been delivered back to the calling thread. Once the latter
105 happens, this entire record is marked as no longer in use, by
106 making the fd field be -1. */
107 Bool ready;
108 }
109 VgWaitedOnFd;
110
111static VgWaitedOnFd vg_waiting_fds[VG_N_WAITING_FDS];
112
113
sewardj5f07b662002-04-23 16:52:51 +0000114/* Keeping track of keys. */
115typedef
116 struct {
117 /* Has this key been allocated ? */
118 Bool inuse;
119 /* If .inuse==True, records the address of the associated
120 destructor, or NULL if none. */
121 void (*destructor)(void*);
122 }
123 ThreadKeyState;
124
125/* And our array of thread keys. */
126static ThreadKeyState vg_thread_keys[VG_N_THREAD_KEYS];
127
128typedef UInt ThreadKey;
129
130
sewardje663cb92002-04-12 10:26:32 +0000131/* Forwards */
sewardj5f07b662002-04-23 16:52:51 +0000132static void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid );
133
sewardje663cb92002-04-12 10:26:32 +0000134static void do_nontrivial_clientreq ( ThreadId tid );
135
sewardj6072c362002-04-19 14:40:57 +0000136static void scheduler_sanity ( void );
137
sewardjd7fd4d22002-04-24 01:57:27 +0000138static void do_pthread_mutex_unlock ( ThreadId,
sewardj8ccc2be2002-05-10 20:26:37 +0000139 void* /* pthread_mutex_t* */ );
sewardjd7fd4d22002-04-24 01:57:27 +0000140static void do_pthread_mutex_lock ( ThreadId, Bool,
sewardj8ccc2be2002-05-10 20:26:37 +0000141 void* /* pthread_mutex_t* */ );
sewardjd7fd4d22002-04-24 01:57:27 +0000142
sewardj51c0aaf2002-04-25 01:32:10 +0000143static void do_pthread_getspecific ( ThreadId,
144 UInt /* pthread_key_t */ );
145
sewardj8ad94e12002-05-29 00:10:20 +0000146static void do__cleanup_push ( ThreadId tid, CleanupEntry* cu );
147static void do__cleanup_pop ( ThreadId tid, CleanupEntry* cu );
148static void do__set_canceltype ( ThreadId tid, Int type );
sewardje663cb92002-04-12 10:26:32 +0000149
150/* ---------------------------------------------------------------------
151 Helper functions for the scheduler.
152 ------------------------------------------------------------------ */
153
sewardjb48e5002002-05-13 00:16:03 +0000154__inline__
155Bool VG_(is_valid_tid) ( ThreadId tid )
sewardj604ec3c2002-04-18 22:38:41 +0000156{
157 /* tid is unsigned, hence no < 0 test. */
sewardj6072c362002-04-19 14:40:57 +0000158 if (tid == 0) return False;
sewardj604ec3c2002-04-18 22:38:41 +0000159 if (tid >= VG_N_THREADS) return False;
sewardj018f7622002-05-15 21:13:39 +0000160 if (VG_(threads)[tid].status == VgTs_Empty) return False;
161 return True;
162}
163
164
165__inline__
166Bool VG_(is_valid_or_empty_tid) ( ThreadId tid )
167{
168 /* tid is unsigned, hence no < 0 test. */
169 if (tid == 0) return False;
170 if (tid >= VG_N_THREADS) return False;
sewardj604ec3c2002-04-18 22:38:41 +0000171 return True;
172}
173
174
sewardj1e8cdc92002-04-18 11:37:52 +0000175/* For constructing error messages only: try and identify a thread
176 whose stack this address currently falls within, or return
177 VG_INVALID_THREADID if it doesn't. A small complication is dealing
178 with any currently VG_(baseBlock)-resident thread.
179*/
ThreadId VG_(identify_stack_addr)( Addr a )
{
   ThreadId tid, tid_to_skip;

   tid_to_skip = VG_INVALID_THREADID;

   /* First check to see if there's a currently-loaded thread in
      VG_(baseBlock).  For that thread, the live %esp is the one in
      VG_(baseBlock), not the saved copy in VG_(threads)[], so test
      against the baseBlock value. */
   if (vg_tid_currently_in_baseBlock != VG_INVALID_THREADID) {
      tid = vg_tid_currently_in_baseBlock;
      if (VG_(baseBlock)[VGOFF_(m_esp)] <= a
          && a <= VG_(threads)[tid].stack_highest_word)
         return tid;
      else
         /* Not in this thread's stack; exclude it from the scan
            below, since its saved m_esp would be misleading. */
         tid_to_skip = tid;
   }

   /* Scan the descheduled threads; their saved m_esp values are
      authoritative.  Slot 0 is never used (see VG_(threads) decl). */
   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (VG_(threads)[tid].status == VgTs_Empty) continue;
      if (tid == tid_to_skip) continue;
      if (VG_(threads)[tid].m_esp <= a
          && a <= VG_(threads)[tid].stack_highest_word)
         return tid;
   }
   /* Address does not fall within any known thread stack. */
   return VG_INVALID_THREADID;
}
206
207
sewardj15a43e12002-04-17 19:35:12 +0000208/* Print the scheduler status. */
/* Dump, to the Valgrind log, one line per live thread: its status,
   the mutex/condvar it is associated with (if any), and an exe
   context for its current %eip/%ebp.  For diagnostics only. */
void VG_(pp_sched_status) ( void )
{
   Int i;
   VG_(printf)("\nsched status:\n");
   /* Slot 0 is never used, so start at 1. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty) continue;
      VG_(printf)("\nThread %d: status = ", i);
      switch (VG_(threads)[i].status) {
         case VgTs_Runnable:   VG_(printf)("Runnable"); break;
         case VgTs_WaitFD:     VG_(printf)("WaitFD"); break;
         case VgTs_WaitJoinee: VG_(printf)("WaitJoinee(%d)",
                                           VG_(threads)[i].joiner_jee_tid);
                               break;
         case VgTs_WaitJoiner: VG_(printf)("WaitJoiner"); break;
         case VgTs_Sleeping:   VG_(printf)("Sleeping"); break;
         case VgTs_WaitMX:     VG_(printf)("WaitMX"); break;
         case VgTs_WaitCV:     VG_(printf)("WaitCV"); break;
         case VgTs_WaitSIG:    VG_(printf)("WaitSIG"); break;
         default: VG_(printf)("???"); break;  /* unknown status value */
      }
      VG_(printf)(", associated_mx = %p, associated_cv = %p\n",
                  VG_(threads)[i].associated_mx,
                  VG_(threads)[i].associated_cv );
      /* Show where the thread currently is. */
      VG_(pp_ExeContext)(
         VG_(get_ExeContext)( False, VG_(threads)[i].m_eip,
                              VG_(threads)[i].m_ebp ));
   }
   VG_(printf)("\n");
}
238
/* Record in vg_waiting_fds[] that thread tid is blocked on file
   descriptor fd inside syscall syscall_no.  The entry starts out
   with .ready == False; panics if the table is full. */
static
void add_waiting_fd ( ThreadId tid, Int fd, Int syscall_no )
{
   Int i;

   vg_assert(fd != -1); /* avoid total chaos */

   /* Find a free slot; fd == -1 marks "not in use". */
   for (i = 0;  i < VG_N_WAITING_FDS; i++)
      if (vg_waiting_fds[i].fd == -1)
         break;

   if (i == VG_N_WAITING_FDS)
      VG_(panic)("add_waiting_fd: VG_N_WAITING_FDS is too low");
   /*
   VG_(printf)("add_waiting_fd: add (tid %d, fd %d) at slot %d\n",
               tid, fd, i);
   */
   vg_waiting_fds[i].fd         = fd;
   vg_waiting_fds[i].tid        = tid;
   vg_waiting_fds[i].ready      = False;
   vg_waiting_fds[i].syscall_no = syscall_no;
}
261
262
263
/* Emit one scheduler-trace line for thread tid.  Callers gate this
   on VG_(clo_trace_sched) (see e.g. handle_signal_return). */
static
void print_sched_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, " SCHED[%d]: %s", tid, what );
}
269
270
/* Emit one pthread-emulation trace line for thread tid; the trace
   level itself is VG_(clo_trace_pthread_level) (see the
   GET_PTHREAD_TRACE_LEVEL client request). */
static
void print_pthread_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, "PTHREAD[%d]: %s", tid, what );
}
276
277
278static
279Char* name_of_sched_event ( UInt event )
280{
281 switch (event) {
sewardje663cb92002-04-12 10:26:32 +0000282 case VG_TRC_EBP_JMP_SYSCALL: return "SYSCALL";
283 case VG_TRC_EBP_JMP_CLIENTREQ: return "CLIENTREQ";
284 case VG_TRC_INNER_COUNTERZERO: return "COUNTERZERO";
285 case VG_TRC_INNER_FASTMISS: return "FASTMISS";
286 case VG_TRC_UNRESUMABLE_SIGNAL: return "FATALSIGNAL";
287 default: return "??UNKNOWN??";
288 }
289}
290
291
292/* Create a translation of the client basic block beginning at
293 orig_addr, and add it to the translation cache & translation table.
294 This probably doesn't really belong here, but, hey ...
295*/
static
void create_translation_for ( ThreadId tid, Addr orig_addr )
{
   Addr    trans_addr;
   TTEntry tte;
   Int orig_size, trans_size;
   /* Ensure there is space to hold a translation. */
   VG_(maybe_do_lru_pass)();
   /* JIT the basic block at orig_addr into an intermediate buffer. */
   VG_(translate)( &VG_(threads)[tid],
                   orig_addr, &orig_size, &trans_addr, &trans_size );
   /* Copy data at trans_addr into the translation cache.
      Returned pointer is to the code, not to the 4-byte
      header. */
   /* Since the .orig_size and .trans_size fields are
      UShort, be paranoid. */
   vg_assert(orig_size > 0 && orig_size < 65536);
   vg_assert(trans_size > 0 && trans_size < 65536);
   tte.orig_size  = orig_size;
   tte.orig_addr  = orig_addr;
   tte.trans_size = trans_size;
   tte.trans_addr = VG_(copy_to_transcache)
                       ( trans_addr, trans_size );
   tte.mru_epoch  = VG_(current_epoch);
   /* Free the intermediary -- was allocated by VG_(emit_code). */
   VG_(jitfree)( (void*)trans_addr );
   /* Add to trans tab and set back pointer. */
   VG_(add_to_trans_tab) ( &tte );
   /* Update stats. */
   VG_(this_epoch_in_count) ++;
   VG_(this_epoch_in_osize) += orig_size;
   VG_(this_epoch_in_tsize) += trans_size;
   VG_(overall_in_count) ++;
   VG_(overall_in_osize) += orig_size;
   VG_(overall_in_tsize) += trans_size;
}
331
332
333/* Allocate a completely empty ThreadState record. */
static
ThreadId vg_alloc_ThreadState ( void )
{
   Int i;
   /* Return the lowest-numbered free slot.  Slot 0 is reserved
      (never used), so the scan starts at 1. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty)
         return i;
   }
   /* No free slot: this is fatal. */
   VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
   VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
   VG_(panic)("VG_N_THREADS is too low");
   /*NOTREACHED*/
}
347
348
sewardj1e8cdc92002-04-18 11:37:52 +0000349ThreadState* VG_(get_current_thread_state) ( void )
350{
sewardj018f7622002-05-15 21:13:39 +0000351 vg_assert(VG_(is_valid_tid)(vg_tid_currently_in_baseBlock));
352 return & VG_(threads)[vg_tid_currently_in_baseBlock];
sewardj1e8cdc92002-04-18 11:37:52 +0000353}
354
355
356ThreadId VG_(get_current_tid) ( void )
357{
sewardj018f7622002-05-15 21:13:39 +0000358 vg_assert(VG_(is_valid_tid)(vg_tid_currently_in_baseBlock));
sewardj1e8cdc92002-04-18 11:37:52 +0000359 return vg_tid_currently_in_baseBlock;
360}
361
362
sewardje663cb92002-04-12 10:26:32 +0000363/* Copy the saved state of a thread into VG_(baseBlock), ready for it
364 to be run. */
__inline__
void VG_(load_thread_state) ( ThreadId tid )
{
   Int i;
   /* Only one thread may be resident in VG_(baseBlock) at a time. */
   vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);

   /* Integer registers, flags and %eip. */
   VG_(baseBlock)[VGOFF_(m_eax)] = VG_(threads)[tid].m_eax;
   VG_(baseBlock)[VGOFF_(m_ebx)] = VG_(threads)[tid].m_ebx;
   VG_(baseBlock)[VGOFF_(m_ecx)] = VG_(threads)[tid].m_ecx;
   VG_(baseBlock)[VGOFF_(m_edx)] = VG_(threads)[tid].m_edx;
   VG_(baseBlock)[VGOFF_(m_esi)] = VG_(threads)[tid].m_esi;
   VG_(baseBlock)[VGOFF_(m_edi)] = VG_(threads)[tid].m_edi;
   VG_(baseBlock)[VGOFF_(m_ebp)] = VG_(threads)[tid].m_ebp;
   VG_(baseBlock)[VGOFF_(m_esp)] = VG_(threads)[tid].m_esp;
   VG_(baseBlock)[VGOFF_(m_eflags)] = VG_(threads)[tid].m_eflags;
   VG_(baseBlock)[VGOFF_(m_eip)] = VG_(threads)[tid].m_eip;

   /* FPU state, word by word. */
   for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
      VG_(baseBlock)[VGOFF_(m_fpustate) + i] = VG_(threads)[tid].m_fpu[i];

   /* Shadow (validity-tracking) registers.  Note there is no
      shadow for %eip. */
   VG_(baseBlock)[VGOFF_(sh_eax)] = VG_(threads)[tid].sh_eax;
   VG_(baseBlock)[VGOFF_(sh_ebx)] = VG_(threads)[tid].sh_ebx;
   VG_(baseBlock)[VGOFF_(sh_ecx)] = VG_(threads)[tid].sh_ecx;
   VG_(baseBlock)[VGOFF_(sh_edx)] = VG_(threads)[tid].sh_edx;
   VG_(baseBlock)[VGOFF_(sh_esi)] = VG_(threads)[tid].sh_esi;
   VG_(baseBlock)[VGOFF_(sh_edi)] = VG_(threads)[tid].sh_edi;
   VG_(baseBlock)[VGOFF_(sh_ebp)] = VG_(threads)[tid].sh_ebp;
   VG_(baseBlock)[VGOFF_(sh_esp)] = VG_(threads)[tid].sh_esp;
   VG_(baseBlock)[VGOFF_(sh_eflags)] = VG_(threads)[tid].sh_eflags;

   /* Mark tid as the resident thread. */
   vg_tid_currently_in_baseBlock = tid;
}
397
398
399/* Copy the state of a thread from VG_(baseBlock), presumably after it
400 has been descheduled. For sanity-check purposes, fill the vacated
401 VG_(baseBlock) with garbage so as to make the system more likely to
402 fail quickly if we erroneously continue to poke around inside
403 VG_(baseBlock) without first doing a load_thread_state().
404*/
__inline__
void VG_(save_thread_state) ( ThreadId tid )
{
   Int i;
   const UInt junk = 0xDEADBEEF;

   /* A thread must actually be resident before it can be saved. */
   vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);

   /* Integer registers, flags and %eip. */
   VG_(threads)[tid].m_eax = VG_(baseBlock)[VGOFF_(m_eax)];
   VG_(threads)[tid].m_ebx = VG_(baseBlock)[VGOFF_(m_ebx)];
   VG_(threads)[tid].m_ecx = VG_(baseBlock)[VGOFF_(m_ecx)];
   VG_(threads)[tid].m_edx = VG_(baseBlock)[VGOFF_(m_edx)];
   VG_(threads)[tid].m_esi = VG_(baseBlock)[VGOFF_(m_esi)];
   VG_(threads)[tid].m_edi = VG_(baseBlock)[VGOFF_(m_edi)];
   VG_(threads)[tid].m_ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
   VG_(threads)[tid].m_esp = VG_(baseBlock)[VGOFF_(m_esp)];
   VG_(threads)[tid].m_eflags = VG_(baseBlock)[VGOFF_(m_eflags)];
   VG_(threads)[tid].m_eip = VG_(baseBlock)[VGOFF_(m_eip)];

   /* FPU state. */
   for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
      VG_(threads)[tid].m_fpu[i] = VG_(baseBlock)[VGOFF_(m_fpustate) + i];

   /* Shadow registers (no shadow %eip exists). */
   VG_(threads)[tid].sh_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
   VG_(threads)[tid].sh_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
   VG_(threads)[tid].sh_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
   VG_(threads)[tid].sh_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
   VG_(threads)[tid].sh_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
   VG_(threads)[tid].sh_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
   VG_(threads)[tid].sh_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
   VG_(threads)[tid].sh_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
   VG_(threads)[tid].sh_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];

   /* Fill it up with junk.  (Poison the vacated slots so stray
      reads of VG_(baseBlock) fail fast -- see function header.) */
   VG_(baseBlock)[VGOFF_(m_eax)] = junk;
   VG_(baseBlock)[VGOFF_(m_ebx)] = junk;
   VG_(baseBlock)[VGOFF_(m_ecx)] = junk;
   VG_(baseBlock)[VGOFF_(m_edx)] = junk;
   VG_(baseBlock)[VGOFF_(m_esi)] = junk;
   VG_(baseBlock)[VGOFF_(m_edi)] = junk;
   VG_(baseBlock)[VGOFF_(m_ebp)] = junk;
   VG_(baseBlock)[VGOFF_(m_esp)] = junk;
   VG_(baseBlock)[VGOFF_(m_eflags)] = junk;
   VG_(baseBlock)[VGOFF_(m_eip)] = junk;

   for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
      VG_(baseBlock)[VGOFF_(m_fpustate) + i] = junk;

   /* Nobody is resident any more. */
   vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;
}
454
455
456/* Run the thread tid for a while, and return a VG_TRC_* value to the
457 scheduler indicating what happened. */
static
UInt run_thread_for_a_while ( ThreadId tid )
{
   /* volatile: trc is written between the setjmp and the longjmp
      taken by the signal handler, so it must not live in a
      register across that boundary. */
   volatile UInt trc = 0;
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   vg_assert(VG_(bbs_to_go) > 0);

   VGP_PUSHCC(VgpRun);
   /* Make tid's state resident in VG_(baseBlock) for the run. */
   VG_(load_thread_state) ( tid );
   if (__builtin_setjmp(VG_(scheduler_jmpbuf)) == 0) {
      /* try this ... */
      trc = VG_(run_innerloop)();
      /* We get here if the client didn't take a fault. */
   } else {
      /* We get here if the client took a fault, which caused our
         signal handler to longjmp. */
      vg_assert(trc == 0);
      trc = VG_TRC_UNRESUMABLE_SIGNAL;
   }
   /* Park the (possibly updated) state back in VG_(threads)[tid]. */
   VG_(save_thread_state) ( tid );
   VGP_POPCC;
   return trc;
}
482
483
484/* Increment the LRU epoch counter. */
static
void increment_epoch ( void )
{
   VG_(current_epoch)++;
   /* At high verbosity, report translation-cache activity for the
      epoch just ended before zeroing the counters. */
   if (VG_(clo_verbosity) > 2) {
      UInt tt_used, tc_used;
      VG_(get_tt_tc_used) ( &tt_used, &tc_used );
      VG_(message)(Vg_UserMsg,
         "%lu bbs, in: %d (%d -> %d), out %d (%d -> %d), TT %d, TC %d",
         VG_(bbs_done),
         VG_(this_epoch_in_count),
         VG_(this_epoch_in_osize),
         VG_(this_epoch_in_tsize),
         VG_(this_epoch_out_count),
         VG_(this_epoch_out_osize),
         VG_(this_epoch_out_tsize),
         tt_used, tc_used
      );
   }
   /* Reset the per-epoch translation statistics. */
   VG_(this_epoch_in_count) = 0;
   VG_(this_epoch_in_osize) = 0;
   VG_(this_epoch_in_tsize) = 0;
   VG_(this_epoch_out_count) = 0;
   VG_(this_epoch_out_osize) = 0;
   VG_(this_epoch_out_tsize) = 0;
}
511
512
sewardj20917d82002-05-28 01:36:45 +0000513static
514void mostly_clear_thread_record ( ThreadId tid )
515{
516 Int j;
517 vg_assert(tid >= 0 && tid < VG_N_THREADS);
518 VG_(threads)[tid].tid = tid;
519 VG_(threads)[tid].status = VgTs_Empty;
520 VG_(threads)[tid].associated_mx = NULL;
521 VG_(threads)[tid].associated_cv = NULL;
522 VG_(threads)[tid].awaken_at = 0;
523 VG_(threads)[tid].joinee_retval = NULL;
524 VG_(threads)[tid].joiner_thread_return = NULL;
525 VG_(threads)[tid].joiner_jee_tid = VG_INVALID_THREADID;
sewardj8ad94e12002-05-29 00:10:20 +0000526 VG_(threads)[tid].detached = False;
sewardj20917d82002-05-28 01:36:45 +0000527 VG_(threads)[tid].cancel_st = True; /* PTHREAD_CANCEL_ENABLE */
528 VG_(threads)[tid].cancel_ty = True; /* PTHREAD_CANCEL_DEFERRED */
529 VG_(threads)[tid].cancel_pend = NULL; /* not pending */
sewardj8ad94e12002-05-29 00:10:20 +0000530 VG_(threads)[tid].custack_used = 0;
sewardj20917d82002-05-28 01:36:45 +0000531 VG_(ksigemptyset)(&VG_(threads)[tid].sig_mask);
532 VG_(ksigemptyset)(&VG_(threads)[tid].sigs_waited_for);
533 for (j = 0; j < VG_N_THREAD_KEYS; j++)
534 VG_(threads)[tid].specifics[j] = NULL;
535}
536
537
sewardje663cb92002-04-12 10:26:32 +0000538/* Initialise the scheduler. Create a single "main" thread ready to
sewardj6072c362002-04-19 14:40:57 +0000539 run, with special ThreadId of one. This is called at startup; the
sewardje663cb92002-04-12 10:26:32 +0000540 caller takes care to park the client's state is parked in
541 VG_(baseBlock).
542*/
void VG_(scheduler_init) ( void )
{
   Int      i;
   Addr     startup_esp;
   ThreadId tid_main;

   /* Sanity-check the %esp the client started with against the two
      known startup stack bases. */
   startup_esp = VG_(baseBlock)[VGOFF_(m_esp)];

   if (VG_STACK_MATCHES_BASE(startup_esp, VG_STARTUP_STACK_BASE_1)
       || VG_STACK_MATCHES_BASE(startup_esp, VG_STARTUP_STACK_BASE_2)) {
      /* Jolly good! */
   } else {
      VG_(printf)("%%esp at startup = %p is not near %p or %p; aborting\n",
                  (void*)startup_esp,
                  (void*)VG_STARTUP_STACK_BASE_1,
                  (void*)VG_STARTUP_STACK_BASE_2 );
      VG_(panic)("unexpected %esp at startup");
   }

   /* Clear every thread slot, including the never-used slot 0. */
   for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
      mostly_clear_thread_record(i);
      VG_(threads)[i].stack_size           = 0;
      VG_(threads)[i].stack_base           = (Addr)NULL;
      VG_(threads)[i].stack_highest_word   = (Addr)NULL;
   }

   for (i = 0; i < VG_N_WAITING_FDS; i++)
      vg_waiting_fds[i].fd = -1; /* not in use */

   for (i = 0; i < VG_N_THREAD_KEYS; i++) {
      vg_thread_keys[i].inuse      = False;
      vg_thread_keys[i].destructor = NULL;
   }

   /* Assert this is thread zero, which has certain magic
      properties. */
   tid_main = vg_alloc_ThreadState();
   vg_assert(tid_main == 1);
   VG_(threads)[tid_main].status = VgTs_Runnable;

   /* Copy VG_(baseBlock) state to tid_main's slot. */
   vg_tid_currently_in_baseBlock = tid_main;
   VG_(save_thread_state) ( tid_main );

   /* The startup %esp is taken as the top of the main stack. */
   VG_(threads)[tid_main].stack_highest_word
      = VG_(threads)[tid_main].m_esp /* -4  ??? */;

   /* So now ... (save_thread_state above has cleared the resident
      tid, so nobody is in VG_(baseBlock)). */
   vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);
}
593
594
595/* What if fd isn't a valid fd? */
596static
597void set_fd_nonblocking ( Int fd )
598{
599 Int res = VG_(fcntl)( fd, VKI_F_GETFL, 0 );
600 vg_assert(!VG_(is_kerror)(res));
601 res |= VKI_O_NONBLOCK;
602 res = VG_(fcntl)( fd, VKI_F_SETFL, res );
603 vg_assert(!VG_(is_kerror)(res));
604}
605
606static
607void set_fd_blocking ( Int fd )
608{
609 Int res = VG_(fcntl)( fd, VKI_F_GETFL, 0 );
610 vg_assert(!VG_(is_kerror)(res));
611 res &= ~VKI_O_NONBLOCK;
612 res = VG_(fcntl)( fd, VKI_F_SETFL, res );
613 vg_assert(!VG_(is_kerror)(res));
614}
615
616static
617Bool fd_is_blockful ( Int fd )
618{
619 Int res = VG_(fcntl)( fd, VKI_F_GETFL, 0 );
620 vg_assert(!VG_(is_kerror)(res));
621 return (res & VKI_O_NONBLOCK) ? False : True;
622}
623
sewardj3947e622002-05-23 16:52:11 +0000624static
625Bool fd_is_valid ( Int fd )
626{
627 Int res = VG_(fcntl)( fd, VKI_F_GETFL, 0 );
628 return VG_(is_kerror)(res) ? False : True;
629}
630
sewardje663cb92002-04-12 10:26:32 +0000631
632
sewardjd7fd4d22002-04-24 01:57:27 +0000633/* Possibly do a for tid. Return values are:
sewardje663cb92002-04-12 10:26:32 +0000634
sewardjd7fd4d22002-04-24 01:57:27 +0000635 True = request done. Thread may or may not be still runnable;
636 caller must check. If it is still runnable, the result will be in
637 the thread's %EDX as expected.
638
639 False = request not done. A more capable but slower mechanism will
640 deal with it.
sewardje663cb92002-04-12 10:26:32 +0000641*/
static
Bool maybe_do_trivial_clientreq ( ThreadId tid )
{
   /* Deliver result vvv to the client: it arrives in %edx, whose
      shadow is marked fully valid.  Expands to a return, so only
      usable as the last action of a case. */
#  define SIMPLE_RETURN(vvv)                      \
       { tst->m_edx = (vvv);                      \
         tst->sh_edx = VGM_WORD_VALID;            \
         return True;                             \
       }

   /* The client passes a pointer to its argument block in %eax;
      arg[0] is the request number, arg[1..] the arguments. */
   ThreadState* tst    = &VG_(threads)[tid];
   UInt*        arg    = (UInt*)(tst->m_eax);
   UInt         req_no = arg[0];

   /* VG_(printf)("req no = 0x%x\n", req_no); */
   switch (req_no) {
      /* Allocator requests: service directly and return the result. */
      case VG_USERREQ__MALLOC:
         SIMPLE_RETURN(
            (UInt)VG_(client_malloc) ( tst, arg[1], Vg_AllocMalloc )
         );
      case VG_USERREQ__BUILTIN_NEW:
         SIMPLE_RETURN(
            (UInt)VG_(client_malloc) ( tst, arg[1], Vg_AllocNew )
         );
      case VG_USERREQ__BUILTIN_VEC_NEW:
         SIMPLE_RETURN(
            (UInt)VG_(client_malloc) ( tst, arg[1], Vg_AllocNewVec )
         );
      case VG_USERREQ__FREE:
         VG_(client_free) ( tst, (void*)arg[1], Vg_AllocMalloc );
         SIMPLE_RETURN(0); /* irrelevant */
      case VG_USERREQ__BUILTIN_DELETE:
         VG_(client_free) ( tst, (void*)arg[1], Vg_AllocNew );
         SIMPLE_RETURN(0); /* irrelevant */
      case VG_USERREQ__BUILTIN_VEC_DELETE:
         VG_(client_free) ( tst, (void*)arg[1], Vg_AllocNewVec );
         SIMPLE_RETURN(0); /* irrelevant */
      case VG_USERREQ__CALLOC:
         SIMPLE_RETURN(
            (UInt)VG_(client_calloc) ( tst, arg[1], arg[2] )
         );
      case VG_USERREQ__REALLOC:
         SIMPLE_RETURN(
            (UInt)VG_(client_realloc) ( tst, (void*)arg[1], arg[2] )
         );
      case VG_USERREQ__MEMALIGN:
         SIMPLE_RETURN(
            (UInt)VG_(client_memalign) ( tst, arg[1], arg[2] )
         );

      /* These are heavily used -- or at least we want them to be
         cheap. */
      case VG_USERREQ__PTHREAD_GET_THREADID:
         SIMPLE_RETURN(tid);
      case VG_USERREQ__RUNNING_ON_VALGRIND:
         SIMPLE_RETURN(1);
      case VG_USERREQ__GET_PTHREAD_TRACE_LEVEL:
         SIMPLE_RETURN(VG_(clo_trace_pthread_level));
      case VG_USERREQ__READ_MILLISECOND_TIMER:
         SIMPLE_RETURN(VG_(read_millisecond_timer)());

      /* This may make thread tid non-runnable, but the scheduler
         checks for that on return from this function. */
      case VG_USERREQ__PTHREAD_MUTEX_LOCK:
         do_pthread_mutex_lock( tid, False, (void *)(arg[1]) );
         return True;

      case VG_USERREQ__PTHREAD_MUTEX_TRYLOCK:
         do_pthread_mutex_lock( tid, True, (void *)(arg[1]) );
         return True;

      case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
         do_pthread_mutex_unlock( tid, (void *)(arg[1]) );
         return True;

      case VG_USERREQ__PTHREAD_GETSPECIFIC:
         do_pthread_getspecific ( tid, (UInt)(arg[1]) );
         return True;

      case VG_USERREQ__SET_CANCELTYPE:
         do__set_canceltype ( tid, arg[1] );
         return True;

      case VG_USERREQ__CLEANUP_PUSH:
         do__cleanup_push ( tid, (CleanupEntry*)(arg[1]) );
         return True;

      case VG_USERREQ__CLEANUP_POP:
         do__cleanup_pop ( tid, (CleanupEntry*)(arg[1]) );
         return True;

      default:
         /* Too hard; wimp out.  (Caller falls back to the slower
            request mechanism.) */
         return False;
   }
#  undef SIMPLE_RETURN
}
738
739
sewardj6072c362002-04-19 14:40:57 +0000740/* vthread tid is returning from a signal handler; modify its
741 stack/regs accordingly. */
sewardj1ffa8da2002-04-26 22:47:57 +0000742
743/* [Helper fn for handle_signal_return] tid, assumed to be in WaitFD
744 for read or write, has been interrupted by a signal. Find and
745 clear the relevant vg_waiting_fd[] entry. Most of the code in this
746 procedure is total paranoia, if you look closely. */
static
void cleanup_waiting_fd_table ( ThreadId tid )
{
   Int  i, waiters;

   /* Caller guarantees: tid is in WaitFD for a read or write. */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_WaitFD);
   vg_assert(VG_(threads)[tid].m_eax == __NR_read
             || VG_(threads)[tid].m_eax == __NR_write);

   /* Excessively paranoidly ... find the fd this op was waiting
      for, and mark it as not being waited on. */
   waiters = 0;
   for (i = 0; i < VG_N_WAITING_FDS; i++) {
      if (vg_waiting_fds[i].tid == tid) {
         waiters++;
         /* The recorded syscall must match the one tid is in. */
         vg_assert(vg_waiting_fds[i].syscall_no == VG_(threads)[tid].m_eax);
      }
   }
   /* A WaitFD thread must have exactly one table entry. */
   vg_assert(waiters == 1);
   for (i = 0; i < VG_N_WAITING_FDS; i++)
      if (vg_waiting_fds[i].tid == tid)
         break;
   vg_assert(i < VG_N_WAITING_FDS);
   vg_assert(vg_waiting_fds[i].fd != -1);
   vg_waiting_fds[i].fd = -1; /* not in use */
}
774
775
/* Thread tid has just returned from a signal handler.  Decide what
   to do about any syscall it was blocked in when the signal arrived:
   either leave it to be restarted, or force it to fail with EINTR. */
static
void handle_signal_return ( ThreadId tid )
{
   Char msg_buf[100];
   Bool restart_blocked_syscalls;

   vg_assert(VG_(is_valid_tid)(tid));

   /* VG_(signal_returns) restores the thread's pre-signal state and
      reports whether interrupted syscalls should be restarted. */
   restart_blocked_syscalls = VG_(signal_returns)(tid);

   if (restart_blocked_syscalls)
      /* Easy; we don't have to do anything. */
      return;

   /* Not restarting: an interrupted read()/write() must instead be
      failed with EINTR, and its waiting-fd entry discarded. */
   if (VG_(threads)[tid].status == VgTs_WaitFD
       && (VG_(threads)[tid].m_eax == __NR_read
           || VG_(threads)[tid].m_eax == __NR_write)) {
      /* read() or write() interrupted.  Force a return with EINTR. */
      cleanup_waiting_fd_table(tid);
      VG_(threads)[tid].m_eax = -VKI_EINTR;
      VG_(threads)[tid].status = VgTs_Runnable;

      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,
            "read() / write() interrupted by signal; return EINTR" );
         print_sched_event(tid, msg_buf);
      }
      return;
   }

   /* NOTE(review): sched_do_syscall() puts a nanosleep()ing thread
      into VgTs_Sleeping, not VgTs_WaitFD, so it is unclear this
      branch can ever be taken -- confirm against the signal-delivery
      code before relying on it. */
   if (VG_(threads)[tid].status == VgTs_WaitFD
       && VG_(threads)[tid].m_eax == __NR_nanosleep) {
      /* We interrupted a nanosleep().  The right thing to do is to
         write the unused time to nanosleep's second param and return
         EINTR, but I'm too lazy for that. */
      return;
   }

   /* Any other syscall found in WaitFD state is unexpected. */
   if (VG_(threads)[tid].status == VgTs_WaitFD) {
      VG_(panic)("handle_signal_return: unknown interrupted syscall");
   }

   /* All other cases?  Just return. */
}
820
821
/* Perform the syscall that thread tid (currently Runnable) has
   requested in %eax.  Three cases:
     - nanosleep: don't call the kernel at all; put the thread into
       VgTs_Sleeping with a wakeup time, and let the scheduler poll.
     - anything other than read/write: assumed non-blocking, executed
       directly; the thread stays Runnable.
     - read/write: attempted speculatively with the fd forced
       non-blocking; if it would have blocked, the thread is parked in
       VgTs_WaitFD and the fd is registered for later select() polling. */
static
void sched_do_syscall ( ThreadId tid )
{
   UInt saved_eax;
   UInt res, syscall_no;
   UInt fd;
   Bool orig_fd_blockness;
   Char msg_buf[100];

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   syscall_no = VG_(threads)[tid].m_eax; /* syscall number */

   if (syscall_no == __NR_nanosleep) {
      UInt t_now, t_awaken;
      struct vki_timespec* req;
      req = (struct vki_timespec*)VG_(threads)[tid].m_ebx; /* arg1 */
      t_now = VG_(read_millisecond_timer)();
      /* NOTE(review): this millisecond arithmetic is done in 32-bit
         unsigned; very long sleeps (tv_sec beyond ~49 days' worth of
         ms) would wrap -- presumably acceptable here. */
      t_awaken
         = t_now
           + (UInt)1000ULL * (UInt)(req->tv_sec)
           + (UInt)(req->tv_nsec) / 1000000;
      VG_(threads)[tid].status = VgTs_Sleeping;
      VG_(threads)[tid].awaken_at = t_awaken;
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf, "at %d: nanosleep for %d",
                               t_now, t_awaken-t_now);
         print_sched_event(tid, msg_buf);
      }
      /* Force the scheduler to run something else for a while. */
      return;
   }

   if (syscall_no != __NR_read && syscall_no != __NR_write) {
      /* We think it's non-blocking.  Just do it in the normal way. */
      VG_(perform_assumed_nonblocking_syscall)(tid);
      /* The thread is still runnable. */
      return;
   }

   /* Set the fd to nonblocking, and do the syscall, which will return
      immediately, in order to lodge a request with the Linux kernel.
      We later poll for I/O completion using select().  */

   fd = VG_(threads)[tid].m_ebx /* arg1 */;

   /* Deal with error case immediately: a bad fd is still passed to
      the kernel (so the client sees the kernel's error return), but
      no completion-wait entry is filed. */
   if (!fd_is_valid(fd)) {
      VG_(message)(Vg_UserMsg,
         "Warning: invalid file descriptor %d in syscall %s",
         fd, syscall_no == __NR_read ? "read()" : "write()" );
      VG_(check_known_blocking_syscall)(tid, syscall_no, NULL /* PRE */);
      KERNEL_DO_SYSCALL(tid, res);
      VG_(check_known_blocking_syscall)(tid, syscall_no, &res /* POST */);
      /* We're still runnable. */
      vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
      return;
   }

   /* From here onwards we know that fd is valid. */

   orig_fd_blockness = fd_is_blockful(fd);
   set_fd_nonblocking(fd);
   vg_assert(!fd_is_blockful(fd));
   VG_(check_known_blocking_syscall)(tid, syscall_no, NULL /* PRE */);

   /* This trashes the thread's %eax; we have to preserve it. */
   saved_eax = VG_(threads)[tid].m_eax;
   KERNEL_DO_SYSCALL(tid,res);

   /* Restore original blockfulness of the fd. */
   if (orig_fd_blockness)
      set_fd_blocking(fd);
   else
      set_fd_nonblocking(fd);

   if (res != -VKI_EWOULDBLOCK || !orig_fd_blockness) {
      /* Finish off in the normal way.  Don't restore %EAX, since that
         now (correctly) holds the result of the call.  We get here if either:
         1.  The call didn't block, or
         2.  The fd was already in nonblocking mode before we started to
             mess with it.  In this case, we're not expecting to handle
             the I/O completion -- the client is.  So don't file a
             completion-wait entry.
      */
      VG_(check_known_blocking_syscall)(tid, syscall_no, &res /* POST */);
      /* We're still runnable. */
      vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   } else {

      vg_assert(res == -VKI_EWOULDBLOCK && orig_fd_blockness);

      /* It would have blocked.  First, restore %EAX to what it was
         before our speculative call. */
      VG_(threads)[tid].m_eax = saved_eax;
      /* Put this fd in a table of fds on which we are waiting for
         completion. The arguments for select() later are constructed
         from this table.  */
      add_waiting_fd(tid, fd, saved_eax /* which holds the syscall # */);
      /* Deschedule thread until an I/O completion happens. */
      VG_(threads)[tid].status = VgTs_WaitFD;
      if (VG_(clo_trace_sched)) {
         VG_(sprintf)(msg_buf,"block until I/O ready on fd %d", fd);
         print_sched_event(tid, msg_buf);
      }

   }
}
932
933
/* Find out which of the fds in vg_waiting_fds are now ready to go, by
   making enquiries with select(), and mark them as ready.  We have to
   wait for the requesting threads to fall into the WaitFD state
   before we can actually finally deliver the results, so this
   procedure doesn't do that; complete_blocked_syscalls() does it.

   It might seem odd that a thread which has done a blocking syscall
   is not in WaitFD state; the way this can happen is if it initially
   becomes WaitFD, but then a signal is delivered to it, so it becomes
   Runnable for a while.  In this case we have to wait for the
   sighandler to return, whereupon the WaitFD state is resumed, and
   only at that point can the I/O result be delivered to it.  However,
   this point may be long after the fd is actually ready.

   So, poll_for_ready_fds() merely detects fds which are ready.
   complete_blocked_syscalls() does the second half of the trick,
   possibly much later: it delivers the results from ready fds to
   threads in WaitFD state.
*/
static
void poll_for_ready_fds ( void )
{
   vki_ksigset_t saved_procmask;
   vki_fd_set readfds;
   vki_fd_set writefds;
   vki_fd_set exceptfds;
   struct vki_timeval timeout;
   Int fd, fd_max, i, n_ready, syscall_no, n_ok;
   ThreadId tid;
   Bool rd_ok, wr_ok, ex_ok;
   Char msg_buf[100];

   struct vki_timespec* rem;
   UInt t_now;

   /* Awaken any sleeping threads whose sleep has expired.  First
      scan just checks whether any thread is sleeping at all ... */
   for (tid = 1; tid < VG_N_THREADS; tid++)
      if (VG_(threads)[tid].status == VgTs_Sleeping)
         break;

   /* Avoid pointless calls to VG_(read_millisecond_timer). */
   if (tid < VG_N_THREADS) {
      t_now = VG_(read_millisecond_timer)();
      for (tid = 1; tid < VG_N_THREADS; tid++) {
         if (VG_(threads)[tid].status != VgTs_Sleeping)
            continue;
         if (t_now >= VG_(threads)[tid].awaken_at) {
            /* Resume this thread.  Set to zero the remaining-time
               (second) arg of nanosleep, since it's used up all its
               time. */
            vg_assert(VG_(threads)[tid].m_eax == __NR_nanosleep);
            rem = (struct vki_timespec *)VG_(threads)[tid].m_ecx; /* arg2 */
            if (rem != NULL) {
               rem->tv_sec = 0;
               rem->tv_nsec = 0;
            }
            /* Make the syscall return 0 (success). */
            VG_(threads)[tid].m_eax = 0;
            /* Reschedule this thread. */
            VG_(threads)[tid].status = VgTs_Runnable;
            if (VG_(clo_trace_sched)) {
               VG_(sprintf)(msg_buf, "at %d: nanosleep done",
                                     t_now);
               print_sched_event(tid, msg_buf);
            }
         }
      }
   }

   /* And look for threads waiting on file descriptors which are now
      ready for I/O.  Zero timeout: this select() is a pure poll and
      never blocks. */
   timeout.tv_sec = 0;
   timeout.tv_usec = 0;

   VKI_FD_ZERO(&readfds);
   VKI_FD_ZERO(&writefds);
   VKI_FD_ZERO(&exceptfds);
   fd_max = -1;
   /* Build the fd sets from the not-yet-ready entries of vg_waiting_fds. */
   for (i = 0; i < VG_N_WAITING_FDS; i++) {
      if (vg_waiting_fds[i].fd == -1 /* not in use */)
         continue;
      if (vg_waiting_fds[i].ready /* already ready? */)
         continue;
      fd = vg_waiting_fds[i].fd;
      /* VG_(printf)("adding QUERY for fd %d\n", fd); */
      vg_assert(fd >= 0);
      if (fd > fd_max)
         fd_max = fd;
      tid = vg_waiting_fds[i].tid;
      vg_assert(VG_(is_valid_tid)(tid));
      syscall_no = vg_waiting_fds[i].syscall_no;
      switch (syscall_no) {
         case __NR_read:
            /* In order to catch timeout events on fds which are
               readable and which have been ioctl(TCSETA)'d with a
               VTIMEout, we appear to need to ask if the fd is
               writable, for some reason.  Ask me not why.  Since this
               is strange and potentially troublesome we only do it if
               the user asks specially.
               NOTE(review): with this hack enabled, a fd that becomes
               both readable and writable would trip the
               multiple-events panic below -- confirm intended. */
            if (VG_(strstr)(VG_(clo_weird_hacks), "ioctl-VTIME") != NULL)
               VKI_FD_SET(fd, &writefds);
            VKI_FD_SET(fd, &readfds); break;
         case __NR_write:
            VKI_FD_SET(fd, &writefds); break;
         default:
            VG_(panic)("poll_for_ready_fds: unexpected syscall");
            /*NOTREACHED*/
            break;
      }
   }

   /* Short cut: if no fds are waiting, give up now. */
   if (fd_max == -1)
      return;

   /* BLOCK ALL SIGNALS.  We don't want the complication of select()
      getting interrupted. */
   VG_(block_all_host_signals)( &saved_procmask );

   n_ready = VG_(select)
                ( fd_max+1, &readfds, &writefds, &exceptfds, &timeout);
   if (VG_(is_kerror)(n_ready)) {
      VG_(printf)("poll_for_ready_fds: select returned %d\n", n_ready);
      VG_(panic)("poll_for_ready_fds: select failed?!");
      /*NOTREACHED*/
   }

   /* UNBLOCK ALL SIGNALS */
   VG_(restore_all_host_signals)( &saved_procmask );

   /* VG_(printf)("poll_for_io_completions: %d fs ready\n", n_ready); */

   if (n_ready == 0)
      return;

   /* Inspect all the fds we know about, and handle any completions that
      have happened. */
   /*
   VG_(printf)("\n\n");
   for (fd = 0; fd < 100; fd++)
     if (VKI_FD_ISSET(fd, &writefds) || VKI_FD_ISSET(fd, &readfds)) {
       VG_(printf)("X"); } else { VG_(printf)("."); };
   VG_(printf)("\n\nfd_max = %d\n", fd_max);
   */

   for (fd = 0; fd <= fd_max; fd++) {
      rd_ok = VKI_FD_ISSET(fd, &readfds);
      wr_ok = VKI_FD_ISSET(fd, &writefds);
      ex_ok = VKI_FD_ISSET(fd, &exceptfds);

      /* Exactly one kind of event per fd is expected. */
      n_ok = (rd_ok ? 1 : 0) + (wr_ok ? 1 : 0) + (ex_ok ? 1 : 0);
      if (n_ok == 0)
         continue;
      if (n_ok > 1) {
         VG_(printf)("offending fd = %d\n", fd);
         VG_(panic)("poll_for_ready_fds: multiple events on fd");
      }

      /* An I/O event completed for fd.  Find the thread which
         requested this. */
      for (i = 0; i < VG_N_WAITING_FDS; i++) {
         if (vg_waiting_fds[i].fd == -1 /* not in use */)
            continue;
         if (vg_waiting_fds[i].fd == fd)
            break;
      }

      /* And a bit more paranoia ...  (the loop above must have found
         a live entry, else select() reported a fd we never asked about) */
      vg_assert(i >= 0 && i < VG_N_WAITING_FDS);

      /* Mark the fd as ready; complete_blocked_syscalls() delivers
         the result later, once the owner thread is back in WaitFD. */
      vg_assert(! vg_waiting_fds[i].ready);
      vg_waiting_fds[i].ready = True;
   }
}
1109
1110
/* See comment attached to poll_for_ready_fds() for explanation. */
sewardj9a199dc2002-04-14 13:01:38 +00001112static
sewardje663cb92002-04-12 10:26:32 +00001113void complete_blocked_syscalls ( void )
1114{
1115 Int fd, i, res, syscall_no;
1116 ThreadId tid;
1117 Char msg_buf[100];
1118
1119 /* Inspect all the outstanding fds we know about. */
1120
1121 for (i = 0; i < VG_N_WAITING_FDS; i++) {
1122 if (vg_waiting_fds[i].fd == -1 /* not in use */)
1123 continue;
1124 if (! vg_waiting_fds[i].ready)
1125 continue;
1126
1127 fd = vg_waiting_fds[i].fd;
1128 tid = vg_waiting_fds[i].tid;
sewardjb48e5002002-05-13 00:16:03 +00001129 vg_assert(VG_(is_valid_tid)(tid));
sewardje663cb92002-04-12 10:26:32 +00001130
1131 /* The thread actually has to be waiting for the I/O event it
1132 requested before we can deliver the result! */
sewardj018f7622002-05-15 21:13:39 +00001133 if (VG_(threads)[tid].status != VgTs_WaitFD)
sewardje663cb92002-04-12 10:26:32 +00001134 continue;
1135
1136 /* Ok, actually do it! We can safely use %EAX as the syscall
1137 number, because the speculative call made by
1138 sched_do_syscall() doesn't change %EAX in the case where the
1139 call would have blocked. */
1140
1141 syscall_no = vg_waiting_fds[i].syscall_no;
sewardj018f7622002-05-15 21:13:39 +00001142 vg_assert(syscall_no == VG_(threads)[tid].m_eax);
sewardje663cb92002-04-12 10:26:32 +00001143 KERNEL_DO_SYSCALL(tid,res);
1144 VG_(check_known_blocking_syscall)(tid, syscall_no, &res /* POST */);
1145
1146 /* Reschedule. */
sewardj018f7622002-05-15 21:13:39 +00001147 VG_(threads)[tid].status = VgTs_Runnable;
sewardje663cb92002-04-12 10:26:32 +00001148 /* Mark slot as no longer in use. */
1149 vg_waiting_fds[i].fd = -1;
1150 /* pp_sched_status(); */
sewardj8937c812002-04-12 20:12:20 +00001151 if (VG_(clo_trace_sched)) {
sewardje663cb92002-04-12 10:26:32 +00001152 VG_(sprintf)(msg_buf,"resume due to I/O completion on fd %d", fd);
1153 print_sched_event(tid, msg_buf);
1154 }
1155 }
1156}
1157
1158
1159static
sewardj5f07b662002-04-23 16:52:51 +00001160void check_for_pthread_cond_timedwait ( void )
1161{
sewardj51c0aaf2002-04-25 01:32:10 +00001162 Int i, now;
sewardj5f07b662002-04-23 16:52:51 +00001163 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00001164 if (VG_(threads)[i].status != VgTs_WaitCV)
sewardj5f07b662002-04-23 16:52:51 +00001165 continue;
sewardj018f7622002-05-15 21:13:39 +00001166 if (VG_(threads)[i].awaken_at == 0xFFFFFFFF /* no timeout */)
sewardj5f07b662002-04-23 16:52:51 +00001167 continue;
sewardj51c0aaf2002-04-25 01:32:10 +00001168 now = VG_(read_millisecond_timer)();
sewardj018f7622002-05-15 21:13:39 +00001169 if (now >= VG_(threads)[i].awaken_at) {
sewardj5f07b662002-04-23 16:52:51 +00001170 do_pthread_cond_timedwait_TIMEOUT(i);
sewardj51c0aaf2002-04-25 01:32:10 +00001171 }
sewardj5f07b662002-04-23 16:52:51 +00001172 }
1173}
1174
1175
1176static
sewardje663cb92002-04-12 10:26:32 +00001177void nanosleep_for_a_while ( void )
1178{
1179 Int res;
1180 struct vki_timespec req;
1181 struct vki_timespec rem;
1182 req.tv_sec = 0;
sewardj51c0aaf2002-04-25 01:32:10 +00001183 req.tv_nsec = 20 * 1000 * 1000;
sewardje663cb92002-04-12 10:26:32 +00001184 res = VG_(nanosleep)( &req, &rem );
sewardj5f07b662002-04-23 16:52:51 +00001185 vg_assert(res == 0 /* ok */ || res == 1 /* interrupted by signal */);
sewardje663cb92002-04-12 10:26:32 +00001186}
1187
1188
1189/* ---------------------------------------------------------------------
1190 The scheduler proper.
1191 ------------------------------------------------------------------ */
1192
1193/* Run user-space threads until either
1194 * Deadlock occurs
1195 * One thread asks to shutdown Valgrind
1196 * The specified number of basic blocks has gone by.
1197*/
1198VgSchedReturnCode VG_(scheduler) ( void )
1199{
1200 ThreadId tid, tid_next;
1201 UInt trc;
1202 UInt dispatch_ctr_SAVED;
sewardj51c0aaf2002-04-25 01:32:10 +00001203 Int request_code, done_this_time, n_in_bounded_wait;
sewardje663cb92002-04-12 10:26:32 +00001204 Char msg_buf[100];
1205 Addr trans_addr;
sewardj14e03422002-04-24 19:51:31 +00001206 Bool sigs_delivered;
sewardje663cb92002-04-12 10:26:32 +00001207
1208 /* For the LRU structures, records when the epoch began. */
1209 ULong lru_epoch_started_at = 0;
1210
1211 /* Start with the root thread. tid in general indicates the
1212 currently runnable/just-finished-running thread. */
sewardj7e87e382002-05-03 19:09:05 +00001213 VG_(last_run_tid) = tid = 1;
sewardje663cb92002-04-12 10:26:32 +00001214
1215 /* This is the top level scheduler loop. It falls into three
1216 phases. */
1217 while (True) {
1218
sewardj6072c362002-04-19 14:40:57 +00001219 /* ======================= Phase 0 of 3 =======================
1220 Be paranoid. Always a good idea. */
sewardjd7fd4d22002-04-24 01:57:27 +00001221 stage1:
sewardj6072c362002-04-19 14:40:57 +00001222 scheduler_sanity();
sewardj0c3b53f2002-05-01 01:58:35 +00001223 VG_(do_sanity_checks)( False );
sewardj6072c362002-04-19 14:40:57 +00001224
sewardje663cb92002-04-12 10:26:32 +00001225 /* ======================= Phase 1 of 3 =======================
1226 Handle I/O completions and signals. This may change the
1227 status of various threads. Then select a new thread to run,
1228 or declare deadlock, or sleep if there are no runnable
1229 threads but some are blocked on I/O. */
1230
1231 /* Age the LRU structures if an epoch has been completed. */
1232 if (VG_(bbs_done) - lru_epoch_started_at >= VG_BBS_PER_EPOCH) {
1233 lru_epoch_started_at = VG_(bbs_done);
1234 increment_epoch();
1235 }
1236
1237 /* Was a debug-stop requested? */
1238 if (VG_(bbs_to_go) == 0)
1239 goto debug_stop;
1240
1241 /* Do the following loop until a runnable thread is found, or
1242 deadlock is detected. */
1243 while (True) {
1244
1245 /* For stats purposes only. */
1246 VG_(num_scheduling_events_MAJOR) ++;
1247
1248 /* See if any I/O operations which we were waiting for have
1249 completed, and, if so, make runnable the relevant waiting
1250 threads. */
1251 poll_for_ready_fds();
1252 complete_blocked_syscalls();
sewardj5f07b662002-04-23 16:52:51 +00001253 check_for_pthread_cond_timedwait();
sewardje663cb92002-04-12 10:26:32 +00001254
1255 /* See if there are any signals which need to be delivered. If
1256 so, choose thread(s) to deliver them to, and build signal
1257 delivery frames on those thread(s) stacks. */
sewardj6072c362002-04-19 14:40:57 +00001258
1259 /* Be careful about delivering signals to a thread waiting
1260 for a mutex. In particular, when the handler is running,
1261 that thread is temporarily apparently-not-waiting for the
1262 mutex, so if it is unlocked by another thread whilst the
1263 handler is running, this thread is not informed. When the
1264 handler returns, the thread resumes waiting on the mutex,
1265 even if, as a result, it has missed the unlocking of it.
1266 Potential deadlock. This sounds all very strange, but the
1267 POSIX standard appears to require this behaviour. */
sewardjb48e5002002-05-13 00:16:03 +00001268 sigs_delivered = VG_(deliver_signals)();
sewardj14e03422002-04-24 19:51:31 +00001269 if (sigs_delivered)
sewardj0c3b53f2002-05-01 01:58:35 +00001270 VG_(do_sanity_checks)( False );
sewardje663cb92002-04-12 10:26:32 +00001271
1272 /* Try and find a thread (tid) to run. */
1273 tid_next = tid;
sewardj51c0aaf2002-04-25 01:32:10 +00001274 n_in_bounded_wait = 0;
sewardje663cb92002-04-12 10:26:32 +00001275 while (True) {
1276 tid_next++;
sewardj6072c362002-04-19 14:40:57 +00001277 if (tid_next >= VG_N_THREADS) tid_next = 1;
sewardj018f7622002-05-15 21:13:39 +00001278 if (VG_(threads)[tid_next].status == VgTs_WaitFD
1279 || VG_(threads)[tid_next].status == VgTs_Sleeping
1280 || VG_(threads)[tid_next].status == VgTs_WaitSIG
1281 || (VG_(threads)[tid_next].status == VgTs_WaitCV
1282 && VG_(threads)[tid_next].awaken_at != 0xFFFFFFFF))
sewardj51c0aaf2002-04-25 01:32:10 +00001283 n_in_bounded_wait ++;
sewardj018f7622002-05-15 21:13:39 +00001284 if (VG_(threads)[tid_next].status == VgTs_Runnable)
sewardje663cb92002-04-12 10:26:32 +00001285 break; /* We can run this one. */
1286 if (tid_next == tid)
1287 break; /* been all the way round */
1288 }
1289 tid = tid_next;
1290
sewardj018f7622002-05-15 21:13:39 +00001291 if (VG_(threads)[tid].status == VgTs_Runnable) {
sewardje663cb92002-04-12 10:26:32 +00001292 /* Found a suitable candidate. Fall out of this loop, so
1293 we can advance to stage 2 of the scheduler: actually
1294 running the thread. */
1295 break;
1296 }
1297
1298 /* We didn't find a runnable thread. Now what? */
sewardj51c0aaf2002-04-25 01:32:10 +00001299 if (n_in_bounded_wait == 0) {
sewardj54cacf02002-04-12 23:24:59 +00001300 /* No runnable threads and no prospect of any appearing
1301 even if we wait for an arbitrary length of time. In
1302 short, we have a deadlock. */
sewardj15a43e12002-04-17 19:35:12 +00001303 VG_(pp_sched_status)();
sewardje663cb92002-04-12 10:26:32 +00001304 return VgSrc_Deadlock;
1305 }
1306
1307 /* At least one thread is in a fd-wait state. Delay for a
1308 while, and go round again, in the hope that eventually a
1309 thread becomes runnable. */
1310 nanosleep_for_a_while();
sewardj7e87e382002-05-03 19:09:05 +00001311 /* pp_sched_status(); */
sewardjb48e5002002-05-13 00:16:03 +00001312 /* VG_(printf)("."); */
sewardje663cb92002-04-12 10:26:32 +00001313 }
1314
1315
1316 /* ======================= Phase 2 of 3 =======================
1317 Wahey! We've finally decided that thread tid is runnable, so
1318 we now do that. Run it for as much of a quanta as possible.
1319 Trivial requests are handled and the thread continues. The
1320 aim is not to do too many of Phase 1 since it is expensive. */
1321
1322 if (0)
sewardj3b5d8862002-04-20 13:53:23 +00001323 VG_(printf)("SCHED: tid %d\n", tid);
sewardje663cb92002-04-12 10:26:32 +00001324
1325 /* Figure out how many bbs to ask vg_run_innerloop to do. Note
1326 that it decrements the counter before testing it for zero, so
1327 that if VG_(dispatch_ctr) is set to N you get at most N-1
1328 iterations. Also this means that VG_(dispatch_ctr) must
1329 exceed zero before entering the innerloop. Also also, the
1330 decrement is done before the bb is actually run, so you
1331 always get at least one decrement even if nothing happens.
1332 */
1333 if (VG_(bbs_to_go) >= VG_SCHEDULING_QUANTUM)
1334 VG_(dispatch_ctr) = VG_SCHEDULING_QUANTUM + 1;
1335 else
1336 VG_(dispatch_ctr) = (UInt)VG_(bbs_to_go) + 1;
1337
1338 /* ... and remember what we asked for. */
1339 dispatch_ctr_SAVED = VG_(dispatch_ctr);
1340
sewardj1e8cdc92002-04-18 11:37:52 +00001341 /* paranoia ... */
sewardj018f7622002-05-15 21:13:39 +00001342 vg_assert(VG_(threads)[tid].tid == tid);
sewardj1e8cdc92002-04-18 11:37:52 +00001343
sewardje663cb92002-04-12 10:26:32 +00001344 /* Actually run thread tid. */
1345 while (True) {
1346
sewardj7e87e382002-05-03 19:09:05 +00001347 VG_(last_run_tid) = tid;
1348
sewardje663cb92002-04-12 10:26:32 +00001349 /* For stats purposes only. */
1350 VG_(num_scheduling_events_MINOR) ++;
1351
1352 if (0)
1353 VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs",
1354 tid, VG_(dispatch_ctr) - 1 );
sewardjb3eef6b2002-05-01 00:05:27 +00001355# if 0
1356 if (VG_(bbs_done) > 31700000 + 0) {
1357 dispatch_ctr_SAVED = VG_(dispatch_ctr) = 2;
sewardj018f7622002-05-15 21:13:39 +00001358 VG_(translate)(&VG_(threads)[tid], VG_(threads)[tid].m_eip,
sewardjb3eef6b2002-05-01 00:05:27 +00001359 NULL,NULL,NULL);
1360 }
sewardj018f7622002-05-15 21:13:39 +00001361 vg_assert(VG_(threads)[tid].m_eip != 0);
sewardjb3eef6b2002-05-01 00:05:27 +00001362# endif
sewardje663cb92002-04-12 10:26:32 +00001363
1364 trc = run_thread_for_a_while ( tid );
1365
sewardjb3eef6b2002-05-01 00:05:27 +00001366# if 0
sewardj018f7622002-05-15 21:13:39 +00001367 if (0 == VG_(threads)[tid].m_eip) {
sewardjb3eef6b2002-05-01 00:05:27 +00001368 VG_(printf)("tid = %d, dc = %llu\n", tid, VG_(bbs_done));
sewardj018f7622002-05-15 21:13:39 +00001369 vg_assert(0 != VG_(threads)[tid].m_eip);
sewardjb3eef6b2002-05-01 00:05:27 +00001370 }
1371# endif
1372
sewardje663cb92002-04-12 10:26:32 +00001373 /* Deal quickly with trivial scheduling events, and resume the
1374 thread. */
1375
1376 if (trc == VG_TRC_INNER_FASTMISS) {
1377 vg_assert(VG_(dispatch_ctr) > 0);
1378
1379 /* Trivial event. Miss in the fast-cache. Do a full
1380 lookup for it. */
1381 trans_addr
sewardj018f7622002-05-15 21:13:39 +00001382 = VG_(search_transtab) ( VG_(threads)[tid].m_eip );
sewardje663cb92002-04-12 10:26:32 +00001383 if (trans_addr == (Addr)0) {
1384 /* Not found; we need to request a translation. */
sewardj018f7622002-05-15 21:13:39 +00001385 create_translation_for( tid, VG_(threads)[tid].m_eip );
1386 trans_addr = VG_(search_transtab) ( VG_(threads)[tid].m_eip );
sewardje663cb92002-04-12 10:26:32 +00001387 if (trans_addr == (Addr)0)
1388 VG_(panic)("VG_TRC_INNER_FASTMISS: missing tt_fast entry");
1389 }
1390 continue; /* with this thread */
1391 }
1392
1393 if (trc == VG_TRC_EBP_JMP_CLIENTREQ) {
sewardj8ccc2be2002-05-10 20:26:37 +00001394 Bool done;
1395 /* VG_(printf)("request 0x%x\n",
sewardj018f7622002-05-15 21:13:39 +00001396 *(UInt*)(VG_(threads)[tid].m_eax)); */
sewardj8ccc2be2002-05-10 20:26:37 +00001397 done = maybe_do_trivial_clientreq(tid);
sewardjd7fd4d22002-04-24 01:57:27 +00001398 if (done) {
1399 /* The request is done. We try and continue with the
1400 same thread if still runnable. If not, go back to
1401 Stage 1 to select a new thread to run. */
sewardj018f7622002-05-15 21:13:39 +00001402 if (VG_(threads)[tid].status == VgTs_Runnable)
sewardjd7fd4d22002-04-24 01:57:27 +00001403 continue; /* with this thread */
1404 else
1405 goto stage1;
sewardje663cb92002-04-12 10:26:32 +00001406 }
1407 }
1408
sewardj51c0aaf2002-04-25 01:32:10 +00001409 if (trc == VG_TRC_EBP_JMP_SYSCALL) {
1410 /* Do a syscall for the vthread tid. This could cause it
sewardj7e87e382002-05-03 19:09:05 +00001411 to become non-runnable. One special case: spot the
1412 client doing calls to exit() and take this as the cue
1413 to exit. */
sewardjb3eef6b2002-05-01 00:05:27 +00001414# if 0
1415 { UInt* esp; Int i;
sewardj018f7622002-05-15 21:13:39 +00001416 esp=(UInt*)VG_(threads)[tid].m_esp;
sewardjb3eef6b2002-05-01 00:05:27 +00001417 VG_(printf)("\nBEFORE\n");
1418 for (i = 10; i >= -10; i--)
1419 VG_(printf)("%2d %p = 0x%x\n", i, &esp[i], esp[i]);
1420 }
1421# endif
1422
sewardj83798bf2002-05-24 00:11:16 +00001423 /* Is the client exiting for good? */
sewardj018f7622002-05-15 21:13:39 +00001424 if (VG_(threads)[tid].m_eax == __NR_exit)
sewardj7e87e382002-05-03 19:09:05 +00001425 return VgSrc_ExitSyscall;
1426
sewardj83798bf2002-05-24 00:11:16 +00001427 /* Trap syscalls to __NR_sched_yield and just have this
1428 thread yield instead. Not essential, just an
1429 optimisation. */
1430 if (VG_(threads)[tid].m_eax == __NR_sched_yield) {
1431 SET_EAX(tid, 0); /* syscall returns with success */
1432 goto stage1; /* find a new thread to run */
1433 }
1434
sewardj51c0aaf2002-04-25 01:32:10 +00001435 sched_do_syscall(tid);
sewardjb3eef6b2002-05-01 00:05:27 +00001436
1437# if 0
1438 { UInt* esp; Int i;
sewardj018f7622002-05-15 21:13:39 +00001439 esp=(UInt*)VG_(threads)[tid].m_esp;
sewardjb3eef6b2002-05-01 00:05:27 +00001440 VG_(printf)("AFTER\n");
1441 for (i = 10; i >= -10; i--)
1442 VG_(printf)("%2d %p = 0x%x\n", i, &esp[i], esp[i]);
1443 }
1444# endif
1445
sewardj018f7622002-05-15 21:13:39 +00001446 if (VG_(threads)[tid].status == VgTs_Runnable)
sewardj51c0aaf2002-04-25 01:32:10 +00001447 continue; /* with this thread */
1448 else
1449 goto stage1;
1450 }
1451
sewardjd7fd4d22002-04-24 01:57:27 +00001452 /* It's an event we can't quickly deal with. Give up running
1453 this thread and handle things the expensive way. */
sewardje663cb92002-04-12 10:26:32 +00001454 break;
1455 }
1456
1457 /* ======================= Phase 3 of 3 =======================
1458 Handle non-trivial thread requests, mostly pthread stuff. */
1459
1460 /* Ok, we've fallen out of the dispatcher for a
1461 non-completely-trivial reason. First, update basic-block
1462 counters. */
1463
1464 done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 1;
1465 vg_assert(done_this_time >= 0);
1466 VG_(bbs_to_go) -= (ULong)done_this_time;
1467 VG_(bbs_done) += (ULong)done_this_time;
1468
1469 if (0 && trc != VG_TRC_INNER_FASTMISS)
1470 VG_(message)(Vg_DebugMsg, "thread %d: completed %d bbs, trc %d",
1471 tid, done_this_time, (Int)trc );
1472
1473 if (0 && trc != VG_TRC_INNER_FASTMISS)
1474 VG_(message)(Vg_DebugMsg, "thread %d: %ld bbs, event %s",
1475 tid, VG_(bbs_done),
1476 name_of_sched_event(trc) );
sewardj9d1b5d32002-04-17 19:40:49 +00001477
sewardje663cb92002-04-12 10:26:32 +00001478 /* Examine the thread's return code to figure out why it
1479 stopped, and handle requests. */
1480
1481 switch (trc) {
1482
1483 case VG_TRC_INNER_FASTMISS:
1484 VG_(panic)("VG_(scheduler): VG_TRC_INNER_FASTMISS");
1485 /*NOTREACHED*/
1486 break;
1487
1488 case VG_TRC_INNER_COUNTERZERO:
1489 /* Timeslice is out. Let a new thread be scheduled,
1490 simply by doing nothing, causing us to arrive back at
1491 Phase 1. */
1492 if (VG_(bbs_to_go) == 0) {
1493 goto debug_stop;
1494 }
1495 vg_assert(VG_(dispatch_ctr) == 0);
1496 break;
1497
1498 case VG_TRC_UNRESUMABLE_SIGNAL:
1499 /* It got a SIGSEGV/SIGBUS, which we need to deliver right
1500 away. Again, do nothing, so we wind up back at Phase
1501 1, whereupon the signal will be "delivered". */
1502 break;
1503
sewardje663cb92002-04-12 10:26:32 +00001504 case VG_TRC_EBP_JMP_CLIENTREQ:
1505 /* Do a client request for the vthread tid. Note that
1506 some requests will have been handled by
1507 maybe_do_trivial_clientreq(), so we don't expect to see
1508 those here.
1509 */
sewardj54cacf02002-04-12 23:24:59 +00001510 /* The thread's %EAX points at an arg block, the first
1511 word of which is the request code. */
sewardj018f7622002-05-15 21:13:39 +00001512 request_code = ((UInt*)(VG_(threads)[tid].m_eax))[0];
sewardje663cb92002-04-12 10:26:32 +00001513 if (0) {
sewardj54cacf02002-04-12 23:24:59 +00001514 VG_(sprintf)(msg_buf, "request 0x%x", request_code );
sewardje663cb92002-04-12 10:26:32 +00001515 print_sched_event(tid, msg_buf);
1516 }
1517 /* Do a non-trivial client request for thread tid. tid's
1518 %EAX points to a short vector of argument words, the
1519 first of which is the request code. The result of the
1520 request is put in tid's %EDX. Alternatively, perhaps
1521 the request causes tid to become non-runnable and/or
1522 other blocked threads become runnable. In general we
1523 can and often do mess with the state of arbitrary
1524 threads at this point. */
sewardj7e87e382002-05-03 19:09:05 +00001525 do_nontrivial_clientreq(tid);
sewardje663cb92002-04-12 10:26:32 +00001526 break;
1527
1528 default:
1529 VG_(printf)("\ntrc = %d\n", trc);
1530 VG_(panic)("VG_(scheduler), phase 3: "
1531 "unexpected thread return code");
1532 /* NOTREACHED */
1533 break;
1534
1535 } /* switch (trc) */
1536
1537 /* That completes Phase 3 of 3. Return now to the top of the
1538 main scheduler loop, to Phase 1 of 3. */
1539
1540 } /* top-level scheduler loop */
1541
1542
1543 /* NOTREACHED */
1544 VG_(panic)("scheduler: post-main-loop ?!");
1545 /* NOTREACHED */
1546
1547 debug_stop:
1548 /* If we exited because of a debug stop, print the translation
1549 of the last block executed -- by translating it again, and
1550 throwing away the result. */
1551 VG_(printf)(
1552 "======vvvvvvvv====== LAST TRANSLATION ======vvvvvvvv======\n");
sewardj018f7622002-05-15 21:13:39 +00001553 VG_(translate)( &VG_(threads)[tid],
1554 VG_(threads)[tid].m_eip, NULL, NULL, NULL );
sewardje663cb92002-04-12 10:26:32 +00001555 VG_(printf)("\n");
1556 VG_(printf)(
1557 "======^^^^^^^^====== LAST TRANSLATION ======^^^^^^^^======\n");
1558
1559 return VgSrc_BbsDone;
1560}
1561
1562
1563/* ---------------------------------------------------------------------
1564 The pthread implementation.
1565 ------------------------------------------------------------------ */
1566
1567#include <pthread.h>
1568#include <errno.h>
1569
sewardjbf290b92002-05-01 02:28:01 +00001570#define VG_PTHREAD_STACK_MIN \
sewardjc3bd5f52002-05-01 03:24:23 +00001571 (VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB)
sewardje663cb92002-04-12 10:26:32 +00001572
1573/* /usr/include/bits/pthreadtypes.h:
1574 typedef unsigned long int pthread_t;
1575*/
1576
sewardje663cb92002-04-12 10:26:32 +00001577
sewardj604ec3c2002-04-18 22:38:41 +00001578/* -----------------------------------------------------------
sewardj20917d82002-05-28 01:36:45 +00001579 Thread CREATION, JOINAGE and CANCELLATION: HELPER FNS
sewardj604ec3c2002-04-18 22:38:41 +00001580 -------------------------------------------------------- */
1581
sewardj20917d82002-05-28 01:36:45 +00001582/* We've decided to action a cancellation on tid. Make it jump to
1583 thread_exit_wrapper() in vg_libpthread.c, passing PTHREAD_CANCELED
1584 as the arg. */
1585static
1586void make_thread_jump_to_cancelhdlr ( ThreadId tid )
1587{
1588 Char msg_buf[100];
1589 vg_assert(VG_(is_valid_tid)(tid));
1590 /* Push PTHREAD_CANCELED on the stack and jump to the cancellation
1591 handler -- which is really thread_exit_wrapper() in
1592 vg_libpthread.c. */
1593 vg_assert(VG_(threads)[tid].cancel_pend != NULL);
1594 VG_(threads)[tid].m_esp -= 4;
1595 * (UInt*)(VG_(threads)[tid].m_esp) = (UInt)PTHREAD_CANCELED;
1596 VG_(threads)[tid].m_eip = (UInt)VG_(threads)[tid].cancel_pend;
1597 VG_(threads)[tid].status = VgTs_Runnable;
1598 /* Make sure we aren't cancelled again whilst handling this
1599 cancellation. */
1600 VG_(threads)[tid].cancel_st = False;
1601 if (VG_(clo_trace_sched)) {
1602 VG_(sprintf)(msg_buf,
1603 "jump to cancellation handler (hdlr = %p)",
1604 VG_(threads)[tid].cancel_pend);
1605 print_sched_event(tid, msg_buf);
1606 }
1607}
1608
1609
1610
sewardjb48e5002002-05-13 00:16:03 +00001611/* Release resources and generally clean up once a thread has finally
1612 disappeared. */
1613static
1614void cleanup_after_thread_exited ( ThreadId tid )
1615{
sewardj3a951cf2002-05-15 22:25:47 +00001616 vki_ksigset_t irrelevant_sigmask;
sewardj018f7622002-05-15 21:13:39 +00001617 vg_assert(VG_(is_valid_or_empty_tid)(tid));
1618 vg_assert(VG_(threads)[tid].status == VgTs_Empty);
sewardjb48e5002002-05-13 00:16:03 +00001619 /* Mark its stack no-access */
1620 if (VG_(clo_instrument) && tid != 1)
sewardj018f7622002-05-15 21:13:39 +00001621 VGM_(make_noaccess)( VG_(threads)[tid].stack_base,
1622 VG_(threads)[tid].stack_size );
sewardjb48e5002002-05-13 00:16:03 +00001623 /* Forget about any pending signals directed specifically at this
sewardj018f7622002-05-15 21:13:39 +00001624 thread, and get rid of signal handlers specifically arranged for
1625 this thread. */
sewardj3a951cf2002-05-15 22:25:47 +00001626 VG_(block_all_host_signals)( &irrelevant_sigmask );
sewardj018f7622002-05-15 21:13:39 +00001627 VG_(handle_SCSS_change)( False /* lazy update */ );
sewardjb48e5002002-05-13 00:16:03 +00001628}
1629
1630
sewardj20917d82002-05-28 01:36:45 +00001631/* Look for matching pairs of threads waiting for joiners and threads
1632 waiting for joinees. For each such pair copy the return value of
1633 the joinee into the joiner, let the joiner resume and discard the
1634 joinee. */
1635static
1636void maybe_rendezvous_joiners_and_joinees ( void )
1637{
1638 Char msg_buf[100];
1639 void** thread_return;
1640 ThreadId jnr, jee;
1641
1642 for (jnr = 1; jnr < VG_N_THREADS; jnr++) {
1643 if (VG_(threads)[jnr].status != VgTs_WaitJoinee)
1644 continue;
1645 jee = VG_(threads)[jnr].joiner_jee_tid;
1646 if (jee == VG_INVALID_THREADID)
1647 continue;
1648 vg_assert(VG_(is_valid_tid)(jee));
1649 if (VG_(threads)[jee].status != VgTs_WaitJoiner)
1650 continue;
1651 /* ok! jnr is waiting to join with jee, and jee is waiting to be
1652 joined by ... well, any thread. So let's do it! */
1653
1654 /* Copy return value to where joiner wants it. */
1655 thread_return = VG_(threads)[jnr].joiner_thread_return;
1656 if (thread_return != NULL) {
1657 /* CHECK thread_return writable */
1658 *thread_return = VG_(threads)[jee].joinee_retval;
1659 /* Not really right, since it makes the thread's return value
1660 appear to be defined even if it isn't. */
1661 if (VG_(clo_instrument))
1662 VGM_(make_readable)( (Addr)thread_return, sizeof(void*) );
1663 }
1664
1665 /* Joinee is discarded */
1666 VG_(threads)[jee].status = VgTs_Empty; /* bye! */
1667 cleanup_after_thread_exited ( jee );
1668 if (VG_(clo_trace_sched)) {
1669 VG_(sprintf)(msg_buf,
1670 "rendezvous with joinee %d. %d resumes, %d exits.",
1671 jee, jnr, jee );
1672 print_sched_event(jnr, msg_buf);
1673 }
1674
1675 /* joiner returns with success */
1676 VG_(threads)[jnr].status = VgTs_Runnable;
1677 SET_EDX(jnr, 0);
1678 }
1679}
1680
1681
1682/* -----------------------------------------------------------
1683 Thread CREATION, JOINAGE and CANCELLATION: REQUESTS
1684 -------------------------------------------------------- */
1685
sewardje663cb92002-04-12 10:26:32 +00001686static
sewardj8ad94e12002-05-29 00:10:20 +00001687void do__cleanup_push ( ThreadId tid, CleanupEntry* cu )
1688{
1689 Int sp;
1690 Char msg_buf[100];
1691 vg_assert(VG_(is_valid_tid)(tid));
1692 sp = VG_(threads)[tid].custack_used;
1693 if (VG_(clo_trace_sched)) {
1694 VG_(sprintf)(msg_buf,
1695 "cleanup_push (fn %p, arg %p) -> slot %d",
1696 cu->fn, cu->arg, sp);
1697 print_sched_event(tid, msg_buf);
1698 }
1699 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1700 if (sp == VG_N_CLEANUPSTACK)
1701 VG_(panic)("do__cleanup_push: VG_N_CLEANUPSTACK is too small."
1702 " Increase and recompile.");
1703 VG_(threads)[tid].custack[sp] = *cu;
1704 sp++;
1705 VG_(threads)[tid].custack_used = sp;
1706 SET_EDX(tid, 0);
1707}
1708
1709
1710static
1711void do__cleanup_pop ( ThreadId tid, CleanupEntry* cu )
1712{
1713 Int sp;
1714 Char msg_buf[100];
1715 vg_assert(VG_(is_valid_tid)(tid));
1716 sp = VG_(threads)[tid].custack_used;
1717 if (VG_(clo_trace_sched)) {
1718 VG_(sprintf)(msg_buf,
1719 "cleanup_pop from slot %d", sp);
1720 print_sched_event(tid, msg_buf);
1721 }
1722 vg_assert(sp >= 0 && sp <= VG_N_CLEANUPSTACK);
1723 if (sp == 0) {
1724 SET_EDX(tid, -1);
1725 return;
1726 }
1727 sp--;
1728 *cu = VG_(threads)[tid].custack[sp];
1729 VG_(threads)[tid].custack_used = sp;
1730 SET_EDX(tid, 0);
1731}
1732
1733
1734static
sewardjff42d1d2002-05-22 13:17:31 +00001735void do_pthread_yield ( ThreadId tid )
1736{
1737 Char msg_buf[100];
1738 vg_assert(VG_(is_valid_tid)(tid));
sewardjff42d1d2002-05-22 13:17:31 +00001739 if (VG_(clo_trace_sched)) {
1740 VG_(sprintf)(msg_buf, "yield");
1741 print_sched_event(tid, msg_buf);
1742 }
1743 SET_EDX(tid, 0);
1744}
1745
1746
1747static
sewardj20917d82002-05-28 01:36:45 +00001748void do__testcancel ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00001749{
sewardj7989d0c2002-05-28 11:00:01 +00001750 Char msg_buf[100];
sewardjb48e5002002-05-13 00:16:03 +00001751 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001752 if (VG_(clo_trace_sched)) {
1753 VG_(sprintf)(msg_buf, "testcancel");
1754 print_sched_event(tid, msg_buf);
1755 }
sewardj20917d82002-05-28 01:36:45 +00001756 if (/* is there a cancellation pending on this thread? */
1757 VG_(threads)[tid].cancel_pend != NULL
1758 && /* is this thread accepting cancellations? */
1759 VG_(threads)[tid].cancel_st) {
1760 /* Ok, let's do the cancellation. */
1761 make_thread_jump_to_cancelhdlr ( tid );
sewardje663cb92002-04-12 10:26:32 +00001762 } else {
sewardj20917d82002-05-28 01:36:45 +00001763 /* No, we keep going. */
1764 SET_EDX(tid, 0);
sewardje663cb92002-04-12 10:26:32 +00001765 }
sewardje663cb92002-04-12 10:26:32 +00001766}
1767
1768
1769static
sewardj20917d82002-05-28 01:36:45 +00001770void do__set_cancelstate ( ThreadId tid, Int state )
1771{
1772 Bool old_st;
sewardj7989d0c2002-05-28 11:00:01 +00001773 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001774 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001775 if (VG_(clo_trace_sched)) {
1776 VG_(sprintf)(msg_buf, "set_cancelstate to %d (%s)", state,
1777 state==PTHREAD_CANCEL_ENABLE
1778 ? "ENABLE"
1779 : (state==PTHREAD_CANCEL_DISABLE ? "DISABLE" : "???"));
1780 print_sched_event(tid, msg_buf);
1781 }
sewardj20917d82002-05-28 01:36:45 +00001782 old_st = VG_(threads)[tid].cancel_st;
1783 if (state == PTHREAD_CANCEL_ENABLE) {
1784 VG_(threads)[tid].cancel_st = True;
1785 } else
1786 if (state == PTHREAD_CANCEL_DISABLE) {
1787 VG_(threads)[tid].cancel_st = False;
1788 } else {
1789 VG_(panic)("do__set_cancelstate");
1790 }
1791 SET_EDX(tid, old_st ? PTHREAD_CANCEL_ENABLE
1792 : PTHREAD_CANCEL_DISABLE);
1793}
1794
1795
1796static
1797void do__set_canceltype ( ThreadId tid, Int type )
1798{
1799 Bool old_ty;
sewardj7989d0c2002-05-28 11:00:01 +00001800 Char msg_buf[100];
sewardj20917d82002-05-28 01:36:45 +00001801 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001802 if (VG_(clo_trace_sched)) {
1803 VG_(sprintf)(msg_buf, "set_canceltype to %d (%s)", type,
1804 type==PTHREAD_CANCEL_ASYNCHRONOUS
1805 ? "ASYNCHRONOUS"
1806 : (type==PTHREAD_CANCEL_DEFERRED ? "DEFERRED" : "???"));
1807 print_sched_event(tid, msg_buf);
1808 }
sewardj20917d82002-05-28 01:36:45 +00001809 old_ty = VG_(threads)[tid].cancel_ty;
1810 if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
1811 VG_(threads)[tid].cancel_ty = False;
1812 } else
1813 if (type == PTHREAD_CANCEL_DEFERRED) {
1814 VG_(threads)[tid].cancel_st = True;
1815 } else {
1816 VG_(panic)("do__set_canceltype");
1817 }
1818 SET_EDX(tid, old_ty ? PTHREAD_CANCEL_DEFERRED
1819 : PTHREAD_CANCEL_ASYNCHRONOUS);
1820}
1821
1822
sewardj7989d0c2002-05-28 11:00:01 +00001823/* Set or get the detach state for thread det. */
sewardj20917d82002-05-28 01:36:45 +00001824static
sewardj7989d0c2002-05-28 11:00:01 +00001825void do__set_or_get_detach ( ThreadId tid,
1826 Int what, ThreadId det )
sewardj20917d82002-05-28 01:36:45 +00001827{
sewardj7989d0c2002-05-28 11:00:01 +00001828 ThreadId i;
1829 Char msg_buf[100];
1830 /* VG_(printf)("do__set_or_get_detach tid %d what %d det %d\n",
1831 tid, what, det); */
sewardj20917d82002-05-28 01:36:45 +00001832 vg_assert(VG_(is_valid_tid)(tid));
sewardj7989d0c2002-05-28 11:00:01 +00001833 if (VG_(clo_trace_sched)) {
1834 VG_(sprintf)(msg_buf, "set_or_get_detach %d (%s) for tid %d", what,
1835 what==0 ? "not-detached" : (
1836 what==1 ? "detached" : (
1837 what==2 ? "fetch old value" : "???")),
1838 det );
1839 print_sched_event(tid, msg_buf);
1840 }
1841
1842 if (!VG_(is_valid_tid)(det)) {
1843 SET_EDX(tid, -1);
1844 return;
1845 }
1846
sewardj20917d82002-05-28 01:36:45 +00001847 switch (what) {
1848 case 2: /* get */
sewardj7989d0c2002-05-28 11:00:01 +00001849 SET_EDX(tid, VG_(threads)[det].detached ? 1 : 0);
sewardj20917d82002-05-28 01:36:45 +00001850 return;
sewardj7989d0c2002-05-28 11:00:01 +00001851 case 1: /* set detached. If someone is in a join-wait for det,
1852 do not detach. */
1853 for (i = 1; i < VG_N_THREADS; i++) {
1854 if (VG_(threads)[i].status == VgTs_WaitJoinee
1855 && VG_(threads)[i].joiner_jee_tid == det) {
1856 SET_EDX(tid, 0);
1857 if (VG_(clo_trace_sched)) {
1858 VG_(sprintf)(msg_buf,
1859 "tid %d not detached because %d in join-wait for it %d",
1860 det, i);
1861 print_sched_event(tid, msg_buf);
1862 }
1863 return;
1864 }
1865 }
1866 VG_(threads)[det].detached = True;
sewardj20917d82002-05-28 01:36:45 +00001867 SET_EDX(tid, 0);
1868 return;
1869 case 0: /* set not detached */
sewardj7989d0c2002-05-28 11:00:01 +00001870 VG_(threads)[det].detached = False;
sewardj20917d82002-05-28 01:36:45 +00001871 SET_EDX(tid, 0);
1872 return;
1873 default:
1874 VG_(panic)("do__set_or_get_detach");
1875 }
1876}
1877
1878
1879static
1880void do__set_cancelpend ( ThreadId tid,
1881 ThreadId cee,
1882 void (*cancelpend_hdlr)(void*) )
sewardje663cb92002-04-12 10:26:32 +00001883{
1884 Char msg_buf[100];
1885
sewardj20917d82002-05-28 01:36:45 +00001886 vg_assert(VG_(is_valid_tid)(tid));
1887 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1888
sewardj7989d0c2002-05-28 11:00:01 +00001889 if (!VG_(is_valid_tid)(cee)) {
1890 if (VG_(clo_trace_sched)) {
1891 VG_(sprintf)(msg_buf,
1892 "set_cancelpend for invalid tid %d", cee);
1893 print_sched_event(tid, msg_buf);
1894 }
1895 SET_EDX(tid, -VKI_ESRCH);
1896 return;
1897 }
sewardj20917d82002-05-28 01:36:45 +00001898
1899 VG_(threads)[cee].cancel_pend = cancelpend_hdlr;
1900
1901 if (VG_(clo_trace_sched)) {
1902 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001903 "set_cancelpend (hdlr = %p, set by tid %d)",
sewardj20917d82002-05-28 01:36:45 +00001904 cancelpend_hdlr, tid);
1905 print_sched_event(cee, msg_buf);
1906 }
1907
1908 /* Thread doing the cancelling returns with success. */
1909 SET_EDX(tid, 0);
1910
1911 /* Perhaps we can nuke the cancellee right now? */
1912 do__testcancel(cee);
1913}
1914
1915
/* Implement pthread_join: thread tid asks to join on thread jee,
   with the joinee's exit value to be written to *thread_return (if
   non-NULL).  Errors (EDEADLK, EINVAL) are returned in tid's %EDX;
   otherwise tid blocks in VgTs_WaitJoinee until the rendezvous
   machinery resumes it. */
static
void do_pthread_join ( ThreadId tid,
                       ThreadId jee, void** thread_return )
{
   Char     msg_buf[100];
   ThreadId i;
   /* jee, the joinee, is the thread specified as an arg in thread
      tid's call to pthread_join.  So tid is the join-er. */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   /* Joining on yourself deadlocks; report it instead. */
   if (jee == tid) {
      SET_EDX(tid, EDEADLK); /* libc constant, not a kernel one */
      VG_(threads)[tid].status = VgTs_Runnable;
      return;
   }

   /* Flush any completed pairs, so as to make sure what we're looking
      at is up-to-date. */
   maybe_rendezvous_joiners_and_joinees();

   /* Is this a sane request? */
   if (jee < 0
       || jee >= VG_N_THREADS
       || VG_(threads)[jee].status == VgTs_Empty) {
      /* Invalid thread to join to. */
      SET_EDX(tid, EINVAL);
      VG_(threads)[tid].status = VgTs_Runnable;
      return;
   }

   /* Is anyone else already in a join-wait for jee?  Only one joiner
      per joinee is allowed. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (i == tid) continue;
      if (VG_(threads)[i].status == VgTs_WaitJoinee
          && VG_(threads)[i].joiner_jee_tid == jee) {
         /* Someone already did join on this thread */
         SET_EDX(tid, EINVAL);
         VG_(threads)[tid].status = VgTs_Runnable;
         return;
      }
   }

   /* Mark this thread as waiting for the joinee. */
   VG_(threads)[tid].status = VgTs_WaitJoinee;
   VG_(threads)[tid].joiner_thread_return = thread_return;
   VG_(threads)[tid].joiner_jee_tid = jee;

   /* Look for matching joiners and joinees and do the right thing;
      the joinee may already have exited, in which case this resumes
      us immediately. */
   maybe_rendezvous_joiners_and_joinees();

   /* Return value is irrelevant since this thread becomes
      non-runnable.  maybe_resume_joiner() will cause it to return the
      right value when it resumes. */

   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf,
         "wait for joinee %d (may already be ready)", jee);
      print_sched_event(tid, msg_buf);
   }
}
1977
1978
sewardj20917d82002-05-28 01:36:45 +00001979/* ( void* ): calling thread waits for joiner and returns the void* to
1980 it. This is one of two ways in which a thread can finally exit --
1981 the other is do__quit. */
sewardje663cb92002-04-12 10:26:32 +00001982static
sewardj20917d82002-05-28 01:36:45 +00001983void do__wait_joiner ( ThreadId tid, void* retval )
sewardje663cb92002-04-12 10:26:32 +00001984{
sewardj20917d82002-05-28 01:36:45 +00001985 Char msg_buf[100];
1986 vg_assert(VG_(is_valid_tid)(tid));
1987 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1988 if (VG_(clo_trace_sched)) {
1989 VG_(sprintf)(msg_buf,
sewardj7989d0c2002-05-28 11:00:01 +00001990 "do__wait_joiner(retval = %p) (non-detached thread exit)", retval);
sewardj20917d82002-05-28 01:36:45 +00001991 print_sched_event(tid, msg_buf);
1992 }
1993 VG_(threads)[tid].status = VgTs_WaitJoiner;
1994 VG_(threads)[tid].joinee_retval = retval;
1995 maybe_rendezvous_joiners_and_joinees();
1996}
1997
1998
1999/* ( no-args ): calling thread disappears from the system forever.
2000 Reclaim resources. */
2001static
2002void do__quit ( ThreadId tid )
2003{
2004 Char msg_buf[100];
2005 vg_assert(VG_(is_valid_tid)(tid));
2006 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
2007 VG_(threads)[tid].status = VgTs_Empty; /* bye! */
2008 cleanup_after_thread_exited ( tid );
sewardj20917d82002-05-28 01:36:45 +00002009 if (VG_(clo_trace_sched)) {
sewardj7989d0c2002-05-28 11:00:01 +00002010 VG_(sprintf)(msg_buf, "do__quit (detached thread exit)");
sewardj20917d82002-05-28 01:36:45 +00002011 print_sched_event(tid, msg_buf);
2012 }
2013 /* Return value is irrelevant; this thread will not get
2014 rescheduled. */
2015}
2016
2017
/* Bogus return address pushed under a new thread's root function by
   do__apply_in_new_thread.  The root function must never return, so
   this should never be entered; if it is, it will be on the simulated
   CPU, and we panic. */
static
void do__apply_in_new_thread_bogusRA ( void )
{
   VG_(panic)("do__apply_in_new_thread_bogusRA");
}
2025
/* (Fn, Arg): Create a new thread and run Fn applied to Arg in it.  Fn
   MUST NOT return -- ever.  Eventually it will do either __QUIT or
   __WAIT_JOINER.  The child's tid is returned to the parent in the
   parent's %EDX.  The child starts Runnable, with default attributes
   (deferred cancellation, not detached) and its parent's signal
   mask. */
static
void do__apply_in_new_thread ( ThreadId parent_tid,
                               void* (*fn)(void *),
                               void* arg )
{
   Addr     new_stack;
   UInt     new_stk_szb;
   ThreadId tid;
   Char     msg_buf[100];

   /* Paranoia: pthread_t values are stuffed into UInt-sized slots
      elsewhere, so the sizes must agree. */
   vg_assert(sizeof(pthread_t) == sizeof(UInt));

   vg_assert(VG_(threads)[parent_tid].status != VgTs_Empty);

   /* Grab a free slot for the child. */
   tid = vg_alloc_ThreadState();

   /* If we've created the main thread's tid, we're in deep trouble :) */
   vg_assert(tid != 1);
   vg_assert(VG_(is_valid_or_empty_tid)(tid));

   /* Copy the parent's CPU state into the child's, in a roundabout
      way (via baseBlock). */
   VG_(load_thread_state)(parent_tid);
   VG_(save_thread_state)(tid);

   /* Consider allocating the child a stack, if the one it already has
      is inadequate. */
   new_stk_szb = VG_PTHREAD_STACK_MIN;

   if (new_stk_szb > VG_(threads)[tid].stack_size) {
      /* Again, for good measure :) We definitely don't want to be
         allocating a stack for the main thread. */
      vg_assert(tid != 1);
      /* for now, we don't handle the case of anything other than
         assigning it for the first time. */
      vg_assert(VG_(threads)[tid].stack_size == 0);
      vg_assert(VG_(threads)[tid].stack_base == (Addr)NULL);
      new_stack = (Addr)VG_(get_memory_from_mmap)( new_stk_szb );
      VG_(threads)[tid].stack_base = new_stack;
      VG_(threads)[tid].stack_size = new_stk_szb;
      VG_(threads)[tid].stack_highest_word
         = new_stack + new_stk_szb
           - VG_AR_CLIENT_STACKBASE_REDZONE_SZB; /* -4 ??? */;
   }

   /* Point %esp just below the redzone at the top of the stack. */
   VG_(threads)[tid].m_esp
      = VG_(threads)[tid].stack_base
        + VG_(threads)[tid].stack_size
        - VG_AR_CLIENT_STACKBASE_REDZONE_SZB;

   /* The redzone itself must never be touched by the client. */
   if (VG_(clo_instrument))
      VGM_(make_noaccess)( VG_(threads)[tid].m_esp,
                           VG_AR_CLIENT_STACKBASE_REDZONE_SZB );

   /* push arg */
   VG_(threads)[tid].m_esp -= 4;
   * (UInt*)(VG_(threads)[tid].m_esp) = (UInt)arg;

   /* push (bogus) return address -- fn must never return, and if it
      does, do__apply_in_new_thread_bogusRA panics. */
   VG_(threads)[tid].m_esp -= 4;
   * (UInt*)(VG_(threads)[tid].m_esp)
      = (UInt)&do__apply_in_new_thread_bogusRA;

   /* The two words just pushed (arg + return address) are valid. */
   if (VG_(clo_instrument))
      VGM_(make_readable)( VG_(threads)[tid].m_esp, 2 * 4 );

   /* this is where we start */
   VG_(threads)[tid].m_eip = (UInt)fn;

   if (VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf,
         "new thread, created by %d", parent_tid );
      print_sched_event(tid, msg_buf);
   }

   /* Create new thread with default attrs:
      deferred cancellation, not detached
   */
   mostly_clear_thread_record(tid);
   VG_(threads)[tid].status = VgTs_Runnable;

   /* We inherit our parent's signal mask. */
   VG_(threads)[tid].sig_mask = VG_(threads)[parent_tid].sig_mask;
   VG_(ksigemptyset)(&VG_(threads)[tid].sigs_waited_for);

   /* return child's tid to parent */
   SET_EDX(parent_tid, tid); /* success */
}
2118
2119
sewardj604ec3c2002-04-18 22:38:41 +00002120/* -----------------------------------------------------------
2121 MUTEXes
2122 -------------------------------------------------------- */
2123
sewardj604ec3c2002-04-18 22:38:41 +00002124/* pthread_mutex_t is a struct with at 5 words:
sewardje663cb92002-04-12 10:26:32 +00002125 typedef struct
2126 {
2127 int __m_reserved; -- Reserved for future use
2128 int __m_count; -- Depth of recursive locking
2129 _pthread_descr __m_owner; -- Owner thread (if recursive or errcheck)
2130 int __m_kind; -- Mutex kind: fast, recursive or errcheck
2131 struct _pthread_fastlock __m_lock; -- Underlying fast lock
2132 } pthread_mutex_t;
sewardj604ec3c2002-04-18 22:38:41 +00002133
sewardj6072c362002-04-19 14:40:57 +00002134 #define PTHREAD_MUTEX_INITIALIZER \
2135 {0, 0, 0, PTHREAD_MUTEX_TIMED_NP, __LOCK_INITIALIZER}
2136 # define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \
2137 {0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, __LOCK_INITIALIZER}
2138 # define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP \
2139 {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, __LOCK_INITIALIZER}
2140 # define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
2141 {0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, __LOCK_INITIALIZER}
sewardj604ec3c2002-04-18 22:38:41 +00002142
sewardj6072c362002-04-19 14:40:57 +00002143 How we use it:
sewardj604ec3c2002-04-18 22:38:41 +00002144
sewardj6072c362002-04-19 14:40:57 +00002145 __m_kind never changes and indicates whether or not it is recursive.
2146
2147 __m_count indicates the lock count; if 0, the mutex is not owned by
2148 anybody.
2149
2150 __m_owner has a ThreadId value stuffed into it. We carefully arrange
2151 that ThreadId == 0 is invalid (VG_INVALID_THREADID), so that
2152 statically initialised mutexes correctly appear
2153 to belong to nobody.
2154
 2155   In summary, a not-in-use mutex is distinguished by having __m_owner
2156 == 0 (VG_INVALID_THREADID) and __m_count == 0 too. If one of those
2157 conditions holds, the other should too.
2158
2159 There is no linked list of threads waiting for this mutex. Instead
 2160   a thread in WaitMX state points at the mutex with its associated_mx
 2161   field.  This makes _unlock() inefficient, but simple to implement the
 2162   right semantics vis-a-vis signals.
sewardje663cb92002-04-12 10:26:32 +00002163
sewardj604ec3c2002-04-18 22:38:41 +00002164 We don't have to deal with mutex initialisation; the client side
sewardj6072c362002-04-19 14:40:57 +00002165 deals with that for us.
2166*/
sewardje663cb92002-04-12 10:26:32 +00002167
sewardj3b5d8862002-04-20 13:53:23 +00002168/* Helper fns ... */
2169static
2170void release_one_thread_waiting_on_mutex ( pthread_mutex_t* mutex,
2171 Char* caller )
2172{
2173 Int i;
2174 Char msg_buf[100];
2175
2176 /* Find some arbitrary thread waiting on this mutex, and make it
2177 runnable. If none are waiting, mark the mutex as not held. */
2178 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00002179 if (VG_(threads)[i].status == VgTs_Empty)
sewardj3b5d8862002-04-20 13:53:23 +00002180 continue;
sewardj018f7622002-05-15 21:13:39 +00002181 if (VG_(threads)[i].status == VgTs_WaitMX
2182 && VG_(threads)[i].associated_mx == mutex)
sewardj3b5d8862002-04-20 13:53:23 +00002183 break;
2184 }
2185
2186 vg_assert(i <= VG_N_THREADS);
2187 if (i == VG_N_THREADS) {
2188 /* Nobody else is waiting on it. */
2189 mutex->__m_count = 0;
2190 mutex->__m_owner = VG_INVALID_THREADID;
2191 } else {
2192 /* Notionally transfer the hold to thread i, whose
2193 pthread_mutex_lock() call now returns with 0 (success). */
2194 /* The .count is already == 1. */
sewardj018f7622002-05-15 21:13:39 +00002195 vg_assert(VG_(threads)[i].associated_mx == mutex);
sewardj3b5d8862002-04-20 13:53:23 +00002196 mutex->__m_owner = (_pthread_descr)i;
sewardj018f7622002-05-15 21:13:39 +00002197 VG_(threads)[i].status = VgTs_Runnable;
2198 VG_(threads)[i].associated_mx = NULL;
sewardj5f07b662002-04-23 16:52:51 +00002199 /* m_edx already holds pth_mx_lock() success (0) */
sewardj3b5d8862002-04-20 13:53:23 +00002200
2201 if (VG_(clo_trace_pthread_level) >= 1) {
2202 VG_(sprintf)(msg_buf, "%s mx %p: RESUME",
2203 caller, mutex );
2204 print_pthread_event(i, msg_buf);
2205 }
2206 }
2207}
2208
sewardje663cb92002-04-12 10:26:32 +00002209
/* Simulate pthread_mutex_lock() / pthread_mutex_trylock() on behalf
   of thread tid.  mutexV is the client's mutex.  The call's result is
   delivered to the client by writing it into tid's %EDX (SET_EDX):
   0 on success, or an errno value (EINVAL/EBUSY/EDEADLK).  If the
   mutex is held by another thread and this is not a trylock, tid is
   moved to state VgTs_WaitMX until the holder's unlock hands the
   mutex over (see release_one_thread_waiting_on_mutex). */
static
void do_pthread_mutex_lock( ThreadId tid,
                            Bool is_trylock,
                            void* /* pthread_mutex_t* */ mutexV )
{
   Char  msg_buf[100];
   Char* caller
      = is_trylock ? "pthread_mutex_trylock"
                   : "pthread_mutex_lock ";

   pthread_mutex_t* mutex = (pthread_mutex_t*)mutexV;

   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf, "%s mx %p ...", caller, mutex );
      print_pthread_event(tid, msg_buf);
   }

   /* Paranoia ...: the requesting thread must be valid and runnable. */
   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   /* POSIX doesn't mandate this, but for sanity ... */
   if (mutex == NULL) {
      /* VG_(printf)("NULL mutex\n"); */
      SET_EDX(tid, EINVAL);
      return;
   }

   /* More paranoia ...: reject mutexes whose kind field looks
      corrupted, or whose lock count has gone negative. */
   switch (mutex->__m_kind) {
#     ifndef GLIBC_2_1
      case PTHREAD_MUTEX_TIMED_NP:
      case PTHREAD_MUTEX_ADAPTIVE_NP:
#     endif
#     ifdef GLIBC_2_1
      case PTHREAD_MUTEX_FAST_NP:
#     endif
      case PTHREAD_MUTEX_RECURSIVE_NP:
      case PTHREAD_MUTEX_ERRORCHECK_NP:
         if (mutex->__m_count >= 0) break;
         /* else fall thru */
      default:
         /* VG_(printf)("unknown __m_kind %d in mutex\n", mutex->__m_kind); */
         SET_EDX(tid, EINVAL);
         return;
   }

   if (mutex->__m_count > 0) {

      vg_assert(VG_(is_valid_tid)((ThreadId)mutex->__m_owner));

      /* Someone has it already. */
      if ((ThreadId)mutex->__m_owner == tid) {
         /* It's locked -- by me! */
         if (mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
            /* Recursive mutex: bump the count and return 0 (success). */
            mutex->__m_count++;
            SET_EDX(tid, 0);
            if (0)
               VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n",
                           tid, mutex, mutex->__m_count);
            return;
         } else {
            /* Non-recursive relock by the owner: EBUSY for a trylock,
               EDEADLK for a blocking lock. */
            if (is_trylock)
               SET_EDX(tid, EBUSY);
            else
               SET_EDX(tid, EDEADLK);
            return;
         }
      } else {
         /* Someone else has it; we have to wait.  Mark ourselves
            thusly. */
         /* GUARD: __m_count > 0 && __m_owner is valid */
         if (is_trylock) {
            /* caller is polling; so return immediately. */
            SET_EDX(tid, EBUSY);
         } else {
            /* %EDX is set now; it is only seen by the client when the
               thread is resumed after the mutex is handed over. */
            VG_(threads)[tid].status        = VgTs_WaitMX;
            VG_(threads)[tid].associated_mx = mutex;
            SET_EDX(tid, 0); /* pth_mx_lock success value */
            if (VG_(clo_trace_pthread_level) >= 1) {
               VG_(sprintf)(msg_buf, "%s mx %p: BLOCK",
                            caller, mutex );
               print_pthread_event(tid, msg_buf);
            }
         }
         return;
      }

   } else {
      /* Nobody owns it.  Sanity check ... */
      vg_assert(mutex->__m_owner == VG_INVALID_THREADID);
      /* We get it! [for the first time]. */
      mutex->__m_count = 1;
      mutex->__m_owner = (_pthread_descr)tid;
      vg_assert(VG_(threads)[tid].associated_mx == NULL);
      /* return 0 (success). */
      SET_EDX(tid, 0);
   }

}
2311
2312
/* Simulate pthread_mutex_unlock() on behalf of thread tid.  Delivers
   the result to the client via tid's %EDX: 0 on success, EINVAL for a
   bad/NULL mutex, EPERM if tid does not hold it.  On a final unlock,
   wakes at most one thread blocked (VgTs_WaitMX) on the mutex. */
static
void do_pthread_mutex_unlock ( ThreadId tid,
                               void* /* pthread_mutex_t* */ mutexV )
{
   Char msg_buf[100];
   pthread_mutex_t* mutex = (pthread_mutex_t*)mutexV;

   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf, "pthread_mutex_unlock mx %p ...", mutex );
      print_pthread_event(tid, msg_buf);
   }

   /* Paranoia ...: the requesting thread must be valid and runnable. */
   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   if (mutex == NULL) {
      SET_EDX(tid, EINVAL);
      return;
   }

   /* More paranoia ...: reject mutexes whose kind field looks
      corrupted, or whose lock count has gone negative. */
   switch (mutex->__m_kind) {
#     ifndef GLIBC_2_1
      case PTHREAD_MUTEX_TIMED_NP:
      case PTHREAD_MUTEX_ADAPTIVE_NP:
#     endif
#     ifdef GLIBC_2_1
      case PTHREAD_MUTEX_FAST_NP:
#     endif
      case PTHREAD_MUTEX_RECURSIVE_NP:
      case PTHREAD_MUTEX_ERRORCHECK_NP:
         if (mutex->__m_count >= 0) break;
         /* else fall thru */
      default:
         SET_EDX(tid, EINVAL);
         return;
   }

   /* Barf if we don't currently hold the mutex. */
   if (mutex->__m_count == 0 /* nobody holds it */
       || (ThreadId)mutex->__m_owner != tid /* we don't hold it */) {
      SET_EDX(tid, EPERM);
      return;
   }

   /* If it's a multiply-locked recursive mutex, just decrement the
      lock count and return. */
   if (mutex->__m_count > 1) {
      vg_assert(mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
      mutex->__m_count --;
      SET_EDX(tid, 0); /* success */
      return;
   }

   /* Now we're sure it is locked exactly once, and by the thread who
      is now doing an unlock on it. */
   vg_assert(mutex->__m_count == 1);
   vg_assert((ThreadId)mutex->__m_owner == tid);

   /* Release at max one thread waiting on this mutex.  The caller
      label is "pthread_mutex_lock" because that is the call the
      released thread is blocked inside. */
   release_one_thread_waiting_on_mutex ( mutex, "pthread_mutex_lock" );

   /* Our (tid's) pth_unlock() returns with 0 (success). */
   SET_EDX(tid, 0); /* Success. */
}
2379
2380
sewardj6072c362002-04-19 14:40:57 +00002381/* -----------------------------------------------------------
2382 CONDITION VARIABLES
2383 -------------------------------------------------------- */
sewardje663cb92002-04-12 10:26:32 +00002384
sewardj6072c362002-04-19 14:40:57 +00002385/* The relevant native types are as follows:
2386 (copied from /usr/include/bits/pthreadtypes.h)
sewardj77e466c2002-04-14 02:29:29 +00002387
sewardj6072c362002-04-19 14:40:57 +00002388 -- Conditions (not abstract because of PTHREAD_COND_INITIALIZER
2389 typedef struct
2390 {
2391 struct _pthread_fastlock __c_lock; -- Protect against concurrent access
2392 _pthread_descr __c_waiting; -- Threads waiting on this condition
2393 } pthread_cond_t;
sewardj77e466c2002-04-14 02:29:29 +00002394
sewardj6072c362002-04-19 14:40:57 +00002395 -- Attribute for conditionally variables.
2396 typedef struct
2397 {
2398 int __dummy;
2399 } pthread_condattr_t;
sewardj77e466c2002-04-14 02:29:29 +00002400
sewardj6072c362002-04-19 14:40:57 +00002401 #define PTHREAD_COND_INITIALIZER {__LOCK_INITIALIZER, 0}
sewardj77e466c2002-04-14 02:29:29 +00002402
sewardj3b5d8862002-04-20 13:53:23 +00002403 We don't use any fields of pthread_cond_t for anything at all.
2404 Only the identity of the CVs is important.
sewardj6072c362002-04-19 14:40:57 +00002405
2406 Linux pthreads supports no attributes on condition variables, so we
sewardj3b5d8862002-04-20 13:53:23 +00002407 don't need to think too hard there. */
sewardj6072c362002-04-19 14:40:57 +00002408
sewardj77e466c2002-04-14 02:29:29 +00002409
sewardj5f07b662002-04-23 16:52:51 +00002410static
2411void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid )
2412{
2413 Char msg_buf[100];
2414 pthread_mutex_t* mx;
2415 pthread_cond_t* cv;
2416
sewardjb48e5002002-05-13 00:16:03 +00002417 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002418 && VG_(threads)[tid].status == VgTs_WaitCV
2419 && VG_(threads)[tid].awaken_at != 0xFFFFFFFF);
2420 mx = VG_(threads)[tid].associated_mx;
sewardj5f07b662002-04-23 16:52:51 +00002421 vg_assert(mx != NULL);
sewardj018f7622002-05-15 21:13:39 +00002422 cv = VG_(threads)[tid].associated_cv;
sewardj5f07b662002-04-23 16:52:51 +00002423 vg_assert(cv != NULL);
2424
2425 if (mx->__m_owner == VG_INVALID_THREADID) {
2426 /* Currently unheld; hand it out to thread tid. */
2427 vg_assert(mx->__m_count == 0);
sewardj018f7622002-05-15 21:13:39 +00002428 VG_(threads)[tid].status = VgTs_Runnable;
sewardjc3bd5f52002-05-01 03:24:23 +00002429 SET_EDX(tid, ETIMEDOUT); /* pthread_cond_wait return value */
sewardj018f7622002-05-15 21:13:39 +00002430 VG_(threads)[tid].associated_cv = NULL;
2431 VG_(threads)[tid].associated_mx = NULL;
sewardj5f07b662002-04-23 16:52:51 +00002432 mx->__m_owner = (_pthread_descr)tid;
2433 mx->__m_count = 1;
2434
2435 if (VG_(clo_trace_pthread_level) >= 1) {
sewardjc3bd5f52002-05-01 03:24:23 +00002436 VG_(sprintf)(msg_buf,
2437 "pthread_cond_timedwai cv %p: TIMEOUT with mx %p",
2438 cv, mx );
sewardj5f07b662002-04-23 16:52:51 +00002439 print_pthread_event(tid, msg_buf);
2440 }
2441 } else {
2442 /* Currently held. Make thread tid be blocked on it. */
2443 vg_assert(mx->__m_count > 0);
sewardj018f7622002-05-15 21:13:39 +00002444 VG_(threads)[tid].status = VgTs_WaitMX;
sewardjc3bd5f52002-05-01 03:24:23 +00002445 SET_EDX(tid, ETIMEDOUT); /* pthread_cond_wait return value */
sewardj018f7622002-05-15 21:13:39 +00002446 VG_(threads)[tid].associated_cv = NULL;
2447 VG_(threads)[tid].associated_mx = mx;
sewardj5f07b662002-04-23 16:52:51 +00002448 if (VG_(clo_trace_pthread_level) >= 1) {
2449 VG_(sprintf)(msg_buf,
2450 "pthread_cond_timedwai cv %p: TIMEOUT -> BLOCK for mx %p",
2451 cv, mx );
2452 print_pthread_event(tid, msg_buf);
2453 }
2454
2455 }
2456}
2457
2458
/* Release up to n_to_release threads waiting (VgTs_WaitCV) on the
   condition variable cond.  Each released thread must reacquire its
   associated mutex: if that mutex is free it is handed straight over
   and the thread becomes runnable; otherwise the thread is moved to
   VgTs_WaitMX, blocked on the mutex.  In both cases the thread's
   eventual pthread_cond_wait return value is 0.  caller is a label
   used only in trace messages. */
static
void release_N_threads_waiting_on_cond ( pthread_cond_t* cond,
                                         Int n_to_release,
                                         Char* caller )
{
   Int  i;
   Char msg_buf[100];
   pthread_mutex_t* mx;

   while (True) {
      if (n_to_release == 0)
         return;

      /* Find a thread waiting on this CV.  Slots are scanned from 1;
         slot 0 is never used for client threads. */
      for (i = 1; i < VG_N_THREADS; i++) {
         if (VG_(threads)[i].status == VgTs_Empty)
            continue;
         if (VG_(threads)[i].status == VgTs_WaitCV
             && VG_(threads)[i].associated_cv == cond)
            break;
      }
      vg_assert(i <= VG_N_THREADS);

      if (i == VG_N_THREADS) {
         /* Nobody else is waiting on it. */
         return;
      }

      mx = VG_(threads)[i].associated_mx;
      vg_assert(mx != NULL);

      if (mx->__m_owner == VG_INVALID_THREADID) {
         /* Currently unheld; hand it out to thread i. */
         vg_assert(mx->__m_count == 0);
         VG_(threads)[i].status        = VgTs_Runnable;
         VG_(threads)[i].associated_cv = NULL;
         VG_(threads)[i].associated_mx = NULL;
         mx->__m_owner = (_pthread_descr)i;
         mx->__m_count = 1;
         /* .m_edx already holds pth_cond_wait success value (0) */

         if (VG_(clo_trace_pthread_level) >= 1) {
            VG_(sprintf)(msg_buf, "%s cv %p: RESUME with mx %p",
                                  caller, cond, mx );
            print_pthread_event(i, msg_buf);
         }

      } else {
         /* Currently held.  Make thread i be blocked on it. */
         vg_assert(mx->__m_count > 0);
         VG_(threads)[i].status        = VgTs_WaitMX;
         VG_(threads)[i].associated_cv = NULL;
         VG_(threads)[i].associated_mx = mx;
         SET_EDX(i, 0); /* pth_cond_wait success value */

         if (VG_(clo_trace_pthread_level) >= 1) {
            VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
                                  caller, cond, mx );
            print_pthread_event(i, msg_buf);
         }

      }

      n_to_release--;
   }
}
2525
2526
/* Simulate pthread_cond_wait() / pthread_cond_timedwait() on behalf
   of thread tid.  ms_end == 0xFFFFFFFF means wait with no timeout
   (plain cond_wait); otherwise ms_end is the absolute timeout in
   milliseconds, acted on later by do_pthread_cond_timedwait_TIMEOUT.
   On entry tid must hold mutex.  tid is queued on cond (VgTs_WaitCV)
   and the mutex is released, possibly waking one thread blocked on
   it.  Errors (EINVAL) are delivered immediately via tid's %EDX. */
static
void do_pthread_cond_wait ( ThreadId tid,
                            pthread_cond_t *cond,
                            pthread_mutex_t *mutex,
                            UInt ms_end )
{
   Char msg_buf[100];

   /* If ms_end == 0xFFFFFFFF, wait forever (no timeout).  Otherwise,
      ms_end is the ending millisecond. */

   /* pre: mutex should be a valid mutex and owned by tid. */
   if (VG_(clo_trace_pthread_level) >= 2) {
      VG_(sprintf)(msg_buf, "pthread_cond_wait cv %p, mx %p, end %d ...",
                            cond, mutex, ms_end );
      print_pthread_event(tid, msg_buf);
   }

   /* Paranoia ...: the requesting thread must be valid and runnable. */
   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   if (mutex == NULL || cond == NULL) {
      SET_EDX(tid, EINVAL);
      return;
   }

   /* More paranoia ...: reject mutexes whose kind field looks
      corrupted, or whose lock count has gone negative. */
   switch (mutex->__m_kind) {
#     ifndef GLIBC_2_1
      case PTHREAD_MUTEX_TIMED_NP:
      case PTHREAD_MUTEX_ADAPTIVE_NP:
#     endif
#     ifdef GLIBC_2_1
      case PTHREAD_MUTEX_FAST_NP:
#     endif
      case PTHREAD_MUTEX_RECURSIVE_NP:
      case PTHREAD_MUTEX_ERRORCHECK_NP:
         if (mutex->__m_count >= 0) break;
         /* else fall thru */
      default:
         SET_EDX(tid, EINVAL);
         return;
   }

   /* Barf if we don't currently hold the mutex. */
   if (mutex->__m_count == 0 /* nobody holds it */
       || (ThreadId)mutex->__m_owner != tid /* we don't hold it */) {
      SET_EDX(tid, EINVAL);
      return;
   }

   /* Queue ourselves on the condition. */
   VG_(threads)[tid].status        = VgTs_WaitCV;
   VG_(threads)[tid].associated_cv = cond;
   VG_(threads)[tid].associated_mx = mutex;
   VG_(threads)[tid].awaken_at     = ms_end;

   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf,
                   "pthread_cond_wait cv %p, mx %p: BLOCK",
                   cond, mutex );
      print_pthread_event(tid, msg_buf);
   }

   /* Release the mutex.  This may immediately hand it on to some
      other thread blocked on it. */
   release_one_thread_waiting_on_mutex ( mutex, "pthread_cond_wait " );
}
2595
2596
2597static
2598void do_pthread_cond_signal_or_broadcast ( ThreadId tid,
2599 Bool broadcast,
2600 pthread_cond_t *cond )
2601{
2602 Char msg_buf[100];
2603 Char* caller
2604 = broadcast ? "pthread_cond_broadcast"
2605 : "pthread_cond_signal ";
2606
2607 if (VG_(clo_trace_pthread_level) >= 2) {
2608 VG_(sprintf)(msg_buf, "%s cv %p ...",
2609 caller, cond );
2610 print_pthread_event(tid, msg_buf);
2611 }
2612
2613 /* Paranoia ... */
sewardjb48e5002002-05-13 00:16:03 +00002614 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002615 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj3b5d8862002-04-20 13:53:23 +00002616
2617 if (cond == NULL) {
sewardjc3bd5f52002-05-01 03:24:23 +00002618 SET_EDX(tid, EINVAL);
sewardj3b5d8862002-04-20 13:53:23 +00002619 return;
2620 }
2621
2622 release_N_threads_waiting_on_cond (
2623 cond,
2624 broadcast ? VG_N_THREADS : 1,
2625 caller
2626 );
2627
sewardjc3bd5f52002-05-01 03:24:23 +00002628 SET_EDX(tid, 0); /* success */
sewardj3b5d8862002-04-20 13:53:23 +00002629}
2630
sewardj77e466c2002-04-14 02:29:29 +00002631
sewardj5f07b662002-04-23 16:52:51 +00002632/* -----------------------------------------------------------
2633 THREAD SPECIFIC DATA
2634 -------------------------------------------------------- */
2635
2636static __inline__
2637Bool is_valid_key ( ThreadKey k )
2638{
2639 /* k unsigned; hence no < 0 check */
2640 if (k >= VG_N_THREAD_KEYS) return False;
2641 if (!vg_thread_keys[k].inuse) return False;
2642 return True;
2643}
2644
/* Simulate pthread_key_create(key, destructor) for thread tid.
   Allocates the first free slot in vg_thread_keys, writes its index
   to *key, and returns 0 via tid's %EDX.  Panics (rather than
   returning EAGAIN) if all VG_N_THREAD_KEYS slots are in use.
   NOTE(review): the destructor argument is not recorded anywhere in
   this visible code -- presumably per-key destructors are handled (or
   ignored) elsewhere; confirm. */
static
void do_pthread_key_create ( ThreadId tid,
                             pthread_key_t* key,
                             void (*destructor)(void*) )
{
   Int i;
   Char msg_buf[100];

   if (VG_(clo_trace_pthread_level) >= 1) {
      VG_(sprintf)(msg_buf, "pthread_key_create *key %p, destr %p",
                            key, destructor );
      print_pthread_event(tid, msg_buf);
   }

   /* The two key types must be interchangeable for *key = i below. */
   vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   /* Find the first unused key slot. */
   for (i = 0; i < VG_N_THREAD_KEYS; i++)
      if (!vg_thread_keys[i].inuse)
         break;

   if (i == VG_N_THREAD_KEYS) {
      /* SET_EDX(tid, EAGAIN);
         return;
      */
      VG_(panic)("pthread_key_create: VG_N_THREAD_KEYS is too low;"
                 " increase and recompile");
   }

   vg_thread_keys[i].inuse = True;

   /* TODO: check key for addressibility */
   *key = i;
   /* We just wrote *key, so tell the memcheck core it is now
      initialised client memory. */
   if (VG_(clo_instrument))
      VGM_(make_readable)( (Addr)key, sizeof(pthread_key_t) );

   SET_EDX(tid, 0);
}
2684
2685
2686static
2687void do_pthread_key_delete ( ThreadId tid, pthread_key_t key )
2688{
2689 Char msg_buf[100];
2690 if (VG_(clo_trace_pthread_level) >= 1) {
2691 VG_(sprintf)(msg_buf, "pthread_key_delete key %d",
2692 key );
2693 print_pthread_event(tid, msg_buf);
2694 }
2695
sewardjb48e5002002-05-13 00:16:03 +00002696 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002697 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002698
2699 if (!is_valid_key(key)) {
sewardjc3bd5f52002-05-01 03:24:23 +00002700 SET_EDX(tid, EINVAL);
sewardj5f07b662002-04-23 16:52:51 +00002701 return;
2702 }
2703
2704 vg_thread_keys[key].inuse = False;
2705
2706 /* Optional. We're not required to do this, although it shouldn't
2707 make any difference to programs which use the key/specifics
2708 functions correctly. */
sewardj3b13f0e2002-04-25 20:17:29 +00002709# if 1
sewardj5f07b662002-04-23 16:52:51 +00002710 for (tid = 1; tid < VG_N_THREADS; tid++) {
sewardj018f7622002-05-15 21:13:39 +00002711 if (VG_(threads)[tid].status != VgTs_Empty)
2712 VG_(threads)[tid].specifics[key] = NULL;
sewardj5f07b662002-04-23 16:52:51 +00002713 }
sewardj3b13f0e2002-04-25 20:17:29 +00002714# endif
sewardj5f07b662002-04-23 16:52:51 +00002715}
2716
2717
2718static
2719void do_pthread_getspecific ( ThreadId tid, pthread_key_t key )
2720{
2721 Char msg_buf[100];
2722 if (VG_(clo_trace_pthread_level) >= 1) {
2723 VG_(sprintf)(msg_buf, "pthread_getspecific key %d",
2724 key );
2725 print_pthread_event(tid, msg_buf);
2726 }
2727
sewardjb48e5002002-05-13 00:16:03 +00002728 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002729 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002730
2731 if (!is_valid_key(key)) {
sewardjc3bd5f52002-05-01 03:24:23 +00002732 SET_EDX(tid, (UInt)NULL);
sewardj5f07b662002-04-23 16:52:51 +00002733 return;
2734 }
2735
sewardj018f7622002-05-15 21:13:39 +00002736 SET_EDX(tid, (UInt)VG_(threads)[tid].specifics[key]);
sewardj5f07b662002-04-23 16:52:51 +00002737}
2738
2739
2740static
2741void do_pthread_setspecific ( ThreadId tid,
2742 pthread_key_t key,
2743 void *pointer )
2744{
2745 Char msg_buf[100];
2746 if (VG_(clo_trace_pthread_level) >= 1) {
2747 VG_(sprintf)(msg_buf, "pthread_setspecific key %d, ptr %p",
2748 key, pointer );
2749 print_pthread_event(tid, msg_buf);
2750 }
2751
sewardjb48e5002002-05-13 00:16:03 +00002752 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002753 && VG_(threads)[tid].status == VgTs_Runnable);
sewardj5f07b662002-04-23 16:52:51 +00002754
2755 if (!is_valid_key(key)) {
sewardjc3bd5f52002-05-01 03:24:23 +00002756 SET_EDX(tid, EINVAL);
sewardj5f07b662002-04-23 16:52:51 +00002757 return;
2758 }
2759
sewardj018f7622002-05-15 21:13:39 +00002760 VG_(threads)[tid].specifics[key] = pointer;
sewardjc3bd5f52002-05-01 03:24:23 +00002761 SET_EDX(tid, 0);
sewardj5f07b662002-04-23 16:52:51 +00002762}
2763
2764
sewardjb48e5002002-05-13 00:16:03 +00002765/* ---------------------------------------------------
2766 SIGNALS
2767 ------------------------------------------------ */
2768
2769/* See comment in vg_libthread.c:pthread_sigmask() regarding
sewardj018f7622002-05-15 21:13:39 +00002770 deliberate confusion of types sigset_t and vki_sigset_t. Return 0
2771 for OK and 1 for some kind of addressing error, which the
2772 vg_libpthread.c routine turns into return values 0 and EFAULT
2773 respectively. */
sewardjb48e5002002-05-13 00:16:03 +00002774static
2775void do_pthread_sigmask ( ThreadId tid,
sewardj018f7622002-05-15 21:13:39 +00002776 Int vki_how,
sewardjb48e5002002-05-13 00:16:03 +00002777 vki_ksigset_t* newmask,
2778 vki_ksigset_t* oldmask )
2779{
2780 Char msg_buf[100];
2781 if (VG_(clo_trace_pthread_level) >= 1) {
2782 VG_(sprintf)(msg_buf,
sewardj018f7622002-05-15 21:13:39 +00002783 "pthread_sigmask vki_how %d, newmask %p, oldmask %p",
2784 vki_how, newmask, oldmask );
sewardjb48e5002002-05-13 00:16:03 +00002785 print_pthread_event(tid, msg_buf);
2786 }
2787
2788 vg_assert(VG_(is_valid_tid)(tid)
sewardj018f7622002-05-15 21:13:39 +00002789 && VG_(threads)[tid].status == VgTs_Runnable);
sewardjb48e5002002-05-13 00:16:03 +00002790
2791 if (VG_(clo_instrument)) {
2792 /* TODO check newmask/oldmask are addressible/defined */
2793 }
2794
sewardj018f7622002-05-15 21:13:39 +00002795 VG_(do_pthread_sigmask_SCSS_upd) ( tid, vki_how, newmask, oldmask );
sewardjb48e5002002-05-13 00:16:03 +00002796
sewardj3a951cf2002-05-15 22:25:47 +00002797 if (newmask && VG_(clo_instrument)) {
2798 VGM_(make_readable)( (Addr)newmask, sizeof(vki_ksigset_t) );
2799 }
2800
sewardj018f7622002-05-15 21:13:39 +00002801 /* Success. */
sewardjb48e5002002-05-13 00:16:03 +00002802 SET_EDX(tid, 0);
2803}
2804
2805
/* Simulate sigwait(set, sig) for thread tid: record the set of
   signals the thread wishes to wait for and park it in state
   VgTs_WaitSIG.  No return value is set here; presumably tid's %EDX
   and *sig are filled in elsewhere when a matching signal is
   delivered -- that code is not in this function (TODO confirm). */
static
void do_sigwait ( ThreadId tid,
                  vki_ksigset_t* set,
                  Int* sig )
{
   vki_ksigset_t irrelevant_sigmask;
   Char          msg_buf[100];

   if (VG_(clo_trace_signals) || VG_(clo_trace_sched)) {
      VG_(sprintf)(msg_buf,
         "suspend due to sigwait(): set %p, sig %p",
         set, sig );
      print_pthread_event(tid, msg_buf);
   }

   vg_assert(VG_(is_valid_tid)(tid)
             && VG_(threads)[tid].status == VgTs_Runnable);

   /* Change SCSS */
   VG_(threads)[tid].sigs_waited_for = *set;
   VG_(threads)[tid].status = VgTs_WaitSIG;

   /* The returned previous host mask is not needed here. */
   VG_(block_all_host_signals)( &irrelevant_sigmask );
   VG_(handle_SCSS_change)( False /* lazy update */ );
}
2831
2832
2833static
2834void do_pthread_kill ( ThreadId tid, /* me */
2835 ThreadId thread, /* thread to signal */
2836 Int sig )
2837{
2838 Char msg_buf[100];
2839
2840 if (VG_(clo_trace_signals) || VG_(clo_trace_pthread_level) >= 1) {
2841 VG_(sprintf)(msg_buf,
2842 "pthread_kill thread %d, signo %d",
2843 thread, sig );
2844 print_pthread_event(tid, msg_buf);
2845 }
2846
2847 vg_assert(VG_(is_valid_tid)(tid)
2848 && VG_(threads)[tid].status == VgTs_Runnable);
2849
2850 if (!VG_(is_valid_tid)(tid)) {
2851 SET_EDX(tid, -VKI_ESRCH);
2852 return;
2853 }
2854
2855 if (sig < 1 || sig > VKI_KNSIG) {
2856 SET_EDX(tid, -VKI_EINVAL);
2857 return;
2858 }
2859
2860 VG_(send_signal_to_thread)( thread, sig );
2861 SET_EDX(tid, 0);
sewardjb48e5002002-05-13 00:16:03 +00002862}
2863
2864
sewardje663cb92002-04-12 10:26:32 +00002865/* ---------------------------------------------------------------------
2866 Handle non-trivial client requests.
2867 ------------------------------------------------------------------ */
2868
/* Dispatch a non-trivial client request made by thread tid.  The
   address of the request block is in tid's %EAX: arg[0] is the
   request code, arg[1..3] its parameters.  Each case delegates to the
   matching do_* handler, which is responsible for setting the
   client's return value (%EDX) and/or changing tid's scheduling
   state.  Unknown codes are fatal. */
static
void do_nontrivial_clientreq ( ThreadId tid )
{
   UInt* arg    = (UInt*)(VG_(threads)[tid].m_eax);
   UInt  req_no = arg[0];
   switch (req_no) {

      case VG_USERREQ__PTHREAD_JOIN:
         do_pthread_join( tid, arg[1], (void**)(arg[2]) );
         break;

      /* Plain wait: 0xFFFFFFFF is the "no timeout" sentinel. */
      case VG_USERREQ__PTHREAD_COND_WAIT:
         do_pthread_cond_wait( tid,
                               (pthread_cond_t *)(arg[1]),
                               (pthread_mutex_t *)(arg[2]),
                               0xFFFFFFFF /* no timeout */ );
         break;

      case VG_USERREQ__PTHREAD_COND_TIMEDWAIT:
         do_pthread_cond_wait( tid,
                               (pthread_cond_t *)(arg[1]),
                               (pthread_mutex_t *)(arg[2]),
                               arg[3] /* timeout millisecond point */ );
         break;

      case VG_USERREQ__PTHREAD_COND_SIGNAL:
         do_pthread_cond_signal_or_broadcast(
            tid,
            False, /* signal, not broadcast */
            (pthread_cond_t *)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_COND_BROADCAST:
         do_pthread_cond_signal_or_broadcast(
            tid,
            True, /* broadcast, not signal */
            (pthread_cond_t *)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_KEY_CREATE:
         do_pthread_key_create ( tid,
                                 (pthread_key_t*)(arg[1]),
                                 (void(*)(void*))(arg[2]) );
         break;

      case VG_USERREQ__PTHREAD_KEY_DELETE:
         do_pthread_key_delete ( tid,
                                 (pthread_key_t)(arg[1]) );
         break;

      case VG_USERREQ__PTHREAD_SETSPECIFIC:
         do_pthread_setspecific ( tid,
                                  (pthread_key_t)(arg[1]),
                                  (void*)(arg[2]) );
         break;

      case VG_USERREQ__PTHREAD_SIGMASK:
         do_pthread_sigmask ( tid,
                              arg[1],
                              (vki_ksigset_t*)(arg[2]),
                              (vki_ksigset_t*)(arg[3]) );
         break;

      case VG_USERREQ__SIGWAIT:
         do_sigwait ( tid,
                      (vki_ksigset_t*)(arg[1]),
                      (Int*)(arg[2]) );
         break;

      case VG_USERREQ__PTHREAD_KILL:
         do_pthread_kill ( tid, arg[1], arg[2] );
         break;

      case VG_USERREQ__PTHREAD_YIELD:
         do_pthread_yield ( tid );
         /* because this is classified as a non-trivial client
            request, the scheduler should now select a new thread to
            run. */
         break;

      case VG_USERREQ__SET_CANCELSTATE:
         do__set_cancelstate ( tid, arg[1] );
         break;

      case VG_USERREQ__SET_OR_GET_DETACH:
         do__set_or_get_detach ( tid, arg[1], arg[2] );
         break;

      case VG_USERREQ__SET_CANCELPEND:
         do__set_cancelpend ( tid, arg[1], (void(*)(void*))arg[2] );
         break;

      case VG_USERREQ__WAIT_JOINER:
         do__wait_joiner ( tid, (void*)arg[1] );
         break;

      case VG_USERREQ__QUIT:
         do__quit ( tid );
         break;

      case VG_USERREQ__APPLY_IN_NEW_THREAD:
         do__apply_in_new_thread ( tid, (void*(*)(void*))arg[1],
                                   (void*)arg[2] );
         break;

      /* Memory/instrumentation requests: forwarded wholesale to the
         generic client-request handler; its result becomes the
         client's return value. */
      case VG_USERREQ__MAKE_NOACCESS:
      case VG_USERREQ__MAKE_WRITABLE:
      case VG_USERREQ__MAKE_READABLE:
      case VG_USERREQ__DISCARD:
      case VG_USERREQ__CHECK_WRITABLE:
      case VG_USERREQ__CHECK_READABLE:
      case VG_USERREQ__MAKE_NOACCESS_STACK:
      case VG_USERREQ__RUNNING_ON_VALGRIND:
      case VG_USERREQ__DO_LEAK_CHECK:
      case VG_USERREQ__DISCARD_TRANSLATIONS:
         SET_EDX(
            tid,
            VG_(handle_client_request) ( &VG_(threads)[tid], arg )
         );
         break;

      case VG_USERREQ__SIGNAL_RETURNS:
         handle_signal_return(tid);
         break;

      default:
         VG_(printf)("panic'd on private request = 0x%x\n", arg[0] );
         VG_(panic)("handle_private_client_pthread_request: "
                    "unknown request");
         /*NOTREACHED*/
         break;
   }
}
3002
3003
sewardj6072c362002-04-19 14:40:57 +00003004/* ---------------------------------------------------------------------
3005 Sanity checking.
3006 ------------------------------------------------------------------ */
3007
3008/* Internal consistency checks on the sched/pthread structures. */
3009static
3010void scheduler_sanity ( void )
3011{
sewardj3b5d8862002-04-20 13:53:23 +00003012 pthread_mutex_t* mx;
3013 pthread_cond_t* cv;
sewardj6072c362002-04-19 14:40:57 +00003014 Int i;
sewardj5f07b662002-04-23 16:52:51 +00003015
sewardj6072c362002-04-19 14:40:57 +00003016 /* VG_(printf)("scheduler_sanity\n"); */
3017 for (i = 1; i < VG_N_THREADS; i++) {
sewardj018f7622002-05-15 21:13:39 +00003018 mx = VG_(threads)[i].associated_mx;
3019 cv = VG_(threads)[i].associated_cv;
3020 if (VG_(threads)[i].status == VgTs_WaitMX) {
sewardjbf290b92002-05-01 02:28:01 +00003021 /* If we're waiting on a MX: (1) the mx is not null, (2, 3)
3022 it's actually held by someone, since otherwise this thread
3023 is deadlocked, (4) the mutex's owner is not us, since
3024 otherwise this thread is also deadlocked. The logic in
3025 do_pthread_mutex_lock rejects attempts by a thread to lock
3026 a (non-recursive) mutex which it already owns.
sewardj05553872002-04-20 20:53:17 +00003027
sewardjbf290b92002-05-01 02:28:01 +00003028 (2) has been seen to fail sometimes. I don't know why.
3029 Possibly to do with signals. */
sewardj3b5d8862002-04-20 13:53:23 +00003030 vg_assert(cv == NULL);
sewardj05553872002-04-20 20:53:17 +00003031 /* 1 */ vg_assert(mx != NULL);
3032 /* 2 */ vg_assert(mx->__m_count > 0);
sewardjb48e5002002-05-13 00:16:03 +00003033 /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__m_owner));
sewardj05553872002-04-20 20:53:17 +00003034 /* 4 */ vg_assert(i != (ThreadId)mx->__m_owner);
sewardj3b5d8862002-04-20 13:53:23 +00003035 } else
sewardj018f7622002-05-15 21:13:39 +00003036 if (VG_(threads)[i].status == VgTs_WaitCV) {
sewardj3b5d8862002-04-20 13:53:23 +00003037 vg_assert(cv != NULL);
3038 vg_assert(mx != NULL);
sewardj6072c362002-04-19 14:40:57 +00003039 } else {
sewardj05553872002-04-20 20:53:17 +00003040 /* Unfortunately these don't hold true when a sighandler is
3041 running. To be fixed. */
3042 /* vg_assert(cv == NULL); */
3043 /* vg_assert(mx == NULL); */
sewardj6072c362002-04-19 14:40:57 +00003044 }
sewardjbf290b92002-05-01 02:28:01 +00003045
sewardj018f7622002-05-15 21:13:39 +00003046 if (VG_(threads)[i].status != VgTs_Empty) {
sewardjbf290b92002-05-01 02:28:01 +00003047 Int
sewardj018f7622002-05-15 21:13:39 +00003048 stack_used = (Addr)VG_(threads)[i].stack_highest_word
3049 - (Addr)VG_(threads)[i].m_esp;
sewardjbf290b92002-05-01 02:28:01 +00003050 if (i > 1 /* not the root thread */
3051 && stack_used
3052 >= (VG_PTHREAD_STACK_MIN - 1000 /* paranoia */)) {
3053 VG_(message)(Vg_UserMsg,
3054 "Warning: STACK OVERFLOW: "
3055 "thread %d: stack used %d, available %d",
3056 i, stack_used, VG_PTHREAD_STACK_MIN );
3057 VG_(message)(Vg_UserMsg,
3058 "Terminating Valgrind. If thread(s) "
3059 "really need more stack, increase");
3060 VG_(message)(Vg_UserMsg,
3061 "VG_PTHREAD_STACK_SIZE in vg_include.h and recompile.");
3062 VG_(exit)(1);
3063 }
sewardjb48e5002002-05-13 00:16:03 +00003064
sewardj018f7622002-05-15 21:13:39 +00003065 if (VG_(threads)[i].status == VgTs_WaitSIG) {
sewardjb48e5002002-05-13 00:16:03 +00003066 vg_assert( ! VG_(kisemptysigset)(
sewardj018f7622002-05-15 21:13:39 +00003067 & VG_(threads)[i].sigs_waited_for) );
sewardjb48e5002002-05-13 00:16:03 +00003068 } else {
3069 vg_assert( VG_(kisemptysigset)(
sewardj018f7622002-05-15 21:13:39 +00003070 & VG_(threads)[i].sigs_waited_for) );
sewardjb48e5002002-05-13 00:16:03 +00003071 }
3072
sewardjbf290b92002-05-01 02:28:01 +00003073 }
sewardj6072c362002-04-19 14:40:57 +00003074 }
sewardj5f07b662002-04-23 16:52:51 +00003075
3076 for (i = 0; i < VG_N_THREAD_KEYS; i++) {
3077 if (!vg_thread_keys[i].inuse)
3078 vg_assert(vg_thread_keys[i].destructor == NULL);
3079 }
sewardj6072c362002-04-19 14:40:57 +00003080}
3081
3082
sewardje663cb92002-04-12 10:26:32 +00003083/*--------------------------------------------------------------------*/
3084/*--- end vg_scheduler.c ---*/
3085/*--------------------------------------------------------------------*/