sewardje663cb92002-04-12 10:26:32 +00001
2/*--------------------------------------------------------------------*/
njn278b3d62005-05-30 23:20:51 +00003/*--- Thread scheduling. scheduler.c ---*/
sewardje663cb92002-04-12 10:26:32 +00004/*--------------------------------------------------------------------*/
5
6/*
njnc0ae7052005-08-25 22:55:19 +00007 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
sewardje663cb92002-04-12 10:26:32 +00009
sewardje4b0bf02006-06-05 23:21:15 +000010 Copyright (C) 2000-2006 Julian Seward
sewardje663cb92002-04-12 10:26:32 +000011 jseward@acm.org
sewardje663cb92002-04-12 10:26:32 +000012
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
27
njn25e49d8e72002-09-23 09:36:25 +000028 The GNU General Public License is contained in the file COPYING.
sewardje663cb92002-04-12 10:26:32 +000029*/
30
sewardjb5f6f512005-03-10 23:59:00 +000031/*
32 Overview
33
34 Valgrind tries to emulate the kernel's threading as closely as
35 possible. The client does all threading via the normal syscalls
36 (on Linux: clone, etc). Valgrind emulates this by creating exactly
37 the same process structure as would be created without Valgrind.
38 There are no extra threads.
39
40 The main difference is that Valgrind only allows one client thread
41 to run at once. This is controlled with the VCPU semaphore,
42 "run_sema". Any time a thread wants to run client code or
43 manipulate any shared state (which is anything other than its own
44 ThreadState entry), it must hold the run_sema.
45
46 When a thread is about to block in a blocking syscall, it releases
47 run_sema, and re-takes it when it becomes runnable again (either
48 because the syscall finished, or we took a signal).
49
50 VG_(scheduler) therefore runs in each thread. It returns only when
51 the thread is exiting, either because it exited itself, or it was
52 told to exit by another thread.
53
54 This file is almost entirely OS-independent. The details of how
55 the OS handles threading and signalling are abstracted away and
njn12771092005-06-18 02:18:04 +000056 implemented elsewhere. [Some of the functions have worked their
57 way back for the moment, until we do an OS port in earnest...]
sewardjb5f6f512005-03-10 23:59:00 +000058 */
59
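/* A minimal standalone sketch (not part of this file) of the locking
   model described above: every thread must hold one semaphore
   ("run_sema") while it runs client code or touches shared state, and
   drops it around anything that may block.  worker() and
   do_blocking_call() are names invented purely for illustration. */
#if 0
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t run_sema;                 /* only the holder may "run" */

static void do_blocking_call(void) { sleep(1); }

static void *worker(void *arg)
{
   long tid = (long)arg;

   sem_wait(&run_sema);                /* cf. VG_(set_running) */
   printf("thread %ld running\n", tid);

   sem_post(&run_sema);                /* cf. VG_(set_sleeping), about
                                          to enter a blocking syscall */
   do_blocking_call();
   sem_wait(&run_sema);                /* runnable again: re-acquire */

   printf("thread %ld exiting\n", tid);
   sem_post(&run_sema);                /* cf. VG_(exit_thread) */
   return NULL;
}

int main(void)
{
   pthread_t th[3];
   sem_init(&run_sema, 0, 1);          /* binary: one runner at a time */
   for (long i = 0; i < 3; i++)
      pthread_create(&th[i], NULL, worker, (void *)i);
   for (int i = 0; i < 3; i++)
      pthread_join(th[i], NULL);
   sem_destroy(&run_sema);
   return 0;
}
#endif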
njnc7561b92005-06-19 01:24:32 +000060#include "pub_core_basics.h"
61#include "pub_core_threadstate.h"
njn04e16982005-05-31 00:23:43 +000062#include "pub_core_aspacemgr.h"
njn93fe3b22005-12-21 20:22:52 +000063#include "pub_core_clreq.h" // for VG_USERREQ__*
njn36b66df2005-05-12 05:13:04 +000064#include "pub_core_dispatch.h"
njnf4c50162005-06-20 14:18:12 +000065#include "pub_core_errormgr.h" // For VG_(get_n_errs_found)()
njn97405b22005-06-02 03:39:33 +000066#include "pub_core_libcbase.h"
njn132bfcc2005-06-04 19:16:06 +000067#include "pub_core_libcassert.h"
njn36a20fa2005-06-03 03:08:39 +000068#include "pub_core_libcprint.h"
njnf39e9a32005-06-12 02:43:17 +000069#include "pub_core_libcproc.h"
njnde62cbf2005-06-10 22:08:14 +000070#include "pub_core_libcsignal.h"
njnf536bbb2005-06-13 04:21:38 +000071#include "pub_core_machine.h"
njnaf1d7df2005-06-11 01:31:52 +000072#include "pub_core_mallocfree.h"
njn20242342005-05-16 23:31:24 +000073#include "pub_core_options.h"
njn717cde52005-05-10 02:47:21 +000074#include "pub_core_replacemalloc.h"
njn278b3d62005-05-30 23:20:51 +000075#include "pub_core_scheduler.h"
njn0c246472005-05-31 01:00:08 +000076#include "pub_core_signals.h"
njn945ed2e2005-06-24 03:28:30 +000077#include "pub_core_stacks.h"
njnf4c50162005-06-20 14:18:12 +000078#include "pub_core_stacktrace.h" // For VG_(get_and_pp_StackTrace)()
njn9abd6082005-06-17 21:31:45 +000079#include "pub_core_syscall.h"
njnc1b01812005-06-17 22:19:06 +000080#include "pub_core_syswrap.h"
njn43b9a8a2005-05-10 04:37:01 +000081#include "pub_core_tooliface.h"
njnf4c50162005-06-20 14:18:12 +000082#include "pub_core_translate.h" // For VG_(translate)()
njn8bddf582005-05-13 23:40:55 +000083#include "pub_core_transtab.h"
sewardj4eee4762006-10-14 15:51:32 +000084#include "pub_core_vkiscnums.h"
njn278b3d62005-05-30 23:20:51 +000085#include "priv_sema.h"
sewardje663cb92002-04-12 10:26:32 +000086
sewardj63fed7f2006-01-17 02:02:47 +000087/* #include "pub_core_debuginfo.h" */ // DEBUGGING HACK ONLY
88
89
sewardje663cb92002-04-12 10:26:32 +000090/* ---------------------------------------------------------------------
91 Types and globals for the scheduler.
92 ------------------------------------------------------------------ */
93
njnc7561b92005-06-19 01:24:32 +000094/* ThreadId and ThreadState are defined elsewhere. */
sewardje663cb92002-04-12 10:26:32 +000095
njn14319cc2005-03-13 06:26:22 +000096/* Defines the thread-scheduling timeslice, in terms of the number of
97 basic blocks we attempt to run each thread for. Smaller values
98 give finer interleaving but much increased scheduling overheads. */
sewardjea3a99f2006-05-07 14:37:03 +000099#define SCHEDULING_QUANTUM 100000
njn14319cc2005-03-13 06:26:22 +0000100
sewardj0ec07f32006-01-12 12:32:32 +0000101/* If False, a fault is Valgrind-internal (ie, a bug) */
102Bool VG_(in_generated_code) = False;
njn25e49d8e72002-09-23 09:36:25 +0000103
njnde583aa2005-05-11 18:57:02 +0000104/* Counts downwards in VG_(run_innerloop). */
105UInt VG_(dispatch_ctr);
106
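/* How the counter is used, in isolation (a standalone sketch, not the
   scheduler's actual loop): the counter is set to SCHEDULING_QUANTUM+1,
   decremented before each block is run and then tested against zero,
   so a starting value of N yields at most N-1 blocks and must be > 0
   on entry. */
#if 0
static unsigned run_quantum(unsigned dispatch_ctr)
{
   unsigned blocks_run = 0;
   while (1) {
      if (--dispatch_ctr == 0)   /* decrement happens before the test */
         break;                  /* cf. VG_TRC_INNER_COUNTERZERO */
      blocks_run++;              /* "run" one basic block */
   }
   return blocks_run;            /* == N-1 for a starting value N > 0 */
}
#endif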
njn394213a2005-06-19 18:38:24 +0000107/* 64-bit counter for the number of basic blocks done. */
108static ULong bbs_done = 0;
109
sewardje663cb92002-04-12 10:26:32 +0000110/* Forwards */
sewardjb5f6f512005-03-10 23:59:00 +0000111static void do_client_request ( ThreadId tid );
112static void scheduler_sanity ( ThreadId tid );
113static void mostly_clear_thread_record ( ThreadId tid );
sewardjd140e442002-05-29 01:21:19 +0000114
nethercote844e7122004-08-02 15:27:22 +0000115/* Stats. */
njn0fd92f42005-10-06 03:32:42 +0000116static ULong n_scheduling_events_MINOR = 0;
117static ULong n_scheduling_events_MAJOR = 0;
nethercote844e7122004-08-02 15:27:22 +0000118
njn6676d5b2005-06-19 18:49:19 +0000119/* Sanity checking counts. */
120static UInt sanity_fast_count = 0;
121static UInt sanity_slow_count = 0;
sewardjb5f6f512005-03-10 23:59:00 +0000122
nethercote844e7122004-08-02 15:27:22 +0000123void VG_(print_scheduler_stats)(void)
124{
125 VG_(message)(Vg_DebugMsg,
njn0fd92f42005-10-06 03:32:42 +0000126 "scheduler: %,llu jumps (bb entries).", bbs_done );
njn394213a2005-06-19 18:38:24 +0000127 VG_(message)(Vg_DebugMsg,
njn0fd92f42005-10-06 03:32:42 +0000128 "scheduler: %,llu/%,llu major/minor sched events.",
nethercote844e7122004-08-02 15:27:22 +0000129 n_scheduling_events_MAJOR, n_scheduling_events_MINOR);
njn6676d5b2005-06-19 18:49:19 +0000130 VG_(message)(Vg_DebugMsg,
131 " sanity: %d cheap, %d expensive checks.",
132 sanity_fast_count, sanity_slow_count );
nethercote844e7122004-08-02 15:27:22 +0000133}
134
sewardjb5f6f512005-03-10 23:59:00 +0000135/* CPU semaphore, so that threads can run exclusively */
136static vg_sema_t run_sema;
sewardjb5f6f512005-03-10 23:59:00 +0000137
138
sewardje663cb92002-04-12 10:26:32 +0000139/* ---------------------------------------------------------------------
140 Helper functions for the scheduler.
141 ------------------------------------------------------------------ */
142
sewardje663cb92002-04-12 10:26:32 +0000143static
144void print_sched_event ( ThreadId tid, Char* what )
145{
sewardj45b4b372002-04-16 22:50:32 +0000146 VG_(message)(Vg_DebugMsg, " SCHED[%d]: %s", tid, what );
sewardj8937c812002-04-12 20:12:20 +0000147}
148
sewardj8937c812002-04-12 20:12:20 +0000149static
sewardjb5f6f512005-03-10 23:59:00 +0000150HChar* name_of_sched_event ( UInt event )
sewardje663cb92002-04-12 10:26:32 +0000151{
152 switch (event) {
sewardja0fef1b2005-11-03 13:46:30 +0000153 case VEX_TRC_JMP_SYS_SYSCALL: return "SYSCALL";
154 case VEX_TRC_JMP_SYS_INT32: return "INT32";
155 case VEX_TRC_JMP_SYS_INT128: return "INT128";
156 case VEX_TRC_JMP_SYS_SYSENTER: return "SYSENTER";
sewardjd79ef682004-11-26 13:25:17 +0000157 case VEX_TRC_JMP_CLIENTREQ: return "CLIENTREQ";
158 case VEX_TRC_JMP_YIELD: return "YIELD";
sewardj45f02c42005-02-05 18:27:14 +0000159 case VEX_TRC_JMP_NODECODE: return "NODECODE";
sewardj1f430d32005-12-16 01:07:11 +0000160 case VEX_TRC_JMP_MAPFAIL: return "MAPFAIL";
tom360ed5d2006-01-13 09:26:23 +0000161 case VEX_TRC_JMP_NOREDIR: return "NOREDIR";
sewardj1f430d32005-12-16 01:07:11 +0000162 case VEX_TRC_JMP_EMWARN: return "EMWARN";
163 case VEX_TRC_JMP_TINVAL: return "TINVAL";
164 case VG_TRC_INVARIANT_FAILED: return "INVFAILED";
sewardje663cb92002-04-12 10:26:32 +0000165 case VG_TRC_INNER_COUNTERZERO: return "COUNTERZERO";
166 case VG_TRC_INNER_FASTMISS: return "FASTMISS";
sewardjb5f6f512005-03-10 23:59:00 +0000167 case VG_TRC_FAULT_SIGNAL: return "FAULTSIGNAL";
sewardje663cb92002-04-12 10:26:32 +0000168 default: return "??UNKNOWN??";
169 }
170}
171
sewardje663cb92002-04-12 10:26:32 +0000172/* Allocate a completely empty ThreadState record. */
sewardjb5f6f512005-03-10 23:59:00 +0000173ThreadId VG_(alloc_ThreadState) ( void )
sewardje663cb92002-04-12 10:26:32 +0000174{
175 Int i;
sewardj6072c362002-04-19 14:40:57 +0000176 for (i = 1; i < VG_N_THREADS; i++) {
sewardjb5f6f512005-03-10 23:59:00 +0000177 if (VG_(threads)[i].status == VgTs_Empty) {
178 VG_(threads)[i].status = VgTs_Init;
179 VG_(threads)[i].exitreason = VgSrc_None;
sewardje663cb92002-04-12 10:26:32 +0000180 return i;
sewardjb5f6f512005-03-10 23:59:00 +0000181 }
sewardje663cb92002-04-12 10:26:32 +0000182 }
183 VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
184 VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
njne427a662002-10-02 11:08:25 +0000185 VG_(core_panic)("VG_N_THREADS is too low");
sewardje663cb92002-04-12 10:26:32 +0000186 /*NOTREACHED*/
187}
188
sewardjb5f6f512005-03-10 23:59:00 +0000189/*
190 Mark a thread as Runnable. This will block until the run_sema is
191 available, so that we get exclusive access to all the shared
192 structures and the CPU. Up until we get the sema, we must not
193 touch any shared state.
194
195 When this returns, we'll actually be running.
196 */
197void VG_(set_running)(ThreadId tid)
198{
199 ThreadState *tst = VG_(get_ThreadState)(tid);
200
201 vg_assert(tst->status != VgTs_Runnable);
202
203 tst->status = VgTs_Runnable;
204
sewardj7eb7c582005-06-23 01:02:53 +0000205 ML_(sema_down)(&run_sema);
njnc7561b92005-06-19 01:24:32 +0000206 if (VG_(running_tid) != VG_INVALID_THREADID)
207 VG_(printf)("tid %d found %d running\n", tid, VG_(running_tid));
208 vg_assert(VG_(running_tid) == VG_INVALID_THREADID);
209 VG_(running_tid) = tid;
sewardjb5f6f512005-03-10 23:59:00 +0000210
tome0008d62005-11-10 15:02:42 +0000211 VG_(unknown_SP_update)(VG_(get_SP)(tid), VG_(get_SP)(tid));
212
sewardjb5f6f512005-03-10 23:59:00 +0000213 if (VG_(clo_trace_sched))
214 print_sched_event(tid, "now running");
tomdeca43f2005-07-27 23:04:28 +0000215
216 // While thread modeling is disabled, issue thread_run events here
217 // VG_(tm_thread_switchto)(tid);
218 VG_TRACK( thread_run, tid );
sewardjb5f6f512005-03-10 23:59:00 +0000219}
220
sewardjb5f6f512005-03-10 23:59:00 +0000221/*
222 Set a thread into a sleeping state, and give up exclusive access to
223 the CPU. On return, the thread must be prepared to block until it
224 is ready to run again (generally this means blocking in a syscall,
225 but it may mean that we remain in a Runnable state and we're just
226 yielding the CPU to another thread).
227 */
228void VG_(set_sleeping)(ThreadId tid, ThreadStatus sleepstate)
229{
230 ThreadState *tst = VG_(get_ThreadState)(tid);
231
232 vg_assert(tst->status == VgTs_Runnable);
233
234 vg_assert(sleepstate == VgTs_WaitSys ||
235 sleepstate == VgTs_Yielding);
236
237 tst->status = sleepstate;
238
njnc7561b92005-06-19 01:24:32 +0000239 vg_assert(VG_(running_tid) == tid);
240 VG_(running_tid) = VG_INVALID_THREADID;
sewardjb5f6f512005-03-10 23:59:00 +0000241
242 /* Release the run_sema; this will reschedule any runnable
243 thread. */
sewardj7eb7c582005-06-23 01:02:53 +0000244 ML_(sema_up)(&run_sema);
sewardjb5f6f512005-03-10 23:59:00 +0000245
246 if (VG_(clo_trace_sched)) {
247 Char buf[50];
sewardja8d8e232005-06-07 20:04:56 +0000248 VG_(sprintf)(buf, "now sleeping in state %s",
njnc7561b92005-06-19 01:24:32 +0000249 VG_(name_of_ThreadStatus)(sleepstate));
sewardjb5f6f512005-03-10 23:59:00 +0000250 print_sched_event(tid, buf);
nethercote75d26242004-08-01 22:59:18 +0000251 }
252}
253
sewardjb5f6f512005-03-10 23:59:00 +0000254/* Clear out the ThreadState and release the semaphore. Leaves the
255 ThreadState in VgTs_Zombie state, so that it doesn't get
256 reallocated until the caller is really ready. */
257void VG_(exit_thread)(ThreadId tid)
258{
259 vg_assert(VG_(is_valid_tid)(tid));
260 vg_assert(VG_(is_running_thread)(tid));
261 vg_assert(VG_(is_exiting)(tid));
262
sewardjb5f6f512005-03-10 23:59:00 +0000263 mostly_clear_thread_record(tid);
njnc7561b92005-06-19 01:24:32 +0000264 VG_(running_tid) = VG_INVALID_THREADID;
sewardjb5f6f512005-03-10 23:59:00 +0000265
266 /* There should still be a valid exitreason for this thread */
267 vg_assert(VG_(threads)[tid].exitreason != VgSrc_None);
268
sewardj7eb7c582005-06-23 01:02:53 +0000269 ML_(sema_up)(&run_sema);
sewardjb5f6f512005-03-10 23:59:00 +0000270}
271
272/* Kill a thread. This interrupts whatever a thread is doing, and
273 makes it exit ASAP. This does not set the exitreason or
274 exitcode. */
275void VG_(kill_thread)(ThreadId tid)
276{
277 vg_assert(VG_(is_valid_tid)(tid));
278 vg_assert(!VG_(is_running_thread)(tid));
279 vg_assert(VG_(is_exiting)(tid));
280
281 if (VG_(threads)[tid].status == VgTs_WaitSys) {
282 if (VG_(clo_trace_signals))
283 VG_(message)(Vg_DebugMsg, "kill_thread zaps tid %d lwp %d",
284 tid, VG_(threads)[tid].os_state.lwpid);
njn351d0062005-06-21 22:23:59 +0000285 VG_(tkill)(VG_(threads)[tid].os_state.lwpid, VG_SIGVGKILL);
sewardjb5f6f512005-03-10 23:59:00 +0000286 }
287}
288
289/*
290 Yield the CPU for a short time to let some other thread run.
291 */
292void VG_(vg_yield)(void)
293{
294 struct vki_timespec ts = { 0, 1 };
njnc7561b92005-06-19 01:24:32 +0000295 ThreadId tid = VG_(running_tid);
sewardjb5f6f512005-03-10 23:59:00 +0000296
297 vg_assert(tid != VG_INVALID_THREADID);
298 vg_assert(VG_(threads)[tid].os_state.lwpid == VG_(gettid)());
299
300 VG_(set_sleeping)(tid, VgTs_Yielding);
301
302 //VG_(printf)("tid %d yielding EIP=%p\n", tid, VG_(threads)[tid].arch.m_eip);
303
304 /*
305 Tell the kernel we're yielding.
306 */
307 if (1)
308 VG_(do_syscall0)(__NR_sched_yield);
309 else
310 VG_(nanosleep)(&ts);
311
312 VG_(set_running)(tid);
sewardjb5f6f512005-03-10 23:59:00 +0000313}
314
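/* The two ways of giving the CPU away that VG_(vg_yield) chooses
   between, restated as a standalone sketch using the ordinary libc
   wrappers rather than VG_(do_syscall0)/VG_(nanosleep): */
#if 0
#include <sched.h>
#include <time.h>

static void give_up_cpu(int use_sched_yield)
{
   if (use_sched_yield) {
      sched_yield();                    /* just requeue this thread */
   } else {
      struct timespec ts = { 0, 1 };    /* 1 nanosecond */
      nanosleep(&ts, NULL);             /* actually sleep, very briefly */
   }
}
#endif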
315
sewardj0ec07f32006-01-12 12:32:32 +0000316/* Set the standard set of blocked signals, used whenever we're not
njn9fc31122005-05-11 18:48:33 +0000317 running a client syscall. */
318static void block_signals(ThreadId tid)
319{
320 vki_sigset_t mask;
321
322 VG_(sigfillset)(&mask);
323
324 /* Don't block these because they're synchronous */
325 VG_(sigdelset)(&mask, VKI_SIGSEGV);
326 VG_(sigdelset)(&mask, VKI_SIGBUS);
327 VG_(sigdelset)(&mask, VKI_SIGFPE);
328 VG_(sigdelset)(&mask, VKI_SIGILL);
329 VG_(sigdelset)(&mask, VKI_SIGTRAP);
330
331 /* Can't block these anyway */
332 VG_(sigdelset)(&mask, VKI_SIGSTOP);
333 VG_(sigdelset)(&mask, VKI_SIGKILL);
334
njn9fc31122005-05-11 18:48:33 +0000335 VG_(sigprocmask)(VKI_SIG_SETMASK, &mask, NULL);
336}
337
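/* A standalone sketch of the same masking idea using plain POSIX
   calls; SIGKILL and SIGSTOP need no special handling here since they
   cannot be blocked anyway: */
#if 0
#include <signal.h>

static void block_almost_everything(void)
{
   sigset_t mask;
   sigfillset(&mask);
   /* keep synchronous faults deliverable */
   sigdelset(&mask, SIGSEGV);
   sigdelset(&mask, SIGBUS);
   sigdelset(&mask, SIGFPE);
   sigdelset(&mask, SIGILL);
   sigdelset(&mask, SIGTRAP);
   pthread_sigmask(SIG_SETMASK, &mask, NULL);
}
#endif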
njn8aa35852005-06-10 22:59:56 +0000338static void os_state_clear(ThreadState *tst)
339{
sewardj45f4e7c2005-09-27 19:20:21 +0000340 tst->os_state.lwpid = 0;
njn8aa35852005-06-10 22:59:56 +0000341 tst->os_state.threadgroup = 0;
342}
343
344static void os_state_init(ThreadState *tst)
345{
sewardj45f4e7c2005-09-27 19:20:21 +0000346 tst->os_state.valgrind_stack_base = 0;
347 tst->os_state.valgrind_stack_init_SP = 0;
njn8aa35852005-06-10 22:59:56 +0000348 os_state_clear(tst);
349}
350
sewardj20917d82002-05-28 01:36:45 +0000351static
352void mostly_clear_thread_record ( ThreadId tid )
353{
sewardjb5f6f512005-03-10 23:59:00 +0000354 vki_sigset_t savedmask;
355
sewardj20917d82002-05-28 01:36:45 +0000356 vg_assert(tid >= 0 && tid < VG_N_THREADS);
njnaf839f52005-06-23 03:27:57 +0000357 VG_(cleanup_thread)(&VG_(threads)[tid].arch);
sewardjb5f6f512005-03-10 23:59:00 +0000358 VG_(threads)[tid].tid = tid;
359
360 /* Leave the thread in Zombie, so that it doesn't get reallocated
361 until the caller is finally done with the thread stack. */
362 VG_(threads)[tid].status = VgTs_Zombie;
363
nethercote73b526f2004-10-31 18:48:21 +0000364 VG_(sigemptyset)(&VG_(threads)[tid].sig_mask);
sewardjb5f6f512005-03-10 23:59:00 +0000365 VG_(sigemptyset)(&VG_(threads)[tid].tmp_sig_mask);
jsgf855d93d2003-10-13 22:26:55 +0000366
njn8aa35852005-06-10 22:59:56 +0000367 os_state_clear(&VG_(threads)[tid]);
fitzhardinge28428592004-03-16 22:07:12 +0000368
369 /* start with no altstack */
370 VG_(threads)[tid].altstack.ss_sp = (void *)0xdeadbeef;
371 VG_(threads)[tid].altstack.ss_size = 0;
372 VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
sewardjb5f6f512005-03-10 23:59:00 +0000373
njn444eba12005-05-12 03:47:31 +0000374 VG_(clear_out_queued_signals)(tid, &savedmask);
sewardjb5f6f512005-03-10 23:59:00 +0000375
376 VG_(threads)[tid].sched_jmpbuf_valid = False;
sewardj20917d82002-05-28 01:36:45 +0000377}
378
njn3f8c4372005-03-13 04:43:10 +0000379/*
sewardj0ec07f32006-01-12 12:32:32 +0000380 Called in the child after fork. If the parent has multiple
381 threads, then we've inherited a VG_(threads) array describing them,
382 but only the thread which called fork() is actually alive in the
383 child. This functions needs to clean up all those other thread
384 structures.
njn3f8c4372005-03-13 04:43:10 +0000385
386 Whichever tid in the parent which called fork() becomes the
387 master_tid in the child. That's because the only living slot in
388 VG_(threads) in the child after fork is VG_(threads)[tid], and it
389 would be too hard to try to re-number the thread and relocate the
390 thread state down to VG_(threads)[1].
391
392 This function also needs to reinitialize the run_sema, since
393 otherwise we may end up sharing its state with the parent, which
394 would be deeply confusing.
395*/
sewardjb5f6f512005-03-10 23:59:00 +0000396static void sched_fork_cleanup(ThreadId me)
397{
398 ThreadId tid;
njnc7561b92005-06-19 01:24:32 +0000399 vg_assert(VG_(running_tid) == me);
sewardjb5f6f512005-03-10 23:59:00 +0000400
sewardjb5f6f512005-03-10 23:59:00 +0000401 VG_(threads)[me].os_state.lwpid = VG_(gettid)();
402 VG_(threads)[me].os_state.threadgroup = VG_(getpid)();
403
404 /* clear out all the unused thread slots */
405 for (tid = 1; tid < VG_N_THREADS; tid++) {
njn3f8c4372005-03-13 04:43:10 +0000406 if (tid != me) {
407 mostly_clear_thread_record(tid);
sewardjb5f6f512005-03-10 23:59:00 +0000408 VG_(threads)[tid].status = VgTs_Empty;
sewardja8d8e232005-06-07 20:04:56 +0000409 VG_(clear_syscallInfo)(tid);
njn3f8c4372005-03-13 04:43:10 +0000410 }
sewardjb5f6f512005-03-10 23:59:00 +0000411 }
412
413 /* re-init and take the sema */
sewardj7eb7c582005-06-23 01:02:53 +0000414 ML_(sema_deinit)(&run_sema);
415 ML_(sema_init)(&run_sema);
416 ML_(sema_down)(&run_sema);
sewardjb5f6f512005-03-10 23:59:00 +0000417}
sewardj20917d82002-05-28 01:36:45 +0000418
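/* sched_fork_cleanup() is registered with VG_(atfork_child) in
   VG_(scheduler_init) below.  A standalone sketch of the same pattern
   via the ordinary pthread_atfork() interface -- the child rebuilds
   the lock instead of inheriting whatever state the parent's copy was
   in at the moment of the fork: */
#if 0
#include <pthread.h>
#include <semaphore.h>

static sem_t run_sema;

static void child_after_fork(void)
{
   sem_destroy(&run_sema);       /* cf. ML_(sema_deinit) */
   sem_init(&run_sema, 0, 1);    /* cf. ML_(sema_init) */
   sem_wait(&run_sema);          /* cf. ML_(sema_down): we now hold it */
}

static void install_fork_handler(void)
{
   pthread_atfork(NULL, NULL, child_after_fork);
}
#endif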
jsgf855d93d2003-10-13 22:26:55 +0000419
sewardje663cb92002-04-12 10:26:32 +0000420/* Initialise the scheduler. Create a single "main" thread ready to
sewardj2a99cf62004-11-24 10:44:19 +0000421 run, with special ThreadId of one. This is called at startup. The
sewardjb5f6f512005-03-10 23:59:00 +0000422 caller subsequently initialises the guest state components of this
423 main thread, thread 1.
sewardje663cb92002-04-12 10:26:32 +0000424*/
sewardj45f4e7c2005-09-27 19:20:21 +0000425void VG_(scheduler_init) ( Addr clstack_end, SizeT clstack_size )
sewardje663cb92002-04-12 10:26:32 +0000426{
thughesc37184f2004-09-11 14:16:57 +0000427 Int i;
sewardje663cb92002-04-12 10:26:32 +0000428 ThreadId tid_main;
429
sewardj45f4e7c2005-09-27 19:20:21 +0000430 vg_assert(VG_IS_PAGE_ALIGNED(clstack_end+1));
431 vg_assert(VG_IS_PAGE_ALIGNED(clstack_size));
432
sewardj7eb7c582005-06-23 01:02:53 +0000433 ML_(sema_init)(&run_sema);
sewardjb5f6f512005-03-10 23:59:00 +0000434
sewardj6072c362002-04-19 14:40:57 +0000435 for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
sewardjc793fd32005-05-31 17:24:49 +0000436
437 /* Paranoia .. completely zero it out. */
438 VG_(memset)( & VG_(threads)[i], 0, sizeof( VG_(threads)[i] ) );
439
440 VG_(threads)[i].sig_queue = NULL;
sewardjb5f6f512005-03-10 23:59:00 +0000441
njn8aa35852005-06-10 22:59:56 +0000442 os_state_init(&VG_(threads)[i]);
sewardj20917d82002-05-28 01:36:45 +0000443 mostly_clear_thread_record(i);
sewardjb5f6f512005-03-10 23:59:00 +0000444
njn50ba34e2005-04-04 02:41:42 +0000445 VG_(threads)[i].status = VgTs_Empty;
446 VG_(threads)[i].client_stack_szB = 0;
447 VG_(threads)[i].client_stack_highest_word = (Addr)NULL;
sewardje663cb92002-04-12 10:26:32 +0000448 }
449
sewardjb5f6f512005-03-10 23:59:00 +0000450 tid_main = VG_(alloc_ThreadState)();
sewardja4068de2006-04-05 23:06:31 +0000451 vg_assert(tid_main == 1);
sewardj5f07b662002-04-23 16:52:51 +0000452
njn50ba34e2005-04-04 02:41:42 +0000453 VG_(threads)[tid_main].client_stack_highest_word
sewardj45f4e7c2005-09-27 19:20:21 +0000454 = clstack_end + 1 - sizeof(UWord);
455 VG_(threads)[tid_main].client_stack_szB
456 = clstack_size;
sewardjbf290b92002-05-01 02:28:01 +0000457
njn310ed282005-06-26 15:11:37 +0000458 VG_(atfork_child)(sched_fork_cleanup);
sewardje663cb92002-04-12 10:26:32 +0000459}
460
461
sewardje663cb92002-04-12 10:26:32 +0000462/* ---------------------------------------------------------------------
sewardj0ec07f32006-01-12 12:32:32 +0000463 Helpers for running translations.
464 ------------------------------------------------------------------ */
465
466/* Use gcc's built-in setjmp/longjmp. longjmp must not restore signal
467 mask state, but does need to pass "val" through. */
468#define SCHEDSETJMP(tid, jumped, stmt) \
469 do { \
470 ThreadState * volatile _qq_tst = VG_(get_ThreadState)(tid); \
471 \
472 (jumped) = __builtin_setjmp(_qq_tst->sched_jmpbuf); \
473 if ((jumped) == 0) { \
474 vg_assert(!_qq_tst->sched_jmpbuf_valid); \
475 _qq_tst->sched_jmpbuf_valid = True; \
476 stmt; \
477 } else if (VG_(clo_trace_sched)) \
478 VG_(printf)("SCHEDSETJMP(line %d) tid %d, jumped=%d\n", \
479 __LINE__, tid, jumped); \
480 vg_assert(_qq_tst->sched_jmpbuf_valid); \
481 _qq_tst->sched_jmpbuf_valid = False; \
482 } while(0)
483
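/* A standalone sketch of the escape mechanism SCHEDSETJMP wraps:
   gcc's __builtin_setjmp/__builtin_longjmp.  Here a plain function
   call stands in for the signal handler that does the longjmp; the
   five-word buffer is the format these builtins require. */
#if 0
#include <stdio.h>

static void *escape_buf[5];           /* gcc builtin jmp_buf format */

static void pretend_fault_handler(void)
{
   __builtin_longjmp(escape_buf, 1);  /* second argument must be 1 */
}

static void run_guarded(void)
{
   if (__builtin_setjmp(escape_buf) == 0) {
      pretend_fault_handler();        /* "run the translation" */
      printf("not reached\n");
   } else {
      printf("longjmp'd back; clean up and carry on\n");
   }
}
#endif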
484
485/* Do various guest state alignment checks prior to running a thread.
486 Specifically, check that what we have matches Vex's guest state
487 layout requirements. */
sewardj6b0d5b32006-01-22 01:10:12 +0000488static void do_pre_run_checks ( volatile ThreadState* tst )
sewardj0ec07f32006-01-12 12:32:32 +0000489{
490 Addr a_vex = (Addr) & tst->arch.vex;
491 Addr a_vexsh = (Addr) & tst->arch.vex_shadow;
492 Addr a_spill = (Addr) & tst->arch.vex_spill;
493 UInt sz_vex = (UInt) sizeof tst->arch.vex;
494 UInt sz_vexsh = (UInt) sizeof tst->arch.vex_shadow;
495 UInt sz_spill = (UInt) sizeof tst->arch.vex_spill;
496
497 if (0)
498 VG_(printf)("%p %d %p %d %p %d\n",
499 (void*)a_vex, sz_vex, (void*)a_vexsh, sz_vexsh,
500 (void*)a_spill, sz_spill );
501
502 vg_assert(VG_IS_8_ALIGNED(sz_vex));
503 vg_assert(VG_IS_8_ALIGNED(sz_vexsh));
504 vg_assert(VG_IS_16_ALIGNED(sz_spill));
505
506 vg_assert(VG_IS_4_ALIGNED(a_vex));
507 vg_assert(VG_IS_4_ALIGNED(a_vexsh));
508 vg_assert(VG_IS_4_ALIGNED(a_spill));
509
510 vg_assert(sz_vex == sz_vexsh);
511 vg_assert(a_vex + sz_vex == a_vexsh);
512
513 vg_assert(sz_spill == LibVEX_N_SPILL_BYTES);
514 vg_assert(a_vex + 2 * sz_vex == a_spill);
515
516# if defined(VGA_ppc32) || defined(VGA_ppc64)
517 /* ppc guest_state vector regs must be 16 byte aligned for
518 loads/stores */
519 vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VR0));
520 vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow.guest_VR0));
521# endif
522}
523
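/* The VG_IS_*_ALIGNED tests used above amount to simple mask checks;
   a standalone restatement (the real macros live in a core header and
   may be defined differently): */
#if 0
#define IS_4_ALIGNED(a)   (0 == (((unsigned long)(a)) & 0x3))
#define IS_8_ALIGNED(a)   (0 == (((unsigned long)(a)) & 0x7))
#define IS_16_ALIGNED(a)  (0 == (((unsigned long)(a)) & 0xF))
#endif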
524
525/* Run the thread tid for a while, and return a VG_TRC_* value
526 indicating why VG_(run_innerloop) stopped. */
527static UInt run_thread_for_a_while ( ThreadId tid )
528{
529 volatile Int jumped;
sewardj1a85f4f2006-01-12 21:15:35 +0000530 volatile ThreadState* tst = NULL; /* stop gcc complaining */
sewardj0ec07f32006-01-12 12:32:32 +0000531 volatile UInt trc;
532 volatile Int dispatch_ctr_SAVED;
533 volatile Int done_this_time;
534
535 /* Paranoia */
536 vg_assert(VG_(is_valid_tid)(tid));
537 vg_assert(VG_(is_running_thread)(tid));
538 vg_assert(!VG_(is_exiting)(tid));
539
540 tst = VG_(get_ThreadState)(tid);
541 do_pre_run_checks(tst);
542 /* end Paranoia */
543
sewardj63fed7f2006-01-17 02:02:47 +0000544 //if (0) {
545 // Char buf[100];
546 // Bool ok = VG_(get_fnname_if_entry) ( tst->arch.vex.guest_CIA,
547 // buf, 100 );
548 // if (ok) {
549 // Addr r2actual = tst->arch.vex.guest_GPR2;
550 // Addr r2tocptr = VG_(get_tocptr)( tst->arch.vex.guest_CIA );
551 // if (1) VG_(printf)("R2 act 0x%016llx toc 0x%016llx %s\n",
552 // r2actual, r2tocptr, buf);
553 // if (r2tocptr != 0) vg_assert(r2actual == r2tocptr);
554 // }
555 //}
556
sewardj0ec07f32006-01-12 12:32:32 +0000557 trc = 0;
558 dispatch_ctr_SAVED = VG_(dispatch_ctr);
559
560# if defined(VGA_ppc32) || defined(VGA_ppc64)
561 /* This is necessary due to the hacky way vex models reservations
562 on ppc. It's really quite incorrect for each thread to have its
563 own reservation flag/address, since it's really something that
564 all threads share (that's the whole point). But having shared
565 guest state is something we can't model with Vex. However, as
566 per PaulM's 2.4.0ppc, the reservation is modelled using a
567 reservation flag which is cleared at each context switch. So it
568 is indeed possible to get away with a per-thread reservation if
569 the thread's reservation is cleared before running it.
570 */
571 /* Clear any existing reservation that this thread might have made
572 last time it was running. */
573 VG_(threads)[tid].arch.vex.guest_RESVN = 0;
574# endif
575
576 /* there should be no undealt-with signals */
577 //vg_assert(VG_(threads)[tid].siginfo.si_signo == 0);
578
579 vg_assert(VG_(in_generated_code) == False);
580 VG_(in_generated_code) = True;
581
582 SCHEDSETJMP(
583 tid,
584 jumped,
585 trc = (UInt)VG_(run_innerloop)( (void*)&tst->arch.vex,
586 VG_(clo_profile_flags) > 0 ? 1 : 0 )
587 );
588
589 VG_(in_generated_code) = False;
590
591 if (jumped) {
592 /* We get here if the client took a fault that caused our signal
593 handler to longjmp. */
594 vg_assert(trc == 0);
595 trc = VG_TRC_FAULT_SIGNAL;
596 block_signals(tid);
597 }
598
599 done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 0;
600
601 vg_assert(done_this_time >= 0);
602 bbs_done += (ULong)done_this_time;
603
604 return trc;
605}
606
607
608/* Run a no-redir translation just once, and return the resulting
609 VG_TRC_* value. */
610static UInt run_noredir_translation ( Addr hcode, ThreadId tid )
611{
612 volatile Int jumped;
613 volatile ThreadState* tst;
614 volatile UWord argblock[4];
615
616 /* Paranoia */
617 vg_assert(VG_(is_valid_tid)(tid));
618 vg_assert(VG_(is_running_thread)(tid));
619 vg_assert(!VG_(is_exiting)(tid));
620
621 tst = VG_(get_ThreadState)(tid);
622 do_pre_run_checks(tst);
623 /* end Paranoia */
624
625# if defined(VGA_ppc32) || defined(VGA_ppc64)
626 /* I don't think we need to clear this thread's guest_RESVN here,
627 because we can only get here if run_thread_for_a_while() has
628 been used immediately before, on this same thread. */
629# endif
630
sewardj74d6e0e2006-01-13 13:04:03 +0000631 /* There can be 3 outcomes from VG_(run_a_noredir_translation):
632
633 - a signal occurred and the sighandler longjmp'd. Then both [2]
634 and [3] are unchanged - hence zero.
635
636 - translation ran normally, set [2] (next guest IP) and set [3]
637 to whatever [1] was beforehand, indicating a normal (boring)
638 jump to the next block.
639
640 - translation ran normally, set [2] (next guest IP) and set [3]
641 to something different from [1] beforehand, which indicates a
642 TRC_ value.
643 */
sewardj0ec07f32006-01-12 12:32:32 +0000644 argblock[0] = (UWord)hcode;
645 argblock[1] = (UWord)&VG_(threads)[tid].arch.vex;
sewardj74d6e0e2006-01-13 13:04:03 +0000646 argblock[2] = 0; /* next guest IP is written here */
647 argblock[3] = 0; /* guest state ptr afterwards is written here */
sewardj0ec07f32006-01-12 12:32:32 +0000648
649 vg_assert(VG_(in_generated_code) == False);
650 VG_(in_generated_code) = True;
651
652 SCHEDSETJMP(
653 tid,
654 jumped,
655 VG_(run_a_noredir_translation)( &argblock[0] )
656 );
657
658 VG_(in_generated_code) = False;
659
660 if (jumped) {
661 /* We get here if the client took a fault that caused our signal
662 handler to longjmp. */
sewardj74d6e0e2006-01-13 13:04:03 +0000663 vg_assert(argblock[2] == 0); /* next guest IP was not written */
664 vg_assert(argblock[3] == 0); /* trc was not written */
sewardj0ec07f32006-01-12 12:32:32 +0000665 block_signals(tid);
666 return VG_TRC_FAULT_SIGNAL;
667 } else {
668 /* store away the guest program counter */
669 VG_(set_IP)( tid, argblock[2] );
670 if (argblock[3] == argblock[1])
671 /* the guest state pointer afterwards was unchanged */
672 return VG_TRC_BORING;
673 else
674 return (UInt)argblock[3];
675 }
676}
677
sewardj0ec07f32006-01-12 12:32:32 +0000678
679/* ---------------------------------------------------------------------
sewardje663cb92002-04-12 10:26:32 +0000680 The scheduler proper.
681 ------------------------------------------------------------------ */
682
sewardjb5f6f512005-03-10 23:59:00 +0000683static void handle_tt_miss ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +0000684{
sewardjb5f6f512005-03-10 23:59:00 +0000685 Bool found;
njnf536bbb2005-06-13 04:21:38 +0000686 Addr ip = VG_(get_IP)(tid);
sewardjb5f6f512005-03-10 23:59:00 +0000687
688 /* Trivial event. Miss in the fast-cache. Do a full
689 lookup for it. */
njnf536bbb2005-06-13 04:21:38 +0000690 found = VG_(search_transtab)( NULL, ip, True/*upd_fast_cache*/ );
sewardjb5f6f512005-03-10 23:59:00 +0000691 if (!found) {
692 /* Not found; we need to request a translation. */
sewardj0ec07f32006-01-12 12:32:32 +0000693 if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/,
694 bbs_done, True/*allow redirection*/ )) {
sewardjb5f6f512005-03-10 23:59:00 +0000695 found = VG_(search_transtab)( NULL, ip, True );
njn50ae1a72005-04-08 23:28:23 +0000696 vg_assert2(found, "VG_TRC_INNER_FASTMISS: missing tt_fast entry");
697
sewardjb5f6f512005-03-10 23:59:00 +0000698 } else {
699 // If VG_(translate)() fails, it's because it had to throw a
700 // signal because the client jumped to a bad address. That
701 // means that either a signal has been set up for delivery,
702 // or the thread has been marked for termination. Either
703 // way, we just need to go back into the scheduler loop.
704 }
705 }
706}
707
708static void handle_syscall(ThreadId tid)
709{
710 ThreadState *tst = VG_(get_ThreadState)(tid);
711 Bool jumped;
712
713 /* Syscall may or may not block; either way, it will be
714 complete by the time this call returns, and we'll be
715 runnable again. We could take a signal while the
716 syscall runs. */
sewardj45f4e7c2005-09-27 19:20:21 +0000717
718 if (VG_(clo_sanity_level) >= 3)
719 VG_(am_do_sync_check)("(BEFORE SYSCALL)",__FILE__,__LINE__);
720
sewardjb5f6f512005-03-10 23:59:00 +0000721 SCHEDSETJMP(tid, jumped, VG_(client_syscall)(tid));
722
sewardj45f4e7c2005-09-27 19:20:21 +0000723 if (VG_(clo_sanity_level >= 3))
724 VG_(am_do_sync_check)("(AFTER SYSCALL)",__FILE__,__LINE__);
725
sewardjb5f6f512005-03-10 23:59:00 +0000726 if (!VG_(is_running_thread)(tid))
njnc7561b92005-06-19 01:24:32 +0000727 VG_(printf)("tid %d not running; VG_(running_tid)=%d, tid %d status %d\n",
728 tid, VG_(running_tid), tid, tst->status);
sewardjb5f6f512005-03-10 23:59:00 +0000729 vg_assert(VG_(is_running_thread)(tid));
730
731 if (jumped) {
njn9fc31122005-05-11 18:48:33 +0000732 block_signals(tid);
sewardjb5f6f512005-03-10 23:59:00 +0000733 VG_(poll_signals)(tid);
734 }
735}
736
sewardja591a052006-01-12 14:04:46 +0000737/* tid just requested a jump to the noredir version of its current
738 program counter. So make up that translation if needed, run it,
739 and return the resulting thread return code. */
740static UInt/*trc*/ handle_noredir_jump ( ThreadId tid )
741{
742 AddrH hcode = 0;
743 Addr ip = VG_(get_IP)(tid);
744
745 Bool found = VG_(search_unredir_transtab)( &hcode, ip );
746 if (!found) {
747 /* Not found; we need to request a translation. */
748 if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/, bbs_done,
749 False/*NO REDIRECTION*/ )) {
750
751 found = VG_(search_unredir_transtab)( &hcode, ip );
752 vg_assert2(found, "unredir translation missing after creation?!");
753
754 } else {
755 // If VG_(translate)() fails, it's because it had to throw a
756 // signal because the client jumped to a bad address. That
757 // means that either a signal has been set up for delivery,
758 // or the thread has been marked for termination. Either
759 // way, we just need to go back into the scheduler loop.
760 return VG_TRC_BORING;
761 }
762
763 }
764
765 vg_assert(found);
766 vg_assert(hcode != 0);
767
768 /* Otherwise run it and return the resulting VG_TRC_* value. */
769 return run_noredir_translation( hcode, tid );
770}
771
772
sewardjb5f6f512005-03-10 23:59:00 +0000773/*
774 Run a thread until it wants to exit.
775
776 We assume that the caller has already called VG_(set_running) for
777 us, so we own the VCPU. Also, all signals are blocked.
778 */
779VgSchedReturnCode VG_(scheduler) ( ThreadId tid )
780{
sewardje663cb92002-04-12 10:26:32 +0000781 UInt trc;
sewardjb5f6f512005-03-10 23:59:00 +0000782 ThreadState *tst = VG_(get_ThreadState)(tid);
sewardje663cb92002-04-12 10:26:32 +0000783
sewardjc24be7a2005-03-15 01:40:12 +0000784 if (VG_(clo_trace_sched))
785 print_sched_event(tid, "entering VG_(scheduler)");
786
sewardjb5f6f512005-03-10 23:59:00 +0000787 /* set the proper running signal mask */
njn9fc31122005-05-11 18:48:33 +0000788 block_signals(tid);
sewardjb5f6f512005-03-10 23:59:00 +0000789
790 vg_assert(VG_(is_running_thread)(tid));
sewardje663cb92002-04-12 10:26:32 +0000791
njn14319cc2005-03-13 06:26:22 +0000792 VG_(dispatch_ctr) = SCHEDULING_QUANTUM + 1;
sewardj6072c362002-04-19 14:40:57 +0000793
sewardjb5f6f512005-03-10 23:59:00 +0000794 while(!VG_(is_exiting)(tid)) {
sewardjb5f6f512005-03-10 23:59:00 +0000795 if (VG_(dispatch_ctr) == 1) {
796 /* Our slice is done, so yield the CPU to another thread. We
797 don't actually sleep between giving up the CPU and taking it
798 back, since that would take too much time. */
799 VG_(set_sleeping)(tid, VgTs_Yielding);
800 /* nothing */
801 VG_(set_running)(tid);
sewardje663cb92002-04-12 10:26:32 +0000802
sewardjb5f6f512005-03-10 23:59:00 +0000803 /* OK, do some relatively expensive housekeeping stuff */
804 scheduler_sanity(tid);
805 VG_(sanity_check_general)(False);
sewardje663cb92002-04-12 10:26:32 +0000806
sewardjb5f6f512005-03-10 23:59:00 +0000807 /* Look for any pending signals for this thread, and set them up
808 for delivery */
809 VG_(poll_signals)(tid);
sewardje663cb92002-04-12 10:26:32 +0000810
sewardjb5f6f512005-03-10 23:59:00 +0000811 if (VG_(is_exiting)(tid))
812 break; /* poll_signals picked up a fatal signal */
sewardje663cb92002-04-12 10:26:32 +0000813
sewardjb5f6f512005-03-10 23:59:00 +0000814 /* For stats purposes only. */
815 n_scheduling_events_MAJOR++;
sewardje663cb92002-04-12 10:26:32 +0000816
sewardjb5f6f512005-03-10 23:59:00 +0000817 /* Figure out how many bbs to ask vg_run_innerloop to do. Note
818 that it decrements the counter before testing it for zero, so
819 that if tst->dispatch_ctr is set to N you get at most N-1
820 iterations. Also this means that tst->dispatch_ctr must
821 exceed zero before entering the innerloop. Also also, the
822 decrement is done before the bb is actually run, so you
823 always get at least one decrement even if nothing happens. */
njn14319cc2005-03-13 06:26:22 +0000824 VG_(dispatch_ctr) = SCHEDULING_QUANTUM + 1;
jsgf855d93d2003-10-13 22:26:55 +0000825
sewardjb5f6f512005-03-10 23:59:00 +0000826 /* paranoia ... */
827 vg_assert(tst->tid == tid);
828 vg_assert(tst->os_state.lwpid == VG_(gettid)());
sewardje663cb92002-04-12 10:26:32 +0000829 }
830
sewardjb5f6f512005-03-10 23:59:00 +0000831 /* For stats purposes only. */
832 n_scheduling_events_MINOR++;
sewardje663cb92002-04-12 10:26:32 +0000833
834 if (0)
sewardjb5f6f512005-03-10 23:59:00 +0000835 VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs",
836 tid, VG_(dispatch_ctr) - 1 );
sewardje663cb92002-04-12 10:26:32 +0000837
sewardjb5f6f512005-03-10 23:59:00 +0000838 trc = run_thread_for_a_while ( tid );
sewardje663cb92002-04-12 10:26:32 +0000839
sewardjb5f6f512005-03-10 23:59:00 +0000840 if (VG_(clo_trace_sched) && VG_(clo_verbosity) > 2) {
841 Char buf[50];
842 VG_(sprintf)(buf, "TRC: %s", name_of_sched_event(trc));
843 print_sched_event(tid, buf);
sewardje663cb92002-04-12 10:26:32 +0000844 }
845
sewardj0ec07f32006-01-12 12:32:32 +0000846 if (trc == VEX_TRC_JMP_NOREDIR) {
847 /* If we got a request to run a no-redir version of
848 something, do so now -- handle_noredir_jump just (creates
849 and) runs that one translation. The flip side is that the
850 noredir translation can't itself return another noredir
851 request -- that would be nonsensical. It can, however,
852 return VG_TRC_BORING, which just means keep going as
853 normal. */
854 trc = handle_noredir_jump(tid);
855 vg_assert(trc != VEX_TRC_JMP_NOREDIR);
856 }
857
858 switch (trc) {
859 case VG_TRC_BORING:
860 /* no special event, just keep going. */
861 break;
862
sewardjb5f6f512005-03-10 23:59:00 +0000863 case VG_TRC_INNER_FASTMISS:
864 vg_assert(VG_(dispatch_ctr) > 1);
865 handle_tt_miss(tid);
866 break;
867
868 case VEX_TRC_JMP_CLIENTREQ:
869 do_client_request(tid);
870 break;
sewardja0fef1b2005-11-03 13:46:30 +0000871
872 case VEX_TRC_JMP_SYS_INT128: /* x86-linux */
873 case VEX_TRC_JMP_SYS_SYSCALL: /* amd64-linux, ppc32-linux */
sewardjb5f6f512005-03-10 23:59:00 +0000874 handle_syscall(tid);
875 if (VG_(clo_sanity_level) > 2)
876 VG_(sanity_check_general)(True); /* sanity-check every syscall */
877 break;
sewardje663cb92002-04-12 10:26:32 +0000878
sewardjb5f6f512005-03-10 23:59:00 +0000879 case VEX_TRC_JMP_YIELD:
880 /* Explicit yield, because this thread is in a spin-lock
sewardj3fc75752005-03-12 15:16:31 +0000881 or something. Only let the thread run for a short while
882 longer. Because swapping to another thread is expensive,
883 we're prepared to let this thread eat a little more CPU
884 before swapping to another. That means that short term
885 spins waiting for hardware to poke memory won't cause a
886 thread swap. */
sewardj3a74fb02006-03-16 11:31:29 +0000887 if (VG_(dispatch_ctr) > 2000)
888 VG_(dispatch_ctr) = 2000;
sewardjb5f6f512005-03-10 23:59:00 +0000889 break;
sewardje663cb92002-04-12 10:26:32 +0000890
sewardjb5f6f512005-03-10 23:59:00 +0000891 case VG_TRC_INNER_COUNTERZERO:
892 /* Timeslice is out. Let a new thread be scheduled. */
893 vg_assert(VG_(dispatch_ctr) == 1);
894 break;
sewardje663cb92002-04-12 10:26:32 +0000895
sewardjb5f6f512005-03-10 23:59:00 +0000896 case VG_TRC_FAULT_SIGNAL:
897 /* Everything should be set up (either we're exiting, or
898 about to start in a signal handler). */
899 break;
sewardj9d1b5d32002-04-17 19:40:49 +0000900
sewardj07bdc5e2005-03-11 13:19:47 +0000901 case VEX_TRC_JMP_MAPFAIL:
902 /* Failure of arch-specific address translation (x86/amd64
903 segment override use) */
904 /* jrs 2005 03 11: is this correct? */
905 VG_(synth_fault)(tid);
906 break;
907
sewardjb5f6f512005-03-10 23:59:00 +0000908 case VEX_TRC_JMP_EMWARN: {
909 static Int counts[EmWarn_NUMBER];
910 static Bool counts_initted = False;
911 VexEmWarn ew;
912 HChar* what;
913 Bool show;
914 Int q;
915 if (!counts_initted) {
916 counts_initted = True;
917 for (q = 0; q < EmWarn_NUMBER; q++)
918 counts[q] = 0;
919 }
920 ew = (VexEmWarn)VG_(threads)[tid].arch.vex.guest_EMWARN;
921 what = (ew < 0 || ew >= EmWarn_NUMBER)
922 ? "unknown (?!)"
923 : LibVEX_EmWarn_string(ew);
924 show = (ew < 0 || ew >= EmWarn_NUMBER)
925 ? True
926 : counts[ew]++ < 3;
sewardjd68ac3e2006-01-20 14:31:57 +0000927 if (show && VG_(clo_show_emwarns) && !VG_(clo_xml)) {
sewardjb5f6f512005-03-10 23:59:00 +0000928 VG_(message)( Vg_UserMsg,
929 "Emulation warning: unsupported action:");
930 VG_(message)( Vg_UserMsg, " %s", what);
njnd01fef72005-03-25 23:35:48 +0000931 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
sewardjb5f6f512005-03-10 23:59:00 +0000932 }
933 break;
934 }
sewardje663cb92002-04-12 10:26:32 +0000935
sewardjd68ac3e2006-01-20 14:31:57 +0000936 case VEX_TRC_JMP_EMFAIL: {
937 VexEmWarn ew;
938 HChar* what;
939 ew = (VexEmWarn)VG_(threads)[tid].arch.vex.guest_EMWARN;
940 what = (ew < 0 || ew >= EmWarn_NUMBER)
941 ? "unknown (?!)"
942 : LibVEX_EmWarn_string(ew);
943 VG_(message)( Vg_UserMsg,
944 "Emulation fatal error -- Valgrind cannot continue:");
945 VG_(message)( Vg_UserMsg, " %s", what);
946 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
947 VG_(message)(Vg_UserMsg, "");
948 VG_(message)(Vg_UserMsg, "Valgrind has to exit now. Sorry.");
949 VG_(message)(Vg_UserMsg, "");
950 VG_(exit)(1);
951 break;
952 }
953
sewardj86df1552006-02-07 20:56:41 +0000954 case VEX_TRC_JMP_TRAP:
955 VG_(synth_sigtrap)(tid);
956 break;
957
sewardjb5f6f512005-03-10 23:59:00 +0000958 case VEX_TRC_JMP_NODECODE:
njnec4d5132006-03-21 23:15:43 +0000959 VG_(message)(Vg_UserMsg,
960 "valgrind: Unrecognised instruction at address %p.", VG_(get_IP)(tid));
njnd5021362005-09-29 00:35:18 +0000961#define M(a) VG_(message)(Vg_UserMsg, a);
njn7cf66582005-10-15 17:18:08 +0000962 M("Your program just tried to execute an instruction that Valgrind" );
963 M("did not recognise. There are two possible reasons for this." );
964 M("1. Your program has a bug and erroneously jumped to a non-code" );
965 M(" location. If you are running Memcheck and you just saw a" );
966 M(" warning about a bad jump, it's probably your program's fault.");
967 M("2. The instruction is legitimate but Valgrind doesn't handle it,");
968 M(" i.e. it's Valgrind's fault. If you think this is the case or");
njnec4d5132006-03-21 23:15:43 +0000969 M(" you are not sure, please let us know and we'll try to fix it.");
njn7cf66582005-10-15 17:18:08 +0000970 M("Either way, Valgrind will now raise a SIGILL signal which will" );
971 M("probably kill your program." );
njnd5021362005-09-29 00:35:18 +0000972#undef M
njnf536bbb2005-06-13 04:21:38 +0000973 VG_(synth_sigill)(tid, VG_(get_IP)(tid));
sewardjb5f6f512005-03-10 23:59:00 +0000974 break;
sewardje663cb92002-04-12 10:26:32 +0000975
cerion85665ca2005-06-20 15:51:07 +0000976 case VEX_TRC_JMP_TINVAL:
cerion85665ca2005-06-20 15:51:07 +0000977 VG_(discard_translations)(
978 (Addr64)VG_(threads)[tid].arch.vex.guest_TISTART,
sewardj45f4e7c2005-09-27 19:20:21 +0000979 VG_(threads)[tid].arch.vex.guest_TILEN,
980 "scheduler(VEX_TRC_JMP_TINVAL)"
sewardj487ac702005-06-21 12:52:38 +0000981 );
cerion85665ca2005-06-20 15:51:07 +0000982 if (0)
983 VG_(printf)("dump translations done.\n");
cerion85665ca2005-06-20 15:51:07 +0000984 break;
985
sewardje3a384b2005-07-29 08:51:34 +0000986 case VG_TRC_INVARIANT_FAILED:
987 /* This typically happens if, after running generated code,
988 it is detected that host CPU settings (eg, FPU/Vector
989 control words) are not as they should be. Vex's code
990 generation specifies the state such control words should
991 be in on entry to Vex-generated code, and they should be
992 unchanged on exit from it. Failure of this assertion
993 usually means a bug in Vex's code generation. */
994 vg_assert2(0, "VG_(scheduler), phase 3: "
995 "run_innerloop detected host "
996 "state invariant failure", trc);
997
sewardja0fef1b2005-11-03 13:46:30 +0000998 case VEX_TRC_JMP_SYS_SYSENTER:
sewardj5438a012005-08-07 14:49:27 +0000999 /* Do whatever simulation is appropriate for an x86 sysenter
1000 instruction. Note that it is critical to set this thread's
1001 guest_EIP to point at the code to execute after the
1002 sysenter, since Vex-generated code will not have set it --
1003 vex does not know what it should be. Vex sets the next
1004 address to zero, so if you don't guest_EIP, the thread will
1005 jump to zero afterwards and probably die as a result. */
1006# if defined(VGA_x86)
1007 //FIXME: VG_(threads)[tid].arch.vex.guest_EIP = ....
1008 //handle_sysenter_x86(tid);
1009 vg_assert2(0, "VG_(scheduler), phase 3: "
1010 "sysenter_x86 on not yet implemented");
1011# else
1012 vg_assert2(0, "VG_(scheduler), phase 3: "
1013 "sysenter_x86 on non-x86 platform?!?!");
1014# endif
1015
sewardjb5f6f512005-03-10 23:59:00 +00001016 default:
njn50ae1a72005-04-08 23:28:23 +00001017 vg_assert2(0, "VG_(scheduler), phase 3: "
1018 "unexpected thread return code (%u)", trc);
sewardjb5f6f512005-03-10 23:59:00 +00001019 /* NOTREACHED */
1020 break;
sewardje663cb92002-04-12 10:26:32 +00001021
1022 } /* switch (trc) */
nethercote238a3c32004-08-09 13:13:31 +00001023 }
sewardjc24be7a2005-03-15 01:40:12 +00001024
1025 if (VG_(clo_trace_sched))
1026 print_sched_event(tid, "exiting VG_(scheduler)");
1027
sewardjb5f6f512005-03-10 23:59:00 +00001028 vg_assert(VG_(is_exiting)(tid));
thughes513197c2004-06-13 12:07:53 +00001029
sewardjb5f6f512005-03-10 23:59:00 +00001030 //if (VG_(clo_model_pthreads))
1031 // VG_(tm_thread_exit)(tid);
1032
1033 return tst->exitreason;
sewardj20917d82002-05-28 01:36:45 +00001034}
1035
1036
sewardjb5f6f512005-03-10 23:59:00 +00001037/*
1038 This causes all threads to forcibly exit. They aren't actually
1039 dead by the time this returns; you need to call
njnaf839f52005-06-23 03:27:57 +00001040 VG_(reap_threads)() to wait for them.
sewardjb5f6f512005-03-10 23:59:00 +00001041 */
1042void VG_(nuke_all_threads_except) ( ThreadId me, VgSchedReturnCode src )
sewardjccef2e62002-05-29 19:26:32 +00001043{
1044 ThreadId tid;
sewardjb5f6f512005-03-10 23:59:00 +00001045
1046 vg_assert(VG_(is_running_thread)(me));
sewardj45f02c42005-02-05 18:27:14 +00001047
sewardjccef2e62002-05-29 19:26:32 +00001048 for (tid = 1; tid < VG_N_THREADS; tid++) {
1049 if (tid == me
jsgf855d93d2003-10-13 22:26:55 +00001050 || VG_(threads)[tid].status == VgTs_Empty)
sewardjccef2e62002-05-29 19:26:32 +00001051 continue;
sewardjb5f6f512005-03-10 23:59:00 +00001052 if (0)
sewardjef037c72002-05-30 00:40:03 +00001053 VG_(printf)(
1054 "VG_(nuke_all_threads_except): nuking tid %d\n", tid);
sewardjb5f6f512005-03-10 23:59:00 +00001055
1056 VG_(threads)[tid].exitreason = src;
sewardja8d8e232005-06-07 20:04:56 +00001057 if (src == VgSrc_FatalSig)
1058 VG_(threads)[tid].os_state.fatalsig = VKI_SIGKILL;
sewardjb5f6f512005-03-10 23:59:00 +00001059 VG_(kill_thread)(tid);
sewardjccef2e62002-05-29 19:26:32 +00001060 }
1061}
1062
1063
njnd3040452003-05-19 15:04:06 +00001064/* ---------------------------------------------------------------------
sewardjb5f6f512005-03-10 23:59:00 +00001065 Specifying shadow register values
njnd3040452003-05-19 15:04:06 +00001066 ------------------------------------------------------------------ */
1067
njnf536bbb2005-06-13 04:21:38 +00001068#if defined(VGA_x86)
njnaf839f52005-06-23 03:27:57 +00001069# define VG_CLREQ_ARGS guest_EAX
1070# define VG_CLREQ_RET guest_EDX
njnf536bbb2005-06-13 04:21:38 +00001071#elif defined(VGA_amd64)
njnaf839f52005-06-23 03:27:57 +00001072# define VG_CLREQ_ARGS guest_RAX
1073# define VG_CLREQ_RET guest_RDX
sewardj2c48c7b2005-11-29 13:05:56 +00001074#elif defined(VGA_ppc32) || defined(VGA_ppc64)
njnaf839f52005-06-23 03:27:57 +00001075# define VG_CLREQ_ARGS guest_GPR4
1076# define VG_CLREQ_RET guest_GPR3
njnf536bbb2005-06-13 04:21:38 +00001077#else
1078# error Unknown arch
1079#endif
1080
njnaf839f52005-06-23 03:27:57 +00001081#define CLREQ_ARGS(regs) ((regs).vex.VG_CLREQ_ARGS)
1082#define CLREQ_RET(regs) ((regs).vex.VG_CLREQ_RET)
1083#define O_CLREQ_RET (offsetof(VexGuestArchState, VG_CLREQ_RET))
njnf536bbb2005-06-13 04:21:38 +00001084
njn502badb2005-05-08 02:04:49 +00001085// These macros write a value to a client's thread register, and tell the
1086// tool that it's happened (if necessary).
1087
1088#define SET_CLREQ_RETVAL(zztid, zzval) \
1089 do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
1090 VG_TRACK( post_reg_write, \
1091 Vg_CoreClientReq, zztid, O_CLREQ_RET, sizeof(UWord)); \
1092 } while (0)
1093
1094#define SET_CLCALL_RETVAL(zztid, zzval, f) \
1095 do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
1096 VG_TRACK( post_reg_write_clientcall_return, \
1097 zztid, O_CLREQ_RET, sizeof(UWord), f); \
1098 } while (0)
1099
sewardj0ec07f32006-01-12 12:32:32 +00001100
sewardje663cb92002-04-12 10:26:32 +00001101/* ---------------------------------------------------------------------
sewardj124ca2a2002-06-20 10:19:38 +00001102 Handle client requests.
sewardje663cb92002-04-12 10:26:32 +00001103 ------------------------------------------------------------------ */
1104
njn9cb54ac2005-06-12 04:19:17 +00001105// OS-specific(?) client requests
1106static Bool os_client_request(ThreadId tid, UWord *args)
1107{
1108 Bool handled = True;
1109
1110 vg_assert(VG_(is_running_thread)(tid));
1111
1112 switch(args[0]) {
1113 case VG_USERREQ__LIBC_FREERES_DONE:
1114 /* This is equivalent to an exit() syscall, but we don't set the
1115 exitcode (since it might already be set) */
1116 if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched))
1117 VG_(message)(Vg_DebugMsg,
1118 "__libc_freeres() done; really quitting!");
1119 VG_(threads)[tid].exitreason = VgSrc_ExitSyscall;
1120 break;
1121
1122 default:
1123 handled = False;
1124 break;
1125 }
1126
1127 return handled;
1128}
1129
1130
sewardj124ca2a2002-06-20 10:19:38 +00001131/* Do a client request for the thread tid. After the request, tid may
1132 or may not still be runnable; if not, the scheduler will have to
1133 choose a new thread to run.
1134*/
sewardje663cb92002-04-12 10:26:32 +00001135static
sewardjb5f6f512005-03-10 23:59:00 +00001136void do_client_request ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00001137{
sewardjb5f6f512005-03-10 23:59:00 +00001138 UWord* arg = (UWord*)(CLREQ_ARGS(VG_(threads)[tid].arch));
nethercoted1b64b22004-11-04 18:22:28 +00001139 UWord req_no = arg[0];
sewardj124ca2a2002-06-20 10:19:38 +00001140
fitzhardinge98abfc72003-12-16 02:05:15 +00001141 if (0)
nethercoted1b64b22004-11-04 18:22:28 +00001142 VG_(printf)("req no = 0x%llx, arg = %p\n", (ULong)req_no, arg);
sewardje663cb92002-04-12 10:26:32 +00001143 switch (req_no) {
1144
njn3e884182003-04-15 13:03:23 +00001145 case VG_USERREQ__CLIENT_CALL0: {
njn2ac95242005-03-13 23:07:30 +00001146 UWord (*f)(ThreadId) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001147 if (f == NULL)
njn3d9edb22005-08-25 01:52:52 +00001148 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL0: func=%p", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001149 else
njn2ac95242005-03-13 23:07:30 +00001150 SET_CLCALL_RETVAL(tid, f ( tid ), (Addr)f);
njn3e884182003-04-15 13:03:23 +00001151 break;
1152 }
1153 case VG_USERREQ__CLIENT_CALL1: {
njn2ac95242005-03-13 23:07:30 +00001154 UWord (*f)(ThreadId, UWord) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001155 if (f == NULL)
njn3d9edb22005-08-25 01:52:52 +00001156 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL1: func=%p", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001157 else
njn2ac95242005-03-13 23:07:30 +00001158 SET_CLCALL_RETVAL(tid, f ( tid, arg[2] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00001159 break;
1160 }
1161 case VG_USERREQ__CLIENT_CALL2: {
njn2ac95242005-03-13 23:07:30 +00001162 UWord (*f)(ThreadId, UWord, UWord) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001163 if (f == NULL)
njn3d9edb22005-08-25 01:52:52 +00001164 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL2: func=%p", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001165 else
njn2ac95242005-03-13 23:07:30 +00001166 SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00001167 break;
1168 }
1169 case VG_USERREQ__CLIENT_CALL3: {
njn2ac95242005-03-13 23:07:30 +00001170 UWord (*f)(ThreadId, UWord, UWord, UWord) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001171 if (f == NULL)
njn3d9edb22005-08-25 01:52:52 +00001172 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL3: func=%p", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001173 else
njn2ac95242005-03-13 23:07:30 +00001174 SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3], arg[4] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00001175 break;
1176 }
1177
njnf09745a2005-05-10 03:01:23 +00001178 // Nb: this looks like a circular definition, because it kind of is.
1179 // See comment in valgrind.h to understand what's going on.
sewardj124ca2a2002-06-20 10:19:38 +00001180 case VG_USERREQ__RUNNING_ON_VALGRIND:
sewardjb5f6f512005-03-10 23:59:00 +00001181 SET_CLREQ_RETVAL(tid, RUNNING_ON_VALGRIND+1);
sewardj124ca2a2002-06-20 10:19:38 +00001182 break;
1183
fitzhardinge39de4b42003-10-31 07:12:21 +00001184 case VG_USERREQ__PRINTF: {
sewardj63fed7f2006-01-17 02:02:47 +00001185 Int count =
nethercote3e901a22004-09-11 13:17:02 +00001186 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] );
fitzhardinge39de4b42003-10-31 07:12:21 +00001187 SET_CLREQ_RETVAL( tid, count );
1188 break; }
1189
1190 case VG_USERREQ__INTERNAL_PRINTF: {
sewardj63fed7f2006-01-17 02:02:47 +00001191 Int count =
njnaa3c26b2005-03-12 05:32:28 +00001192 VG_(vmessage)( Vg_DebugMsg, (char *)arg[1], (void*)arg[2] );
fitzhardinge39de4b42003-10-31 07:12:21 +00001193 SET_CLREQ_RETVAL( tid, count );
1194 break; }
1195
1196 case VG_USERREQ__PRINTF_BACKTRACE: {
sewardj63fed7f2006-01-17 02:02:47 +00001197 Int count =
nethercote3e901a22004-09-11 13:17:02 +00001198 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] );
njnd01fef72005-03-25 23:35:48 +00001199 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
fitzhardinge39de4b42003-10-31 07:12:21 +00001200 SET_CLREQ_RETVAL( tid, count );
1201 break; }
1202
rjwalsh0140af52005-06-04 20:42:33 +00001203 case VG_USERREQ__STACK_REGISTER: {
njn945ed2e2005-06-24 03:28:30 +00001204 UWord sid = VG_(register_stack)((Addr)arg[1], (Addr)arg[2]);
rjwalsh0140af52005-06-04 20:42:33 +00001205 SET_CLREQ_RETVAL( tid, sid );
1206 break; }
1207
1208 case VG_USERREQ__STACK_DEREGISTER: {
njn945ed2e2005-06-24 03:28:30 +00001209 VG_(deregister_stack)(arg[1]);
rjwalsh0140af52005-06-04 20:42:33 +00001210 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1211 break; }
1212
1213 case VG_USERREQ__STACK_CHANGE: {
njn945ed2e2005-06-24 03:28:30 +00001214 VG_(change_stack)(arg[1], (Addr)arg[2], (Addr)arg[3]);
rjwalsh0140af52005-06-04 20:42:33 +00001215 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1216 break; }
1217
fitzhardinge98abfc72003-12-16 02:05:15 +00001218 case VG_USERREQ__GET_MALLOCFUNCS: {
1219 struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];
1220
njnfc51f8d2005-06-21 03:20:17 +00001221 info->tl_malloc = VG_(tdict).tool_malloc;
1222 info->tl_calloc = VG_(tdict).tool_calloc;
1223 info->tl_realloc = VG_(tdict).tool_realloc;
1224 info->tl_memalign = VG_(tdict).tool_memalign;
1225 info->tl___builtin_new = VG_(tdict).tool___builtin_new;
1226 info->tl___builtin_vec_new = VG_(tdict).tool___builtin_vec_new;
1227 info->tl_free = VG_(tdict).tool_free;
1228 info->tl___builtin_delete = VG_(tdict).tool___builtin_delete;
1229 info->tl___builtin_vec_delete = VG_(tdict).tool___builtin_vec_delete;
fitzhardinge98abfc72003-12-16 02:05:15 +00001230
njncf81d552005-03-31 04:52:26 +00001231 info->arena_payload_szB = VG_(arena_payload_szB);
njn088bfb42005-08-17 05:01:37 +00001232 info->mallinfo = VG_(mallinfo);
sewardjb5f6f512005-03-10 23:59:00 +00001233 info->clo_trace_malloc = VG_(clo_trace_malloc);
fitzhardinge98abfc72003-12-16 02:05:15 +00001234
1235 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1236
1237 break;
1238 }
1239
njn25e49d8e72002-09-23 09:36:25 +00001240 /* Requests from the client program */
1241
1242 case VG_USERREQ__DISCARD_TRANSLATIONS:
1243 if (VG_(clo_verbosity) > 2)
1244 VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
1245 " addr %p, len %d\n",
1246 (void*)arg[1], arg[2] );
1247
sewardj45f4e7c2005-09-27 19:20:21 +00001248 VG_(discard_translations)(
1249 arg[1], arg[2], "scheduler(VG_USERREQ__DISCARD_TRANSLATIONS)"
1250 );
njn25e49d8e72002-09-23 09:36:25 +00001251
njnd3040452003-05-19 15:04:06 +00001252 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
njn25e49d8e72002-09-23 09:36:25 +00001253 break;
1254
njn47363ab2003-04-21 13:24:40 +00001255 case VG_USERREQ__COUNT_ERRORS:
nethercotef2b11482004-08-02 12:36:01 +00001256 SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() );
njn47363ab2003-04-21 13:24:40 +00001257 break;
1258
sewardje663cb92002-04-12 10:26:32 +00001259 default:
njn9cb54ac2005-06-12 04:19:17 +00001260 if (os_client_request(tid, arg)) {
1261 // do nothing, os_client_request() handled it
sewardjb5f6f512005-03-10 23:59:00 +00001262 } else if (VG_(needs).client_requests) {
nethercoted1b64b22004-11-04 18:22:28 +00001263 UWord ret;
sewardj34042512002-10-22 04:14:35 +00001264
njn25e49d8e72002-09-23 09:36:25 +00001265 if (VG_(clo_verbosity) > 2)
fitzhardinge98abfc72003-12-16 02:05:15 +00001266 VG_(printf)("client request: code %x, addr %p, len %d\n",
njn25e49d8e72002-09-23 09:36:25 +00001267 arg[0], (void*)arg[1], arg[2] );
1268
njn51d827b2005-05-09 01:02:08 +00001269 if ( VG_TDICT_CALL(tool_handle_client_request, tid, arg, &ret) )
sewardjb5f6f512005-03-10 23:59:00 +00001270 SET_CLREQ_RETVAL(tid, ret);
njn25e49d8e72002-09-23 09:36:25 +00001271 } else {
sewardj34042512002-10-22 04:14:35 +00001272 static Bool whined = False;
1273
sewardjb5f6f512005-03-10 23:59:00 +00001274 if (!whined && VG_(clo_verbosity) > 2) {
nethercote7cc9c232004-01-21 15:08:04 +00001275 // Allow for requests in core, but defined by tools, which
njnd7994182003-10-02 13:44:04 +00001276 // have 0 and 0 in their two high bytes.
1277 Char c1 = (arg[0] >> 24) & 0xff;
1278 Char c2 = (arg[0] >> 16) & 0xff;
1279 if (c1 == 0) c1 = '_';
1280 if (c2 == 0) c2 = '_';
sewardj34042512002-10-22 04:14:35 +00001281 VG_(message)(Vg_UserMsg, "Warning:\n"
njnd7994182003-10-02 13:44:04 +00001282 " unhandled client request: 0x%x (%c%c+0x%x). Perhaps\n"
sewardj45f4e7c2005-09-27 19:20:21 +00001283 " VG_(needs).client_requests should be set?",
njnd7994182003-10-02 13:44:04 +00001284 arg[0], c1, c2, arg[0] & 0xffff);
sewardj34042512002-10-22 04:14:35 +00001285 whined = True;
1286 }
njn25e49d8e72002-09-23 09:36:25 +00001287 }
sewardje663cb92002-04-12 10:26:32 +00001288 break;
1289 }
1290}
1291
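/* Seen from the client side, the requests handled above arrive via
   the macros in valgrind.h.  A hedged sketch of a client using a few
   of the documented ones (RUNNING_ON_VALGRIND, VALGRIND_PRINTF,
   VALGRIND_DISCARD_TRANSLATIONS); the JIT buffer is invented for
   illustration: */
#if 0
#include <stdio.h>
#include <string.h>
#include "valgrind.h"

static unsigned char jit_buf[4096];      /* hypothetical JIT buffer */

static void client_example(void)
{
   if (RUNNING_ON_VALGRIND)
      VALGRIND_PRINTF("running under valgrind\n");

   /* after rewriting the code in jit_buf, ask valgrind to drop any
      translations it made of the old contents */
   memset(jit_buf, 0, sizeof jit_buf);
   VALGRIND_DISCARD_TRANSLATIONS(jit_buf, sizeof jit_buf);
}
#endif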
1292
sewardj6072c362002-04-19 14:40:57 +00001293/* ---------------------------------------------------------------------
njn6676d5b2005-06-19 18:49:19 +00001294 Sanity checking (permanently engaged)
sewardj6072c362002-04-19 14:40:57 +00001295 ------------------------------------------------------------------ */
1296
sewardjb5f6f512005-03-10 23:59:00 +00001297/* Internal consistency checks on the sched structures. */
sewardj6072c362002-04-19 14:40:57 +00001298static
sewardjb5f6f512005-03-10 23:59:00 +00001299void scheduler_sanity ( ThreadId tid )
sewardj6072c362002-04-19 14:40:57 +00001300{
sewardjb5f6f512005-03-10 23:59:00 +00001301 Bool bad = False;
jsgf855d93d2003-10-13 22:26:55 +00001302
sewardjb5f6f512005-03-10 23:59:00 +00001303 if (!VG_(is_running_thread)(tid)) {
1304 VG_(message)(Vg_DebugMsg,
1305 "Thread %d is supposed to be running, but doesn't own run_sema (owned by %d)\n",
njnc7561b92005-06-19 01:24:32 +00001306 tid, VG_(running_tid));
sewardjb5f6f512005-03-10 23:59:00 +00001307 bad = True;
jsgf855d93d2003-10-13 22:26:55 +00001308 }
sewardj5f07b662002-04-23 16:52:51 +00001309
sewardjb5f6f512005-03-10 23:59:00 +00001310 if (VG_(gettid)() != VG_(threads)[tid].os_state.lwpid) {
1311 VG_(message)(Vg_DebugMsg,
njnd06ed472005-03-13 05:12:31 +00001312 "Thread %d supposed to be in LWP %d, but we're actually %d\n",
1313 tid, VG_(threads)[tid].os_state.lwpid, VG_(gettid)());
sewardjb5f6f512005-03-10 23:59:00 +00001314 bad = True;
sewardj5f07b662002-04-23 16:52:51 +00001315 }
sewardj6072c362002-04-19 14:40:57 +00001316}
1317
njn6676d5b2005-06-19 18:49:19 +00001318void VG_(sanity_check_general) ( Bool force_expensive )
1319{
1320 ThreadId tid;
1321
njn6676d5b2005-06-19 18:49:19 +00001322 if (VG_(clo_sanity_level) < 1) return;
1323
1324 /* --- First do all the tests that we can do quickly. ---*/
1325
1326 sanity_fast_count++;
1327
1328 /* Check stuff pertaining to the memory check system. */
1329
1330 /* Check that nobody has spuriously claimed that the first or
1331 last 16 pages of memory have become accessible [...] */
1332 if (VG_(needs).sanity_checks) {
njn6676d5b2005-06-19 18:49:19 +00001333 vg_assert(VG_TDICT_CALL(tool_cheap_sanity_check));
njn6676d5b2005-06-19 18:49:19 +00001334 }
1335
1336 /* --- Now some more expensive checks. ---*/
1337
1338 /* Once every 25 times, check some more expensive stuff. */
1339 if ( force_expensive
1340 || VG_(clo_sanity_level) > 1
1341 || (VG_(clo_sanity_level) == 1 && (sanity_fast_count % 25) == 0)) {
1342
njn6676d5b2005-06-19 18:49:19 +00001343 sanity_slow_count++;
1344
njn6676d5b2005-06-19 18:49:19 +00001345 if (VG_(needs).sanity_checks) {
njn6676d5b2005-06-19 18:49:19 +00001346 vg_assert(VG_TDICT_CALL(tool_expensive_sanity_check));
njn6676d5b2005-06-19 18:49:19 +00001347 }
1348
njn6676d5b2005-06-19 18:49:19 +00001349 /* Look for stack overruns. Visit all threads. */
njnd666ea72005-06-26 17:26:22 +00001350 for (tid = 1; tid < VG_N_THREADS; tid++) {
sewardj45f4e7c2005-09-27 19:20:21 +00001351 SizeT remains;
1352 VgStack* stack;
njn6676d5b2005-06-19 18:49:19 +00001353
1354 if (VG_(threads)[tid].status == VgTs_Empty ||
1355 VG_(threads)[tid].status == VgTs_Zombie)
1356 continue;
1357
sewardj45f4e7c2005-09-27 19:20:21 +00001358 stack
1359 = (VgStack*)
1360 VG_(get_ThreadState)(tid)->os_state.valgrind_stack_base;
1361 remains
1362 = VG_(am_get_VgStack_unused_szB)(stack);
njn6676d5b2005-06-19 18:49:19 +00001363 if (remains < VKI_PAGE_SIZE)
1364 VG_(message)(Vg_DebugMsg,
1365 "WARNING: Thread %d is within %d bytes "
1366 "of running out of stack!",
1367 tid, remains);
1368 }
njn6676d5b2005-06-19 18:49:19 +00001369 }
1370
1371 if (VG_(clo_sanity_level) > 1) {
njn6676d5b2005-06-19 18:49:19 +00001372 /* Check sanity of the low-level memory manager. Note that bugs
1373 in the client's code can cause this to fail, so we don't do
1374 this check unless specially asked for. And because it's
1375 potentially very expensive. */
1376 VG_(sanity_check_malloc_all)();
njn6676d5b2005-06-19 18:49:19 +00001377 }
njn6676d5b2005-06-19 18:49:19 +00001378}
sewardj6072c362002-04-19 14:40:57 +00001379
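/* The cheap-vs-expensive cadence used by VG_(sanity_check_general)
   above, in isolation: the cheap check runs on every call, the
   expensive one only when forced, when the sanity level is raised, or
   on every 25th call.  cheap_check()/expensive_check() are stand-ins
   for the real work. */
#if 0
static void cheap_check(void)     { /* fast invariants */ }
static void expensive_check(void) { /* slow, thorough checks */ }

static unsigned long fast_count = 0;

static void periodic_checks(int force_expensive, int sanity_level)
{
   if (sanity_level < 1)
      return;

   fast_count++;
   cheap_check();

   if (force_expensive
       || sanity_level > 1
       || (sanity_level == 1 && (fast_count % 25) == 0))
      expensive_check();
}
#endif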
sewardje663cb92002-04-12 10:26:32 +00001380/*--------------------------------------------------------------------*/
njn278b3d62005-05-30 23:20:51 +00001381/*--- end ---*/
sewardje663cb92002-04-12 10:26:32 +00001382/*--------------------------------------------------------------------*/