blob: 113341cfcbc3df54a967de3f41868f13f1a132f2 [file] [log] [blame]
sewardje663cb92002-04-12 10:26:32 +00001
2/*--------------------------------------------------------------------*/
njn278b3d62005-05-30 23:20:51 +00003/*--- Thread scheduling. scheduler.c ---*/
sewardje663cb92002-04-12 10:26:32 +00004/*--------------------------------------------------------------------*/
5
6/*
njnc0ae7052005-08-25 22:55:19 +00007 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
sewardje663cb92002-04-12 10:26:32 +00009
njn53612422005-03-12 16:22:54 +000010 Copyright (C) 2000-2005 Julian Seward
sewardje663cb92002-04-12 10:26:32 +000011 jseward@acm.org
sewardje663cb92002-04-12 10:26:32 +000012
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
27
njn25e49d8e72002-09-23 09:36:25 +000028 The GNU General Public License is contained in the file COPYING.
sewardje663cb92002-04-12 10:26:32 +000029*/
30
sewardjb5f6f512005-03-10 23:59:00 +000031/*
32 Overview
33
34 Valgrind tries to emulate the kernel's threading as closely as
35 possible. The client does all threading via the normal syscalls
36 (on Linux: clone, etc). Valgrind emulates this by creating exactly
37 the same process structure as would be created without Valgrind.
38 There are no extra threads.
39
40 The main difference is that Valgrind only allows one client thread
41 to run at once. This is controlled with the VCPU semaphore,
42 "run_sema". Any time a thread wants to run client code or
43 manipulate any shared state (which is anything other than its own
44 ThreadState entry), it must hold the run_sema.
45
46 When a thread is about to block in a blocking syscall, it releases
47 run_sema, and re-takes it when it becomes runnable again (either
48 because the syscall finished, or we took a signal).
49
50 VG_(scheduler) therefore runs in each thread. It returns only when
51 the thread is exiting, either because it exited itself, or it was
52 told to exit by another thread.
53
54 This file is almost entirely OS-independent. The details of how
55 the OS handles threading and signalling are abstracted away and
njn12771092005-06-18 02:18:04 +000056 implemented elsewhere. [Some of the functions have worked their
57 way back for the moment, until we do an OS port in earnest...]
sewardjb5f6f512005-03-10 23:59:00 +000058 */
59
njn12771092005-06-18 02:18:04 +000060#include "valgrind.h" // for VG_USERREQ__*
61#include "coregrind.h" // for VG_USERREQ__*
sewardje663cb92002-04-12 10:26:32 +000062
njnc7561b92005-06-19 01:24:32 +000063#include "pub_core_basics.h"
64#include "pub_core_threadstate.h"
njn04e16982005-05-31 00:23:43 +000065#include "pub_core_aspacemgr.h"
njn36b66df2005-05-12 05:13:04 +000066#include "pub_core_dispatch.h"
njnf4c50162005-06-20 14:18:12 +000067#include "pub_core_errormgr.h" // For VG_(get_n_errs_found)()
njn97405b22005-06-02 03:39:33 +000068#include "pub_core_libcbase.h"
njn132bfcc2005-06-04 19:16:06 +000069#include "pub_core_libcassert.h"
njn36a20fa2005-06-03 03:08:39 +000070#include "pub_core_libcprint.h"
njnf39e9a32005-06-12 02:43:17 +000071#include "pub_core_libcproc.h"
njnde62cbf2005-06-10 22:08:14 +000072#include "pub_core_libcsignal.h"
njnf536bbb2005-06-13 04:21:38 +000073#include "pub_core_machine.h"
njnaf1d7df2005-06-11 01:31:52 +000074#include "pub_core_mallocfree.h"
njn20242342005-05-16 23:31:24 +000075#include "pub_core_options.h"
njn31513b42005-06-01 03:09:59 +000076#include "pub_core_profile.h"
njn717cde52005-05-10 02:47:21 +000077#include "pub_core_replacemalloc.h"
njn278b3d62005-05-30 23:20:51 +000078#include "pub_core_scheduler.h"
njn0c246472005-05-31 01:00:08 +000079#include "pub_core_signals.h"
njn945ed2e2005-06-24 03:28:30 +000080#include "pub_core_stacks.h"
njnf4c50162005-06-20 14:18:12 +000081#include "pub_core_stacktrace.h" // For VG_(get_and_pp_StackTrace)()
njn9abd6082005-06-17 21:31:45 +000082#include "pub_core_syscall.h"
njnc1b01812005-06-17 22:19:06 +000083#include "pub_core_syswrap.h"
njn43b9a8a2005-05-10 04:37:01 +000084#include "pub_core_tooliface.h"
njnf4c50162005-06-20 14:18:12 +000085#include "pub_core_translate.h" // For VG_(translate)()
njn8bddf582005-05-13 23:40:55 +000086#include "pub_core_transtab.h"
njn3c660b62005-05-13 22:18:47 +000087#include "vki_unistd.h"
njn278b3d62005-05-30 23:20:51 +000088#include "priv_sema.h"
sewardje663cb92002-04-12 10:26:32 +000089
90/* ---------------------------------------------------------------------
91 Types and globals for the scheduler.
92 ------------------------------------------------------------------ */
93
/* ThreadId and ThreadState are defined elsewhere. */
sewardje663cb92002-04-12 10:26:32 +000095
njn14319cc2005-03-13 06:26:22 +000096/* Defines the thread-scheduling timeslice, in terms of the number of
97 basic blocks we attempt to run each thread for. Smaller values
98 give finer interleaving but much increased scheduling overheads. */
99#define SCHEDULING_QUANTUM 50000
100
sewardjb5f6f512005-03-10 23:59:00 +0000101/* If true, a fault is Valgrind-internal (ie, a bug) */
102Bool VG_(my_fault) = True;
njn25e49d8e72002-09-23 09:36:25 +0000103
njnde583aa2005-05-11 18:57:02 +0000104/* Counts downwards in VG_(run_innerloop). */
105UInt VG_(dispatch_ctr);
106
njn394213a2005-06-19 18:38:24 +0000107/* 64-bit counter for the number of basic blocks done. */
108static ULong bbs_done = 0;
109
sewardje663cb92002-04-12 10:26:32 +0000110/* Forwards */
sewardjb5f6f512005-03-10 23:59:00 +0000111static void do_client_request ( ThreadId tid );
112static void scheduler_sanity ( ThreadId tid );
113static void mostly_clear_thread_record ( ThreadId tid );
sewardjd140e442002-05-29 01:21:19 +0000114
nethercote844e7122004-08-02 15:27:22 +0000115/* Stats. */
njn0fd92f42005-10-06 03:32:42 +0000116static ULong n_scheduling_events_MINOR = 0;
117static ULong n_scheduling_events_MAJOR = 0;
nethercote844e7122004-08-02 15:27:22 +0000118
njn6676d5b2005-06-19 18:49:19 +0000119/* Sanity checking counts. */
120static UInt sanity_fast_count = 0;
121static UInt sanity_slow_count = 0;
sewardjb5f6f512005-03-10 23:59:00 +0000122
/* Dump the scheduler's statistics counters to the debug log:
   total bbs (jumps) executed, major/minor scheduling events, and
   the number of cheap/expensive sanity checks performed. */
void VG_(print_scheduler_stats)(void)
{
   VG_(message)(Vg_DebugMsg,
                "scheduler: %,llu jumps (bb entries).", bbs_done );
   VG_(message)(Vg_DebugMsg,
                "scheduler: %,llu/%,llu major/minor sched events.",
                n_scheduling_events_MAJOR, n_scheduling_events_MINOR);
   VG_(message)(Vg_DebugMsg, 
                "   sanity: %d cheap, %d expensive checks.",
                sanity_fast_count, sanity_slow_count );
}
134
sewardjb5f6f512005-03-10 23:59:00 +0000135/* CPU semaphore, so that threads can run exclusively */
136static vg_sema_t run_sema;
sewardjb5f6f512005-03-10 23:59:00 +0000137
138
sewardje663cb92002-04-12 10:26:32 +0000139/* ---------------------------------------------------------------------
140 Helper functions for the scheduler.
141 ------------------------------------------------------------------ */
142
/* Emit a one-line trace of scheduler event 'what' on thread 'tid'.
   Only called when --trace-sched is in effect. */
static
void print_sched_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, " SCHED[%d]: %s", tid, what );
}
148
sewardj8937c812002-04-12 20:12:20 +0000149static
sewardjb5f6f512005-03-10 23:59:00 +0000150HChar* name_of_sched_event ( UInt event )
sewardje663cb92002-04-12 10:26:32 +0000151{
152 switch (event) {
sewardja0fef1b2005-11-03 13:46:30 +0000153 case VEX_TRC_JMP_SYS_SYSCALL: return "SYSCALL";
154 case VEX_TRC_JMP_SYS_INT32: return "INT32";
155 case VEX_TRC_JMP_SYS_INT128: return "INT128";
156 case VEX_TRC_JMP_SYS_SYSENTER: return "SYSENTER";
sewardjd79ef682004-11-26 13:25:17 +0000157 case VEX_TRC_JMP_CLIENTREQ: return "CLIENTREQ";
158 case VEX_TRC_JMP_YIELD: return "YIELD";
sewardj45f02c42005-02-05 18:27:14 +0000159 case VEX_TRC_JMP_NODECODE: return "NODECODE";
sewardj1f430d32005-12-16 01:07:11 +0000160 case VEX_TRC_JMP_MAPFAIL: return "MAPFAIL";
161 case VEX_TRC_JMP_EMWARN: return "EMWARN";
162 case VEX_TRC_JMP_TINVAL: return "TINVAL";
163 case VG_TRC_INVARIANT_FAILED: return "INVFAILED";
sewardje663cb92002-04-12 10:26:32 +0000164 case VG_TRC_INNER_COUNTERZERO: return "COUNTERZERO";
165 case VG_TRC_INNER_FASTMISS: return "FASTMISS";
sewardjb5f6f512005-03-10 23:59:00 +0000166 case VG_TRC_FAULT_SIGNAL: return "FAULTSIGNAL";
sewardje663cb92002-04-12 10:26:32 +0000167 default: return "??UNKNOWN??";
168 }
169}
170
sewardje663cb92002-04-12 10:26:32 +0000171/* Allocate a completely empty ThreadState record. */
sewardjb5f6f512005-03-10 23:59:00 +0000172ThreadId VG_(alloc_ThreadState) ( void )
sewardje663cb92002-04-12 10:26:32 +0000173{
174 Int i;
sewardj6072c362002-04-19 14:40:57 +0000175 for (i = 1; i < VG_N_THREADS; i++) {
sewardjb5f6f512005-03-10 23:59:00 +0000176 if (VG_(threads)[i].status == VgTs_Empty) {
177 VG_(threads)[i].status = VgTs_Init;
178 VG_(threads)[i].exitreason = VgSrc_None;
sewardje663cb92002-04-12 10:26:32 +0000179 return i;
sewardjb5f6f512005-03-10 23:59:00 +0000180 }
sewardje663cb92002-04-12 10:26:32 +0000181 }
182 VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
183 VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
njne427a662002-10-02 11:08:25 +0000184 VG_(core_panic)("VG_N_THREADS is too low");
sewardje663cb92002-04-12 10:26:32 +0000185 /*NOTREACHED*/
186}
187
sewardjb5f6f512005-03-10 23:59:00 +0000188/*
189 Mark a thread as Runnable. This will block until the run_sema is
190 available, so that we get exclusive access to all the shared
191 structures and the CPU. Up until we get the sema, we must not
192 touch any shared state.
193
194 When this returns, we'll actually be running.
195 */
196void VG_(set_running)(ThreadId tid)
197{
198 ThreadState *tst = VG_(get_ThreadState)(tid);
199
200 vg_assert(tst->status != VgTs_Runnable);
201
202 tst->status = VgTs_Runnable;
203
sewardj7eb7c582005-06-23 01:02:53 +0000204 ML_(sema_down)(&run_sema);
njnc7561b92005-06-19 01:24:32 +0000205 if (VG_(running_tid) != VG_INVALID_THREADID)
206 VG_(printf)("tid %d found %d running\n", tid, VG_(running_tid));
207 vg_assert(VG_(running_tid) == VG_INVALID_THREADID);
208 VG_(running_tid) = tid;
sewardjb5f6f512005-03-10 23:59:00 +0000209
tome0008d62005-11-10 15:02:42 +0000210 VG_(unknown_SP_update)(VG_(get_SP(tid)), VG_(get_SP(tid)));
211
sewardjb5f6f512005-03-10 23:59:00 +0000212 if (VG_(clo_trace_sched))
213 print_sched_event(tid, "now running");
tomdeca43f2005-07-27 23:04:28 +0000214
215 // While thre modeling is disable, issue thread_run events here
216 // VG_(tm_thread_switchto)(tid);
217 VG_TRACK( thread_run, tid );
sewardjb5f6f512005-03-10 23:59:00 +0000218}
219
sewardjb5f6f512005-03-10 23:59:00 +0000220/*
221 Set a thread into a sleeping state, and give up exclusive access to
222 the CPU. On return, the thread must be prepared to block until it
223 is ready to run again (generally this means blocking in a syscall,
224 but it may mean that we remain in a Runnable state and we're just
225 yielding the CPU to another thread).
226 */
void VG_(set_sleeping)(ThreadId tid, ThreadStatus sleepstate)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   /* Only the currently-running thread may go to sleep, and only
      into one of the two off-CPU states. */
   vg_assert(tst->status == VgTs_Runnable);

   vg_assert(sleepstate == VgTs_WaitSys ||
             sleepstate == VgTs_Yielding);

   /* NB: the status must be updated before run_sema is released,
      because the moment sema_up runs another thread may start
      executing and inspect our ThreadState. */
   tst->status = sleepstate;

   vg_assert(VG_(running_tid) == tid);
   VG_(running_tid) = VG_INVALID_THREADID;

   /* Release the run_sema; this will reschedule any runnable
      thread. */
   ML_(sema_up)(&run_sema);

   if (VG_(clo_trace_sched)) {
      Char buf[50];
      VG_(sprintf)(buf, "now sleeping in state %s", 
                   VG_(name_of_ThreadStatus)(sleepstate));
      print_sched_event(tid, buf);
   }
}
252
sewardjb5f6f512005-03-10 23:59:00 +0000253/* Clear out the ThreadState and release the semaphore. Leaves the
254 ThreadState in VgTs_Zombie state, so that it doesn't get
255 reallocated until the caller is really ready. */
void VG_(exit_thread)(ThreadId tid)
{
   /* Must be called by the exiting thread itself, already marked
      as exiting. */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(VG_(is_exiting)(tid));

   /* Leaves the slot in VgTs_Zombie (see mostly_clear_thread_record)
      so it is not reallocated while its stack is still in use. */
   mostly_clear_thread_record(tid);
   VG_(running_tid) = VG_INVALID_THREADID;

   /* There should still be a valid exitreason for this thread */
   vg_assert(VG_(threads)[tid].exitreason != VgSrc_None);

   /* Hand the CPU back so some other thread can run. */
   ML_(sema_up)(&run_sema);
}
270
271/* Kill a thread. This interrupts whatever a thread is doing, and
272 makes it exit ASAP. This does not set the exitreason or
273 exitcode. */
void VG_(kill_thread)(ThreadId tid)
{
   /* Caller must be a different thread; target must already be
      marked for exit (this function does not set exitreason). */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(!VG_(is_running_thread)(tid));
   vg_assert(VG_(is_exiting)(tid));

   /* If the target is blocked in a syscall, poke its kernel thread
      with VG_SIGVGKILL so the syscall is interrupted and the thread
      can notice it is exiting. */
   if (VG_(threads)[tid].status == VgTs_WaitSys) {
      if (VG_(clo_trace_signals))
         VG_(message)(Vg_DebugMsg, "kill_thread zaps tid %d lwp %d",
                      tid, VG_(threads)[tid].os_state.lwpid);
      VG_(tkill)(VG_(threads)[tid].os_state.lwpid, VG_SIGVGKILL);
   }
}
287
288/*
289 Yield the CPU for a short time to let some other thread run.
290 */
291void VG_(vg_yield)(void)
292{
293 struct vki_timespec ts = { 0, 1 };
njnc7561b92005-06-19 01:24:32 +0000294 ThreadId tid = VG_(running_tid);
sewardjb5f6f512005-03-10 23:59:00 +0000295
296 vg_assert(tid != VG_INVALID_THREADID);
297 vg_assert(VG_(threads)[tid].os_state.lwpid == VG_(gettid)());
298
299 VG_(set_sleeping)(tid, VgTs_Yielding);
300
301 //VG_(printf)("tid %d yielding EIP=%p\n", tid, VG_(threads)[tid].arch.m_eip);
302
303 /*
304 Tell the kernel we're yielding.
305 */
306 if (1)
307 VG_(do_syscall0)(__NR_sched_yield);
308 else
309 VG_(nanosleep)(&ts);
310
311 VG_(set_running)(tid);
sewardjb5f6f512005-03-10 23:59:00 +0000312}
313
314
/* Set the standard set of blocked signals, used whenever we're not
   running a client syscall. */
317static void block_signals(ThreadId tid)
318{
319 vki_sigset_t mask;
320
321 VG_(sigfillset)(&mask);
322
323 /* Don't block these because they're synchronous */
324 VG_(sigdelset)(&mask, VKI_SIGSEGV);
325 VG_(sigdelset)(&mask, VKI_SIGBUS);
326 VG_(sigdelset)(&mask, VKI_SIGFPE);
327 VG_(sigdelset)(&mask, VKI_SIGILL);
328 VG_(sigdelset)(&mask, VKI_SIGTRAP);
329
330 /* Can't block these anyway */
331 VG_(sigdelset)(&mask, VKI_SIGSTOP);
332 VG_(sigdelset)(&mask, VKI_SIGKILL);
333
njn9fc31122005-05-11 18:48:33 +0000334 VG_(sigprocmask)(VKI_SIG_SETMASK, &mask, NULL);
335}
336
/* Use libc setjmp/longjmp.  longjmp must not restore signal mask
   state, but does need to pass "val" through.

   SCHEDSETJMP(tid, jumped, stmt): execute 'stmt' with the thread's
   sched_jmpbuf armed, so that the fault-signal handler can longjmp
   back out of 'stmt'.  Afterwards 'jumped' is 0 if 'stmt' returned
   normally, nonzero if the longjmp was taken.  sched_jmpbuf_valid
   is True only while 'stmt' runs, and is asserted on afterwards as
   a sanity check. */
#define SCHEDSETJMP(tid, jumped, stmt)                                  \
   do {                                                                 \
      ThreadState * volatile _qq_tst = VG_(get_ThreadState)(tid);       \
                                                                        \
      (jumped) = __builtin_setjmp(_qq_tst->sched_jmpbuf);               \
      if ((jumped) == 0) {                                              \
         vg_assert(!_qq_tst->sched_jmpbuf_valid);                       \
         _qq_tst->sched_jmpbuf_valid = True;                            \
         stmt;                                                          \
      } else if (VG_(clo_trace_sched))                                  \
         VG_(printf)("SCHEDSETJMP(line %d) tid %d, jumped=%d\n", __LINE__, tid, jumped); \
      vg_assert(_qq_tst->sched_jmpbuf_valid);                           \
      _qq_tst->sched_jmpbuf_valid = False;                              \
   } while(0)
353
354/* Run the thread tid for a while, and return a VG_TRC_* value to the
355 scheduler indicating what happened. */
sewardj6072c362002-04-19 14:40:57 +0000356static
sewardje663cb92002-04-12 10:26:32 +0000357UInt run_thread_for_a_while ( ThreadId tid )
358{
sewardjb5f6f512005-03-10 23:59:00 +0000359 volatile Bool jumped;
360 volatile ThreadState *tst = VG_(get_ThreadState)(tid);
sewardjb5f6f512005-03-10 23:59:00 +0000361
sewardj7ccc5c22002-04-24 21:39:11 +0000362 volatile UInt trc = 0;
sewardjb5f6f512005-03-10 23:59:00 +0000363 volatile Int dispatch_ctr_SAVED = VG_(dispatch_ctr);
364 volatile Int done_this_time;
sewardj8b635a42004-11-22 19:01:47 +0000365
sewardj873b3132004-11-25 22:50:17 +0000366 /* For paranoia purposes only */
367 volatile Addr a_vex = (Addr) & VG_(threads)[tid].arch.vex;
368 volatile Addr a_vexsh = (Addr) & VG_(threads)[tid].arch.vex_shadow;
369 volatile Addr a_spill = (Addr) & VG_(threads)[tid].arch.vex_spill;
370 volatile UInt sz_vex = (UInt) sizeof VG_(threads)[tid].arch.vex;
371 volatile UInt sz_vexsh = (UInt) sizeof VG_(threads)[tid].arch.vex_shadow;
372 volatile UInt sz_spill = (UInt) sizeof VG_(threads)[tid].arch.vex_spill;
373
374 /* Paranoia */
sewardjb48e5002002-05-13 00:16:03 +0000375 vg_assert(VG_(is_valid_tid)(tid));
sewardjb5f6f512005-03-10 23:59:00 +0000376 vg_assert(VG_(is_valid_tid)(tid));
377 vg_assert(VG_(is_running_thread)(tid));
378 vg_assert(!VG_(is_exiting)(tid));
sewardje663cb92002-04-12 10:26:32 +0000379
sewardj873b3132004-11-25 22:50:17 +0000380 /* Even more paranoia. Check that what we have matches
381 Vex's guest state layout requirements. */
sewardj12a74b52004-11-26 11:57:41 +0000382 if (0)
383 VG_(printf)("%p %d %p %d %p %d\n",
sewardjb5f6f512005-03-10 23:59:00 +0000384 (void*)a_vex, sz_vex, (void*)a_vexsh, sz_vexsh,
385 (void*)a_spill, sz_spill );
sewardj873b3132004-11-25 22:50:17 +0000386
njnbe91aae2005-03-27 01:42:41 +0000387 vg_assert(VG_IS_8_ALIGNED(sz_vex));
388 vg_assert(VG_IS_8_ALIGNED(sz_vexsh));
389 vg_assert(VG_IS_16_ALIGNED(sz_spill));
sewardj12a74b52004-11-26 11:57:41 +0000390
njnbe91aae2005-03-27 01:42:41 +0000391 vg_assert(VG_IS_4_ALIGNED(a_vex));
392 vg_assert(VG_IS_4_ALIGNED(a_vexsh));
393 vg_assert(VG_IS_4_ALIGNED(a_spill));
sewardj873b3132004-11-25 22:50:17 +0000394
395 vg_assert(sz_vex == sz_vexsh);
396 vg_assert(a_vex + sz_vex == a_vexsh);
397
398 vg_assert(sz_spill == LibVEX_N_SPILL_BYTES);
399 vg_assert(a_vex + 2 * sz_vex == a_spill);
400
sewardj671ff542002-05-07 09:25:30 +0000401 VGP_PUSHCC(VgpRun);
jsgf855d93d2003-10-13 22:26:55 +0000402
sewardj10537332005-08-05 00:25:11 +0000403# if defined(VGA_ppc32)
404 /* This is necessary due to the hacky way vex models reservations
405 on ppc. It's really quite incorrect for each thread to have its
406 own reservation flag/address, since it's really something that
407 all threads share (that's the whole point). But having shared
408 guest state is something we can't model with Vex. However, as
409 per PaulM's 2.4.0ppc, the reservation is modelled using a
410 reservation flag which is cleared at each context switch. So it
411 is indeed possible to get away with a per thread-reservation if
412 the thread's reservation is cleared before running it.
413
414 This should be abstractified and lifted out.
415 */
416 { Int i;
417 /* Clear any existing reservation. Be paranoid and clear them all. */
418 for (i = 0; i < VG_N_THREADS; i++)
419 VG_(threads)[i].arch.vex.guest_RESVN = 0;
420 }
cerion78090d22005-09-12 22:53:39 +0000421
422 /* ppc guest_state vector regs must be 16byte aligned for loads/stores */
423 vg_assert(VG_IS_16_ALIGNED(VG_(threads)[tid].arch.vex.guest_VR0));
424 vg_assert(VG_IS_16_ALIGNED(VG_(threads)[tid].arch.vex_shadow.guest_VR0));
sewardj10537332005-08-05 00:25:11 +0000425# endif
426
jsgf855d93d2003-10-13 22:26:55 +0000427 /* there should be no undealt-with signals */
sewardjb5f6f512005-03-10 23:59:00 +0000428 //vg_assert(VG_(threads)[tid].siginfo.si_signo == 0);
jsgf855d93d2003-10-13 22:26:55 +0000429
sewardjb5f6f512005-03-10 23:59:00 +0000430 //VG_(printf)("running EIP = %p ESP=%p\n", VG_(threads)[tid].arch.m_eip, VG_(threads)[tid].arch.m_esp);
431
432 vg_assert(VG_(my_fault));
433 VG_(my_fault) = False;
434
sewardj274807d2005-12-15 14:07:07 +0000435 SCHEDSETJMP(
436 tid,
437 jumped,
438 trc = (UInt)VG_(run_innerloop)( (void*)&tst->arch.vex,
439 VG_(clo_profile_flags) > 0 ? 1 : 0 )
440 );
sewardjb5f6f512005-03-10 23:59:00 +0000441
442 //nextEIP = tst->arch.m_eip;
443 //if (nextEIP >= VG_(client_end))
444 // VG_(printf)("trc=%d jump to %p from %p\n",
445 // trc, nextEIP, EIP);
446
447 VG_(my_fault) = True;
448
449 if (jumped) {
sewardje663cb92002-04-12 10:26:32 +0000450 /* We get here if the client took a fault, which caused our
451 signal handler to longjmp. */
452 vg_assert(trc == 0);
sewardjb5f6f512005-03-10 23:59:00 +0000453 trc = VG_TRC_FAULT_SIGNAL;
njn9fc31122005-05-11 18:48:33 +0000454 block_signals(tid);
sewardjb5f6f512005-03-10 23:59:00 +0000455 }
sewardj5390e662005-01-10 16:51:14 +0000456
sewardj8b635a42004-11-22 19:01:47 +0000457 done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 0;
458
459 vg_assert(done_this_time >= 0);
njn394213a2005-06-19 18:38:24 +0000460 bbs_done += (ULong)done_this_time;
sewardj8b635a42004-11-22 19:01:47 +0000461
njn25e49d8e72002-09-23 09:36:25 +0000462 VGP_POPCC(VgpRun);
sewardje663cb92002-04-12 10:26:32 +0000463 return trc;
464}
465
466
njn8aa35852005-06-10 22:59:56 +0000467static void os_state_clear(ThreadState *tst)
468{
sewardj45f4e7c2005-09-27 19:20:21 +0000469 tst->os_state.lwpid = 0;
njn8aa35852005-06-10 22:59:56 +0000470 tst->os_state.threadgroup = 0;
471}
472
473static void os_state_init(ThreadState *tst)
474{
sewardj45f4e7c2005-09-27 19:20:21 +0000475 tst->os_state.valgrind_stack_base = 0;
476 tst->os_state.valgrind_stack_init_SP = 0;
njn8aa35852005-06-10 22:59:56 +0000477 os_state_clear(tst);
478}
479
/* Reset (most of) a thread slot: arch state, signal masks, os_state,
   altstack and queued signals.  Leaves status as VgTs_Zombie rather
   than VgTs_Empty -- see comment below. */
static
void mostly_clear_thread_record ( ThreadId tid )
{
   vki_sigset_t savedmask;

   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   VG_(cleanup_thread)(&VG_(threads)[tid].arch);
   VG_(threads)[tid].tid = tid;

   /* Leave the thread in Zombie, so that it doesn't get reallocated
      until the caller is finally done with the thread stack. */
   VG_(threads)[tid].status               = VgTs_Zombie;

   VG_(sigemptyset)(&VG_(threads)[tid].sig_mask);
   VG_(sigemptyset)(&VG_(threads)[tid].tmp_sig_mask);

   os_state_clear(&VG_(threads)[tid]);

   /* start with no altstack */
   VG_(threads)[tid].altstack.ss_sp = (void *)0xdeadbeef;
   VG_(threads)[tid].altstack.ss_size = 0;
   VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;

   /* Discard any signals still queued for this thread. */
   VG_(clear_out_queued_signals)(tid, &savedmask);

   VG_(threads)[tid].sched_jmpbuf_valid = False;
}
507
njn3f8c4372005-03-13 04:43:10 +0000508/*
509 Called in the child after fork. If the parent has multiple
510 threads, then we've inhereted a VG_(threads) array describing them,
511 but only the thread which called fork() is actually alive in the
512 child. This functions needs to clean up all those other thread
513 structures.
514
515 Whichever tid in the parent which called fork() becomes the
516 master_tid in the child. That's because the only living slot in
517 VG_(threads) in the child after fork is VG_(threads)[tid], and it
518 would be too hard to try to re-number the thread and relocate the
519 thread state down to VG_(threads)[1].
520
521 This function also needs to reinitialize the run_sema, since
522 otherwise we may end up sharing its state with the parent, which
523 would be deeply confusing.
524*/
static void sched_fork_cleanup(ThreadId me)
{
   ThreadId tid;
   vg_assert(VG_(running_tid) == me);

   /* We are now the only live thread; adopt the kernel identity of
      the freshly-forked process. */
   VG_(threads)[me].os_state.lwpid = VG_(gettid)();
   VG_(threads)[me].os_state.threadgroup = VG_(getpid)();

   /* clear out all the unused thread slots */
   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (tid != me) {
         mostly_clear_thread_record(tid);
         VG_(threads)[tid].status = VgTs_Empty;
         VG_(clear_syscallInfo)(tid);
      }
   }

   /* re-init and take the sema, so the child does not share sema
      state with the parent */
   ML_(sema_deinit)(&run_sema);
   ML_(sema_init)(&run_sema);
   ML_(sema_down)(&run_sema);
}
sewardj20917d82002-05-28 01:36:45 +0000547
jsgf855d93d2003-10-13 22:26:55 +0000548
sewardje663cb92002-04-12 10:26:32 +0000549/* Initialise the scheduler. Create a single "main" thread ready to
sewardj2a99cf62004-11-24 10:44:19 +0000550 run, with special ThreadId of one. This is called at startup. The
sewardjb5f6f512005-03-10 23:59:00 +0000551 caller subsequently initialises the guest state components of this
552 main thread, thread 1.
sewardje663cb92002-04-12 10:26:32 +0000553*/
void VG_(scheduler_init) ( Addr clstack_end, SizeT clstack_size )
{
   Int i;
   ThreadId tid_main;

   /* clstack_end is the last addressable byte of the client stack,
      hence the +1 in the alignment check. */
   vg_assert(VG_IS_PAGE_ALIGNED(clstack_end+1));
   vg_assert(VG_IS_PAGE_ALIGNED(clstack_size));

   ML_(sema_init)(&run_sema);

   for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {

      /* Paranoia .. completely zero it out. */
      VG_(memset)( & VG_(threads)[i], 0, sizeof( VG_(threads)[i] ) );

      VG_(threads)[i].sig_queue = NULL;

      os_state_init(&VG_(threads)[i]);
      mostly_clear_thread_record(i);

      VG_(threads)[i].status                    = VgTs_Empty;
      VG_(threads)[i].client_stack_szB          = 0;
      VG_(threads)[i].client_stack_highest_word = (Addr)NULL;
   }

   /* The first allocated slot becomes the main thread (tid 1). */
   tid_main = VG_(alloc_ThreadState)();

   /* Record the extent of the main thread's client stack. */
   VG_(threads)[tid_main].client_stack_highest_word 
      = clstack_end + 1 - sizeof(UWord);
   VG_(threads)[tid_main].client_stack_szB 
      = clstack_size;

   /* After a fork, only the forking thread survives in the child;
      sched_fork_cleanup resets the other slots and the run_sema. */
   VG_(atfork_child)(sched_fork_cleanup);
}
588
589
sewardje663cb92002-04-12 10:26:32 +0000590/* ---------------------------------------------------------------------
591 The scheduler proper.
592 ------------------------------------------------------------------ */
593
/* Handle VG_TRC_INNER_FASTMISS: the dispatcher's fast translation
   cache missed for this thread's current IP.  Look the address up in
   the full translation table, translating it first if necessary. */
static void handle_tt_miss ( ThreadId tid )
{
   Bool found;
   Addr ip = VG_(get_IP)(tid);

   /* Trivial event.  Miss in the fast-cache.  Do a full
      lookup for it. */
   found = VG_(search_transtab)( NULL, ip, True/*upd_fast_cache*/ );
   if (!found) {
      /* Not found; we need to request a translation. */
      if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/, bbs_done )) {
         /* The fresh translation must now be findable. */
         found = VG_(search_transtab)( NULL, ip, True ); 
         vg_assert2(found, "VG_TRC_INNER_FASTMISS: missing tt_fast entry");
      
      } else {
         // If VG_(translate)() fails, it's because it had to throw a
         // signal because the client jumped to a bad address.  That
         // means that either a signal has been set up for delivery,
         // or the thread has been marked for termination.  Either
         // way, we just need to go back into the scheduler loop.
      }
   }
}
617
618static void handle_syscall(ThreadId tid)
619{
620 ThreadState *tst = VG_(get_ThreadState)(tid);
621 Bool jumped;
622
623 /* Syscall may or may not block; either way, it will be
624 complete by the time this call returns, and we'll be
625 runnable again. We could take a signal while the
626 syscall runs. */
sewardj45f4e7c2005-09-27 19:20:21 +0000627
628 if (VG_(clo_sanity_level >= 3))
629 VG_(am_do_sync_check)("(BEFORE SYSCALL)",__FILE__,__LINE__);
630
sewardjb5f6f512005-03-10 23:59:00 +0000631 SCHEDSETJMP(tid, jumped, VG_(client_syscall)(tid));
632
sewardj45f4e7c2005-09-27 19:20:21 +0000633 if (VG_(clo_sanity_level >= 3))
634 VG_(am_do_sync_check)("(AFTER SYSCALL)",__FILE__,__LINE__);
635
sewardjb5f6f512005-03-10 23:59:00 +0000636 if (!VG_(is_running_thread)(tid))
njnc7561b92005-06-19 01:24:32 +0000637 VG_(printf)("tid %d not running; VG_(running_tid)=%d, tid %d status %d\n",
638 tid, VG_(running_tid), tid, tst->status);
sewardjb5f6f512005-03-10 23:59:00 +0000639 vg_assert(VG_(is_running_thread)(tid));
640
641 if (jumped) {
njn9fc31122005-05-11 18:48:33 +0000642 block_signals(tid);
sewardjb5f6f512005-03-10 23:59:00 +0000643 VG_(poll_signals)(tid);
644 }
645}
646
647/*
648 Run a thread until it wants to exit.
649
650 We assume that the caller has already called VG_(set_running) for
651 us, so we own the VCPU. Also, all signals are blocked.
652 */
VgSchedReturnCode VG_(scheduler) ( ThreadId tid )
{
   UInt trc;
   ThreadState *tst = VG_(get_ThreadState)(tid);

   if (VG_(clo_trace_sched))
      print_sched_event(tid, "entering VG_(scheduler)");

   VGP_PUSHCC(VgpSched);

   /* set the proper running signal mask */
   block_signals(tid);

   /* The caller handed us the VCPU (see the comment above this
      function), so we must already be the running thread. */
   vg_assert(VG_(is_running_thread)(tid));

   /* Prime the dispatch counter.  The inner loop decrements before
      testing for zero, so the +1 yields SCHEDULING_QUANTUM actual
      basic-block executions (see the longer comment below). */
   VG_(dispatch_ctr) = SCHEDULING_QUANTUM + 1;

   while (!VG_(is_exiting)(tid)) {

      if (VG_(dispatch_ctr) == 1) {
         /* Our slice is done, so yield the CPU to another thread.  This
            doesn't sleep between sleeping and running, since that would
            take too much time. */
         VG_(set_sleeping)(tid, VgTs_Yielding);
         /* nothing */
         /* NOTE(review): the gap between set_sleeping and set_running
            is presumably where another thread can grab the run lock —
            confirm against VG_(set_sleeping)/VG_(set_running). */
         VG_(set_running)(tid);

         /* OK, do some relatively expensive housekeeping stuff */
         scheduler_sanity(tid);
         VG_(sanity_check_general)(False);

         /* Look for any pending signals for this thread, and set them up
            for delivery */
         VG_(poll_signals)(tid);

         if (VG_(is_exiting)(tid))
            break;   /* poll_signals picked up a fatal signal */

         /* For stats purposes only. */
         n_scheduling_events_MAJOR++;

         /* Figure out how many bbs to ask vg_run_innerloop to do.  Note
            that it decrements the counter before testing it for zero, so
            that if tst->dispatch_ctr is set to N you get at most N-1
            iterations.  Also this means that tst->dispatch_ctr must
            exceed zero before entering the innerloop.  Also also, the
            decrement is done before the bb is actually run, so you
            always get at least one decrement even if nothing happens. */
         VG_(dispatch_ctr) = SCHEDULING_QUANTUM + 1;

         /* paranoia ... */
         vg_assert(tst->tid == tid);
         vg_assert(tst->os_state.lwpid == VG_(gettid)());
      }

      /* For stats purposes only. */
      n_scheduling_events_MINOR++;

      if (0)
         VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs",
                      tid, VG_(dispatch_ctr) - 1 );

      /* Run translated client code until it returns a thread return
         code (trc), then dispatch on that code below. */
      trc = run_thread_for_a_while ( tid );

      if (VG_(clo_trace_sched) && VG_(clo_verbosity) > 2) {
         Char buf[50];
         VG_(sprintf)(buf, "TRC: %s", name_of_sched_event(trc));
         print_sched_event(tid, buf);
      }

      switch (trc) {
      case VG_TRC_INNER_FASTMISS:
         vg_assert(VG_(dispatch_ctr) > 1);
         handle_tt_miss(tid);
         break;

      case VEX_TRC_JMP_CLIENTREQ:
         do_client_request(tid);
         break;

      case VEX_TRC_JMP_SYS_INT128:  /* x86-linux */
      case VEX_TRC_JMP_SYS_SYSCALL: /* amd64-linux, ppc32-linux */
         handle_syscall(tid);
         if (VG_(clo_sanity_level) > 2)
            VG_(sanity_check_general)(True); /* sanity-check every syscall */
         break;

      case VEX_TRC_JMP_YIELD:
         /* Explicit yield, because this thread is in a spin-lock
            or something.  Only let the thread run for a short while
            longer.  Because swapping to another thread is expensive,
            we're prepared to let this thread eat a little more CPU
            before swapping to another.  That means that short term
            spins waiting for hardware to poke memory won't cause a
            thread swap. */
         if (VG_(dispatch_ctr) > 100)
            VG_(dispatch_ctr) = 100;
         break;

      case VG_TRC_INNER_COUNTERZERO:
         /* Timeslice is out.  Let a new thread be scheduled. */
         vg_assert(VG_(dispatch_ctr) == 1);
         break;

      case VG_TRC_FAULT_SIGNAL:
         /* Everything should be set up (either we're exiting, or
            about to start in a signal handler). */
         break;

      case VEX_TRC_JMP_MAPFAIL:
         /* Failure of arch-specific address translation (x86/amd64
            segment override use) */
         /* jrs 2005 03 11: is this correct? */
         VG_(synth_fault)(tid);
         break;

      case VEX_TRC_JMP_EMWARN: {
         /* Guest code hit an operation Vex can only approximate.
            Report each distinct warning kind at most 3 times. */
         static Int  counts[EmWarn_NUMBER];
         static Bool counts_initted = False;
         VexEmWarn ew;
         HChar*    what;
         Bool      show;
         Int       q;
         if (!counts_initted) {
            counts_initted = True;
            for (q = 0; q < EmWarn_NUMBER; q++)
               counts[q] = 0;
         }
         ew   = (VexEmWarn)VG_(threads)[tid].arch.vex.guest_EMWARN;
         what = (ew < 0 || ew >= EmWarn_NUMBER)
                   ? "unknown (?!)"
                   : LibVEX_EmWarn_string(ew);
         show = (ew < 0 || ew >= EmWarn_NUMBER)
                   ? True
                   : counts[ew]++ < 3;
         if (show && VG_(clo_show_emwarns)) {
            VG_(message)( Vg_UserMsg,
                          "Emulation warning: unsupported action:");
            VG_(message)( Vg_UserMsg, " %s", what);
            VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
         }
         break;
      }

      case VEX_TRC_JMP_NODECODE:
         /* The guest jumped to something the instruction decoder could
            not make sense of; explain, then deliver SIGILL. */
#define M(a) VG_(message)(Vg_UserMsg, a);
   M("Your program just tried to execute an instruction that Valgrind" );
   M("did not recognise. There are two possible reasons for this." );
   M("1. Your program has a bug and erroneously jumped to a non-code" );
   M(" location. If you are running Memcheck and you just saw a" );
   M(" warning about a bad jump, it's probably your program's fault.");
   M("2. The instruction is legitimate but Valgrind doesn't handle it,");
   M(" i.e. it's Valgrind's fault. If you think this is the case or");
   M(" you are not sure, please let us know." );
   M("Either way, Valgrind will now raise a SIGILL signal which will" );
   M("probably kill your program." );
#undef M
         VG_(synth_sigill)(tid, VG_(get_IP)(tid));
         break;

      case VEX_TRC_JMP_TINVAL:
         /* Guest code asked for translations covering
            [guest_TISTART, guest_TISTART + guest_TILEN) to be
            discarded (self-modifying code support). */
         VG_(discard_translations)(
            (Addr64)VG_(threads)[tid].arch.vex.guest_TISTART,
            VG_(threads)[tid].arch.vex.guest_TILEN,
            "scheduler(VEX_TRC_JMP_TINVAL)"
         );
         if (0)
            VG_(printf)("dump translations done.\n");
         break;

      case VG_TRC_INVARIANT_FAILED:
         /* This typically happens if, after running generated code,
            it is detected that host CPU settings (eg, FPU/Vector
            control words) are not as they should be.  Vex's code
            generation specifies the state such control words should
            be in on entry to Vex-generated code, and they should be
            unchanged on exit from it.  Failure of this assertion
            usually means a bug in Vex's code generation. */
         vg_assert2(0, "VG_(scheduler), phase 3: "
                       "run_innerloop detected host "
                       "state invariant failure", trc);
         /* no break needed: vg_assert2(0, ...) does not return */

      case VEX_TRC_JMP_SYS_SYSENTER:
         /* Do whatever simulation is appropriate for an x86 sysenter
            instruction.  Note that it is critical to set this thread's
            guest_EIP to point at the code to execute after the
            sysenter, since Vex-generated code will not have set it --
            vex does not know what it should be.  Vex sets the next
            address to zero, so if you don't guest_EIP, the thread will
            jump to zero afterwards and probably die as a result. */
#        if defined(VGA_x86)
         //FIXME: VG_(threads)[tid].arch.vex.guest_EIP = ....
         //handle_sysenter_x86(tid);
         vg_assert2(0, "VG_(scheduler), phase 3: "
                       "sysenter_x86 on not yet implemented");
#        else
         vg_assert2(0, "VG_(scheduler), phase 3: "
                       "sysenter_x86 on non-x86 platform?!?!");
#        endif

      default:
         vg_assert2(0, "VG_(scheduler), phase 3: "
                       "unexpected thread return code (%u)", trc);
         /* NOTREACHED */
         break;

      } /* switch (trc) */
   }

   if (VG_(clo_trace_sched))
      print_sched_event(tid, "exiting VG_(scheduler)");

   /* The only way out of the loop above is the thread being marked
      as exiting. */
   vg_assert(VG_(is_exiting)(tid));

   VGP_POPCC(VgpSched);

   //if (VG_(clo_model_pthreads))
   //   VG_(tm_thread_exit)(tid);

   return tst->exitreason;
}
873
874
sewardjb5f6f512005-03-10 23:59:00 +0000875/*
876 This causes all threads to forceably exit. They aren't actually
877 dead by the time this returns; you need to call
njnaf839f52005-06-23 03:27:57 +0000878 VG_(reap_threads)() to wait for them.
sewardjb5f6f512005-03-10 23:59:00 +0000879 */
880void VG_(nuke_all_threads_except) ( ThreadId me, VgSchedReturnCode src )
sewardjccef2e62002-05-29 19:26:32 +0000881{
882 ThreadId tid;
sewardjb5f6f512005-03-10 23:59:00 +0000883
884 vg_assert(VG_(is_running_thread)(me));
sewardj45f02c42005-02-05 18:27:14 +0000885
sewardjccef2e62002-05-29 19:26:32 +0000886 for (tid = 1; tid < VG_N_THREADS; tid++) {
887 if (tid == me
jsgf855d93d2003-10-13 22:26:55 +0000888 || VG_(threads)[tid].status == VgTs_Empty)
sewardjccef2e62002-05-29 19:26:32 +0000889 continue;
sewardjb5f6f512005-03-10 23:59:00 +0000890 if (0)
sewardjef037c72002-05-30 00:40:03 +0000891 VG_(printf)(
892 "VG_(nuke_all_threads_except): nuking tid %d\n", tid);
sewardjb5f6f512005-03-10 23:59:00 +0000893
894 VG_(threads)[tid].exitreason = src;
sewardja8d8e232005-06-07 20:04:56 +0000895 if (src == VgSrc_FatalSig)
896 VG_(threads)[tid].os_state.fatalsig = VKI_SIGKILL;
sewardjb5f6f512005-03-10 23:59:00 +0000897 VG_(kill_thread)(tid);
sewardjccef2e62002-05-29 19:26:32 +0000898 }
899}
900
901
njnd3040452003-05-19 15:04:06 +0000902/* ---------------------------------------------------------------------
sewardjb5f6f512005-03-10 23:59:00 +0000903 Specifying shadow register values
njnd3040452003-05-19 15:04:06 +0000904 ------------------------------------------------------------------ */
905
/* Per-architecture guest registers used by the client-request
   calling convention: one register carries the pointer to the
   request's argument block, the other receives the result. */
#if defined(VGA_x86)
#  define VG_CLREQ_ARGS       guest_EAX
#  define VG_CLREQ_RET        guest_EDX
#elif defined(VGA_amd64)
#  define VG_CLREQ_ARGS       guest_RAX
#  define VG_CLREQ_RET        guest_RDX
#elif defined(VGA_ppc32) || defined(VGA_ppc64)
#  define VG_CLREQ_ARGS       guest_GPR4
#  define VG_CLREQ_RET        guest_GPR3
#else
#  error Unknown arch
#endif

/* Accessors for the above within a thread's register state, plus the
   byte offset of the result register inside VexGuestArchState (used
   when informing the tool of the register write). */
#define CLREQ_ARGS(regs)   ((regs).vex.VG_CLREQ_ARGS)
#define CLREQ_RET(regs)    ((regs).vex.VG_CLREQ_RET)
#define O_CLREQ_RET        (offsetof(VexGuestArchState, VG_CLREQ_RET))

// These macros write a value to a client's thread register, and tell the
// tool that it's happened (if necessary).

#define SET_CLREQ_RETVAL(zztid, zzval) \
   do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
        VG_TRACK( post_reg_write, \
                  Vg_CoreClientReq, zztid, O_CLREQ_RET, sizeof(UWord)); \
   } while (0)

/* As above, but reports the write as the return value of a
   client-requested call to function f. */
#define SET_CLCALL_RETVAL(zztid, zzval, f) \
   do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
        VG_TRACK( post_reg_write_clientcall_return, \
                  zztid, O_CLREQ_RET, sizeof(UWord), f); \
   } while (0)
937
sewardje663cb92002-04-12 10:26:32 +0000938/* ---------------------------------------------------------------------
sewardj124ca2a2002-06-20 10:19:38 +0000939 Handle client requests.
sewardje663cb92002-04-12 10:26:32 +0000940 ------------------------------------------------------------------ */
941
njn9cb54ac2005-06-12 04:19:17 +0000942// OS-specific(?) client requests
943static Bool os_client_request(ThreadId tid, UWord *args)
944{
945 Bool handled = True;
946
947 vg_assert(VG_(is_running_thread)(tid));
948
949 switch(args[0]) {
950 case VG_USERREQ__LIBC_FREERES_DONE:
951 /* This is equivalent to an exit() syscall, but we don't set the
952 exitcode (since it might already be set) */
953 if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched))
954 VG_(message)(Vg_DebugMsg,
955 "__libc_freeres() done; really quitting!");
956 VG_(threads)[tid].exitreason = VgSrc_ExitSyscall;
957 break;
958
959 default:
960 handled = False;
961 break;
962 }
963
964 return handled;
965}
966
967
/* Do a client request for the thread tid.  After the request, tid may
   or may not still be runnable; if not, the scheduler will have to
   choose a new thread to run.
*/
static
void do_client_request ( ThreadId tid )
{
   /* The request's argument block address is passed in the
      arch-specific client-request register (see CLREQ_ARGS above);
      arg[0] is the request code, arg[1..] its operands. */
   UWord* arg = (UWord*)(CLREQ_ARGS(VG_(threads)[tid].arch));
   UWord req_no = arg[0];

   if (0)
      VG_(printf)("req no = 0x%llx, arg = %p\n", (ULong)req_no, arg);
   switch (req_no) {

      /* CLIENT_CALL0..3: call a client-supplied function (arg[1]) on
         the real CPU with 0..3 extra word arguments, and write its
         result back as the request's return value. */
      case VG_USERREQ__CLIENT_CALL0: {
         UWord (*f)(ThreadId) = (void*)arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL0: func=%p", f);
         else
            SET_CLCALL_RETVAL(tid, f ( tid ), (Addr)f);
         break;
      }
      case VG_USERREQ__CLIENT_CALL1: {
         UWord (*f)(ThreadId, UWord) = (void*)arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL1: func=%p", f);
         else
            SET_CLCALL_RETVAL(tid, f ( tid, arg[2] ), (Addr)f );
         break;
      }
      case VG_USERREQ__CLIENT_CALL2: {
         UWord (*f)(ThreadId, UWord, UWord) = (void*)arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL2: func=%p", f);
         else
            SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3] ), (Addr)f );
         break;
      }
      case VG_USERREQ__CLIENT_CALL3: {
         UWord (*f)(ThreadId, UWord, UWord, UWord) = (void*)arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL3: func=%p", f);
         else
            SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3], arg[4] ), (Addr)f );
         break;
      }

      // Nb: this looks like a circular definition, because it kind of is.
      // See comment in valgrind.h to understand what's going on.
      case VG_USERREQ__RUNNING_ON_VALGRIND:
         SET_CLREQ_RETVAL(tid, RUNNING_ON_VALGRIND+1);
         break;

      /* Client-visible printf: arg[1] is the format string, arg[2] the
         va_list pointer; the character count is returned. */
      case VG_USERREQ__PRINTF: {
         int count =
            VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] );
         SET_CLREQ_RETVAL( tid, count );
         break; }

      case VG_USERREQ__INTERNAL_PRINTF: {
         int count =
            VG_(vmessage)( Vg_DebugMsg, (char *)arg[1], (void*)arg[2] );
         SET_CLREQ_RETVAL( tid, count );
         break; }

      /* As PRINTF, but also prints the client's current stack trace. */
      case VG_USERREQ__PRINTF_BACKTRACE: {
         int count =
            VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] );
         VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
         SET_CLREQ_RETVAL( tid, count );
         break; }

      /* Client informs us about a stack it manages itself; returns a
         stack id usable with STACK_DEREGISTER/STACK_CHANGE. */
      case VG_USERREQ__STACK_REGISTER: {
         UWord sid = VG_(register_stack)((Addr)arg[1], (Addr)arg[2]);
         SET_CLREQ_RETVAL( tid, sid );
         break; }

      case VG_USERREQ__STACK_DEREGISTER: {
         VG_(deregister_stack)(arg[1]);
         SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
         break; }

      case VG_USERREQ__STACK_CHANGE: {
         VG_(change_stack)(arg[1], (Addr)arg[2], (Addr)arg[3]);
         SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
         break; }

      /* Fill in the malloc-replacement function table (arg[1]) with
         the tool's allocator entry points and related settings. */
      case VG_USERREQ__GET_MALLOCFUNCS: {
         struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];

         info->tl_malloc               = VG_(tdict).tool_malloc;
         info->tl_calloc               = VG_(tdict).tool_calloc;
         info->tl_realloc              = VG_(tdict).tool_realloc;
         info->tl_memalign             = VG_(tdict).tool_memalign;
         info->tl___builtin_new        = VG_(tdict).tool___builtin_new;
         info->tl___builtin_vec_new    = VG_(tdict).tool___builtin_vec_new;
         info->tl_free                 = VG_(tdict).tool_free;
         info->tl___builtin_delete     = VG_(tdict).tool___builtin_delete;
         info->tl___builtin_vec_delete = VG_(tdict).tool___builtin_vec_delete;

         info->arena_payload_szB       = VG_(arena_payload_szB);
         info->mallinfo                = VG_(mallinfo);
         info->clo_trace_malloc        = VG_(clo_trace_malloc);

         SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */

         break;
      }

      /* Requests from the client program */

      case VG_USERREQ__DISCARD_TRANSLATIONS:
         if (VG_(clo_verbosity) > 2)
            VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
                         " addr %p, len %d\n",
                         (void*)arg[1], arg[2] );

         VG_(discard_translations)(
            arg[1], arg[2], "scheduler(VG_USERREQ__DISCARD_TRANSLATIONS)"
         );

         SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
         break;

      case VG_USERREQ__COUNT_ERRORS:
         SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() );
         break;

      default:
         /* Unknown to the core: try the OS layer, then the tool, and
            finally (once, at high verbosity) warn about it. */
         if (os_client_request(tid, arg)) {
            // do nothing, os_client_request() handled it
         } else if (VG_(needs).client_requests) {
            UWord ret;

            if (VG_(clo_verbosity) > 2)
               VG_(printf)("client request: code %x, addr %p, len %d\n",
                           arg[0], (void*)arg[1], arg[2] );

            if ( VG_TDICT_CALL(tool_handle_client_request, tid, arg, &ret) )
               SET_CLREQ_RETVAL(tid, ret);
         } else {
            static Bool whined = False;

            if (!whined && VG_(clo_verbosity) > 2) {
               // Allow for requests in core, but defined by tools, which
               // have 0 and 0 in their two high bytes.
               Char c1 = (arg[0] >> 24) & 0xff;
               Char c2 = (arg[0] >> 16) & 0xff;
               if (c1 == 0) c1 = '_';
               if (c2 == 0) c2 = '_';
               VG_(message)(Vg_UserMsg, "Warning:\n"
                   "  unhandled client request: 0x%x (%c%c+0x%x). Perhaps\n"
                   "  VG_(needs).client_requests should be set?",
                            arg[0], c1, c2, arg[0] & 0xffff);
               whined = True;
            }
         }
         break;
   }
}
1128
1129
sewardj6072c362002-04-19 14:40:57 +00001130/* ---------------------------------------------------------------------
njn6676d5b2005-06-19 18:49:19 +00001131 Sanity checking (permanently engaged)
sewardj6072c362002-04-19 14:40:57 +00001132 ------------------------------------------------------------------ */
1133
sewardjb5f6f512005-03-10 23:59:00 +00001134/* Internal consistency checks on the sched structures. */
sewardj6072c362002-04-19 14:40:57 +00001135static
sewardjb5f6f512005-03-10 23:59:00 +00001136void scheduler_sanity ( ThreadId tid )
sewardj6072c362002-04-19 14:40:57 +00001137{
sewardjb5f6f512005-03-10 23:59:00 +00001138 Bool bad = False;
jsgf855d93d2003-10-13 22:26:55 +00001139
sewardjb5f6f512005-03-10 23:59:00 +00001140 if (!VG_(is_running_thread)(tid)) {
1141 VG_(message)(Vg_DebugMsg,
1142 "Thread %d is supposed to be running, but doesn't own run_sema (owned by %d)\n",
njnc7561b92005-06-19 01:24:32 +00001143 tid, VG_(running_tid));
sewardjb5f6f512005-03-10 23:59:00 +00001144 bad = True;
jsgf855d93d2003-10-13 22:26:55 +00001145 }
sewardj5f07b662002-04-23 16:52:51 +00001146
sewardjb5f6f512005-03-10 23:59:00 +00001147 if (VG_(gettid)() != VG_(threads)[tid].os_state.lwpid) {
1148 VG_(message)(Vg_DebugMsg,
njnd06ed472005-03-13 05:12:31 +00001149 "Thread %d supposed to be in LWP %d, but we're actually %d\n",
1150 tid, VG_(threads)[tid].os_state.lwpid, VG_(gettid)());
sewardjb5f6f512005-03-10 23:59:00 +00001151 bad = True;
sewardj5f07b662002-04-23 16:52:51 +00001152 }
sewardj6072c362002-04-19 14:40:57 +00001153}
1154
njn6676d5b2005-06-19 18:49:19 +00001155void VG_(sanity_check_general) ( Bool force_expensive )
1156{
1157 ThreadId tid;
1158
1159 VGP_PUSHCC(VgpCoreCheapSanity);
1160
1161 if (VG_(clo_sanity_level) < 1) return;
1162
1163 /* --- First do all the tests that we can do quickly. ---*/
1164
1165 sanity_fast_count++;
1166
1167 /* Check stuff pertaining to the memory check system. */
1168
1169 /* Check that nobody has spuriously claimed that the first or
1170 last 16 pages of memory have become accessible [...] */
1171 if (VG_(needs).sanity_checks) {
1172 VGP_PUSHCC(VgpToolCheapSanity);
1173 vg_assert(VG_TDICT_CALL(tool_cheap_sanity_check));
1174 VGP_POPCC(VgpToolCheapSanity);
1175 }
1176
1177 /* --- Now some more expensive checks. ---*/
1178
1179 /* Once every 25 times, check some more expensive stuff. */
1180 if ( force_expensive
1181 || VG_(clo_sanity_level) > 1
1182 || (VG_(clo_sanity_level) == 1 && (sanity_fast_count % 25) == 0)) {
1183
1184 VGP_PUSHCC(VgpCoreExpensiveSanity);
1185 sanity_slow_count++;
1186
njn6676d5b2005-06-19 18:49:19 +00001187 if (VG_(needs).sanity_checks) {
1188 VGP_PUSHCC(VgpToolExpensiveSanity);
1189 vg_assert(VG_TDICT_CALL(tool_expensive_sanity_check));
1190 VGP_POPCC(VgpToolExpensiveSanity);
1191 }
1192
njn6676d5b2005-06-19 18:49:19 +00001193 /* Look for stack overruns. Visit all threads. */
njnd666ea72005-06-26 17:26:22 +00001194 for (tid = 1; tid < VG_N_THREADS; tid++) {
sewardj45f4e7c2005-09-27 19:20:21 +00001195 SizeT remains;
1196 VgStack* stack;
njn6676d5b2005-06-19 18:49:19 +00001197
1198 if (VG_(threads)[tid].status == VgTs_Empty ||
1199 VG_(threads)[tid].status == VgTs_Zombie)
1200 continue;
1201
sewardj45f4e7c2005-09-27 19:20:21 +00001202 stack
1203 = (VgStack*)
1204 VG_(get_ThreadState)(tid)->os_state.valgrind_stack_base;
1205 remains
1206 = VG_(am_get_VgStack_unused_szB)(stack);
njn6676d5b2005-06-19 18:49:19 +00001207 if (remains < VKI_PAGE_SIZE)
1208 VG_(message)(Vg_DebugMsg,
1209 "WARNING: Thread %d is within %d bytes "
1210 "of running out of stack!",
1211 tid, remains);
1212 }
1213
njn6676d5b2005-06-19 18:49:19 +00001214 VGP_POPCC(VgpCoreExpensiveSanity);
1215 }
1216
1217 if (VG_(clo_sanity_level) > 1) {
1218 VGP_PUSHCC(VgpCoreExpensiveSanity);
1219 /* Check sanity of the low-level memory manager. Note that bugs
1220 in the client's code can cause this to fail, so we don't do
1221 this check unless specially asked for. And because it's
1222 potentially very expensive. */
1223 VG_(sanity_check_malloc_all)();
1224 VGP_POPCC(VgpCoreExpensiveSanity);
1225 }
1226 VGP_POPCC(VgpCoreCheapSanity);
1227}
sewardj6072c362002-04-19 14:40:57 +00001228
sewardje663cb92002-04-12 10:26:32 +00001229/*--------------------------------------------------------------------*/
njn278b3d62005-05-30 23:20:51 +00001230/*--- end ---*/
sewardje663cb92002-04-12 10:26:32 +00001231/*--------------------------------------------------------------------*/