blob: 248dc35951a3f766d3728de21fabd069bddcb26a [file] [log] [blame]
sewardje663cb92002-04-12 10:26:32 +00001
2/*--------------------------------------------------------------------*/
njn278b3d62005-05-30 23:20:51 +00003/*--- Thread scheduling. scheduler.c ---*/
sewardje663cb92002-04-12 10:26:32 +00004/*--------------------------------------------------------------------*/
5
6/*
njnc0ae7052005-08-25 22:55:19 +00007 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
sewardje663cb92002-04-12 10:26:32 +00009
sewardj03f8d3f2012-08-05 15:46:46 +000010 Copyright (C) 2000-2012 Julian Seward
sewardje663cb92002-04-12 10:26:32 +000011 jseward@acm.org
sewardje663cb92002-04-12 10:26:32 +000012
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
27
njn25e49d8e72002-09-23 09:36:25 +000028 The GNU General Public License is contained in the file COPYING.
sewardje663cb92002-04-12 10:26:32 +000029*/
30
sewardjb5f6f512005-03-10 23:59:00 +000031/*
32 Overview
33
34 Valgrind tries to emulate the kernel's threading as closely as
35 possible. The client does all threading via the normal syscalls
36 (on Linux: clone, etc). Valgrind emulates this by creating exactly
37 the same process structure as would be created without Valgrind.
38 There are no extra threads.
39
40 The main difference is that Valgrind only allows one client thread
sewardjad0a3a82006-12-17 18:58:55 +000041 to run at once. This is controlled with the CPU Big Lock,
42 "the_BigLock". Any time a thread wants to run client code or
sewardjb5f6f512005-03-10 23:59:00 +000043 manipulate any shared state (which is anything other than its own
sewardjad0a3a82006-12-17 18:58:55 +000044 ThreadState entry), it must hold the_BigLock.
sewardjb5f6f512005-03-10 23:59:00 +000045
46 When a thread is about to block in a blocking syscall, it releases
sewardjad0a3a82006-12-17 18:58:55 +000047 the_BigLock, and re-takes it when it becomes runnable again (either
sewardjb5f6f512005-03-10 23:59:00 +000048 because the syscall finished, or we took a signal).
49
50 VG_(scheduler) therefore runs in each thread. It returns only when
51 the thread is exiting, either because it exited itself, or it was
52 told to exit by another thread.
53
54 This file is almost entirely OS-independent. The details of how
55 the OS handles threading and signalling are abstracted away and
njn12771092005-06-18 02:18:04 +000056 implemented elsewhere. [Some of the functions have worked their
57 way back for the moment, until we do an OS port in earnest...]
sewardj291849f2012-04-20 23:58:55 +000058*/
59
sewardjb5f6f512005-03-10 23:59:00 +000060
njnc7561b92005-06-19 01:24:32 +000061#include "pub_core_basics.h"
sewardjf9d2f9b2006-11-17 20:00:57 +000062#include "pub_core_debuglog.h"
sewardj4cfea4f2006-10-14 19:26:10 +000063#include "pub_core_vki.h"
sewardjf54342a2006-10-17 01:51:24 +000064#include "pub_core_vkiscnums.h" // __NR_sched_yield
sewardj6c591e12011-04-11 16:17:51 +000065#include "pub_core_libcsetjmp.h" // to keep _threadstate.h happy
njnc7561b92005-06-19 01:24:32 +000066#include "pub_core_threadstate.h"
njn04e16982005-05-31 00:23:43 +000067#include "pub_core_aspacemgr.h"
njn93fe3b22005-12-21 20:22:52 +000068#include "pub_core_clreq.h" // for VG_USERREQ__*
njn36b66df2005-05-12 05:13:04 +000069#include "pub_core_dispatch.h"
njnf4c50162005-06-20 14:18:12 +000070#include "pub_core_errormgr.h" // For VG_(get_n_errs_found)()
sewardj3b290482011-05-06 21:02:55 +000071#include "pub_core_gdbserver.h" // for VG_(gdbserver) and VG_(gdbserver_activity)
njn97405b22005-06-02 03:39:33 +000072#include "pub_core_libcbase.h"
njn132bfcc2005-06-04 19:16:06 +000073#include "pub_core_libcassert.h"
njn36a20fa2005-06-03 03:08:39 +000074#include "pub_core_libcprint.h"
njnf39e9a32005-06-12 02:43:17 +000075#include "pub_core_libcproc.h"
njnde62cbf2005-06-10 22:08:14 +000076#include "pub_core_libcsignal.h"
njnf76d27a2009-05-28 01:53:07 +000077#if defined(VGO_darwin)
78#include "pub_core_mach.h"
79#endif
njnf536bbb2005-06-13 04:21:38 +000080#include "pub_core_machine.h"
njnaf1d7df2005-06-11 01:31:52 +000081#include "pub_core_mallocfree.h"
njn20242342005-05-16 23:31:24 +000082#include "pub_core_options.h"
njn717cde52005-05-10 02:47:21 +000083#include "pub_core_replacemalloc.h"
sewardj17c5e2e2012-12-28 09:12:14 +000084#include "pub_core_sbprofile.h"
njn0c246472005-05-31 01:00:08 +000085#include "pub_core_signals.h"
njn945ed2e2005-06-24 03:28:30 +000086#include "pub_core_stacks.h"
njnf4c50162005-06-20 14:18:12 +000087#include "pub_core_stacktrace.h" // For VG_(get_and_pp_StackTrace)()
njn9abd6082005-06-17 21:31:45 +000088#include "pub_core_syscall.h"
njnc1b01812005-06-17 22:19:06 +000089#include "pub_core_syswrap.h"
njn43b9a8a2005-05-10 04:37:01 +000090#include "pub_core_tooliface.h"
njnf4c50162005-06-20 14:18:12 +000091#include "pub_core_translate.h" // For VG_(translate)()
njn8bddf582005-05-13 23:40:55 +000092#include "pub_core_transtab.h"
sewardjc8259b82009-04-22 22:42:10 +000093#include "pub_core_debuginfo.h" // VG_(di_notify_pdb_debuginfo)
bart78bfc712011-12-08 16:14:59 +000094#include "priv_sched-lock.h"
sewardjf54342a2006-10-17 01:51:24 +000095#include "pub_core_scheduler.h" // self
tomd2645142009-10-29 09:27:11 +000096#include "pub_core_redir.h"
florian639e1f82012-09-30 20:30:40 +000097#include "libvex_emnote.h" // VexEmNote
sewardje663cb92002-04-12 10:26:32 +000098
sewardj63fed7f2006-01-17 02:02:47 +000099
sewardje663cb92002-04-12 10:26:32 +0000100/* ---------------------------------------------------------------------
101 Types and globals for the scheduler.
102 ------------------------------------------------------------------ */
103
njnc7561b92005-06-19 01:24:32 +0000104/* ThreadId and ThreadState are defined elsewhere*/
sewardje663cb92002-04-12 10:26:32 +0000105
njn14319cc2005-03-13 06:26:22 +0000106/* Defines the thread-scheduling timeslice, in terms of the number of
107 basic blocks we attempt to run each thread for. Smaller values
108 give finer interleaving but much increased scheduling overheads. */
sewardjea3a99f2006-05-07 14:37:03 +0000109#define SCHEDULING_QUANTUM 100000
njn14319cc2005-03-13 06:26:22 +0000110
sewardj0ec07f32006-01-12 12:32:32 +0000111/* If False, a fault is Valgrind-internal (ie, a bug) */
112Bool VG_(in_generated_code) = False;
njn25e49d8e72002-09-23 09:36:25 +0000113
njn394213a2005-06-19 18:38:24 +0000114/* 64-bit counter for the number of basic blocks done. */
115static ULong bbs_done = 0;
116
sewardj3b290482011-05-06 21:02:55 +0000117/* Counter to see if vgdb activity is to be verified.
118 When nr of bbs done reaches vgdb_next_poll, scheduler will
119 poll for gdbserver activity. VG_(force_vgdb_poll) and
120 VG_(disable_vgdb_poll) allows the valgrind core (e.g. m_gdbserver)
121 to control when the next poll will be done. */
122static ULong vgdb_next_poll;
123
sewardje663cb92002-04-12 10:26:32 +0000124/* Forwards */
sewardjb5f6f512005-03-10 23:59:00 +0000125static void do_client_request ( ThreadId tid );
126static void scheduler_sanity ( ThreadId tid );
127static void mostly_clear_thread_record ( ThreadId tid );
sewardjd140e442002-05-29 01:21:19 +0000128
nethercote844e7122004-08-02 15:27:22 +0000129/* Stats. */
njn0fd92f42005-10-06 03:32:42 +0000130static ULong n_scheduling_events_MINOR = 0;
131static ULong n_scheduling_events_MAJOR = 0;
nethercote844e7122004-08-02 15:27:22 +0000132
sewardjbba6f312012-04-21 23:05:57 +0000133/* Stats: number of XIndirs, and number that missed in the fast
134 cache. */
135static ULong stats__n_xindirs = 0;
136static ULong stats__n_xindir_misses = 0;
137
138/* And 32-bit temp bins for the above, so that 32-bit platforms don't
139 have to do 64 bit incs on the hot path through
140 VG_(cp_disp_xindir). */
141/*global*/ UInt VG_(stats__n_xindirs_32) = 0;
142/*global*/ UInt VG_(stats__n_xindir_misses_32) = 0;
sewardj291849f2012-04-20 23:58:55 +0000143
njn6676d5b2005-06-19 18:49:19 +0000144/* Sanity checking counts. */
145static UInt sanity_fast_count = 0;
146static UInt sanity_slow_count = 0;
sewardjb5f6f512005-03-10 23:59:00 +0000147
nethercote844e7122004-08-02 15:27:22 +0000148void VG_(print_scheduler_stats)(void)
149{
150 VG_(message)(Vg_DebugMsg,
sewardj291849f2012-04-20 23:58:55 +0000151 "scheduler: %'llu event checks.\n", bbs_done );
152 VG_(message)(Vg_DebugMsg,
153 "scheduler: %'llu indir transfers, %'llu misses (1 in %llu)\n",
sewardjbba6f312012-04-21 23:05:57 +0000154 stats__n_xindirs, stats__n_xindir_misses,
155 stats__n_xindirs / (stats__n_xindir_misses
156 ? stats__n_xindir_misses : 1));
njn394213a2005-06-19 18:38:24 +0000157 VG_(message)(Vg_DebugMsg,
sewardj738856f2009-07-15 14:48:32 +0000158 "scheduler: %'llu/%'llu major/minor sched events.\n",
nethercote844e7122004-08-02 15:27:22 +0000159 n_scheduling_events_MAJOR, n_scheduling_events_MINOR);
njn6676d5b2005-06-19 18:49:19 +0000160 VG_(message)(Vg_DebugMsg,
sewardj738856f2009-07-15 14:48:32 +0000161 " sanity: %d cheap, %d expensive checks.\n",
njn6676d5b2005-06-19 18:49:19 +0000162 sanity_fast_count, sanity_slow_count );
nethercote844e7122004-08-02 15:27:22 +0000163}
164
bart78bfc712011-12-08 16:14:59 +0000165/*
166 * Mutual exclusion object used to serialize threads.
167 */
168static struct sched_lock *the_BigLock;
sewardjb5f6f512005-03-10 23:59:00 +0000169
170
sewardje663cb92002-04-12 10:26:32 +0000171/* ---------------------------------------------------------------------
172 Helper functions for the scheduler.
173 ------------------------------------------------------------------ */
174
sewardje663cb92002-04-12 10:26:32 +0000175static
floriandbb35842012-10-27 18:39:11 +0000176void print_sched_event ( ThreadId tid, const HChar* what )
sewardje663cb92002-04-12 10:26:32 +0000177{
sewardj738856f2009-07-15 14:48:32 +0000178 VG_(message)(Vg_DebugMsg, " SCHED[%d]: %s\n", tid, what );
sewardj8937c812002-04-12 20:12:20 +0000179}
180
sewardj17c5e2e2012-12-28 09:12:14 +0000181/* For showing SB profiles, if the user asks to see them. */
sewardjb0473e92011-06-07 22:54:32 +0000182static
sewardj17c5e2e2012-12-28 09:12:14 +0000183void maybe_show_sb_profile ( void )
sewardjb0473e92011-06-07 22:54:32 +0000184{
sewardj17c5e2e2012-12-28 09:12:14 +0000185 /* DO NOT MAKE NON-STATIC */
186 static ULong bbs_done_lastcheck = 0;
187 /* */
188 vg_assert(VG_(clo_profyle_interval) > 0);
189 Long delta = (Long)(bbs_done - bbs_done_lastcheck);
sewardjb0473e92011-06-07 22:54:32 +0000190 vg_assert(delta >= 0);
sewardj17c5e2e2012-12-28 09:12:14 +0000191 if ((ULong)delta >= VG_(clo_profyle_interval)) {
sewardjb0473e92011-06-07 22:54:32 +0000192 bbs_done_lastcheck = bbs_done;
sewardj17c5e2e2012-12-28 09:12:14 +0000193 VG_(get_and_show_SB_profile)(bbs_done);
sewardjb0473e92011-06-07 22:54:32 +0000194 }
195}
196
sewardj8937c812002-04-12 20:12:20 +0000197static
floriancd19e992012-11-03 19:32:28 +0000198const HChar* name_of_sched_event ( UInt event )
sewardje663cb92002-04-12 10:26:32 +0000199{
200 switch (event) {
philippe6d6ddbc2012-05-17 14:31:13 +0000201 case VEX_TRC_JMP_TINVAL: return "TINVAL";
202 case VEX_TRC_JMP_NOREDIR: return "NOREDIR";
203 case VEX_TRC_JMP_SIGTRAP: return "SIGTRAP";
204 case VEX_TRC_JMP_SIGSEGV: return "SIGSEGV";
205 case VEX_TRC_JMP_SIGBUS: return "SIGBUS";
petarj80e5c172012-10-19 14:45:17 +0000206 case VEX_TRC_JMP_SIGFPE_INTOVF:
207 case VEX_TRC_JMP_SIGFPE_INTDIV: return "SIGFPE";
philippe6d6ddbc2012-05-17 14:31:13 +0000208 case VEX_TRC_JMP_EMWARN: return "EMWARN";
209 case VEX_TRC_JMP_EMFAIL: return "EMFAIL";
210 case VEX_TRC_JMP_CLIENTREQ: return "CLIENTREQ";
211 case VEX_TRC_JMP_YIELD: return "YIELD";
212 case VEX_TRC_JMP_NODECODE: return "NODECODE";
213 case VEX_TRC_JMP_MAPFAIL: return "MAPFAIL";
214 case VEX_TRC_JMP_SYS_SYSCALL: return "SYSCALL";
215 case VEX_TRC_JMP_SYS_INT32: return "INT32";
216 case VEX_TRC_JMP_SYS_INT128: return "INT128";
217 case VEX_TRC_JMP_SYS_INT129: return "INT129";
218 case VEX_TRC_JMP_SYS_INT130: return "INT130";
219 case VEX_TRC_JMP_SYS_SYSENTER: return "SYSENTER";
220 case VEX_TRC_JMP_BORING: return "VEX_BORING";
221
222 case VG_TRC_BORING: return "VG_BORING";
223 case VG_TRC_INNER_FASTMISS: return "FASTMISS";
224 case VG_TRC_INNER_COUNTERZERO: return "COUNTERZERO";
225 case VG_TRC_FAULT_SIGNAL: return "FAULTSIGNAL";
226 case VG_TRC_INVARIANT_FAILED: return "INVFAILED";
227 case VG_TRC_CHAIN_ME_TO_SLOW_EP: return "CHAIN_ME_SLOW";
228 case VG_TRC_CHAIN_ME_TO_FAST_EP: return "CHAIN_ME_FAST";
229 default: return "??UNKNOWN??";
sewardje663cb92002-04-12 10:26:32 +0000230 }
231}
232
sewardje663cb92002-04-12 10:26:32 +0000233/* Allocate a completely empty ThreadState record. */
sewardjb5f6f512005-03-10 23:59:00 +0000234ThreadId VG_(alloc_ThreadState) ( void )
sewardje663cb92002-04-12 10:26:32 +0000235{
236 Int i;
sewardj6072c362002-04-19 14:40:57 +0000237 for (i = 1; i < VG_N_THREADS; i++) {
sewardjb5f6f512005-03-10 23:59:00 +0000238 if (VG_(threads)[i].status == VgTs_Empty) {
239 VG_(threads)[i].status = VgTs_Init;
240 VG_(threads)[i].exitreason = VgSrc_None;
sewardje663cb92002-04-12 10:26:32 +0000241 return i;
sewardjb5f6f512005-03-10 23:59:00 +0000242 }
sewardje663cb92002-04-12 10:26:32 +0000243 }
244 VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
245 VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
njne427a662002-10-02 11:08:25 +0000246 VG_(core_panic)("VG_N_THREADS is too low");
sewardje663cb92002-04-12 10:26:32 +0000247 /*NOTREACHED*/
248}
249
sewardjb5f6f512005-03-10 23:59:00 +0000250/*
sewardjad0a3a82006-12-17 18:58:55 +0000251 Mark a thread as Runnable. This will block until the_BigLock is
sewardjb5f6f512005-03-10 23:59:00 +0000252 available, so that we get exclusive access to all the shared
sewardjad0a3a82006-12-17 18:58:55 +0000253 structures and the CPU. Up until we get the_BigLock, we must not
sewardjb5f6f512005-03-10 23:59:00 +0000254 touch any shared state.
255
256 When this returns, we'll actually be running.
257 */
floriandbb35842012-10-27 18:39:11 +0000258void VG_(acquire_BigLock)(ThreadId tid, const HChar* who)
sewardjb5f6f512005-03-10 23:59:00 +0000259{
sewardjf54342a2006-10-17 01:51:24 +0000260 ThreadState *tst;
261
262#if 0
263 if (VG_(clo_trace_sched)) {
264 HChar buf[100];
265 vg_assert(VG_(strlen)(who) <= 100-50);
266 VG_(sprintf)(buf, "waiting for lock (%s)", who);
267 print_sched_event(tid, buf);
268 }
269#endif
270
sewardjad0a3a82006-12-17 18:58:55 +0000271 /* First, acquire the_BigLock. We can't do anything else safely
272 prior to this point. Even doing debug printing prior to this
273 point is, technically, wrong. */
bart78bfc712011-12-08 16:14:59 +0000274 VG_(acquire_BigLock_LL)(NULL);
sewardjf54342a2006-10-17 01:51:24 +0000275
276 tst = VG_(get_ThreadState)(tid);
sewardjb5f6f512005-03-10 23:59:00 +0000277
278 vg_assert(tst->status != VgTs_Runnable);
279
280 tst->status = VgTs_Runnable;
sewardjf54342a2006-10-17 01:51:24 +0000281
njnc7561b92005-06-19 01:24:32 +0000282 if (VG_(running_tid) != VG_INVALID_THREADID)
283 VG_(printf)("tid %d found %d running\n", tid, VG_(running_tid));
284 vg_assert(VG_(running_tid) == VG_INVALID_THREADID);
285 VG_(running_tid) = tid;
sewardjb5f6f512005-03-10 23:59:00 +0000286
sewardj7cf4e6b2008-05-01 20:24:26 +0000287 { Addr gsp = VG_(get_SP)(tid);
philipped5fb89d2013-01-13 13:59:17 +0000288 if (NULL != VG_(tdict).track_new_mem_stack_w_ECU)
289 VG_(unknown_SP_update_w_ECU)(gsp, gsp, 0/*unknown origin*/);
290 else
291 VG_(unknown_SP_update)(gsp, gsp);
sewardj7cf4e6b2008-05-01 20:24:26 +0000292 }
tome0008d62005-11-10 15:02:42 +0000293
sewardjf54342a2006-10-17 01:51:24 +0000294 if (VG_(clo_trace_sched)) {
295 HChar buf[150];
296 vg_assert(VG_(strlen)(who) <= 150-50);
297 VG_(sprintf)(buf, " acquired lock (%s)", who);
298 print_sched_event(tid, buf);
299 }
sewardjb5f6f512005-03-10 23:59:00 +0000300}
301
sewardjb5f6f512005-03-10 23:59:00 +0000302/*
303 Set a thread into a sleeping state, and give up exclusive access to
304 the CPU. On return, the thread must be prepared to block until it
305 is ready to run again (generally this means blocking in a syscall,
306 but it may mean that we remain in a Runnable state and we're just
307 yielding the CPU to another thread).
308 */
floriandbb35842012-10-27 18:39:11 +0000309void VG_(release_BigLock)(ThreadId tid, ThreadStatus sleepstate,
310 const HChar* who)
sewardjb5f6f512005-03-10 23:59:00 +0000311{
312 ThreadState *tst = VG_(get_ThreadState)(tid);
313
314 vg_assert(tst->status == VgTs_Runnable);
315
316 vg_assert(sleepstate == VgTs_WaitSys ||
317 sleepstate == VgTs_Yielding);
318
319 tst->status = sleepstate;
320
njnc7561b92005-06-19 01:24:32 +0000321 vg_assert(VG_(running_tid) == tid);
322 VG_(running_tid) = VG_INVALID_THREADID;
sewardjb5f6f512005-03-10 23:59:00 +0000323
sewardjf54342a2006-10-17 01:51:24 +0000324 if (VG_(clo_trace_sched)) {
floriandbb35842012-10-27 18:39:11 +0000325 HChar buf[200];
sewardjf54342a2006-10-17 01:51:24 +0000326 vg_assert(VG_(strlen)(who) <= 200-100);
327 VG_(sprintf)(buf, "releasing lock (%s) -> %s",
328 who, VG_(name_of_ThreadStatus)(sleepstate));
329 print_sched_event(tid, buf);
330 }
331
sewardjad0a3a82006-12-17 18:58:55 +0000332 /* Release the_BigLock; this will reschedule any runnable
sewardjb5f6f512005-03-10 23:59:00 +0000333 thread. */
bart78bfc712011-12-08 16:14:59 +0000334 VG_(release_BigLock_LL)(NULL);
335}
336
/* Create the_BigLock.  Must be called exactly once before any
   acquire/release, and again only after deinit_BigLock() (fork path). */
static void init_BigLock(void)
{
   vg_assert(!the_BigLock);
   the_BigLock = ML_(create_sched_lock)();
}
342
/* Destroy the_BigLock and clear the pointer so init_BigLock() can be
   called again (used when re-initialising after fork). */
static void deinit_BigLock(void)
{
   ML_(destroy_sched_lock)(the_BigLock);
   the_BigLock = NULL;
}
348
njnf76d27a2009-05-28 01:53:07 +0000349/* See pub_core_scheduler.h for description */
floriandbb35842012-10-27 18:39:11 +0000350void VG_(acquire_BigLock_LL) ( const HChar* who )
njnf76d27a2009-05-28 01:53:07 +0000351{
bart78bfc712011-12-08 16:14:59 +0000352 ML_(acquire_sched_lock)(the_BigLock);
njnf76d27a2009-05-28 01:53:07 +0000353}
354
/* See pub_core_scheduler.h for description.  Low-level release: drops
   the lock only, with no ThreadState bookkeeping.  'who' is unused. */
void VG_(release_BigLock_LL) ( const HChar* who )
{
   ML_(release_sched_lock)(the_BigLock);
}
360
bart9a2b80d2012-03-25 17:51:59 +0000361Bool VG_(owns_BigLock_LL) ( ThreadId tid )
362{
363 return (ML_(get_sched_lock_owner)(the_BigLock)
364 == VG_(threads)[tid].os_state.lwpid);
365}
366
njnf76d27a2009-05-28 01:53:07 +0000367
sewardjb5f6f512005-03-10 23:59:00 +0000368/* Clear out the ThreadState and release the semaphore. Leaves the
369 ThreadState in VgTs_Zombie state, so that it doesn't get
370 reallocated until the caller is really ready. */
/* Clear out the ThreadState of the running, exiting thread 'tid' and
   release the BigLock.  Leaves the slot in VgTs_Zombie state so it is
   not reallocated until the caller is really done with it. */
void VG_(exit_thread)(ThreadId tid)
{
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(VG_(is_exiting)(tid));

   mostly_clear_thread_record(tid);
   VG_(running_tid) = VG_INVALID_THREADID;

   /* There should still be a valid exitreason for this thread
      (mostly_clear_thread_record does not touch it). */
   vg_assert(VG_(threads)[tid].exitreason != VgSrc_None);

   if (VG_(clo_trace_sched))
      print_sched_event(tid, "release lock in VG_(exit_thread)");

   /* Low-level release: our ThreadState is already torn down, so the
      high-level VG_(release_BigLock) path is not applicable. */
   VG_(release_BigLock_LL)(NULL);
}
388
sewardjf54342a2006-10-17 01:51:24 +0000389/* If 'tid' is blocked in a syscall, send it SIGVGKILL so as to get it
390 out of the syscall and onto doing the next thing, whatever that is.
391 If it isn't blocked in a syscall, has no effect on the thread. */
/* If 'tid' is blocked in a syscall (VgTs_WaitSys), interrupt it so it
   returns and moves on: on Darwin via thread_abort(), elsewhere via
   tkill with VG_SIGVGKILL.  No effect if it isn't blocked in a syscall.
   Must not be called on the currently-running thread. */
void VG_(get_thread_out_of_syscall)(ThreadId tid)
{
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(!VG_(is_running_thread)(tid));

   if (VG_(threads)[tid].status == VgTs_WaitSys) {
      if (VG_(clo_trace_signals)) {
         VG_(message)(Vg_DebugMsg,
                      "get_thread_out_of_syscall zaps tid %d lwp %d\n",
                      tid, VG_(threads)[tid].os_state.lwpid);
      }
#     if defined(VGO_darwin)
      {
         // GrP fixme use mach primitives on darwin?
         // GrP fixme thread_abort_safely?
         // GrP fixme race for thread with WaitSys set but not in syscall yet?
         extern kern_return_t thread_abort(mach_port_t);
         thread_abort(VG_(threads)[tid].os_state.lwpid);
      }
#     else
      {
         /* Result deliberately ignored -- see the race discussion below. */
         __attribute__((unused))
         Int r = VG_(tkill)(VG_(threads)[tid].os_state.lwpid, VG_SIGVGKILL);
         /* JRS 2009-Mar-20: should we assert for r==0 (tkill succeeded)?
            I'm really not sure.  Here's a race scenario which argues
            that we shouldn't; but equally I'm not sure the scenario is
            even possible, because of constraints caused by the question
            of who holds the BigLock when.

            Target thread tid does sys_read on a socket and blocks.  This
            function gets called, and we observe correctly that tid's
            status is WaitSys but then for whatever reason this function
            goes very slowly for a while.  Then data arrives from
            wherever, tid's sys_read returns, tid exits.  Then we do
            tkill on tid, but tid no longer exists; tkill returns an
            error code and the assert fails. */
         /* vg_assert(r == 0); */
      }
#     endif
   }
}
433
434/*
435 Yield the CPU for a short time to let some other thread run.
436 */
/* Yield the CPU for a short time: release the BigLock, ask the kernel
   to reschedule, then reacquire.  Lets some other thread run while the
   lock is dropped. */
void VG_(vg_yield)(void)
{
   ThreadId tid = VG_(running_tid);

   vg_assert(tid != VG_INVALID_THREADID);
   /* We must be on the LWP that backs this ThreadState. */
   vg_assert(VG_(threads)[tid].os_state.lwpid == VG_(gettid)());

   VG_(release_BigLock)(tid, VgTs_Yielding, "VG_(vg_yield)");

   /*
      Tell the kernel we're yielding.
    */
   VG_(do_syscall0)(__NR_sched_yield);

   VG_(acquire_BigLock)(tid, "VG_(vg_yield)");
}
453
454
sewardj0ec07f32006-01-12 12:32:32 +0000455/* Set the standard set of blocked signals, used whenever we're not
njn9fc31122005-05-11 18:48:33 +0000456 running a client syscall. */
njn1dcee092009-02-24 03:07:37 +0000457static void block_signals(void)
njn9fc31122005-05-11 18:48:33 +0000458{
459 vki_sigset_t mask;
460
461 VG_(sigfillset)(&mask);
462
463 /* Don't block these because they're synchronous */
464 VG_(sigdelset)(&mask, VKI_SIGSEGV);
465 VG_(sigdelset)(&mask, VKI_SIGBUS);
466 VG_(sigdelset)(&mask, VKI_SIGFPE);
467 VG_(sigdelset)(&mask, VKI_SIGILL);
468 VG_(sigdelset)(&mask, VKI_SIGTRAP);
469
470 /* Can't block these anyway */
471 VG_(sigdelset)(&mask, VKI_SIGSTOP);
472 VG_(sigdelset)(&mask, VKI_SIGKILL);
473
njn9fc31122005-05-11 18:48:33 +0000474 VG_(sigprocmask)(VKI_SIG_SETMASK, &mask, NULL);
475}
476
njn8aa35852005-06-10 22:59:56 +0000477static void os_state_clear(ThreadState *tst)
478{
sewardj45f4e7c2005-09-27 19:20:21 +0000479 tst->os_state.lwpid = 0;
njn8aa35852005-06-10 22:59:56 +0000480 tst->os_state.threadgroup = 0;
njnf76d27a2009-05-28 01:53:07 +0000481# if defined(VGO_linux)
482 /* no other fields to clear */
njnf76d27a2009-05-28 01:53:07 +0000483# elif defined(VGO_darwin)
484 tst->os_state.post_mach_trap_fn = NULL;
485 tst->os_state.pthread = 0;
486 tst->os_state.func_arg = 0;
487 VG_(memset)(&tst->os_state.child_go, 0, sizeof(tst->os_state.child_go));
488 VG_(memset)(&tst->os_state.child_done, 0, sizeof(tst->os_state.child_done));
489 tst->os_state.wq_jmpbuf_valid = False;
490 tst->os_state.remote_port = 0;
491 tst->os_state.msgh_id = 0;
492 VG_(memset)(&tst->os_state.mach_args, 0, sizeof(tst->os_state.mach_args));
493# else
494# error "Unknown OS"
sewardjf54342a2006-10-17 01:51:24 +0000495# endif
njn8aa35852005-06-10 22:59:56 +0000496}
497
/* Full initialisation of the OS-specific ThreadState part: zero the
   Valgrind-stack fields too, then do the regular clear. */
static void os_state_init(ThreadState *tst)
{
   tst->os_state.valgrind_stack_base    = 0;
   tst->os_state.valgrind_stack_init_SP = 0;
   os_state_clear(tst);
}
504
sewardj20917d82002-05-28 01:36:45 +0000505static
506void mostly_clear_thread_record ( ThreadId tid )
507{
sewardjb5f6f512005-03-10 23:59:00 +0000508 vki_sigset_t savedmask;
509
sewardj20917d82002-05-28 01:36:45 +0000510 vg_assert(tid >= 0 && tid < VG_N_THREADS);
njnaf839f52005-06-23 03:27:57 +0000511 VG_(cleanup_thread)(&VG_(threads)[tid].arch);
sewardjb5f6f512005-03-10 23:59:00 +0000512 VG_(threads)[tid].tid = tid;
513
514 /* Leave the thread in Zombie, so that it doesn't get reallocated
515 until the caller is finally done with the thread stack. */
516 VG_(threads)[tid].status = VgTs_Zombie;
517
nethercote73b526f2004-10-31 18:48:21 +0000518 VG_(sigemptyset)(&VG_(threads)[tid].sig_mask);
sewardjb5f6f512005-03-10 23:59:00 +0000519 VG_(sigemptyset)(&VG_(threads)[tid].tmp_sig_mask);
jsgf855d93d2003-10-13 22:26:55 +0000520
njn8aa35852005-06-10 22:59:56 +0000521 os_state_clear(&VG_(threads)[tid]);
fitzhardinge28428592004-03-16 22:07:12 +0000522
523 /* start with no altstack */
524 VG_(threads)[tid].altstack.ss_sp = (void *)0xdeadbeef;
525 VG_(threads)[tid].altstack.ss_size = 0;
526 VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
sewardjb5f6f512005-03-10 23:59:00 +0000527
njn444eba12005-05-12 03:47:31 +0000528 VG_(clear_out_queued_signals)(tid, &savedmask);
sewardjb5f6f512005-03-10 23:59:00 +0000529
530 VG_(threads)[tid].sched_jmpbuf_valid = False;
sewardj20917d82002-05-28 01:36:45 +0000531}
532
njn3f8c4372005-03-13 04:43:10 +0000533/*
sewardj0ec07f32006-01-12 12:32:32 +0000534 Called in the child after fork. If the parent has multiple
535 threads, then we've inherited a VG_(threads) array describing them,
536 but only the thread which called fork() is actually alive in the
537 child. This functions needs to clean up all those other thread
538 structures.
njn3f8c4372005-03-13 04:43:10 +0000539
540 Whichever tid in the parent which called fork() becomes the
541 master_tid in the child. That's because the only living slot in
542 VG_(threads) in the child after fork is VG_(threads)[tid], and it
543 would be too hard to try to re-number the thread and relocate the
544 thread state down to VG_(threads)[1].
545
sewardjad0a3a82006-12-17 18:58:55 +0000546 This function also needs to reinitialize the_BigLock, since
547 otherwise we may end up sharing its state with the parent, which
548 would be deeply confusing.
njn3f8c4372005-03-13 04:43:10 +0000549*/
sewardjb5f6f512005-03-10 23:59:00 +0000550static void sched_fork_cleanup(ThreadId me)
551{
552 ThreadId tid;
njnc7561b92005-06-19 01:24:32 +0000553 vg_assert(VG_(running_tid) == me);
sewardjb5f6f512005-03-10 23:59:00 +0000554
njnf76d27a2009-05-28 01:53:07 +0000555# if defined(VGO_darwin)
556 // GrP fixme hack reset Mach ports
557 VG_(mach_init)();
558# endif
559
sewardjb5f6f512005-03-10 23:59:00 +0000560 VG_(threads)[me].os_state.lwpid = VG_(gettid)();
561 VG_(threads)[me].os_state.threadgroup = VG_(getpid)();
562
563 /* clear out all the unused thread slots */
564 for (tid = 1; tid < VG_N_THREADS; tid++) {
njn3f8c4372005-03-13 04:43:10 +0000565 if (tid != me) {
566 mostly_clear_thread_record(tid);
sewardjb5f6f512005-03-10 23:59:00 +0000567 VG_(threads)[tid].status = VgTs_Empty;
sewardja8d8e232005-06-07 20:04:56 +0000568 VG_(clear_syscallInfo)(tid);
njn3f8c4372005-03-13 04:43:10 +0000569 }
sewardjb5f6f512005-03-10 23:59:00 +0000570 }
571
572 /* re-init and take the sema */
bart78bfc712011-12-08 16:14:59 +0000573 deinit_BigLock();
574 init_BigLock();
575 VG_(acquire_BigLock_LL)(NULL);
sewardjb5f6f512005-03-10 23:59:00 +0000576}
sewardj20917d82002-05-28 01:36:45 +0000577
jsgf855d93d2003-10-13 22:26:55 +0000578
sewardjde764e82007-11-09 23:13:22 +0000579/* First phase of initialisation of the scheduler. Initialise the
580 bigLock, zeroise the VG_(threads) structure and decide on the
581 ThreadId of the root thread.
sewardje663cb92002-04-12 10:26:32 +0000582*/
sewardjde764e82007-11-09 23:13:22 +0000583ThreadId VG_(scheduler_init_phase1) ( void )
sewardje663cb92002-04-12 10:26:32 +0000584{
thughesc37184f2004-09-11 14:16:57 +0000585 Int i;
sewardje663cb92002-04-12 10:26:32 +0000586 ThreadId tid_main;
587
sewardjde764e82007-11-09 23:13:22 +0000588 VG_(debugLog)(1,"sched","sched_init_phase1\n");
sewardj45f4e7c2005-09-27 19:20:21 +0000589
bart78bfc712011-12-08 16:14:59 +0000590 if (VG_(clo_fair_sched) != disable_fair_sched
591 && !ML_(set_sched_lock_impl)(sched_lock_ticket)
592 && VG_(clo_fair_sched) == enable_fair_sched)
593 {
594 VG_(printf)("Error: fair scheduling is not supported on this system.\n");
595 VG_(exit)(1);
596 }
597
598 if (VG_(clo_verbosity) > 1) {
599 VG_(message)(Vg_DebugMsg,
600 "Scheduler: using %s scheduler lock implementation.\n",
601 ML_(get_sched_lock_name)());
602 }
603
604 init_BigLock();
sewardjb5f6f512005-03-10 23:59:00 +0000605
sewardj6072c362002-04-19 14:40:57 +0000606 for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
sewardjc793fd32005-05-31 17:24:49 +0000607 /* Paranoia .. completely zero it out. */
608 VG_(memset)( & VG_(threads)[i], 0, sizeof( VG_(threads)[i] ) );
609
610 VG_(threads)[i].sig_queue = NULL;
sewardjb5f6f512005-03-10 23:59:00 +0000611
njn8aa35852005-06-10 22:59:56 +0000612 os_state_init(&VG_(threads)[i]);
sewardj20917d82002-05-28 01:36:45 +0000613 mostly_clear_thread_record(i);
sewardjb5f6f512005-03-10 23:59:00 +0000614
njn50ba34e2005-04-04 02:41:42 +0000615 VG_(threads)[i].status = VgTs_Empty;
616 VG_(threads)[i].client_stack_szB = 0;
617 VG_(threads)[i].client_stack_highest_word = (Addr)NULL;
sewardjdc873c02011-07-24 16:02:33 +0000618 VG_(threads)[i].err_disablement_level = 0;
sewardje663cb92002-04-12 10:26:32 +0000619 }
620
sewardjb5f6f512005-03-10 23:59:00 +0000621 tid_main = VG_(alloc_ThreadState)();
sewardjde764e82007-11-09 23:13:22 +0000622
sewardj95d86c02007-12-18 01:49:23 +0000623 /* Bleh. Unfortunately there are various places in the system that
624 assume that the main thread has a ThreadId of 1.
625 - Helgrind (possibly)
626 - stack overflow message in default_action() in m_signals.c
627 - definitely a lot more places
628 */
629 vg_assert(tid_main == 1);
630
sewardjde764e82007-11-09 23:13:22 +0000631 return tid_main;
632}
633
634
635/* Second phase of initialisation of the scheduler. Given the root
636 ThreadId computed by first phase of initialisation, fill in stack
637 details and acquire bigLock. Initialise the scheduler. This is
638 called at startup. The caller subsequently initialises the guest
639 state components of this main thread.
640*/
/* Second phase of scheduler initialisation: record the root thread's
   client stack extent and register the fork-cleanup handler.  The
   caller subsequently initialises the guest state of this thread.
   'clstack_end' is the last addressable byte; 'clstack_size' the total
   size.  Both must be page-aligned (end+1 and size respectively). */
void VG_(scheduler_init_phase2) ( ThreadId tid_main,
                                  Addr     clstack_end,
                                  SizeT    clstack_size )
{
   VG_(debugLog)(1,"sched","sched_init_phase2: tid_main=%d, "
                   "cls_end=0x%lx, cls_sz=%ld\n",
                   tid_main, clstack_end, clstack_size);

   vg_assert(VG_IS_PAGE_ALIGNED(clstack_end+1));
   vg_assert(VG_IS_PAGE_ALIGNED(clstack_size));

   /* Highest word = last properly-aligned word inside the stack. */
   VG_(threads)[tid_main].client_stack_highest_word
      = clstack_end + 1 - sizeof(UWord);
   VG_(threads)[tid_main].client_stack_szB
      = clstack_size;

   /* Run sched_fork_cleanup in the child after every fork. */
   VG_(atfork)(NULL, NULL, sched_fork_cleanup);
}
659
660
sewardje663cb92002-04-12 10:26:32 +0000661/* ---------------------------------------------------------------------
sewardj0ec07f32006-01-12 12:32:32 +0000662 Helpers for running translations.
663 ------------------------------------------------------------------ */
664
/* Use gcc's built-in setjmp/longjmp.  longjmp must not restore signal
   mask state, but does need to pass "val" through.  jumped must be a
   volatile UWord.

   Runs 'stmt' with tid's sched_jmpbuf armed, so that if the client
   faults while 'stmt' runs, our signal handler can longjmp back to
   this point.  On the direct (non-jumping) path 'jumped' ends up
   zero; after a longjmp it holds the nonzero value passed to the
   longjmp.  sched_jmpbuf_valid is True exactly for the duration of
   'stmt'. */
#define SCHEDSETJMP(tid, jumped, stmt)					\
   do {									\
      ThreadState * volatile _qq_tst = VG_(get_ThreadState)(tid);	\
									\
      (jumped) = VG_MINIMAL_SETJMP(_qq_tst->sched_jmpbuf);              \
      if ((jumped) == ((UWord)0)) {					\
	 vg_assert(!_qq_tst->sched_jmpbuf_valid);			\
	 _qq_tst->sched_jmpbuf_valid = True;				\
	 stmt;								\
      }	else if (VG_(clo_trace_sched))					\
	 VG_(printf)("SCHEDSETJMP(line %d) tid %d, jumped=%ld\n",       \
                     __LINE__, tid, jumped);                            \
      vg_assert(_qq_tst->sched_jmpbuf_valid);				\
      _qq_tst->sched_jmpbuf_valid = False;				\
   } while(0)
683
684
685/* Do various guest state alignment checks prior to running a thread.
686 Specifically, check that what we have matches Vex's guest state
sewardj7cf4e6b2008-05-01 20:24:26 +0000687 layout requirements. See libvex.h for details, but in short the
688 requirements are: There must be no holes in between the primary
689 guest state, its two copies, and the spill area. In short, all 4
690 areas must have a 16-aligned size and be 16-aligned, and placed
691 back-to-back. */
static void do_pre_run_checks ( ThreadState* tst )
{
   /* Addresses and sizes of the four guest-state areas: primary,
      the two shadows, and the spill area. */
   Addr a_vex     = (Addr) & tst->arch.vex;
   Addr a_vexsh1  = (Addr) & tst->arch.vex_shadow1;
   Addr a_vexsh2  = (Addr) & tst->arch.vex_shadow2;
   Addr a_spill   = (Addr) & tst->arch.vex_spill;
   UInt sz_vex    = (UInt) sizeof tst->arch.vex;
   UInt sz_vexsh1 = (UInt) sizeof tst->arch.vex_shadow1;
   UInt sz_vexsh2 = (UInt) sizeof tst->arch.vex_shadow2;
   UInt sz_spill  = (UInt) sizeof tst->arch.vex_spill;

   if (0)
   VG_(printf)("gst %p %d, sh1 %p %d, "
               "sh2 %p %d, spill %p %d\n",
               (void*)a_vex, sz_vex,
               (void*)a_vexsh1, sz_vexsh1,
               (void*)a_vexsh2, sz_vexsh2,
               (void*)a_spill, sz_spill );

   /* All 4 areas must have a 16-aligned size and be 16-aligned. */
   vg_assert(VG_IS_16_ALIGNED(sz_vex));
   vg_assert(VG_IS_16_ALIGNED(sz_vexsh1));
   vg_assert(VG_IS_16_ALIGNED(sz_vexsh2));
   vg_assert(VG_IS_16_ALIGNED(sz_spill));

   vg_assert(VG_IS_16_ALIGNED(a_vex));
   vg_assert(VG_IS_16_ALIGNED(a_vexsh1));
   vg_assert(VG_IS_16_ALIGNED(a_vexsh2));
   vg_assert(VG_IS_16_ALIGNED(a_spill));

   /* Check that the guest state and its two shadows have the same
      size, and that there are no holes in between.  The latter is
      important because Memcheck assumes that it can reliably access
      the shadows by indexing off a pointer to the start of the
      primary guest state area. */
   vg_assert(sz_vex == sz_vexsh1);
   vg_assert(sz_vex == sz_vexsh2);
   vg_assert(a_vex + 1 * sz_vex == a_vexsh1);
   vg_assert(a_vex + 2 * sz_vex == a_vexsh2);
   /* Also check there's no hole between the second shadow area and
      the spill area. */
   vg_assert(sz_spill == LibVEX_N_SPILL_BYTES);
   vg_assert(a_vex + 3 * sz_vex == a_spill);

#  if defined(VGA_x86)
   /* x86 XMM regs must form an array, ie, have no holes in
      between. */
   vg_assert(
      (offsetof(VexGuestX86State,guest_XMM7)
       - offsetof(VexGuestX86State,guest_XMM0))
      == (8/*#regs*/-1) * 16/*bytes per reg*/
   );
   vg_assert(VG_IS_16_ALIGNED(offsetof(VexGuestX86State,guest_XMM0)));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestX86State,guest_FPREG)));
   vg_assert(8 == offsetof(VexGuestX86State,guest_EAX));
   vg_assert(VG_IS_4_ALIGNED(offsetof(VexGuestX86State,guest_EAX)));
   vg_assert(VG_IS_4_ALIGNED(offsetof(VexGuestX86State,guest_EIP)));
#  endif

#  if defined(VGA_amd64)
   /* amd64 YMM regs must form an array, ie, have no holes in
      between.  (17 registers: guest_YMM0 .. guest_YMM16 inclusive.) */
   vg_assert(
      (offsetof(VexGuestAMD64State,guest_YMM16)
       - offsetof(VexGuestAMD64State,guest_YMM0))
      == (17/*#regs*/-1) * 32/*bytes per reg*/
   );
   vg_assert(VG_IS_16_ALIGNED(offsetof(VexGuestAMD64State,guest_YMM0)));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_FPREG)));
   vg_assert(16 == offsetof(VexGuestAMD64State,guest_RAX));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_RAX)));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_RIP)));
#  endif

#  if defined(VGA_ppc32) || defined(VGA_ppc64)
   /* ppc guest_state vector regs must be 16 byte aligned for
      loads/stores.  This is important! */
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VSR0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_VSR0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_VSR0));
   /* be extra paranoid .. */
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VSR1));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_VSR1));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_VSR1));
#  endif

#  if defined(VGA_arm)
   /* arm guest_state VFP regs must be 8 byte aligned for
      loads/stores.  Let's use 16 just to be on the safe side. */
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_D0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_D0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_D0));
   /* be extra paranoid .. */
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex.guest_D1));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow1.guest_D1));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow2.guest_D1));
#  endif

#  if defined(VGA_s390x)
   /* no special requirements */
#  endif

#  if defined(VGA_mips32)
   /* no special requirements */
#  endif
}
797
// NO_VGDB_POLL value ensures vgdb is not polled, while
// VGDB_POLL_ASAP ensures that the next scheduler call
// will cause a poll.
// (run_thread_for_a_while polls when bbs_done >= vgdb_next_poll, so
// the all-ones value can never be reached, and 0 always triggers.)
#define NO_VGDB_POLL    0xffffffffffffffffULL
#define VGDB_POLL_ASAP  0x0ULL

/* Stop the scheduler polling for vgdb activity: set the next-poll
   threshold to a value bbs_done can never reach. */
void VG_(disable_vgdb_poll) (void )
{
   vgdb_next_poll = NO_VGDB_POLL;
}

/* Force a vgdb poll at the next opportunity: any bbs_done value
   satisfies the zero threshold. */
void VG_(force_vgdb_poll) ( void )
{
   vgdb_next_poll = VGDB_POLL_ASAP;
}
sewardj0ec07f32006-01-12 12:32:32 +0000812
813/* Run the thread tid for a while, and return a VG_TRC_* value
sewardj291849f2012-04-20 23:58:55 +0000814 indicating why VG_(disp_run_translations) stopped, and possibly an
815 auxiliary word. Also, only allow the thread to run for at most
816 *dispatchCtrP events. If (as is the normal case) use_alt_host_addr
817 is False, we are running ordinary redir'd translations, and we
818 should therefore start by looking up the guest next IP in TT. If
819 it is True then we ignore the guest next IP and just run from
820 alt_host_addr, which presumably points at host code for a no-redir
821 translation.
822
823 Return results are placed in two_words. two_words[0] is set to the
824 TRC. In the case where that is VG_TRC_CHAIN_ME_TO_{SLOW,FAST}_EP,
825 the address to patch is placed in two_words[1].
826*/
static
void run_thread_for_a_while ( /*OUT*/HWord* two_words,
                              /*MOD*/Int*   dispatchCtrP,
                              ThreadId      tid,
                              HWord         alt_host_addr,
                              Bool          use_alt_host_addr )
{
   /* All locals are volatile because they must survive the
      SCHEDSETJMP (setjmp/longjmp) below. */
   volatile HWord        jumped         = 0;
   volatile ThreadState* tst            = NULL; /* stop gcc complaining */
   volatile Int          done_this_time = 0;
   volatile HWord        host_code_addr = 0;

   /* Paranoia */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(!VG_(is_exiting)(tid));
   vg_assert(*dispatchCtrP > 0);

   tst = VG_(get_ThreadState)(tid);
   do_pre_run_checks( (ThreadState*)tst );
   /* end Paranoia */

   /* Futz with the XIndir stats counters.  They must be zero here;
      generated code accumulates into them and they are folded into
      the 64-bit totals after the run (below). */
   vg_assert(VG_(stats__n_xindirs_32) == 0);
   vg_assert(VG_(stats__n_xindir_misses_32) == 0);

   /* Clear return area. */
   two_words[0] = two_words[1] = 0;

   /* Figure out where we're starting from. */
   if (use_alt_host_addr) {
      /* unusual case -- no-redir translation */
      host_code_addr = alt_host_addr;
   } else {
      /* normal case -- redir translation.  First try the fast
         translation cache, then fall back to a full lookup. */
      UInt cno = (UInt)VG_TT_FAST_HASH((Addr)tst->arch.vex.VG_INSTR_PTR);
      if (LIKELY(VG_(tt_fast)[cno].guest == (Addr)tst->arch.vex.VG_INSTR_PTR))
         host_code_addr = VG_(tt_fast)[cno].host;
      else {
         AddrH res = 0;
         /* not found in VG_(tt_fast). Searching here the transtab
            improves the performance compared to returning directly
            to the scheduler. */
         Bool  found = VG_(search_transtab)(&res, NULL, NULL,
                                            (Addr)tst->arch.vex.VG_INSTR_PTR,
                                            True/*upd cache*/
                                            );
         if (LIKELY(found)) {
            host_code_addr = res;
         } else {
            /* At this point, we know that we intended to start at a
               normal redir translation, but it was not found.  In
               which case we can return now claiming it's not
               findable. */
            two_words[0] = VG_TRC_INNER_FASTMISS; /* hmm, is that right? */
            return;
         }
      }
   }
   /* We have either a no-redir or a redir translation. */
   vg_assert(host_code_addr != 0); /* implausible */

   /* there should be no undealt-with signals */
   //vg_assert(VG_(threads)[tid].siginfo.si_signo == 0);

   /* Set up event counter stuff for the run: the dispatcher counts
      down from *dispatchCtrP and bails out to FAILADDR when the
      counter runs out. */
   tst->arch.vex.host_EvC_COUNTER = *dispatchCtrP;
   tst->arch.vex.host_EvC_FAILADDR
      = (HWord)VG_(fnptr_to_fnentry)( &VG_(disp_cp_evcheck_fail) );

   if (0) {
      vki_sigset_t m;
      Int i, err = VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &m);
      vg_assert(err == 0);
      VG_(printf)("tid %d: entering code with unblocked signals: ", tid);
      for (i = 1; i <= _VKI_NSIG; i++)
         if (!VG_(sigismember)(&m, i))
            VG_(printf)("%d ", i);
      VG_(printf)("\n");
   }

   // Tell the tool this thread is about to run client code
   VG_TRACK( start_client_code, tid, bbs_done );

   vg_assert(VG_(in_generated_code) == False);
   VG_(in_generated_code) = True;

   /* Run the translation(s), with the jump buffer armed so that a
      fault in client code lands back here with jumped != 0. */
   SCHEDSETJMP(
      tid,
      jumped,
      VG_(disp_run_translations)(
         two_words,
         (void*)&tst->arch.vex,
         host_code_addr
      )
   );

   vg_assert(VG_(in_generated_code) == True);
   VG_(in_generated_code) = False;

   if (jumped != (HWord)0) {
      /* We get here if the client took a fault that caused our signal
         handler to longjmp. */
      vg_assert(two_words[0] == 0 && two_words[1] == 0); // correct?
      two_words[0] = VG_TRC_FAULT_SIGNAL;
      two_words[1] = 0;
      block_signals();
   }

   /* Merge the 32-bit XIndir/miss counters into the 64 bit versions,
      and zero out the 32-bit ones in preparation for the next run of
      generated code. */
   stats__n_xindirs += (ULong)VG_(stats__n_xindirs_32);
   VG_(stats__n_xindirs_32) = 0;
   stats__n_xindir_misses += (ULong)VG_(stats__n_xindir_misses_32);
   VG_(stats__n_xindir_misses_32) = 0;

   /* Inspect the event counter.  It may legitimately have ended at
      -1 (the failing check), hence the +1 in the computation of
      done_this_time below. */
   vg_assert((Int)tst->arch.vex.host_EvC_COUNTER >= -1);
   vg_assert(tst->arch.vex.host_EvC_FAILADDR
             == (HWord)VG_(fnptr_to_fnentry)( &VG_(disp_cp_evcheck_fail)) );

   done_this_time = *dispatchCtrP - ((Int)tst->arch.vex.host_EvC_COUNTER + 1);

   vg_assert(done_this_time >= 0);
   bbs_done += (ULong)done_this_time;

   *dispatchCtrP -= done_this_time;
   vg_assert(*dispatchCtrP >= 0);

   // Tell the tool this thread has stopped running client code
   VG_TRACK( stop_client_code, tid, bbs_done );

   /* Poll for vgdb activity if we've crossed the next-poll
      threshold, and schedule the next poll. */
   if (bbs_done >= vgdb_next_poll) {
      if (VG_(clo_vgdb_poll))
         vgdb_next_poll = bbs_done + (ULong)VG_(clo_vgdb_poll);
      else
         /* value was changed due to gdbserver invocation via ptrace */
         vgdb_next_poll = NO_VGDB_POLL;
      if (VG_(gdbserver_activity) (tid))
         VG_(gdbserver) (tid);
   }

   /* TRC value and possible auxiliary patch-address word are already
      in two_words[0] and [1] respectively, as a result of the call to
      VG_(disp_run_translations). */
   /* Stay sane .. */
   if (two_words[0] == VG_TRC_CHAIN_ME_TO_SLOW_EP
       || two_words[0] == VG_TRC_CHAIN_ME_TO_FAST_EP) {
      vg_assert(two_words[1] != 0); /* we have a legit patch addr */
   } else {
      vg_assert(two_words[1] == 0); /* nobody messed with it */
   }
}
983
sewardj0ec07f32006-01-12 12:32:32 +0000984
985/* ---------------------------------------------------------------------
sewardje663cb92002-04-12 10:26:32 +0000986 The scheduler proper.
987 ------------------------------------------------------------------ */
988
sewardjb5f6f512005-03-10 23:59:00 +0000989static void handle_tt_miss ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +0000990{
sewardjb5f6f512005-03-10 23:59:00 +0000991 Bool found;
njnf536bbb2005-06-13 04:21:38 +0000992 Addr ip = VG_(get_IP)(tid);
sewardjb5f6f512005-03-10 23:59:00 +0000993
994 /* Trivial event. Miss in the fast-cache. Do a full
995 lookup for it. */
sewardj291849f2012-04-20 23:58:55 +0000996 found = VG_(search_transtab)( NULL, NULL, NULL,
997 ip, True/*upd_fast_cache*/ );
sewardj5d0d1f32010-03-14 15:09:27 +0000998 if (UNLIKELY(!found)) {
sewardjb5f6f512005-03-10 23:59:00 +0000999 /* Not found; we need to request a translation. */
sewardj0ec07f32006-01-12 12:32:32 +00001000 if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/,
1001 bbs_done, True/*allow redirection*/ )) {
sewardj291849f2012-04-20 23:58:55 +00001002 found = VG_(search_transtab)( NULL, NULL, NULL,
1003 ip, True );
1004 vg_assert2(found, "handle_tt_miss: missing tt_fast entry");
njn50ae1a72005-04-08 23:28:23 +00001005
sewardjb5f6f512005-03-10 23:59:00 +00001006 } else {
1007 // If VG_(translate)() fails, it's because it had to throw a
1008 // signal because the client jumped to a bad address. That
1009 // means that either a signal has been set up for delivery,
1010 // or the thread has been marked for termination. Either
1011 // way, we just need to go back into the scheduler loop.
1012 }
1013 }
1014}
1015
sewardj291849f2012-04-20 23:58:55 +00001016static
1017void handle_chain_me ( ThreadId tid, void* place_to_chain, Bool toFastEP )
1018{
1019 Bool found = False;
1020 Addr ip = VG_(get_IP)(tid);
1021 UInt to_sNo = (UInt)-1;
1022 UInt to_tteNo = (UInt)-1;
1023
1024 found = VG_(search_transtab)( NULL, &to_sNo, &to_tteNo,
1025 ip, False/*dont_upd_fast_cache*/ );
1026 if (!found) {
1027 /* Not found; we need to request a translation. */
1028 if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/,
1029 bbs_done, True/*allow redirection*/ )) {
1030 found = VG_(search_transtab)( NULL, &to_sNo, &to_tteNo,
1031 ip, False );
1032 vg_assert2(found, "handle_chain_me: missing tt_fast entry");
1033 } else {
1034 // If VG_(translate)() fails, it's because it had to throw a
1035 // signal because the client jumped to a bad address. That
1036 // means that either a signal has been set up for delivery,
1037 // or the thread has been marked for termination. Either
1038 // way, we just need to go back into the scheduler loop.
1039 return;
1040 }
1041 }
1042 vg_assert(found);
1043 vg_assert(to_sNo != -1);
1044 vg_assert(to_tteNo != -1);
1045
1046 /* So, finally we know where to patch through to. Do the patching
1047 and update the various admin tables that allow it to be undone
1048 in the case that the destination block gets deleted. */
1049 VG_(tt_tc_do_chaining)( place_to_chain,
1050 to_sNo, to_tteNo, toFastEP );
1051}
1052
njnf76d27a2009-05-28 01:53:07 +00001053static void handle_syscall(ThreadId tid, UInt trc)
sewardjb5f6f512005-03-10 23:59:00 +00001054{
sewardj1ac9d0c2007-05-01 14:18:48 +00001055 ThreadState * volatile tst = VG_(get_ThreadState)(tid);
sewardjadbb4912011-09-29 17:34:17 +00001056 volatile UWord jumped;
sewardjb5f6f512005-03-10 23:59:00 +00001057
1058 /* Syscall may or may not block; either way, it will be
1059 complete by the time this call returns, and we'll be
1060 runnable again. We could take a signal while the
1061 syscall runs. */
sewardj45f4e7c2005-09-27 19:20:21 +00001062
1063 if (VG_(clo_sanity_level >= 3))
1064 VG_(am_do_sync_check)("(BEFORE SYSCALL)",__FILE__,__LINE__);
1065
njnf76d27a2009-05-28 01:53:07 +00001066 SCHEDSETJMP(tid, jumped, VG_(client_syscall)(tid, trc));
sewardjb5f6f512005-03-10 23:59:00 +00001067
sewardj45f4e7c2005-09-27 19:20:21 +00001068 if (VG_(clo_sanity_level >= 3))
1069 VG_(am_do_sync_check)("(AFTER SYSCALL)",__FILE__,__LINE__);
1070
sewardjb5f6f512005-03-10 23:59:00 +00001071 if (!VG_(is_running_thread)(tid))
njnc7561b92005-06-19 01:24:32 +00001072 VG_(printf)("tid %d not running; VG_(running_tid)=%d, tid %d status %d\n",
1073 tid, VG_(running_tid), tid, tst->status);
sewardjb5f6f512005-03-10 23:59:00 +00001074 vg_assert(VG_(is_running_thread)(tid));
1075
sewardjadbb4912011-09-29 17:34:17 +00001076 if (jumped != (UWord)0) {
njn1dcee092009-02-24 03:07:37 +00001077 block_signals();
sewardjb5f6f512005-03-10 23:59:00 +00001078 VG_(poll_signals)(tid);
1079 }
1080}
1081
sewardja591a052006-01-12 14:04:46 +00001082/* tid just requested a jump to the noredir version of its current
1083 program counter. So make up that translation if needed, run it,
sewardj291849f2012-04-20 23:58:55 +00001084 and return the resulting thread return code in two_words[]. */
1085static
1086void handle_noredir_jump ( /*OUT*/HWord* two_words,
1087 /*MOD*/Int* dispatchCtrP,
1088 ThreadId tid )
sewardja591a052006-01-12 14:04:46 +00001089{
sewardj291849f2012-04-20 23:58:55 +00001090 /* Clear return area. */
1091 two_words[0] = two_words[1] = 0;
1092
sewardja591a052006-01-12 14:04:46 +00001093 AddrH hcode = 0;
1094 Addr ip = VG_(get_IP)(tid);
1095
1096 Bool found = VG_(search_unredir_transtab)( &hcode, ip );
1097 if (!found) {
1098 /* Not found; we need to request a translation. */
1099 if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/, bbs_done,
1100 False/*NO REDIRECTION*/ )) {
1101
1102 found = VG_(search_unredir_transtab)( &hcode, ip );
1103 vg_assert2(found, "unredir translation missing after creation?!");
sewardja591a052006-01-12 14:04:46 +00001104 } else {
1105 // If VG_(translate)() fails, it's because it had to throw a
1106 // signal because the client jumped to a bad address. That
1107 // means that either a signal has been set up for delivery,
1108 // or the thread has been marked for termination. Either
1109 // way, we just need to go back into the scheduler loop.
sewardj291849f2012-04-20 23:58:55 +00001110 two_words[0] = VG_TRC_BORING;
1111 return;
sewardja591a052006-01-12 14:04:46 +00001112 }
1113
1114 }
1115
1116 vg_assert(found);
1117 vg_assert(hcode != 0);
1118
sewardj291849f2012-04-20 23:58:55 +00001119 /* Otherwise run it and return the resulting VG_TRC_* value. */
1120 vg_assert(*dispatchCtrP > 0); /* so as to guarantee progress */
1121 run_thread_for_a_while( two_words, dispatchCtrP, tid,
1122 hcode, True/*use hcode*/ );
sewardja591a052006-01-12 14:04:46 +00001123}
1124
1125
sewardjb5f6f512005-03-10 23:59:00 +00001126/*
1127 Run a thread until it wants to exit.
1128
sewardjad0a3a82006-12-17 18:58:55 +00001129 We assume that the caller has already called VG_(acquire_BigLock) for
sewardjb5f6f512005-03-10 23:59:00 +00001130 us, so we own the VCPU. Also, all signals are blocked.
1131 */
1132VgSchedReturnCode VG_(scheduler) ( ThreadId tid )
1133{
sewardj291849f2012-04-20 23:58:55 +00001134 /* Holds the remaining size of this thread's "timeslice". */
1135 Int dispatch_ctr = 0;
1136
sewardjb5f6f512005-03-10 23:59:00 +00001137 ThreadState *tst = VG_(get_ThreadState)(tid);
sewardj3b290482011-05-06 21:02:55 +00001138 static Bool vgdb_startup_action_done = False;
sewardje663cb92002-04-12 10:26:32 +00001139
sewardjc24be7a2005-03-15 01:40:12 +00001140 if (VG_(clo_trace_sched))
1141 print_sched_event(tid, "entering VG_(scheduler)");
1142
sewardj3b290482011-05-06 21:02:55 +00001143 /* Do vgdb initialization (but once). Only the first (main) task
1144 starting up will do the below.
1145 Initialize gdbserver earlier than at the first
1146 thread VG_(scheduler) is causing problems:
1147 * at the end of VG_(scheduler_init_phase2) :
1148 The main thread is in VgTs_Init state, but in a not yet
1149 consistent state => the thread cannot be reported to gdb
1150 (e.g. causes an assert in LibVEX_GuestX86_get_eflags when giving
1151 back the guest registers to gdb).
1152 * at end of valgrind_main, just
1153 before VG_(main_thread_wrapper_NORETURN)(1) :
1154 The main thread is still in VgTs_Init state but in a
1155 more advanced state. However, the thread state is not yet
1156 completely initialized : a.o., the os_state is not yet fully
1157 set => the thread is then not properly reported to gdb,
1158 which is then confused (causing e.g. a duplicate thread be
1159 shown, without thread id).
1160 * it would be possible to initialize gdbserver "lower" in the
1161 call stack (e.g. in VG_(main_thread_wrapper_NORETURN)) but
1162 these are platform dependent and the place at which
1163 the thread state is completely initialized is not
1164 specific anymore to the main thread (so a similar "do it only
1165 once" would be needed).
1166
1167 => a "once only" initialization here is the best compromise. */
1168 if (!vgdb_startup_action_done) {
1169 vg_assert(tid == 1); // it must be the main thread.
1170 vgdb_startup_action_done = True;
1171 if (VG_(clo_vgdb) != Vg_VgdbNo) {
1172 /* If we have to poll, ensures we do an initial poll at first
1173 scheduler call. Otherwise, ensure no poll (unless interrupted
1174 by ptrace). */
1175 if (VG_(clo_vgdb_poll))
1176 VG_(force_vgdb_poll) ();
1177 else
1178 VG_(disable_vgdb_poll) ();
1179
1180 vg_assert (VG_(dyn_vgdb_error) == VG_(clo_vgdb_error));
1181 /* As we are initializing, VG_(dyn_vgdb_error) can't have been
1182 changed yet. */
1183
sewardj997546c2011-05-17 18:14:53 +00001184 VG_(gdbserver_prerun_action) (1);
sewardj3b290482011-05-06 21:02:55 +00001185 } else {
1186 VG_(disable_vgdb_poll) ();
1187 }
1188 }
1189
sewardjb5f6f512005-03-10 23:59:00 +00001190 /* set the proper running signal mask */
njn1dcee092009-02-24 03:07:37 +00001191 block_signals();
sewardjb5f6f512005-03-10 23:59:00 +00001192
1193 vg_assert(VG_(is_running_thread)(tid));
sewardje663cb92002-04-12 10:26:32 +00001194
sewardj291849f2012-04-20 23:58:55 +00001195 dispatch_ctr = SCHEDULING_QUANTUM;
sewardj6072c362002-04-19 14:40:57 +00001196
sewardjf54342a2006-10-17 01:51:24 +00001197 while (!VG_(is_exiting)(tid)) {
1198
sewardj291849f2012-04-20 23:58:55 +00001199 vg_assert(dispatch_ctr >= 0);
1200 if (dispatch_ctr == 0) {
sewardjf54342a2006-10-17 01:51:24 +00001201
sewardjf54342a2006-10-17 01:51:24 +00001202 /* Our slice is done, so yield the CPU to another thread. On
1203 Linux, this doesn't sleep between sleeping and running,
sewardj6e9de462011-06-28 07:25:29 +00001204 since that would take too much time. */
sewardjf54342a2006-10-17 01:51:24 +00001205
1206 /* 4 July 06: it seems that a zero-length nsleep is needed to
1207 cause async thread cancellation (canceller.c) to terminate
1208 in finite time; else it is in some kind of race/starvation
1209 situation and completion is arbitrarily delayed (although
1210 this is not a deadlock).
1211
1212 Unfortunately these sleeps cause MPI jobs not to terminate
1213 sometimes (some kind of livelock). So sleeping once
1214 every N opportunities appears to work. */
1215
1216 /* 3 Aug 06: doing sys__nsleep works but crashes some apps.
1217 sys_yield also helps the problem, whilst not crashing apps. */
1218
sewardjad0a3a82006-12-17 18:58:55 +00001219 VG_(release_BigLock)(tid, VgTs_Yielding,
1220 "VG_(scheduler):timeslice");
sewardjf54342a2006-10-17 01:51:24 +00001221 /* ------------ now we don't have The Lock ------------ */
1222
sewardjad0a3a82006-12-17 18:58:55 +00001223 VG_(acquire_BigLock)(tid, "VG_(scheduler):timeslice");
sewardjf54342a2006-10-17 01:51:24 +00001224 /* ------------ now we do have The Lock ------------ */
sewardje663cb92002-04-12 10:26:32 +00001225
sewardjb5f6f512005-03-10 23:59:00 +00001226 /* OK, do some relatively expensive housekeeping stuff */
1227 scheduler_sanity(tid);
1228 VG_(sanity_check_general)(False);
sewardje663cb92002-04-12 10:26:32 +00001229
sewardjb5f6f512005-03-10 23:59:00 +00001230 /* Look for any pending signals for this thread, and set them up
1231 for delivery */
1232 VG_(poll_signals)(tid);
sewardje663cb92002-04-12 10:26:32 +00001233
sewardjb5f6f512005-03-10 23:59:00 +00001234 if (VG_(is_exiting)(tid))
1235 break; /* poll_signals picked up a fatal signal */
sewardje663cb92002-04-12 10:26:32 +00001236
sewardjb5f6f512005-03-10 23:59:00 +00001237 /* For stats purposes only. */
1238 n_scheduling_events_MAJOR++;
sewardje663cb92002-04-12 10:26:32 +00001239
sewardjb5f6f512005-03-10 23:59:00 +00001240 /* Figure out how many bbs to ask vg_run_innerloop to do. Note
1241 that it decrements the counter before testing it for zero, so
1242 that if tst->dispatch_ctr is set to N you get at most N-1
1243 iterations. Also this means that tst->dispatch_ctr must
1244 exceed zero before entering the innerloop. Also also, the
1245 decrement is done before the bb is actually run, so you
1246 always get at least one decrement even if nothing happens. */
sewardj291849f2012-04-20 23:58:55 +00001247 // FIXME is this right?
1248 dispatch_ctr = SCHEDULING_QUANTUM;
jsgf855d93d2003-10-13 22:26:55 +00001249
sewardjb5f6f512005-03-10 23:59:00 +00001250 /* paranoia ... */
1251 vg_assert(tst->tid == tid);
1252 vg_assert(tst->os_state.lwpid == VG_(gettid)());
sewardje663cb92002-04-12 10:26:32 +00001253 }
1254
sewardjb5f6f512005-03-10 23:59:00 +00001255 /* For stats purposes only. */
1256 n_scheduling_events_MINOR++;
sewardje663cb92002-04-12 10:26:32 +00001257
1258 if (0)
sewardj738856f2009-07-15 14:48:32 +00001259 VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs\n",
sewardj291849f2012-04-20 23:58:55 +00001260 tid, dispatch_ctr - 1 );
sewardje663cb92002-04-12 10:26:32 +00001261
sewardj291849f2012-04-20 23:58:55 +00001262 HWord trc[2]; /* "two_words" */
1263 run_thread_for_a_while( &trc[0],
1264 &dispatch_ctr,
1265 tid, 0/*ignored*/, False );
sewardje663cb92002-04-12 10:26:32 +00001266
sewardjb5f6f512005-03-10 23:59:00 +00001267 if (VG_(clo_trace_sched) && VG_(clo_verbosity) > 2) {
sewardj291849f2012-04-20 23:58:55 +00001268 HChar buf[50];
1269 VG_(sprintf)(buf, "TRC: %s", name_of_sched_event(trc[0]));
sewardjb5f6f512005-03-10 23:59:00 +00001270 print_sched_event(tid, buf);
sewardje663cb92002-04-12 10:26:32 +00001271 }
1272
sewardj291849f2012-04-20 23:58:55 +00001273 if (trc[0] == VEX_TRC_JMP_NOREDIR) {
sewardj0ec07f32006-01-12 12:32:32 +00001274 /* If we got a request to run a no-redir version of
1275 something, do so now -- handle_noredir_jump just (creates
1276 and) runs that one translation. The flip side is that the
1277 noredir translation can't itself return another noredir
1278 request -- that would be nonsensical. It can, however,
1279 return VG_TRC_BORING, which just means keep going as
1280 normal. */
sewardj291849f2012-04-20 23:58:55 +00001281 /* Note that the fact that we need to continue with a
1282 no-redir jump is not recorded anywhere else in this
1283 thread's state. So we *must* execute the block right now
1284 -- we can't fail to execute it and later resume with it,
1285 because by then we'll have forgotten the fact that it
1286 should be run as no-redir, but will get run as a normal
1287 potentially-redir'd, hence screwing up. This really ought
1288 to be cleaned up, by noting in the guest state that the
1289 next block to be executed should be no-redir. Then we can
1290 suspend and resume at any point, which isn't the case at
1291 the moment. */
1292 handle_noredir_jump( &trc[0],
1293 &dispatch_ctr,
1294 tid );
1295 vg_assert(trc[0] != VEX_TRC_JMP_NOREDIR);
1296
1297 /* This can't be allowed to happen, since it means the block
1298 didn't execute, and we have no way to resume-as-noredir
1299 after we get more timeslice. But I don't think it ever
1300 can, since handle_noredir_jump will assert if the counter
1301 is zero on entry. */
1302 vg_assert(trc[0] != VG_TRC_INNER_COUNTERZERO);
1303
1304 /* A no-redir translation can't return with a chain-me
1305 request, since chaining in the no-redir cache is too
1306 complex. */
1307 vg_assert(trc[0] != VG_TRC_CHAIN_ME_TO_SLOW_EP
1308 && trc[0] != VG_TRC_CHAIN_ME_TO_FAST_EP);
sewardj0ec07f32006-01-12 12:32:32 +00001309 }
1310
sewardj291849f2012-04-20 23:58:55 +00001311 switch (trc[0]) {
1312 case VEX_TRC_JMP_BORING:
1313 /* assisted dispatch, no event. Used by no-redir
1314 translations to force return to the scheduler. */
sewardj0ec07f32006-01-12 12:32:32 +00001315 case VG_TRC_BORING:
1316 /* no special event, just keep going. */
1317 break;
1318
sewardjb5f6f512005-03-10 23:59:00 +00001319 case VG_TRC_INNER_FASTMISS:
sewardj291849f2012-04-20 23:58:55 +00001320 vg_assert(dispatch_ctr > 0);
sewardjb5f6f512005-03-10 23:59:00 +00001321 handle_tt_miss(tid);
1322 break;
sewardj291849f2012-04-20 23:58:55 +00001323
1324 case VG_TRC_CHAIN_ME_TO_SLOW_EP: {
1325 if (0) VG_(printf)("sched: CHAIN_TO_SLOW_EP: %p\n", (void*)trc[1] );
1326 handle_chain_me(tid, (void*)trc[1], False);
1327 break;
1328 }
1329
1330 case VG_TRC_CHAIN_ME_TO_FAST_EP: {
1331 if (0) VG_(printf)("sched: CHAIN_TO_FAST_EP: %p\n", (void*)trc[1] );
1332 handle_chain_me(tid, (void*)trc[1], True);
1333 break;
1334 }
1335
sewardjb5f6f512005-03-10 23:59:00 +00001336 case VEX_TRC_JMP_CLIENTREQ:
1337 do_client_request(tid);
1338 break;
sewardja0fef1b2005-11-03 13:46:30 +00001339
1340 case VEX_TRC_JMP_SYS_INT128: /* x86-linux */
njnf76d27a2009-05-28 01:53:07 +00001341 case VEX_TRC_JMP_SYS_INT129: /* x86-darwin */
1342 case VEX_TRC_JMP_SYS_INT130: /* x86-darwin */
1343 case VEX_TRC_JMP_SYS_SYSCALL: /* amd64-linux, ppc32-linux, amd64-darwin */
sewardj291849f2012-04-20 23:58:55 +00001344 handle_syscall(tid, trc[0]);
sewardjb5f6f512005-03-10 23:59:00 +00001345 if (VG_(clo_sanity_level) > 2)
1346 VG_(sanity_check_general)(True); /* sanity-check every syscall */
1347 break;
sewardje663cb92002-04-12 10:26:32 +00001348
sewardjb5f6f512005-03-10 23:59:00 +00001349 case VEX_TRC_JMP_YIELD:
1350 /* Explicit yield, because this thread is in a spin-lock
sewardj3fc75752005-03-12 15:16:31 +00001351 or something. Only let the thread run for a short while
1352 longer. Because swapping to another thread is expensive,
1353 we're prepared to let this thread eat a little more CPU
1354 before swapping to another. That means that short term
1355 spins waiting for hardware to poke memory won't cause a
1356 thread swap. */
sewardj291849f2012-04-20 23:58:55 +00001357 if (dispatch_ctr > 2000)
1358 dispatch_ctr = 2000;
sewardjb5f6f512005-03-10 23:59:00 +00001359 break;
sewardje663cb92002-04-12 10:26:32 +00001360
sewardjb5f6f512005-03-10 23:59:00 +00001361 case VG_TRC_INNER_COUNTERZERO:
1362 /* Timeslice is out. Let a new thread be scheduled. */
sewardj291849f2012-04-20 23:58:55 +00001363 vg_assert(dispatch_ctr == 0);
sewardjb5f6f512005-03-10 23:59:00 +00001364 break;
sewardje663cb92002-04-12 10:26:32 +00001365
sewardjb5f6f512005-03-10 23:59:00 +00001366 case VG_TRC_FAULT_SIGNAL:
1367 /* Everything should be set up (either we're exiting, or
1368 about to start in a signal handler). */
1369 break;
sewardj9d1b5d32002-04-17 19:40:49 +00001370
sewardj07bdc5e2005-03-11 13:19:47 +00001371 case VEX_TRC_JMP_MAPFAIL:
1372 /* Failure of arch-specific address translation (x86/amd64
1373 segment override use) */
1374 /* jrs 2005 03 11: is this correct? */
1375 VG_(synth_fault)(tid);
1376 break;
1377
sewardjb5f6f512005-03-10 23:59:00 +00001378 case VEX_TRC_JMP_EMWARN: {
florian2e497412012-08-26 03:22:09 +00001379 static Int counts[EmNote_NUMBER];
sewardjb5f6f512005-03-10 23:59:00 +00001380 static Bool counts_initted = False;
florian2e497412012-08-26 03:22:09 +00001381 VexEmNote ew;
florian11f3cc82012-10-21 02:19:35 +00001382 const HChar* what;
sewardjb5f6f512005-03-10 23:59:00 +00001383 Bool show;
1384 Int q;
1385 if (!counts_initted) {
1386 counts_initted = True;
florian2e497412012-08-26 03:22:09 +00001387 for (q = 0; q < EmNote_NUMBER; q++)
sewardjb5f6f512005-03-10 23:59:00 +00001388 counts[q] = 0;
1389 }
florian2e497412012-08-26 03:22:09 +00001390 ew = (VexEmNote)VG_(threads)[tid].arch.vex.guest_EMNOTE;
1391 what = (ew < 0 || ew >= EmNote_NUMBER)
sewardjb5f6f512005-03-10 23:59:00 +00001392 ? "unknown (?!)"
florian2e497412012-08-26 03:22:09 +00001393 : LibVEX_EmNote_string(ew);
1394 show = (ew < 0 || ew >= EmNote_NUMBER)
sewardjb5f6f512005-03-10 23:59:00 +00001395 ? True
1396 : counts[ew]++ < 3;
sewardjd68ac3e2006-01-20 14:31:57 +00001397 if (show && VG_(clo_show_emwarns) && !VG_(clo_xml)) {
sewardjb5f6f512005-03-10 23:59:00 +00001398 VG_(message)( Vg_UserMsg,
sewardj738856f2009-07-15 14:48:32 +00001399 "Emulation warning: unsupported action:\n");
1400 VG_(message)( Vg_UserMsg, " %s\n", what);
njnd01fef72005-03-25 23:35:48 +00001401 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
sewardjb5f6f512005-03-10 23:59:00 +00001402 }
1403 break;
1404 }
sewardje663cb92002-04-12 10:26:32 +00001405
sewardjd68ac3e2006-01-20 14:31:57 +00001406 case VEX_TRC_JMP_EMFAIL: {
florian2e497412012-08-26 03:22:09 +00001407 VexEmNote ew;
florian11f3cc82012-10-21 02:19:35 +00001408 const HChar* what;
florian2e497412012-08-26 03:22:09 +00001409 ew = (VexEmNote)VG_(threads)[tid].arch.vex.guest_EMNOTE;
1410 what = (ew < 0 || ew >= EmNote_NUMBER)
sewardjd68ac3e2006-01-20 14:31:57 +00001411 ? "unknown (?!)"
florian2e497412012-08-26 03:22:09 +00001412 : LibVEX_EmNote_string(ew);
sewardjd68ac3e2006-01-20 14:31:57 +00001413 VG_(message)( Vg_UserMsg,
sewardj738856f2009-07-15 14:48:32 +00001414 "Emulation fatal error -- Valgrind cannot continue:\n");
1415 VG_(message)( Vg_UserMsg, " %s\n", what);
sewardjd68ac3e2006-01-20 14:31:57 +00001416 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
sewardj738856f2009-07-15 14:48:32 +00001417 VG_(message)(Vg_UserMsg, "\n");
1418 VG_(message)(Vg_UserMsg, "Valgrind has to exit now. Sorry.\n");
1419 VG_(message)(Vg_UserMsg, "\n");
sewardjd68ac3e2006-01-20 14:31:57 +00001420 VG_(exit)(1);
1421 break;
1422 }
1423
sewardj4f9d6742007-08-29 09:11:35 +00001424 case VEX_TRC_JMP_SIGTRAP:
sewardj86df1552006-02-07 20:56:41 +00001425 VG_(synth_sigtrap)(tid);
1426 break;
1427
sewardj4f9d6742007-08-29 09:11:35 +00001428 case VEX_TRC_JMP_SIGSEGV:
1429 VG_(synth_fault)(tid);
1430 break;
1431
sewardj1c0ce7a2009-07-01 08:10:49 +00001432 case VEX_TRC_JMP_SIGBUS:
1433 VG_(synth_sigbus)(tid);
1434 break;
1435
petarj80e5c172012-10-19 14:45:17 +00001436 case VEX_TRC_JMP_SIGFPE_INTDIV:
1437 VG_(synth_sigfpe)(tid, VKI_FPE_INTDIV);
1438 break;
1439
1440 case VEX_TRC_JMP_SIGFPE_INTOVF:
1441 VG_(synth_sigfpe)(tid, VKI_FPE_INTOVF);
1442 break;
1443
florian2baf7532012-07-26 02:41:31 +00001444 case VEX_TRC_JMP_NODECODE: {
1445 Addr addr = VG_(get_IP)(tid);
1446
sewardjc30cd9b2012-12-06 18:08:54 +00001447 if (VG_(clo_sigill_diag)) {
1448 VG_(umsg)(
1449 "valgrind: Unrecognised instruction at address %#lx.\n", addr);
1450 VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
sewardj738856f2009-07-15 14:48:32 +00001451#define M(a) VG_(umsg)(a "\n");
njn7cf66582005-10-15 17:18:08 +00001452 M("Your program just tried to execute an instruction that Valgrind" );
1453 M("did not recognise. There are two possible reasons for this." );
1454 M("1. Your program has a bug and erroneously jumped to a non-code" );
1455 M(" location. If you are running Memcheck and you just saw a" );
1456 M(" warning about a bad jump, it's probably your program's fault.");
1457 M("2. The instruction is legitimate but Valgrind doesn't handle it,");
1458 M(" i.e. it's Valgrind's fault. If you think this is the case or");
njnec4d5132006-03-21 23:15:43 +00001459 M(" you are not sure, please let us know and we'll try to fix it.");
njn7cf66582005-10-15 17:18:08 +00001460 M("Either way, Valgrind will now raise a SIGILL signal which will" );
1461 M("probably kill your program." );
njnd5021362005-09-29 00:35:18 +00001462#undef M
sewardjc30cd9b2012-12-06 18:08:54 +00001463 }
sewardje663cb92002-04-12 10:26:32 +00001464
florian2baf7532012-07-26 02:41:31 +00001465#if defined(VGA_s390x)
1466 /* Now that the complaint is out we need to adjust the guest_IA. The
1467 reason is that -- after raising the exception -- execution will
1468 continue with the insn that follows the invalid insn. As the first
1469 2 bits of the invalid insn determine its length in the usual way,
1470 we can compute the address of the next insn here and adjust the
1471 guest_IA accordingly. This adjustment is essential and tested by
1472 none/tests/s390x/op_exception.c (which would loop forever
1473 otherwise) */
1474 UChar byte = ((UChar *)addr)[0];
1475 UInt insn_length = ((((byte >> 6) + 1) >> 1) + 1) << 1;
1476 Addr next_insn_addr = addr + insn_length;
1477
1478 VG_(set_IP)(tid, next_insn_addr);
1479#endif
1480 VG_(synth_sigill)(tid, addr);
1481 break;
1482 }
cerion85665ca2005-06-20 15:51:07 +00001483 case VEX_TRC_JMP_TINVAL:
cerion85665ca2005-06-20 15:51:07 +00001484 VG_(discard_translations)(
1485 (Addr64)VG_(threads)[tid].arch.vex.guest_TISTART,
sewardj45f4e7c2005-09-27 19:20:21 +00001486 VG_(threads)[tid].arch.vex.guest_TILEN,
1487 "scheduler(VEX_TRC_JMP_TINVAL)"
sewardj487ac702005-06-21 12:52:38 +00001488 );
cerion85665ca2005-06-20 15:51:07 +00001489 if (0)
1490 VG_(printf)("dump translations done.\n");
cerion85665ca2005-06-20 15:51:07 +00001491 break;
1492
sewardje3a384b2005-07-29 08:51:34 +00001493 case VG_TRC_INVARIANT_FAILED:
1494 /* This typically happens if, after running generated code,
1495 it is detected that host CPU settings (eg, FPU/Vector
1496 control words) are not as they should be. Vex's code
1497 generation specifies the state such control words should
1498 be in on entry to Vex-generated code, and they should be
1499 unchanged on exit from it. Failure of this assertion
1500 usually means a bug in Vex's code generation. */
sewardj59570ff2010-01-01 11:59:33 +00001501 //{ UInt xx;
1502 // __asm__ __volatile__ (
1503 // "\t.word 0xEEF12A10\n" // fmrx r2,fpscr
1504 // "\tmov %0, r2" : "=r"(xx) : : "r2" );
1505 // VG_(printf)("QQQQ new fpscr = %08x\n", xx);
1506 //}
sewardje3a384b2005-07-29 08:51:34 +00001507 vg_assert2(0, "VG_(scheduler), phase 3: "
1508 "run_innerloop detected host "
1509 "state invariant failure", trc);
1510
sewardja0fef1b2005-11-03 13:46:30 +00001511 case VEX_TRC_JMP_SYS_SYSENTER:
sewardj5438a012005-08-07 14:49:27 +00001512 /* Do whatever simulation is appropriate for an x86 sysenter
1513 instruction. Note that it is critical to set this thread's
1514 guest_EIP to point at the code to execute after the
1515 sysenter, since Vex-generated code will not have set it --
1516 vex does not know what it should be. Vex sets the next
njncda2f0f2009-05-18 02:12:08 +00001517 address to zero, so if you don't set guest_EIP, the thread
1518 will jump to zero afterwards and probably die as a result. */
1519# if defined(VGP_x86_linux)
sewardj5438a012005-08-07 14:49:27 +00001520 vg_assert2(0, "VG_(scheduler), phase 3: "
njncda2f0f2009-05-18 02:12:08 +00001521 "sysenter_x86 on x86-linux is not supported");
njnf76d27a2009-05-28 01:53:07 +00001522# elif defined(VGP_x86_darwin)
1523 /* return address in client edx */
1524 VG_(threads)[tid].arch.vex.guest_EIP
1525 = VG_(threads)[tid].arch.vex.guest_EDX;
sewardj93a97572012-04-21 15:35:12 +00001526 handle_syscall(tid, trc[0]);
sewardj5438a012005-08-07 14:49:27 +00001527# else
1528 vg_assert2(0, "VG_(scheduler), phase 3: "
1529 "sysenter_x86 on non-x86 platform?!?!");
1530# endif
njnf76d27a2009-05-28 01:53:07 +00001531 break;
sewardj5438a012005-08-07 14:49:27 +00001532
sewardjb5f6f512005-03-10 23:59:00 +00001533 default:
njn50ae1a72005-04-08 23:28:23 +00001534 vg_assert2(0, "VG_(scheduler), phase 3: "
sewardj291849f2012-04-20 23:58:55 +00001535 "unexpected thread return code (%u)", trc[0]);
sewardjb5f6f512005-03-10 23:59:00 +00001536 /* NOTREACHED */
1537 break;
sewardje663cb92002-04-12 10:26:32 +00001538
1539 } /* switch (trc) */
sewardjb0473e92011-06-07 22:54:32 +00001540
sewardj17c5e2e2012-12-28 09:12:14 +00001541 if (UNLIKELY(VG_(clo_profyle_sbs)) && VG_(clo_profyle_interval) > 0)
1542 maybe_show_sb_profile();
nethercote238a3c32004-08-09 13:13:31 +00001543 }
sewardjc24be7a2005-03-15 01:40:12 +00001544
1545 if (VG_(clo_trace_sched))
1546 print_sched_event(tid, "exiting VG_(scheduler)");
1547
sewardjb5f6f512005-03-10 23:59:00 +00001548 vg_assert(VG_(is_exiting)(tid));
thughes513197c2004-06-13 12:07:53 +00001549
sewardjb5f6f512005-03-10 23:59:00 +00001550 return tst->exitreason;
sewardj20917d82002-05-28 01:36:45 +00001551}
1552
1553
sewardjb5f6f512005-03-10 23:59:00 +00001554/*
1555 This causes all threads to forceably exit. They aren't actually
1556 dead by the time this returns; you need to call
njnaf839f52005-06-23 03:27:57 +00001557 VG_(reap_threads)() to wait for them.
sewardjb5f6f512005-03-10 23:59:00 +00001558 */
1559void VG_(nuke_all_threads_except) ( ThreadId me, VgSchedReturnCode src )
sewardjccef2e62002-05-29 19:26:32 +00001560{
1561 ThreadId tid;
sewardjb5f6f512005-03-10 23:59:00 +00001562
1563 vg_assert(VG_(is_running_thread)(me));
sewardj45f02c42005-02-05 18:27:14 +00001564
sewardjccef2e62002-05-29 19:26:32 +00001565 for (tid = 1; tid < VG_N_THREADS; tid++) {
1566 if (tid == me
jsgf855d93d2003-10-13 22:26:55 +00001567 || VG_(threads)[tid].status == VgTs_Empty)
sewardjccef2e62002-05-29 19:26:32 +00001568 continue;
sewardjb5f6f512005-03-10 23:59:00 +00001569 if (0)
sewardjef037c72002-05-30 00:40:03 +00001570 VG_(printf)(
1571 "VG_(nuke_all_threads_except): nuking tid %d\n", tid);
sewardjb5f6f512005-03-10 23:59:00 +00001572
1573 VG_(threads)[tid].exitreason = src;
sewardja8d8e232005-06-07 20:04:56 +00001574 if (src == VgSrc_FatalSig)
1575 VG_(threads)[tid].os_state.fatalsig = VKI_SIGKILL;
sewardjf54342a2006-10-17 01:51:24 +00001576 VG_(get_thread_out_of_syscall)(tid);
sewardjccef2e62002-05-29 19:26:32 +00001577 }
1578}
1579
1580
njnd3040452003-05-19 15:04:06 +00001581/* ---------------------------------------------------------------------
sewardjb5f6f512005-03-10 23:59:00 +00001582 Specifying shadow register values
njnd3040452003-05-19 15:04:06 +00001583 ------------------------------------------------------------------ */
1584
/* Client requests pass their argument block to the core in one
   arch-specific guest register and receive their result in another;
   select those registers here. */
#if defined(VGA_x86)
#  define VG_CLREQ_ARGS       guest_EAX
#  define VG_CLREQ_RET        guest_EDX
#elif defined(VGA_amd64)
#  define VG_CLREQ_ARGS       guest_RAX
#  define VG_CLREQ_RET        guest_RDX
#elif defined(VGA_ppc32) || defined(VGA_ppc64)
#  define VG_CLREQ_ARGS       guest_GPR4
#  define VG_CLREQ_RET        guest_GPR3
#elif defined(VGA_arm)
#  define VG_CLREQ_ARGS       guest_R4
#  define VG_CLREQ_RET        guest_R3
#elif defined (VGA_s390x)
#  define VG_CLREQ_ARGS       guest_r2
#  define VG_CLREQ_RET        guest_r3
#elif defined(VGA_mips32)
#  define VG_CLREQ_ARGS       guest_r12
#  define VG_CLREQ_RET        guest_r11
#else
#  error Unknown arch
#endif

/* Accessors for the request-args and return-value registers of a
   thread's guest state, and the byte offset of the return-value
   register within VexGuestArchState (needed for VG_TRACK below). */
#define CLREQ_ARGS(regs)   ((regs).vex.VG_CLREQ_ARGS)
#define CLREQ_RET(regs)    ((regs).vex.VG_CLREQ_RET)
#define O_CLREQ_RET        (offsetof(VexGuestArchState, VG_CLREQ_RET))

// These macros write a value to a client's thread register, and tell the
// tool that it's happened (if necessary).

#define SET_CLREQ_RETVAL(zztid, zzval) \
   do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
        VG_TRACK( post_reg_write, \
                  Vg_CoreClientReq, zztid, O_CLREQ_RET, sizeof(UWord)); \
   } while (0)

/* As above, but for values produced by a CLIENT_CALL function; the
   tool is additionally told which function f produced the value. */
#define SET_CLCALL_RETVAL(zztid, zzval, f) \
   do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
        VG_TRACK( post_reg_write_clientcall_return, \
                  zztid, O_CLREQ_RET, sizeof(UWord), f); \
   } while (0)
sewardj0ec07f32006-01-12 12:32:32 +00001626
sewardje663cb92002-04-12 10:26:32 +00001627/* ---------------------------------------------------------------------
sewardj124ca2a2002-06-20 10:19:38 +00001628 Handle client requests.
sewardje663cb92002-04-12 10:26:32 +00001629 ------------------------------------------------------------------ */
1630
njn9cb54ac2005-06-12 04:19:17 +00001631// OS-specific(?) client requests
1632static Bool os_client_request(ThreadId tid, UWord *args)
1633{
1634 Bool handled = True;
1635
1636 vg_assert(VG_(is_running_thread)(tid));
1637
1638 switch(args[0]) {
1639 case VG_USERREQ__LIBC_FREERES_DONE:
1640 /* This is equivalent to an exit() syscall, but we don't set the
1641 exitcode (since it might already be set) */
1642 if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched))
sewardj738856f2009-07-15 14:48:32 +00001643 VG_(message)(Vg_DebugMsg,
1644 "__libc_freeres() done; really quitting!\n");
sewardjf54342a2006-10-17 01:51:24 +00001645 VG_(threads)[tid].exitreason = VgSrc_ExitThread;
njn9cb54ac2005-06-12 04:19:17 +00001646 break;
1647
1648 default:
1649 handled = False;
1650 break;
1651 }
1652
1653 return handled;
1654}
1655
1656
sewardj124ca2a2002-06-20 10:19:38 +00001657/* Do a client request for the thread tid. After the request, tid may
1658 or may not still be runnable; if not, the scheduler will have to
1659 choose a new thread to run.
1660*/
sewardje663cb92002-04-12 10:26:32 +00001661static
sewardjb5f6f512005-03-10 23:59:00 +00001662void do_client_request ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00001663{
sewardjb5f6f512005-03-10 23:59:00 +00001664 UWord* arg = (UWord*)(CLREQ_ARGS(VG_(threads)[tid].arch));
nethercoted1b64b22004-11-04 18:22:28 +00001665 UWord req_no = arg[0];
sewardj124ca2a2002-06-20 10:19:38 +00001666
fitzhardinge98abfc72003-12-16 02:05:15 +00001667 if (0)
nethercoted1b64b22004-11-04 18:22:28 +00001668 VG_(printf)("req no = 0x%llx, arg = %p\n", (ULong)req_no, arg);
sewardje663cb92002-04-12 10:26:32 +00001669 switch (req_no) {
1670
njn3e884182003-04-15 13:03:23 +00001671 case VG_USERREQ__CLIENT_CALL0: {
njn2ac95242005-03-13 23:07:30 +00001672 UWord (*f)(ThreadId) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001673 if (f == NULL)
sewardj738856f2009-07-15 14:48:32 +00001674 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL0: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001675 else
njn2ac95242005-03-13 23:07:30 +00001676 SET_CLCALL_RETVAL(tid, f ( tid ), (Addr)f);
njn3e884182003-04-15 13:03:23 +00001677 break;
1678 }
1679 case VG_USERREQ__CLIENT_CALL1: {
njn2ac95242005-03-13 23:07:30 +00001680 UWord (*f)(ThreadId, UWord) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001681 if (f == NULL)
sewardj738856f2009-07-15 14:48:32 +00001682 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL1: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001683 else
njn2ac95242005-03-13 23:07:30 +00001684 SET_CLCALL_RETVAL(tid, f ( tid, arg[2] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00001685 break;
1686 }
1687 case VG_USERREQ__CLIENT_CALL2: {
njn2ac95242005-03-13 23:07:30 +00001688 UWord (*f)(ThreadId, UWord, UWord) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001689 if (f == NULL)
sewardj738856f2009-07-15 14:48:32 +00001690 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL2: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001691 else
njn2ac95242005-03-13 23:07:30 +00001692 SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00001693 break;
1694 }
1695 case VG_USERREQ__CLIENT_CALL3: {
njn2ac95242005-03-13 23:07:30 +00001696 UWord (*f)(ThreadId, UWord, UWord, UWord) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001697 if (f == NULL)
sewardj738856f2009-07-15 14:48:32 +00001698 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL3: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001699 else
njn2ac95242005-03-13 23:07:30 +00001700 SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3], arg[4] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00001701 break;
1702 }
1703
njnf09745a2005-05-10 03:01:23 +00001704 // Nb: this looks like a circular definition, because it kind of is.
1705 // See comment in valgrind.h to understand what's going on.
sewardj124ca2a2002-06-20 10:19:38 +00001706 case VG_USERREQ__RUNNING_ON_VALGRIND:
sewardjb5f6f512005-03-10 23:59:00 +00001707 SET_CLREQ_RETVAL(tid, RUNNING_ON_VALGRIND+1);
sewardj124ca2a2002-06-20 10:19:38 +00001708 break;
1709
fitzhardinge39de4b42003-10-31 07:12:21 +00001710 case VG_USERREQ__PRINTF: {
sewardjc560fb32010-01-28 15:23:54 +00001711 /* JRS 2010-Jan-28: this is DEPRECATED; use the
1712 _VALIST_BY_REF version instead */
1713 if (sizeof(va_list) != sizeof(UWord))
1714 goto va_list_casting_error_NORETURN;
sewardj05b07152010-01-04 01:01:02 +00001715 union {
1716 va_list vargs;
sewardjc560fb32010-01-28 15:23:54 +00001717 unsigned long uw;
1718 } u;
1719 u.uw = (unsigned long)arg[2];
1720 Int count =
floriancd19e992012-11-03 19:32:28 +00001721 VG_(vmessage)( Vg_ClientMsg, (HChar *)arg[1], u.vargs );
sewardjc560fb32010-01-28 15:23:54 +00001722 VG_(message_flush)();
1723 SET_CLREQ_RETVAL( tid, count );
1724 break;
1725 }
fitzhardinge39de4b42003-10-31 07:12:21 +00001726
sewardjc560fb32010-01-28 15:23:54 +00001727 case VG_USERREQ__PRINTF_BACKTRACE: {
1728 /* JRS 2010-Jan-28: this is DEPRECATED; use the
1729 _VALIST_BY_REF version instead */
1730 if (sizeof(va_list) != sizeof(UWord))
1731 goto va_list_casting_error_NORETURN;
sewardj05b07152010-01-04 01:01:02 +00001732 union {
1733 va_list vargs;
sewardjc560fb32010-01-28 15:23:54 +00001734 unsigned long uw;
1735 } u;
1736 u.uw = (unsigned long)arg[2];
1737 Int count =
floriancd19e992012-11-03 19:32:28 +00001738 VG_(vmessage)( Vg_ClientMsg, (HChar *)arg[1], u.vargs );
sewardjc560fb32010-01-28 15:23:54 +00001739 VG_(message_flush)();
1740 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
1741 SET_CLREQ_RETVAL( tid, count );
1742 break;
1743 }
1744
1745 case VG_USERREQ__PRINTF_VALIST_BY_REF: {
1746 va_list* vargsp = (va_list*)arg[2];
1747 Int count =
floriancd19e992012-11-03 19:32:28 +00001748 VG_(vmessage)( Vg_ClientMsg, (HChar *)arg[1], *vargsp );
sewardjc560fb32010-01-28 15:23:54 +00001749 VG_(message_flush)();
1750 SET_CLREQ_RETVAL( tid, count );
1751 break;
1752 }
1753
1754 case VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF: {
1755 va_list* vargsp = (va_list*)arg[2];
1756 Int count =
floriancd19e992012-11-03 19:32:28 +00001757 VG_(vmessage)( Vg_ClientMsg, (HChar *)arg[1], *vargsp );
sewardjc560fb32010-01-28 15:23:54 +00001758 VG_(message_flush)();
1759 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
1760 SET_CLREQ_RETVAL( tid, count );
1761 break;
1762 }
1763
1764 case VG_USERREQ__INTERNAL_PRINTF_VALIST_BY_REF: {
1765 va_list* vargsp = (va_list*)arg[2];
1766 Int count =
floriancd19e992012-11-03 19:32:28 +00001767 VG_(vmessage)( Vg_DebugMsg, (HChar *)arg[1], *vargsp );
sewardjc560fb32010-01-28 15:23:54 +00001768 VG_(message_flush)();
1769 SET_CLREQ_RETVAL( tid, count );
1770 break;
1771 }
fitzhardinge39de4b42003-10-31 07:12:21 +00001772
tomd2645142009-10-29 09:27:11 +00001773 case VG_USERREQ__ADD_IFUNC_TARGET: {
1774 VG_(redir_add_ifunc_target)( arg[1], arg[2] );
1775 SET_CLREQ_RETVAL( tid, 0);
1776 break; }
1777
rjwalsh0140af52005-06-04 20:42:33 +00001778 case VG_USERREQ__STACK_REGISTER: {
njn945ed2e2005-06-24 03:28:30 +00001779 UWord sid = VG_(register_stack)((Addr)arg[1], (Addr)arg[2]);
rjwalsh0140af52005-06-04 20:42:33 +00001780 SET_CLREQ_RETVAL( tid, sid );
1781 break; }
1782
1783 case VG_USERREQ__STACK_DEREGISTER: {
njn945ed2e2005-06-24 03:28:30 +00001784 VG_(deregister_stack)(arg[1]);
rjwalsh0140af52005-06-04 20:42:33 +00001785 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1786 break; }
1787
1788 case VG_USERREQ__STACK_CHANGE: {
njn945ed2e2005-06-24 03:28:30 +00001789 VG_(change_stack)(arg[1], (Addr)arg[2], (Addr)arg[3]);
rjwalsh0140af52005-06-04 20:42:33 +00001790 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1791 break; }
1792
fitzhardinge98abfc72003-12-16 02:05:15 +00001793 case VG_USERREQ__GET_MALLOCFUNCS: {
1794 struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];
1795
njnfc51f8d2005-06-21 03:20:17 +00001796 info->tl_malloc = VG_(tdict).tool_malloc;
1797 info->tl_calloc = VG_(tdict).tool_calloc;
1798 info->tl_realloc = VG_(tdict).tool_realloc;
1799 info->tl_memalign = VG_(tdict).tool_memalign;
1800 info->tl___builtin_new = VG_(tdict).tool___builtin_new;
1801 info->tl___builtin_vec_new = VG_(tdict).tool___builtin_vec_new;
1802 info->tl_free = VG_(tdict).tool_free;
1803 info->tl___builtin_delete = VG_(tdict).tool___builtin_delete;
1804 info->tl___builtin_vec_delete = VG_(tdict).tool___builtin_vec_delete;
njn8b140de2009-02-17 04:31:18 +00001805 info->tl_malloc_usable_size = VG_(tdict).tool_malloc_usable_size;
fitzhardinge98abfc72003-12-16 02:05:15 +00001806
njn088bfb42005-08-17 05:01:37 +00001807 info->mallinfo = VG_(mallinfo);
sewardjb5f6f512005-03-10 23:59:00 +00001808 info->clo_trace_malloc = VG_(clo_trace_malloc);
fitzhardinge98abfc72003-12-16 02:05:15 +00001809
1810 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1811
1812 break;
1813 }
1814
njn25e49d8e72002-09-23 09:36:25 +00001815 /* Requests from the client program */
1816
1817 case VG_USERREQ__DISCARD_TRANSLATIONS:
1818 if (VG_(clo_verbosity) > 2)
1819 VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
njn8a7b41b2007-09-23 00:51:24 +00001820 " addr %p, len %lu\n",
njn25e49d8e72002-09-23 09:36:25 +00001821 (void*)arg[1], arg[2] );
1822
sewardj45f4e7c2005-09-27 19:20:21 +00001823 VG_(discard_translations)(
1824 arg[1], arg[2], "scheduler(VG_USERREQ__DISCARD_TRANSLATIONS)"
1825 );
njn25e49d8e72002-09-23 09:36:25 +00001826
njnd3040452003-05-19 15:04:06 +00001827 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
njn25e49d8e72002-09-23 09:36:25 +00001828 break;
1829
njn47363ab2003-04-21 13:24:40 +00001830 case VG_USERREQ__COUNT_ERRORS:
nethercotef2b11482004-08-02 12:36:01 +00001831 SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() );
njn47363ab2003-04-21 13:24:40 +00001832 break;
1833
sewardjc8259b82009-04-22 22:42:10 +00001834 case VG_USERREQ__LOAD_PDB_DEBUGINFO:
1835 VG_(di_notify_pdb_debuginfo)( arg[1], arg[2], arg[3], arg[4] );
1836 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1837 break;
1838
sewardj5c659622010-08-20 18:22:07 +00001839 case VG_USERREQ__MAP_IP_TO_SRCLOC: {
1840 Addr ip = arg[1];
floriandbb35842012-10-27 18:39:11 +00001841 HChar* buf64 = (HChar*)arg[2];
sewardj5c659622010-08-20 18:22:07 +00001842
1843 VG_(memset)(buf64, 0, 64);
1844 UInt linenum = 0;
1845 Bool ok = VG_(get_filename_linenum)(
1846 ip, &buf64[0], 50, NULL, 0, NULL, &linenum
1847 );
1848 if (ok) {
1849 /* Find the terminating zero in the first 50 bytes. */
1850 UInt i;
1851 for (i = 0; i < 50; i++) {
1852 if (buf64[i] == 0)
1853 break;
1854 }
1855 /* We must find a zero somewhere in 0 .. 49. Else
1856 VG_(get_filename_linenum) is not properly zero
1857 terminating. */
1858 vg_assert(i < 50);
1859 VG_(sprintf)(&buf64[i], ":%u", linenum);
1860 } else {
1861 buf64[0] = 0;
1862 }
1863
1864 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1865 break;
1866 }
1867
sewardjdc873c02011-07-24 16:02:33 +00001868 case VG_USERREQ__CHANGE_ERR_DISABLEMENT: {
1869 Word delta = arg[1];
1870 vg_assert(delta == 1 || delta == -1);
1871 ThreadState* tst = VG_(get_ThreadState)(tid);
1872 vg_assert(tst);
1873 if (delta == 1 && tst->err_disablement_level < 0xFFFFFFFF) {
1874 tst->err_disablement_level++;
1875 }
1876 else
1877 if (delta == -1 && tst->err_disablement_level > 0) {
1878 tst->err_disablement_level--;
1879 }
1880 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1881 break;
1882 }
1883
njn32f8d8c2009-07-15 02:31:45 +00001884 case VG_USERREQ__MALLOCLIKE_BLOCK:
bart91347382011-03-25 20:07:25 +00001885 case VG_USERREQ__RESIZEINPLACE_BLOCK:
njn32f8d8c2009-07-15 02:31:45 +00001886 case VG_USERREQ__FREELIKE_BLOCK:
1887 // Ignore them if the addr is NULL; otherwise pass onto the tool.
1888 if (!arg[1]) {
1889 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1890 break;
1891 } else {
1892 goto my_default;
1893 }
1894
florianbb913cd2012-08-28 16:50:39 +00001895 case VG_USERREQ__VEX_INIT_FOR_IRI:
1896 LibVEX_InitIRI ( (IRICB *)arg[1] );
1897 break;
1898
sewardje663cb92002-04-12 10:26:32 +00001899 default:
njn32f8d8c2009-07-15 02:31:45 +00001900 my_default:
njn9cb54ac2005-06-12 04:19:17 +00001901 if (os_client_request(tid, arg)) {
1902 // do nothing, os_client_request() handled it
sewardjb5f6f512005-03-10 23:59:00 +00001903 } else if (VG_(needs).client_requests) {
nethercoted1b64b22004-11-04 18:22:28 +00001904 UWord ret;
sewardj34042512002-10-22 04:14:35 +00001905
njn25e49d8e72002-09-23 09:36:25 +00001906 if (VG_(clo_verbosity) > 2)
njn8a7b41b2007-09-23 00:51:24 +00001907 VG_(printf)("client request: code %lx, addr %p, len %lu\n",
njn25e49d8e72002-09-23 09:36:25 +00001908 arg[0], (void*)arg[1], arg[2] );
1909
njn51d827b2005-05-09 01:02:08 +00001910 if ( VG_TDICT_CALL(tool_handle_client_request, tid, arg, &ret) )
sewardjb5f6f512005-03-10 23:59:00 +00001911 SET_CLREQ_RETVAL(tid, ret);
njn25e49d8e72002-09-23 09:36:25 +00001912 } else {
sewardj34042512002-10-22 04:14:35 +00001913 static Bool whined = False;
1914
sewardjb5f6f512005-03-10 23:59:00 +00001915 if (!whined && VG_(clo_verbosity) > 2) {
nethercote7cc9c232004-01-21 15:08:04 +00001916 // Allow for requests in core, but defined by tools, which
njnd7994182003-10-02 13:44:04 +00001917 // have 0 and 0 in their two high bytes.
floriandbb35842012-10-27 18:39:11 +00001918 HChar c1 = (arg[0] >> 24) & 0xff;
1919 HChar c2 = (arg[0] >> 16) & 0xff;
njnd7994182003-10-02 13:44:04 +00001920 if (c1 == 0) c1 = '_';
1921 if (c2 == 0) c2 = '_';
sewardj34042512002-10-22 04:14:35 +00001922 VG_(message)(Vg_UserMsg, "Warning:\n"
barta0b6b2c2008-07-07 06:49:24 +00001923 " unhandled client request: 0x%lx (%c%c+0x%lx). Perhaps\n"
sewardj738856f2009-07-15 14:48:32 +00001924 " VG_(needs).client_requests should be set?\n",
njnd7994182003-10-02 13:44:04 +00001925 arg[0], c1, c2, arg[0] & 0xffff);
sewardj34042512002-10-22 04:14:35 +00001926 whined = True;
1927 }
njn25e49d8e72002-09-23 09:36:25 +00001928 }
sewardje663cb92002-04-12 10:26:32 +00001929 break;
1930 }
sewardjc560fb32010-01-28 15:23:54 +00001931 return;
1932
1933 /*NOTREACHED*/
1934 va_list_casting_error_NORETURN:
1935 VG_(umsg)(
1936 "Valgrind: fatal error - cannot continue: use of the deprecated\n"
1937 "client requests VG_USERREQ__PRINTF or VG_USERREQ__PRINTF_BACKTRACE\n"
1938 "on a platform where they cannot be supported. Please use the\n"
1939 "equivalent _VALIST_BY_REF versions instead.\n"
1940 "\n"
1941 "This is a binary-incompatible change in Valgrind's client request\n"
1942 "mechanism. It is unfortunate, but difficult to avoid. End-users\n"
1943 "are expected to almost never see this message. The only case in\n"
1944 "which you might see this message is if your code uses the macros\n"
1945 "VALGRIND_PRINTF or VALGRIND_PRINTF_BACKTRACE. If so, you will need\n"
1946 "to recompile such code, using the header files from this version of\n"
1947 "Valgrind, and not any previous version.\n"
1948 "\n"
1949 "If you see this mesage in any other circumstances, it is probably\n"
1950 "a bug in Valgrind. In this case, please file a bug report at\n"
1951 "\n"
1952 " http://www.valgrind.org/support/bug_reports.html\n"
1953 "\n"
1954 "Will now abort.\n"
1955 );
1956 vg_assert(0);
sewardje663cb92002-04-12 10:26:32 +00001957}
1958
1959
sewardj6072c362002-04-19 14:40:57 +00001960/* ---------------------------------------------------------------------
njn6676d5b2005-06-19 18:49:19 +00001961 Sanity checking (permanently engaged)
sewardj6072c362002-04-19 14:40:57 +00001962 ------------------------------------------------------------------ */
1963
sewardjb5f6f512005-03-10 23:59:00 +00001964/* Internal consistency checks on the sched structures. */
sewardj6072c362002-04-19 14:40:57 +00001965static
sewardjb5f6f512005-03-10 23:59:00 +00001966void scheduler_sanity ( ThreadId tid )
sewardj6072c362002-04-19 14:40:57 +00001967{
sewardjb5f6f512005-03-10 23:59:00 +00001968 Bool bad = False;
sewardjf54342a2006-10-17 01:51:24 +00001969 static UInt lasttime = 0;
1970 UInt now;
1971 Int lwpid = VG_(gettid)();
jsgf855d93d2003-10-13 22:26:55 +00001972
sewardjb5f6f512005-03-10 23:59:00 +00001973 if (!VG_(is_running_thread)(tid)) {
1974 VG_(message)(Vg_DebugMsg,
sewardjf54342a2006-10-17 01:51:24 +00001975 "Thread %d is supposed to be running, "
sewardjad0a3a82006-12-17 18:58:55 +00001976 "but doesn't own the_BigLock (owned by %d)\n",
njnc7561b92005-06-19 01:24:32 +00001977 tid, VG_(running_tid));
sewardjb5f6f512005-03-10 23:59:00 +00001978 bad = True;
jsgf855d93d2003-10-13 22:26:55 +00001979 }
sewardj5f07b662002-04-23 16:52:51 +00001980
sewardjf54342a2006-10-17 01:51:24 +00001981 if (lwpid != VG_(threads)[tid].os_state.lwpid) {
sewardjb5f6f512005-03-10 23:59:00 +00001982 VG_(message)(Vg_DebugMsg,
njnd06ed472005-03-13 05:12:31 +00001983 "Thread %d supposed to be in LWP %d, but we're actually %d\n",
1984 tid, VG_(threads)[tid].os_state.lwpid, VG_(gettid)());
sewardjb5f6f512005-03-10 23:59:00 +00001985 bad = True;
sewardj5f07b662002-04-23 16:52:51 +00001986 }
sewardjf54342a2006-10-17 01:51:24 +00001987
bart78bfc712011-12-08 16:14:59 +00001988 if (lwpid != ML_(get_sched_lock_owner)(the_BigLock)) {
sewardjf54342a2006-10-17 01:51:24 +00001989 VG_(message)(Vg_DebugMsg,
sewardjad0a3a82006-12-17 18:58:55 +00001990 "Thread (LWPID) %d doesn't own the_BigLock\n",
sewardjf54342a2006-10-17 01:51:24 +00001991 tid);
1992 bad = True;
1993 }
1994
1995 /* Periodically show the state of all threads, for debugging
1996 purposes. */
1997 now = VG_(read_millisecond_timer)();
1998 if (0 && (!bad) && (lasttime + 4000/*ms*/ <= now)) {
1999 lasttime = now;
2000 VG_(printf)("\n------------ Sched State at %d ms ------------\n",
2001 (Int)now);
2002 VG_(show_sched_status)();
2003 }
2004
2005 /* core_panic also shows the sched status, which is why we don't
2006 show it above if bad==True. */
2007 if (bad)
2008 VG_(core_panic)("scheduler_sanity: failed");
sewardj6072c362002-04-19 14:40:57 +00002009}
2010
/* Top-level sanity driver, gated by --sanity-level.
   Level < 1: do nothing.  Level >= 1: run the tool's cheap sanity
   check on every call.  Expensive checks (tool expensive check,
   per-thread stack-overrun scan) run when 'force_expensive' is set,
   when --sanity-level > 1, or — at level exactly 1 — periodically,
   with the gap between runs growing by one call each time so that
   long-running programs are burdened less and less.
   Level > 1 additionally verifies the low-level malloc arenas on
   every call. */
void VG_(sanity_check_general) ( Bool force_expensive )
{
   ThreadId tid;

   /* Schedule of the periodic expensive checks.  next_slow_check_at
      is compared against sanity_fast_count (the count of calls to
      this function); slow_check_interval is the current gap and
      grows by 1 after every expensive run. */
   static UInt next_slow_check_at = 1;
   static UInt slow_check_interval = 25;

   if (VG_(clo_sanity_level) < 1) return;

   /* --- First do all the tests that we can do quickly. ---*/

   sanity_fast_count++;

   /* Check stuff pertaining to the memory check system. */

   /* Check that nobody has spuriously claimed that the first or
      last 16 pages of memory have become accessible [...] */
   if (VG_(needs).sanity_checks) {
      vg_assert(VG_TDICT_CALL(tool_cheap_sanity_check));
   }

   /* --- Now some more expensive checks. ---*/

   /* Once every now and again, check some more expensive stuff.
      Gradually increase the interval between such checks so as not to
      burden long-running programs too much. */
   if ( force_expensive
        || VG_(clo_sanity_level) > 1
        || (VG_(clo_sanity_level) == 1 
            && sanity_fast_count == next_slow_check_at)) {

      if (0) VG_(printf)("SLOW at %d\n", sanity_fast_count-1);

      /* sanity_fast_count was already incremented above, hence the
         '- 1': the next run happens slow_check_interval calls after
         this one. */
      next_slow_check_at = sanity_fast_count - 1 + slow_check_interval;
      slow_check_interval++;
      sanity_slow_count++;

      if (VG_(needs).sanity_checks) {
         vg_assert(VG_TDICT_CALL(tool_expensive_sanity_check));
      }

      /* Look for stack overruns.  Visit all threads.  tid 0 is
         skipped (invalid tid), as are empty/zombie slots which have
         no live stack to inspect. */
      for (tid = 1; tid < VG_N_THREADS; tid++) {
         SizeT remains;
         VgStack* stack;

         if (VG_(threads)[tid].status == VgTs_Empty ||
             VG_(threads)[tid].status == VgTs_Zombie)
            continue;

         stack 
            = (VgStack*)
              VG_(get_ThreadState)(tid)->os_state.valgrind_stack_base;
         /* Only probe the bottom 'limit' bytes of each stack; the
            unused-space probe touches memory, and scanning more
            causes lots of L2 misses. */
         SizeT limit
            = 4096; // Let's say.  Checking more causes lots of L2 misses.
         remains 
            = VG_(am_get_VgStack_unused_szB)(stack, limit);
         if (remains < limit)
            VG_(message)(Vg_DebugMsg, 
                         "WARNING: Thread %d is within %ld bytes "
                         "of running out of stack!\n",
                         tid, remains);
      }
   }

   if (VG_(clo_sanity_level) > 1) {
      /* Check sanity of the low-level memory manager.  Note that bugs
         in the client's code can cause this to fail, so we don't do
         this check unless specially asked for.  And because it's
         potentially very expensive. */
      VG_(sanity_check_malloc_all)();
   }
}
sewardj6072c362002-04-19 14:40:57 +00002084
sewardje663cb92002-04-12 10:26:32 +00002085/*--------------------------------------------------------------------*/
njn278b3d62005-05-30 23:20:51 +00002086/*--- end ---*/
sewardje663cb92002-04-12 10:26:32 +00002087/*--------------------------------------------------------------------*/