
/*--------------------------------------------------------------------*/
/*--- Thread scheduling.                               scheduler.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2013 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/*
   Overview

   Valgrind tries to emulate the kernel's threading as closely as
   possible. The client does all threading via the normal syscalls
   (on Linux: clone, etc). Valgrind emulates this by creating exactly
   the same process structure as would be created without Valgrind.
   There are no extra threads.

   The main difference is that Valgrind only allows one client thread
   to run at once. This is controlled with the CPU Big Lock,
   "the_BigLock". Any time a thread wants to run client code or
   manipulate any shared state (which is anything other than its own
   ThreadState entry), it must hold the_BigLock.

   When a thread is about to block in a blocking syscall, it releases
   the_BigLock, and re-takes it when it becomes runnable again (either
   because the syscall finished, or we took a signal).

   VG_(scheduler) therefore runs in each thread. It returns only when
   the thread is exiting, either because it exited itself, or it was
   told to exit by another thread.

   This file is almost entirely OS-independent. The details of how
   the OS handles threading and signalling are abstracted away and
   implemented elsewhere. [Some of the functions have worked their
   way back for the moment, until we do an OS port in earnest...]
*/

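/* Illustrative sketch only (not part of the scheduler): the lock
   discipline described above, as it would look around a blocking
   syscall.  The helper name and the do_the_syscall() call are
   hypothetical stand-ins; the real paths live in m_syswrap and later
   in this file.  Kept inside "#if 0" so it is never compiled. */
#if 0
static void blocking_syscall_sketch ( ThreadId tid )
{
   /* We enter holding the_BigLock, in VgTs_Runnable state. */
   VG_(release_BigLock)(tid, VgTs_WaitSys, "sketch: about to block");
   /* ... the thread blocks in the kernel; other threads may run ... */
   do_the_syscall(tid);   /* hypothetical stand-in for the syscall itself */
   /* Re-take the lock before touching any shared state again. */
   VG_(acquire_BigLock)(tid, "sketch: syscall done");
}
#endif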

#include "pub_core_basics.h"
#include "pub_core_debuglog.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"    // __NR_sched_yield
#include "pub_core_libcsetjmp.h"   // to keep _threadstate.h happy
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_clreq.h"        // for VG_USERREQ__*
#include "pub_core_dispatch.h"
#include "pub_core_errormgr.h"     // For VG_(get_n_errs_found)()
#include "pub_core_gdbserver.h"    // for VG_(gdbserver) and VG_(gdbserver_activity)
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#if defined(VGO_darwin)
#include "pub_core_mach.h"
#endif
#include "pub_core_machine.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_replacemalloc.h"
#include "pub_core_sbprofile.h"
#include "pub_core_signals.h"
#include "pub_core_stacks.h"
#include "pub_core_stacktrace.h"   // For VG_(get_and_pp_StackTrace)()
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"
#include "pub_core_translate.h"    // For VG_(translate)()
#include "pub_core_transtab.h"
#include "pub_core_debuginfo.h"    // VG_(di_notify_pdb_debuginfo)
#include "priv_sched-lock.h"
#include "pub_core_scheduler.h"    // self
#include "pub_core_redir.h"
#include "libvex_emnote.h"         // VexEmNote


/* ---------------------------------------------------------------------
   Types and globals for the scheduler.
   ------------------------------------------------------------------ */

/* ThreadId and ThreadState are defined elsewhere. */

/* Defines the thread-scheduling timeslice, in terms of the number of
   basic blocks we attempt to run each thread for.  Smaller values
   give finer interleaving but much increased scheduling overheads. */
#define SCHEDULING_QUANTUM   100000

/* If False, a fault is Valgrind-internal (ie, a bug) */
Bool VG_(in_generated_code) = False;

/* 64-bit counter for the number of basic blocks done. */
static ULong bbs_done = 0;

/* Counter to see if vgdb activity is to be verified.
   When the number of bbs done reaches vgdb_next_poll, the scheduler
   will poll for gdbserver activity.  VG_(force_vgdb_poll) and
   VG_(disable_vgdb_poll) allow the valgrind core (e.g. m_gdbserver)
   to control when the next poll will be done. */
static ULong vgdb_next_poll;

/* Forwards */
static void do_client_request ( ThreadId tid );
static void scheduler_sanity ( ThreadId tid );
static void mostly_clear_thread_record ( ThreadId tid );

/* Stats. */
static ULong n_scheduling_events_MINOR = 0;
static ULong n_scheduling_events_MAJOR = 0;

/* Stats: number of XIndirs, and number that missed in the fast
   cache. */
static ULong stats__n_xindirs = 0;
static ULong stats__n_xindir_misses = 0;

/* And 32-bit temp bins for the above, so that 32-bit platforms don't
   have to do 64 bit incs on the hot path through
   VG_(cp_disp_xindir). */
/*global*/ UInt VG_(stats__n_xindirs_32) = 0;
/*global*/ UInt VG_(stats__n_xindir_misses_32) = 0;

/* Sanity checking counts. */
static UInt sanity_fast_count = 0;
static UInt sanity_slow_count = 0;

void VG_(print_scheduler_stats)(void)
{
   VG_(message)(Vg_DebugMsg,
      "scheduler: %'llu event checks.\n", bbs_done );
   VG_(message)(Vg_DebugMsg,
      "scheduler: %'llu indir transfers, %'llu misses (1 in %llu)\n",
      stats__n_xindirs, stats__n_xindir_misses,
      stats__n_xindirs / (stats__n_xindir_misses
                          ? stats__n_xindir_misses : 1));
   VG_(message)(Vg_DebugMsg,
      "scheduler: %'llu/%'llu major/minor sched events.\n",
      n_scheduling_events_MAJOR, n_scheduling_events_MINOR);
   VG_(message)(Vg_DebugMsg,
                " sanity: %d cheap, %d expensive checks.\n",
                sanity_fast_count, sanity_slow_count );
}

/*
 * Mutual exclusion object used to serialize threads.
 */
static struct sched_lock *the_BigLock;


/* ---------------------------------------------------------------------
   Helper functions for the scheduler.
   ------------------------------------------------------------------ */

static
void print_sched_event ( ThreadId tid, const HChar* what )
{
   VG_(message)(Vg_DebugMsg, " SCHED[%d]: %s\n", tid, what );
}

/* For showing SB profiles, if the user asks to see them. */
static
void maybe_show_sb_profile ( void )
{
   /* DO NOT MAKE NON-STATIC */
   static ULong bbs_done_lastcheck = 0;
   /* */
   vg_assert(VG_(clo_profyle_interval) > 0);
   Long delta = (Long)(bbs_done - bbs_done_lastcheck);
   vg_assert(delta >= 0);
   if ((ULong)delta >= VG_(clo_profyle_interval)) {
      bbs_done_lastcheck = bbs_done;
      VG_(get_and_show_SB_profile)(bbs_done);
   }
}

static
const HChar* name_of_sched_event ( UInt event )
{
   switch (event) {
      case VEX_TRC_JMP_TINVAL:         return "TINVAL";
      case VEX_TRC_JMP_NOREDIR:        return "NOREDIR";
      case VEX_TRC_JMP_SIGILL:         return "SIGILL";
      case VEX_TRC_JMP_SIGTRAP:        return "SIGTRAP";
      case VEX_TRC_JMP_SIGSEGV:        return "SIGSEGV";
      case VEX_TRC_JMP_SIGBUS:         return "SIGBUS";
      case VEX_TRC_JMP_SIGFPE_INTOVF:
      case VEX_TRC_JMP_SIGFPE_INTDIV:  return "SIGFPE";
      case VEX_TRC_JMP_EMWARN:         return "EMWARN";
      case VEX_TRC_JMP_EMFAIL:         return "EMFAIL";
      case VEX_TRC_JMP_CLIENTREQ:      return "CLIENTREQ";
      case VEX_TRC_JMP_YIELD:          return "YIELD";
      case VEX_TRC_JMP_NODECODE:       return "NODECODE";
      case VEX_TRC_JMP_MAPFAIL:        return "MAPFAIL";
      case VEX_TRC_JMP_SYS_SYSCALL:    return "SYSCALL";
      case VEX_TRC_JMP_SYS_INT32:      return "INT32";
      case VEX_TRC_JMP_SYS_INT128:     return "INT128";
      case VEX_TRC_JMP_SYS_INT129:     return "INT129";
      case VEX_TRC_JMP_SYS_INT130:     return "INT130";
      case VEX_TRC_JMP_SYS_SYSENTER:   return "SYSENTER";
      case VEX_TRC_JMP_BORING:         return "VEX_BORING";

      case VG_TRC_BORING:              return "VG_BORING";
      case VG_TRC_INNER_FASTMISS:      return "FASTMISS";
      case VG_TRC_INNER_COUNTERZERO:   return "COUNTERZERO";
      case VG_TRC_FAULT_SIGNAL:        return "FAULTSIGNAL";
      case VG_TRC_INVARIANT_FAILED:    return "INVFAILED";
      case VG_TRC_CHAIN_ME_TO_SLOW_EP: return "CHAIN_ME_SLOW";
      case VG_TRC_CHAIN_ME_TO_FAST_EP: return "CHAIN_ME_FAST";
      default:                         return "??UNKNOWN??";
   }
}

/* Allocate a completely empty ThreadState record. */
ThreadId VG_(alloc_ThreadState) ( void )
{
   Int i;
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty) {
         VG_(threads)[i].status = VgTs_Init;
         VG_(threads)[i].exitreason = VgSrc_None;
         if (VG_(threads)[i].thread_name)
            VG_(arena_free)(VG_AR_CORE, VG_(threads)[i].thread_name);
         VG_(threads)[i].thread_name = NULL;
         return i;
      }
   }
   VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
   VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
   VG_(core_panic)("VG_N_THREADS is too low");
   /*NOTREACHED*/
}

/*
   Mark a thread as Runnable.  This will block until the_BigLock is
   available, so that we get exclusive access to all the shared
   structures and the CPU.  Up until we get the_BigLock, we must not
   touch any shared state.

   When this returns, we'll actually be running.
 */
void VG_(acquire_BigLock)(ThreadId tid, const HChar* who)
{
   ThreadState *tst;

#if 0
   if (VG_(clo_trace_sched)) {
      HChar buf[100];
      vg_assert(VG_(strlen)(who) <= 100-50);
      VG_(sprintf)(buf, "waiting for lock (%s)", who);
      print_sched_event(tid, buf);
   }
#endif

   /* First, acquire the_BigLock.  We can't do anything else safely
      prior to this point.  Even doing debug printing prior to this
      point is, technically, wrong. */
   VG_(acquire_BigLock_LL)(NULL);

   tst = VG_(get_ThreadState)(tid);

   vg_assert(tst->status != VgTs_Runnable);

   tst->status = VgTs_Runnable;

   if (VG_(running_tid) != VG_INVALID_THREADID)
      VG_(printf)("tid %d found %d running\n", tid, VG_(running_tid));
   vg_assert(VG_(running_tid) == VG_INVALID_THREADID);
   VG_(running_tid) = tid;

   { Addr gsp = VG_(get_SP)(tid);
     if (NULL != VG_(tdict).track_new_mem_stack_w_ECU)
        VG_(unknown_SP_update_w_ECU)(gsp, gsp, 0/*unknown origin*/);
     else
        VG_(unknown_SP_update)(gsp, gsp);
   }

   if (VG_(clo_trace_sched)) {
      HChar buf[150];
      vg_assert(VG_(strlen)(who) <= 150-50);
      VG_(sprintf)(buf, " acquired lock (%s)", who);
      print_sched_event(tid, buf);
   }
}

/*
   Set a thread into a sleeping state, and give up exclusive access to
   the CPU.  On return, the thread must be prepared to block until it
   is ready to run again (generally this means blocking in a syscall,
   but it may mean that we remain in a Runnable state and we're just
   yielding the CPU to another thread).
 */
void VG_(release_BigLock)(ThreadId tid, ThreadStatus sleepstate,
                          const HChar* who)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   vg_assert(tst->status == VgTs_Runnable);

   vg_assert(sleepstate == VgTs_WaitSys ||
             sleepstate == VgTs_Yielding);

   tst->status = sleepstate;

   vg_assert(VG_(running_tid) == tid);
   VG_(running_tid) = VG_INVALID_THREADID;

   if (VG_(clo_trace_sched)) {
      HChar buf[200];
      vg_assert(VG_(strlen)(who) <= 200-100);
      VG_(sprintf)(buf, "releasing lock (%s) -> %s",
                   who, VG_(name_of_ThreadStatus)(sleepstate));
      print_sched_event(tid, buf);
   }

   /* Release the_BigLock; this will reschedule any runnable
      thread. */
   VG_(release_BigLock_LL)(NULL);
}

static void init_BigLock(void)
{
   vg_assert(!the_BigLock);
   the_BigLock = ML_(create_sched_lock)();
}

static void deinit_BigLock(void)
{
   ML_(destroy_sched_lock)(the_BigLock);
   the_BigLock = NULL;
}

/* See pub_core_scheduler.h for description */
void VG_(acquire_BigLock_LL) ( const HChar* who )
{
   ML_(acquire_sched_lock)(the_BigLock);
}

/* See pub_core_scheduler.h for description */
void VG_(release_BigLock_LL) ( const HChar* who )
{
   ML_(release_sched_lock)(the_BigLock);
}

Bool VG_(owns_BigLock_LL) ( ThreadId tid )
{
   return (ML_(get_sched_lock_owner)(the_BigLock)
           == VG_(threads)[tid].os_state.lwpid);
}


/* Clear out the ThreadState and release the semaphore. Leaves the
   ThreadState in VgTs_Zombie state, so that it doesn't get
   reallocated until the caller is really ready. */
void VG_(exit_thread)(ThreadId tid)
{
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(VG_(is_exiting)(tid));

   mostly_clear_thread_record(tid);
   VG_(running_tid) = VG_INVALID_THREADID;

   /* There should still be a valid exitreason for this thread */
   vg_assert(VG_(threads)[tid].exitreason != VgSrc_None);

   if (VG_(clo_trace_sched))
      print_sched_event(tid, "release lock in VG_(exit_thread)");

   VG_(release_BigLock_LL)(NULL);
}

/* If 'tid' is blocked in a syscall, send it SIGVGKILL so as to get it
   out of the syscall and onto doing the next thing, whatever that is.
   If it isn't blocked in a syscall, this has no effect on the thread. */
void VG_(get_thread_out_of_syscall)(ThreadId tid)
{
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(!VG_(is_running_thread)(tid));

   if (VG_(threads)[tid].status == VgTs_WaitSys) {
      if (VG_(clo_trace_signals)) {
         VG_(message)(Vg_DebugMsg,
                      "get_thread_out_of_syscall zaps tid %d lwp %d\n",
                      tid, VG_(threads)[tid].os_state.lwpid);
      }
#  if defined(VGO_darwin)
      {
         // GrP fixme use mach primitives on darwin?
         // GrP fixme thread_abort_safely?
         // GrP fixme race for thread with WaitSys set but not in syscall yet?
         extern kern_return_t thread_abort(mach_port_t);
         thread_abort(VG_(threads)[tid].os_state.lwpid);
      }
#  else
      {
         __attribute__((unused))
         Int r = VG_(tkill)(VG_(threads)[tid].os_state.lwpid, VG_SIGVGKILL);
         /* JRS 2009-Mar-20: should we assert for r==0 (tkill succeeded)?
            I'm really not sure.  Here's a race scenario which argues
            that we shouldn't; but equally I'm not sure the scenario is
            even possible, because of constraints caused by the question
            of who holds the BigLock when.

            Target thread tid does sys_read on a socket and blocks.  This
            function gets called, and we observe correctly that tid's
            status is WaitSys but then for whatever reason this function
            goes very slowly for a while.  Then data arrives from
            wherever, tid's sys_read returns, tid exits.  Then we do
            tkill on tid, but tid no longer exists; tkill returns an
            error code and the assert fails. */
         /* vg_assert(r == 0); */
      }
#  endif
   }
}

/*
   Yield the CPU for a short time to let some other thread run.
 */
void VG_(vg_yield)(void)
{
   ThreadId tid = VG_(running_tid);

   vg_assert(tid != VG_INVALID_THREADID);
   vg_assert(VG_(threads)[tid].os_state.lwpid == VG_(gettid)());

   VG_(release_BigLock)(tid, VgTs_Yielding, "VG_(vg_yield)");

   /*
      Tell the kernel we're yielding.
    */
   VG_(do_syscall0)(__NR_sched_yield);

   VG_(acquire_BigLock)(tid, "VG_(vg_yield)");
}


/* Set the standard set of blocked signals, used whenever we're not
   running a client syscall. */
static void block_signals(void)
{
   vki_sigset_t mask;

   VG_(sigfillset)(&mask);

   /* Don't block these because they're synchronous */
   VG_(sigdelset)(&mask, VKI_SIGSEGV);
   VG_(sigdelset)(&mask, VKI_SIGBUS);
   VG_(sigdelset)(&mask, VKI_SIGFPE);
   VG_(sigdelset)(&mask, VKI_SIGILL);
   VG_(sigdelset)(&mask, VKI_SIGTRAP);

   /* Can't block these anyway */
   VG_(sigdelset)(&mask, VKI_SIGSTOP);
   VG_(sigdelset)(&mask, VKI_SIGKILL);

   VG_(sigprocmask)(VKI_SIG_SETMASK, &mask, NULL);
}

static void os_state_clear(ThreadState *tst)
{
   tst->os_state.lwpid       = 0;
   tst->os_state.threadgroup = 0;
#  if defined(VGO_linux)
   /* no other fields to clear */
#  elif defined(VGO_darwin)
   tst->os_state.post_mach_trap_fn = NULL;
   tst->os_state.pthread           = 0;
   tst->os_state.func_arg          = 0;
   VG_(memset)(&tst->os_state.child_go, 0, sizeof(tst->os_state.child_go));
   VG_(memset)(&tst->os_state.child_done, 0, sizeof(tst->os_state.child_done));
   tst->os_state.wq_jmpbuf_valid   = False;
   tst->os_state.remote_port       = 0;
   tst->os_state.msgh_id           = 0;
   VG_(memset)(&tst->os_state.mach_args, 0, sizeof(tst->os_state.mach_args));
#  else
#    error "Unknown OS"
#  endif
}

static void os_state_init(ThreadState *tst)
{
   tst->os_state.valgrind_stack_base    = 0;
   tst->os_state.valgrind_stack_init_SP = 0;
   os_state_clear(tst);
}

static
void mostly_clear_thread_record ( ThreadId tid )
{
   vki_sigset_t savedmask;

   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   VG_(cleanup_thread)(&VG_(threads)[tid].arch);
   VG_(threads)[tid].tid = tid;

   /* Leave the thread in Zombie, so that it doesn't get reallocated
      until the caller is finally done with the thread stack. */
   VG_(threads)[tid].status = VgTs_Zombie;

   VG_(sigemptyset)(&VG_(threads)[tid].sig_mask);
   VG_(sigemptyset)(&VG_(threads)[tid].tmp_sig_mask);

   os_state_clear(&VG_(threads)[tid]);

   /* start with no altstack */
   VG_(threads)[tid].altstack.ss_sp = (void *)0xdeadbeef;
   VG_(threads)[tid].altstack.ss_size = 0;
   VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;

   VG_(clear_out_queued_signals)(tid, &savedmask);

   VG_(threads)[tid].sched_jmpbuf_valid = False;
}

/*
   Called in the child after fork.  If the parent has multiple
   threads, then we've inherited a VG_(threads) array describing them,
   but only the thread which called fork() is actually alive in the
   child.  This function needs to clean up all those other thread
   structures.

   Whichever tid in the parent called fork() becomes the
   master_tid in the child.  That's because the only living slot in
   VG_(threads) in the child after fork is VG_(threads)[tid], and it
   would be too hard to try to re-number the thread and relocate the
   thread state down to VG_(threads)[1].

   This function also needs to reinitialize the_BigLock, since
   otherwise we may end up sharing its state with the parent, which
   would be deeply confusing.
*/
static void sched_fork_cleanup(ThreadId me)
{
   ThreadId tid;
   vg_assert(VG_(running_tid) == me);

#  if defined(VGO_darwin)
   // GrP fixme hack reset Mach ports
   VG_(mach_init)();
#  endif

   VG_(threads)[me].os_state.lwpid = VG_(gettid)();
   VG_(threads)[me].os_state.threadgroup = VG_(getpid)();

   /* clear out all the unused thread slots */
   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (tid != me) {
         mostly_clear_thread_record(tid);
         VG_(threads)[tid].status = VgTs_Empty;
         VG_(clear_syscallInfo)(tid);
      }
   }

   /* re-init and take the sema */
   deinit_BigLock();
   init_BigLock();
   VG_(acquire_BigLock_LL)(NULL);
}


/* First phase of initialisation of the scheduler.  Initialise the
   bigLock, zeroise the VG_(threads) structure and decide on the
   ThreadId of the root thread.
*/
ThreadId VG_(scheduler_init_phase1) ( void )
{
   Int i;
   ThreadId tid_main;

   VG_(debugLog)(1,"sched","sched_init_phase1\n");

   if (VG_(clo_fair_sched) != disable_fair_sched
       && !ML_(set_sched_lock_impl)(sched_lock_ticket)
       && VG_(clo_fair_sched) == enable_fair_sched)
   {
      VG_(printf)("Error: fair scheduling is not supported on this system.\n");
      VG_(exit)(1);
   }

   if (VG_(clo_verbosity) > 1) {
      VG_(message)(Vg_DebugMsg,
                   "Scheduler: using %s scheduler lock implementation.\n",
                   ML_(get_sched_lock_name)());
   }

   init_BigLock();

   for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
      /* Paranoia .. completely zero it out. */
      VG_(memset)( & VG_(threads)[i], 0, sizeof( VG_(threads)[i] ) );

      VG_(threads)[i].sig_queue = NULL;

      os_state_init(&VG_(threads)[i]);
      mostly_clear_thread_record(i);

      VG_(threads)[i].status                    = VgTs_Empty;
      VG_(threads)[i].client_stack_szB          = 0;
      VG_(threads)[i].client_stack_highest_word = (Addr)NULL;
      VG_(threads)[i].err_disablement_level     = 0;
      VG_(threads)[i].thread_name               = NULL;
   }

   tid_main = VG_(alloc_ThreadState)();

   /* Bleh.  Unfortunately there are various places in the system that
      assume that the main thread has a ThreadId of 1.
      - Helgrind (possibly)
      - stack overflow message in default_action() in m_signals.c
      - definitely a lot more places
   */
   vg_assert(tid_main == 1);

   return tid_main;
}


/* Second phase of initialisation of the scheduler.  Given the root
   ThreadId computed by first phase of initialisation, fill in stack
   details and acquire bigLock.  Initialise the scheduler.  This is
   called at startup.  The caller subsequently initialises the guest
   state components of this main thread.
*/
void VG_(scheduler_init_phase2) ( ThreadId tid_main,
                                  Addr     clstack_end,
                                  SizeT    clstack_size )
{
   VG_(debugLog)(1,"sched","sched_init_phase2: tid_main=%d, "
                   "cls_end=0x%lx, cls_sz=%ld\n",
                   tid_main, clstack_end, clstack_size);

   vg_assert(VG_IS_PAGE_ALIGNED(clstack_end+1));
   vg_assert(VG_IS_PAGE_ALIGNED(clstack_size));

   VG_(threads)[tid_main].client_stack_highest_word
      = clstack_end + 1 - sizeof(UWord);
   VG_(threads)[tid_main].client_stack_szB
      = clstack_size;

   VG_(atfork)(NULL, NULL, sched_fork_cleanup);
}


/* ---------------------------------------------------------------------
   Helpers for running translations.
   ------------------------------------------------------------------ */

/* Use gcc's built-in setjmp/longjmp.  longjmp must not restore signal
   mask state, but does need to pass "val" through.  jumped must be a
   volatile UWord. */
#define SCHEDSETJMP(tid, jumped, stmt)                                  \
   do {                                                                 \
      ThreadState * volatile _qq_tst = VG_(get_ThreadState)(tid);       \
                                                                        \
      (jumped) = VG_MINIMAL_SETJMP(_qq_tst->sched_jmpbuf);              \
      if ((jumped) == ((UWord)0)) {                                     \
         vg_assert(!_qq_tst->sched_jmpbuf_valid);                       \
         _qq_tst->sched_jmpbuf_valid = True;                            \
         stmt;                                                          \
      } else if (VG_(clo_trace_sched))                                  \
         VG_(printf)("SCHEDSETJMP(line %d) tid %d, jumped=%ld\n",       \
                     __LINE__, tid, jumped);                            \
      vg_assert(_qq_tst->sched_jmpbuf_valid);                           \
      _qq_tst->sched_jmpbuf_valid = False;                              \
   } while(0)


/* Do various guest state alignment checks prior to running a thread.
   Specifically, check that what we have matches Vex's guest state
   layout requirements.  See libvex.h for details, but in short the
   requirements are: There must be no holes in between the primary
   guest state, its two copies, and the spill area.  In short, all 4
   areas must have a 16-aligned size and be 16-aligned, and placed
   back-to-back. */
static void do_pre_run_checks ( ThreadState* tst )
{
   Addr a_vex     = (Addr) & tst->arch.vex;
   Addr a_vexsh1  = (Addr) & tst->arch.vex_shadow1;
   Addr a_vexsh2  = (Addr) & tst->arch.vex_shadow2;
   Addr a_spill   = (Addr) & tst->arch.vex_spill;
   UInt sz_vex    = (UInt) sizeof tst->arch.vex;
   UInt sz_vexsh1 = (UInt) sizeof tst->arch.vex_shadow1;
   UInt sz_vexsh2 = (UInt) sizeof tst->arch.vex_shadow2;
   UInt sz_spill  = (UInt) sizeof tst->arch.vex_spill;

   if (0)
      VG_(printf)("gst %p %d, sh1 %p %d, "
                  "sh2 %p %d, spill %p %d\n",
                  (void*)a_vex, sz_vex,
                  (void*)a_vexsh1, sz_vexsh1,
                  (void*)a_vexsh2, sz_vexsh2,
                  (void*)a_spill, sz_spill );

   vg_assert(VG_IS_16_ALIGNED(sz_vex));
   vg_assert(VG_IS_16_ALIGNED(sz_vexsh1));
   vg_assert(VG_IS_16_ALIGNED(sz_vexsh2));
   vg_assert(VG_IS_16_ALIGNED(sz_spill));

   vg_assert(VG_IS_16_ALIGNED(a_vex));
   vg_assert(VG_IS_16_ALIGNED(a_vexsh1));
   vg_assert(VG_IS_16_ALIGNED(a_vexsh2));
   vg_assert(VG_IS_16_ALIGNED(a_spill));

   /* Check that the guest state and its two shadows have the same
      size, and that there are no holes in between.  The latter is
      important because Memcheck assumes that it can reliably access
      the shadows by indexing off a pointer to the start of the
      primary guest state area. */
   vg_assert(sz_vex == sz_vexsh1);
   vg_assert(sz_vex == sz_vexsh2);
   vg_assert(a_vex + 1 * sz_vex == a_vexsh1);
   vg_assert(a_vex + 2 * sz_vex == a_vexsh2);
   /* Also check there's no hole between the second shadow area and
      the spill area. */
   vg_assert(sz_spill == LibVEX_N_SPILL_BYTES);
   vg_assert(a_vex + 3 * sz_vex == a_spill);

#  if defined(VGA_x86)
   /* x86 XMM regs must form an array, ie, have no holes in
      between. */
   vg_assert(
      (offsetof(VexGuestX86State,guest_XMM7)
       - offsetof(VexGuestX86State,guest_XMM0))
      == (8/*#regs*/-1) * 16/*bytes per reg*/
   );
   vg_assert(VG_IS_16_ALIGNED(offsetof(VexGuestX86State,guest_XMM0)));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestX86State,guest_FPREG)));
   vg_assert(8 == offsetof(VexGuestX86State,guest_EAX));
   vg_assert(VG_IS_4_ALIGNED(offsetof(VexGuestX86State,guest_EAX)));
   vg_assert(VG_IS_4_ALIGNED(offsetof(VexGuestX86State,guest_EIP)));
#  endif

#  if defined(VGA_amd64)
   /* amd64 YMM regs must form an array, ie, have no holes in
      between. */
   vg_assert(
      (offsetof(VexGuestAMD64State,guest_YMM16)
       - offsetof(VexGuestAMD64State,guest_YMM0))
      == (17/*#regs*/-1) * 32/*bytes per reg*/
   );
   vg_assert(VG_IS_16_ALIGNED(offsetof(VexGuestAMD64State,guest_YMM0)));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_FPREG)));
   vg_assert(16 == offsetof(VexGuestAMD64State,guest_RAX));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_RAX)));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_RIP)));
#  endif

#  if defined(VGA_ppc32) || defined(VGA_ppc64)
   /* ppc guest_state vector regs must be 16 byte aligned for
      loads/stores.  This is important! */
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VSR0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_VSR0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_VSR0));
   /* be extra paranoid .. */
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VSR1));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_VSR1));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_VSR1));
#  endif

#  if defined(VGA_arm)
   /* arm guest_state VFP regs must be 8 byte aligned for
      loads/stores.  Let's use 16 just to be on the safe side. */
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_D0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_D0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_D0));
   /* be extra paranoid .. */
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex.guest_D1));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow1.guest_D1));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow2.guest_D1));
#  endif

#  if defined(VGA_arm64)
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex.guest_X0));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow1.guest_X0));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow2.guest_X0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_Q0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_Q0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_Q0));
#  endif

#  if defined(VGA_s390x)
   /* no special requirements */
#  endif

#  if defined(VGA_mips32) || defined(VGA_mips64)
   /* no special requirements */
#  endif
}

// NO_VGDB_POLL value ensures vgdb is not polled, while
// VGDB_POLL_ASAP ensures that the next scheduler call
// will cause a poll.
#define NO_VGDB_POLL    0xffffffffffffffffULL
#define VGDB_POLL_ASAP  0x0ULL

void VG_(disable_vgdb_poll) (void )
{
   vgdb_next_poll = NO_VGDB_POLL;
}
void VG_(force_vgdb_poll) ( void )
{
   vgdb_next_poll = VGDB_POLL_ASAP;
}

827/* Run the thread tid for a while, and return a VG_TRC_* value
sewardj291849f2012-04-20 23:58:55 +0000828 indicating why VG_(disp_run_translations) stopped, and possibly an
829 auxiliary word. Also, only allow the thread to run for at most
830 *dispatchCtrP events. If (as is the normal case) use_alt_host_addr
831 is False, we are running ordinary redir'd translations, and we
832 should therefore start by looking up the guest next IP in TT. If
833 it is True then we ignore the guest next IP and just run from
834 alt_host_addr, which presumably points at host code for a no-redir
835 translation.
836
837 Return results are placed in two_words. two_words[0] is set to the
838 TRC. In the case where that is VG_TRC_CHAIN_ME_TO_{SLOW,FAST}_EP,
839 the address to patch is placed in two_words[1].
840*/
841static
842void run_thread_for_a_while ( /*OUT*/HWord* two_words,
843 /*MOD*/Int* dispatchCtrP,
844 ThreadId tid,
845 HWord alt_host_addr,
846 Bool use_alt_host_addr )
sewardj0ec07f32006-01-12 12:32:32 +0000847{
sewardj291849f2012-04-20 23:58:55 +0000848 volatile HWord jumped = 0;
849 volatile ThreadState* tst = NULL; /* stop gcc complaining */
850 volatile Int done_this_time = 0;
851 volatile HWord host_code_addr = 0;
sewardj0ec07f32006-01-12 12:32:32 +0000852
853 /* Paranoia */
854 vg_assert(VG_(is_valid_tid)(tid));
855 vg_assert(VG_(is_running_thread)(tid));
856 vg_assert(!VG_(is_exiting)(tid));
sewardj291849f2012-04-20 23:58:55 +0000857 vg_assert(*dispatchCtrP > 0);
sewardj0ec07f32006-01-12 12:32:32 +0000858
859 tst = VG_(get_ThreadState)(tid);
sewardj7cf4e6b2008-05-01 20:24:26 +0000860 do_pre_run_checks( (ThreadState*)tst );
sewardj0ec07f32006-01-12 12:32:32 +0000861 /* end Paranoia */
862
sewardjbba6f312012-04-21 23:05:57 +0000863 /* Futz with the XIndir stats counters. */
864 vg_assert(VG_(stats__n_xindirs_32) == 0);
865 vg_assert(VG_(stats__n_xindir_misses_32) == 0);
866
sewardj291849f2012-04-20 23:58:55 +0000867 /* Clear return area. */
868 two_words[0] = two_words[1] = 0;
869
870 /* Figure out where we're starting from. */
871 if (use_alt_host_addr) {
872 /* unusual case -- no-redir translation */
873 host_code_addr = alt_host_addr;
874 } else {
875 /* normal case -- redir translation */
876 UInt cno = (UInt)VG_TT_FAST_HASH((Addr)tst->arch.vex.VG_INSTR_PTR);
877 if (LIKELY(VG_(tt_fast)[cno].guest == (Addr)tst->arch.vex.VG_INSTR_PTR))
878 host_code_addr = VG_(tt_fast)[cno].host;
879 else {
880 AddrH res = 0;
881 /* not found in VG_(tt_fast). Searching here the transtab
882 improves the performance compared to returning directly
883 to the scheduler. */
884 Bool found = VG_(search_transtab)(&res, NULL, NULL,
885 (Addr)tst->arch.vex.VG_INSTR_PTR,
886 True/*upd cache*/
887 );
888 if (LIKELY(found)) {
889 host_code_addr = res;
890 } else {
891 /* At this point, we know that we intended to start at a
892 normal redir translation, but it was not found. In
893 which case we can return now claiming it's not
894 findable. */
895 two_words[0] = VG_TRC_INNER_FASTMISS; /* hmm, is that right? */
896 return;
897 }
898 }
899 }
900 /* We have either a no-redir or a redir translation. */
901 vg_assert(host_code_addr != 0); /* implausible */
902
sewardj0ec07f32006-01-12 12:32:32 +0000903 /* there should be no undealt-with signals */
904 //vg_assert(VG_(threads)[tid].siginfo.si_signo == 0);
905
sewardj291849f2012-04-20 23:58:55 +0000906 /* Set up event counter stuff for the run. */
907 tst->arch.vex.host_EvC_COUNTER = *dispatchCtrP;
908 tst->arch.vex.host_EvC_FAILADDR
909 = (HWord)VG_(fnptr_to_fnentry)( &VG_(disp_cp_evcheck_fail) );
910
sewardjf54342a2006-10-17 01:51:24 +0000911 if (0) {
912 vki_sigset_t m;
913 Int i, err = VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &m);
914 vg_assert(err == 0);
915 VG_(printf)("tid %d: entering code with unblocked signals: ", tid);
916 for (i = 1; i <= _VKI_NSIG; i++)
917 if (!VG_(sigismember)(&m, i))
918 VG_(printf)("%d ", i);
919 VG_(printf)("\n");
920 }
921
sewardj291849f2012-04-20 23:58:55 +0000922 /* Set up return-value area. */
923
sewardj97561812006-12-23 01:21:12 +0000924 // Tell the tool this thread is about to run client code
njn3e32c872006-12-24 07:51:17 +0000925 VG_TRACK( start_client_code, tid, bbs_done );
sewardj97561812006-12-23 01:21:12 +0000926
sewardj0ec07f32006-01-12 12:32:32 +0000927 vg_assert(VG_(in_generated_code) == False);
928 VG_(in_generated_code) = True;
929
930 SCHEDSETJMP(
931 tid,
932 jumped,
sewardj291849f2012-04-20 23:58:55 +0000933 VG_(disp_run_translations)(
934 two_words,
935 (void*)&tst->arch.vex,
936 host_code_addr
937 )
sewardj0ec07f32006-01-12 12:32:32 +0000938 );
939
sewardjde764e82007-11-09 23:13:22 +0000940 vg_assert(VG_(in_generated_code) == True);
sewardj0ec07f32006-01-12 12:32:32 +0000941 VG_(in_generated_code) = False;
942
sewardj291849f2012-04-20 23:58:55 +0000943 if (jumped != (HWord)0) {
sewardj0ec07f32006-01-12 12:32:32 +0000944 /* We get here if the client took a fault that caused our signal
945 handler to longjmp. */
sewardj291849f2012-04-20 23:58:55 +0000946 vg_assert(two_words[0] == 0 && two_words[1] == 0); // correct?
947 two_words[0] = VG_TRC_FAULT_SIGNAL;
948 two_words[1] = 0;
njn1dcee092009-02-24 03:07:37 +0000949 block_signals();
sewardj0ec07f32006-01-12 12:32:32 +0000950 }
951
sewardjbba6f312012-04-21 23:05:57 +0000952 /* Merge the 32-bit XIndir/miss counters into the 64 bit versions,
953 and zero out the 32-bit ones in preparation for the next run of
954 generated code. */
955 stats__n_xindirs += (ULong)VG_(stats__n_xindirs_32);
956 VG_(stats__n_xindirs_32) = 0;
957 stats__n_xindir_misses += (ULong)VG_(stats__n_xindir_misses_32);
958 VG_(stats__n_xindir_misses_32) = 0;
959
960 /* Inspect the event counter. */
sewardj291849f2012-04-20 23:58:55 +0000961 vg_assert((Int)tst->arch.vex.host_EvC_COUNTER >= -1);
962 vg_assert(tst->arch.vex.host_EvC_FAILADDR
963 == (HWord)VG_(fnptr_to_fnentry)( &VG_(disp_cp_evcheck_fail)) );
964
965 done_this_time = *dispatchCtrP - ((Int)tst->arch.vex.host_EvC_COUNTER + 1);
sewardj0ec07f32006-01-12 12:32:32 +0000966
967 vg_assert(done_this_time >= 0);
968 bbs_done += (ULong)done_this_time;
969
sewardj291849f2012-04-20 23:58:55 +0000970 *dispatchCtrP -= done_this_time;
971 vg_assert(*dispatchCtrP >= 0);
972
sewardj97561812006-12-23 01:21:12 +0000973 // Tell the tool this thread has stopped running client code
njn3e32c872006-12-24 07:51:17 +0000974 VG_TRACK( stop_client_code, tid, bbs_done );
sewardj97561812006-12-23 01:21:12 +0000975
sewardj3b290482011-05-06 21:02:55 +0000976 if (bbs_done >= vgdb_next_poll) {
977 if (VG_(clo_vgdb_poll))
978 vgdb_next_poll = bbs_done + (ULong)VG_(clo_vgdb_poll);
979 else
980 /* value was changed due to gdbserver invocation via ptrace */
981 vgdb_next_poll = NO_VGDB_POLL;
982 if (VG_(gdbserver_activity) (tid))
983 VG_(gdbserver) (tid);
984 }
985
   /* TRC value and possible auxiliary patch-address word are already
      in two_words[0] and [1] respectively, as a result of the call to
      VG_(disp_run_translations). */
   /* Stay sane .. */
   if (two_words[0] == VG_TRC_CHAIN_ME_TO_SLOW_EP
       || two_words[0] == VG_TRC_CHAIN_ME_TO_FAST_EP) {
      vg_assert(two_words[1] != 0); /* we have a legit patch addr */
   } else {
      vg_assert(two_words[1] == 0); /* nobody messed with it */
   }
}


/* ---------------------------------------------------------------------
   The scheduler proper.
   ------------------------------------------------------------------ */

static void handle_tt_miss ( ThreadId tid )
{
   Bool found;
   Addr ip = VG_(get_IP)(tid);

   /* Trivial event.  Miss in the fast-cache.  Do a full
      lookup for it. */
   found = VG_(search_transtab)( NULL, NULL, NULL,
                                 ip, True/*upd_fast_cache*/ );
   if (UNLIKELY(!found)) {
      /* Not found; we need to request a translation. */
      if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/,
                          bbs_done, True/*allow redirection*/ )) {
         found = VG_(search_transtab)( NULL, NULL, NULL,
                                       ip, True );
         vg_assert2(found, "handle_tt_miss: missing tt_fast entry");

      } else {
         // If VG_(translate)() fails, it's because it had to throw a
         // signal because the client jumped to a bad address.  That
         // means that either a signal has been set up for delivery,
         // or the thread has been marked for termination.  Either
         // way, we just need to go back into the scheduler loop.
      }
   }
}

static
void handle_chain_me ( ThreadId tid, void* place_to_chain, Bool toFastEP )
{
   Bool found    = False;
   Addr ip       = VG_(get_IP)(tid);
   UInt to_sNo   = (UInt)-1;
   UInt to_tteNo = (UInt)-1;

   found = VG_(search_transtab)( NULL, &to_sNo, &to_tteNo,
                                 ip, False/*dont_upd_fast_cache*/ );
   if (!found) {
      /* Not found; we need to request a translation. */
      if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/,
                          bbs_done, True/*allow redirection*/ )) {
         found = VG_(search_transtab)( NULL, &to_sNo, &to_tteNo,
                                       ip, False );
         vg_assert2(found, "handle_chain_me: missing tt_fast entry");
      } else {
         // If VG_(translate)() fails, it's because it had to throw a
         // signal because the client jumped to a bad address.  That
         // means that either a signal has been set up for delivery,
         // or the thread has been marked for termination.  Either
         // way, we just need to go back into the scheduler loop.
         return;
      }
   }
   vg_assert(found);
   vg_assert(to_sNo != -1);
   vg_assert(to_tteNo != -1);

   /* So, finally we know where to patch through to.  Do the patching
      and update the various admin tables that allow it to be undone
      in the case that the destination block gets deleted. */
   VG_(tt_tc_do_chaining)( place_to_chain,
                           to_sNo, to_tteNo, toFastEP );
}
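
/* Sketch only (kept un-compiled): how a dispatch loop could interpret
   the two_words results produced by run_thread_for_a_while() above,
   using the handlers defined in this section.  This is a simplified
   illustration that assumes a ThreadId 'tid' in scope; it is not the
   scheduler's actual dispatch code. */
#if 0
static void dispatch_loop_sketch ( ThreadId tid )
{
   HWord trc[2];
   Int   dispatch_ctr = SCHEDULING_QUANTUM;
   run_thread_for_a_while( &trc[0], &dispatch_ctr, tid,
                           0/*ignored*/, False/*use redir'd translations*/ );
   switch (trc[0]) {
      case VG_TRC_INNER_FASTMISS:
         handle_tt_miss(tid);   /* make or find the missing translation */
         break;
      case VG_TRC_CHAIN_ME_TO_SLOW_EP:
         handle_chain_me(tid, (void*)trc[1], False); /* patch slow entry */
         break;
      case VG_TRC_CHAIN_ME_TO_FAST_EP:
         handle_chain_me(tid, (void*)trc[1], True);  /* patch fast entry */
         break;
      default:
         break; /* syscalls, signals, exits etc. are handled elsewhere */
   }
}
#endif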

static void handle_syscall(ThreadId tid, UInt trc)
{
   ThreadState * volatile tst = VG_(get_ThreadState)(tid);
   volatile UWord jumped;

   /* Syscall may or may not block; either way, it will be
      complete by the time this call returns, and we'll be
      runnable again.  We could take a signal while the
      syscall runs. */

   if (VG_(clo_sanity_level) >= 3)
      VG_(am_do_sync_check)("(BEFORE SYSCALL)",__FILE__,__LINE__);

   SCHEDSETJMP(tid, jumped, VG_(client_syscall)(tid, trc));

   if (VG_(clo_sanity_level) >= 3)
      VG_(am_do_sync_check)("(AFTER SYSCALL)",__FILE__,__LINE__);

   if (!VG_(is_running_thread)(tid))
      VG_(printf)("tid %d not running; VG_(running_tid)=%d, tid %d status %d\n",
                  tid, VG_(running_tid), tid, tst->status);
   vg_assert(VG_(is_running_thread)(tid));

   if (jumped != (UWord)0) {
      block_signals();
      VG_(poll_signals)(tid);
   }
}

/* tid just requested a jump to the noredir version of its current
   program counter.  So make up that translation if needed, run it,
   and return the resulting thread return code in two_words[]. */
static
void handle_noredir_jump ( /*OUT*/HWord* two_words,
                           /*MOD*/Int*   dispatchCtrP,
                           ThreadId      tid )
{
   /* Clear return area. */
   two_words[0] = two_words[1] = 0;

   AddrH hcode = 0;
   Addr  ip    = VG_(get_IP)(tid);

   Bool  found = VG_(search_unredir_transtab)( &hcode, ip );
   if (!found) {
      /* Not found; we need to request a translation. */
      if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/, bbs_done,
                          False/*NO REDIRECTION*/ )) {

         found = VG_(search_unredir_transtab)( &hcode, ip );
         vg_assert2(found, "unredir translation missing after creation?!");
      } else {
         // If VG_(translate)() fails, it's because it had to throw a
         // signal because the client jumped to a bad address.  That
         // means that either a signal has been set up for delivery,
         // or the thread has been marked for termination.  Either
         // way, we just need to go back into the scheduler loop.
         two_words[0] = VG_TRC_BORING;
         return;
      }

   }

   vg_assert(found);
   vg_assert(hcode != 0);

   /* Otherwise run it and return the resulting VG_TRC_* value. */
   vg_assert(*dispatchCtrP > 0); /* so as to guarantee progress */
   run_thread_for_a_while( two_words, dispatchCtrP, tid,
                           hcode, True/*use hcode*/ );
}


/*
   Run a thread until it wants to exit.

   We assume that the caller has already called VG_(acquire_BigLock) for
   us, so we own the VCPU.  Also, all signals are blocked.
 */
VgSchedReturnCode VG_(scheduler) ( ThreadId tid )
{
   /* Holds the remaining size of this thread's "timeslice". */
   Int dispatch_ctr = 0;

   ThreadState *tst = VG_(get_ThreadState)(tid);
   static Bool vgdb_startup_action_done = False;

   if (VG_(clo_trace_sched))
      print_sched_event(tid, "entering VG_(scheduler)");

sewardj3b290482011-05-06 21:02:55 +00001157 /* Do vgdb initialization (but once). Only the first (main) task
1158 starting up will do the below.
1159 Initialize gdbserver earlier than at the first
1160 thread VG_(scheduler) is causing problems:
1161 * at the end of VG_(scheduler_init_phase2) :
1162 The main thread is in VgTs_Init state, but in a not yet
1163 consistent state => the thread cannot be reported to gdb
1164 (e.g. causes an assert in LibVEX_GuestX86_get_eflags when giving
1165 back the guest registers to gdb).
1166 * at end of valgrind_main, just
1167 before VG_(main_thread_wrapper_NORETURN)(1) :
 1168 The main thread is still in VgTs_Init state but is further
 1169 along. However, the thread state is not yet
 1170 completely initialized: among other things, the os_state is not yet
 1171 fully set => the thread is then not properly reported to gdb,
 1172 which then gets confused (causing e.g. a duplicate thread to be
 1173 shown, without a thread id).
 1174 * it would be possible to initialize gdbserver "lower" in the
 1175 call stack (e.g. in VG_(main_thread_wrapper_NORETURN)) but
 1176 those functions are platform dependent, and the place at which
 1177 the thread state is completely initialized is no longer
 1178 specific to the main thread (so a similar "do it only
 1179 once" guard would still be needed).
1180
1181 => a "once only" initialization here is the best compromise. */
1182 if (!vgdb_startup_action_done) {
1183 vg_assert(tid == 1); // it must be the main thread.
1184 vgdb_startup_action_done = True;
1185 if (VG_(clo_vgdb) != Vg_VgdbNo) {
 1186 /* If we have to poll, ensure we do an initial poll at the first
 1187 scheduler call. Otherwise, ensure no poll (unless interrupted
 1188 by ptrace). */
1189 if (VG_(clo_vgdb_poll))
1190 VG_(force_vgdb_poll) ();
1191 else
1192 VG_(disable_vgdb_poll) ();
1193
1194 vg_assert (VG_(dyn_vgdb_error) == VG_(clo_vgdb_error));
1195 /* As we are initializing, VG_(dyn_vgdb_error) can't have been
1196 changed yet. */
1197
sewardj997546c2011-05-17 18:14:53 +00001198 VG_(gdbserver_prerun_action) (1);
sewardj3b290482011-05-06 21:02:55 +00001199 } else {
1200 VG_(disable_vgdb_poll) ();
1201 }
1202 }
1203
sewardjb5f6f512005-03-10 23:59:00 +00001204 /* set the proper running signal mask */
njn1dcee092009-02-24 03:07:37 +00001205 block_signals();
sewardjb5f6f512005-03-10 23:59:00 +00001206
1207 vg_assert(VG_(is_running_thread)(tid));
sewardje663cb92002-04-12 10:26:32 +00001208
sewardj291849f2012-04-20 23:58:55 +00001209 dispatch_ctr = SCHEDULING_QUANTUM;
sewardj6072c362002-04-19 14:40:57 +00001210
sewardjf54342a2006-10-17 01:51:24 +00001211 while (!VG_(is_exiting)(tid)) {
1212
sewardj291849f2012-04-20 23:58:55 +00001213 vg_assert(dispatch_ctr >= 0);
1214 if (dispatch_ctr == 0) {
sewardjf54342a2006-10-17 01:51:24 +00001215
sewardjf54342a2006-10-17 01:51:24 +00001216 /* Our slice is done, so yield the CPU to another thread. On
 1217 Linux, this doesn't insert a sleep between giving up the CPU
sewardj6e9de462011-06-28 07:25:29 +00001218 and running again, since that would take too much time. */
sewardjf54342a2006-10-17 01:51:24 +00001219
1220 /* 4 July 06: it seems that a zero-length nsleep is needed to
1221 cause async thread cancellation (canceller.c) to terminate
1222 in finite time; else it is in some kind of race/starvation
1223 situation and completion is arbitrarily delayed (although
1224 this is not a deadlock).
1225
1226 Unfortunately these sleeps cause MPI jobs not to terminate
1227 sometimes (some kind of livelock). So sleeping once
1228 every N opportunities appears to work. */
1229
1230 /* 3 Aug 06: doing sys__nsleep works but crashes some apps.
1231 sys_yield also helps the problem, whilst not crashing apps. */
1232
sewardjad0a3a82006-12-17 18:58:55 +00001233 VG_(release_BigLock)(tid, VgTs_Yielding,
1234 "VG_(scheduler):timeslice");
sewardjf54342a2006-10-17 01:51:24 +00001235 /* ------------ now we don't have The Lock ------------ */
1236
sewardjad0a3a82006-12-17 18:58:55 +00001237 VG_(acquire_BigLock)(tid, "VG_(scheduler):timeslice");
sewardjf54342a2006-10-17 01:51:24 +00001238 /* ------------ now we do have The Lock ------------ */
sewardje663cb92002-04-12 10:26:32 +00001239
sewardjb5f6f512005-03-10 23:59:00 +00001240 /* OK, do some relatively expensive housekeeping stuff */
1241 scheduler_sanity(tid);
1242 VG_(sanity_check_general)(False);
sewardje663cb92002-04-12 10:26:32 +00001243
sewardjb5f6f512005-03-10 23:59:00 +00001244 /* Look for any pending signals for this thread, and set them up
1245 for delivery */
1246 VG_(poll_signals)(tid);
sewardje663cb92002-04-12 10:26:32 +00001247
sewardjb5f6f512005-03-10 23:59:00 +00001248 if (VG_(is_exiting)(tid))
1249 break; /* poll_signals picked up a fatal signal */
sewardje663cb92002-04-12 10:26:32 +00001250
sewardjb5f6f512005-03-10 23:59:00 +00001251 /* For stats purposes only. */
1252 n_scheduling_events_MAJOR++;
sewardje663cb92002-04-12 10:26:32 +00001253
sewardjb5f6f512005-03-10 23:59:00 +00001254 /* Figure out how many bbs to ask run_thread_for_a_while to do.
 1255 Note that the dispatcher decrements the counter before testing
 1256 it for zero, so that if dispatch_ctr is set to N you get at most
 1257 N-1 iterations. Also this means that dispatch_ctr must
 1258 exceed zero before entering the inner loop. Note also that the
 1259 decrement is done before the bb is actually run, so you
 1260 always get at least one decrement even if nothing happens. */
sewardj291849f2012-04-20 23:58:55 +00001261 // FIXME is this right?
1262 dispatch_ctr = SCHEDULING_QUANTUM;
jsgf855d93d2003-10-13 22:26:55 +00001263
sewardjb5f6f512005-03-10 23:59:00 +00001264 /* paranoia ... */
1265 vg_assert(tst->tid == tid);
1266 vg_assert(tst->os_state.lwpid == VG_(gettid)());
sewardje663cb92002-04-12 10:26:32 +00001267 }
1268
sewardjb5f6f512005-03-10 23:59:00 +00001269 /* For stats purposes only. */
1270 n_scheduling_events_MINOR++;
sewardje663cb92002-04-12 10:26:32 +00001271
1272 if (0)
sewardj738856f2009-07-15 14:48:32 +00001273 VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs\n",
sewardj291849f2012-04-20 23:58:55 +00001274 tid, dispatch_ctr - 1 );
sewardje663cb92002-04-12 10:26:32 +00001275
sewardj291849f2012-04-20 23:58:55 +00001276 HWord trc[2]; /* "two_words" */
1277 run_thread_for_a_while( &trc[0],
1278 &dispatch_ctr,
1279 tid, 0/*ignored*/, False );
sewardje663cb92002-04-12 10:26:32 +00001280
sewardjb5f6f512005-03-10 23:59:00 +00001281 if (VG_(clo_trace_sched) && VG_(clo_verbosity) > 2) {
sewardj291849f2012-04-20 23:58:55 +00001282 HChar buf[50];
1283 VG_(sprintf)(buf, "TRC: %s", name_of_sched_event(trc[0]));
sewardjb5f6f512005-03-10 23:59:00 +00001284 print_sched_event(tid, buf);
sewardje663cb92002-04-12 10:26:32 +00001285 }
1286
sewardj291849f2012-04-20 23:58:55 +00001287 if (trc[0] == VEX_TRC_JMP_NOREDIR) {
sewardj0ec07f32006-01-12 12:32:32 +00001288 /* If we got a request to run a no-redir version of
1289 something, do so now -- handle_noredir_jump just (creates
1290 and) runs that one translation. The flip side is that the
1291 noredir translation can't itself return another noredir
1292 request -- that would be nonsensical. It can, however,
1293 return VG_TRC_BORING, which just means keep going as
1294 normal. */
sewardj291849f2012-04-20 23:58:55 +00001295 /* Note that the fact that we need to continue with a
1296 no-redir jump is not recorded anywhere else in this
1297 thread's state. So we *must* execute the block right now
1298 -- we can't fail to execute it and later resume with it,
1299 because by then we'll have forgotten the fact that it
1300 should be run as no-redir, but will get run as a normal
1301 potentially-redir'd, hence screwing up. This really ought
1302 to be cleaned up, by noting in the guest state that the
1303 next block to be executed should be no-redir. Then we can
1304 suspend and resume at any point, which isn't the case at
1305 the moment. */
1306 handle_noredir_jump( &trc[0],
1307 &dispatch_ctr,
1308 tid );
1309 vg_assert(trc[0] != VEX_TRC_JMP_NOREDIR);
1310
1311 /* This can't be allowed to happen, since it means the block
1312 didn't execute, and we have no way to resume-as-noredir
1313 after we get more timeslice. But I don't think it ever
1314 can, since handle_noredir_jump will assert if the counter
1315 is zero on entry. */
1316 vg_assert(trc[0] != VG_TRC_INNER_COUNTERZERO);
1317
1318 /* A no-redir translation can't return with a chain-me
1319 request, since chaining in the no-redir cache is too
1320 complex. */
1321 vg_assert(trc[0] != VG_TRC_CHAIN_ME_TO_SLOW_EP
1322 && trc[0] != VG_TRC_CHAIN_ME_TO_FAST_EP);
sewardj0ec07f32006-01-12 12:32:32 +00001323 }
1324
sewardj291849f2012-04-20 23:58:55 +00001325 switch (trc[0]) {
1326 case VEX_TRC_JMP_BORING:
1327 /* assisted dispatch, no event. Used by no-redir
1328 translations to force return to the scheduler. */
sewardj0ec07f32006-01-12 12:32:32 +00001329 case VG_TRC_BORING:
1330 /* no special event, just keep going. */
1331 break;
1332
sewardjb5f6f512005-03-10 23:59:00 +00001333 case VG_TRC_INNER_FASTMISS:
sewardj291849f2012-04-20 23:58:55 +00001334 vg_assert(dispatch_ctr > 0);
sewardjb5f6f512005-03-10 23:59:00 +00001335 handle_tt_miss(tid);
1336 break;
sewardj291849f2012-04-20 23:58:55 +00001337
1338 case VG_TRC_CHAIN_ME_TO_SLOW_EP: {
1339 if (0) VG_(printf)("sched: CHAIN_TO_SLOW_EP: %p\n", (void*)trc[1] );
1340 handle_chain_me(tid, (void*)trc[1], False);
1341 break;
1342 }
1343
1344 case VG_TRC_CHAIN_ME_TO_FAST_EP: {
1345 if (0) VG_(printf)("sched: CHAIN_TO_FAST_EP: %p\n", (void*)trc[1] );
1346 handle_chain_me(tid, (void*)trc[1], True);
1347 break;
1348 }
1349
sewardjb5f6f512005-03-10 23:59:00 +00001350 case VEX_TRC_JMP_CLIENTREQ:
1351 do_client_request(tid);
1352 break;
sewardja0fef1b2005-11-03 13:46:30 +00001353
1354 case VEX_TRC_JMP_SYS_INT128: /* x86-linux */
njnf76d27a2009-05-28 01:53:07 +00001355 case VEX_TRC_JMP_SYS_INT129: /* x86-darwin */
1356 case VEX_TRC_JMP_SYS_INT130: /* x86-darwin */
1357 case VEX_TRC_JMP_SYS_SYSCALL: /* amd64-linux, ppc32-linux, amd64-darwin */
sewardj291849f2012-04-20 23:58:55 +00001358 handle_syscall(tid, trc[0]);
sewardjb5f6f512005-03-10 23:59:00 +00001359 if (VG_(clo_sanity_level) > 2)
1360 VG_(sanity_check_general)(True); /* sanity-check every syscall */
1361 break;
sewardje663cb92002-04-12 10:26:32 +00001362
sewardjb5f6f512005-03-10 23:59:00 +00001363 case VEX_TRC_JMP_YIELD:
1364 /* Explicit yield, because this thread is in a spin-lock
sewardj3fc75752005-03-12 15:16:31 +00001365 or something. Only let the thread run for a short while
1366 longer. Because swapping to another thread is expensive,
1367 we're prepared to let this thread eat a little more CPU
1368 before swapping to another. That means that short term
1369 spins waiting for hardware to poke memory won't cause a
1370 thread swap. */
sewardje1374cf2013-03-28 10:40:53 +00001371 if (dispatch_ctr > 1000)
1372 dispatch_ctr = 1000;
sewardjb5f6f512005-03-10 23:59:00 +00001373 break;
sewardje663cb92002-04-12 10:26:32 +00001374
sewardjb5f6f512005-03-10 23:59:00 +00001375 case VG_TRC_INNER_COUNTERZERO:
1376 /* Timeslice is out. Let a new thread be scheduled. */
sewardj291849f2012-04-20 23:58:55 +00001377 vg_assert(dispatch_ctr == 0);
sewardjb5f6f512005-03-10 23:59:00 +00001378 break;
sewardje663cb92002-04-12 10:26:32 +00001379
sewardjb5f6f512005-03-10 23:59:00 +00001380 case VG_TRC_FAULT_SIGNAL:
1381 /* Everything should be set up (either we're exiting, or
1382 about to start in a signal handler). */
1383 break;
sewardj9d1b5d32002-04-17 19:40:49 +00001384
sewardj07bdc5e2005-03-11 13:19:47 +00001385 case VEX_TRC_JMP_MAPFAIL:
1386 /* Failure of arch-specific address translation (x86/amd64
1387 segment override use) */
1388 /* jrs 2005 03 11: is this correct? */
1389 VG_(synth_fault)(tid);
1390 break;
1391
sewardjb5f6f512005-03-10 23:59:00 +00001392 case VEX_TRC_JMP_EMWARN: {
florian2e497412012-08-26 03:22:09 +00001393 static Int counts[EmNote_NUMBER];
sewardjb5f6f512005-03-10 23:59:00 +00001394 static Bool counts_initted = False;
florian2e497412012-08-26 03:22:09 +00001395 VexEmNote ew;
florian11f3cc82012-10-21 02:19:35 +00001396 const HChar* what;
sewardjb5f6f512005-03-10 23:59:00 +00001397 Bool show;
1398 Int q;
1399 if (!counts_initted) {
1400 counts_initted = True;
florian2e497412012-08-26 03:22:09 +00001401 for (q = 0; q < EmNote_NUMBER; q++)
sewardjb5f6f512005-03-10 23:59:00 +00001402 counts[q] = 0;
1403 }
florian2e497412012-08-26 03:22:09 +00001404 ew = (VexEmNote)VG_(threads)[tid].arch.vex.guest_EMNOTE;
1405 what = (ew < 0 || ew >= EmNote_NUMBER)
sewardjb5f6f512005-03-10 23:59:00 +00001406 ? "unknown (?!)"
florian2e497412012-08-26 03:22:09 +00001407 : LibVEX_EmNote_string(ew);
1408 show = (ew < 0 || ew >= EmNote_NUMBER)
sewardjb5f6f512005-03-10 23:59:00 +00001409 ? True
1410 : counts[ew]++ < 3;
sewardjd68ac3e2006-01-20 14:31:57 +00001411 if (show && VG_(clo_show_emwarns) && !VG_(clo_xml)) {
sewardjb5f6f512005-03-10 23:59:00 +00001412 VG_(message)( Vg_UserMsg,
sewardj738856f2009-07-15 14:48:32 +00001413 "Emulation warning: unsupported action:\n");
1414 VG_(message)( Vg_UserMsg, " %s\n", what);
njnd01fef72005-03-25 23:35:48 +00001415 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
sewardjb5f6f512005-03-10 23:59:00 +00001416 }
1417 break;
1418 }
sewardje663cb92002-04-12 10:26:32 +00001419
sewardjd68ac3e2006-01-20 14:31:57 +00001420 case VEX_TRC_JMP_EMFAIL: {
florian2e497412012-08-26 03:22:09 +00001421 VexEmNote ew;
florian11f3cc82012-10-21 02:19:35 +00001422 const HChar* what;
florian2e497412012-08-26 03:22:09 +00001423 ew = (VexEmNote)VG_(threads)[tid].arch.vex.guest_EMNOTE;
1424 what = (ew < 0 || ew >= EmNote_NUMBER)
sewardjd68ac3e2006-01-20 14:31:57 +00001425 ? "unknown (?!)"
florian2e497412012-08-26 03:22:09 +00001426 : LibVEX_EmNote_string(ew);
sewardjd68ac3e2006-01-20 14:31:57 +00001427 VG_(message)( Vg_UserMsg,
sewardj738856f2009-07-15 14:48:32 +00001428 "Emulation fatal error -- Valgrind cannot continue:\n");
1429 VG_(message)( Vg_UserMsg, " %s\n", what);
sewardjd68ac3e2006-01-20 14:31:57 +00001430 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
sewardj738856f2009-07-15 14:48:32 +00001431 VG_(message)(Vg_UserMsg, "\n");
1432 VG_(message)(Vg_UserMsg, "Valgrind has to exit now. Sorry.\n");
1433 VG_(message)(Vg_UserMsg, "\n");
sewardjd68ac3e2006-01-20 14:31:57 +00001434 VG_(exit)(1);
1435 break;
1436 }
1437
dejanj24f0c3a2014-02-19 11:57:22 +00001438 case VEX_TRC_JMP_SIGILL:
1439 VG_(synth_sigill)(tid, VG_(get_IP)(tid));
1440 break;
1441
sewardj4f9d6742007-08-29 09:11:35 +00001442 case VEX_TRC_JMP_SIGTRAP:
sewardj86df1552006-02-07 20:56:41 +00001443 VG_(synth_sigtrap)(tid);
1444 break;
1445
sewardj4f9d6742007-08-29 09:11:35 +00001446 case VEX_TRC_JMP_SIGSEGV:
1447 VG_(synth_fault)(tid);
1448 break;
1449
sewardj1c0ce7a2009-07-01 08:10:49 +00001450 case VEX_TRC_JMP_SIGBUS:
1451 VG_(synth_sigbus)(tid);
1452 break;
1453
petarj80e5c172012-10-19 14:45:17 +00001454 case VEX_TRC_JMP_SIGFPE_INTDIV:
1455 VG_(synth_sigfpe)(tid, VKI_FPE_INTDIV);
1456 break;
1457
1458 case VEX_TRC_JMP_SIGFPE_INTOVF:
1459 VG_(synth_sigfpe)(tid, VKI_FPE_INTOVF);
1460 break;
1461
florian2baf7532012-07-26 02:41:31 +00001462 case VEX_TRC_JMP_NODECODE: {
1463 Addr addr = VG_(get_IP)(tid);
1464
sewardjc30cd9b2012-12-06 18:08:54 +00001465 if (VG_(clo_sigill_diag)) {
1466 VG_(umsg)(
1467 "valgrind: Unrecognised instruction at address %#lx.\n", addr);
1468 VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
sewardj738856f2009-07-15 14:48:32 +00001469#define M(a) VG_(umsg)(a "\n");
njn7cf66582005-10-15 17:18:08 +00001470 M("Your program just tried to execute an instruction that Valgrind" );
1471 M("did not recognise. There are two possible reasons for this." );
1472 M("1. Your program has a bug and erroneously jumped to a non-code" );
1473 M(" location. If you are running Memcheck and you just saw a" );
1474 M(" warning about a bad jump, it's probably your program's fault.");
1475 M("2. The instruction is legitimate but Valgrind doesn't handle it,");
1476 M(" i.e. it's Valgrind's fault. If you think this is the case or");
njnec4d5132006-03-21 23:15:43 +00001477 M(" you are not sure, please let us know and we'll try to fix it.");
njn7cf66582005-10-15 17:18:08 +00001478 M("Either way, Valgrind will now raise a SIGILL signal which will" );
1479 M("probably kill your program." );
njnd5021362005-09-29 00:35:18 +00001480#undef M
sewardjc30cd9b2012-12-06 18:08:54 +00001481 }
sewardje663cb92002-04-12 10:26:32 +00001482
florian2baf7532012-07-26 02:41:31 +00001483#if defined(VGA_s390x)
1484 /* Now that the complaint is out we need to adjust the guest_IA. The
1485 reason is that -- after raising the exception -- execution will
1486 continue with the insn that follows the invalid insn. As the first
1487 2 bits of the invalid insn determine its length in the usual way,
1488 we can compute the address of the next insn here and adjust the
1489 guest_IA accordingly. This adjustment is essential and tested by
1490 none/tests/s390x/op_exception.c (which would loop forever
1491 otherwise) */
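 /* For example: top-two-bits 00 -> 2-byte insn, 01 -> 4, 10 -> 4,
    11 -> 6; that is what the shift/add expression below computes. */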
1492 UChar byte = ((UChar *)addr)[0];
1493 UInt insn_length = ((((byte >> 6) + 1) >> 1) + 1) << 1;
1494 Addr next_insn_addr = addr + insn_length;
1495
1496 VG_(set_IP)(tid, next_insn_addr);
1497#endif
1498 VG_(synth_sigill)(tid, addr);
1499 break;
1500 }
cerion85665ca2005-06-20 15:51:07 +00001501 case VEX_TRC_JMP_TINVAL:
cerion85665ca2005-06-20 15:51:07 +00001502 VG_(discard_translations)(
1503 (Addr64)VG_(threads)[tid].arch.vex.guest_TISTART,
sewardj45f4e7c2005-09-27 19:20:21 +00001504 VG_(threads)[tid].arch.vex.guest_TILEN,
1505 "scheduler(VEX_TRC_JMP_TINVAL)"
sewardj487ac702005-06-21 12:52:38 +00001506 );
cerion85665ca2005-06-20 15:51:07 +00001507 if (0)
1508 VG_(printf)("dump translations done.\n");
cerion85665ca2005-06-20 15:51:07 +00001509 break;
1510
sewardje3a384b2005-07-29 08:51:34 +00001511 case VG_TRC_INVARIANT_FAILED:
1512 /* This typically happens if, after running generated code,
1513 it is detected that host CPU settings (eg, FPU/Vector
1514 control words) are not as they should be. Vex's code
1515 generation specifies the state such control words should
1516 be in on entry to Vex-generated code, and they should be
1517 unchanged on exit from it. Failure of this assertion
1518 usually means a bug in Vex's code generation. */
sewardj59570ff2010-01-01 11:59:33 +00001519 //{ UInt xx;
1520 // __asm__ __volatile__ (
1521 // "\t.word 0xEEF12A10\n" // fmrx r2,fpscr
1522 // "\tmov %0, r2" : "=r"(xx) : : "r2" );
1523 // VG_(printf)("QQQQ new fpscr = %08x\n", xx);
1524 //}
sewardje3a384b2005-07-29 08:51:34 +00001525 vg_assert2(0, "VG_(scheduler), phase 3: "
1526 "run_innerloop detected host "
1527 "state invariant failure", trc);
1528
sewardja0fef1b2005-11-03 13:46:30 +00001529 case VEX_TRC_JMP_SYS_SYSENTER:
sewardj5438a012005-08-07 14:49:27 +00001530 /* Do whatever simulation is appropriate for an x86 sysenter
1531 instruction. Note that it is critical to set this thread's
1532 guest_EIP to point at the code to execute after the
1533 sysenter, since Vex-generated code will not have set it --
1534 vex does not know what it should be. Vex sets the next
njncda2f0f2009-05-18 02:12:08 +00001535 address to zero, so if you don't set guest_EIP, the thread
1536 will jump to zero afterwards and probably die as a result. */
1537# if defined(VGP_x86_linux)
sewardj5438a012005-08-07 14:49:27 +00001538 vg_assert2(0, "VG_(scheduler), phase 3: "
njncda2f0f2009-05-18 02:12:08 +00001539 "sysenter_x86 on x86-linux is not supported");
njnf76d27a2009-05-28 01:53:07 +00001540# elif defined(VGP_x86_darwin)
1541 /* return address in client edx */
1542 VG_(threads)[tid].arch.vex.guest_EIP
1543 = VG_(threads)[tid].arch.vex.guest_EDX;
sewardj93a97572012-04-21 15:35:12 +00001544 handle_syscall(tid, trc[0]);
sewardj5438a012005-08-07 14:49:27 +00001545# else
1546 vg_assert2(0, "VG_(scheduler), phase 3: "
1547 "sysenter_x86 on non-x86 platform?!?!");
1548# endif
njnf76d27a2009-05-28 01:53:07 +00001549 break;
sewardj5438a012005-08-07 14:49:27 +00001550
sewardjb5f6f512005-03-10 23:59:00 +00001551 default:
njn50ae1a72005-04-08 23:28:23 +00001552 vg_assert2(0, "VG_(scheduler), phase 3: "
sewardj291849f2012-04-20 23:58:55 +00001553 "unexpected thread return code (%u)", trc[0]);
sewardjb5f6f512005-03-10 23:59:00 +00001554 /* NOTREACHED */
1555 break;
sewardje663cb92002-04-12 10:26:32 +00001556
1557 } /* switch (trc) */
sewardjb0473e92011-06-07 22:54:32 +00001558
sewardj17c5e2e2012-12-28 09:12:14 +00001559 if (UNLIKELY(VG_(clo_profyle_sbs)) && VG_(clo_profyle_interval) > 0)
1560 maybe_show_sb_profile();
nethercote238a3c32004-08-09 13:13:31 +00001561 }
sewardjc24be7a2005-03-15 01:40:12 +00001562
1563 if (VG_(clo_trace_sched))
1564 print_sched_event(tid, "exiting VG_(scheduler)");
1565
sewardjb5f6f512005-03-10 23:59:00 +00001566 vg_assert(VG_(is_exiting)(tid));
thughes513197c2004-06-13 12:07:53 +00001567
sewardjb5f6f512005-03-10 23:59:00 +00001568 return tst->exitreason;
sewardj20917d82002-05-28 01:36:45 +00001569}
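
/* Purely illustrative sketch (kept out of the build): roughly how a
   per-thread wrapper elsewhere in the core is expected to drive
   VG_(scheduler). Everything except VG_(acquire_BigLock),
   VG_(scheduler) and the VgSchedReturnCode values used in this file
   is an assumption made for the sketch, not the real wrapper code. */
#if 0
static void example_thread_driver ( ThreadId tid )
{
   VgSchedReturnCode src;

   /* VG_(scheduler) requires that we already hold the CPU lock and
      have signals blocked (see its header comment above). */
   VG_(acquire_BigLock)(tid, "example_thread_driver");

   /* Runs client code until this thread is marked as exiting. */
   src = VG_(scheduler)(tid);

   /* Act on the exit reason, e.g. VgSrc_ExitThread for a plain thread
      exit, or VgSrc_FatalSig when the thread dies on a fatal signal. */
   if (src == VgSrc_FatalSig) {
      /* deliver the fatal signal / tear the process down ... */
   } else {
      /* release this thread's resources and exit the LWP ... */
   }
}
#endif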
1570
1571
sewardjb5f6f512005-03-10 23:59:00 +00001572/*
 1573 This causes all threads to forcibly exit. They aren't actually
1574 dead by the time this returns; you need to call
njnaf839f52005-06-23 03:27:57 +00001575 VG_(reap_threads)() to wait for them.
sewardjb5f6f512005-03-10 23:59:00 +00001576 */
1577void VG_(nuke_all_threads_except) ( ThreadId me, VgSchedReturnCode src )
sewardjccef2e62002-05-29 19:26:32 +00001578{
1579 ThreadId tid;
sewardjb5f6f512005-03-10 23:59:00 +00001580
1581 vg_assert(VG_(is_running_thread)(me));
sewardj45f02c42005-02-05 18:27:14 +00001582
sewardjccef2e62002-05-29 19:26:32 +00001583 for (tid = 1; tid < VG_N_THREADS; tid++) {
1584 if (tid == me
jsgf855d93d2003-10-13 22:26:55 +00001585 || VG_(threads)[tid].status == VgTs_Empty)
sewardjccef2e62002-05-29 19:26:32 +00001586 continue;
sewardjb5f6f512005-03-10 23:59:00 +00001587 if (0)
sewardjef037c72002-05-30 00:40:03 +00001588 VG_(printf)(
1589 "VG_(nuke_all_threads_except): nuking tid %d\n", tid);
sewardjb5f6f512005-03-10 23:59:00 +00001590
1591 VG_(threads)[tid].exitreason = src;
sewardja8d8e232005-06-07 20:04:56 +00001592 if (src == VgSrc_FatalSig)
1593 VG_(threads)[tid].os_state.fatalsig = VKI_SIGKILL;
sewardjf54342a2006-10-17 01:51:24 +00001594 VG_(get_thread_out_of_syscall)(tid);
sewardjccef2e62002-05-29 19:26:32 +00001595 }
1596}
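
/* Illustrative usage (a sketch; the exact signature of the reap call
   is assumed here, not taken from this file):

      VG_(nuke_all_threads_except)( tid, VgSrc_FatalSig );
      VG_(reap_threads)( tid );   // then wait for them to die

   i.e. ask every other thread to exit, then wait for them, per the
   comment above this function. */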
1597
1598
njnd3040452003-05-19 15:04:06 +00001599/* ---------------------------------------------------------------------
sewardjb5f6f512005-03-10 23:59:00 +00001600 Specifying shadow register values
njnd3040452003-05-19 15:04:06 +00001601 ------------------------------------------------------------------ */
1602
njnf536bbb2005-06-13 04:21:38 +00001603#if defined(VGA_x86)
njnaf839f52005-06-23 03:27:57 +00001604# define VG_CLREQ_ARGS guest_EAX
1605# define VG_CLREQ_RET guest_EDX
njnf536bbb2005-06-13 04:21:38 +00001606#elif defined(VGA_amd64)
njnaf839f52005-06-23 03:27:57 +00001607# define VG_CLREQ_ARGS guest_RAX
1608# define VG_CLREQ_RET guest_RDX
sewardj2c48c7b2005-11-29 13:05:56 +00001609#elif defined(VGA_ppc32) || defined(VGA_ppc64)
njnaf839f52005-06-23 03:27:57 +00001610# define VG_CLREQ_ARGS guest_GPR4
1611# define VG_CLREQ_RET guest_GPR3
sewardj59570ff2010-01-01 11:59:33 +00001612#elif defined(VGA_arm)
1613# define VG_CLREQ_ARGS guest_R4
1614# define VG_CLREQ_RET guest_R3
sewardjf0c12502014-01-12 12:54:00 +00001615#elif defined(VGA_arm64)
1616# define VG_CLREQ_ARGS guest_X4
1617# define VG_CLREQ_RET guest_X3
sewardjb5b87402011-03-07 16:05:35 +00001618#elif defined (VGA_s390x)
1619# define VG_CLREQ_ARGS guest_r2
1620# define VG_CLREQ_RET guest_r3
petarj4df0bfc2013-02-27 23:17:33 +00001621#elif defined(VGA_mips32) || defined(VGA_mips64)
sewardj5db15402012-06-07 09:13:21 +00001622# define VG_CLREQ_ARGS guest_r12
1623# define VG_CLREQ_RET guest_r11
njnf536bbb2005-06-13 04:21:38 +00001624#else
1625# error Unknown arch
1626#endif
1627
njnaf839f52005-06-23 03:27:57 +00001628#define CLREQ_ARGS(regs) ((regs).vex.VG_CLREQ_ARGS)
1629#define CLREQ_RET(regs) ((regs).vex.VG_CLREQ_RET)
1630#define O_CLREQ_RET (offsetof(VexGuestArchState, VG_CLREQ_RET))
njnf536bbb2005-06-13 04:21:38 +00001631
njn502badb2005-05-08 02:04:49 +00001632// These macros write a value to a client's thread register, and tell the
1633// tool that it's happened (if necessary).
1634
1635#define SET_CLREQ_RETVAL(zztid, zzval) \
1636 do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
1637 VG_TRACK( post_reg_write, \
1638 Vg_CoreClientReq, zztid, O_CLREQ_RET, sizeof(UWord)); \
1639 } while (0)
1640
1641#define SET_CLCALL_RETVAL(zztid, zzval, f) \
1642 do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
1643 VG_TRACK( post_reg_write_clientcall_return, \
1644 zztid, O_CLREQ_RET, sizeof(UWord), f); \
1645 } while (0)
1646
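/* Client-side view, for orientation (the expansion quoted below is an
   illustration based on the current valgrind.h, not something defined
   in this file): the client marshals a request into an argument block
   whose address ends up in the VG_CLREQ_ARGS register, and reads the
   core's reply back from the VG_CLREQ_RET register. For example

      unsigned nested
         = (unsigned) VALGRIND_DO_CLIENT_REQUEST_EXPR(
              0, VG_USERREQ__RUNNING_ON_VALGRIND, 0, 0, 0, 0, 0);

   (the leading 0 being the default when not running under Valgrind)
   is essentially what RUNNING_ON_VALGRIND expands to, and the value it
   yields is whatever SET_CLREQ_RETVAL writes below. */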
sewardj0ec07f32006-01-12 12:32:32 +00001647
sewardje663cb92002-04-12 10:26:32 +00001648/* ---------------------------------------------------------------------
sewardj124ca2a2002-06-20 10:19:38 +00001649 Handle client requests.
sewardje663cb92002-04-12 10:26:32 +00001650 ------------------------------------------------------------------ */
1651
njn9cb54ac2005-06-12 04:19:17 +00001652// OS-specific(?) client requests
1653static Bool os_client_request(ThreadId tid, UWord *args)
1654{
1655 Bool handled = True;
1656
1657 vg_assert(VG_(is_running_thread)(tid));
1658
1659 switch(args[0]) {
1660 case VG_USERREQ__LIBC_FREERES_DONE:
1661 /* This is equivalent to an exit() syscall, but we don't set the
1662 exitcode (since it might already be set) */
1663 if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched))
sewardj738856f2009-07-15 14:48:32 +00001664 VG_(message)(Vg_DebugMsg,
1665 "__libc_freeres() done; really quitting!\n");
sewardjf54342a2006-10-17 01:51:24 +00001666 VG_(threads)[tid].exitreason = VgSrc_ExitThread;
njn9cb54ac2005-06-12 04:19:17 +00001667 break;
1668
1669 default:
1670 handled = False;
1671 break;
1672 }
1673
1674 return handled;
1675}
1676
1677
florian661786e2013-08-27 15:17:53 +00001678/* Write out a client message, possibly including a back trace. Return
1679 the number of characters written. In case of XML output, the format
1680 string as well as any arguments it requires will be XML'ified.
1681 I.e. special characters such as the angle brackets will be translated
1682 into proper escape sequences. */
1683static
1684Int print_client_message( ThreadId tid, const HChar *format,
1685 va_list *vargsp, Bool include_backtrace)
1686{
1687 Int count;
1688
1689 if (VG_(clo_xml)) {
1690 /* Translate the format string as follows:
1691 < --> &lt;
1692 > --> &gt;
1693 & --> &amp;
1694 %s --> %pS
1695 Yes, yes, it's simplified but in synch with
1696 myvprintf_str_XML_simplistic and VG_(debugLog_vprintf).
1697 */
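 /* For example, the format string "n = <%d>, s = %s" would be
    rewritten as "n = &lt;%d&gt;, s = %pS" before being printed. */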
1698
1699 /* Allocate a buffer that is for sure large enough. */
1700 HChar xml_format[VG_(strlen)(format) * 5 + 1];
1701
1702 const HChar *p;
1703 HChar *q = xml_format;
1704
1705 for (p = format; *p; ++p) {
1706 switch (*p) {
1707 case '<': VG_(strcpy)(q, "&lt;"); q += 4; break;
1708 case '>': VG_(strcpy)(q, "&gt;"); q += 4; break;
1709 case '&': VG_(strcpy)(q, "&amp;"); q += 5; break;
1710 case '%':
1711 /* Careful: make sure %%s stays %%s */
1712 *q++ = *p++;
1713 if (*p == 's') {
1714 *q++ = 'p';
1715 *q++ = 'S';
1716 } else {
1717 *q++ = *p;
1718 }
1719 break;
1720
1721 default:
1722 *q++ = *p;
1723 break;
1724 }
1725 }
1726 *q = '\0';
1727
1728 VG_(printf_xml)( "<clientmsg>\n" );
1729 VG_(printf_xml)( " <tid>%d</tid>\n", tid );
1730 VG_(printf_xml)( " <text>" );
1731 count = VG_(vprintf_xml)( xml_format, *vargsp );
1732 VG_(printf_xml)( " </text>\n" );
1733 } else {
1734 count = VG_(vmessage)( Vg_ClientMsg, format, *vargsp );
1735 VG_(message_flush)();
1736 }
1737
1738 if (include_backtrace)
1739 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
1740
1741 if (VG_(clo_xml))
1742 VG_(printf_xml)( "</clientmsg>\n" );
1743
1744 return count;
1745}
1746
1747
sewardj124ca2a2002-06-20 10:19:38 +00001748/* Do a client request for the thread tid. After the request, tid may
1749 or may not still be runnable; if not, the scheduler will have to
1750 choose a new thread to run.
1751*/
sewardje663cb92002-04-12 10:26:32 +00001752static
sewardjb5f6f512005-03-10 23:59:00 +00001753void do_client_request ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00001754{
sewardjb5f6f512005-03-10 23:59:00 +00001755 UWord* arg = (UWord*)(CLREQ_ARGS(VG_(threads)[tid].arch));
nethercoted1b64b22004-11-04 18:22:28 +00001756 UWord req_no = arg[0];
sewardj124ca2a2002-06-20 10:19:38 +00001757
fitzhardinge98abfc72003-12-16 02:05:15 +00001758 if (0)
nethercoted1b64b22004-11-04 18:22:28 +00001759 VG_(printf)("req no = 0x%llx, arg = %p\n", (ULong)req_no, arg);
sewardje663cb92002-04-12 10:26:32 +00001760 switch (req_no) {
1761
njn3e884182003-04-15 13:03:23 +00001762 case VG_USERREQ__CLIENT_CALL0: {
njn2ac95242005-03-13 23:07:30 +00001763 UWord (*f)(ThreadId) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001764 if (f == NULL)
sewardj738856f2009-07-15 14:48:32 +00001765 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL0: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001766 else
njn2ac95242005-03-13 23:07:30 +00001767 SET_CLCALL_RETVAL(tid, f ( tid ), (Addr)f);
njn3e884182003-04-15 13:03:23 +00001768 break;
1769 }
1770 case VG_USERREQ__CLIENT_CALL1: {
njn2ac95242005-03-13 23:07:30 +00001771 UWord (*f)(ThreadId, UWord) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001772 if (f == NULL)
sewardj738856f2009-07-15 14:48:32 +00001773 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL1: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001774 else
njn2ac95242005-03-13 23:07:30 +00001775 SET_CLCALL_RETVAL(tid, f ( tid, arg[2] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00001776 break;
1777 }
1778 case VG_USERREQ__CLIENT_CALL2: {
njn2ac95242005-03-13 23:07:30 +00001779 UWord (*f)(ThreadId, UWord, UWord) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001780 if (f == NULL)
sewardj738856f2009-07-15 14:48:32 +00001781 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL2: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001782 else
njn2ac95242005-03-13 23:07:30 +00001783 SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00001784 break;
1785 }
1786 case VG_USERREQ__CLIENT_CALL3: {
njn2ac95242005-03-13 23:07:30 +00001787 UWord (*f)(ThreadId, UWord, UWord, UWord) = (void*)arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001788 if (f == NULL)
sewardj738856f2009-07-15 14:48:32 +00001789 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL3: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001790 else
njn2ac95242005-03-13 23:07:30 +00001791 SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3], arg[4] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00001792 break;
1793 }
1794
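 /* (Client-side note, as an illustration: the four CLIENT_CALL
    requests above are what the valgrind.h macros
    VALGRIND_NON_SIMD_CALL0..3 generate; the called function runs on
    the real CPU, outside the simulator, and receives the ThreadId as
    an implicit first argument, as the casts above show.) */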
njnf09745a2005-05-10 03:01:23 +00001795 // Nb: this looks like a circular definition, because it kind of is.
1796 // See comment in valgrind.h to understand what's going on.
sewardj124ca2a2002-06-20 10:19:38 +00001797 case VG_USERREQ__RUNNING_ON_VALGRIND:
sewardjb5f6f512005-03-10 23:59:00 +00001798 SET_CLREQ_RETVAL(tid, RUNNING_ON_VALGRIND+1);
sewardj124ca2a2002-06-20 10:19:38 +00001799 break;
1800
fitzhardinge39de4b42003-10-31 07:12:21 +00001801 case VG_USERREQ__PRINTF: {
florian661786e2013-08-27 15:17:53 +00001802 const HChar* format = (HChar *)arg[1];
sewardjc560fb32010-01-28 15:23:54 +00001803 /* JRS 2010-Jan-28: this is DEPRECATED; use the
1804 _VALIST_BY_REF version instead */
1805 if (sizeof(va_list) != sizeof(UWord))
1806 goto va_list_casting_error_NORETURN;
sewardj05b07152010-01-04 01:01:02 +00001807 union {
1808 va_list vargs;
sewardjc560fb32010-01-28 15:23:54 +00001809 unsigned long uw;
1810 } u;
1811 u.uw = (unsigned long)arg[2];
1812 Int count =
florian661786e2013-08-27 15:17:53 +00001813 print_client_message( tid, format, &u.vargs,
1814 /* include_backtrace */ False );
sewardjc560fb32010-01-28 15:23:54 +00001815 SET_CLREQ_RETVAL( tid, count );
1816 break;
1817 }
fitzhardinge39de4b42003-10-31 07:12:21 +00001818
sewardjc560fb32010-01-28 15:23:54 +00001819 case VG_USERREQ__PRINTF_BACKTRACE: {
florian661786e2013-08-27 15:17:53 +00001820 const HChar* format = (HChar *)arg[1];
sewardjc560fb32010-01-28 15:23:54 +00001821 /* JRS 2010-Jan-28: this is DEPRECATED; use the
1822 _VALIST_BY_REF version instead */
1823 if (sizeof(va_list) != sizeof(UWord))
1824 goto va_list_casting_error_NORETURN;
sewardj05b07152010-01-04 01:01:02 +00001825 union {
1826 va_list vargs;
sewardjc560fb32010-01-28 15:23:54 +00001827 unsigned long uw;
1828 } u;
1829 u.uw = (unsigned long)arg[2];
1830 Int count =
florian661786e2013-08-27 15:17:53 +00001831 print_client_message( tid, format, &u.vargs,
1832 /* include_backtrace */ True );
sewardjc560fb32010-01-28 15:23:54 +00001833 SET_CLREQ_RETVAL( tid, count );
1834 break;
1835 }
1836
1837 case VG_USERREQ__PRINTF_VALIST_BY_REF: {
florian661786e2013-08-27 15:17:53 +00001838 const HChar* format = (HChar *)arg[1];
sewardjc560fb32010-01-28 15:23:54 +00001839 va_list* vargsp = (va_list*)arg[2];
florian661786e2013-08-27 15:17:53 +00001840 Int count =
1841 print_client_message( tid, format, vargsp,
1842 /* include_backtrace */ False );
1843
sewardjc560fb32010-01-28 15:23:54 +00001844 SET_CLREQ_RETVAL( tid, count );
1845 break;
1846 }
1847
1848 case VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF: {
florian661786e2013-08-27 15:17:53 +00001849 const HChar* format = (HChar *)arg[1];
sewardjc560fb32010-01-28 15:23:54 +00001850 va_list* vargsp = (va_list*)arg[2];
1851 Int count =
florian661786e2013-08-27 15:17:53 +00001852 print_client_message( tid, format, vargsp,
1853 /* include_backtrace */ True );
sewardjc560fb32010-01-28 15:23:54 +00001854 SET_CLREQ_RETVAL( tid, count );
1855 break;
1856 }
1857
1858 case VG_USERREQ__INTERNAL_PRINTF_VALIST_BY_REF: {
1859 va_list* vargsp = (va_list*)arg[2];
1860 Int count =
floriancd19e992012-11-03 19:32:28 +00001861 VG_(vmessage)( Vg_DebugMsg, (HChar *)arg[1], *vargsp );
sewardjc560fb32010-01-28 15:23:54 +00001862 VG_(message_flush)();
1863 SET_CLREQ_RETVAL( tid, count );
1864 break;
1865 }
fitzhardinge39de4b42003-10-31 07:12:21 +00001866
tomd2645142009-10-29 09:27:11 +00001867 case VG_USERREQ__ADD_IFUNC_TARGET: {
1868 VG_(redir_add_ifunc_target)( arg[1], arg[2] );
1869 SET_CLREQ_RETVAL( tid, 0);
1870 break; }
1871
rjwalsh0140af52005-06-04 20:42:33 +00001872 case VG_USERREQ__STACK_REGISTER: {
njn945ed2e2005-06-24 03:28:30 +00001873 UWord sid = VG_(register_stack)((Addr)arg[1], (Addr)arg[2]);
rjwalsh0140af52005-06-04 20:42:33 +00001874 SET_CLREQ_RETVAL( tid, sid );
1875 break; }
1876
1877 case VG_USERREQ__STACK_DEREGISTER: {
njn945ed2e2005-06-24 03:28:30 +00001878 VG_(deregister_stack)(arg[1]);
rjwalsh0140af52005-06-04 20:42:33 +00001879 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1880 break; }
1881
1882 case VG_USERREQ__STACK_CHANGE: {
njn945ed2e2005-06-24 03:28:30 +00001883 VG_(change_stack)(arg[1], (Addr)arg[2], (Addr)arg[3]);
rjwalsh0140af52005-06-04 20:42:33 +00001884 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1885 break; }
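 /* (Client-side note: these three stack requests back the valgrind.h
    macros VALGRIND_STACK_REGISTER(start, end), which returns the
    stack id, VALGRIND_STACK_DEREGISTER(id) and
    VALGRIND_STACK_CHANGE(id, start, end).) */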
1886
fitzhardinge98abfc72003-12-16 02:05:15 +00001887 case VG_USERREQ__GET_MALLOCFUNCS: {
1888 struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];
1889
njnfc51f8d2005-06-21 03:20:17 +00001890 info->tl_malloc = VG_(tdict).tool_malloc;
1891 info->tl_calloc = VG_(tdict).tool_calloc;
1892 info->tl_realloc = VG_(tdict).tool_realloc;
1893 info->tl_memalign = VG_(tdict).tool_memalign;
1894 info->tl___builtin_new = VG_(tdict).tool___builtin_new;
1895 info->tl___builtin_vec_new = VG_(tdict).tool___builtin_vec_new;
1896 info->tl_free = VG_(tdict).tool_free;
1897 info->tl___builtin_delete = VG_(tdict).tool___builtin_delete;
1898 info->tl___builtin_vec_delete = VG_(tdict).tool___builtin_vec_delete;
njn8b140de2009-02-17 04:31:18 +00001899 info->tl_malloc_usable_size = VG_(tdict).tool_malloc_usable_size;
fitzhardinge98abfc72003-12-16 02:05:15 +00001900
njn088bfb42005-08-17 05:01:37 +00001901 info->mallinfo = VG_(mallinfo);
sewardjb5f6f512005-03-10 23:59:00 +00001902 info->clo_trace_malloc = VG_(clo_trace_malloc);
fitzhardinge98abfc72003-12-16 02:05:15 +00001903
1904 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1905
1906 break;
1907 }
1908
njn25e49d8e72002-09-23 09:36:25 +00001909 /* Requests from the client program */
1910
1911 case VG_USERREQ__DISCARD_TRANSLATIONS:
1912 if (VG_(clo_verbosity) > 2)
1913 VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
njn8a7b41b2007-09-23 00:51:24 +00001914 " addr %p, len %lu\n",
njn25e49d8e72002-09-23 09:36:25 +00001915 (void*)arg[1], arg[2] );
1916
sewardj45f4e7c2005-09-27 19:20:21 +00001917 VG_(discard_translations)(
1918 arg[1], arg[2], "scheduler(VG_USERREQ__DISCARD_TRANSLATIONS)"
1919 );
njn25e49d8e72002-09-23 09:36:25 +00001920
njnd3040452003-05-19 15:04:06 +00001921 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
njn25e49d8e72002-09-23 09:36:25 +00001922 break;
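 /* (Clients reach this via the valgrind.h macro
    VALGRIND_DISCARD_TRANSLATIONS(addr, len), e.g. after patching
    code in place.) */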
1923
njn47363ab2003-04-21 13:24:40 +00001924 case VG_USERREQ__COUNT_ERRORS:
nethercotef2b11482004-08-02 12:36:01 +00001925 SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() );
njn47363ab2003-04-21 13:24:40 +00001926 break;
1927
sewardjc8259b82009-04-22 22:42:10 +00001928 case VG_USERREQ__LOAD_PDB_DEBUGINFO:
1929 VG_(di_notify_pdb_debuginfo)( arg[1], arg[2], arg[3], arg[4] );
1930 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1931 break;
1932
sewardj5c659622010-08-20 18:22:07 +00001933 case VG_USERREQ__MAP_IP_TO_SRCLOC: {
1934 Addr ip = arg[1];
floriandbb35842012-10-27 18:39:11 +00001935 HChar* buf64 = (HChar*)arg[2];
sewardj5c659622010-08-20 18:22:07 +00001936
1937 VG_(memset)(buf64, 0, 64);
1938 UInt linenum = 0;
1939 Bool ok = VG_(get_filename_linenum)(
1940 ip, &buf64[0], 50, NULL, 0, NULL, &linenum
1941 );
1942 if (ok) {
1943 /* Find the terminating zero in the first 50 bytes. */
1944 UInt i;
1945 for (i = 0; i < 50; i++) {
1946 if (buf64[i] == 0)
1947 break;
1948 }
1949 /* We must find a zero somewhere in 0 .. 49. Else
1950 VG_(get_filename_linenum) is not properly zero
1951 terminating. */
1952 vg_assert(i < 50);
1953 VG_(sprintf)(&buf64[i], ":%u", linenum);
1954 } else {
1955 buf64[0] = 0;
1956 }
1957
1958 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1959 break;
1960 }
1961
sewardjdc873c02011-07-24 16:02:33 +00001962 case VG_USERREQ__CHANGE_ERR_DISABLEMENT: {
1963 Word delta = arg[1];
1964 vg_assert(delta == 1 || delta == -1);
1965 ThreadState* tst = VG_(get_ThreadState)(tid);
1966 vg_assert(tst);
1967 if (delta == 1 && tst->err_disablement_level < 0xFFFFFFFF) {
1968 tst->err_disablement_level++;
1969 }
1970 else
1971 if (delta == -1 && tst->err_disablement_level > 0) {
1972 tst->err_disablement_level--;
1973 }
1974 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1975 break;
1976 }
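 /* (Client-side note: the +1 / -1 deltas correspond to the valgrind.h
    macros VALGRIND_DISABLE_ERROR_REPORTING and
    VALGRIND_ENABLE_ERROR_REPORTING respectively.) */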
1977
philippe46207652013-01-20 17:11:58 +00001978 case VG_USERREQ__GDB_MONITOR_COMMAND: {
1979 UWord ret;
1980 ret = (UWord) VG_(client_monitor_command) ((HChar*)arg[1]);
1981 SET_CLREQ_RETVAL(tid, ret);
1982 break;
1983 }
1984
njn32f8d8c2009-07-15 02:31:45 +00001985 case VG_USERREQ__MALLOCLIKE_BLOCK:
bart91347382011-03-25 20:07:25 +00001986 case VG_USERREQ__RESIZEINPLACE_BLOCK:
njn32f8d8c2009-07-15 02:31:45 +00001987 case VG_USERREQ__FREELIKE_BLOCK:
1988 // Ignore them if the addr is NULL; otherwise pass onto the tool.
1989 if (!arg[1]) {
1990 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1991 break;
1992 } else {
1993 goto my_default;
1994 }
1995
florianbb913cd2012-08-28 16:50:39 +00001996 case VG_USERREQ__VEX_INIT_FOR_IRI:
1997 LibVEX_InitIRI ( (IRICB *)arg[1] );
1998 break;
1999
sewardje663cb92002-04-12 10:26:32 +00002000 default:
njn32f8d8c2009-07-15 02:31:45 +00002001 my_default:
njn9cb54ac2005-06-12 04:19:17 +00002002 if (os_client_request(tid, arg)) {
2003 // do nothing, os_client_request() handled it
sewardjb5f6f512005-03-10 23:59:00 +00002004 } else if (VG_(needs).client_requests) {
nethercoted1b64b22004-11-04 18:22:28 +00002005 UWord ret;
sewardj34042512002-10-22 04:14:35 +00002006
njn25e49d8e72002-09-23 09:36:25 +00002007 if (VG_(clo_verbosity) > 2)
njn8a7b41b2007-09-23 00:51:24 +00002008 VG_(printf)("client request: code %lx, addr %p, len %lu\n",
njn25e49d8e72002-09-23 09:36:25 +00002009 arg[0], (void*)arg[1], arg[2] );
2010
njn51d827b2005-05-09 01:02:08 +00002011 if ( VG_TDICT_CALL(tool_handle_client_request, tid, arg, &ret) )
sewardjb5f6f512005-03-10 23:59:00 +00002012 SET_CLREQ_RETVAL(tid, ret);
njn25e49d8e72002-09-23 09:36:25 +00002013 } else {
sewardj34042512002-10-22 04:14:35 +00002014 static Bool whined = False;
2015
sewardjb5f6f512005-03-10 23:59:00 +00002016 if (!whined && VG_(clo_verbosity) > 2) {
nethercote7cc9c232004-01-21 15:08:04 +00002017 // Allow for requests in core, but defined by tools, which
njnd7994182003-10-02 13:44:04 +00002018 // have 0 and 0 in their two high bytes.
floriandbb35842012-10-27 18:39:11 +00002019 HChar c1 = (arg[0] >> 24) & 0xff;
2020 HChar c2 = (arg[0] >> 16) & 0xff;
njnd7994182003-10-02 13:44:04 +00002021 if (c1 == 0) c1 = '_';
2022 if (c2 == 0) c2 = '_';
sewardj34042512002-10-22 04:14:35 +00002023 VG_(message)(Vg_UserMsg, "Warning:\n"
barta0b6b2c2008-07-07 06:49:24 +00002024 " unhandled client request: 0x%lx (%c%c+0x%lx). Perhaps\n"
sewardj738856f2009-07-15 14:48:32 +00002025 " VG_(needs).client_requests should be set?\n",
njnd7994182003-10-02 13:44:04 +00002026 arg[0], c1, c2, arg[0] & 0xffff);
sewardj34042512002-10-22 04:14:35 +00002027 whined = True;
2028 }
njn25e49d8e72002-09-23 09:36:25 +00002029 }
sewardje663cb92002-04-12 10:26:32 +00002030 break;
2031 }
sewardjc560fb32010-01-28 15:23:54 +00002032 return;
2033
2034 /*NOTREACHED*/
2035 va_list_casting_error_NORETURN:
2036 VG_(umsg)(
2037 "Valgrind: fatal error - cannot continue: use of the deprecated\n"
2038 "client requests VG_USERREQ__PRINTF or VG_USERREQ__PRINTF_BACKTRACE\n"
2039 "on a platform where they cannot be supported. Please use the\n"
2040 "equivalent _VALIST_BY_REF versions instead.\n"
2041 "\n"
2042 "This is a binary-incompatible change in Valgrind's client request\n"
2043 "mechanism. It is unfortunate, but difficult to avoid. End-users\n"
2044 "are expected to almost never see this message. The only case in\n"
2045 "which you might see this message is if your code uses the macros\n"
2046 "VALGRIND_PRINTF or VALGRIND_PRINTF_BACKTRACE. If so, you will need\n"
2047 "to recompile such code, using the header files from this version of\n"
2048 "Valgrind, and not any previous version.\n"
2049 "\n"
2050 "If you see this mesage in any other circumstances, it is probably\n"
2051 "a bug in Valgrind. In this case, please file a bug report at\n"
2052 "\n"
2053 " http://www.valgrind.org/support/bug_reports.html\n"
2054 "\n"
2055 "Will now abort.\n"
2056 );
2057 vg_assert(0);
sewardje663cb92002-04-12 10:26:32 +00002058}
2059
2060
sewardj6072c362002-04-19 14:40:57 +00002061/* ---------------------------------------------------------------------
njn6676d5b2005-06-19 18:49:19 +00002062 Sanity checking (permanently engaged)
sewardj6072c362002-04-19 14:40:57 +00002063 ------------------------------------------------------------------ */
2064
sewardjb5f6f512005-03-10 23:59:00 +00002065/* Internal consistency checks on the sched structures. */
sewardj6072c362002-04-19 14:40:57 +00002066static
sewardjb5f6f512005-03-10 23:59:00 +00002067void scheduler_sanity ( ThreadId tid )
sewardj6072c362002-04-19 14:40:57 +00002068{
sewardjb5f6f512005-03-10 23:59:00 +00002069 Bool bad = False;
sewardjf54342a2006-10-17 01:51:24 +00002070 Int lwpid = VG_(gettid)();
jsgf855d93d2003-10-13 22:26:55 +00002071
sewardjb5f6f512005-03-10 23:59:00 +00002072 if (!VG_(is_running_thread)(tid)) {
2073 VG_(message)(Vg_DebugMsg,
sewardjf54342a2006-10-17 01:51:24 +00002074 "Thread %d is supposed to be running, "
sewardjad0a3a82006-12-17 18:58:55 +00002075 "but doesn't own the_BigLock (owned by %d)\n",
njnc7561b92005-06-19 01:24:32 +00002076 tid, VG_(running_tid));
sewardjb5f6f512005-03-10 23:59:00 +00002077 bad = True;
jsgf855d93d2003-10-13 22:26:55 +00002078 }
sewardj5f07b662002-04-23 16:52:51 +00002079
sewardjf54342a2006-10-17 01:51:24 +00002080 if (lwpid != VG_(threads)[tid].os_state.lwpid) {
sewardjb5f6f512005-03-10 23:59:00 +00002081 VG_(message)(Vg_DebugMsg,
njnd06ed472005-03-13 05:12:31 +00002082 "Thread %d is supposed to be in LWP %d, but we're actually %d\n",
2083 tid, VG_(threads)[tid].os_state.lwpid, VG_(gettid)());
sewardjb5f6f512005-03-10 23:59:00 +00002084 bad = True;
sewardj5f07b662002-04-23 16:52:51 +00002085 }
sewardjf54342a2006-10-17 01:51:24 +00002086
bart78bfc712011-12-08 16:14:59 +00002087 if (lwpid != ML_(get_sched_lock_owner)(the_BigLock)) {
sewardjf54342a2006-10-17 01:51:24 +00002088 VG_(message)(Vg_DebugMsg,
sewardjad0a3a82006-12-17 18:58:55 +00002089 "Thread (LWPID) %d doesn't own the_BigLock\n",
sewardjf54342a2006-10-17 01:51:24 +00002090 tid);
2091 bad = True;
2092 }
2093
philippe9e9b5892013-01-23 22:19:36 +00002094 if (0) {
2095 /* Periodically show the state of all threads, for debugging
2096 purposes. */
2097 static UInt lasttime = 0;
2098 UInt now;
2099 now = VG_(read_millisecond_timer)();
2100 if ((!bad) && (lasttime + 4000/*ms*/ <= now)) {
2101 lasttime = now;
2102 VG_(printf)("\n------------ Sched State at %d ms ------------\n",
2103 (Int)now);
philippe4f6f3362014-04-19 00:25:54 +00002104 VG_(show_sched_status)(True, // host_stacktrace
2105 True, // valgrind_stack_usage
 2106 True); // exited_threads
philippe9e9b5892013-01-23 22:19:36 +00002107 }
sewardjf54342a2006-10-17 01:51:24 +00002108 }
2109
2110 /* core_panic also shows the sched status, which is why we don't
2111 show it above if bad==True. */
2112 if (bad)
2113 VG_(core_panic)("scheduler_sanity: failed");
sewardj6072c362002-04-19 14:40:57 +00002114}
2115
njn6676d5b2005-06-19 18:49:19 +00002116void VG_(sanity_check_general) ( Bool force_expensive )
2117{
2118 ThreadId tid;
2119
sewardjf54342a2006-10-17 01:51:24 +00002120 static UInt next_slow_check_at = 1;
2121 static UInt slow_check_interval = 25;
2122
njn6676d5b2005-06-19 18:49:19 +00002123 if (VG_(clo_sanity_level) < 1) return;
2124
2125 /* --- First do all the tests that we can do quickly. ---*/
2126
2127 sanity_fast_count++;
2128
2129 /* Check stuff pertaining to the memory check system. */
2130
2131 /* Check that nobody has spuriously claimed that the first or
2132 last 16 pages of memory have become accessible [...] */
2133 if (VG_(needs).sanity_checks) {
njn6676d5b2005-06-19 18:49:19 +00002134 vg_assert(VG_TDICT_CALL(tool_cheap_sanity_check));
njn6676d5b2005-06-19 18:49:19 +00002135 }
2136
2137 /* --- Now some more expensive checks. ---*/
2138
sewardjf54342a2006-10-17 01:51:24 +00002139 /* Once every now and again, check some more expensive stuff.
2140 Gradually increase the interval between such checks so as not to
2141 burden long-running programs too much. */
njn6676d5b2005-06-19 18:49:19 +00002142 if ( force_expensive
sewardjf54342a2006-10-17 01:51:24 +00002143 || VG_(clo_sanity_level) > 1
2144 || (VG_(clo_sanity_level) == 1
2145 && sanity_fast_count == next_slow_check_at)) {
njn6676d5b2005-06-19 18:49:19 +00002146
sewardjf54342a2006-10-17 01:51:24 +00002147 if (0) VG_(printf)("SLOW at %d\n", sanity_fast_count-1);
2148
2149 next_slow_check_at = sanity_fast_count - 1 + slow_check_interval;
2150 slow_check_interval++;
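 /* For instance, with the initial values above, VG_(clo_sanity_level)
    == 1 and no forced checks, the expensive checks run when
    sanity_fast_count is 1, 25, 50, 76, 103, and so on; the gap grows
    by one each time. */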
njn6676d5b2005-06-19 18:49:19 +00002151 sanity_slow_count++;
2152
njn6676d5b2005-06-19 18:49:19 +00002153 if (VG_(needs).sanity_checks) {
njn6676d5b2005-06-19 18:49:19 +00002154 vg_assert(VG_TDICT_CALL(tool_expensive_sanity_check));
njn6676d5b2005-06-19 18:49:19 +00002155 }
2156
njn6676d5b2005-06-19 18:49:19 +00002157 /* Look for stack overruns. Visit all threads. */
njnd666ea72005-06-26 17:26:22 +00002158 for (tid = 1; tid < VG_N_THREADS; tid++) {
sewardj45f4e7c2005-09-27 19:20:21 +00002159 SizeT remains;
2160 VgStack* stack;
njn6676d5b2005-06-19 18:49:19 +00002161
2162 if (VG_(threads)[tid].status == VgTs_Empty ||
2163 VG_(threads)[tid].status == VgTs_Zombie)
2164 continue;
2165
sewardj45f4e7c2005-09-27 19:20:21 +00002166 stack
2167 = (VgStack*)
2168 VG_(get_ThreadState)(tid)->os_state.valgrind_stack_base;
sewardj46dbd3f2010-09-08 08:30:31 +00002169 SizeT limit
2170 = 4096; // Let's say. Checking more causes lots of L2 misses.
sewardj45f4e7c2005-09-27 19:20:21 +00002171 remains
sewardj46dbd3f2010-09-08 08:30:31 +00002172 = VG_(am_get_VgStack_unused_szB)(stack, limit);
2173 if (remains < limit)
njn6676d5b2005-06-19 18:49:19 +00002174 VG_(message)(Vg_DebugMsg,
barta0b6b2c2008-07-07 06:49:24 +00002175 "WARNING: Thread %d is within %ld bytes "
sewardj738856f2009-07-15 14:48:32 +00002176 "of running out of stack!\n",
njn6676d5b2005-06-19 18:49:19 +00002177 tid, remains);
2178 }
njn6676d5b2005-06-19 18:49:19 +00002179 }
2180
2181 if (VG_(clo_sanity_level) > 1) {
njn6676d5b2005-06-19 18:49:19 +00002182 /* Check sanity of the low-level memory manager. Note that bugs
2183 in the client's code can cause this to fail, so we don't do
2184 this check unless specially asked for. And because it's
2185 potentially very expensive. */
2186 VG_(sanity_check_malloc_all)();
njn6676d5b2005-06-19 18:49:19 +00002187 }
njn6676d5b2005-06-19 18:49:19 +00002188}
sewardj6072c362002-04-19 14:40:57 +00002189
sewardje663cb92002-04-12 10:26:32 +00002190/*--------------------------------------------------------------------*/
njn278b3d62005-05-30 23:20:51 +00002191/*--- end ---*/
sewardje663cb92002-04-12 10:26:32 +00002192/*--------------------------------------------------------------------*/