
/*--------------------------------------------------------------------*/
/*--- Thread scheduling.                               scheduler.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2015 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/*
   Overview

   Valgrind tries to emulate the kernel's threading as closely as
   possible.  The client does all threading via the normal syscalls
   (on Linux: clone, etc).  Valgrind emulates this by creating exactly
   the same process structure as would be created without Valgrind.
   There are no extra threads.

   The main difference is that Valgrind only allows one client thread
   to run at once.  This is controlled with the CPU Big Lock,
   "the_BigLock".  Any time a thread wants to run client code or
   manipulate any shared state (which is anything other than its own
   ThreadState entry), it must hold the_BigLock.

   When a thread is about to block in a blocking syscall, it releases
   the_BigLock, and re-takes it when it becomes runnable again (either
   because the syscall finished, or we took a signal).

   VG_(scheduler) therefore runs in each thread.  It returns only when
   the thread is exiting, either because it exited itself, or it was
   told to exit by another thread.

   This file is almost entirely OS-independent.  The details of how
   the OS handles threading and signalling are abstracted away and
   implemented elsewhere.  [Some of the functions have worked their
   way back for the moment, until we do an OS port in earnest...]
*/
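
/* Illustrative sketch of the lock discipline described above,
   expressed in terms of the functions defined later in this file:

      VG_(acquire_BigLock)(tid, "...");               // sole running thread
      ... run client code / touch shared state ...
      VG_(release_BigLock)(tid, VgTs_WaitSys, "..."); // about to block
      ... blocking syscall runs without the lock held ...
      VG_(acquire_BigLock)(tid, "...");               // runnable again
*/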


#include "pub_core_basics.h"
#include "pub_core_debuglog.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"    // __NR_sched_yield
#include "pub_core_threadstate.h"
#include "pub_core_clientstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_clreq.h"        // for VG_USERREQ__*
#include "pub_core_dispatch.h"
#include "pub_core_errormgr.h"     // For VG_(get_n_errs_found)()
#include "pub_core_gdbserver.h"    // for VG_(gdbserver)/VG_(gdbserver_activity)
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#if defined(VGO_darwin)
#include "pub_core_mach.h"
#endif
#include "pub_core_machine.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_replacemalloc.h"
#include "pub_core_sbprofile.h"
#include "pub_core_signals.h"
#include "pub_core_stacks.h"
#include "pub_core_stacktrace.h"   // For VG_(get_and_pp_StackTrace)()
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"
#include "pub_core_translate.h"    // For VG_(translate)()
#include "pub_core_transtab.h"
#include "pub_core_debuginfo.h"    // VG_(di_notify_pdb_debuginfo)
#include "priv_sched-lock.h"
#include "pub_core_scheduler.h"    // self
#include "pub_core_redir.h"
#include "libvex_emnote.h"         // VexEmNote


/* ---------------------------------------------------------------------
   Types and globals for the scheduler.
   ------------------------------------------------------------------ */

/* ThreadId and ThreadState are defined elsewhere */

/* Defines the thread-scheduling timeslice, in terms of the number of
   basic blocks we attempt to run each thread for.  Smaller values
   give finer interleaving but much increased scheduling overheads. */
#define SCHEDULING_QUANTUM   100000

/* If False, a fault is Valgrind-internal (ie, a bug) */
Bool VG_(in_generated_code) = False;

/* 64-bit counter for the number of basic blocks done. */
static ULong bbs_done = 0;

/* Counter to see if vgdb activity is to be verified.
   When the number of bbs done reaches vgdb_next_poll, the scheduler
   will poll for gdbserver activity.  VG_(force_vgdb_poll) and
   VG_(disable_vgdb_poll) allow the valgrind core (e.g. m_gdbserver)
   to control when the next poll will be done. */
static ULong vgdb_next_poll;

/* Forwards */
static void do_client_request ( ThreadId tid );
static void scheduler_sanity ( ThreadId tid );
static void mostly_clear_thread_record ( ThreadId tid );

/* Stats. */
static ULong n_scheduling_events_MINOR = 0;
static ULong n_scheduling_events_MAJOR = 0;

/* Stats: number of XIndirs, and number that missed in the fast
   cache. */
static ULong stats__n_xindirs = 0;
static ULong stats__n_xindir_misses = 0;

/* And 32-bit temp bins for the above, so that 32-bit platforms don't
   have to do 64 bit incs on the hot path through
   VG_(cp_disp_xindir). */
/*global*/ UInt VG_(stats__n_xindirs_32) = 0;
/*global*/ UInt VG_(stats__n_xindir_misses_32) = 0;

/* Sanity checking counts. */
static UInt sanity_fast_count = 0;
static UInt sanity_slow_count = 0;

void VG_(print_scheduler_stats)(void)
{
   VG_(message)(Vg_DebugMsg,
                "scheduler: %'llu event checks.\n", bbs_done );
   VG_(message)(Vg_DebugMsg,
                "scheduler: %'llu indir transfers, %'llu misses (1 in %llu)\n",
                stats__n_xindirs, stats__n_xindir_misses,
                stats__n_xindirs / (stats__n_xindir_misses
                                    ? stats__n_xindir_misses : 1));
   VG_(message)(Vg_DebugMsg,
                "scheduler: %'llu/%'llu major/minor sched events.\n",
                n_scheduling_events_MAJOR, n_scheduling_events_MINOR);
   VG_(message)(Vg_DebugMsg,
                " sanity: %u cheap, %u expensive checks.\n",
                sanity_fast_count, sanity_slow_count );
}

/*
 * Mutual exclusion object used to serialize threads.
 */
static struct sched_lock *the_BigLock;


/* ---------------------------------------------------------------------
   Helper functions for the scheduler.
   ------------------------------------------------------------------ */


static
void print_sched_event ( ThreadId tid, const HChar* what )
{
   VG_(message)(Vg_DebugMsg, " SCHED[%u]: %s\n", tid, what );
}

/* For showing SB profiles, if the user asks to see them. */
static
void maybe_show_sb_profile ( void )
{
   /* DO NOT MAKE NON-STATIC */
   static ULong bbs_done_lastcheck = 0;
   /* */
   vg_assert(VG_(clo_profyle_interval) > 0);
   Long delta = (Long)(bbs_done - bbs_done_lastcheck);
   vg_assert(delta >= 0);
   if ((ULong)delta >= VG_(clo_profyle_interval)) {
      bbs_done_lastcheck = bbs_done;
      VG_(get_and_show_SB_profile)(bbs_done);
   }
}

static
const HChar* name_of_sched_event ( UInt event )
{
   switch (event) {
      case VEX_TRC_JMP_INVALICACHE:    return "INVALICACHE";
      case VEX_TRC_JMP_FLUSHDCACHE:    return "FLUSHDCACHE";
      case VEX_TRC_JMP_NOREDIR:        return "NOREDIR";
      case VEX_TRC_JMP_SIGILL:         return "SIGILL";
      case VEX_TRC_JMP_SIGTRAP:        return "SIGTRAP";
      case VEX_TRC_JMP_SIGSEGV:        return "SIGSEGV";
      case VEX_TRC_JMP_SIGBUS:         return "SIGBUS";
      case VEX_TRC_JMP_SIGFPE_INTOVF:
      case VEX_TRC_JMP_SIGFPE_INTDIV:  return "SIGFPE";
      case VEX_TRC_JMP_EMWARN:         return "EMWARN";
      case VEX_TRC_JMP_EMFAIL:         return "EMFAIL";
      case VEX_TRC_JMP_CLIENTREQ:      return "CLIENTREQ";
      case VEX_TRC_JMP_YIELD:          return "YIELD";
      case VEX_TRC_JMP_NODECODE:       return "NODECODE";
      case VEX_TRC_JMP_MAPFAIL:        return "MAPFAIL";
      case VEX_TRC_JMP_SYS_SYSCALL:    return "SYSCALL";
      case VEX_TRC_JMP_SYS_INT32:      return "INT32";
      case VEX_TRC_JMP_SYS_INT128:     return "INT128";
      case VEX_TRC_JMP_SYS_INT129:     return "INT129";
      case VEX_TRC_JMP_SYS_INT130:     return "INT130";
      case VEX_TRC_JMP_SYS_INT145:     return "INT145";
      case VEX_TRC_JMP_SYS_INT210:     return "INT210";
      case VEX_TRC_JMP_SYS_SYSENTER:   return "SYSENTER";
      case VEX_TRC_JMP_BORING:         return "VEX_BORING";

      case VG_TRC_BORING:              return "VG_BORING";
      case VG_TRC_INNER_FASTMISS:      return "FASTMISS";
      case VG_TRC_INNER_COUNTERZERO:   return "COUNTERZERO";
      case VG_TRC_FAULT_SIGNAL:        return "FAULTSIGNAL";
      case VG_TRC_INVARIANT_FAILED:    return "INVFAILED";
      case VG_TRC_CHAIN_ME_TO_SLOW_EP: return "CHAIN_ME_SLOW";
      case VG_TRC_CHAIN_ME_TO_FAST_EP: return "CHAIN_ME_FAST";
      default:                         return "??UNKNOWN??";
   }
}

/* Allocate a completely empty ThreadState record. */
ThreadId VG_(alloc_ThreadState) ( void )
{
   Int i;
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty) {
         VG_(threads)[i].status = VgTs_Init;
         VG_(threads)[i].exitreason = VgSrc_None;
         if (VG_(threads)[i].thread_name)
            VG_(free)(VG_(threads)[i].thread_name);
         VG_(threads)[i].thread_name = NULL;
         return i;
      }
   }
   VG_(printf)("Use --max-threads=INT to specify a larger number of threads\n"
               "and rerun valgrind\n");
   VG_(core_panic)("Max number of threads is too low");
   /*NOTREACHED*/
}

/*
   Mark a thread as Runnable.  This will block until the_BigLock is
   available, so that we get exclusive access to all the shared
   structures and the CPU.  Up until we get the_BigLock, we must not
   touch any shared state.

   When this returns, we'll actually be running.
 */
void VG_(acquire_BigLock)(ThreadId tid, const HChar* who)
{
   ThreadState *tst;

#if 0
   if (VG_(clo_trace_sched)) {
      HChar buf[VG_(strlen)(who) + 30];
      VG_(sprintf)(buf, "waiting for lock (%s)", who);
      print_sched_event(tid, buf);
   }
#endif

   /* First, acquire the_BigLock.  We can't do anything else safely
      prior to this point.  Even doing debug printing prior to this
      point is, technically, wrong. */
   VG_(acquire_BigLock_LL)(NULL);

   tst = VG_(get_ThreadState)(tid);

   vg_assert(tst->status != VgTs_Runnable);

   tst->status = VgTs_Runnable;

   if (VG_(running_tid) != VG_INVALID_THREADID)
      VG_(printf)("tid %u found %u running\n", tid, VG_(running_tid));
   vg_assert(VG_(running_tid) == VG_INVALID_THREADID);
   VG_(running_tid) = tid;

   { Addr gsp = VG_(get_SP)(tid);
     if (NULL != VG_(tdict).track_new_mem_stack_w_ECU)
        VG_(unknown_SP_update_w_ECU)(gsp, gsp, 0/*unknown origin*/);
     else
        VG_(unknown_SP_update)(gsp, gsp);
   }

   if (VG_(clo_trace_sched)) {
      HChar buf[VG_(strlen)(who) + 30];
      VG_(sprintf)(buf, " acquired lock (%s)", who);
      print_sched_event(tid, buf);
   }
}

/*
   Set a thread into a sleeping state, and give up exclusive access to
   the CPU.  On return, the thread must be prepared to block until it
   is ready to run again (generally this means blocking in a syscall,
   but it may mean that we remain in a Runnable state and we're just
   yielding the CPU to another thread).
 */
void VG_(release_BigLock)(ThreadId tid, ThreadStatus sleepstate,
                          const HChar* who)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   vg_assert(tst->status == VgTs_Runnable);

   vg_assert(sleepstate == VgTs_WaitSys ||
             sleepstate == VgTs_Yielding);

   tst->status = sleepstate;

   vg_assert(VG_(running_tid) == tid);
   VG_(running_tid) = VG_INVALID_THREADID;

   if (VG_(clo_trace_sched)) {
      const HChar *status = VG_(name_of_ThreadStatus)(sleepstate);
      HChar buf[VG_(strlen)(who) + VG_(strlen)(status) + 30];
      VG_(sprintf)(buf, "releasing lock (%s) -> %s", who, status);
      print_sched_event(tid, buf);
   }

   /* Release the_BigLock; this will reschedule any runnable
      thread. */
   VG_(release_BigLock_LL)(NULL);
}

static void init_BigLock(void)
{
   vg_assert(!the_BigLock);
   the_BigLock = ML_(create_sched_lock)();
}

static void deinit_BigLock(void)
{
   ML_(destroy_sched_lock)(the_BigLock);
   the_BigLock = NULL;
}

/* See pub_core_scheduler.h for description */
void VG_(acquire_BigLock_LL) ( const HChar* who )
{
   ML_(acquire_sched_lock)(the_BigLock);
}

/* See pub_core_scheduler.h for description */
void VG_(release_BigLock_LL) ( const HChar* who )
{
   ML_(release_sched_lock)(the_BigLock);
}

Bool VG_(owns_BigLock_LL) ( ThreadId tid )
{
   return (ML_(get_sched_lock_owner)(the_BigLock)
           == VG_(threads)[tid].os_state.lwpid);
}


/* Clear out the ThreadState and release the semaphore.  Leaves the
   ThreadState in VgTs_Zombie state, so that it doesn't get
   reallocated until the caller is really ready. */
void VG_(exit_thread)(ThreadId tid)
{
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(VG_(is_exiting)(tid));

   mostly_clear_thread_record(tid);
   VG_(running_tid) = VG_INVALID_THREADID;

   /* There should still be a valid exitreason for this thread */
   vg_assert(VG_(threads)[tid].exitreason != VgSrc_None);

   if (VG_(clo_trace_sched))
      print_sched_event(tid, "release lock in VG_(exit_thread)");

   VG_(release_BigLock_LL)(NULL);
}

/* If 'tid' is blocked in a syscall, send it SIGVGKILL so as to get it
   out of the syscall and onto doing the next thing, whatever that is.
   If it isn't blocked in a syscall, this has no effect on the thread. */
void VG_(get_thread_out_of_syscall)(ThreadId tid)
{
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(!VG_(is_running_thread)(tid));

   if (VG_(threads)[tid].status == VgTs_WaitSys) {
      if (VG_(clo_trace_signals)) {
         VG_(message)(Vg_DebugMsg,
                      "get_thread_out_of_syscall zaps tid %u lwp %d\n",
                      tid, VG_(threads)[tid].os_state.lwpid);
      }
#     if defined(VGO_darwin)
      {
         // GrP fixme use mach primitives on darwin?
         // GrP fixme thread_abort_safely?
         // GrP fixme race for thread with WaitSys set but not in syscall yet?
         extern kern_return_t thread_abort(mach_port_t);
         thread_abort(VG_(threads)[tid].os_state.lwpid);
      }
#     else
      {
         __attribute__((unused))
         Int r = VG_(tkill)(VG_(threads)[tid].os_state.lwpid, VG_SIGVGKILL);
         /* JRS 2009-Mar-20: should we assert for r==0 (tkill succeeded)?
            I'm really not sure.  Here's a race scenario which argues
            that we shouldn't; but equally I'm not sure the scenario is
            even possible, because of constraints caused by the question
            of who holds the BigLock when.

            Target thread tid does sys_read on a socket and blocks.  This
            function gets called, and we observe correctly that tid's
            status is WaitSys but then for whatever reason this function
            goes very slowly for a while.  Then data arrives from
            wherever, tid's sys_read returns, tid exits.  Then we do
            tkill on tid, but tid no longer exists; tkill returns an
            error code and the assert fails. */
         /* vg_assert(r == 0); */
      }
#     endif
   }
}

/*
   Yield the CPU for a short time to let some other thread run.
 */
void VG_(vg_yield)(void)
{
   ThreadId tid = VG_(running_tid);

   vg_assert(tid != VG_INVALID_THREADID);
   vg_assert(VG_(threads)[tid].os_state.lwpid == VG_(gettid)());

   VG_(release_BigLock)(tid, VgTs_Yielding, "VG_(vg_yield)");

   /*
      Tell the kernel we're yielding.
    */
#  if defined(VGO_linux) || defined(VGO_darwin)
   VG_(do_syscall0)(__NR_sched_yield);
#  elif defined(VGO_solaris)
   VG_(do_syscall0)(__NR_yield);
#  else
#    error Unknown OS
#  endif

   VG_(acquire_BigLock)(tid, "VG_(vg_yield)");
}


/* Set the standard set of blocked signals, used whenever we're not
   running a client syscall. */
static void block_signals(void)
{
   vki_sigset_t mask;

   VG_(sigfillset)(&mask);

   /* Don't block these because they're synchronous */
   VG_(sigdelset)(&mask, VKI_SIGSEGV);
   VG_(sigdelset)(&mask, VKI_SIGBUS);
   VG_(sigdelset)(&mask, VKI_SIGFPE);
   VG_(sigdelset)(&mask, VKI_SIGILL);
   VG_(sigdelset)(&mask, VKI_SIGTRAP);

   /* Can't block these anyway */
   VG_(sigdelset)(&mask, VKI_SIGSTOP);
   VG_(sigdelset)(&mask, VKI_SIGKILL);

   VG_(sigprocmask)(VKI_SIG_SETMASK, &mask, NULL);
}

static void os_state_clear(ThreadState *tst)
{
   tst->os_state.lwpid = 0;
   tst->os_state.threadgroup = 0;
#  if defined(VGO_linux)
   /* no other fields to clear */
#  elif defined(VGO_darwin)
   tst->os_state.post_mach_trap_fn = NULL;
   tst->os_state.pthread = 0;
   tst->os_state.func_arg = 0;
   VG_(memset)(&tst->os_state.child_go, 0, sizeof(tst->os_state.child_go));
   VG_(memset)(&tst->os_state.child_done, 0, sizeof(tst->os_state.child_done));
   tst->os_state.wq_jmpbuf_valid = False;
   tst->os_state.remote_port = 0;
   tst->os_state.msgh_id = 0;
   VG_(memset)(&tst->os_state.mach_args, 0, sizeof(tst->os_state.mach_args));
#  elif defined(VGO_solaris)
#  if defined(VGP_x86_solaris)
   tst->os_state.thrptr = 0;
#  endif
   tst->os_state.stk_id = (UWord)-1;
   tst->os_state.ustack = NULL;
   tst->os_state.in_door_return = False;
   tst->os_state.door_return_procedure = 0;
   tst->os_state.oldcontext = NULL;
   tst->os_state.schedctl_data = 0;
   tst->os_state.daemon_thread = False;
#  else
#    error "Unknown OS"
#  endif
}

static void os_state_init(ThreadState *tst)
{
   tst->os_state.valgrind_stack_base = 0;
   tst->os_state.valgrind_stack_init_SP = 0;
   os_state_clear(tst);
}

static
void mostly_clear_thread_record ( ThreadId tid )
{
   vki_sigset_t savedmask;

   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   VG_(cleanup_thread)(&VG_(threads)[tid].arch);
   VG_(threads)[tid].tid = tid;

   /* Leave the thread in Zombie, so that it doesn't get reallocated
      until the caller is finally done with the thread stack. */
   VG_(threads)[tid].status = VgTs_Zombie;

   VG_(sigemptyset)(&VG_(threads)[tid].sig_mask);
   VG_(sigemptyset)(&VG_(threads)[tid].tmp_sig_mask);

   os_state_clear(&VG_(threads)[tid]);

   /* start with no altstack */
   VG_(threads)[tid].altstack.ss_sp = (void *)0xdeadbeef;
   VG_(threads)[tid].altstack.ss_size = 0;
   VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;

   VG_(clear_out_queued_signals)(tid, &savedmask);

   VG_(threads)[tid].sched_jmpbuf_valid = False;
}

/*
   Called in the child after fork.  If the parent has multiple
   threads, then we've inherited a VG_(threads) array describing them,
   but only the thread which called fork() is actually alive in the
   child.  This function needs to clean up all those other thread
   structures.

   Whichever tid in the parent called fork() becomes the
   master_tid in the child.  That's because the only living slot in
   VG_(threads) in the child after fork is VG_(threads)[tid], and it
   would be too hard to try to re-number the thread and relocate the
   thread state down to VG_(threads)[1].

   This function also needs to reinitialize the_BigLock, since
   otherwise we may end up sharing its state with the parent, which
   would be deeply confusing.
*/
static void sched_fork_cleanup(ThreadId me)
{
   ThreadId tid;
   vg_assert(VG_(running_tid) == me);

#  if defined(VGO_darwin)
   // GrP fixme hack reset Mach ports
   VG_(mach_init)();
#  endif

   VG_(threads)[me].os_state.lwpid = VG_(gettid)();
   VG_(threads)[me].os_state.threadgroup = VG_(getpid)();

   /* clear out all the unused thread slots */
   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (tid != me) {
         mostly_clear_thread_record(tid);
         VG_(threads)[tid].status = VgTs_Empty;
         VG_(clear_syscallInfo)(tid);
      }
   }

   /* re-init and take the sema */
   deinit_BigLock();
   init_BigLock();
   VG_(acquire_BigLock_LL)(NULL);
}


/* First phase of initialisation of the scheduler.  Initialise the
   bigLock, zeroise the VG_(threads) structure and decide on the
   ThreadId of the root thread.
*/
ThreadId VG_(scheduler_init_phase1) ( void )
{
   Int i;
   ThreadId tid_main;

   VG_(debugLog)(1,"sched","sched_init_phase1\n");

   if (VG_(clo_fair_sched) != disable_fair_sched
       && !ML_(set_sched_lock_impl)(sched_lock_ticket)
       && VG_(clo_fair_sched) == enable_fair_sched)
   {
      VG_(printf)("Error: fair scheduling is not supported on this system.\n");
      VG_(exit)(1);
   }

   if (VG_(clo_verbosity) > 1) {
      VG_(message)(Vg_DebugMsg,
                   "Scheduler: using %s scheduler lock implementation.\n",
                   ML_(get_sched_lock_name)());
   }

   init_BigLock();

   for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
      /* Paranoia .. completely zero it out. */
      VG_(memset)( & VG_(threads)[i], 0, sizeof( VG_(threads)[i] ) );

      VG_(threads)[i].sig_queue = NULL;

      os_state_init(&VG_(threads)[i]);
      mostly_clear_thread_record(i);

      VG_(threads)[i].status = VgTs_Empty;
      VG_(threads)[i].client_stack_szB = 0;
      VG_(threads)[i].client_stack_highest_byte = (Addr)NULL;
      VG_(threads)[i].err_disablement_level = 0;
      VG_(threads)[i].thread_name = NULL;
   }

   tid_main = VG_(alloc_ThreadState)();

   /* Bleh.  Unfortunately there are various places in the system that
      assume that the main thread has a ThreadId of 1.
      - Helgrind (possibly)
      - stack overflow message in default_action() in m_signals.c
      - definitely a lot more places
   */
   vg_assert(tid_main == 1);

   return tid_main;
}


/* Second phase of initialisation of the scheduler.  Given the root
   ThreadId computed by first phase of initialisation, fill in stack
   details and acquire bigLock.  Initialise the scheduler.  This is
   called at startup.  The caller subsequently initialises the guest
   state components of this main thread.
*/
void VG_(scheduler_init_phase2) ( ThreadId tid_main,
                                  Addr clstack_end,
                                  SizeT clstack_size )
{
   VG_(debugLog)(1,"sched","sched_init_phase2: tid_main=%u, "
                 "cls_end=0x%lx, cls_sz=%lu\n",
                 tid_main, clstack_end, clstack_size);

   vg_assert(VG_IS_PAGE_ALIGNED(clstack_end+1));
   vg_assert(VG_IS_PAGE_ALIGNED(clstack_size));

   VG_(threads)[tid_main].client_stack_highest_byte
      = clstack_end;
   VG_(threads)[tid_main].client_stack_szB
      = clstack_size;

   VG_(atfork)(NULL, NULL, sched_fork_cleanup);
}


/* ---------------------------------------------------------------------
   Helpers for running translations.
   ------------------------------------------------------------------ */

/* Use gcc's built-in setjmp/longjmp.  longjmp must not restore signal
   mask state, but does need to pass "val" through.  jumped must be a
   volatile UWord. */
#define SCHEDSETJMP(tid, jumped, stmt)                                  \
   do {                                                                 \
      ThreadState * volatile _qq_tst = VG_(get_ThreadState)(tid);       \
                                                                        \
      (jumped) = VG_MINIMAL_SETJMP(_qq_tst->sched_jmpbuf);              \
      if ((jumped) == ((UWord)0)) {                                     \
         vg_assert(!_qq_tst->sched_jmpbuf_valid);                       \
         _qq_tst->sched_jmpbuf_valid = True;                            \
         stmt;                                                          \
      } else if (VG_(clo_trace_sched))                                  \
         VG_(printf)("SCHEDSETJMP(line %d) tid %u, jumped=%lu\n",       \
                     __LINE__, tid, jumped);                            \
      vg_assert(_qq_tst->sched_jmpbuf_valid);                           \
      _qq_tst->sched_jmpbuf_valid = False;                              \
   } while(0)
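
/* Illustrative usage sketch (see handle_syscall below for a real use):
   'jumped' is 0 on the normal return path and non-zero if the signal
   handler longjmp'd back out of 'stmt':

      volatile UWord jumped = 0;
      SCHEDSETJMP(tid, jumped, VG_(client_syscall)(tid, trc));
      if (jumped != (UWord)0) {
         ... we got here via the fault handler's longjmp ...
      }
*/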


/* Do various guest state alignment checks prior to running a thread.
   Specifically, check that what we have matches Vex's guest state
   layout requirements.  See libvex.h for details, but in short the
   requirements are: There must be no holes in between the primary
   guest state, its two copies, and the spill area.  In short, all 4
   areas must be aligned on the LibVEX_GUEST_STATE_ALIGN boundary and
   be placed back-to-back without holes in between. */
static void do_pre_run_checks ( volatile ThreadState* tst )
{
   Addr a_vex = (Addr) & tst->arch.vex;
   Addr a_vexsh1 = (Addr) & tst->arch.vex_shadow1;
   Addr a_vexsh2 = (Addr) & tst->arch.vex_shadow2;
   Addr a_spill = (Addr) & tst->arch.vex_spill;
   UInt sz_vex = (UInt) sizeof tst->arch.vex;
   UInt sz_vexsh1 = (UInt) sizeof tst->arch.vex_shadow1;
   UInt sz_vexsh2 = (UInt) sizeof tst->arch.vex_shadow2;
   UInt sz_spill = (UInt) sizeof tst->arch.vex_spill;

   if (0)
   VG_(printf)("gst %p %u, sh1 %p %u, "
               "sh2 %p %u, spill %p %u\n",
               (void*)a_vex, sz_vex,
               (void*)a_vexsh1, sz_vexsh1,
               (void*)a_vexsh2, sz_vexsh2,
               (void*)a_spill, sz_spill );

   vg_assert(sz_vex % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(sz_vexsh1 % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(sz_vexsh2 % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(sz_spill % LibVEX_GUEST_STATE_ALIGN == 0);

   vg_assert(a_vex % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(a_vexsh1 % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(a_vexsh2 % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(a_spill % LibVEX_GUEST_STATE_ALIGN == 0);

   /* Check that the guest state and its two shadows have the same
      size, and that there are no holes in between.  The latter is
      important because Memcheck assumes that it can reliably access
      the shadows by indexing off a pointer to the start of the
      primary guest state area. */
   vg_assert(sz_vex == sz_vexsh1);
   vg_assert(sz_vex == sz_vexsh2);
   vg_assert(a_vex + 1 * sz_vex == a_vexsh1);
   vg_assert(a_vex + 2 * sz_vex == a_vexsh2);
   /* Also check there's no hole between the second shadow area and
      the spill area. */
   vg_assert(sz_spill == LibVEX_N_SPILL_BYTES);
   vg_assert(a_vex + 3 * sz_vex == a_spill);
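
   /* Illustration of the layout just verified: the four areas sit
      back to back, each aligned to LibVEX_GUEST_STATE_ALIGN:

         a_vex              : guest state  (sz_vex bytes)
         a_vex + 1 * sz_vex : shadow1      (sz_vex bytes)
         a_vex + 2 * sz_vex : shadow2      (sz_vex bytes)
         a_vex + 3 * sz_vex : spill area   (LibVEX_N_SPILL_BYTES)  */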

#  if defined(VGA_x86)
   /* x86 XMM regs must form an array, ie, have no holes in
      between. */
   vg_assert(
      (offsetof(VexGuestX86State,guest_XMM7)
       - offsetof(VexGuestX86State,guest_XMM0))
      == (8/*#regs*/-1) * 16/*bytes per reg*/
   );
   vg_assert(VG_IS_16_ALIGNED(offsetof(VexGuestX86State,guest_XMM0)));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestX86State,guest_FPREG)));
   vg_assert(8 == offsetof(VexGuestX86State,guest_EAX));
   vg_assert(VG_IS_4_ALIGNED(offsetof(VexGuestX86State,guest_EAX)));
   vg_assert(VG_IS_4_ALIGNED(offsetof(VexGuestX86State,guest_EIP)));
#  endif

#  if defined(VGA_amd64)
   /* amd64 YMM regs must form an array, ie, have no holes in
      between. */
   vg_assert(
      (offsetof(VexGuestAMD64State,guest_YMM16)
       - offsetof(VexGuestAMD64State,guest_YMM0))
      == (17/*#regs*/-1) * 32/*bytes per reg*/
   );
   vg_assert(VG_IS_16_ALIGNED(offsetof(VexGuestAMD64State,guest_YMM0)));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_FPREG)));
   vg_assert(16 == offsetof(VexGuestAMD64State,guest_RAX));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_RAX)));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_RIP)));
#  endif

#  if defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_ppc64le)
   /* ppc guest_state vector regs must be 16 byte aligned for
      loads/stores.  This is important! */
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VSR0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_VSR0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_VSR0));
   /* be extra paranoid .. */
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VSR1));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_VSR1));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_VSR1));
#  endif

#  if defined(VGA_arm)
   /* arm guest_state VFP regs must be 8 byte aligned for
      loads/stores.  Let's use 16 just to be on the safe side. */
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_D0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_D0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_D0));
   /* be extra paranoid .. */
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex.guest_D1));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow1.guest_D1));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow2.guest_D1));
#  endif

#  if defined(VGA_arm64)
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex.guest_X0));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow1.guest_X0));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow2.guest_X0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_Q0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_Q0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_Q0));
#  endif

#  if defined(VGA_s390x)
   /* no special requirements */
#  endif

#  if defined(VGA_mips32) || defined(VGA_mips64)
   /* no special requirements */
#  endif
}

// NO_VGDB_POLL value ensures vgdb is not polled, while
// VGDB_POLL_ASAP ensures that the next scheduler call
// will cause a poll.
#define NO_VGDB_POLL    0xffffffffffffffffULL
#define VGDB_POLL_ASAP  0x0ULL

void VG_(disable_vgdb_poll) (void )
{
   vgdb_next_poll = NO_VGDB_POLL;
}
void VG_(force_vgdb_poll) ( void )
{
   vgdb_next_poll = VGDB_POLL_ASAP;
}

/* Run the thread tid for a while, and return a VG_TRC_* value
   indicating why VG_(disp_run_translations) stopped, and possibly an
   auxiliary word.  Also, only allow the thread to run for at most
   *dispatchCtrP events.  If (as is the normal case) use_alt_host_addr
   is False, we are running ordinary redir'd translations, and we
   should therefore start by looking up the guest next IP in TT.  If
   it is True then we ignore the guest next IP and just run from
   alt_host_addr, which presumably points at host code for a no-redir
   translation.

   Return results are placed in two_words.  two_words[0] is set to the
   TRC.  In the case where that is VG_TRC_CHAIN_ME_TO_{SLOW,FAST}_EP,
   the address to patch is placed in two_words[1].
*/
static
void run_thread_for_a_while ( /*OUT*/HWord* two_words,
                              /*MOD*/Int* dispatchCtrP,
                              ThreadId tid,
                              HWord alt_host_addr,
                              Bool use_alt_host_addr )
{
   volatile HWord jumped = 0;
   volatile ThreadState* tst = NULL; /* stop gcc complaining */
   volatile Int done_this_time = 0;
   volatile HWord host_code_addr = 0;

   /* Paranoia */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(!VG_(is_exiting)(tid));
   vg_assert(*dispatchCtrP > 0);

   tst = VG_(get_ThreadState)(tid);
   do_pre_run_checks( tst );
   /* end Paranoia */

   /* Futz with the XIndir stats counters. */
   vg_assert(VG_(stats__n_xindirs_32) == 0);
   vg_assert(VG_(stats__n_xindir_misses_32) == 0);

   /* Clear return area. */
   two_words[0] = two_words[1] = 0;

   /* Figure out where we're starting from. */
   if (use_alt_host_addr) {
      /* unusual case -- no-redir translation */
      host_code_addr = alt_host_addr;
   } else {
      /* normal case -- redir translation */
      UInt cno = (UInt)VG_TT_FAST_HASH((Addr)tst->arch.vex.VG_INSTR_PTR);
      if (LIKELY(VG_(tt_fast)[cno].guest == (Addr)tst->arch.vex.VG_INSTR_PTR))
         host_code_addr = VG_(tt_fast)[cno].host;
      else {
         Addr res = 0;
         /* Not found in VG_(tt_fast).  Searching the transtab here
            improves performance compared to returning directly to
            the scheduler. */
         Bool found = VG_(search_transtab)(&res, NULL, NULL,
                                           (Addr)tst->arch.vex.VG_INSTR_PTR,
                                           True/*upd cache*/
                                           );
         if (LIKELY(found)) {
            host_code_addr = res;
         } else {
            /* At this point, we know that we intended to start at a
               normal redir translation, but it was not found.  In
               which case we can return now claiming it's not
               findable. */
            two_words[0] = VG_TRC_INNER_FASTMISS; /* hmm, is that right? */
            return;
         }
      }
   }
   /* We have either a no-redir or a redir translation. */
   vg_assert(host_code_addr != 0); /* implausible */

   /* there should be no undealt-with signals */
   //vg_assert(VG_(threads)[tid].siginfo.si_signo == 0);

   /* Set up event counter stuff for the run. */
   tst->arch.vex.host_EvC_COUNTER = *dispatchCtrP;
   tst->arch.vex.host_EvC_FAILADDR
      = (HWord)VG_(fnptr_to_fnentry)( &VG_(disp_cp_evcheck_fail) );

   if (0) {
      vki_sigset_t m;
      Int i, err = VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &m);
      vg_assert(err == 0);
      VG_(printf)("tid %u: entering code with unblocked signals: ", tid);
      for (i = 1; i <= _VKI_NSIG; i++)
         if (!VG_(sigismember)(&m, i))
            VG_(printf)("%d ", i);
      VG_(printf)("\n");
   }

   /* Set up return-value area. */

   // Tell the tool this thread is about to run client code
   VG_TRACK( start_client_code, tid, bbs_done );

   vg_assert(VG_(in_generated_code) == False);
   VG_(in_generated_code) = True;

   SCHEDSETJMP(
      tid,
      jumped,
      VG_(disp_run_translations)(
         two_words,
         (volatile void*)&tst->arch.vex,
         host_code_addr
      )
   );

   vg_assert(VG_(in_generated_code) == True);
   VG_(in_generated_code) = False;

   if (jumped != (HWord)0) {
      /* We get here if the client took a fault that caused our signal
         handler to longjmp. */
      vg_assert(two_words[0] == 0 && two_words[1] == 0); // correct?
      two_words[0] = VG_TRC_FAULT_SIGNAL;
      two_words[1] = 0;
      block_signals();
   }

   /* Merge the 32-bit XIndir/miss counters into the 64 bit versions,
      and zero out the 32-bit ones in preparation for the next run of
      generated code. */
   stats__n_xindirs += (ULong)VG_(stats__n_xindirs_32);
   VG_(stats__n_xindirs_32) = 0;
   stats__n_xindir_misses += (ULong)VG_(stats__n_xindir_misses_32);
   VG_(stats__n_xindir_misses_32) = 0;

   /* Inspect the event counter. */
   vg_assert((Int)tst->arch.vex.host_EvC_COUNTER >= -1);
   vg_assert(tst->arch.vex.host_EvC_FAILADDR
             == (HWord)VG_(fnptr_to_fnentry)( &VG_(disp_cp_evcheck_fail)) );

   /* The number of events done this time is the difference between
      the event counter originally and what it is now.  Except -- if
      it has gone negative (to -1) then the transition 0 to -1 doesn't
      correspond to a real executed block, so back it out.  It's like
      this because the event checks decrement the counter first and
      check it for negativeness second, hence the 0 to -1 transition
      causes a bailout and the block it happens in isn't executed. */
   {
     Int dispatchCtrAfterwards = (Int)tst->arch.vex.host_EvC_COUNTER;
     done_this_time = *dispatchCtrP - dispatchCtrAfterwards;
     if (dispatchCtrAfterwards == -1) {
        done_this_time--;
     } else {
        /* If the generated code drives the counter below -1, something
           is seriously wrong. */
        vg_assert(dispatchCtrAfterwards >= 0);
     }
   }
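
   /* Worked example: if *dispatchCtrP was 100000 and host_EvC_COUNTER
      now reads 99873, then 127 blocks were executed.  If it reads -1,
      the final 0 -> -1 decrement bailed out before running a block,
      so one is subtracted back off (the adjustment just above). */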

   vg_assert(done_this_time >= 0);
   bbs_done += (ULong)done_this_time;

   *dispatchCtrP -= done_this_time;
   vg_assert(*dispatchCtrP >= 0);

   // Tell the tool this thread has stopped running client code
   VG_TRACK( stop_client_code, tid, bbs_done );

   if (bbs_done >= vgdb_next_poll) {
      if (VG_(clo_vgdb_poll))
         vgdb_next_poll = bbs_done + (ULong)VG_(clo_vgdb_poll);
      else
         /* value was changed due to gdbserver invocation via ptrace */
         vgdb_next_poll = NO_VGDB_POLL;
      if (VG_(gdbserver_activity) (tid))
         VG_(gdbserver) (tid);
   }

   /* TRC value and possible auxiliary patch-address word are already
      in two_words[0] and [1] respectively, as a result of the call to
      VG_(disp_run_translations). */
   /* Stay sane .. */
   if (two_words[0] == VG_TRC_CHAIN_ME_TO_SLOW_EP
       || two_words[0] == VG_TRC_CHAIN_ME_TO_FAST_EP) {
      vg_assert(two_words[1] != 0); /* we have a legit patch addr */
   } else {
      vg_assert(two_words[1] == 0); /* nobody messed with it */
   }
}


/* ---------------------------------------------------------------------
   The scheduler proper.
   ------------------------------------------------------------------ */

static void handle_tt_miss ( ThreadId tid )
{
   Bool found;
   Addr ip = VG_(get_IP)(tid);

   /* Trivial event.  Miss in the fast-cache.  Do a full
      lookup for it. */
   found = VG_(search_transtab)( NULL, NULL, NULL,
                                 ip, True/*upd_fast_cache*/ );
   if (UNLIKELY(!found)) {
      /* Not found; we need to request a translation. */
      if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/,
                          bbs_done, True/*allow redirection*/ )) {
         found = VG_(search_transtab)( NULL, NULL, NULL,
                                       ip, True );
         vg_assert2(found, "handle_tt_miss: missing tt_fast entry");

      } else {
         // If VG_(translate)() fails, it's because it had to throw a
         // signal because the client jumped to a bad address.  That
         // means that either a signal has been set up for delivery,
         // or the thread has been marked for termination.  Either
         // way, we just need to go back into the scheduler loop.
      }
   }
}

static
void handle_chain_me ( ThreadId tid, void* place_to_chain, Bool toFastEP )
{
   Bool found = False;
   Addr ip = VG_(get_IP)(tid);
   SECno to_sNo = INV_SNO;
   TTEno to_tteNo = INV_TTE;

   found = VG_(search_transtab)( NULL, &to_sNo, &to_tteNo,
                                 ip, False/*dont_upd_fast_cache*/ );
   if (!found) {
      /* Not found; we need to request a translation. */
      if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/,
                          bbs_done, True/*allow redirection*/ )) {
         found = VG_(search_transtab)( NULL, &to_sNo, &to_tteNo,
                                       ip, False );
         vg_assert2(found, "handle_chain_me: missing tt_fast entry");
      } else {
         // If VG_(translate)() fails, it's because it had to throw a
         // signal because the client jumped to a bad address.  That
         // means that either a signal has been set up for delivery,
         // or the thread has been marked for termination.  Either
         // way, we just need to go back into the scheduler loop.
         return;
      }
   }
   vg_assert(found);
   vg_assert(to_sNo != INV_SNO);
   vg_assert(to_tteNo != INV_TTE);

   /* So, finally we know where to patch through to.  Do the patching
      and update the various admin tables that allow it to be undone
      in the case that the destination block gets deleted. */
   VG_(tt_tc_do_chaining)( place_to_chain,
                           to_sNo, to_tteNo, toFastEP );
}
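
/* Note (illustrative): handle_chain_me is the second half of the
   chaining protocol.  When run_thread_for_a_while returns
   VG_TRC_CHAIN_ME_TO_{SLOW,FAST}_EP, two_words[1] holds the address to
   patch, which is presumably what the scheduler passes here as
   'place_to_chain'. */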
1100
njnf76d27a2009-05-28 01:53:07 +00001101static void handle_syscall(ThreadId tid, UInt trc)
sewardjb5f6f512005-03-10 23:59:00 +00001102{
sewardj1ac9d0c2007-05-01 14:18:48 +00001103 ThreadState * volatile tst = VG_(get_ThreadState)(tid);
sewardjadbb4912011-09-29 17:34:17 +00001104 volatile UWord jumped;
sewardjb5f6f512005-03-10 23:59:00 +00001105
1106 /* Syscall may or may not block; either way, it will be
1107 complete by the time this call returns, and we'll be
1108 runnable again. We could take a signal while the
1109 syscall runs. */
sewardj45f4e7c2005-09-27 19:20:21 +00001110
sewardj67553572014-09-01 21:12:44 +00001111 if (VG_(clo_sanity_level) >= 3) {
florianf44ff622014-12-20 16:52:08 +00001112 HChar buf[50]; // large enough
florianc6e5d762015-08-05 22:27:24 +00001113 VG_(sprintf)(buf, "(BEFORE SYSCALL, tid %u)", tid);
sewardj594fc462014-07-08 08:05:02 +00001114 Bool ok = VG_(am_do_sync_check)(buf, __FILE__, __LINE__);
1115 vg_assert(ok);
1116 }
sewardj45f4e7c2005-09-27 19:20:21 +00001117
njnf76d27a2009-05-28 01:53:07 +00001118 SCHEDSETJMP(tid, jumped, VG_(client_syscall)(tid, trc));
sewardjb5f6f512005-03-10 23:59:00 +00001119
sewardj67553572014-09-01 21:12:44 +00001120 if (VG_(clo_sanity_level) >= 3) {
florianf44ff622014-12-20 16:52:08 +00001121 HChar buf[50]; // large enough
florianc6e5d762015-08-05 22:27:24 +00001122 VG_(sprintf)(buf, "(AFTER SYSCALL, tid %u)", tid);
sewardj594fc462014-07-08 08:05:02 +00001123 Bool ok = VG_(am_do_sync_check)(buf, __FILE__, __LINE__);
1124 vg_assert(ok);
1125 }
sewardj45f4e7c2005-09-27 19:20:21 +00001126
sewardjb5f6f512005-03-10 23:59:00 +00001127 if (!VG_(is_running_thread)(tid))
florianc6e5d762015-08-05 22:27:24 +00001128 VG_(printf)("tid %u not running; VG_(running_tid)=%u, tid %u status %u\n",
njnc7561b92005-06-19 01:24:32 +00001129 tid, VG_(running_tid), tid, tst->status);
sewardjb5f6f512005-03-10 23:59:00 +00001130 vg_assert(VG_(is_running_thread)(tid));
1131
sewardjadbb4912011-09-29 17:34:17 +00001132 if (jumped != (UWord)0) {
njn1dcee092009-02-24 03:07:37 +00001133 block_signals();
sewardjb5f6f512005-03-10 23:59:00 +00001134 VG_(poll_signals)(tid);
1135 }
1136}
1137
sewardja591a052006-01-12 14:04:46 +00001138/* tid just requested a jump to the noredir version of its current
1139 program counter. So make up that translation if needed, run it,
sewardj291849f2012-04-20 23:58:55 +00001140 and return the resulting thread return code in two_words[]. */
1141static
1142void handle_noredir_jump ( /*OUT*/HWord* two_words,
1143 /*MOD*/Int* dispatchCtrP,
1144 ThreadId tid )
sewardja591a052006-01-12 14:04:46 +00001145{
sewardj291849f2012-04-20 23:58:55 +00001146 /* Clear return area. */
1147 two_words[0] = two_words[1] = 0;
1148
florian44bd4462014-12-29 17:04:46 +00001149 Addr hcode = 0;
sewardja591a052006-01-12 14:04:46 +00001150 Addr ip = VG_(get_IP)(tid);
1151
1152 Bool found = VG_(search_unredir_transtab)( &hcode, ip );
1153 if (!found) {
1154 /* Not found; we need to request a translation. */
1155 if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/, bbs_done,
1156 False/*NO REDIRECTION*/ )) {
1157
1158 found = VG_(search_unredir_transtab)( &hcode, ip );
1159 vg_assert2(found, "unredir translation missing after creation?!");
sewardja591a052006-01-12 14:04:46 +00001160 } else {
1161 // If VG_(translate)() fails, it's because it had to throw a
1162 // signal because the client jumped to a bad address. That
1163 // means that either a signal has been set up for delivery,
1164 // or the thread has been marked for termination. Either
1165 // way, we just need to go back into the scheduler loop.
sewardj291849f2012-04-20 23:58:55 +00001166 two_words[0] = VG_TRC_BORING;
1167 return;
sewardja591a052006-01-12 14:04:46 +00001168 }
1169
1170 }
1171
1172 vg_assert(found);
1173 vg_assert(hcode != 0);
1174
sewardj291849f2012-04-20 23:58:55 +00001175 /* Otherwise run it and return the resulting VG_TRC_* value. */
1176 vg_assert(*dispatchCtrP > 0); /* so as to guarantee progress */
1177 run_thread_for_a_while( two_words, dispatchCtrP, tid,
1178 hcode, True/*use hcode*/ );
sewardja591a052006-01-12 14:04:46 +00001179}
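/* Contrast with handle_chain_me above (just a summary of the code): no
   chaining happens on this path; the unredirected translation is looked
   up or created and then run immediately via run_thread_for_a_while,
   which is given the host code address 'hcode' directly. */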
1180
1181
sewardjb5f6f512005-03-10 23:59:00 +00001182/*
1183 Run a thread until it wants to exit.
1184
sewardjad0a3a82006-12-17 18:58:55 +00001185 We assume that the caller has already called VG_(acquire_BigLock) for
sewardjb5f6f512005-03-10 23:59:00 +00001186 us, so we own the VCPU. Also, all signals are blocked.
1187 */
1188VgSchedReturnCode VG_(scheduler) ( ThreadId tid )
1189{
sewardj291849f2012-04-20 23:58:55 +00001190 /* Holds the remaining size of this thread's "timeslice". */
1191 Int dispatch_ctr = 0;
1192
sewardjb5f6f512005-03-10 23:59:00 +00001193 ThreadState *tst = VG_(get_ThreadState)(tid);
sewardj3b290482011-05-06 21:02:55 +00001194 static Bool vgdb_startup_action_done = False;
sewardje663cb92002-04-12 10:26:32 +00001195
sewardjc24be7a2005-03-15 01:40:12 +00001196 if (VG_(clo_trace_sched))
1197 print_sched_event(tid, "entering VG_(scheduler)");
1198
sewardj3b290482011-05-06 21:02:55 +00001199 /* Do vgdb initialization (but once). Only the first (main) task
1200 starting up will do the below.
1201      Initializing gdbserver earlier than at the first call of
1202      VG_(scheduler) causes problems:
1203 * at the end of VG_(scheduler_init_phase2) :
1204 The main thread is in VgTs_Init state, but in a not yet
1205         The main thread is in VgTs_Init state, but not yet in a
1206 (e.g. causes an assert in LibVEX_GuestX86_get_eflags when giving
1207 back the guest registers to gdb).
1208 * at end of valgrind_main, just
1209 before VG_(main_thread_wrapper_NORETURN)(1) :
1210 The main thread is still in VgTs_Init state but in a
1211 more advanced state. However, the thread state is not yet
1212         completely initialized: among other things, the os_state is not yet fully
1213 set => the thread is then not properly reported to gdb,
1214         which is then confused (causing e.g. a duplicate thread to be
1215 shown, without thread id).
1216 * it would be possible to initialize gdbserver "lower" in the
1217 call stack (e.g. in VG_(main_thread_wrapper_NORETURN)) but
1218 these are platform dependent and the place at which
1219 the thread state is completely initialized is not
1220 specific anymore to the main thread (so a similar "do it only
1221 once" would be needed).
1222
1223 => a "once only" initialization here is the best compromise. */
1224 if (!vgdb_startup_action_done) {
1225 vg_assert(tid == 1); // it must be the main thread.
1226 vgdb_startup_action_done = True;
1227 if (VG_(clo_vgdb) != Vg_VgdbNo) {
1228 /* If we have to poll, ensures we do an initial poll at first
1229         /* If we have to poll, ensure we do an initial poll at the first
1230 by ptrace). */
1231 if (VG_(clo_vgdb_poll))
1232 VG_(force_vgdb_poll) ();
1233 else
1234 VG_(disable_vgdb_poll) ();
1235
1236 vg_assert (VG_(dyn_vgdb_error) == VG_(clo_vgdb_error));
1237 /* As we are initializing, VG_(dyn_vgdb_error) can't have been
1238 changed yet. */
1239
sewardj997546c2011-05-17 18:14:53 +00001240 VG_(gdbserver_prerun_action) (1);
sewardj3b290482011-05-06 21:02:55 +00001241 } else {
1242 VG_(disable_vgdb_poll) ();
1243 }
1244 }
1245
philippe0d22fc02014-08-21 20:01:50 +00001246 if (SimHintiS(SimHint_no_nptl_pthread_stackcache, VG_(clo_sim_hints))
1247 && tid != 1) {
1248 /* We disable the stack cache the first time we see a thread other
1249 than the main thread appearing. At this moment, we are sure the pthread
1250         lib loading is done and the stack cache variable was initialised by the pthread lib, etc. */
1251 if (VG_(client__stack_cache_actsize__addr)) {
1252 if (*VG_(client__stack_cache_actsize__addr) == 0) {
1253 VG_(debugLog)(1,"sched",
1254 "pthread stack cache size disable done"
1255 " via kludge\n");
1256 *VG_(client__stack_cache_actsize__addr) = 1000 * 1000 * 1000;
1257 /* Set a value big enough to be above the hardcoded maximum stack
1258 cache size in glibc, small enough to allow a pthread stack size
1259 to be added without risk of overflow. */
1260 }
1261 } else {
1262 VG_(debugLog)(0,"sched",
1263 "WARNING: pthread stack cache cannot be disabled!\n");
philippef35dad72014-09-02 18:34:25 +00001264 VG_(clo_sim_hints) &= ~SimHint2S(SimHint_no_nptl_pthread_stackcache);
philippe0d22fc02014-08-21 20:01:50 +00001265 /* Remove SimHint_no_nptl_pthread_stackcache from VG_(clo_sim_hints)
1266 to avoid having a msg for all following threads. */
1267 }
1268 }
1269
sewardjb5f6f512005-03-10 23:59:00 +00001270 /* set the proper running signal mask */
njn1dcee092009-02-24 03:07:37 +00001271 block_signals();
sewardjb5f6f512005-03-10 23:59:00 +00001272
1273 vg_assert(VG_(is_running_thread)(tid));
sewardje663cb92002-04-12 10:26:32 +00001274
sewardj291849f2012-04-20 23:58:55 +00001275 dispatch_ctr = SCHEDULING_QUANTUM;
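   /* Explanatory note (a reading of the code below, not new behaviour):
      run_thread_for_a_while is handed &dispatch_ctr and it is counted
      down as translations are dispatched; when it reaches zero the
      thread comes back with VG_TRC_INNER_COUNTERZERO and the check at
      the top of the loop releases the Big Lock so another thread can
      take a timeslice. */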
sewardj6072c362002-04-19 14:40:57 +00001276
sewardjf54342a2006-10-17 01:51:24 +00001277 while (!VG_(is_exiting)(tid)) {
1278
sewardj291849f2012-04-20 23:58:55 +00001279 vg_assert(dispatch_ctr >= 0);
1280 if (dispatch_ctr == 0) {
sewardjf54342a2006-10-17 01:51:24 +00001281
sewardjf54342a2006-10-17 01:51:24 +00001282 /* Our slice is done, so yield the CPU to another thread. On
1283            Linux, this doesn't sleep between yielding and running,
sewardj6e9de462011-06-28 07:25:29 +00001284 since that would take too much time. */
sewardjf54342a2006-10-17 01:51:24 +00001285
1286 /* 4 July 06: it seems that a zero-length nsleep is needed to
1287 cause async thread cancellation (canceller.c) to terminate
1288 in finite time; else it is in some kind of race/starvation
1289 situation and completion is arbitrarily delayed (although
1290 this is not a deadlock).
1291
1292 Unfortunately these sleeps cause MPI jobs not to terminate
1293 sometimes (some kind of livelock). So sleeping once
1294 every N opportunities appears to work. */
1295
1296 /* 3 Aug 06: doing sys__nsleep works but crashes some apps.
1297 sys_yield also helps the problem, whilst not crashing apps. */
1298
sewardjad0a3a82006-12-17 18:58:55 +00001299 VG_(release_BigLock)(tid, VgTs_Yielding,
1300 "VG_(scheduler):timeslice");
sewardjf54342a2006-10-17 01:51:24 +00001301 /* ------------ now we don't have The Lock ------------ */
1302
sewardjad0a3a82006-12-17 18:58:55 +00001303 VG_(acquire_BigLock)(tid, "VG_(scheduler):timeslice");
sewardjf54342a2006-10-17 01:51:24 +00001304 /* ------------ now we do have The Lock ------------ */
sewardje663cb92002-04-12 10:26:32 +00001305
sewardjb5f6f512005-03-10 23:59:00 +00001306 /* OK, do some relatively expensive housekeeping stuff */
1307 scheduler_sanity(tid);
1308 VG_(sanity_check_general)(False);
sewardje663cb92002-04-12 10:26:32 +00001309
sewardjb5f6f512005-03-10 23:59:00 +00001310 /* Look for any pending signals for this thread, and set them up
1311 for delivery */
1312 VG_(poll_signals)(tid);
sewardje663cb92002-04-12 10:26:32 +00001313
sewardjb5f6f512005-03-10 23:59:00 +00001314 if (VG_(is_exiting)(tid))
1315 break; /* poll_signals picked up a fatal signal */
sewardje663cb92002-04-12 10:26:32 +00001316
sewardjb5f6f512005-03-10 23:59:00 +00001317 /* For stats purposes only. */
1318 n_scheduling_events_MAJOR++;
sewardje663cb92002-04-12 10:26:32 +00001319
sewardj0a1086e2014-08-29 19:12:38 +00001320         /* Figure out how many bbs to ask run_thread_for_a_while to do. */
sewardj291849f2012-04-20 23:58:55 +00001321 dispatch_ctr = SCHEDULING_QUANTUM;
jsgf855d93d2003-10-13 22:26:55 +00001322
sewardjb5f6f512005-03-10 23:59:00 +00001323 /* paranoia ... */
1324 vg_assert(tst->tid == tid);
1325 vg_assert(tst->os_state.lwpid == VG_(gettid)());
sewardje663cb92002-04-12 10:26:32 +00001326 }
1327
sewardjb5f6f512005-03-10 23:59:00 +00001328 /* For stats purposes only. */
1329 n_scheduling_events_MINOR++;
sewardje663cb92002-04-12 10:26:32 +00001330
1331 if (0)
florianc6e5d762015-08-05 22:27:24 +00001332 VG_(message)(Vg_DebugMsg, "thread %u: running for %d bbs\n",
sewardj291849f2012-04-20 23:58:55 +00001333 tid, dispatch_ctr - 1 );
sewardje663cb92002-04-12 10:26:32 +00001334
sewardj291849f2012-04-20 23:58:55 +00001335 HWord trc[2]; /* "two_words" */
1336 run_thread_for_a_while( &trc[0],
1337 &dispatch_ctr,
1338 tid, 0/*ignored*/, False );
sewardje663cb92002-04-12 10:26:32 +00001339
sewardjb5f6f512005-03-10 23:59:00 +00001340 if (VG_(clo_trace_sched) && VG_(clo_verbosity) > 2) {
florianf44ff622014-12-20 16:52:08 +00001341 const HChar *name = name_of_sched_event(trc[0]);
1342 HChar buf[VG_(strlen)(name) + 10]; // large enough
1343 VG_(sprintf)(buf, "TRC: %s", name);
sewardjb5f6f512005-03-10 23:59:00 +00001344 print_sched_event(tid, buf);
sewardje663cb92002-04-12 10:26:32 +00001345 }
1346
sewardj291849f2012-04-20 23:58:55 +00001347 if (trc[0] == VEX_TRC_JMP_NOREDIR) {
sewardj0ec07f32006-01-12 12:32:32 +00001348 /* If we got a request to run a no-redir version of
1349 something, do so now -- handle_noredir_jump just (creates
1350 and) runs that one translation. The flip side is that the
1351 noredir translation can't itself return another noredir
1352 request -- that would be nonsensical. It can, however,
1353 return VG_TRC_BORING, which just means keep going as
1354 normal. */
sewardj291849f2012-04-20 23:58:55 +00001355 /* Note that the fact that we need to continue with a
1356 no-redir jump is not recorded anywhere else in this
1357 thread's state. So we *must* execute the block right now
1358 -- we can't fail to execute it and later resume with it,
1359 because by then we'll have forgotten the fact that it
1360 should be run as no-redir, but will get run as a normal
1361 potentially-redir'd, hence screwing up. This really ought
1362 to be cleaned up, by noting in the guest state that the
1363 next block to be executed should be no-redir. Then we can
1364 suspend and resume at any point, which isn't the case at
1365 the moment. */
sewardj0a1086e2014-08-29 19:12:38 +00001366 /* We can't enter a no-redir translation with the dispatch
1367 ctr set to zero, for the reasons commented just above --
1368 we need to force it to execute right now. So, if the
1369 dispatch ctr is zero, set it to one. Note that this would
1370            have the bad side effect of holding the Big Lock arbitrarily
1371 long should there be an arbitrarily long sequence of
1372 back-to-back no-redir translations to run. But we assert
1373 just below that this translation cannot request another
1374 no-redir jump, so we should be safe against that. */
1375 if (dispatch_ctr == 0) {
1376 dispatch_ctr = 1;
1377 }
sewardj291849f2012-04-20 23:58:55 +00001378 handle_noredir_jump( &trc[0],
1379 &dispatch_ctr,
1380 tid );
1381 vg_assert(trc[0] != VEX_TRC_JMP_NOREDIR);
1382
1383 /* This can't be allowed to happen, since it means the block
1384 didn't execute, and we have no way to resume-as-noredir
1385 after we get more timeslice. But I don't think it ever
1386 can, since handle_noredir_jump will assert if the counter
1387 is zero on entry. */
1388 vg_assert(trc[0] != VG_TRC_INNER_COUNTERZERO);
sewardj0a1086e2014-08-29 19:12:38 +00001389 /* This asserts the same thing. */
1390 vg_assert(dispatch_ctr >= 0);
sewardj291849f2012-04-20 23:58:55 +00001391
1392 /* A no-redir translation can't return with a chain-me
1393 request, since chaining in the no-redir cache is too
1394 complex. */
1395 vg_assert(trc[0] != VG_TRC_CHAIN_ME_TO_SLOW_EP
1396 && trc[0] != VG_TRC_CHAIN_ME_TO_FAST_EP);
sewardj0ec07f32006-01-12 12:32:32 +00001397 }
1398
sewardj291849f2012-04-20 23:58:55 +00001399 switch (trc[0]) {
1400 case VEX_TRC_JMP_BORING:
1401 /* assisted dispatch, no event. Used by no-redir
1402 translations to force return to the scheduler. */
sewardj0ec07f32006-01-12 12:32:32 +00001403 case VG_TRC_BORING:
1404 /* no special event, just keep going. */
1405 break;
1406
sewardjb5f6f512005-03-10 23:59:00 +00001407 case VG_TRC_INNER_FASTMISS:
sewardj0a1086e2014-08-29 19:12:38 +00001408 vg_assert(dispatch_ctr >= 0);
sewardjb5f6f512005-03-10 23:59:00 +00001409 handle_tt_miss(tid);
1410 break;
sewardj291849f2012-04-20 23:58:55 +00001411
1412 case VG_TRC_CHAIN_ME_TO_SLOW_EP: {
1413 if (0) VG_(printf)("sched: CHAIN_TO_SLOW_EP: %p\n", (void*)trc[1] );
1414 handle_chain_me(tid, (void*)trc[1], False);
1415 break;
1416 }
1417
1418 case VG_TRC_CHAIN_ME_TO_FAST_EP: {
1419 if (0) VG_(printf)("sched: CHAIN_TO_FAST_EP: %p\n", (void*)trc[1] );
1420 handle_chain_me(tid, (void*)trc[1], True);
1421 break;
1422 }
1423
sewardjb5f6f512005-03-10 23:59:00 +00001424 case VEX_TRC_JMP_CLIENTREQ:
1425 do_client_request(tid);
1426 break;
sewardja0fef1b2005-11-03 13:46:30 +00001427
1428 case VEX_TRC_JMP_SYS_INT128: /* x86-linux */
njnf76d27a2009-05-28 01:53:07 +00001429 case VEX_TRC_JMP_SYS_INT129: /* x86-darwin */
1430 case VEX_TRC_JMP_SYS_INT130: /* x86-darwin */
sewardj8eb8bab2015-07-21 14:44:28 +00001431 case VEX_TRC_JMP_SYS_INT145: /* x86-solaris */
1432 case VEX_TRC_JMP_SYS_INT210: /* x86-solaris */
1433 /* amd64-linux, ppc32-linux, amd64-darwin, amd64-solaris */
1434 case VEX_TRC_JMP_SYS_SYSCALL:
sewardj291849f2012-04-20 23:58:55 +00001435 handle_syscall(tid, trc[0]);
sewardjb5f6f512005-03-10 23:59:00 +00001436 if (VG_(clo_sanity_level) > 2)
1437 VG_(sanity_check_general)(True); /* sanity-check every syscall */
1438 break;
sewardje663cb92002-04-12 10:26:32 +00001439
sewardjb5f6f512005-03-10 23:59:00 +00001440 case VEX_TRC_JMP_YIELD:
1441 /* Explicit yield, because this thread is in a spin-lock
sewardj3fc75752005-03-12 15:16:31 +00001442 or something. Only let the thread run for a short while
1443 longer. Because swapping to another thread is expensive,
1444 we're prepared to let this thread eat a little more CPU
1445 before swapping to another. That means that short term
1446 spins waiting for hardware to poke memory won't cause a
1447 thread swap. */
sewardj0a1086e2014-08-29 19:12:38 +00001448 if (dispatch_ctr > 1000)
sewardje1374cf2013-03-28 10:40:53 +00001449 dispatch_ctr = 1000;
sewardjb5f6f512005-03-10 23:59:00 +00001450 break;
sewardje663cb92002-04-12 10:26:32 +00001451
sewardjb5f6f512005-03-10 23:59:00 +00001452 case VG_TRC_INNER_COUNTERZERO:
1453 /* Timeslice is out. Let a new thread be scheduled. */
sewardj291849f2012-04-20 23:58:55 +00001454 vg_assert(dispatch_ctr == 0);
sewardjb5f6f512005-03-10 23:59:00 +00001455 break;
sewardje663cb92002-04-12 10:26:32 +00001456
sewardjb5f6f512005-03-10 23:59:00 +00001457 case VG_TRC_FAULT_SIGNAL:
1458 /* Everything should be set up (either we're exiting, or
1459 about to start in a signal handler). */
1460 break;
sewardj9d1b5d32002-04-17 19:40:49 +00001461
sewardj07bdc5e2005-03-11 13:19:47 +00001462 case VEX_TRC_JMP_MAPFAIL:
1463 /* Failure of arch-specific address translation (x86/amd64
1464 segment override use) */
1465 /* jrs 2005 03 11: is this correct? */
1466 VG_(synth_fault)(tid);
1467 break;
1468
sewardjb5f6f512005-03-10 23:59:00 +00001469 case VEX_TRC_JMP_EMWARN: {
florian2e497412012-08-26 03:22:09 +00001470 static Int counts[EmNote_NUMBER];
sewardjb5f6f512005-03-10 23:59:00 +00001471 static Bool counts_initted = False;
florian2e497412012-08-26 03:22:09 +00001472 VexEmNote ew;
florian11f3cc82012-10-21 02:19:35 +00001473 const HChar* what;
sewardjb5f6f512005-03-10 23:59:00 +00001474 Bool show;
1475 Int q;
1476 if (!counts_initted) {
1477 counts_initted = True;
florian2e497412012-08-26 03:22:09 +00001478 for (q = 0; q < EmNote_NUMBER; q++)
sewardjb5f6f512005-03-10 23:59:00 +00001479 counts[q] = 0;
1480 }
florian2e497412012-08-26 03:22:09 +00001481 ew = (VexEmNote)VG_(threads)[tid].arch.vex.guest_EMNOTE;
1482 what = (ew < 0 || ew >= EmNote_NUMBER)
sewardjb5f6f512005-03-10 23:59:00 +00001483 ? "unknown (?!)"
florian2e497412012-08-26 03:22:09 +00001484 : LibVEX_EmNote_string(ew);
1485 show = (ew < 0 || ew >= EmNote_NUMBER)
sewardjb5f6f512005-03-10 23:59:00 +00001486 ? True
1487 : counts[ew]++ < 3;
sewardjd68ac3e2006-01-20 14:31:57 +00001488 if (show && VG_(clo_show_emwarns) && !VG_(clo_xml)) {
sewardjb5f6f512005-03-10 23:59:00 +00001489 VG_(message)( Vg_UserMsg,
sewardj738856f2009-07-15 14:48:32 +00001490 "Emulation warning: unsupported action:\n");
1491 VG_(message)( Vg_UserMsg, " %s\n", what);
njnd01fef72005-03-25 23:35:48 +00001492 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
sewardjb5f6f512005-03-10 23:59:00 +00001493 }
1494 break;
1495 }
sewardje663cb92002-04-12 10:26:32 +00001496
sewardjd68ac3e2006-01-20 14:31:57 +00001497 case VEX_TRC_JMP_EMFAIL: {
florian2e497412012-08-26 03:22:09 +00001498 VexEmNote ew;
florian11f3cc82012-10-21 02:19:35 +00001499 const HChar* what;
florian2e497412012-08-26 03:22:09 +00001500 ew = (VexEmNote)VG_(threads)[tid].arch.vex.guest_EMNOTE;
1501 what = (ew < 0 || ew >= EmNote_NUMBER)
sewardjd68ac3e2006-01-20 14:31:57 +00001502 ? "unknown (?!)"
florian2e497412012-08-26 03:22:09 +00001503 : LibVEX_EmNote_string(ew);
sewardjd68ac3e2006-01-20 14:31:57 +00001504 VG_(message)( Vg_UserMsg,
sewardj738856f2009-07-15 14:48:32 +00001505 "Emulation fatal error -- Valgrind cannot continue:\n");
1506 VG_(message)( Vg_UserMsg, " %s\n", what);
sewardjd68ac3e2006-01-20 14:31:57 +00001507 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
sewardj738856f2009-07-15 14:48:32 +00001508 VG_(message)(Vg_UserMsg, "\n");
1509 VG_(message)(Vg_UserMsg, "Valgrind has to exit now. Sorry.\n");
1510 VG_(message)(Vg_UserMsg, "\n");
sewardjd68ac3e2006-01-20 14:31:57 +00001511 VG_(exit)(1);
1512 break;
1513 }
1514
dejanj24f0c3a2014-02-19 11:57:22 +00001515 case VEX_TRC_JMP_SIGILL:
1516 VG_(synth_sigill)(tid, VG_(get_IP)(tid));
1517 break;
1518
sewardj4f9d6742007-08-29 09:11:35 +00001519 case VEX_TRC_JMP_SIGTRAP:
sewardj86df1552006-02-07 20:56:41 +00001520 VG_(synth_sigtrap)(tid);
1521 break;
1522
sewardj4f9d6742007-08-29 09:11:35 +00001523 case VEX_TRC_JMP_SIGSEGV:
1524 VG_(synth_fault)(tid);
1525 break;
1526
sewardj1c0ce7a2009-07-01 08:10:49 +00001527 case VEX_TRC_JMP_SIGBUS:
1528 VG_(synth_sigbus)(tid);
1529 break;
1530
petarj80e5c172012-10-19 14:45:17 +00001531 case VEX_TRC_JMP_SIGFPE_INTDIV:
1532 VG_(synth_sigfpe)(tid, VKI_FPE_INTDIV);
1533 break;
1534
1535 case VEX_TRC_JMP_SIGFPE_INTOVF:
1536 VG_(synth_sigfpe)(tid, VKI_FPE_INTOVF);
1537 break;
1538
florian2baf7532012-07-26 02:41:31 +00001539 case VEX_TRC_JMP_NODECODE: {
1540 Addr addr = VG_(get_IP)(tid);
1541
sewardjc30cd9b2012-12-06 18:08:54 +00001542 if (VG_(clo_sigill_diag)) {
1543 VG_(umsg)(
1544 "valgrind: Unrecognised instruction at address %#lx.\n", addr);
1545 VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
sewardjc76d0e52014-05-03 21:22:55 +00001546# define M(a) VG_(umsg)(a "\n");
1547 M("Your program just tried to execute an instruction that Valgrind" );
1548 M("did not recognise. There are two possible reasons for this." );
1549 M("1. Your program has a bug and erroneously jumped to a non-code" );
1550 M(" location. If you are running Memcheck and you just saw a" );
1551 M(" warning about a bad jump, it's probably your program's fault.");
1552 M("2. The instruction is legitimate but Valgrind doesn't handle it,");
1553 M(" i.e. it's Valgrind's fault. If you think this is the case or");
1554 M(" you are not sure, please let us know and we'll try to fix it.");
1555 M("Either way, Valgrind will now raise a SIGILL signal which will" );
1556 M("probably kill your program." );
1557# undef M
sewardjc30cd9b2012-12-06 18:08:54 +00001558 }
sewardjc76d0e52014-05-03 21:22:55 +00001559# if defined(VGA_s390x)
florian2baf7532012-07-26 02:41:31 +00001560 /* Now that the complaint is out we need to adjust the guest_IA. The
1561 reason is that -- after raising the exception -- execution will
1562 continue with the insn that follows the invalid insn. As the first
1563 2 bits of the invalid insn determine its length in the usual way,
1564 we can compute the address of the next insn here and adjust the
1565 guest_IA accordingly. This adjustment is essential and tested by
1566 none/tests/s390x/op_exception.c (which would loop forever
1567 otherwise) */
1568 UChar byte = ((UChar *)addr)[0];
1569 UInt insn_length = ((((byte >> 6) + 1) >> 1) + 1) << 1;
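         /* Worked example of the formula above (arithmetic only):
              byte >> 6 == 0 :  (((0+1)>>1)+1)<<1 == 2 bytes
              byte >> 6 == 1 :  (((1+1)>>1)+1)<<1 == 4 bytes
              byte >> 6 == 2 :  (((2+1)>>1)+1)<<1 == 4 bytes
              byte >> 6 == 3 :  (((3+1)>>1)+1)<<1 == 6 bytes */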
1570 Addr next_insn_addr = addr + insn_length;
florian2baf7532012-07-26 02:41:31 +00001571 VG_(set_IP)(tid, next_insn_addr);
sewardjc76d0e52014-05-03 21:22:55 +00001572# endif
florian2baf7532012-07-26 02:41:31 +00001573 VG_(synth_sigill)(tid, addr);
1574 break;
1575 }
sewardjc76d0e52014-05-03 21:22:55 +00001576
sewardj1146ae62014-05-04 10:54:08 +00001577 case VEX_TRC_JMP_INVALICACHE:
cerion85665ca2005-06-20 15:51:07 +00001578 VG_(discard_translations)(
florianddd61ff2015-01-04 17:20:45 +00001579 (Addr)VG_(threads)[tid].arch.vex.guest_CMSTART,
sewardj1146ae62014-05-04 10:54:08 +00001580 VG_(threads)[tid].arch.vex.guest_CMLEN,
1581 "scheduler(VEX_TRC_JMP_INVALICACHE)"
sewardj487ac702005-06-21 12:52:38 +00001582 );
cerion85665ca2005-06-20 15:51:07 +00001583 if (0)
1584 VG_(printf)("dump translations done.\n");
cerion85665ca2005-06-20 15:51:07 +00001585 break;
1586
sewardjc76d0e52014-05-03 21:22:55 +00001587 case VEX_TRC_JMP_FLUSHDCACHE: {
sewardj1146ae62014-05-04 10:54:08 +00001588 void* start = (void*)VG_(threads)[tid].arch.vex.guest_CMSTART;
1589 SizeT len = VG_(threads)[tid].arch.vex.guest_CMLEN;
sewardjc76d0e52014-05-03 21:22:55 +00001590 VG_(debugLog)(2, "sched", "flush_dcache(%p, %lu)\n", start, len);
1591 VG_(flush_dcache)(start, len);
1592 break;
1593 }
1594
sewardje3a384b2005-07-29 08:51:34 +00001595 case VG_TRC_INVARIANT_FAILED:
1596 /* This typically happens if, after running generated code,
1597 it is detected that host CPU settings (eg, FPU/Vector
1598 control words) are not as they should be. Vex's code
1599 generation specifies the state such control words should
1600 be in on entry to Vex-generated code, and they should be
1601 unchanged on exit from it. Failure of this assertion
1602 usually means a bug in Vex's code generation. */
sewardj59570ff2010-01-01 11:59:33 +00001603 //{ UInt xx;
1604 // __asm__ __volatile__ (
1605 // "\t.word 0xEEF12A10\n" // fmrx r2,fpscr
1606 // "\tmov %0, r2" : "=r"(xx) : : "r2" );
1607 // VG_(printf)("QQQQ new fpscr = %08x\n", xx);
1608 //}
sewardje3a384b2005-07-29 08:51:34 +00001609 vg_assert2(0, "VG_(scheduler), phase 3: "
1610 "run_innerloop detected host "
1611 "state invariant failure", trc);
1612
sewardja0fef1b2005-11-03 13:46:30 +00001613 case VEX_TRC_JMP_SYS_SYSENTER:
sewardj5438a012005-08-07 14:49:27 +00001614 /* Do whatever simulation is appropriate for an x86 sysenter
1615 instruction. Note that it is critical to set this thread's
1616 guest_EIP to point at the code to execute after the
1617 sysenter, since Vex-generated code will not have set it --
1618 vex does not know what it should be. Vex sets the next
njncda2f0f2009-05-18 02:12:08 +00001619 address to zero, so if you don't set guest_EIP, the thread
1620 will jump to zero afterwards and probably die as a result. */
1621# if defined(VGP_x86_linux)
sewardj5438a012005-08-07 14:49:27 +00001622 vg_assert2(0, "VG_(scheduler), phase 3: "
njncda2f0f2009-05-18 02:12:08 +00001623 "sysenter_x86 on x86-linux is not supported");
sewardj8eb8bab2015-07-21 14:44:28 +00001624# elif defined(VGP_x86_darwin) || defined(VGP_x86_solaris)
njnf76d27a2009-05-28 01:53:07 +00001625 /* return address in client edx */
1626 VG_(threads)[tid].arch.vex.guest_EIP
1627 = VG_(threads)[tid].arch.vex.guest_EDX;
sewardj93a97572012-04-21 15:35:12 +00001628 handle_syscall(tid, trc[0]);
sewardj5438a012005-08-07 14:49:27 +00001629# else
1630 vg_assert2(0, "VG_(scheduler), phase 3: "
1631 "sysenter_x86 on non-x86 platform?!?!");
1632# endif
njnf76d27a2009-05-28 01:53:07 +00001633 break;
sewardj5438a012005-08-07 14:49:27 +00001634
sewardjb5f6f512005-03-10 23:59:00 +00001635 default:
njn50ae1a72005-04-08 23:28:23 +00001636 vg_assert2(0, "VG_(scheduler), phase 3: "
sewardj291849f2012-04-20 23:58:55 +00001637 "unexpected thread return code (%u)", trc[0]);
sewardjb5f6f512005-03-10 23:59:00 +00001638 /* NOTREACHED */
1639 break;
sewardje663cb92002-04-12 10:26:32 +00001640
1641 } /* switch (trc) */
sewardjb0473e92011-06-07 22:54:32 +00001642
sewardj17c5e2e2012-12-28 09:12:14 +00001643 if (UNLIKELY(VG_(clo_profyle_sbs)) && VG_(clo_profyle_interval) > 0)
1644 maybe_show_sb_profile();
nethercote238a3c32004-08-09 13:13:31 +00001645 }
sewardjc24be7a2005-03-15 01:40:12 +00001646
1647 if (VG_(clo_trace_sched))
1648 print_sched_event(tid, "exiting VG_(scheduler)");
1649
sewardjb5f6f512005-03-10 23:59:00 +00001650 vg_assert(VG_(is_exiting)(tid));
thughes513197c2004-06-13 12:07:53 +00001651
sewardjb5f6f512005-03-10 23:59:00 +00001652 return tst->exitreason;
sewardj20917d82002-05-28 01:36:45 +00001653}
1654
1655
sewardjb5f6f512005-03-10 23:59:00 +00001656void VG_(nuke_all_threads_except) ( ThreadId me, VgSchedReturnCode src )
sewardjccef2e62002-05-29 19:26:32 +00001657{
1658 ThreadId tid;
sewardjb5f6f512005-03-10 23:59:00 +00001659
1660 vg_assert(VG_(is_running_thread)(me));
sewardj45f02c42005-02-05 18:27:14 +00001661
sewardjccef2e62002-05-29 19:26:32 +00001662 for (tid = 1; tid < VG_N_THREADS; tid++) {
1663 if (tid == me
jsgf855d93d2003-10-13 22:26:55 +00001664 || VG_(threads)[tid].status == VgTs_Empty)
sewardjccef2e62002-05-29 19:26:32 +00001665 continue;
sewardjb5f6f512005-03-10 23:59:00 +00001666 if (0)
sewardjef037c72002-05-30 00:40:03 +00001667 VG_(printf)(
florianc6e5d762015-08-05 22:27:24 +00001668 "VG_(nuke_all_threads_except): nuking tid %u\n", tid);
sewardjb5f6f512005-03-10 23:59:00 +00001669
1670 VG_(threads)[tid].exitreason = src;
sewardja8d8e232005-06-07 20:04:56 +00001671 if (src == VgSrc_FatalSig)
1672 VG_(threads)[tid].os_state.fatalsig = VKI_SIGKILL;
sewardjf54342a2006-10-17 01:51:24 +00001673 VG_(get_thread_out_of_syscall)(tid);
sewardjccef2e62002-05-29 19:26:32 +00001674 }
1675}
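/* Summarising the loop above (a reading of the code, not an extra
   mechanism): each targeted thread has its exitreason set and is pulled
   out of any blocked syscall, so its own VG_(scheduler) loop notices
   VG_(is_exiting)() and unwinds of its own accord. */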
1676
1677
njnd3040452003-05-19 15:04:06 +00001678/* ---------------------------------------------------------------------
sewardjb5f6f512005-03-10 23:59:00 +00001679 Specifying shadow register values
njnd3040452003-05-19 15:04:06 +00001680 ------------------------------------------------------------------ */
1681
njnf536bbb2005-06-13 04:21:38 +00001682#if defined(VGA_x86)
njnaf839f52005-06-23 03:27:57 +00001683# define VG_CLREQ_ARGS guest_EAX
1684# define VG_CLREQ_RET guest_EDX
njnf536bbb2005-06-13 04:21:38 +00001685#elif defined(VGA_amd64)
njnaf839f52005-06-23 03:27:57 +00001686# define VG_CLREQ_ARGS guest_RAX
1687# define VG_CLREQ_RET guest_RDX
carllcae0cc22014-08-07 23:17:29 +00001688#elif defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_ppc64le)
njnaf839f52005-06-23 03:27:57 +00001689# define VG_CLREQ_ARGS guest_GPR4
1690# define VG_CLREQ_RET guest_GPR3
sewardj59570ff2010-01-01 11:59:33 +00001691#elif defined(VGA_arm)
1692# define VG_CLREQ_ARGS guest_R4
1693# define VG_CLREQ_RET guest_R3
sewardjf0c12502014-01-12 12:54:00 +00001694#elif defined(VGA_arm64)
1695# define VG_CLREQ_ARGS guest_X4
1696# define VG_CLREQ_RET guest_X3
sewardjb5b87402011-03-07 16:05:35 +00001697#elif defined (VGA_s390x)
1698# define VG_CLREQ_ARGS guest_r2
1699# define VG_CLREQ_RET guest_r3
petarj4df0bfc2013-02-27 23:17:33 +00001700#elif defined(VGA_mips32) || defined(VGA_mips64)
sewardj5db15402012-06-07 09:13:21 +00001701# define VG_CLREQ_ARGS guest_r12
1702# define VG_CLREQ_RET guest_r11
sewardj112711a2015-04-10 12:30:09 +00001703#elif defined(VGA_tilegx)
1704# define VG_CLREQ_ARGS guest_r12
1705# define VG_CLREQ_RET guest_r11
njnf536bbb2005-06-13 04:21:38 +00001706#else
1707# error Unknown arch
1708#endif
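/* Reading the table above, for illustration only: on x86 the address of
   a client request's argument block arrives in guest_EAX and the reply
   is written back to guest_EDX; on amd64 the corresponding registers
   are guest_RAX and guest_RDX. */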
1709
njnaf839f52005-06-23 03:27:57 +00001710#define CLREQ_ARGS(regs) ((regs).vex.VG_CLREQ_ARGS)
1711#define CLREQ_RET(regs) ((regs).vex.VG_CLREQ_RET)
1712#define O_CLREQ_RET (offsetof(VexGuestArchState, VG_CLREQ_RET))
njnf536bbb2005-06-13 04:21:38 +00001713
njn502badb2005-05-08 02:04:49 +00001714// These macros write a value to a client's thread register, and tell the
1715// tool that it's happened (if necessary).
1716
1717#define SET_CLREQ_RETVAL(zztid, zzval) \
1718 do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
1719 VG_TRACK( post_reg_write, \
1720 Vg_CoreClientReq, zztid, O_CLREQ_RET, sizeof(UWord)); \
1721 } while (0)
1722
1723#define SET_CLCALL_RETVAL(zztid, zzval, f) \
1724 do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
1725 VG_TRACK( post_reg_write_clientcall_return, \
1726 zztid, O_CLREQ_RET, sizeof(UWord), f); \
1727 } while (0)
1728
sewardj0ec07f32006-01-12 12:32:32 +00001729
sewardje663cb92002-04-12 10:26:32 +00001730/* ---------------------------------------------------------------------
sewardj124ca2a2002-06-20 10:19:38 +00001731 Handle client requests.
sewardje663cb92002-04-12 10:26:32 +00001732 ------------------------------------------------------------------ */
1733
njn9cb54ac2005-06-12 04:19:17 +00001734// OS-specific(?) client requests
1735static Bool os_client_request(ThreadId tid, UWord *args)
1736{
1737 Bool handled = True;
1738
1739 vg_assert(VG_(is_running_thread)(tid));
1740
1741 switch(args[0]) {
Elliott Hughesa0664b92017-04-18 17:46:52 -07001742 case VG_USERREQ__FREERES_DONE:
njn9cb54ac2005-06-12 04:19:17 +00001743 /* This is equivalent to an exit() syscall, but we don't set the
1744 exitcode (since it might already be set) */
1745 if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched))
sewardj738856f2009-07-15 14:48:32 +00001746 VG_(message)(Vg_DebugMsg,
Elliott Hughesa0664b92017-04-18 17:46:52 -07001747 "__gnu_cxx::__freeres() and __libc_freeres() wrapper "
1748 "done; really quitting!\n");
sewardjf54342a2006-10-17 01:51:24 +00001749 VG_(threads)[tid].exitreason = VgSrc_ExitThread;
njn9cb54ac2005-06-12 04:19:17 +00001750 break;
1751
1752 default:
1753 handled = False;
1754 break;
1755 }
1756
1757 return handled;
1758}
1759
1760
florian661786e2013-08-27 15:17:53 +00001761/* Write out a client message, possibly including a back trace. Return
1762 the number of characters written. In case of XML output, the format
1763 string as well as any arguments it requires will be XML'ified.
1764 I.e. special characters such as the angle brackets will be translated
1765 into proper escape sequences. */
1766static
1767Int print_client_message( ThreadId tid, const HChar *format,
1768 va_list *vargsp, Bool include_backtrace)
1769{
1770 Int count;
1771
1772 if (VG_(clo_xml)) {
1773 /* Translate the format string as follows:
1774 < --> &lt;
1775 > --> &gt;
1776 & --> &amp;
1777 %s --> %pS
1778 Yes, yes, it's simplified but in synch with
1779 myvprintf_str_XML_simplistic and VG_(debugLog_vprintf).
1780 */
1781
1782 /* Allocate a buffer that is for sure large enough. */
1783 HChar xml_format[VG_(strlen)(format) * 5 + 1];
1784
1785 const HChar *p;
1786 HChar *q = xml_format;
1787
1788 for (p = format; *p; ++p) {
1789 switch (*p) {
1790 case '<': VG_(strcpy)(q, "&lt;"); q += 4; break;
1791 case '>': VG_(strcpy)(q, "&gt;"); q += 4; break;
1792 case '&': VG_(strcpy)(q, "&amp;"); q += 5; break;
1793 case '%':
1794 /* Careful: make sure %%s stays %%s */
1795 *q++ = *p++;
1796 if (*p == 's') {
1797 *q++ = 'p';
1798 *q++ = 'S';
1799 } else {
1800 *q++ = *p;
1801 }
1802 break;
1803
1804 default:
1805 *q++ = *p;
1806 break;
1807 }
1808 }
1809 *q = '\0';
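      /* For instance, tracing the loop above: a client format string
         "count <%d> & name %s\n" comes out here as
         "count &lt;%d&gt; &amp; name %pS\n". */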
1810
1811 VG_(printf_xml)( "<clientmsg>\n" );
florianc6e5d762015-08-05 22:27:24 +00001812 VG_(printf_xml)( " <tid>%u</tid>\n", tid );
florianead018e2015-07-30 21:49:49 +00001813 const ThreadState *tst = VG_(get_ThreadState)(tid);
1814 if (tst->thread_name)
1815 VG_(printf_xml)(" <threadname>%s</threadname>\n", tst->thread_name);
florian661786e2013-08-27 15:17:53 +00001816 VG_(printf_xml)( " <text>" );
1817 count = VG_(vprintf_xml)( xml_format, *vargsp );
1818 VG_(printf_xml)( " </text>\n" );
1819 } else {
1820 count = VG_(vmessage)( Vg_ClientMsg, format, *vargsp );
1821 VG_(message_flush)();
1822 }
1823
1824 if (include_backtrace)
1825 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
1826
1827 if (VG_(clo_xml))
1828 VG_(printf_xml)( "</clientmsg>\n" );
1829
1830 return count;
1831}
1832
1833
sewardj124ca2a2002-06-20 10:19:38 +00001834/* Do a client request for the thread tid. After the request, tid may
1835 or may not still be runnable; if not, the scheduler will have to
1836 choose a new thread to run.
1837*/
sewardje663cb92002-04-12 10:26:32 +00001838static
sewardjb5f6f512005-03-10 23:59:00 +00001839void do_client_request ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00001840{
sewardjb5f6f512005-03-10 23:59:00 +00001841 UWord* arg = (UWord*)(CLREQ_ARGS(VG_(threads)[tid].arch));
nethercoted1b64b22004-11-04 18:22:28 +00001842 UWord req_no = arg[0];
sewardj124ca2a2002-06-20 10:19:38 +00001843
fitzhardinge98abfc72003-12-16 02:05:15 +00001844 if (0)
florianc6e5d762015-08-05 22:27:24 +00001845 VG_(printf)("req no = 0x%lx, arg = %p\n", req_no, arg);
sewardje663cb92002-04-12 10:26:32 +00001846 switch (req_no) {
1847
njn3e884182003-04-15 13:03:23 +00001848 case VG_USERREQ__CLIENT_CALL0: {
florian7822f632014-12-24 11:11:42 +00001849 UWord (*f)(ThreadId) = (__typeof__(f))arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001850 if (f == NULL)
sewardj738856f2009-07-15 14:48:32 +00001851 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL0: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001852 else
njn2ac95242005-03-13 23:07:30 +00001853 SET_CLCALL_RETVAL(tid, f ( tid ), (Addr)f);
njn3e884182003-04-15 13:03:23 +00001854 break;
1855 }
1856 case VG_USERREQ__CLIENT_CALL1: {
florian7822f632014-12-24 11:11:42 +00001857 UWord (*f)(ThreadId, UWord) = (__typeof__(f))arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001858 if (f == NULL)
sewardj738856f2009-07-15 14:48:32 +00001859 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL1: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001860 else
njn2ac95242005-03-13 23:07:30 +00001861 SET_CLCALL_RETVAL(tid, f ( tid, arg[2] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00001862 break;
1863 }
1864 case VG_USERREQ__CLIENT_CALL2: {
florian7822f632014-12-24 11:11:42 +00001865 UWord (*f)(ThreadId, UWord, UWord) = (__typeof__(f))arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001866 if (f == NULL)
sewardj738856f2009-07-15 14:48:32 +00001867 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL2: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001868 else
njn2ac95242005-03-13 23:07:30 +00001869 SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00001870 break;
1871 }
1872 case VG_USERREQ__CLIENT_CALL3: {
florian7822f632014-12-24 11:11:42 +00001873 UWord (*f)(ThreadId, UWord, UWord, UWord) = (__typeof__(f))arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001874 if (f == NULL)
sewardj738856f2009-07-15 14:48:32 +00001875 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL3: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001876 else
njn2ac95242005-03-13 23:07:30 +00001877 SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3], arg[4] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00001878 break;
1879 }
1880
njnf09745a2005-05-10 03:01:23 +00001881 // Nb: this looks like a circular definition, because it kind of is.
1882 // See comment in valgrind.h to understand what's going on.
sewardj124ca2a2002-06-20 10:19:38 +00001883 case VG_USERREQ__RUNNING_ON_VALGRIND:
sewardjb5f6f512005-03-10 23:59:00 +00001884 SET_CLREQ_RETVAL(tid, RUNNING_ON_VALGRIND+1);
sewardj124ca2a2002-06-20 10:19:38 +00001885 break;
1886
fitzhardinge39de4b42003-10-31 07:12:21 +00001887 case VG_USERREQ__PRINTF: {
florian661786e2013-08-27 15:17:53 +00001888 const HChar* format = (HChar *)arg[1];
sewardjc560fb32010-01-28 15:23:54 +00001889 /* JRS 2010-Jan-28: this is DEPRECATED; use the
1890 _VALIST_BY_REF version instead */
1891 if (sizeof(va_list) != sizeof(UWord))
1892 goto va_list_casting_error_NORETURN;
sewardj05b07152010-01-04 01:01:02 +00001893 union {
1894 va_list vargs;
sewardjc560fb32010-01-28 15:23:54 +00001895 unsigned long uw;
1896 } u;
1897 u.uw = (unsigned long)arg[2];
1898 Int count =
florian661786e2013-08-27 15:17:53 +00001899 print_client_message( tid, format, &u.vargs,
1900 /* include_backtrace */ False );
sewardjc560fb32010-01-28 15:23:54 +00001901 SET_CLREQ_RETVAL( tid, count );
1902 break;
1903 }
fitzhardinge39de4b42003-10-31 07:12:21 +00001904
sewardjc560fb32010-01-28 15:23:54 +00001905 case VG_USERREQ__PRINTF_BACKTRACE: {
florian661786e2013-08-27 15:17:53 +00001906 const HChar* format = (HChar *)arg[1];
sewardjc560fb32010-01-28 15:23:54 +00001907 /* JRS 2010-Jan-28: this is DEPRECATED; use the
1908 _VALIST_BY_REF version instead */
1909 if (sizeof(va_list) != sizeof(UWord))
1910 goto va_list_casting_error_NORETURN;
sewardj05b07152010-01-04 01:01:02 +00001911 union {
1912 va_list vargs;
sewardjc560fb32010-01-28 15:23:54 +00001913 unsigned long uw;
1914 } u;
1915 u.uw = (unsigned long)arg[2];
1916 Int count =
florian661786e2013-08-27 15:17:53 +00001917 print_client_message( tid, format, &u.vargs,
1918 /* include_backtrace */ True );
sewardjc560fb32010-01-28 15:23:54 +00001919 SET_CLREQ_RETVAL( tid, count );
1920 break;
1921 }
1922
1923 case VG_USERREQ__PRINTF_VALIST_BY_REF: {
florian661786e2013-08-27 15:17:53 +00001924 const HChar* format = (HChar *)arg[1];
sewardjc560fb32010-01-28 15:23:54 +00001925 va_list* vargsp = (va_list*)arg[2];
florian661786e2013-08-27 15:17:53 +00001926 Int count =
1927 print_client_message( tid, format, vargsp,
1928 /* include_backtrace */ False );
1929
sewardjc560fb32010-01-28 15:23:54 +00001930 SET_CLREQ_RETVAL( tid, count );
1931 break;
1932 }
1933
1934 case VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF: {
florian661786e2013-08-27 15:17:53 +00001935 const HChar* format = (HChar *)arg[1];
sewardjc560fb32010-01-28 15:23:54 +00001936 va_list* vargsp = (va_list*)arg[2];
1937 Int count =
florian661786e2013-08-27 15:17:53 +00001938 print_client_message( tid, format, vargsp,
1939 /* include_backtrace */ True );
sewardjc560fb32010-01-28 15:23:54 +00001940 SET_CLREQ_RETVAL( tid, count );
1941 break;
1942 }
1943
1944 case VG_USERREQ__INTERNAL_PRINTF_VALIST_BY_REF: {
1945 va_list* vargsp = (va_list*)arg[2];
1946 Int count =
floriancd19e992012-11-03 19:32:28 +00001947 VG_(vmessage)( Vg_DebugMsg, (HChar *)arg[1], *vargsp );
sewardjc560fb32010-01-28 15:23:54 +00001948 VG_(message_flush)();
1949 SET_CLREQ_RETVAL( tid, count );
1950 break;
1951 }
fitzhardinge39de4b42003-10-31 07:12:21 +00001952
tomd2645142009-10-29 09:27:11 +00001953 case VG_USERREQ__ADD_IFUNC_TARGET: {
1954 VG_(redir_add_ifunc_target)( arg[1], arg[2] );
1955 SET_CLREQ_RETVAL( tid, 0);
1956 break; }
1957
rjwalsh0140af52005-06-04 20:42:33 +00001958 case VG_USERREQ__STACK_REGISTER: {
njn945ed2e2005-06-24 03:28:30 +00001959 UWord sid = VG_(register_stack)((Addr)arg[1], (Addr)arg[2]);
rjwalsh0140af52005-06-04 20:42:33 +00001960 SET_CLREQ_RETVAL( tid, sid );
1961 break; }
1962
1963 case VG_USERREQ__STACK_DEREGISTER: {
njn945ed2e2005-06-24 03:28:30 +00001964 VG_(deregister_stack)(arg[1]);
rjwalsh0140af52005-06-04 20:42:33 +00001965 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1966 break; }
1967
1968 case VG_USERREQ__STACK_CHANGE: {
njn945ed2e2005-06-24 03:28:30 +00001969 VG_(change_stack)(arg[1], (Addr)arg[2], (Addr)arg[3]);
rjwalsh0140af52005-06-04 20:42:33 +00001970 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1971 break; }
1972
fitzhardinge98abfc72003-12-16 02:05:15 +00001973 case VG_USERREQ__GET_MALLOCFUNCS: {
1974 struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];
1975
njnfc51f8d2005-06-21 03:20:17 +00001976 info->tl_malloc = VG_(tdict).tool_malloc;
1977 info->tl_calloc = VG_(tdict).tool_calloc;
1978 info->tl_realloc = VG_(tdict).tool_realloc;
1979 info->tl_memalign = VG_(tdict).tool_memalign;
1980 info->tl___builtin_new = VG_(tdict).tool___builtin_new;
1981 info->tl___builtin_vec_new = VG_(tdict).tool___builtin_vec_new;
1982 info->tl_free = VG_(tdict).tool_free;
1983 info->tl___builtin_delete = VG_(tdict).tool___builtin_delete;
1984 info->tl___builtin_vec_delete = VG_(tdict).tool___builtin_vec_delete;
njn8b140de2009-02-17 04:31:18 +00001985 info->tl_malloc_usable_size = VG_(tdict).tool_malloc_usable_size;
fitzhardinge98abfc72003-12-16 02:05:15 +00001986
njn088bfb42005-08-17 05:01:37 +00001987 info->mallinfo = VG_(mallinfo);
sewardjb5f6f512005-03-10 23:59:00 +00001988 info->clo_trace_malloc = VG_(clo_trace_malloc);
fitzhardinge98abfc72003-12-16 02:05:15 +00001989
1990 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1991
1992 break;
1993 }
1994
njn25e49d8e72002-09-23 09:36:25 +00001995 /* Requests from the client program */
1996
1997 case VG_USERREQ__DISCARD_TRANSLATIONS:
1998 if (VG_(clo_verbosity) > 2)
1999 VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
njn8a7b41b2007-09-23 00:51:24 +00002000 " addr %p, len %lu\n",
njn25e49d8e72002-09-23 09:36:25 +00002001 (void*)arg[1], arg[2] );
2002
sewardj45f4e7c2005-09-27 19:20:21 +00002003 VG_(discard_translations)(
2004 arg[1], arg[2], "scheduler(VG_USERREQ__DISCARD_TRANSLATIONS)"
2005 );
njn25e49d8e72002-09-23 09:36:25 +00002006
njnd3040452003-05-19 15:04:06 +00002007 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
njn25e49d8e72002-09-23 09:36:25 +00002008 break;
2009
njn47363ab2003-04-21 13:24:40 +00002010 case VG_USERREQ__COUNT_ERRORS:
nethercotef2b11482004-08-02 12:36:01 +00002011 SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() );
njn47363ab2003-04-21 13:24:40 +00002012 break;
2013
sewardjc8259b82009-04-22 22:42:10 +00002014 case VG_USERREQ__LOAD_PDB_DEBUGINFO:
2015 VG_(di_notify_pdb_debuginfo)( arg[1], arg[2], arg[3], arg[4] );
2016 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
2017 break;
2018
sewardj5c659622010-08-20 18:22:07 +00002019 case VG_USERREQ__MAP_IP_TO_SRCLOC: {
2020 Addr ip = arg[1];
florian10ef7252014-10-27 12:06:35 +00002021 HChar* buf64 = (HChar*)arg[2]; // points to a HChar [64] array
2022 const HChar *buf; // points to a string of unknown size
sewardj5c659622010-08-20 18:22:07 +00002023
2024 VG_(memset)(buf64, 0, 64);
2025 UInt linenum = 0;
2026 Bool ok = VG_(get_filename_linenum)(
florianf4384f42014-12-16 20:55:58 +00002027 ip, &buf, NULL, &linenum
sewardj5c659622010-08-20 18:22:07 +00002028 );
2029 if (ok) {
florian10ef7252014-10-27 12:06:35 +00002030 /* For backward compatibility truncate the filename to
2031 49 characters. */
2032 VG_(strncpy)(buf64, buf, 50);
2033 buf64[49] = '\0';
sewardj5c659622010-08-20 18:22:07 +00002034 UInt i;
2035 for (i = 0; i < 50; i++) {
2036 if (buf64[i] == 0)
2037 break;
2038 }
florian10ef7252014-10-27 12:06:35 +00002039 VG_(sprintf)(buf64+i, ":%u", linenum); // safe
sewardj5c659622010-08-20 18:22:07 +00002040 } else {
2041 buf64[0] = 0;
2042 }
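         /* Hypothetical example (file name invented for illustration):
            if ip falls in "foo.c" at line 123, buf64 ends up holding
            "foo.c:123"; if no source info is found it stays empty. */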
2043
2044 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
2045 break;
2046 }
2047
sewardjdc873c02011-07-24 16:02:33 +00002048 case VG_USERREQ__CHANGE_ERR_DISABLEMENT: {
2049 Word delta = arg[1];
2050 vg_assert(delta == 1 || delta == -1);
2051 ThreadState* tst = VG_(get_ThreadState)(tid);
2052 vg_assert(tst);
2053 if (delta == 1 && tst->err_disablement_level < 0xFFFFFFFF) {
2054 tst->err_disablement_level++;
2055 }
2056 else
2057 if (delta == -1 && tst->err_disablement_level > 0) {
2058 tst->err_disablement_level--;
2059 }
2060 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
2061 break;
2062 }
2063
philippe46207652013-01-20 17:11:58 +00002064 case VG_USERREQ__GDB_MONITOR_COMMAND: {
2065 UWord ret;
2066 ret = (UWord) VG_(client_monitor_command) ((HChar*)arg[1]);
2067 SET_CLREQ_RETVAL(tid, ret);
2068 break;
2069 }
2070
njn32f8d8c2009-07-15 02:31:45 +00002071 case VG_USERREQ__MALLOCLIKE_BLOCK:
bart91347382011-03-25 20:07:25 +00002072 case VG_USERREQ__RESIZEINPLACE_BLOCK:
njn32f8d8c2009-07-15 02:31:45 +00002073 case VG_USERREQ__FREELIKE_BLOCK:
2074 // Ignore them if the addr is NULL; otherwise pass onto the tool.
2075 if (!arg[1]) {
2076 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
2077 break;
2078 } else {
2079 goto my_default;
2080 }
2081
florianbb913cd2012-08-28 16:50:39 +00002082 case VG_USERREQ__VEX_INIT_FOR_IRI:
2083 LibVEX_InitIRI ( (IRICB *)arg[1] );
2084 break;
2085
sewardje663cb92002-04-12 10:26:32 +00002086 default:
njn32f8d8c2009-07-15 02:31:45 +00002087 my_default:
njn9cb54ac2005-06-12 04:19:17 +00002088 if (os_client_request(tid, arg)) {
2089 // do nothing, os_client_request() handled it
sewardjb5f6f512005-03-10 23:59:00 +00002090 } else if (VG_(needs).client_requests) {
nethercoted1b64b22004-11-04 18:22:28 +00002091 UWord ret;
sewardj34042512002-10-22 04:14:35 +00002092
njn25e49d8e72002-09-23 09:36:25 +00002093 if (VG_(clo_verbosity) > 2)
njn8a7b41b2007-09-23 00:51:24 +00002094 VG_(printf)("client request: code %lx, addr %p, len %lu\n",
njn25e49d8e72002-09-23 09:36:25 +00002095 arg[0], (void*)arg[1], arg[2] );
2096
njn51d827b2005-05-09 01:02:08 +00002097 if ( VG_TDICT_CALL(tool_handle_client_request, tid, arg, &ret) )
sewardjb5f6f512005-03-10 23:59:00 +00002098 SET_CLREQ_RETVAL(tid, ret);
njn25e49d8e72002-09-23 09:36:25 +00002099 } else {
sewardj34042512002-10-22 04:14:35 +00002100 static Bool whined = False;
2101
sewardjb5f6f512005-03-10 23:59:00 +00002102 if (!whined && VG_(clo_verbosity) > 2) {
nethercote7cc9c232004-01-21 15:08:04 +00002103 // Allow for requests in core, but defined by tools, which
njnd7994182003-10-02 13:44:04 +00002104 // have 0 and 0 in their two high bytes.
floriandbb35842012-10-27 18:39:11 +00002105 HChar c1 = (arg[0] >> 24) & 0xff;
2106 HChar c2 = (arg[0] >> 16) & 0xff;
njnd7994182003-10-02 13:44:04 +00002107 if (c1 == 0) c1 = '_';
2108 if (c2 == 0) c2 = '_';
sewardj34042512002-10-22 04:14:35 +00002109 VG_(message)(Vg_UserMsg, "Warning:\n"
barta0b6b2c2008-07-07 06:49:24 +00002110 " unhandled client request: 0x%lx (%c%c+0x%lx). Perhaps\n"
sewardj738856f2009-07-15 14:48:32 +00002111 " VG_(needs).client_requests should be set?\n",
njnd7994182003-10-02 13:44:04 +00002112 arg[0], c1, c2, arg[0] & 0xffff);
sewardj34042512002-10-22 04:14:35 +00002113 whined = True;
2114 }
njn25e49d8e72002-09-23 09:36:25 +00002115 }
sewardje663cb92002-04-12 10:26:32 +00002116 break;
2117 }
sewardjc560fb32010-01-28 15:23:54 +00002118 return;
2119
2120 /*NOTREACHED*/
2121 va_list_casting_error_NORETURN:
2122 VG_(umsg)(
2123 "Valgrind: fatal error - cannot continue: use of the deprecated\n"
2124 "client requests VG_USERREQ__PRINTF or VG_USERREQ__PRINTF_BACKTRACE\n"
2125 "on a platform where they cannot be supported. Please use the\n"
2126 "equivalent _VALIST_BY_REF versions instead.\n"
2127 "\n"
2128 "This is a binary-incompatible change in Valgrind's client request\n"
2129 "mechanism. It is unfortunate, but difficult to avoid. End-users\n"
2130 "are expected to almost never see this message. The only case in\n"
2131 "which you might see this message is if your code uses the macros\n"
2132 "VALGRIND_PRINTF or VALGRIND_PRINTF_BACKTRACE. If so, you will need\n"
2133 "to recompile such code, using the header files from this version of\n"
2134 "Valgrind, and not any previous version.\n"
2135 "\n"
2136      "If you see this message in any other circumstances, it is probably\n"
2137 "a bug in Valgrind. In this case, please file a bug report at\n"
2138 "\n"
2139 " http://www.valgrind.org/support/bug_reports.html\n"
2140 "\n"
2141 "Will now abort.\n"
2142 );
2143 vg_assert(0);
sewardje663cb92002-04-12 10:26:32 +00002144}
2145
2146
sewardj6072c362002-04-19 14:40:57 +00002147/* ---------------------------------------------------------------------
njn6676d5b2005-06-19 18:49:19 +00002148 Sanity checking (permanently engaged)
sewardj6072c362002-04-19 14:40:57 +00002149 ------------------------------------------------------------------ */
2150
sewardjb5f6f512005-03-10 23:59:00 +00002151/* Internal consistency checks on the sched structures. */
sewardj6072c362002-04-19 14:40:57 +00002152static
sewardjb5f6f512005-03-10 23:59:00 +00002153void scheduler_sanity ( ThreadId tid )
sewardj6072c362002-04-19 14:40:57 +00002154{
sewardjb5f6f512005-03-10 23:59:00 +00002155 Bool bad = False;
sewardjf54342a2006-10-17 01:51:24 +00002156 Int lwpid = VG_(gettid)();
jsgf855d93d2003-10-13 22:26:55 +00002157
sewardjb5f6f512005-03-10 23:59:00 +00002158 if (!VG_(is_running_thread)(tid)) {
2159 VG_(message)(Vg_DebugMsg,
florianc6e5d762015-08-05 22:27:24 +00002160 "Thread %u is supposed to be running, "
2161 "but doesn't own the_BigLock (owned by %u)\n",
njnc7561b92005-06-19 01:24:32 +00002162 tid, VG_(running_tid));
sewardjb5f6f512005-03-10 23:59:00 +00002163 bad = True;
jsgf855d93d2003-10-13 22:26:55 +00002164 }
sewardj5f07b662002-04-23 16:52:51 +00002165
sewardjf54342a2006-10-17 01:51:24 +00002166 if (lwpid != VG_(threads)[tid].os_state.lwpid) {
sewardjb5f6f512005-03-10 23:59:00 +00002167 VG_(message)(Vg_DebugMsg,
florianc6e5d762015-08-05 22:27:24 +00002168 "Thread %u supposed to be in LWP %d, but we're actually %d\n",
njnd06ed472005-03-13 05:12:31 +00002169 tid, VG_(threads)[tid].os_state.lwpid, VG_(gettid)());
sewardjb5f6f512005-03-10 23:59:00 +00002170 bad = True;
sewardj5f07b662002-04-23 16:52:51 +00002171 }
sewardjf54342a2006-10-17 01:51:24 +00002172
bart78bfc712011-12-08 16:14:59 +00002173 if (lwpid != ML_(get_sched_lock_owner)(the_BigLock)) {
sewardjf54342a2006-10-17 01:51:24 +00002174 VG_(message)(Vg_DebugMsg,
florianc6e5d762015-08-05 22:27:24 +00002175 "Thread (LWPID) %u doesn't own the_BigLock\n",
sewardjf54342a2006-10-17 01:51:24 +00002176 tid);
2177 bad = True;
2178 }
2179
philippe9e9b5892013-01-23 22:19:36 +00002180 if (0) {
2181 /* Periodically show the state of all threads, for debugging
2182 purposes. */
2183 static UInt lasttime = 0;
2184 UInt now;
2185 now = VG_(read_millisecond_timer)();
2186 if ((!bad) && (lasttime + 4000/*ms*/ <= now)) {
2187 lasttime = now;
2188 VG_(printf)("\n------------ Sched State at %d ms ------------\n",
2189 (Int)now);
philippe4f6f3362014-04-19 00:25:54 +00002190 VG_(show_sched_status)(True, // host_stacktrace
philippe38a74d22014-08-29 22:53:19 +00002191 True, // stack_usage
philippe4f6f3362014-04-19 00:25:54 +00002192 True); // exited_threads);
philippe9e9b5892013-01-23 22:19:36 +00002193 }
sewardjf54342a2006-10-17 01:51:24 +00002194 }
2195
2196 /* core_panic also shows the sched status, which is why we don't
2197 show it above if bad==True. */
2198 if (bad)
2199 VG_(core_panic)("scheduler_sanity: failed");
sewardj6072c362002-04-19 14:40:57 +00002200}
2201
njn6676d5b2005-06-19 18:49:19 +00002202void VG_(sanity_check_general) ( Bool force_expensive )
2203{
2204 ThreadId tid;
2205
sewardjf54342a2006-10-17 01:51:24 +00002206 static UInt next_slow_check_at = 1;
2207 static UInt slow_check_interval = 25;
2208
njn6676d5b2005-06-19 18:49:19 +00002209 if (VG_(clo_sanity_level) < 1) return;
2210
2211 /* --- First do all the tests that we can do quickly. ---*/
2212
2213 sanity_fast_count++;
2214
2215 /* Check stuff pertaining to the memory check system. */
2216
2217 /* Check that nobody has spuriously claimed that the first or
2218 last 16 pages of memory have become accessible [...] */
2219 if (VG_(needs).sanity_checks) {
njn6676d5b2005-06-19 18:49:19 +00002220 vg_assert(VG_TDICT_CALL(tool_cheap_sanity_check));
njn6676d5b2005-06-19 18:49:19 +00002221 }
2222
2223 /* --- Now some more expensive checks. ---*/
2224
sewardjf54342a2006-10-17 01:51:24 +00002225 /* Once every now and again, check some more expensive stuff.
2226 Gradually increase the interval between such checks so as not to
2227 burden long-running programs too much. */
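   /* Concretely, assuming sanity_fast_count starts at zero and the
      sanity level is exactly 1: with the initial values above the
      expensive checks run on fast-check numbers 1, 25, 50, 76, 103, ...,
      the gap widening by one each time slow_check_interval is bumped. */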
njn6676d5b2005-06-19 18:49:19 +00002228 if ( force_expensive
sewardjf54342a2006-10-17 01:51:24 +00002229 || VG_(clo_sanity_level) > 1
2230 || (VG_(clo_sanity_level) == 1
2231 && sanity_fast_count == next_slow_check_at)) {
njn6676d5b2005-06-19 18:49:19 +00002232
florianc6e5d762015-08-05 22:27:24 +00002233 if (0) VG_(printf)("SLOW at %u\n", sanity_fast_count-1);
sewardjf54342a2006-10-17 01:51:24 +00002234
2235 next_slow_check_at = sanity_fast_count - 1 + slow_check_interval;
2236 slow_check_interval++;
njn6676d5b2005-06-19 18:49:19 +00002237 sanity_slow_count++;
2238
njn6676d5b2005-06-19 18:49:19 +00002239 if (VG_(needs).sanity_checks) {
njn6676d5b2005-06-19 18:49:19 +00002240 vg_assert(VG_TDICT_CALL(tool_expensive_sanity_check));
njn6676d5b2005-06-19 18:49:19 +00002241 }
2242
njn6676d5b2005-06-19 18:49:19 +00002243 /* Look for stack overruns. Visit all threads. */
njnd666ea72005-06-26 17:26:22 +00002244 for (tid = 1; tid < VG_N_THREADS; tid++) {
sewardj45f4e7c2005-09-27 19:20:21 +00002245 SizeT remains;
2246 VgStack* stack;
njn6676d5b2005-06-19 18:49:19 +00002247
2248 if (VG_(threads)[tid].status == VgTs_Empty ||
2249 VG_(threads)[tid].status == VgTs_Zombie)
2250 continue;
2251
sewardj45f4e7c2005-09-27 19:20:21 +00002252 stack
2253 = (VgStack*)
2254 VG_(get_ThreadState)(tid)->os_state.valgrind_stack_base;
sewardj46dbd3f2010-09-08 08:30:31 +00002255 SizeT limit
2256 = 4096; // Let's say. Checking more causes lots of L2 misses.
sewardj45f4e7c2005-09-27 19:20:21 +00002257 remains
sewardj46dbd3f2010-09-08 08:30:31 +00002258 = VG_(am_get_VgStack_unused_szB)(stack, limit);
2259 if (remains < limit)
njn6676d5b2005-06-19 18:49:19 +00002260 VG_(message)(Vg_DebugMsg,
florianc6e5d762015-08-05 22:27:24 +00002261 "WARNING: Thread %u is within %lu bytes "
philipped0720e42015-03-12 20:43:46 +00002262 "of running out of valgrind stack!\n"
2263 "Valgrind stack size can be increased "
2264 "using --valgrind-stacksize=....\n",
njn6676d5b2005-06-19 18:49:19 +00002265 tid, remains);
2266 }
njn6676d5b2005-06-19 18:49:19 +00002267 }
2268
2269 if (VG_(clo_sanity_level) > 1) {
njn6676d5b2005-06-19 18:49:19 +00002270 /* Check sanity of the low-level memory manager. Note that bugs
2271 in the client's code can cause this to fail, so we don't do
2272 this check unless specially asked for. And because it's
2273 potentially very expensive. */
2274 VG_(sanity_check_malloc_all)();
njn6676d5b2005-06-19 18:49:19 +00002275 }
njn6676d5b2005-06-19 18:49:19 +00002276}
sewardj6072c362002-04-19 14:40:57 +00002277
sewardje663cb92002-04-12 10:26:32 +00002278/*--------------------------------------------------------------------*/
njn278b3d62005-05-30 23:20:51 +00002279/*--- end ---*/
sewardje663cb92002-04-12 10:26:32 +00002280/*--------------------------------------------------------------------*/