
/*--------------------------------------------------------------------*/
/*--- Thread scheduling.                               scheduler.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2017 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/*
   Overview

   Valgrind tries to emulate the kernel's threading as closely as
   possible.  The client does all threading via the normal syscalls
   (on Linux: clone, etc).  Valgrind emulates this by creating exactly
   the same process structure as would be created without Valgrind.
   There are no extra threads.

   The main difference is that Valgrind only allows one client thread
   to run at once.  This is controlled with the CPU Big Lock,
   "the_BigLock".  Any time a thread wants to run client code or
   manipulate any shared state (which is anything other than its own
   ThreadState entry), it must hold the_BigLock.

   When a thread is about to block in a blocking syscall, it releases
   the_BigLock, and re-takes it when it becomes runnable again (either
   because the syscall finished, or we took a signal).

   VG_(scheduler) therefore runs in each thread.  It returns only when
   the thread is exiting, either because it exited itself, or it was
   told to exit by another thread.

   This file is almost entirely OS-independent.  The details of how
   the OS handles threading and signalling are abstracted away and
   implemented elsewhere.  [Some of the functions have worked their
   way back for the moment, until we do an OS port in earnest...]
*/
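
/* An illustrative sketch (not part of the scheduler itself): the usual
   shape of code that is about to block in the kernel, using the lock
   primitives defined below, is roughly

      VG_(release_BigLock)(tid, VgTs_WaitSys, "about to block");
      ... blocking syscall runs; other threads may be scheduled ...
      VG_(acquire_BigLock)(tid, "syscall finished");

   In the real sources this pattern is driven from handle_syscall()
   and the syswrap layer rather than being written out by hand. */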


#include "pub_core_basics.h"
#include "pub_core_debuglog.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"    // __NR_sched_yield
#include "pub_core_threadstate.h"
#include "pub_core_clientstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_clreq.h"        // for VG_USERREQ__*
#include "pub_core_dispatch.h"
#include "pub_core_errormgr.h"     // For VG_(get_n_errs_found)()
#include "pub_core_gdbserver.h"    // for VG_(gdbserver)/VG_(gdbserver_activity)
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#if defined(VGO_darwin)
#include "pub_core_mach.h"
#endif
#include "pub_core_machine.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_replacemalloc.h"
#include "pub_core_sbprofile.h"
#include "pub_core_signals.h"
#include "pub_core_stacks.h"
#include "pub_core_stacktrace.h"   // For VG_(get_and_pp_StackTrace)()
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"
#include "pub_core_translate.h"    // For VG_(translate)()
#include "pub_core_transtab.h"
#include "pub_core_debuginfo.h"    // VG_(di_notify_pdb_debuginfo)
#include "priv_sched-lock.h"
#include "pub_core_scheduler.h"    // self
#include "pub_core_redir.h"
#include "libvex_emnote.h"         // VexEmNote


/* ---------------------------------------------------------------------
   Types and globals for the scheduler.
   ------------------------------------------------------------------ */

/* ThreadId and ThreadState are defined elsewhere. */

/* Defines the thread-scheduling timeslice, in terms of the number of
   basic blocks we attempt to run each thread for.  Smaller values
   give finer interleaving but much increased scheduling overheads. */
#define SCHEDULING_QUANTUM   100000

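/* (Note: VG_(scheduler), further below, re-arms its per-timeslice
   dispatch counter from this value; run_thread_for_a_while() then
   counts executed blocks off against that counter.) */
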
/* If False, a fault is Valgrind-internal (ie, a bug) */
Bool VG_(in_generated_code) = False;

/* 64-bit counter for the number of basic blocks done. */
static ULong bbs_done = 0;

/* Counter used to decide when to check for vgdb activity.
   When the number of bbs done reaches vgdb_next_poll, the scheduler
   will poll for gdbserver activity.  VG_(force_vgdb_poll) and
   VG_(disable_vgdb_poll) allow the valgrind core (e.g. m_gdbserver)
   to control when the next poll will be done. */
static ULong vgdb_next_poll;

/* Forwards */
static void do_client_request ( ThreadId tid );
static void scheduler_sanity ( ThreadId tid );
static void mostly_clear_thread_record ( ThreadId tid );

/* Stats. */
static ULong n_scheduling_events_MINOR = 0;
static ULong n_scheduling_events_MAJOR = 0;

/* Stats: number of XIndirs, and number that missed in the fast
   cache. */
static ULong stats__n_xindirs = 0;
static ULong stats__n_xindir_misses = 0;

/* And 32-bit temp bins for the above, so that 32-bit platforms don't
   have to do 64 bit incs on the hot path through
   VG_(cp_disp_xindir). */
/*global*/ UInt VG_(stats__n_xindirs_32) = 0;
/*global*/ UInt VG_(stats__n_xindir_misses_32) = 0;

/* Sanity checking counts. */
static UInt sanity_fast_count = 0;
static UInt sanity_slow_count = 0;

void VG_(print_scheduler_stats)(void)
{
   VG_(message)(Vg_DebugMsg,
      "scheduler: %'llu event checks.\n", bbs_done );
   VG_(message)(Vg_DebugMsg,
      "scheduler: %'llu indir transfers, %'llu misses (1 in %llu)\n",
      stats__n_xindirs, stats__n_xindir_misses,
      stats__n_xindirs / (stats__n_xindir_misses
                          ? stats__n_xindir_misses : 1));
   VG_(message)(Vg_DebugMsg,
      "scheduler: %'llu/%'llu major/minor sched events.\n",
      n_scheduling_events_MAJOR, n_scheduling_events_MINOR);
   VG_(message)(Vg_DebugMsg,
      "   sanity: %u cheap, %u expensive checks.\n",
      sanity_fast_count, sanity_slow_count );
}

/*
 * Mutual exclusion object used to serialize threads.
 */
static struct sched_lock *the_BigLock;


/* ---------------------------------------------------------------------
   Helper functions for the scheduler.
   ------------------------------------------------------------------ */

static
void print_sched_event ( ThreadId tid, const HChar* what )
{
   VG_(message)(Vg_DebugMsg, "  SCHED[%u]: %s\n", tid, what );
}

/* For showing SB profiles, if the user asks to see them. */
static
void maybe_show_sb_profile ( void )
{
   /* DO NOT MAKE NON-STATIC */
   static ULong bbs_done_lastcheck = 0;
   /* */
   vg_assert(VG_(clo_profyle_interval) > 0);
   Long delta = (Long)(bbs_done - bbs_done_lastcheck);
   vg_assert(delta >= 0);
   if ((ULong)delta >= VG_(clo_profyle_interval)) {
      bbs_done_lastcheck = bbs_done;
      VG_(get_and_show_SB_profile)(bbs_done);
   }
}

static
const HChar* name_of_sched_event ( UInt event )
{
   switch (event) {
      case VEX_TRC_JMP_INVALICACHE:    return "INVALICACHE";
      case VEX_TRC_JMP_FLUSHDCACHE:    return "FLUSHDCACHE";
      case VEX_TRC_JMP_NOREDIR:        return "NOREDIR";
      case VEX_TRC_JMP_SIGILL:         return "SIGILL";
      case VEX_TRC_JMP_SIGTRAP:        return "SIGTRAP";
      case VEX_TRC_JMP_SIGSEGV:        return "SIGSEGV";
      case VEX_TRC_JMP_SIGBUS:         return "SIGBUS";
      case VEX_TRC_JMP_SIGFPE_INTOVF:
      case VEX_TRC_JMP_SIGFPE_INTDIV:  return "SIGFPE";
      case VEX_TRC_JMP_EMWARN:         return "EMWARN";
      case VEX_TRC_JMP_EMFAIL:         return "EMFAIL";
      case VEX_TRC_JMP_CLIENTREQ:      return "CLIENTREQ";
      case VEX_TRC_JMP_YIELD:          return "YIELD";
      case VEX_TRC_JMP_NODECODE:       return "NODECODE";
      case VEX_TRC_JMP_MAPFAIL:        return "MAPFAIL";
      case VEX_TRC_JMP_SYS_SYSCALL:    return "SYSCALL";
      case VEX_TRC_JMP_SYS_INT32:      return "INT32";
      case VEX_TRC_JMP_SYS_INT128:     return "INT128";
      case VEX_TRC_JMP_SYS_INT129:     return "INT129";
      case VEX_TRC_JMP_SYS_INT130:     return "INT130";
      case VEX_TRC_JMP_SYS_INT145:     return "INT145";
      case VEX_TRC_JMP_SYS_INT210:     return "INT210";
      case VEX_TRC_JMP_SYS_SYSENTER:   return "SYSENTER";
      case VEX_TRC_JMP_BORING:         return "VEX_BORING";

      case VG_TRC_BORING:              return "VG_BORING";
      case VG_TRC_INNER_FASTMISS:      return "FASTMISS";
      case VG_TRC_INNER_COUNTERZERO:   return "COUNTERZERO";
      case VG_TRC_FAULT_SIGNAL:        return "FAULTSIGNAL";
      case VG_TRC_INVARIANT_FAILED:    return "INVFAILED";
      case VG_TRC_CHAIN_ME_TO_SLOW_EP: return "CHAIN_ME_SLOW";
      case VG_TRC_CHAIN_ME_TO_FAST_EP: return "CHAIN_ME_FAST";
      default:                         return "??UNKNOWN??";
   }
}

/* Allocate a completely empty ThreadState record. */
ThreadId VG_(alloc_ThreadState) ( void )
{
   Int i;
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty) {
         VG_(threads)[i].status = VgTs_Init;
         VG_(threads)[i].exitreason = VgSrc_None;
         if (VG_(threads)[i].thread_name)
            VG_(free)(VG_(threads)[i].thread_name);
         VG_(threads)[i].thread_name = NULL;
         return i;
      }
   }
   VG_(printf)("Use --max-threads=INT to specify a larger number of threads\n"
               "and rerun valgrind\n");
   VG_(core_panic)("Max number of threads is too low");
   /*NOTREACHED*/
}

/*
   Mark a thread as Runnable.  This will block until the_BigLock is
   available, so that we get exclusive access to all the shared
   structures and the CPU.  Up until we get the_BigLock, we must not
   touch any shared state.

   When this returns, we'll actually be running.
 */
void VG_(acquire_BigLock)(ThreadId tid, const HChar* who)
{
   ThreadState *tst;

#if 0
   if (VG_(clo_trace_sched)) {
      HChar buf[VG_(strlen)(who) + 30];
      VG_(sprintf)(buf, "waiting for lock (%s)", who);
      print_sched_event(tid, buf);
   }
#endif

   /* First, acquire the_BigLock.  We can't do anything else safely
      prior to this point.  Even doing debug printing prior to this
      point is, technically, wrong. */
   VG_(acquire_BigLock_LL)(NULL);

   tst = VG_(get_ThreadState)(tid);

   vg_assert(tst->status != VgTs_Runnable);

   tst->status = VgTs_Runnable;

   if (VG_(running_tid) != VG_INVALID_THREADID)
      VG_(printf)("tid %u found %u running\n", tid, VG_(running_tid));
   vg_assert(VG_(running_tid) == VG_INVALID_THREADID);
   VG_(running_tid) = tid;

   { Addr gsp = VG_(get_SP)(tid);
     if (NULL != VG_(tdict).track_new_mem_stack_w_ECU)
        VG_(unknown_SP_update_w_ECU)(gsp, gsp, 0/*unknown origin*/);
     else
        VG_(unknown_SP_update)(gsp, gsp);
   }

   if (VG_(clo_trace_sched)) {
      HChar buf[VG_(strlen)(who) + 30];
      VG_(sprintf)(buf, " acquired lock (%s)", who);
      print_sched_event(tid, buf);
   }
}

/*
   Set a thread into a sleeping state, and give up exclusive access to
   the CPU.  On return, the thread must be prepared to block until it
   is ready to run again (generally this means blocking in a syscall,
   but it may mean that we remain in a Runnable state and we're just
   yielding the CPU to another thread).
 */
void VG_(release_BigLock)(ThreadId tid, ThreadStatus sleepstate,
                          const HChar* who)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   vg_assert(tst->status == VgTs_Runnable);

   vg_assert(sleepstate == VgTs_WaitSys ||
             sleepstate == VgTs_Yielding);

   tst->status = sleepstate;

   vg_assert(VG_(running_tid) == tid);
   VG_(running_tid) = VG_INVALID_THREADID;

   if (VG_(clo_trace_sched)) {
      const HChar *status = VG_(name_of_ThreadStatus)(sleepstate);
      HChar buf[VG_(strlen)(who) + VG_(strlen)(status) + 30];
      VG_(sprintf)(buf, "releasing lock (%s) -> %s", who, status);
      print_sched_event(tid, buf);
   }

   /* Release the_BigLock; this will reschedule any runnable
      thread. */
   VG_(release_BigLock_LL)(NULL);
}

static void init_BigLock(void)
{
   vg_assert(!the_BigLock);
   the_BigLock = ML_(create_sched_lock)();
}

static void deinit_BigLock(void)
{
   ML_(destroy_sched_lock)(the_BigLock);
   the_BigLock = NULL;
}

/* See pub_core_scheduler.h for description */
void VG_(acquire_BigLock_LL) ( const HChar* who )
{
   ML_(acquire_sched_lock)(the_BigLock);
}

/* See pub_core_scheduler.h for description */
void VG_(release_BigLock_LL) ( const HChar* who )
{
   ML_(release_sched_lock)(the_BigLock);
}

Bool VG_(owns_BigLock_LL) ( ThreadId tid )
{
   return (ML_(get_sched_lock_owner)(the_BigLock)
           == VG_(threads)[tid].os_state.lwpid);
}

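/* (Illustrative note: code elsewhere in the core that must hold the
   lock typically guards itself with an assertion along the lines of

      vg_assert(VG_(owns_BigLock_LL)(tid));

   This is a sketch of the intended use rather than something enforced
   here.) */
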

/* Clear out the ThreadState and release the semaphore. Leaves the
   ThreadState in VgTs_Zombie state, so that it doesn't get
   reallocated until the caller is really ready. */
void VG_(exit_thread)(ThreadId tid)
{
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(VG_(is_exiting)(tid));

   mostly_clear_thread_record(tid);
   VG_(running_tid) = VG_INVALID_THREADID;

   /* There should still be a valid exitreason for this thread */
   vg_assert(VG_(threads)[tid].exitreason != VgSrc_None);

   if (VG_(clo_trace_sched))
      print_sched_event(tid, "release lock in VG_(exit_thread)");

   VG_(release_BigLock_LL)(NULL);
}

/* If 'tid' is blocked in a syscall, send it SIGVGKILL so as to get it
   out of the syscall and onto doing the next thing, whatever that is.
   If it isn't blocked in a syscall, this has no effect on the thread. */
void VG_(get_thread_out_of_syscall)(ThreadId tid)
{
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(!VG_(is_running_thread)(tid));

   if (VG_(threads)[tid].status == VgTs_WaitSys) {
      if (VG_(clo_trace_signals)) {
         VG_(message)(Vg_DebugMsg,
                      "get_thread_out_of_syscall zaps tid %u lwp %d\n",
                      tid, VG_(threads)[tid].os_state.lwpid);
      }
#     if defined(VGO_darwin)
      {
         // GrP fixme use mach primitives on darwin?
         // GrP fixme thread_abort_safely?
         // GrP fixme race for thread with WaitSys set but not in syscall yet?
         extern kern_return_t thread_abort(mach_port_t);
         thread_abort(VG_(threads)[tid].os_state.lwpid);
      }
#     else
      {
         __attribute__((unused))
         Int r = VG_(tkill)(VG_(threads)[tid].os_state.lwpid, VG_SIGVGKILL);
         /* JRS 2009-Mar-20: should we assert for r==0 (tkill succeeded)?
            I'm really not sure.  Here's a race scenario which argues
            that we shouldn't; but equally I'm not sure the scenario is
            even possible, because of constraints caused by the question
            of who holds the BigLock when.

            Target thread tid does sys_read on a socket and blocks.  This
            function gets called, and we observe correctly that tid's
            status is WaitSys but then for whatever reason this function
            goes very slowly for a while.  Then data arrives from
            wherever, tid's sys_read returns, tid exits.  Then we do
            tkill on tid, but tid no longer exists; tkill returns an
            error code and the assert fails. */
         /* vg_assert(r == 0); */
      }
#     endif
   }
}

/*
   Yield the CPU for a short time to let some other thread run.
 */
void VG_(vg_yield)(void)
{
   ThreadId tid = VG_(running_tid);

   vg_assert(tid != VG_INVALID_THREADID);
   vg_assert(VG_(threads)[tid].os_state.lwpid == VG_(gettid)());

   VG_(release_BigLock)(tid, VgTs_Yielding, "VG_(vg_yield)");

   /*
      Tell the kernel we're yielding.
    */
#  if defined(VGO_linux) || defined(VGO_darwin)
   VG_(do_syscall0)(__NR_sched_yield);
#  elif defined(VGO_solaris)
   VG_(do_syscall0)(__NR_yield);
#  else
#    error Unknown OS
#  endif

   VG_(acquire_BigLock)(tid, "VG_(vg_yield)");
}


/* Set the standard set of blocked signals, used whenever we're not
   running a client syscall. */
static void block_signals(void)
{
   vki_sigset_t mask;

   VG_(sigfillset)(&mask);

   /* Don't block these because they're synchronous */
   VG_(sigdelset)(&mask, VKI_SIGSEGV);
   VG_(sigdelset)(&mask, VKI_SIGBUS);
   VG_(sigdelset)(&mask, VKI_SIGFPE);
   VG_(sigdelset)(&mask, VKI_SIGILL);
   VG_(sigdelset)(&mask, VKI_SIGTRAP);

   /* Can't block these anyway */
   VG_(sigdelset)(&mask, VKI_SIGSTOP);
   VG_(sigdelset)(&mask, VKI_SIGKILL);

   VG_(sigprocmask)(VKI_SIG_SETMASK, &mask, NULL);
}

static void os_state_clear(ThreadState *tst)
{
   tst->os_state.lwpid       = 0;
   tst->os_state.threadgroup = 0;
   tst->os_state.stk_id      = NULL_STK_ID;
#  if defined(VGO_linux)
   /* no other fields to clear */
#  elif defined(VGO_darwin)
   tst->os_state.post_mach_trap_fn = NULL;
   tst->os_state.pthread           = 0;
   tst->os_state.func_arg          = 0;
   VG_(memset)(&tst->os_state.child_go,   0, sizeof(tst->os_state.child_go));
   VG_(memset)(&tst->os_state.child_done, 0, sizeof(tst->os_state.child_done));
   tst->os_state.wq_jmpbuf_valid   = False;
   tst->os_state.remote_port       = 0;
   tst->os_state.msgh_id           = 0;
   VG_(memset)(&tst->os_state.mach_args, 0, sizeof(tst->os_state.mach_args));
#  elif defined(VGO_solaris)
#     if defined(VGP_x86_solaris)
   tst->os_state.thrptr = 0;
#     endif
   tst->os_state.ustack = NULL;
   tst->os_state.in_door_return = False;
   tst->os_state.door_return_procedure = 0;
   tst->os_state.oldcontext = NULL;
   tst->os_state.schedctl_data = 0;
   tst->os_state.daemon_thread = False;
#  else
#    error "Unknown OS"
#  endif
}

static void os_state_init(ThreadState *tst)
{
   tst->os_state.valgrind_stack_base    = 0;
   tst->os_state.valgrind_stack_init_SP = 0;
   os_state_clear(tst);
}

static
void mostly_clear_thread_record ( ThreadId tid )
{
   vki_sigset_t savedmask;

   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   VG_(cleanup_thread)(&VG_(threads)[tid].arch);
   VG_(threads)[tid].tid = tid;

   /* Leave the thread in Zombie, so that it doesn't get reallocated
      until the caller is finally done with the thread stack. */
   VG_(threads)[tid].status = VgTs_Zombie;

   VG_(sigemptyset)(&VG_(threads)[tid].sig_mask);
   VG_(sigemptyset)(&VG_(threads)[tid].tmp_sig_mask);

   os_state_clear(&VG_(threads)[tid]);

   /* start with no altstack */
   VG_(threads)[tid].altstack.ss_sp = (void *)0xdeadbeef;
   VG_(threads)[tid].altstack.ss_size = 0;
   VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;

   VG_(clear_out_queued_signals)(tid, &savedmask);

   VG_(threads)[tid].sched_jmpbuf_valid = False;
}

/*
   Called in the child after fork.  If the parent has multiple
   threads, then we've inherited a VG_(threads) array describing them,
   but only the thread which called fork() is actually alive in the
   child.  This function needs to clean up all those other thread
   structures.

   Whichever tid in the parent called fork() becomes the
   master_tid in the child.  That's because the only living slot in
   VG_(threads) in the child after fork is VG_(threads)[tid], and it
   would be too hard to try to re-number the thread and relocate the
   thread state down to VG_(threads)[1].

   This function also needs to reinitialize the_BigLock, since
   otherwise we may end up sharing its state with the parent, which
   would be deeply confusing.
*/
static void sched_fork_cleanup(ThreadId me)
{
   ThreadId tid;
   vg_assert(VG_(running_tid) == me);

#  if defined(VGO_darwin)
   // GrP fixme hack reset Mach ports
   VG_(mach_init)();
#  endif

   VG_(threads)[me].os_state.lwpid = VG_(gettid)();
   VG_(threads)[me].os_state.threadgroup = VG_(getpid)();

   /* clear out all the unused thread slots */
   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (tid != me) {
         mostly_clear_thread_record(tid);
         VG_(threads)[tid].status = VgTs_Empty;
         VG_(clear_syscallInfo)(tid);
      }
   }

   /* re-init and take the sema */
   deinit_BigLock();
   init_BigLock();
   VG_(acquire_BigLock_LL)(NULL);
}


/* First phase of initialisation of the scheduler.  Initialise the
   bigLock, zeroise the VG_(threads) structure and decide on the
   ThreadId of the root thread.
*/
ThreadId VG_(scheduler_init_phase1) ( void )
{
   Int i;
   ThreadId tid_main;

   VG_(debugLog)(1,"sched","sched_init_phase1\n");

   if (VG_(clo_fair_sched) != disable_fair_sched
       && !ML_(set_sched_lock_impl)(sched_lock_ticket)
       && VG_(clo_fair_sched) == enable_fair_sched)
   {
      VG_(printf)("Error: fair scheduling is not supported on this system.\n");
      VG_(exit)(1);
   }

   if (VG_(clo_verbosity) > 1) {
      VG_(message)(Vg_DebugMsg,
                   "Scheduler: using %s scheduler lock implementation.\n",
                   ML_(get_sched_lock_name)());
   }

   init_BigLock();

   for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
      /* Paranoia .. completely zero it out. */
      VG_(memset)( & VG_(threads)[i], 0, sizeof( VG_(threads)[i] ) );

      VG_(threads)[i].sig_queue = NULL;

      os_state_init(&VG_(threads)[i]);
      mostly_clear_thread_record(i);

      VG_(threads)[i].status                    = VgTs_Empty;
      VG_(threads)[i].client_stack_szB          = 0;
      VG_(threads)[i].client_stack_highest_byte = (Addr)NULL;
      VG_(threads)[i].err_disablement_level     = 0;
      VG_(threads)[i].thread_name               = NULL;
   }

   tid_main = VG_(alloc_ThreadState)();

   /* Bleh.  Unfortunately there are various places in the system that
      assume that the main thread has a ThreadId of 1.
      - Helgrind (possibly)
      - stack overflow message in default_action() in m_signals.c
      - definitely a lot more places
   */
   vg_assert(tid_main == 1);

   return tid_main;
}


/* Second phase of initialisation of the scheduler.  Given the root
   ThreadId computed by first phase of initialisation, fill in stack
   details and acquire bigLock.  Initialise the scheduler.  This is
   called at startup.  The caller subsequently initialises the guest
   state components of this main thread.
*/
void VG_(scheduler_init_phase2) ( ThreadId tid_main,
                                  Addr     clstack_end,
                                  SizeT    clstack_size )
{
   VG_(debugLog)(1,"sched","sched_init_phase2: tid_main=%u, "
                 "cls_end=0x%lx, cls_sz=%lu\n",
                 tid_main, clstack_end, clstack_size);

   vg_assert(VG_IS_PAGE_ALIGNED(clstack_end+1));
   vg_assert(VG_IS_PAGE_ALIGNED(clstack_size));

   VG_(threads)[tid_main].client_stack_highest_byte
      = clstack_end;
   VG_(threads)[tid_main].client_stack_szB
      = clstack_size;

   VG_(atfork)(NULL, NULL, sched_fork_cleanup);
}


/* ---------------------------------------------------------------------
   Helpers for running translations.
   ------------------------------------------------------------------ */

/* Use gcc's built-in setjmp/longjmp.  longjmp must not restore signal
   mask state, but does need to pass "val" through.  jumped must be a
   volatile UWord. */
#define SCHEDSETJMP(tid, jumped, stmt)                                  \
   do {                                                                 \
      ThreadState * volatile _qq_tst = VG_(get_ThreadState)(tid);       \
                                                                        \
      (jumped) = VG_MINIMAL_SETJMP(_qq_tst->sched_jmpbuf);              \
      if ((jumped) == ((UWord)0)) {                                     \
         vg_assert(!_qq_tst->sched_jmpbuf_valid);                       \
         _qq_tst->sched_jmpbuf_valid = True;                            \
         stmt;                                                          \
      } else if (VG_(clo_trace_sched))                                  \
         VG_(printf)("SCHEDSETJMP(line %d) tid %u, jumped=%lu\n",       \
                     __LINE__, tid, jumped);                            \
      vg_assert(_qq_tst->sched_jmpbuf_valid);                           \
      _qq_tst->sched_jmpbuf_valid = False;                              \
   } while(0)

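/* For illustration, the way this macro is used later in this file is
   essentially

      volatile UWord jumped;
      SCHEDSETJMP(tid, jumped, VG_(client_syscall)(tid, trc));
      if (jumped != (UWord)0) { ... the client code faulted ... }

   (see handle_syscall() and run_thread_for_a_while() below). */
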

/* Do various guest state alignment checks prior to running a thread.
   Specifically, check that what we have matches Vex's guest state
   layout requirements.  See libvex.h for details, but in short the
   requirements are: There must be no holes in between the primary
   guest state, its two copies, and the spill area.  In short, all 4
   areas must be aligned on the LibVEX_GUEST_STATE_ALIGN boundary and
   be placed back-to-back without holes in between. */
static void do_pre_run_checks ( volatile ThreadState* tst )
{
   Addr a_vex     = (Addr) & tst->arch.vex;
   Addr a_vexsh1  = (Addr) & tst->arch.vex_shadow1;
   Addr a_vexsh2  = (Addr) & tst->arch.vex_shadow2;
   Addr a_spill   = (Addr) & tst->arch.vex_spill;
   UInt sz_vex    = (UInt) sizeof tst->arch.vex;
   UInt sz_vexsh1 = (UInt) sizeof tst->arch.vex_shadow1;
   UInt sz_vexsh2 = (UInt) sizeof tst->arch.vex_shadow2;
   UInt sz_spill  = (UInt) sizeof tst->arch.vex_spill;

   if (0)
      VG_(printf)("gst %p %u, sh1 %p %u, "
                  "sh2 %p %u, spill %p %u\n",
                  (void*)a_vex, sz_vex,
                  (void*)a_vexsh1, sz_vexsh1,
                  (void*)a_vexsh2, sz_vexsh2,
                  (void*)a_spill, sz_spill );

   vg_assert(sz_vex    % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(sz_vexsh1 % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(sz_vexsh2 % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(sz_spill  % LibVEX_GUEST_STATE_ALIGN == 0);

   vg_assert(a_vex    % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(a_vexsh1 % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(a_vexsh2 % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(a_spill  % LibVEX_GUEST_STATE_ALIGN == 0);

   /* Check that the guest state and its two shadows have the same
      size, and that there are no holes in between.  The latter is
      important because Memcheck assumes that it can reliably access
      the shadows by indexing off a pointer to the start of the
      primary guest state area. */
   vg_assert(sz_vex == sz_vexsh1);
   vg_assert(sz_vex == sz_vexsh2);
   vg_assert(a_vex + 1 * sz_vex == a_vexsh1);
   vg_assert(a_vex + 2 * sz_vex == a_vexsh2);
   /* Also check there's no hole between the second shadow area and
      the spill area. */
   vg_assert(sz_spill == LibVEX_N_SPILL_BYTES);
   vg_assert(a_vex + 3 * sz_vex == a_spill);

#  if defined(VGA_x86)
   /* x86 XMM regs must form an array, ie, have no holes in
      between. */
   vg_assert(
      (offsetof(VexGuestX86State,guest_XMM7)
       - offsetof(VexGuestX86State,guest_XMM0))
      == (8/*#regs*/-1) * 16/*bytes per reg*/
   );
   vg_assert(VG_IS_16_ALIGNED(offsetof(VexGuestX86State,guest_XMM0)));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestX86State,guest_FPREG)));
   vg_assert(8 == offsetof(VexGuestX86State,guest_EAX));
   vg_assert(VG_IS_4_ALIGNED(offsetof(VexGuestX86State,guest_EAX)));
   vg_assert(VG_IS_4_ALIGNED(offsetof(VexGuestX86State,guest_EIP)));
#  endif

#  if defined(VGA_amd64)
   /* amd64 YMM regs must form an array, ie, have no holes in
      between. */
   vg_assert(
      (offsetof(VexGuestAMD64State,guest_YMM16)
       - offsetof(VexGuestAMD64State,guest_YMM0))
      == (17/*#regs*/-1) * 32/*bytes per reg*/
   );
   vg_assert(VG_IS_16_ALIGNED(offsetof(VexGuestAMD64State,guest_YMM0)));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_FPREG)));
   vg_assert(16 == offsetof(VexGuestAMD64State,guest_RAX));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_RAX)));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_RIP)));
#  endif

#  if defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_ppc64le)
   /* ppc guest_state vector regs must be 16 byte aligned for
      loads/stores.  This is important! */
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VSR0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_VSR0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_VSR0));
   /* be extra paranoid .. */
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VSR1));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_VSR1));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_VSR1));
#  endif

#  if defined(VGA_arm)
   /* arm guest_state VFP regs must be 8 byte aligned for
      loads/stores.  Let's use 16 just to be on the safe side. */
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_D0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_D0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_D0));
   /* be extra paranoid .. */
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex.guest_D1));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow1.guest_D1));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow2.guest_D1));
#  endif

#  if defined(VGA_arm64)
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex.guest_X0));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow1.guest_X0));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow2.guest_X0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_Q0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_Q0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_Q0));
#  endif

#  if defined(VGA_s390x)
   /* no special requirements */
#  endif

#  if defined(VGA_mips32) || defined(VGA_mips64)
   /* no special requirements */
#  endif
}

// NO_VGDB_POLL value ensures vgdb is not polled, while
// VGDB_POLL_ASAP ensures that the next scheduler call
// will cause a poll.
#define NO_VGDB_POLL    0xffffffffffffffffULL
#define VGDB_POLL_ASAP  0x0ULL

void VG_(disable_vgdb_poll) (void )
{
   vgdb_next_poll = NO_VGDB_POLL;
}
void VG_(force_vgdb_poll) ( void )
{
   vgdb_next_poll = VGDB_POLL_ASAP;
}

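/* (Note: the comparison against vgdb_next_poll is made near the end of
   run_thread_for_a_while() below, once bbs_done has been updated.) */
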
/* Run the thread tid for a while, and return a VG_TRC_* value
   indicating why VG_(disp_run_translations) stopped, and possibly an
   auxiliary word.  Also, only allow the thread to run for at most
   *dispatchCtrP events.  If (as is the normal case) use_alt_host_addr
   is False, we are running ordinary redir'd translations, and we
   should therefore start by looking up the guest next IP in TT.  If
   it is True then we ignore the guest next IP and just run from
   alt_host_addr, which presumably points at host code for a no-redir
   translation.

   Return results are placed in two_words.  two_words[0] is set to the
   TRC.  In the case where that is VG_TRC_CHAIN_ME_TO_{SLOW,FAST}_EP,
   the address to patch is placed in two_words[1].
*/
static
void run_thread_for_a_while ( /*OUT*/HWord* two_words,
                              /*MOD*/Int*   dispatchCtrP,
                              ThreadId      tid,
                              HWord         alt_host_addr,
                              Bool          use_alt_host_addr )
{
   volatile HWord        jumped         = 0;
   volatile ThreadState* tst            = NULL; /* stop gcc complaining */
   volatile Int          done_this_time = 0;
   volatile HWord        host_code_addr = 0;

   /* Paranoia */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(!VG_(is_exiting)(tid));
   vg_assert(*dispatchCtrP > 0);

   tst = VG_(get_ThreadState)(tid);
   do_pre_run_checks( tst );
   /* end Paranoia */

   /* Futz with the XIndir stats counters. */
   vg_assert(VG_(stats__n_xindirs_32) == 0);
   vg_assert(VG_(stats__n_xindir_misses_32) == 0);

   /* Clear return area. */
   two_words[0] = two_words[1] = 0;

   /* Figure out where we're starting from. */
   if (use_alt_host_addr) {
      /* unusual case -- no-redir translation */
      host_code_addr = alt_host_addr;
   } else {
      /* normal case -- redir translation */
      UInt cno = (UInt)VG_TT_FAST_HASH((Addr)tst->arch.vex.VG_INSTR_PTR);
      if (LIKELY(VG_(tt_fast)[cno].guest == (Addr)tst->arch.vex.VG_INSTR_PTR))
         host_code_addr = VG_(tt_fast)[cno].host;
      else {
         Addr res = 0;
         /* Not found in VG_(tt_fast).  Searching the transtab here
            improves performance compared to returning directly
            to the scheduler. */
         Bool found = VG_(search_transtab)(&res, NULL, NULL,
                                           (Addr)tst->arch.vex.VG_INSTR_PTR,
                                           True/*upd cache*/
                                           );
         if (LIKELY(found)) {
            host_code_addr = res;
         } else {
            /* At this point, we know that we intended to start at a
               normal redir translation, but it was not found.  In
               which case we can return now claiming it's not
               findable. */
            two_words[0] = VG_TRC_INNER_FASTMISS; /* hmm, is that right? */
            return;
         }
      }
   }
   /* We have either a no-redir or a redir translation. */
   vg_assert(host_code_addr != 0); /* implausible */

   /* there should be no undealt-with signals */
   //vg_assert(VG_(threads)[tid].siginfo.si_signo == 0);

   /* Set up event counter stuff for the run. */
   tst->arch.vex.host_EvC_COUNTER = *dispatchCtrP;
   tst->arch.vex.host_EvC_FAILADDR
      = (HWord)VG_(fnptr_to_fnentry)( &VG_(disp_cp_evcheck_fail) );

   /* Invalidate any in-flight LL/SC transactions, in the case that we're
      using the fallback LL/SC implementation.  See bugs 344524 and 369459. */
#  if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
   tst->arch.vex.guest_LLaddr = (HWord)(-1);
#  elif defined(VGP_arm64_linux)
   tst->arch.vex.guest_LLSC_SIZE = 0;
#  endif

   if (0) {
      vki_sigset_t m;
      Int i, err = VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &m);
      vg_assert(err == 0);
      VG_(printf)("tid %u: entering code with unblocked signals: ", tid);
      for (i = 1; i <= _VKI_NSIG; i++)
         if (!VG_(sigismember)(&m, i))
            VG_(printf)("%d ", i);
      VG_(printf)("\n");
   }

   /* Set up return-value area. */

   // Tell the tool this thread is about to run client code
   VG_TRACK( start_client_code, tid, bbs_done );

   vg_assert(VG_(in_generated_code) == False);
   VG_(in_generated_code) = True;

   SCHEDSETJMP(
      tid,
      jumped,
      VG_(disp_run_translations)(
         two_words,
         (volatile void*)&tst->arch.vex,
         host_code_addr
      )
   );

   vg_assert(VG_(in_generated_code) == True);
   VG_(in_generated_code) = False;

   if (jumped != (HWord)0) {
      /* We get here if the client took a fault that caused our signal
         handler to longjmp. */
      vg_assert(two_words[0] == 0 && two_words[1] == 0); // correct?
      two_words[0] = VG_TRC_FAULT_SIGNAL;
      two_words[1] = 0;
      block_signals();
   }

   /* Merge the 32-bit XIndir/miss counters into the 64 bit versions,
      and zero out the 32-bit ones in preparation for the next run of
      generated code. */
   stats__n_xindirs += (ULong)VG_(stats__n_xindirs_32);
   VG_(stats__n_xindirs_32) = 0;
   stats__n_xindir_misses += (ULong)VG_(stats__n_xindir_misses_32);
   VG_(stats__n_xindir_misses_32) = 0;

   /* Inspect the event counter. */
   vg_assert((Int)tst->arch.vex.host_EvC_COUNTER >= -1);
   vg_assert(tst->arch.vex.host_EvC_FAILADDR
             == (HWord)VG_(fnptr_to_fnentry)( &VG_(disp_cp_evcheck_fail)) );

   /* The number of events done this time is the difference between
      the event counter originally and what it is now.  Except -- if
      it has gone negative (to -1) then the transition 0 to -1 doesn't
      correspond to a real executed block, so back it out.  It's like
      this because the event checks decrement the counter first and
      check it for negativeness second, hence the 0 to -1 transition
      causes a bailout and the block it happens in isn't executed. */
   {
      Int dispatchCtrAfterwards = (Int)tst->arch.vex.host_EvC_COUNTER;
      done_this_time = *dispatchCtrP - dispatchCtrAfterwards;
      if (dispatchCtrAfterwards == -1) {
         done_this_time--;
      } else {
         /* If the generated code drives the counter below -1, something
            is seriously wrong. */
         vg_assert(dispatchCtrAfterwards >= 0);
      }
   }

   vg_assert(done_this_time >= 0);
   bbs_done += (ULong)done_this_time;

   *dispatchCtrP -= done_this_time;
   vg_assert(*dispatchCtrP >= 0);

   // Tell the tool this thread has stopped running client code
   VG_TRACK( stop_client_code, tid, bbs_done );

   if (bbs_done >= vgdb_next_poll) {
      if (VG_(clo_vgdb_poll))
         vgdb_next_poll = bbs_done + (ULong)VG_(clo_vgdb_poll);
      else
         /* value was changed due to gdbserver invocation via ptrace */
         vgdb_next_poll = NO_VGDB_POLL;
      if (VG_(gdbserver_activity) (tid))
         VG_(gdbserver) (tid);
   }

   /* TRC value and possible auxiliary patch-address word are already
      in two_words[0] and [1] respectively, as a result of the call to
      VG_(disp_run_translations). */
   /* Stay sane .. */
   if (two_words[0] == VG_TRC_CHAIN_ME_TO_SLOW_EP
       || two_words[0] == VG_TRC_CHAIN_ME_TO_FAST_EP) {
      vg_assert(two_words[1] != 0); /* we have a legit patch addr */
   } else {
      vg_assert(two_words[1] == 0); /* nobody messed with it */
   }
}


/* ---------------------------------------------------------------------
   The scheduler proper.
   ------------------------------------------------------------------ */

static void handle_tt_miss ( ThreadId tid )
{
   Bool found;
   Addr ip = VG_(get_IP)(tid);

   /* Trivial event.  Miss in the fast-cache.  Do a full
      lookup for it. */
   found = VG_(search_transtab)( NULL, NULL, NULL,
                                 ip, True/*upd_fast_cache*/ );
   if (UNLIKELY(!found)) {
      /* Not found; we need to request a translation. */
      if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/,
                          bbs_done, True/*allow redirection*/ )) {
         found = VG_(search_transtab)( NULL, NULL, NULL,
                                       ip, True );
         vg_assert2(found, "handle_tt_miss: missing tt_fast entry");

      } else {
         // If VG_(translate)() fails, it's because it had to throw a
         // signal because the client jumped to a bad address.  That
         // means that either a signal has been set up for delivery,
         // or the thread has been marked for termination.  Either
         // way, we just need to go back into the scheduler loop.
      }
   }
}

static
void handle_chain_me ( ThreadId tid, void* place_to_chain, Bool toFastEP )
{
   Bool  found     = False;
   Addr  ip        = VG_(get_IP)(tid);
   SECno to_sNo    = INV_SNO;
   TTEno to_tteNo  = INV_TTE;

   found = VG_(search_transtab)( NULL, &to_sNo, &to_tteNo,
                                 ip, False/*dont_upd_fast_cache*/ );
   if (!found) {
      /* Not found; we need to request a translation. */
      if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/,
                          bbs_done, True/*allow redirection*/ )) {
         found = VG_(search_transtab)( NULL, &to_sNo, &to_tteNo,
                                       ip, False );
         vg_assert2(found, "handle_chain_me: missing tt_fast entry");
      } else {
         // If VG_(translate)() fails, it's because it had to throw a
         // signal because the client jumped to a bad address.  That
         // means that either a signal has been set up for delivery,
         // or the thread has been marked for termination.  Either
         // way, we just need to go back into the scheduler loop.
         return;
      }
   }
   vg_assert(found);
   vg_assert(to_sNo != INV_SNO);
   vg_assert(to_tteNo != INV_TTE);

   /* So, finally we know where to patch through to.  Do the patching
      and update the various admin tables that allow it to be undone
      in the case that the destination block gets deleted. */
   VG_(tt_tc_do_chaining)( place_to_chain,
                           to_sNo, to_tteNo, toFastEP );
}

static void handle_syscall(ThreadId tid, UInt trc)
{
   ThreadState * volatile tst = VG_(get_ThreadState)(tid);
   volatile UWord jumped;

   /* Syscall may or may not block; either way, it will be
      complete by the time this call returns, and we'll be
      runnable again.  We could take a signal while the
      syscall runs. */

   if (VG_(clo_sanity_level) >= 3) {
      HChar buf[50];    // large enough
      VG_(sprintf)(buf, "(BEFORE SYSCALL, tid %u)", tid);
      Bool ok = VG_(am_do_sync_check)(buf, __FILE__, __LINE__);
      vg_assert(ok);
   }

   SCHEDSETJMP(tid, jumped, VG_(client_syscall)(tid, trc));

   if (VG_(clo_sanity_level) >= 3) {
      HChar buf[50];    // large enough
      VG_(sprintf)(buf, "(AFTER SYSCALL, tid %u)", tid);
      Bool ok = VG_(am_do_sync_check)(buf, __FILE__, __LINE__);
      vg_assert(ok);
   }

   if (!VG_(is_running_thread)(tid))
      VG_(printf)("tid %u not running; VG_(running_tid)=%u, tid %u status %u\n",
                  tid, VG_(running_tid), tid, tst->status);
   vg_assert(VG_(is_running_thread)(tid));

   if (jumped != (UWord)0) {
      block_signals();
      VG_(poll_signals)(tid);
   }
}

sewardja591a052006-01-12 14:04:46 +00001146/* tid just requested a jump to the noredir version of its current
1147 program counter. So make up that translation if needed, run it,
sewardj291849f2012-04-20 23:58:55 +00001148 and return the resulting thread return code in two_words[]. */
1149static
1150void handle_noredir_jump ( /*OUT*/HWord* two_words,
1151 /*MOD*/Int* dispatchCtrP,
1152 ThreadId tid )
sewardja591a052006-01-12 14:04:46 +00001153{
sewardj291849f2012-04-20 23:58:55 +00001154 /* Clear return area. */
1155 two_words[0] = two_words[1] = 0;
1156
florian44bd4462014-12-29 17:04:46 +00001157 Addr hcode = 0;
sewardja591a052006-01-12 14:04:46 +00001158 Addr ip = VG_(get_IP)(tid);
1159
1160 Bool found = VG_(search_unredir_transtab)( &hcode, ip );
1161 if (!found) {
1162 /* Not found; we need to request a translation. */
1163 if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/, bbs_done,
1164 False/*NO REDIRECTION*/ )) {
1165
1166 found = VG_(search_unredir_transtab)( &hcode, ip );
1167 vg_assert2(found, "unredir translation missing after creation?!");
sewardja591a052006-01-12 14:04:46 +00001168 } else {
1169 // If VG_(translate)() fails, it's because it had to throw a
1170 // signal because the client jumped to a bad address. That
1171 // means that either a signal has been set up for delivery,
1172 // or the thread has been marked for termination. Either
1173 // way, we just need to go back into the scheduler loop.
sewardj291849f2012-04-20 23:58:55 +00001174 two_words[0] = VG_TRC_BORING;
1175 return;
sewardja591a052006-01-12 14:04:46 +00001176 }
1177
1178 }
1179
1180 vg_assert(found);
1181 vg_assert(hcode != 0);
1182
sewardj291849f2012-04-20 23:58:55 +00001183 /* Otherwise run it and return the resulting VG_TRC_* value. */
1184 vg_assert(*dispatchCtrP > 0); /* so as to guarantee progress */
1185 run_thread_for_a_while( two_words, dispatchCtrP, tid,
1186 hcode, True/*use hcode*/ );
sewardja591a052006-01-12 14:04:46 +00001187}
1188
1189
sewardjb5f6f512005-03-10 23:59:00 +00001190/*
1191 Run a thread until it wants to exit.
1192
sewardjad0a3a82006-12-17 18:58:55 +00001193 We assume that the caller has already called VG_(acquire_BigLock) for
sewardjb5f6f512005-03-10 23:59:00 +00001194 us, so we own the VCPU. Also, all signals are blocked.
1195 */
1196VgSchedReturnCode VG_(scheduler) ( ThreadId tid )
1197{
sewardj291849f2012-04-20 23:58:55 +00001198 /* Holds the remaining size of this thread's "timeslice". */
1199 Int dispatch_ctr = 0;
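   /* dispatch_ctr is (re)loaded with SCHEDULING_QUANTUM below and counted
      down by run_thread_for_a_while as translations are executed; when it
      reaches zero the dispatcher returns VG_TRC_INNER_COUNTERZERO and this
      thread offers the BigLock to the others. */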
1200
sewardjb5f6f512005-03-10 23:59:00 +00001201 ThreadState *tst = VG_(get_ThreadState)(tid);
sewardj3b290482011-05-06 21:02:55 +00001202 static Bool vgdb_startup_action_done = False;
sewardje663cb92002-04-12 10:26:32 +00001203
sewardjc24be7a2005-03-15 01:40:12 +00001204 if (VG_(clo_trace_sched))
1205 print_sched_event(tid, "entering VG_(scheduler)");
1206
sewardj3b290482011-05-06 21:02:55 +00001207 /* Do vgdb initialization (but once). Only the first (main) task
1208 starting up will do the below.
1209 Initializing gdbserver earlier than at this first
1210 VG_(scheduler) call causes problems:
1211 * at the end of VG_(scheduler_init_phase2) :
1212 The main thread is in VgTs_Init state, but in a not yet
1213 consistent state => the thread cannot be reported to gdb
1214 (e.g. causes an assert in LibVEX_GuestX86_get_eflags when giving
1215 back the guest registers to gdb).
1216 * at end of valgrind_main, just
1217 before VG_(main_thread_wrapper_NORETURN)(1) :
1218 The main thread is still in VgTs_Init state but in a
1219 more advanced state. However, the thread state is not yet
1220 completely initialized: among other things, the os_state is not yet
1221 fully set => the thread is then not properly reported to gdb,
1222 which gets confused (causing e.g. a duplicate thread to be
1223 shown, without a thread id).
1224 * it would be possible to initialize gdbserver "lower" in the
1225 call stack (e.g. in VG_(main_thread_wrapper_NORETURN)) but
1226 these are platform dependent and the place at which
1227 the thread state is completely initialized is not
1228 specific anymore to the main thread (so a similar "do it only
1229 once" would be needed).
1230
1231 => a "once only" initialization here is the best compromise. */
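   /* The guard below is a plain function-local static; that is safe here
      because the first thread to reach this point is asserted to be the
      main thread and the caller holds the_BigLock, so there is no race on
      vgdb_startup_action_done. */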
1232 if (!vgdb_startup_action_done) {
1233 vg_assert(tid == 1); // it must be the main thread.
1234 vgdb_startup_action_done = True;
1235 if (VG_(clo_vgdb) != Vg_VgdbNo) {
1236 /* If we have to poll, ensures we do an initial poll at first
1237 scheduler call. Otherwise, ensure no poll (unless interrupted
1238 by ptrace). */
1239 if (VG_(clo_vgdb_poll))
1240 VG_(force_vgdb_poll) ();
1241 else
1242 VG_(disable_vgdb_poll) ();
1243
1244 vg_assert (VG_(dyn_vgdb_error) == VG_(clo_vgdb_error));
1245 /* As we are initializing, VG_(dyn_vgdb_error) can't have been
1246 changed yet. */
1247
sewardj997546c2011-05-17 18:14:53 +00001248 VG_(gdbserver_prerun_action) (1);
sewardj3b290482011-05-06 21:02:55 +00001249 } else {
1250 VG_(disable_vgdb_poll) ();
1251 }
1252 }
1253
philippe0d22fc02014-08-21 20:01:50 +00001254 if (SimHintiS(SimHint_no_nptl_pthread_stackcache, VG_(clo_sim_hints))
1255 && tid != 1) {
1256 /* We disable the stack cache the first time we see a thread other
1257 than the main thread appearing. At this moment, we are sure the pthread
1258 lib loading is done and the stack cache variable has been initialised. */
1259 if (VG_(client__stack_cache_actsize__addr)) {
1260 if (*VG_(client__stack_cache_actsize__addr) == 0) {
1261 VG_(debugLog)(1,"sched",
1262 "pthread stack cache size disable done"
1263 " via kludge\n");
1264 *VG_(client__stack_cache_actsize__addr) = 1000 * 1000 * 1000;
1265 /* Set a value big enough to be above the hardcoded maximum stack
1266 cache size in glibc, small enough to allow a pthread stack size
1267 to be added without risk of overflow. */
1268 }
1269 } else {
1270 VG_(debugLog)(0,"sched",
1271 "WARNING: pthread stack cache cannot be disabled!\n");
philippef35dad72014-09-02 18:34:25 +00001272 VG_(clo_sim_hints) &= ~SimHint2S(SimHint_no_nptl_pthread_stackcache);
philippe0d22fc02014-08-21 20:01:50 +00001273 /* Remove SimHint_no_nptl_pthread_stackcache from VG_(clo_sim_hints)
1274 to avoid having a msg for all following threads. */
1275 }
1276 }
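   /* How the kludge above works, informally: glibc's nptl keeps freed
      thread stacks in a cache whose accumulated size is tracked in
      stack_cache_actsize and compared against a hardcoded maximum.  By
      forcing the recorded size to ~1GB the cache always looks full, so
      freed stacks are really unmapped instead of being recycled. */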
1277
sewardjb5f6f512005-03-10 23:59:00 +00001278 /* set the proper running signal mask */
njn1dcee092009-02-24 03:07:37 +00001279 block_signals();
sewardjb5f6f512005-03-10 23:59:00 +00001280
1281 vg_assert(VG_(is_running_thread)(tid));
sewardje663cb92002-04-12 10:26:32 +00001282
sewardj291849f2012-04-20 23:58:55 +00001283 dispatch_ctr = SCHEDULING_QUANTUM;
sewardj6072c362002-04-19 14:40:57 +00001284
sewardjf54342a2006-10-17 01:51:24 +00001285 while (!VG_(is_exiting)(tid)) {
1286
sewardj291849f2012-04-20 23:58:55 +00001287 vg_assert(dispatch_ctr >= 0);
1288 if (dispatch_ctr == 0) {
sewardjf54342a2006-10-17 01:51:24 +00001289
sewardjf54342a2006-10-17 01:51:24 +00001290 /* Our slice is done, so yield the CPU to another thread. On
1291 Linux, this doesn't actually sleep between yielding and running,
sewardj6e9de462011-06-28 07:25:29 +00001292 since that would take too much time. */
sewardjf54342a2006-10-17 01:51:24 +00001293
1294 /* 4 July 06: it seems that a zero-length nsleep is needed to
1295 cause async thread cancellation (canceller.c) to terminate
1296 in finite time; else it is in some kind of race/starvation
1297 situation and completion is arbitrarily delayed (although
1298 this is not a deadlock).
1299
1300 Unfortunately these sleeps cause MPI jobs not to terminate
1301 sometimes (some kind of livelock). So sleeping once
1302 every N opportunities appears to work. */
1303
1304 /* 3 Aug 06: doing sys__nsleep works but crashes some apps.
1305 sys_yield also helps the problem, whilst not crashing apps. */
1306
sewardjad0a3a82006-12-17 18:58:55 +00001307 VG_(release_BigLock)(tid, VgTs_Yielding,
1308 "VG_(scheduler):timeslice");
sewardjf54342a2006-10-17 01:51:24 +00001309 /* ------------ now we don't have The Lock ------------ */
1310
sewardjad0a3a82006-12-17 18:58:55 +00001311 VG_(acquire_BigLock)(tid, "VG_(scheduler):timeslice");
sewardjf54342a2006-10-17 01:51:24 +00001312 /* ------------ now we do have The Lock ------------ */
sewardje663cb92002-04-12 10:26:32 +00001313
sewardjb5f6f512005-03-10 23:59:00 +00001314 /* OK, do some relatively expensive housekeeping stuff */
1315 scheduler_sanity(tid);
1316 VG_(sanity_check_general)(False);
sewardje663cb92002-04-12 10:26:32 +00001317
sewardjb5f6f512005-03-10 23:59:00 +00001318 /* Look for any pending signals for this thread, and set them up
1319 for delivery */
1320 VG_(poll_signals)(tid);
sewardje663cb92002-04-12 10:26:32 +00001321
sewardjb5f6f512005-03-10 23:59:00 +00001322 if (VG_(is_exiting)(tid))
1323 break; /* poll_signals picked up a fatal signal */
sewardje663cb92002-04-12 10:26:32 +00001324
sewardjb5f6f512005-03-10 23:59:00 +00001325 /* For stats purposes only. */
1326 n_scheduling_events_MAJOR++;
sewardje663cb92002-04-12 10:26:32 +00001327
sewardj0a1086e2014-08-29 19:12:38 +00001328 /* Figure out how many bbs to ask run_thread_for_a_while to do. */
sewardj291849f2012-04-20 23:58:55 +00001329 dispatch_ctr = SCHEDULING_QUANTUM;
jsgf855d93d2003-10-13 22:26:55 +00001330
sewardjb5f6f512005-03-10 23:59:00 +00001331 /* paranoia ... */
1332 vg_assert(tst->tid == tid);
1333 vg_assert(tst->os_state.lwpid == VG_(gettid)());
sewardje663cb92002-04-12 10:26:32 +00001334 }
1335
sewardjb5f6f512005-03-10 23:59:00 +00001336 /* For stats purposes only. */
1337 n_scheduling_events_MINOR++;
sewardje663cb92002-04-12 10:26:32 +00001338
1339 if (0)
florianc6e5d762015-08-05 22:27:24 +00001340 VG_(message)(Vg_DebugMsg, "thread %u: running for %d bbs\n",
sewardj291849f2012-04-20 23:58:55 +00001341 tid, dispatch_ctr - 1 );
sewardje663cb92002-04-12 10:26:32 +00001342
sewardj291849f2012-04-20 23:58:55 +00001343 HWord trc[2]; /* "two_words" */
1344 run_thread_for_a_while( &trc[0],
1345 &dispatch_ctr,
1346 tid, 0/*ignored*/, False );
sewardje663cb92002-04-12 10:26:32 +00001347
sewardjb5f6f512005-03-10 23:59:00 +00001348 if (VG_(clo_trace_sched) && VG_(clo_verbosity) > 2) {
florianf44ff622014-12-20 16:52:08 +00001349 const HChar *name = name_of_sched_event(trc[0]);
1350 HChar buf[VG_(strlen)(name) + 10]; // large enough
1351 VG_(sprintf)(buf, "TRC: %s", name);
sewardjb5f6f512005-03-10 23:59:00 +00001352 print_sched_event(tid, buf);
sewardje663cb92002-04-12 10:26:32 +00001353 }
1354
sewardj291849f2012-04-20 23:58:55 +00001355 if (trc[0] == VEX_TRC_JMP_NOREDIR) {
sewardj0ec07f32006-01-12 12:32:32 +00001356 /* If we got a request to run a no-redir version of
1357 something, do so now -- handle_noredir_jump just (creates
1358 and) runs that one translation. The flip side is that the
1359 noredir translation can't itself return another noredir
1360 request -- that would be nonsensical. It can, however,
1361 return VG_TRC_BORING, which just means keep going as
1362 normal. */
sewardj291849f2012-04-20 23:58:55 +00001363 /* Note that the fact that we need to continue with a
1364 no-redir jump is not recorded anywhere else in this
1365 thread's state. So we *must* execute the block right now
1366 -- we can't fail to execute it and later resume with it,
1367 because by then we'll have forgotten the fact that it
1368 should be run as no-redir, but will get run as a normal
1369 potentially-redir'd, hence screwing up. This really ought
1370 to be cleaned up, by noting in the guest state that the
1371 next block to be executed should be no-redir. Then we can
1372 suspend and resume at any point, which isn't the case at
1373 the moment. */
sewardj0a1086e2014-08-29 19:12:38 +00001374 /* We can't enter a no-redir translation with the dispatch
1375 ctr set to zero, for the reasons commented just above --
1376 we need to force it to execute right now. So, if the
1377 dispatch ctr is zero, set it to one. Note that this would
1378 have the bad side effect of holding the Big Lock arbitrary
1379 long should there be an arbitrarily long sequence of
1380 back-to-back no-redir translations to run. But we assert
1381 just below that this translation cannot request another
1382 no-redir jump, so we should be safe against that. */
1383 if (dispatch_ctr == 0) {
1384 dispatch_ctr = 1;
1385 }
sewardj291849f2012-04-20 23:58:55 +00001386 handle_noredir_jump( &trc[0],
1387 &dispatch_ctr,
1388 tid );
1389 vg_assert(trc[0] != VEX_TRC_JMP_NOREDIR);
1390
1391 /* This can't be allowed to happen, since it means the block
1392 didn't execute, and we have no way to resume-as-noredir
1393 after we get more timeslice. But I don't think it ever
1394 can, since handle_noredir_jump will assert if the counter
1395 is zero on entry. */
1396 vg_assert(trc[0] != VG_TRC_INNER_COUNTERZERO);
sewardj0a1086e2014-08-29 19:12:38 +00001397 /* This asserts the same thing. */
1398 vg_assert(dispatch_ctr >= 0);
sewardj291849f2012-04-20 23:58:55 +00001399
1400 /* A no-redir translation can't return with a chain-me
1401 request, since chaining in the no-redir cache is too
1402 complex. */
1403 vg_assert(trc[0] != VG_TRC_CHAIN_ME_TO_SLOW_EP
1404 && trc[0] != VG_TRC_CHAIN_ME_TO_FAST_EP);
sewardj0ec07f32006-01-12 12:32:32 +00001405 }
1406
sewardj291849f2012-04-20 23:58:55 +00001407 switch (trc[0]) {
1408 case VEX_TRC_JMP_BORING:
1409 /* assisted dispatch, no event. Used by no-redir
1410 translations to force return to the scheduler. */
sewardj0ec07f32006-01-12 12:32:32 +00001411 case VG_TRC_BORING:
1412 /* no special event, just keep going. */
1413 break;
1414
sewardjb5f6f512005-03-10 23:59:00 +00001415 case VG_TRC_INNER_FASTMISS:
sewardj0a1086e2014-08-29 19:12:38 +00001416 vg_assert(dispatch_ctr >= 0);
sewardjb5f6f512005-03-10 23:59:00 +00001417 handle_tt_miss(tid);
1418 break;
sewardj291849f2012-04-20 23:58:55 +00001419
1420 case VG_TRC_CHAIN_ME_TO_SLOW_EP: {
1421 if (0) VG_(printf)("sched: CHAIN_TO_SLOW_EP: %p\n", (void*)trc[1] );
1422 handle_chain_me(tid, (void*)trc[1], False);
1423 break;
1424 }
1425
1426 case VG_TRC_CHAIN_ME_TO_FAST_EP: {
1427 if (0) VG_(printf)("sched: CHAIN_TO_FAST_EP: %p\n", (void*)trc[1] );
1428 handle_chain_me(tid, (void*)trc[1], True);
1429 break;
1430 }
1431
sewardjb5f6f512005-03-10 23:59:00 +00001432 case VEX_TRC_JMP_CLIENTREQ:
1433 do_client_request(tid);
1434 break;
sewardja0fef1b2005-11-03 13:46:30 +00001435
1436 case VEX_TRC_JMP_SYS_INT128: /* x86-linux */
njnf76d27a2009-05-28 01:53:07 +00001437 case VEX_TRC_JMP_SYS_INT129: /* x86-darwin */
1438 case VEX_TRC_JMP_SYS_INT130: /* x86-darwin */
sewardj8eb8bab2015-07-21 14:44:28 +00001439 case VEX_TRC_JMP_SYS_INT145: /* x86-solaris */
1440 case VEX_TRC_JMP_SYS_INT210: /* x86-solaris */
1441 /* amd64-linux, ppc32-linux, amd64-darwin, amd64-solaris */
1442 case VEX_TRC_JMP_SYS_SYSCALL:
sewardj291849f2012-04-20 23:58:55 +00001443 handle_syscall(tid, trc[0]);
sewardjb5f6f512005-03-10 23:59:00 +00001444 if (VG_(clo_sanity_level) > 2)
1445 VG_(sanity_check_general)(True); /* sanity-check every syscall */
1446 break;
sewardje663cb92002-04-12 10:26:32 +00001447
sewardjb5f6f512005-03-10 23:59:00 +00001448 case VEX_TRC_JMP_YIELD:
1449 /* Explicit yield, because this thread is in a spin-lock
sewardj3fc75752005-03-12 15:16:31 +00001450 or something. Only let the thread run for a short while
1451 longer. Because swapping to another thread is expensive,
1452 we're prepared to let this thread eat a little more CPU
1453 before swapping to another. That means that short term
1454 spins waiting for hardware to poke memory won't cause a
1455 thread swap. */
Elliott Hughesed398002017-06-21 14:41:24 -07001456 if (dispatch_ctr > 300)
1457 dispatch_ctr = 300;
sewardjb5f6f512005-03-10 23:59:00 +00001458 break;
sewardje663cb92002-04-12 10:26:32 +00001459
sewardjb5f6f512005-03-10 23:59:00 +00001460 case VG_TRC_INNER_COUNTERZERO:
1461 /* Timeslice is out. Let a new thread be scheduled. */
sewardj291849f2012-04-20 23:58:55 +00001462 vg_assert(dispatch_ctr == 0);
sewardjb5f6f512005-03-10 23:59:00 +00001463 break;
sewardje663cb92002-04-12 10:26:32 +00001464
sewardjb5f6f512005-03-10 23:59:00 +00001465 case VG_TRC_FAULT_SIGNAL:
1466 /* Everything should be set up (either we're exiting, or
1467 about to start in a signal handler). */
1468 break;
sewardj9d1b5d32002-04-17 19:40:49 +00001469
sewardj07bdc5e2005-03-11 13:19:47 +00001470 case VEX_TRC_JMP_MAPFAIL:
1471 /* Failure of arch-specific address translation (x86/amd64
1472 segment override use) */
1473 /* jrs 2005 03 11: is this correct? */
1474 VG_(synth_fault)(tid);
1475 break;
1476
sewardjb5f6f512005-03-10 23:59:00 +00001477 case VEX_TRC_JMP_EMWARN: {
florian2e497412012-08-26 03:22:09 +00001478 static Int counts[EmNote_NUMBER];
sewardjb5f6f512005-03-10 23:59:00 +00001479 static Bool counts_initted = False;
florian2e497412012-08-26 03:22:09 +00001480 VexEmNote ew;
florian11f3cc82012-10-21 02:19:35 +00001481 const HChar* what;
sewardjb5f6f512005-03-10 23:59:00 +00001482 Bool show;
1483 Int q;
1484 if (!counts_initted) {
1485 counts_initted = True;
florian2e497412012-08-26 03:22:09 +00001486 for (q = 0; q < EmNote_NUMBER; q++)
sewardjb5f6f512005-03-10 23:59:00 +00001487 counts[q] = 0;
1488 }
florian2e497412012-08-26 03:22:09 +00001489 ew = (VexEmNote)VG_(threads)[tid].arch.vex.guest_EMNOTE;
1490 what = (ew < 0 || ew >= EmNote_NUMBER)
sewardjb5f6f512005-03-10 23:59:00 +00001491 ? "unknown (?!)"
florian2e497412012-08-26 03:22:09 +00001492 : LibVEX_EmNote_string(ew);
1493 show = (ew < 0 || ew >= EmNote_NUMBER)
sewardjb5f6f512005-03-10 23:59:00 +00001494 ? True
1495 : counts[ew]++ < 3;
sewardjd68ac3e2006-01-20 14:31:57 +00001496 if (show && VG_(clo_show_emwarns) && !VG_(clo_xml)) {
sewardjb5f6f512005-03-10 23:59:00 +00001497 VG_(message)( Vg_UserMsg,
sewardj738856f2009-07-15 14:48:32 +00001498 "Emulation warning: unsupported action:\n");
1499 VG_(message)( Vg_UserMsg, " %s\n", what);
njnd01fef72005-03-25 23:35:48 +00001500 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
sewardjb5f6f512005-03-10 23:59:00 +00001501 }
1502 break;
1503 }
sewardje663cb92002-04-12 10:26:32 +00001504
sewardjd68ac3e2006-01-20 14:31:57 +00001505 case VEX_TRC_JMP_EMFAIL: {
florian2e497412012-08-26 03:22:09 +00001506 VexEmNote ew;
florian11f3cc82012-10-21 02:19:35 +00001507 const HChar* what;
florian2e497412012-08-26 03:22:09 +00001508 ew = (VexEmNote)VG_(threads)[tid].arch.vex.guest_EMNOTE;
1509 what = (ew < 0 || ew >= EmNote_NUMBER)
sewardjd68ac3e2006-01-20 14:31:57 +00001510 ? "unknown (?!)"
florian2e497412012-08-26 03:22:09 +00001511 : LibVEX_EmNote_string(ew);
sewardjd68ac3e2006-01-20 14:31:57 +00001512 VG_(message)( Vg_UserMsg,
sewardj738856f2009-07-15 14:48:32 +00001513 "Emulation fatal error -- Valgrind cannot continue:\n");
1514 VG_(message)( Vg_UserMsg, " %s\n", what);
sewardjd68ac3e2006-01-20 14:31:57 +00001515 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
sewardj738856f2009-07-15 14:48:32 +00001516 VG_(message)(Vg_UserMsg, "\n");
1517 VG_(message)(Vg_UserMsg, "Valgrind has to exit now. Sorry.\n");
1518 VG_(message)(Vg_UserMsg, "\n");
sewardjd68ac3e2006-01-20 14:31:57 +00001519 VG_(exit)(1);
1520 break;
1521 }
1522
dejanj24f0c3a2014-02-19 11:57:22 +00001523 case VEX_TRC_JMP_SIGILL:
1524 VG_(synth_sigill)(tid, VG_(get_IP)(tid));
1525 break;
1526
sewardj4f9d6742007-08-29 09:11:35 +00001527 case VEX_TRC_JMP_SIGTRAP:
sewardj86df1552006-02-07 20:56:41 +00001528 VG_(synth_sigtrap)(tid);
1529 break;
1530
sewardj4f9d6742007-08-29 09:11:35 +00001531 case VEX_TRC_JMP_SIGSEGV:
1532 VG_(synth_fault)(tid);
1533 break;
1534
sewardj1c0ce7a2009-07-01 08:10:49 +00001535 case VEX_TRC_JMP_SIGBUS:
1536 VG_(synth_sigbus)(tid);
1537 break;
1538
petarj80e5c172012-10-19 14:45:17 +00001539 case VEX_TRC_JMP_SIGFPE_INTDIV:
1540 VG_(synth_sigfpe)(tid, VKI_FPE_INTDIV);
1541 break;
1542
1543 case VEX_TRC_JMP_SIGFPE_INTOVF:
1544 VG_(synth_sigfpe)(tid, VKI_FPE_INTOVF);
1545 break;
1546
florian2baf7532012-07-26 02:41:31 +00001547 case VEX_TRC_JMP_NODECODE: {
1548 Addr addr = VG_(get_IP)(tid);
1549
sewardjc30cd9b2012-12-06 18:08:54 +00001550 if (VG_(clo_sigill_diag)) {
1551 VG_(umsg)(
1552 "valgrind: Unrecognised instruction at address %#lx.\n", addr);
1553 VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
sewardjc76d0e52014-05-03 21:22:55 +00001554# define M(a) VG_(umsg)(a "\n");
1555 M("Your program just tried to execute an instruction that Valgrind" );
1556 M("did not recognise. There are two possible reasons for this." );
1557 M("1. Your program has a bug and erroneously jumped to a non-code" );
1558 M(" location. If you are running Memcheck and you just saw a" );
1559 M(" warning about a bad jump, it's probably your program's fault.");
1560 M("2. The instruction is legitimate but Valgrind doesn't handle it,");
1561 M(" i.e. it's Valgrind's fault. If you think this is the case or");
1562 M(" you are not sure, please let us know and we'll try to fix it.");
1563 M("Either way, Valgrind will now raise a SIGILL signal which will" );
1564 M("probably kill your program." );
1565# undef M
sewardjc30cd9b2012-12-06 18:08:54 +00001566 }
sewardjc76d0e52014-05-03 21:22:55 +00001567# if defined(VGA_s390x)
florian2baf7532012-07-26 02:41:31 +00001568 /* Now that the complaint is out we need to adjust the guest_IA. The
1569 reason is that -- after raising the exception -- execution will
1570 continue with the insn that follows the invalid insn. As the first
1571 2 bits of the invalid insn determine its length in the usual way,
1572 we can compute the address of the next insn here and adjust the
1573 guest_IA accordingly. This adjustment is essential and tested by
1574 none/tests/s390x/op_exception.c (which would loop forever
1575 otherwise) */
1576 UChar byte = ((UChar *)addr)[0];
1577 UInt insn_length = ((((byte >> 6) + 1) >> 1) + 1) << 1;
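         /* The top two bits of the first opcode byte encode the insn
            length: 00 -> 2 bytes, 01 or 10 -> 4 bytes, 11 -> 6 bytes.
            The expression above maps (byte >> 6) = 0,1,2,3 to 2,4,4,6. */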
1578 Addr next_insn_addr = addr + insn_length;
florian2baf7532012-07-26 02:41:31 +00001579 VG_(set_IP)(tid, next_insn_addr);
sewardjc76d0e52014-05-03 21:22:55 +00001580# endif
florian2baf7532012-07-26 02:41:31 +00001581 VG_(synth_sigill)(tid, addr);
1582 break;
1583 }
sewardjc76d0e52014-05-03 21:22:55 +00001584
sewardj1146ae62014-05-04 10:54:08 +00001585 case VEX_TRC_JMP_INVALICACHE:
cerion85665ca2005-06-20 15:51:07 +00001586 VG_(discard_translations)(
florianddd61ff2015-01-04 17:20:45 +00001587 (Addr)VG_(threads)[tid].arch.vex.guest_CMSTART,
sewardj1146ae62014-05-04 10:54:08 +00001588 VG_(threads)[tid].arch.vex.guest_CMLEN,
1589 "scheduler(VEX_TRC_JMP_INVALICACHE)"
sewardj487ac702005-06-21 12:52:38 +00001590 );
cerion85665ca2005-06-20 15:51:07 +00001591 if (0)
1592 VG_(printf)("dump translations done.\n");
cerion85665ca2005-06-20 15:51:07 +00001593 break;
1594
sewardjc76d0e52014-05-03 21:22:55 +00001595 case VEX_TRC_JMP_FLUSHDCACHE: {
sewardj1146ae62014-05-04 10:54:08 +00001596 void* start = (void*)VG_(threads)[tid].arch.vex.guest_CMSTART;
1597 SizeT len = VG_(threads)[tid].arch.vex.guest_CMLEN;
sewardjc76d0e52014-05-03 21:22:55 +00001598 VG_(debugLog)(2, "sched", "flush_dcache(%p, %lu)\n", start, len);
1599 VG_(flush_dcache)(start, len);
1600 break;
1601 }
1602
sewardje3a384b2005-07-29 08:51:34 +00001603 case VG_TRC_INVARIANT_FAILED:
1604 /* This typically happens if, after running generated code,
1605 it is detected that host CPU settings (eg, FPU/Vector
1606 control words) are not as they should be. Vex's code
1607 generation specifies the state such control words should
1608 be in on entry to Vex-generated code, and they should be
1609 unchanged on exit from it. Failure of this assertion
1610 usually means a bug in Vex's code generation. */
sewardj59570ff2010-01-01 11:59:33 +00001611 //{ UInt xx;
1612 // __asm__ __volatile__ (
1613 // "\t.word 0xEEF12A10\n" // fmrx r2,fpscr
1614 // "\tmov %0, r2" : "=r"(xx) : : "r2" );
1615 // VG_(printf)("QQQQ new fpscr = %08x\n", xx);
1616 //}
sewardje3a384b2005-07-29 08:51:34 +00001617 vg_assert2(0, "VG_(scheduler), phase 3: "
1618 "run_innerloop detected host "
1619 "state invariant failure", trc);
1620
sewardja0fef1b2005-11-03 13:46:30 +00001621 case VEX_TRC_JMP_SYS_SYSENTER:
sewardj5438a012005-08-07 14:49:27 +00001622 /* Do whatever simulation is appropriate for an x86 sysenter
1623 instruction. Note that it is critical to set this thread's
1624 guest_EIP to point at the code to execute after the
1625 sysenter, since Vex-generated code will not have set it --
1626 vex does not know what it should be. Vex sets the next
njncda2f0f2009-05-18 02:12:08 +00001627 address to zero, so if you don't set guest_EIP, the thread
1628 will jump to zero afterwards and probably die as a result. */
1629# if defined(VGP_x86_linux)
sewardj5438a012005-08-07 14:49:27 +00001630 vg_assert2(0, "VG_(scheduler), phase 3: "
njncda2f0f2009-05-18 02:12:08 +00001631 "sysenter_x86 on x86-linux is not supported");
sewardj8eb8bab2015-07-21 14:44:28 +00001632# elif defined(VGP_x86_darwin) || defined(VGP_x86_solaris)
njnf76d27a2009-05-28 01:53:07 +00001633 /* return address in client edx */
1634 VG_(threads)[tid].arch.vex.guest_EIP
1635 = VG_(threads)[tid].arch.vex.guest_EDX;
sewardj93a97572012-04-21 15:35:12 +00001636 handle_syscall(tid, trc[0]);
sewardj5438a012005-08-07 14:49:27 +00001637# else
1638 vg_assert2(0, "VG_(scheduler), phase 3: "
1639 "sysenter_x86 on non-x86 platform?!?!");
1640# endif
njnf76d27a2009-05-28 01:53:07 +00001641 break;
sewardj5438a012005-08-07 14:49:27 +00001642
sewardjb5f6f512005-03-10 23:59:00 +00001643 default:
njn50ae1a72005-04-08 23:28:23 +00001644 vg_assert2(0, "VG_(scheduler), phase 3: "
sewardj291849f2012-04-20 23:58:55 +00001645 "unexpected thread return code (%u)", trc[0]);
sewardjb5f6f512005-03-10 23:59:00 +00001646 /* NOTREACHED */
1647 break;
sewardje663cb92002-04-12 10:26:32 +00001648
1649 } /* switch (trc) */
sewardjb0473e92011-06-07 22:54:32 +00001650
sewardj17c5e2e2012-12-28 09:12:14 +00001651 if (UNLIKELY(VG_(clo_profyle_sbs)) && VG_(clo_profyle_interval) > 0)
1652 maybe_show_sb_profile();
nethercote238a3c32004-08-09 13:13:31 +00001653 }
sewardjc24be7a2005-03-15 01:40:12 +00001654
1655 if (VG_(clo_trace_sched))
1656 print_sched_event(tid, "exiting VG_(scheduler)");
1657
sewardjb5f6f512005-03-10 23:59:00 +00001658 vg_assert(VG_(is_exiting)(tid));
thughes513197c2004-06-13 12:07:53 +00001659
sewardjb5f6f512005-03-10 23:59:00 +00001660 return tst->exitreason;
sewardj20917d82002-05-28 01:36:45 +00001661}
1662
1663
sewardjb5f6f512005-03-10 23:59:00 +00001664void VG_(nuke_all_threads_except) ( ThreadId me, VgSchedReturnCode src )
sewardjccef2e62002-05-29 19:26:32 +00001665{
1666 ThreadId tid;
sewardjb5f6f512005-03-10 23:59:00 +00001667
1668 vg_assert(VG_(is_running_thread)(me));
sewardj45f02c42005-02-05 18:27:14 +00001669
sewardjccef2e62002-05-29 19:26:32 +00001670 for (tid = 1; tid < VG_N_THREADS; tid++) {
1671 if (tid == me
jsgf855d93d2003-10-13 22:26:55 +00001672 || VG_(threads)[tid].status == VgTs_Empty)
sewardjccef2e62002-05-29 19:26:32 +00001673 continue;
sewardjb5f6f512005-03-10 23:59:00 +00001674 if (0)
sewardjef037c72002-05-30 00:40:03 +00001675 VG_(printf)(
florianc6e5d762015-08-05 22:27:24 +00001676 "VG_(nuke_all_threads_except): nuking tid %u\n", tid);
sewardjb5f6f512005-03-10 23:59:00 +00001677
1678 VG_(threads)[tid].exitreason = src;
sewardja8d8e232005-06-07 20:04:56 +00001679 if (src == VgSrc_FatalSig)
1680 VG_(threads)[tid].os_state.fatalsig = VKI_SIGKILL;
sewardjf54342a2006-10-17 01:51:24 +00001681 VG_(get_thread_out_of_syscall)(tid);
sewardjccef2e62002-05-29 19:26:32 +00001682 }
1683}
1684
1685
njnd3040452003-05-19 15:04:06 +00001686/* ---------------------------------------------------------------------
sewardjb5f6f512005-03-10 23:59:00 +00001687 Specifying shadow register values
njnd3040452003-05-19 15:04:06 +00001688 ------------------------------------------------------------------ */
1689
njnf536bbb2005-06-13 04:21:38 +00001690#if defined(VGA_x86)
njnaf839f52005-06-23 03:27:57 +00001691# define VG_CLREQ_ARGS guest_EAX
1692# define VG_CLREQ_RET guest_EDX
njnf536bbb2005-06-13 04:21:38 +00001693#elif defined(VGA_amd64)
njnaf839f52005-06-23 03:27:57 +00001694# define VG_CLREQ_ARGS guest_RAX
1695# define VG_CLREQ_RET guest_RDX
carllcae0cc22014-08-07 23:17:29 +00001696#elif defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_ppc64le)
njnaf839f52005-06-23 03:27:57 +00001697# define VG_CLREQ_ARGS guest_GPR4
1698# define VG_CLREQ_RET guest_GPR3
sewardj59570ff2010-01-01 11:59:33 +00001699#elif defined(VGA_arm)
1700# define VG_CLREQ_ARGS guest_R4
1701# define VG_CLREQ_RET guest_R3
sewardjf0c12502014-01-12 12:54:00 +00001702#elif defined(VGA_arm64)
1703# define VG_CLREQ_ARGS guest_X4
1704# define VG_CLREQ_RET guest_X3
sewardjb5b87402011-03-07 16:05:35 +00001705#elif defined (VGA_s390x)
1706# define VG_CLREQ_ARGS guest_r2
1707# define VG_CLREQ_RET guest_r3
petarj4df0bfc2013-02-27 23:17:33 +00001708#elif defined(VGA_mips32) || defined(VGA_mips64)
sewardj5db15402012-06-07 09:13:21 +00001709# define VG_CLREQ_ARGS guest_r12
1710# define VG_CLREQ_RET guest_r11
njnf536bbb2005-06-13 04:21:38 +00001711#else
1712# error Unknown arch
1713#endif
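/* These select, per architecture, the guest register in which the
   client-request inline-asm sequence in include/valgrind.h passes the
   address of its argument block (VG_CLREQ_ARGS) and the register in which
   the result is returned to the client (VG_CLREQ_RET).  They must stay in
   sync with the per-architecture definitions in include/valgrind.h. */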
1714
njnaf839f52005-06-23 03:27:57 +00001715#define CLREQ_ARGS(regs) ((regs).vex.VG_CLREQ_ARGS)
1716#define CLREQ_RET(regs) ((regs).vex.VG_CLREQ_RET)
1717#define O_CLREQ_RET (offsetof(VexGuestArchState, VG_CLREQ_RET))
njnf536bbb2005-06-13 04:21:38 +00001718
njn502badb2005-05-08 02:04:49 +00001719// These macros write a value to a client's thread register, and tell the
1720// tool that it's happened (if necessary).
1721
1722#define SET_CLREQ_RETVAL(zztid, zzval) \
1723 do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
1724 VG_TRACK( post_reg_write, \
1725 Vg_CoreClientReq, zztid, O_CLREQ_RET, sizeof(UWord)); \
1726 } while (0)
1727
1728#define SET_CLCALL_RETVAL(zztid, zzval, f) \
1729 do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
1730 VG_TRACK( post_reg_write_clientcall_return, \
1731 zztid, O_CLREQ_RET, sizeof(UWord), f); \
1732 } while (0)
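/* The VG_TRACK calls above let the tool know that the core wrote the
   guest register; Memcheck, for example, uses this to mark the client
   request's result register as initialised. */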
1733
sewardj0ec07f32006-01-12 12:32:32 +00001734
sewardje663cb92002-04-12 10:26:32 +00001735/* ---------------------------------------------------------------------
sewardj124ca2a2002-06-20 10:19:38 +00001736 Handle client requests.
sewardje663cb92002-04-12 10:26:32 +00001737 ------------------------------------------------------------------ */
1738
njn9cb54ac2005-06-12 04:19:17 +00001739// OS-specific(?) client requests
1740static Bool os_client_request(ThreadId tid, UWord *args)
1741{
1742 Bool handled = True;
1743
1744 vg_assert(VG_(is_running_thread)(tid));
1745
1746 switch(args[0]) {
Elliott Hughesa0664b92017-04-18 17:46:52 -07001747 case VG_USERREQ__FREERES_DONE:
njn9cb54ac2005-06-12 04:19:17 +00001748 /* This is equivalent to an exit() syscall, but we don't set the
1749 exitcode (since it might already be set) */
1750 if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched))
sewardj738856f2009-07-15 14:48:32 +00001751 VG_(message)(Vg_DebugMsg,
Elliott Hughesa0664b92017-04-18 17:46:52 -07001752 "__gnu_cxx::__freeres() and __libc_freeres() wrapper "
1753 "done; really quitting!\n");
sewardjf54342a2006-10-17 01:51:24 +00001754 VG_(threads)[tid].exitreason = VgSrc_ExitThread;
njn9cb54ac2005-06-12 04:19:17 +00001755 break;
1756
1757 default:
1758 handled = False;
1759 break;
1760 }
1761
1762 return handled;
1763}
1764
1765
florian661786e2013-08-27 15:17:53 +00001766/* Write out a client message, possibly including a back trace. Return
1767 the number of characters written. In case of XML output, the format
1768 string as well as any arguments it requires will be XML'ified.
1769 I.e. special characters such as the angle brackets will be translated
1770 into proper escape sequences. */
1771static
1772Int print_client_message( ThreadId tid, const HChar *format,
1773 va_list *vargsp, Bool include_backtrace)
1774{
1775 Int count;
1776
1777 if (VG_(clo_xml)) {
1778 /* Translate the format string as follows:
1779 < --> &lt;
1780 > --> &gt;
1781 & --> &amp;
1782 %s --> %pS
1783 Yes, yes, it's simplified but in synch with
1784 myvprintf_str_XML_simplistic and VG_(debugLog_vprintf).
1785 */
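      /* For instance, a client format string of
            "value of %s is <%d & %d>"
         is rewritten here as
            "value of %pS is &lt;%d &amp; %d&gt;"
         before being handed to VG_(vprintf_xml). */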
1786
1787 /* Allocate a buffer that is for sure large enough. */
1788 HChar xml_format[VG_(strlen)(format) * 5 + 1];
1789
1790 const HChar *p;
1791 HChar *q = xml_format;
1792
1793 for (p = format; *p; ++p) {
1794 switch (*p) {
1795 case '<': VG_(strcpy)(q, "&lt;"); q += 4; break;
1796 case '>': VG_(strcpy)(q, "&gt;"); q += 4; break;
1797 case '&': VG_(strcpy)(q, "&amp;"); q += 5; break;
1798 case '%':
1799 /* Careful: make sure %%s stays %%s */
1800 *q++ = *p++;
1801 if (*p == 's') {
1802 *q++ = 'p';
1803 *q++ = 'S';
1804 } else {
1805 *q++ = *p;
1806 }
1807 break;
1808
1809 default:
1810 *q++ = *p;
1811 break;
1812 }
1813 }
1814 *q = '\0';
1815
1816 VG_(printf_xml)( "<clientmsg>\n" );
florianc6e5d762015-08-05 22:27:24 +00001817 VG_(printf_xml)( " <tid>%u</tid>\n", tid );
florianead018e2015-07-30 21:49:49 +00001818 const ThreadState *tst = VG_(get_ThreadState)(tid);
1819 if (tst->thread_name)
1820 VG_(printf_xml)(" <threadname>%s</threadname>\n", tst->thread_name);
florian661786e2013-08-27 15:17:53 +00001821 VG_(printf_xml)( " <text>" );
1822 count = VG_(vprintf_xml)( xml_format, *vargsp );
1823 VG_(printf_xml)( " </text>\n" );
1824 } else {
1825 count = VG_(vmessage)( Vg_ClientMsg, format, *vargsp );
1826 VG_(message_flush)();
1827 }
1828
1829 if (include_backtrace)
1830 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
1831
1832 if (VG_(clo_xml))
1833 VG_(printf_xml)( "</clientmsg>\n" );
1834
1835 return count;
1836}
1837
1838
sewardj124ca2a2002-06-20 10:19:38 +00001839/* Do a client request for the thread tid. After the request, tid may
1840 or may not still be runnable; if not, the scheduler will have to
1841 choose a new thread to run.
1842*/
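/* Layout of the request block (as assembled by the
   VALGRIND_DO_CLIENT_REQUEST* macros in include/valgrind.h): arg[0] is
   the request code and arg[1]..arg[5] are the five request arguments. */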
sewardje663cb92002-04-12 10:26:32 +00001843static
sewardjb5f6f512005-03-10 23:59:00 +00001844void do_client_request ( ThreadId tid )
sewardje663cb92002-04-12 10:26:32 +00001845{
sewardjb5f6f512005-03-10 23:59:00 +00001846 UWord* arg = (UWord*)(CLREQ_ARGS(VG_(threads)[tid].arch));
nethercoted1b64b22004-11-04 18:22:28 +00001847 UWord req_no = arg[0];
sewardj124ca2a2002-06-20 10:19:38 +00001848
fitzhardinge98abfc72003-12-16 02:05:15 +00001849 if (0)
florianc6e5d762015-08-05 22:27:24 +00001850 VG_(printf)("req no = 0x%lx, arg = %p\n", req_no, arg);
sewardje663cb92002-04-12 10:26:32 +00001851 switch (req_no) {
1852
njn3e884182003-04-15 13:03:23 +00001853 case VG_USERREQ__CLIENT_CALL0: {
florian7822f632014-12-24 11:11:42 +00001854 UWord (*f)(ThreadId) = (__typeof__(f))arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001855 if (f == NULL)
sewardj738856f2009-07-15 14:48:32 +00001856 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL0: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001857 else
njn2ac95242005-03-13 23:07:30 +00001858 SET_CLCALL_RETVAL(tid, f ( tid ), (Addr)f);
njn3e884182003-04-15 13:03:23 +00001859 break;
1860 }
1861 case VG_USERREQ__CLIENT_CALL1: {
florian7822f632014-12-24 11:11:42 +00001862 UWord (*f)(ThreadId, UWord) = (__typeof__(f))arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001863 if (f == NULL)
sewardj738856f2009-07-15 14:48:32 +00001864 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL1: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001865 else
njn2ac95242005-03-13 23:07:30 +00001866 SET_CLCALL_RETVAL(tid, f ( tid, arg[2] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00001867 break;
1868 }
1869 case VG_USERREQ__CLIENT_CALL2: {
florian7822f632014-12-24 11:11:42 +00001870 UWord (*f)(ThreadId, UWord, UWord) = (__typeof__(f))arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001871 if (f == NULL)
sewardj738856f2009-07-15 14:48:32 +00001872 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL2: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001873 else
njn2ac95242005-03-13 23:07:30 +00001874 SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00001875 break;
1876 }
1877 case VG_USERREQ__CLIENT_CALL3: {
florian7822f632014-12-24 11:11:42 +00001878 UWord (*f)(ThreadId, UWord, UWord, UWord) = (__typeof__(f))arg[1];
fitzhardinge98abfc72003-12-16 02:05:15 +00001879 if (f == NULL)
sewardj738856f2009-07-15 14:48:32 +00001880 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL3: func=%p\n", f);
fitzhardinge98abfc72003-12-16 02:05:15 +00001881 else
njn2ac95242005-03-13 23:07:30 +00001882 SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3], arg[4] ), (Addr)f );
njn3e884182003-04-15 13:03:23 +00001883 break;
1884 }
1885
njnf09745a2005-05-10 03:01:23 +00001886 // Nb: this looks like a circular definition, because it kind of is.
1887 // See comment in valgrind.h to understand what's going on.
sewardj124ca2a2002-06-20 10:19:38 +00001888 case VG_USERREQ__RUNNING_ON_VALGRIND:
sewardjb5f6f512005-03-10 23:59:00 +00001889 SET_CLREQ_RETVAL(tid, RUNNING_ON_VALGRIND+1);
sewardj124ca2a2002-06-20 10:19:38 +00001890 break;
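      /* Client-side note (see RUNNING_ON_VALGRIND in include/valgrind.h):
         when run natively the macro yields 0; under Valgrind the request
         returns the core's own view plus one, so a client running under N
         nested Valgrinds observes N. */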
1891
fitzhardinge39de4b42003-10-31 07:12:21 +00001892 case VG_USERREQ__PRINTF: {
florian661786e2013-08-27 15:17:53 +00001893 const HChar* format = (HChar *)arg[1];
sewardjc560fb32010-01-28 15:23:54 +00001894 /* JRS 2010-Jan-28: this is DEPRECATED; use the
1895 _VALIST_BY_REF version instead */
1896 if (sizeof(va_list) != sizeof(UWord))
1897 goto va_list_casting_error_NORETURN;
sewardj05b07152010-01-04 01:01:02 +00001898 union {
1899 va_list vargs;
sewardjc560fb32010-01-28 15:23:54 +00001900 unsigned long uw;
1901 } u;
1902 u.uw = (unsigned long)arg[2];
1903 Int count =
florian661786e2013-08-27 15:17:53 +00001904 print_client_message( tid, format, &u.vargs,
1905 /* include_backtrace */ False );
sewardjc560fb32010-01-28 15:23:54 +00001906 SET_CLREQ_RETVAL( tid, count );
1907 break;
1908 }
fitzhardinge39de4b42003-10-31 07:12:21 +00001909
sewardjc560fb32010-01-28 15:23:54 +00001910 case VG_USERREQ__PRINTF_BACKTRACE: {
florian661786e2013-08-27 15:17:53 +00001911 const HChar* format = (HChar *)arg[1];
sewardjc560fb32010-01-28 15:23:54 +00001912 /* JRS 2010-Jan-28: this is DEPRECATED; use the
1913 _VALIST_BY_REF version instead */
1914 if (sizeof(va_list) != sizeof(UWord))
1915 goto va_list_casting_error_NORETURN;
sewardj05b07152010-01-04 01:01:02 +00001916 union {
1917 va_list vargs;
sewardjc560fb32010-01-28 15:23:54 +00001918 unsigned long uw;
1919 } u;
1920 u.uw = (unsigned long)arg[2];
1921 Int count =
florian661786e2013-08-27 15:17:53 +00001922 print_client_message( tid, format, &u.vargs,
1923 /* include_backtrace */ True );
sewardjc560fb32010-01-28 15:23:54 +00001924 SET_CLREQ_RETVAL( tid, count );
1925 break;
1926 }
1927
1928 case VG_USERREQ__PRINTF_VALIST_BY_REF: {
florian661786e2013-08-27 15:17:53 +00001929 const HChar* format = (HChar *)arg[1];
sewardjc560fb32010-01-28 15:23:54 +00001930 va_list* vargsp = (va_list*)arg[2];
florian661786e2013-08-27 15:17:53 +00001931 Int count =
1932 print_client_message( tid, format, vargsp,
1933 /* include_backtrace */ False );
1934
sewardjc560fb32010-01-28 15:23:54 +00001935 SET_CLREQ_RETVAL( tid, count );
1936 break;
1937 }
1938
1939 case VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF: {
florian661786e2013-08-27 15:17:53 +00001940 const HChar* format = (HChar *)arg[1];
sewardjc560fb32010-01-28 15:23:54 +00001941 va_list* vargsp = (va_list*)arg[2];
1942 Int count =
florian661786e2013-08-27 15:17:53 +00001943 print_client_message( tid, format, vargsp,
1944 /* include_backtrace */ True );
sewardjc560fb32010-01-28 15:23:54 +00001945 SET_CLREQ_RETVAL( tid, count );
1946 break;
1947 }
1948
1949 case VG_USERREQ__INTERNAL_PRINTF_VALIST_BY_REF: {
1950 va_list* vargsp = (va_list*)arg[2];
1951 Int count =
floriancd19e992012-11-03 19:32:28 +00001952 VG_(vmessage)( Vg_DebugMsg, (HChar *)arg[1], *vargsp );
sewardjc560fb32010-01-28 15:23:54 +00001953 VG_(message_flush)();
1954 SET_CLREQ_RETVAL( tid, count );
1955 break;
1956 }
fitzhardinge39de4b42003-10-31 07:12:21 +00001957
tomd2645142009-10-29 09:27:11 +00001958 case VG_USERREQ__ADD_IFUNC_TARGET: {
1959 VG_(redir_add_ifunc_target)( arg[1], arg[2] );
1960 SET_CLREQ_RETVAL( tid, 0);
1961 break; }
1962
rjwalsh0140af52005-06-04 20:42:33 +00001963 case VG_USERREQ__STACK_REGISTER: {
njn945ed2e2005-06-24 03:28:30 +00001964 UWord sid = VG_(register_stack)((Addr)arg[1], (Addr)arg[2]);
rjwalsh0140af52005-06-04 20:42:33 +00001965 SET_CLREQ_RETVAL( tid, sid );
1966 break; }
1967
1968 case VG_USERREQ__STACK_DEREGISTER: {
njn945ed2e2005-06-24 03:28:30 +00001969 VG_(deregister_stack)(arg[1]);
rjwalsh0140af52005-06-04 20:42:33 +00001970 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1971 break; }
1972
1973 case VG_USERREQ__STACK_CHANGE: {
njn945ed2e2005-06-24 03:28:30 +00001974 VG_(change_stack)(arg[1], (Addr)arg[2], (Addr)arg[3]);
rjwalsh0140af52005-06-04 20:42:33 +00001975 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1976 break; }
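      /* The three requests above back the VALGRIND_STACK_REGISTER,
         VALGRIND_STACK_DEREGISTER and VALGRIND_STACK_CHANGE macros in
         include/valgrind.h, which clients use to describe stacks Valgrind
         cannot discover by itself (e.g. user-allocated coroutine stacks). */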
1977
fitzhardinge98abfc72003-12-16 02:05:15 +00001978 case VG_USERREQ__GET_MALLOCFUNCS: {
1979 struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];
1980
njnfc51f8d2005-06-21 03:20:17 +00001981 info->tl_malloc = VG_(tdict).tool_malloc;
1982 info->tl_calloc = VG_(tdict).tool_calloc;
1983 info->tl_realloc = VG_(tdict).tool_realloc;
1984 info->tl_memalign = VG_(tdict).tool_memalign;
1985 info->tl___builtin_new = VG_(tdict).tool___builtin_new;
1986 info->tl___builtin_vec_new = VG_(tdict).tool___builtin_vec_new;
1987 info->tl_free = VG_(tdict).tool_free;
1988 info->tl___builtin_delete = VG_(tdict).tool___builtin_delete;
1989 info->tl___builtin_vec_delete = VG_(tdict).tool___builtin_vec_delete;
njn8b140de2009-02-17 04:31:18 +00001990 info->tl_malloc_usable_size = VG_(tdict).tool_malloc_usable_size;
fitzhardinge98abfc72003-12-16 02:05:15 +00001991
njn088bfb42005-08-17 05:01:37 +00001992 info->mallinfo = VG_(mallinfo);
sewardjb5f6f512005-03-10 23:59:00 +00001993 info->clo_trace_malloc = VG_(clo_trace_malloc);
fitzhardinge98abfc72003-12-16 02:05:15 +00001994
1995 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1996
1997 break;
1998 }
1999
njn25e49d8e72002-09-23 09:36:25 +00002000 /* Requests from the client program */
2001
2002 case VG_USERREQ__DISCARD_TRANSLATIONS:
2003 if (VG_(clo_verbosity) > 2)
2004 VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
njn8a7b41b2007-09-23 00:51:24 +00002005 " addr %p, len %lu\n",
njn25e49d8e72002-09-23 09:36:25 +00002006 (void*)arg[1], arg[2] );
2007
sewardj45f4e7c2005-09-27 19:20:21 +00002008 VG_(discard_translations)(
2009 arg[1], arg[2], "scheduler(VG_USERREQ__DISCARD_TRANSLATIONS)"
2010 );
njn25e49d8e72002-09-23 09:36:25 +00002011
njnd3040452003-05-19 15:04:06 +00002012 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
njn25e49d8e72002-09-23 09:36:25 +00002013 break;
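      /* This backs VALGRIND_DISCARD_TRANSLATIONS(addr, len) in
         include/valgrind.h.  Clients that generate or overwrite code at
         run time (JITs, dynamic loaders) use it so that stale translations
         of the old code are not executed. */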
2014
Elliott Hughesed398002017-06-21 14:41:24 -07002015 case VG_USERREQ__INNER_THREADS:
2016 if (VG_(clo_verbosity) > 2)
2017 VG_(printf)( "client request: INNER_THREADS,"
2018 " addr %p\n",
2019 (void*)arg[1] );
2020 VG_(inner_threads) = (ThreadState*)arg[1];
2021 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
2022 break;
2023
njn47363ab2003-04-21 13:24:40 +00002024 case VG_USERREQ__COUNT_ERRORS:
nethercotef2b11482004-08-02 12:36:01 +00002025 SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() );
njn47363ab2003-04-21 13:24:40 +00002026 break;
2027
sewardjc8259b82009-04-22 22:42:10 +00002028 case VG_USERREQ__LOAD_PDB_DEBUGINFO:
2029 VG_(di_notify_pdb_debuginfo)( arg[1], arg[2], arg[3], arg[4] );
2030 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
2031 break;
2032
sewardj5c659622010-08-20 18:22:07 +00002033 case VG_USERREQ__MAP_IP_TO_SRCLOC: {
2034 Addr ip = arg[1];
florian10ef7252014-10-27 12:06:35 +00002035 HChar* buf64 = (HChar*)arg[2]; // points to a HChar [64] array
2036 const HChar *buf; // points to a string of unknown size
sewardj5c659622010-08-20 18:22:07 +00002037
2038 VG_(memset)(buf64, 0, 64);
2039 UInt linenum = 0;
2040 Bool ok = VG_(get_filename_linenum)(
florianf4384f42014-12-16 20:55:58 +00002041 ip, &buf, NULL, &linenum
sewardj5c659622010-08-20 18:22:07 +00002042 );
2043 if (ok) {
florian10ef7252014-10-27 12:06:35 +00002044 /* For backward compatibility truncate the filename to
2045 49 characters. */
2046 VG_(strncpy)(buf64, buf, 50);
2047 buf64[49] = '\0';
sewardj5c659622010-08-20 18:22:07 +00002048 UInt i;
2049 for (i = 0; i < 50; i++) {
2050 if (buf64[i] == 0)
2051 break;
2052 }
florian10ef7252014-10-27 12:06:35 +00002053 VG_(sprintf)(buf64+i, ":%u", linenum); // safe
sewardj5c659622010-08-20 18:22:07 +00002054 } else {
2055 buf64[0] = 0;
2056 }
2057
2058 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
2059 break;
2060 }
2061
sewardjdc873c02011-07-24 16:02:33 +00002062 case VG_USERREQ__CHANGE_ERR_DISABLEMENT: {
2063 Word delta = arg[1];
2064 vg_assert(delta == 1 || delta == -1);
2065 ThreadState* tst = VG_(get_ThreadState)(tid);
2066 vg_assert(tst);
2067 if (delta == 1 && tst->err_disablement_level < 0xFFFFFFFF) {
2068 tst->err_disablement_level++;
2069 }
2070 else
2071 if (delta == -1 && tst->err_disablement_level > 0) {
2072 tst->err_disablement_level--;
2073 }
2074 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
2075 break;
2076 }
2077
philippe46207652013-01-20 17:11:58 +00002078 case VG_USERREQ__GDB_MONITOR_COMMAND: {
2079 UWord ret;
2080 ret = (UWord) VG_(client_monitor_command) ((HChar*)arg[1]);
2081 SET_CLREQ_RETVAL(tid, ret);
2082 break;
2083 }
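      /* The string in arg[1] is handed to the embedded gdbserver and
         handled as a "monitor ..." command; whatever
         VG_(client_monitor_command) returns is passed back to the client.
         (Client code would typically issue this via the
         VALGRIND_MONITOR_COMMAND macro in include/valgrind.h.) */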
2084
njn32f8d8c2009-07-15 02:31:45 +00002085 case VG_USERREQ__MALLOCLIKE_BLOCK:
bart91347382011-03-25 20:07:25 +00002086 case VG_USERREQ__RESIZEINPLACE_BLOCK:
njn32f8d8c2009-07-15 02:31:45 +00002087 case VG_USERREQ__FREELIKE_BLOCK:
2088 // Ignore them if the addr is NULL; otherwise pass onto the tool.
2089 if (!arg[1]) {
2090 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
2091 break;
2092 } else {
2093 goto my_default;
2094 }
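      /* These come from the VALGRIND_MALLOCLIKE_BLOCK,
         VALGRIND_RESIZEINPLACE_BLOCK and VALGRIND_FREELIKE_BLOCK macros
         used by custom allocators; ignoring a NULL address here means a
         client does not have to special-case a failed allocation. */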
2095
florianbb913cd2012-08-28 16:50:39 +00002096 case VG_USERREQ__VEX_INIT_FOR_IRI:
2097 LibVEX_InitIRI ( (IRICB *)arg[1] );
2098 break;
2099
sewardje663cb92002-04-12 10:26:32 +00002100 default:
njn32f8d8c2009-07-15 02:31:45 +00002101 my_default:
njn9cb54ac2005-06-12 04:19:17 +00002102 if (os_client_request(tid, arg)) {
2103 // do nothing, os_client_request() handled it
sewardjb5f6f512005-03-10 23:59:00 +00002104 } else if (VG_(needs).client_requests) {
nethercoted1b64b22004-11-04 18:22:28 +00002105 UWord ret;
sewardj34042512002-10-22 04:14:35 +00002106
njn25e49d8e72002-09-23 09:36:25 +00002107 if (VG_(clo_verbosity) > 2)
njn8a7b41b2007-09-23 00:51:24 +00002108 VG_(printf)("client request: code %lx, addr %p, len %lu\n",
njn25e49d8e72002-09-23 09:36:25 +00002109 arg[0], (void*)arg[1], arg[2] );
2110
njn51d827b2005-05-09 01:02:08 +00002111 if ( VG_TDICT_CALL(tool_handle_client_request, tid, arg, &ret) )
sewardjb5f6f512005-03-10 23:59:00 +00002112 SET_CLREQ_RETVAL(tid, ret);
njn25e49d8e72002-09-23 09:36:25 +00002113 } else {
sewardj34042512002-10-22 04:14:35 +00002114 static Bool whined = False;
2115
sewardjb5f6f512005-03-10 23:59:00 +00002116 if (!whined && VG_(clo_verbosity) > 2) {
nethercote7cc9c232004-01-21 15:08:04 +00002117 // Allow for requests in core, but defined by tools, which
njnd7994182003-10-02 13:44:04 +00002118 // have 0 and 0 in their two high bytes.
floriandbb35842012-10-27 18:39:11 +00002119 HChar c1 = (arg[0] >> 24) & 0xff;
2120 HChar c2 = (arg[0] >> 16) & 0xff;
njnd7994182003-10-02 13:44:04 +00002121 if (c1 == 0) c1 = '_';
2122 if (c2 == 0) c2 = '_';
sewardj34042512002-10-22 04:14:35 +00002123 VG_(message)(Vg_UserMsg, "Warning:\n"
barta0b6b2c2008-07-07 06:49:24 +00002124 " unhandled client request: 0x%lx (%c%c+0x%lx). Perhaps\n"
sewardj738856f2009-07-15 14:48:32 +00002125 " VG_(needs).client_requests should be set?\n",
njnd7994182003-10-02 13:44:04 +00002126 arg[0], c1, c2, arg[0] & 0xffff);
sewardj34042512002-10-22 04:14:35 +00002127 whined = True;
2128 }
njn25e49d8e72002-09-23 09:36:25 +00002129 }
sewardje663cb92002-04-12 10:26:32 +00002130 break;
2131 }
sewardjc560fb32010-01-28 15:23:54 +00002132 return;
2133
2134 /*NOTREACHED*/
2135 va_list_casting_error_NORETURN:
2136 VG_(umsg)(
2137 "Valgrind: fatal error - cannot continue: use of the deprecated\n"
2138 "client requests VG_USERREQ__PRINTF or VG_USERREQ__PRINTF_BACKTRACE\n"
2139 "on a platform where they cannot be supported. Please use the\n"
2140 "equivalent _VALIST_BY_REF versions instead.\n"
2141 "\n"
2142 "This is a binary-incompatible change in Valgrind's client request\n"
2143 "mechanism. It is unfortunate, but difficult to avoid. End-users\n"
2144 "are expected to almost never see this message. The only case in\n"
2145 "which you might see this message is if your code uses the macros\n"
2146 "VALGRIND_PRINTF or VALGRIND_PRINTF_BACKTRACE. If so, you will need\n"
2147 "to recompile such code, using the header files from this version of\n"
2148 "Valgrind, and not any previous version.\n"
2149 "\n"
2150 "If you see this message in any other circumstances, it is probably\n"
2151 "a bug in Valgrind. In this case, please file a bug report at\n"
2152 "\n"
2153 " http://www.valgrind.org/support/bug_reports.html\n"
2154 "\n"
2155 "Will now abort.\n"
2156 );
2157 vg_assert(0);
sewardje663cb92002-04-12 10:26:32 +00002158}
2159
2160
sewardj6072c362002-04-19 14:40:57 +00002161/* ---------------------------------------------------------------------
njn6676d5b2005-06-19 18:49:19 +00002162 Sanity checking (permanently engaged)
sewardj6072c362002-04-19 14:40:57 +00002163 ------------------------------------------------------------------ */
2164
sewardjb5f6f512005-03-10 23:59:00 +00002165/* Internal consistency checks on the sched structures. */
sewardj6072c362002-04-19 14:40:57 +00002166static
sewardjb5f6f512005-03-10 23:59:00 +00002167void scheduler_sanity ( ThreadId tid )
sewardj6072c362002-04-19 14:40:57 +00002168{
sewardjb5f6f512005-03-10 23:59:00 +00002169 Bool bad = False;
sewardjf54342a2006-10-17 01:51:24 +00002170 Int lwpid = VG_(gettid)();
jsgf855d93d2003-10-13 22:26:55 +00002171
sewardjb5f6f512005-03-10 23:59:00 +00002172 if (!VG_(is_running_thread)(tid)) {
2173 VG_(message)(Vg_DebugMsg,
florianc6e5d762015-08-05 22:27:24 +00002174 "Thread %u is supposed to be running, "
2175 "but doesn't own the_BigLock (owned by %u)\n",
njnc7561b92005-06-19 01:24:32 +00002176 tid, VG_(running_tid));
sewardjb5f6f512005-03-10 23:59:00 +00002177 bad = True;
jsgf855d93d2003-10-13 22:26:55 +00002178 }
sewardj5f07b662002-04-23 16:52:51 +00002179
sewardjf54342a2006-10-17 01:51:24 +00002180 if (lwpid != VG_(threads)[tid].os_state.lwpid) {
sewardjb5f6f512005-03-10 23:59:00 +00002181 VG_(message)(Vg_DebugMsg,
florianc6e5d762015-08-05 22:27:24 +00002182 "Thread %u supposed to be in LWP %d, but we're actually %d\n",
njnd06ed472005-03-13 05:12:31 +00002183 tid, VG_(threads)[tid].os_state.lwpid, VG_(gettid)());
sewardjb5f6f512005-03-10 23:59:00 +00002184 bad = True;
sewardj5f07b662002-04-23 16:52:51 +00002185 }
sewardjf54342a2006-10-17 01:51:24 +00002186
bart78bfc712011-12-08 16:14:59 +00002187 if (lwpid != ML_(get_sched_lock_owner)(the_BigLock)) {
sewardjf54342a2006-10-17 01:51:24 +00002188 VG_(message)(Vg_DebugMsg,
florianc6e5d762015-08-05 22:27:24 +00002189 "Thread (LWPID) %u doesn't own the_BigLock\n",
sewardjf54342a2006-10-17 01:51:24 +00002190 tid);
2191 bad = True;
2192 }
2193
philippe9e9b5892013-01-23 22:19:36 +00002194 if (0) {
2195 /* Periodically show the state of all threads, for debugging
2196 purposes. */
2197 static UInt lasttime = 0;
2198 UInt now;
2199 now = VG_(read_millisecond_timer)();
2200 if ((!bad) && (lasttime + 4000/*ms*/ <= now)) {
2201 lasttime = now;
2202 VG_(printf)("\n------------ Sched State at %d ms ------------\n",
2203 (Int)now);
philippe4f6f3362014-04-19 00:25:54 +00002204 VG_(show_sched_status)(True, // host_stacktrace
philippe38a74d22014-08-29 22:53:19 +00002205 True, // stack_usage
philippe4f6f3362014-04-19 00:25:54 +00002206 True); // exited_threads);
philippe9e9b5892013-01-23 22:19:36 +00002207 }
sewardjf54342a2006-10-17 01:51:24 +00002208 }
2209
2210 /* core_panic also shows the sched status, which is why we don't
2211 show it above if bad==True. */
2212 if (bad)
2213 VG_(core_panic)("scheduler_sanity: failed");
sewardj6072c362002-04-19 14:40:57 +00002214}
2215
njn6676d5b2005-06-19 18:49:19 +00002216void VG_(sanity_check_general) ( Bool force_expensive )
2217{
2218 ThreadId tid;
2219
sewardjf54342a2006-10-17 01:51:24 +00002220 static UInt next_slow_check_at = 1;
2221 static UInt slow_check_interval = 25;
2222
njn6676d5b2005-06-19 18:49:19 +00002223 if (VG_(clo_sanity_level) < 1) return;
2224
2225 /* --- First do all the tests that we can do quickly. ---*/
2226
2227 sanity_fast_count++;
2228
2229 /* Check stuff pertaining to the memory check system. */
2230
2231 /* Check that nobody has spuriously claimed that the first or
2232 last 16 pages of memory have become accessible [...] */
2233 if (VG_(needs).sanity_checks) {
njn6676d5b2005-06-19 18:49:19 +00002234 vg_assert(VG_TDICT_CALL(tool_cheap_sanity_check));
njn6676d5b2005-06-19 18:49:19 +00002235 }
2236
2237 /* --- Now some more expensive checks. ---*/
2238
sewardjf54342a2006-10-17 01:51:24 +00002239 /* Once every now and again, check some more expensive stuff.
2240 Gradually increase the interval between such checks so as not to
2241 burden long-running programs too much. */
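   /* With the initial values above (next_slow_check_at = 1,
      slow_check_interval = 25) and no forced checks, the expensive checks
      run at fast-check numbers 1, 25, 50, 76, 103, ... -- the gap grows
      by one each time. */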
njn6676d5b2005-06-19 18:49:19 +00002242 if ( force_expensive
sewardjf54342a2006-10-17 01:51:24 +00002243 || VG_(clo_sanity_level) > 1
2244 || (VG_(clo_sanity_level) == 1
2245 && sanity_fast_count == next_slow_check_at)) {
njn6676d5b2005-06-19 18:49:19 +00002246
florianc6e5d762015-08-05 22:27:24 +00002247 if (0) VG_(printf)("SLOW at %u\n", sanity_fast_count-1);
sewardjf54342a2006-10-17 01:51:24 +00002248
2249 next_slow_check_at = sanity_fast_count - 1 + slow_check_interval;
2250 slow_check_interval++;
njn6676d5b2005-06-19 18:49:19 +00002251 sanity_slow_count++;
2252
njn6676d5b2005-06-19 18:49:19 +00002253 if (VG_(needs).sanity_checks) {
njn6676d5b2005-06-19 18:49:19 +00002254 vg_assert(VG_TDICT_CALL(tool_expensive_sanity_check));
njn6676d5b2005-06-19 18:49:19 +00002255 }
2256
njn6676d5b2005-06-19 18:49:19 +00002257 /* Look for stack overruns. Visit all threads. */
njnd666ea72005-06-26 17:26:22 +00002258 for (tid = 1; tid < VG_N_THREADS; tid++) {
sewardj45f4e7c2005-09-27 19:20:21 +00002259 SizeT remains;
2260 VgStack* stack;
njn6676d5b2005-06-19 18:49:19 +00002261
2262 if (VG_(threads)[tid].status == VgTs_Empty ||
2263 VG_(threads)[tid].status == VgTs_Zombie)
2264 continue;
2265
sewardj45f4e7c2005-09-27 19:20:21 +00002266 stack
2267 = (VgStack*)
2268 VG_(get_ThreadState)(tid)->os_state.valgrind_stack_base;
sewardj46dbd3f2010-09-08 08:30:31 +00002269 SizeT limit
2270 = 4096; // Let's say. Checking more causes lots of L2 misses.
sewardj45f4e7c2005-09-27 19:20:21 +00002271 remains
sewardj46dbd3f2010-09-08 08:30:31 +00002272 = VG_(am_get_VgStack_unused_szB)(stack, limit);
2273 if (remains < limit)
njn6676d5b2005-06-19 18:49:19 +00002274 VG_(message)(Vg_DebugMsg,
florianc6e5d762015-08-05 22:27:24 +00002275 "WARNING: Thread %u is within %lu bytes "
philipped0720e42015-03-12 20:43:46 +00002276 "of running out of valgrind stack!\n"
2277 "Valgrind stack size can be increased "
2278 "using --valgrind-stacksize=....\n",
njn6676d5b2005-06-19 18:49:19 +00002279 tid, remains);
2280 }
njn6676d5b2005-06-19 18:49:19 +00002281 }
2282
2283 if (VG_(clo_sanity_level) > 1) {
njn6676d5b2005-06-19 18:49:19 +00002284 /* Check sanity of the low-level memory manager. Note that bugs
2285 in the client's code can cause this to fail, so we don't do
2286 this check unless specially asked for. And because it's
2287 potentially very expensive. */
2288 VG_(sanity_check_malloc_all)();
njn6676d5b2005-06-19 18:49:19 +00002289 }
njn6676d5b2005-06-19 18:49:19 +00002290}
sewardj6072c362002-04-19 14:40:57 +00002291
sewardje663cb92002-04-12 10:26:32 +00002292/*--------------------------------------------------------------------*/
njn278b3d62005-05-30 23:20:51 +00002293/*--- end ---*/
sewardje663cb92002-04-12 10:26:32 +00002294/*--------------------------------------------------------------------*/