sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 1 | |
| 2 | /*--------------------------------------------------------------------*/ |
njn | 278b3d6 | 2005-05-30 23:20:51 +0000 | [diff] [blame] | 3 | /*--- Thread scheduling. scheduler.c ---*/ |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 4 | /*--------------------------------------------------------------------*/ |
| 5 | |
| 6 | /* |
njn | c0ae705 | 2005-08-25 22:55:19 +0000 | [diff] [blame] | 7 | This file is part of Valgrind, a dynamic binary instrumentation |
| 8 | framework. |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 9 | |
sewardj | e4b0bf0 | 2006-06-05 23:21:15 +0000 | [diff] [blame] | 10 | Copyright (C) 2000-2006 Julian Seward |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 11 | jseward@acm.org |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 12 | |
| 13 | This program is free software; you can redistribute it and/or |
| 14 | modify it under the terms of the GNU General Public License as |
| 15 | published by the Free Software Foundation; either version 2 of the |
| 16 | License, or (at your option) any later version. |
| 17 | |
| 18 | This program is distributed in the hope that it will be useful, but |
| 19 | WITHOUT ANY WARRANTY; without even the implied warranty of |
| 20 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 21 | General Public License for more details. |
| 22 | |
| 23 | You should have received a copy of the GNU General Public License |
| 24 | along with this program; if not, write to the Free Software |
| 25 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA |
| 26 | 02111-1307, USA. |
| 27 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 28 | The GNU General Public License is contained in the file COPYING. |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 29 | */ |
| 30 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 31 | /* |
| 32 | Overview |
| 33 | |
| 34 | Valgrind tries to emulate the kernel's threading as closely as |
| 35 | possible. The client does all threading via the normal syscalls |
| 36 | (on Linux: clone, etc). Valgrind emulates this by creating exactly |
| 37 | the same process structure as would be created without Valgrind. |
| 38 | There are no extra threads. |
| 39 | |
| 40 | The main difference is that Valgrind only allows one client thread |
| 41 | to run at once. This is controlled with the VCPU semaphore, |
| 42 | "run_sema". Any time a thread wants to run client code or |
| 43 | manipulate any shared state (which is anything other than its own |
| 44 | ThreadState entry), it must hold the run_sema. |
| 45 | |
| 46 | When a thread is about to block in a blocking syscall, it releases |
| 47 | run_sema, and re-takes it when it becomes runnable again (either |
| 48 | because the syscall finished, or we took a signal). |
| 49 | |
| 50 | VG_(scheduler) therefore runs in each thread. It returns only when |
| 51 | the thread is exiting, either because it exited itself, or it was |
| 52 | told to exit by another thread. |
| 53 | |
| 54 | This file is almost entirely OS-independent. The details of how |
| 55 | the OS handles threading and signalling are abstracted away and |
njn | 1277109 | 2005-06-18 02:18:04 +0000 | [diff] [blame] | 56 | implemented elsewhere. [Some of the functions have worked their |
| 57 | way back for the moment, until we do an OS port in earnest...] |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 58 | */ |
| 59 | |
njn | c7561b9 | 2005-06-19 01:24:32 +0000 | [diff] [blame] | 60 | #include "pub_core_basics.h" |
| 61 | #include "pub_core_threadstate.h" |
njn | 04e1698 | 2005-05-31 00:23:43 +0000 | [diff] [blame] | 62 | #include "pub_core_aspacemgr.h" |
njn | 93fe3b2 | 2005-12-21 20:22:52 +0000 | [diff] [blame] | 63 | #include "pub_core_clreq.h" // for VG_USERREQ__* |
njn | 36b66df | 2005-05-12 05:13:04 +0000 | [diff] [blame] | 64 | #include "pub_core_dispatch.h" |
njn | f4c5016 | 2005-06-20 14:18:12 +0000 | [diff] [blame] | 65 | #include "pub_core_errormgr.h" // For VG_(get_n_errs_found)() |
njn | 97405b2 | 2005-06-02 03:39:33 +0000 | [diff] [blame] | 66 | #include "pub_core_libcbase.h" |
njn | 132bfcc | 2005-06-04 19:16:06 +0000 | [diff] [blame] | 67 | #include "pub_core_libcassert.h" |
njn | 36a20fa | 2005-06-03 03:08:39 +0000 | [diff] [blame] | 68 | #include "pub_core_libcprint.h" |
njn | f39e9a3 | 2005-06-12 02:43:17 +0000 | [diff] [blame] | 69 | #include "pub_core_libcproc.h" |
njn | de62cbf | 2005-06-10 22:08:14 +0000 | [diff] [blame] | 70 | #include "pub_core_libcsignal.h" |
njn | f536bbb | 2005-06-13 04:21:38 +0000 | [diff] [blame] | 71 | #include "pub_core_machine.h" |
njn | af1d7df | 2005-06-11 01:31:52 +0000 | [diff] [blame] | 72 | #include "pub_core_mallocfree.h" |
njn | 2024234 | 2005-05-16 23:31:24 +0000 | [diff] [blame] | 73 | #include "pub_core_options.h" |
njn | 717cde5 | 2005-05-10 02:47:21 +0000 | [diff] [blame] | 74 | #include "pub_core_replacemalloc.h" |
njn | 278b3d6 | 2005-05-30 23:20:51 +0000 | [diff] [blame] | 75 | #include "pub_core_scheduler.h" |
njn | 0c24647 | 2005-05-31 01:00:08 +0000 | [diff] [blame] | 76 | #include "pub_core_signals.h" |
njn | 945ed2e | 2005-06-24 03:28:30 +0000 | [diff] [blame] | 77 | #include "pub_core_stacks.h" |
njn | f4c5016 | 2005-06-20 14:18:12 +0000 | [diff] [blame] | 78 | #include "pub_core_stacktrace.h" // For VG_(get_and_pp_StackTrace)() |
njn | 9abd608 | 2005-06-17 21:31:45 +0000 | [diff] [blame] | 79 | #include "pub_core_syscall.h" |
njn | c1b0181 | 2005-06-17 22:19:06 +0000 | [diff] [blame] | 80 | #include "pub_core_syswrap.h" |
njn | 43b9a8a | 2005-05-10 04:37:01 +0000 | [diff] [blame] | 81 | #include "pub_core_tooliface.h" |
njn | f4c5016 | 2005-06-20 14:18:12 +0000 | [diff] [blame] | 82 | #include "pub_core_translate.h" // For VG_(translate)() |
njn | 8bddf58 | 2005-05-13 23:40:55 +0000 | [diff] [blame] | 83 | #include "pub_core_transtab.h" |
sewardj | 4eee476 | 2006-10-14 15:51:32 +0000 | [diff] [blame^] | 84 | #include "pub_core_vkiscnums.h" |
njn | 278b3d6 | 2005-05-30 23:20:51 +0000 | [diff] [blame] | 85 | #include "priv_sema.h" |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 86 | |
sewardj | 63fed7f | 2006-01-17 02:02:47 +0000 | [diff] [blame] | 87 | /* #include "pub_core_debuginfo.h" */ // DEBUGGING HACK ONLY |
| 88 | |
| 89 | |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 90 | /* --------------------------------------------------------------------- |
| 91 | Types and globals for the scheduler. |
| 92 | ------------------------------------------------------------------ */ |
| 93 | |
/* ThreadId and ThreadState are defined elsewhere. */
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 95 | |
/* Defines the thread-scheduling timeslice, in terms of the number of
   basic blocks we attempt to run each thread for.  Smaller values
   give finer interleaving but much increased scheduling overheads. */
#define SCHEDULING_QUANTUM 100000

/* If False, a fault is Valgrind-internal (ie, a bug) */
Bool VG_(in_generated_code) = False;

/* Counts downwards in VG_(run_innerloop). */
UInt VG_(dispatch_ctr);

/* 64-bit counter for the number of basic blocks done. */
static ULong bbs_done = 0;

/* Forwards -- bodies appear later in this file. */
static void do_client_request ( ThreadId tid );
static void scheduler_sanity ( ThreadId tid );
static void mostly_clear_thread_record ( ThreadId tid );

/* Stats.  NOTE(review): the MAJOR/MINOR distinction is made where
   these are incremented, in the scheduler loop (not visible here). */
static ULong n_scheduling_events_MINOR = 0;
static ULong n_scheduling_events_MAJOR = 0;

/* Sanity checking counts (reported by VG_(print_scheduler_stats)). */
static UInt sanity_fast_count = 0;
static UInt sanity_slow_count = 0;
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 122 | |
nethercote | 844e712 | 2004-08-02 15:27:22 +0000 | [diff] [blame] | 123 | void VG_(print_scheduler_stats)(void) |
| 124 | { |
| 125 | VG_(message)(Vg_DebugMsg, |
njn | 0fd92f4 | 2005-10-06 03:32:42 +0000 | [diff] [blame] | 126 | "scheduler: %,llu jumps (bb entries).", bbs_done ); |
njn | 394213a | 2005-06-19 18:38:24 +0000 | [diff] [blame] | 127 | VG_(message)(Vg_DebugMsg, |
njn | 0fd92f4 | 2005-10-06 03:32:42 +0000 | [diff] [blame] | 128 | "scheduler: %,llu/%,llu major/minor sched events.", |
nethercote | 844e712 | 2004-08-02 15:27:22 +0000 | [diff] [blame] | 129 | n_scheduling_events_MAJOR, n_scheduling_events_MINOR); |
njn | 6676d5b | 2005-06-19 18:49:19 +0000 | [diff] [blame] | 130 | VG_(message)(Vg_DebugMsg, |
| 131 | " sanity: %d cheap, %d expensive checks.", |
| 132 | sanity_fast_count, sanity_slow_count ); |
nethercote | 844e712 | 2004-08-02 15:27:22 +0000 | [diff] [blame] | 133 | } |
| 134 | |
/* CPU semaphore, so that threads can run exclusively.  A thread must
   hold this (via VG_(set_running)) before touching any shared state
   or running client code; it is released in VG_(set_sleeping) and
   VG_(exit_thread). */
static vg_sema_t run_sema;
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 139 | /* --------------------------------------------------------------------- |
| 140 | Helper functions for the scheduler. |
| 141 | ------------------------------------------------------------------ */ |
| 142 | |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 143 | static |
| 144 | void print_sched_event ( ThreadId tid, Char* what ) |
| 145 | { |
sewardj | 45b4b37 | 2002-04-16 22:50:32 +0000 | [diff] [blame] | 146 | VG_(message)(Vg_DebugMsg, " SCHED[%d]: %s", tid, what ); |
sewardj | 8937c81 | 2002-04-12 20:12:20 +0000 | [diff] [blame] | 147 | } |
| 148 | |
sewardj | 8937c81 | 2002-04-12 20:12:20 +0000 | [diff] [blame] | 149 | static |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 150 | HChar* name_of_sched_event ( UInt event ) |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 151 | { |
| 152 | switch (event) { |
sewardj | a0fef1b | 2005-11-03 13:46:30 +0000 | [diff] [blame] | 153 | case VEX_TRC_JMP_SYS_SYSCALL: return "SYSCALL"; |
| 154 | case VEX_TRC_JMP_SYS_INT32: return "INT32"; |
| 155 | case VEX_TRC_JMP_SYS_INT128: return "INT128"; |
| 156 | case VEX_TRC_JMP_SYS_SYSENTER: return "SYSENTER"; |
sewardj | d79ef68 | 2004-11-26 13:25:17 +0000 | [diff] [blame] | 157 | case VEX_TRC_JMP_CLIENTREQ: return "CLIENTREQ"; |
| 158 | case VEX_TRC_JMP_YIELD: return "YIELD"; |
sewardj | 45f02c4 | 2005-02-05 18:27:14 +0000 | [diff] [blame] | 159 | case VEX_TRC_JMP_NODECODE: return "NODECODE"; |
sewardj | 1f430d3 | 2005-12-16 01:07:11 +0000 | [diff] [blame] | 160 | case VEX_TRC_JMP_MAPFAIL: return "MAPFAIL"; |
tom | 360ed5d | 2006-01-13 09:26:23 +0000 | [diff] [blame] | 161 | case VEX_TRC_JMP_NOREDIR: return "NOREDIR"; |
sewardj | 1f430d3 | 2005-12-16 01:07:11 +0000 | [diff] [blame] | 162 | case VEX_TRC_JMP_EMWARN: return "EMWARN"; |
| 163 | case VEX_TRC_JMP_TINVAL: return "TINVAL"; |
| 164 | case VG_TRC_INVARIANT_FAILED: return "INVFAILED"; |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 165 | case VG_TRC_INNER_COUNTERZERO: return "COUNTERZERO"; |
| 166 | case VG_TRC_INNER_FASTMISS: return "FASTMISS"; |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 167 | case VG_TRC_FAULT_SIGNAL: return "FAULTSIGNAL"; |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 168 | default: return "??UNKNOWN??"; |
| 169 | } |
| 170 | } |
| 171 | |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 172 | /* Allocate a completely empty ThreadState record. */ |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 173 | ThreadId VG_(alloc_ThreadState) ( void ) |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 174 | { |
| 175 | Int i; |
sewardj | 6072c36 | 2002-04-19 14:40:57 +0000 | [diff] [blame] | 176 | for (i = 1; i < VG_N_THREADS; i++) { |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 177 | if (VG_(threads)[i].status == VgTs_Empty) { |
| 178 | VG_(threads)[i].status = VgTs_Init; |
| 179 | VG_(threads)[i].exitreason = VgSrc_None; |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 180 | return i; |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 181 | } |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 182 | } |
| 183 | VG_(printf)("vg_alloc_ThreadState: no free slots available\n"); |
| 184 | VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n"); |
njn | e427a66 | 2002-10-02 11:08:25 +0000 | [diff] [blame] | 185 | VG_(core_panic)("VG_N_THREADS is too low"); |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 186 | /*NOTREACHED*/ |
| 187 | } |
| 188 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 189 | /* |
| 190 | Mark a thread as Runnable. This will block until the run_sema is |
| 191 | available, so that we get exclusive access to all the shared |
| 192 | structures and the CPU. Up until we get the sema, we must not |
| 193 | touch any shared state. |
| 194 | |
| 195 | When this returns, we'll actually be running. |
| 196 | */ |
| 197 | void VG_(set_running)(ThreadId tid) |
| 198 | { |
| 199 | ThreadState *tst = VG_(get_ThreadState)(tid); |
| 200 | |
| 201 | vg_assert(tst->status != VgTs_Runnable); |
| 202 | |
| 203 | tst->status = VgTs_Runnable; |
| 204 | |
sewardj | 7eb7c58 | 2005-06-23 01:02:53 +0000 | [diff] [blame] | 205 | ML_(sema_down)(&run_sema); |
njn | c7561b9 | 2005-06-19 01:24:32 +0000 | [diff] [blame] | 206 | if (VG_(running_tid) != VG_INVALID_THREADID) |
| 207 | VG_(printf)("tid %d found %d running\n", tid, VG_(running_tid)); |
| 208 | vg_assert(VG_(running_tid) == VG_INVALID_THREADID); |
| 209 | VG_(running_tid) = tid; |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 210 | |
tom | e0008d6 | 2005-11-10 15:02:42 +0000 | [diff] [blame] | 211 | VG_(unknown_SP_update)(VG_(get_SP(tid)), VG_(get_SP(tid))); |
| 212 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 213 | if (VG_(clo_trace_sched)) |
| 214 | print_sched_event(tid, "now running"); |
tom | deca43f | 2005-07-27 23:04:28 +0000 | [diff] [blame] | 215 | |
| 216 | // While thre modeling is disable, issue thread_run events here |
| 217 | // VG_(tm_thread_switchto)(tid); |
| 218 | VG_TRACK( thread_run, tid ); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 219 | } |
| 220 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 221 | /* |
| 222 | Set a thread into a sleeping state, and give up exclusive access to |
| 223 | the CPU. On return, the thread must be prepared to block until it |
| 224 | is ready to run again (generally this means blocking in a syscall, |
| 225 | but it may mean that we remain in a Runnable state and we're just |
| 226 | yielding the CPU to another thread). |
| 227 | */ |
| 228 | void VG_(set_sleeping)(ThreadId tid, ThreadStatus sleepstate) |
| 229 | { |
| 230 | ThreadState *tst = VG_(get_ThreadState)(tid); |
| 231 | |
| 232 | vg_assert(tst->status == VgTs_Runnable); |
| 233 | |
| 234 | vg_assert(sleepstate == VgTs_WaitSys || |
| 235 | sleepstate == VgTs_Yielding); |
| 236 | |
| 237 | tst->status = sleepstate; |
| 238 | |
njn | c7561b9 | 2005-06-19 01:24:32 +0000 | [diff] [blame] | 239 | vg_assert(VG_(running_tid) == tid); |
| 240 | VG_(running_tid) = VG_INVALID_THREADID; |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 241 | |
| 242 | /* Release the run_sema; this will reschedule any runnable |
| 243 | thread. */ |
sewardj | 7eb7c58 | 2005-06-23 01:02:53 +0000 | [diff] [blame] | 244 | ML_(sema_up)(&run_sema); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 245 | |
| 246 | if (VG_(clo_trace_sched)) { |
| 247 | Char buf[50]; |
sewardj | a8d8e23 | 2005-06-07 20:04:56 +0000 | [diff] [blame] | 248 | VG_(sprintf)(buf, "now sleeping in state %s", |
njn | c7561b9 | 2005-06-19 01:24:32 +0000 | [diff] [blame] | 249 | VG_(name_of_ThreadStatus)(sleepstate)); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 250 | print_sched_event(tid, buf); |
nethercote | 75d2624 | 2004-08-01 22:59:18 +0000 | [diff] [blame] | 251 | } |
| 252 | } |
| 253 | |
/* Clear out the ThreadState and release the semaphore. Leaves the
   ThreadState in VgTs_Zombie state, so that it doesn't get
   reallocated until the caller is really ready. */
void VG_(exit_thread)(ThreadId tid)
{
   /* Caller must be this thread, already running and marked as
      exiting. */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(VG_(is_exiting)(tid));

   /* Clear the record (leaves status == VgTs_Zombie) before giving
      up the CPU, while we still hold run_sema. */
   mostly_clear_thread_record(tid);
   VG_(running_tid) = VG_INVALID_THREADID;

   /* There should still be a valid exitreason for this thread */
   vg_assert(VG_(threads)[tid].exitreason != VgSrc_None);

   /* Hand the CPU back; another runnable thread may now proceed. */
   ML_(sema_up)(&run_sema);
}
| 272 | /* Kill a thread. This interrupts whatever a thread is doing, and |
| 273 | makes it exit ASAP. This does not set the exitreason or |
| 274 | exitcode. */ |
| 275 | void VG_(kill_thread)(ThreadId tid) |
| 276 | { |
| 277 | vg_assert(VG_(is_valid_tid)(tid)); |
| 278 | vg_assert(!VG_(is_running_thread)(tid)); |
| 279 | vg_assert(VG_(is_exiting)(tid)); |
| 280 | |
| 281 | if (VG_(threads)[tid].status == VgTs_WaitSys) { |
| 282 | if (VG_(clo_trace_signals)) |
| 283 | VG_(message)(Vg_DebugMsg, "kill_thread zaps tid %d lwp %d", |
| 284 | tid, VG_(threads)[tid].os_state.lwpid); |
njn | 351d006 | 2005-06-21 22:23:59 +0000 | [diff] [blame] | 285 | VG_(tkill)(VG_(threads)[tid].os_state.lwpid, VG_SIGVGKILL); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 286 | } |
| 287 | } |
| 288 | |
/*
   Yield the CPU for a short time to let some other thread run.
*/
void VG_(vg_yield)(void)
{
   struct vki_timespec ts = { 0, 1 };
   ThreadId tid = VG_(running_tid);

   vg_assert(tid != VG_INVALID_THREADID);
   vg_assert(VG_(threads)[tid].os_state.lwpid == VG_(gettid)());

   /* Give up run_sema so another runnable thread can take the CPU. */
   VG_(set_sleeping)(tid, VgTs_Yielding);

   //VG_(printf)("tid %d yielding EIP=%p\n", tid, VG_(threads)[tid].arch.m_eip);

   /*
      Tell the kernel we're yielding.
   */
   /* Compile-time toggle: sched_yield is the active path; the
      nanosleep alternative (using 'ts') is kept for experiments. */
   if (1)
      VG_(do_syscall0)(__NR_sched_yield);
   else
      VG_(nanosleep)(&ts);

   /* Re-acquire run_sema before returning to the caller. */
   VG_(set_running)(tid);
}
sewardj | 0ec07f3 | 2006-01-12 12:32:32 +0000 | [diff] [blame] | 316 | /* Set the standard set of blocked signals, used whenever we're not |
njn | 9fc3112 | 2005-05-11 18:48:33 +0000 | [diff] [blame] | 317 | running a client syscall. */ |
| 318 | static void block_signals(ThreadId tid) |
| 319 | { |
| 320 | vki_sigset_t mask; |
| 321 | |
| 322 | VG_(sigfillset)(&mask); |
| 323 | |
| 324 | /* Don't block these because they're synchronous */ |
| 325 | VG_(sigdelset)(&mask, VKI_SIGSEGV); |
| 326 | VG_(sigdelset)(&mask, VKI_SIGBUS); |
| 327 | VG_(sigdelset)(&mask, VKI_SIGFPE); |
| 328 | VG_(sigdelset)(&mask, VKI_SIGILL); |
| 329 | VG_(sigdelset)(&mask, VKI_SIGTRAP); |
| 330 | |
| 331 | /* Can't block these anyway */ |
| 332 | VG_(sigdelset)(&mask, VKI_SIGSTOP); |
| 333 | VG_(sigdelset)(&mask, VKI_SIGKILL); |
| 334 | |
njn | 9fc3112 | 2005-05-11 18:48:33 +0000 | [diff] [blame] | 335 | VG_(sigprocmask)(VKI_SIG_SETMASK, &mask, NULL); |
| 336 | } |
| 337 | |
njn | 8aa3585 | 2005-06-10 22:59:56 +0000 | [diff] [blame] | 338 | static void os_state_clear(ThreadState *tst) |
| 339 | { |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 340 | tst->os_state.lwpid = 0; |
njn | 8aa3585 | 2005-06-10 22:59:56 +0000 | [diff] [blame] | 341 | tst->os_state.threadgroup = 0; |
| 342 | } |
| 343 | |
| 344 | static void os_state_init(ThreadState *tst) |
| 345 | { |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 346 | tst->os_state.valgrind_stack_base = 0; |
| 347 | tst->os_state.valgrind_stack_init_SP = 0; |
njn | 8aa3585 | 2005-06-10 22:59:56 +0000 | [diff] [blame] | 348 | os_state_clear(tst); |
| 349 | } |
| 350 | |
sewardj | 20917d8 | 2002-05-28 01:36:45 +0000 | [diff] [blame] | 351 | static |
| 352 | void mostly_clear_thread_record ( ThreadId tid ) |
| 353 | { |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 354 | vki_sigset_t savedmask; |
| 355 | |
sewardj | 20917d8 | 2002-05-28 01:36:45 +0000 | [diff] [blame] | 356 | vg_assert(tid >= 0 && tid < VG_N_THREADS); |
njn | af839f5 | 2005-06-23 03:27:57 +0000 | [diff] [blame] | 357 | VG_(cleanup_thread)(&VG_(threads)[tid].arch); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 358 | VG_(threads)[tid].tid = tid; |
| 359 | |
| 360 | /* Leave the thread in Zombie, so that it doesn't get reallocated |
| 361 | until the caller is finally done with the thread stack. */ |
| 362 | VG_(threads)[tid].status = VgTs_Zombie; |
| 363 | |
nethercote | 73b526f | 2004-10-31 18:48:21 +0000 | [diff] [blame] | 364 | VG_(sigemptyset)(&VG_(threads)[tid].sig_mask); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 365 | VG_(sigemptyset)(&VG_(threads)[tid].tmp_sig_mask); |
jsgf | 855d93d | 2003-10-13 22:26:55 +0000 | [diff] [blame] | 366 | |
njn | 8aa3585 | 2005-06-10 22:59:56 +0000 | [diff] [blame] | 367 | os_state_clear(&VG_(threads)[tid]); |
fitzhardinge | 2842859 | 2004-03-16 22:07:12 +0000 | [diff] [blame] | 368 | |
| 369 | /* start with no altstack */ |
| 370 | VG_(threads)[tid].altstack.ss_sp = (void *)0xdeadbeef; |
| 371 | VG_(threads)[tid].altstack.ss_size = 0; |
| 372 | VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE; |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 373 | |
njn | 444eba1 | 2005-05-12 03:47:31 +0000 | [diff] [blame] | 374 | VG_(clear_out_queued_signals)(tid, &savedmask); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 375 | |
| 376 | VG_(threads)[tid].sched_jmpbuf_valid = False; |
sewardj | 20917d8 | 2002-05-28 01:36:45 +0000 | [diff] [blame] | 377 | } |
| 378 | |
njn | 3f8c437 | 2005-03-13 04:43:10 +0000 | [diff] [blame] | 379 | /* |
sewardj | 0ec07f3 | 2006-01-12 12:32:32 +0000 | [diff] [blame] | 380 | Called in the child after fork. If the parent has multiple |
| 381 | threads, then we've inherited a VG_(threads) array describing them, |
| 382 | but only the thread which called fork() is actually alive in the |
| 383 | child. This functions needs to clean up all those other thread |
| 384 | structures. |
njn | 3f8c437 | 2005-03-13 04:43:10 +0000 | [diff] [blame] | 385 | |
| 386 | Whichever tid in the parent which called fork() becomes the |
| 387 | master_tid in the child. That's because the only living slot in |
| 388 | VG_(threads) in the child after fork is VG_(threads)[tid], and it |
| 389 | would be too hard to try to re-number the thread and relocate the |
| 390 | thread state down to VG_(threads)[1]. |
| 391 | |
| 392 | This function also needs to reinitialize the run_sema, since |
| 393 | otherwise we may end up sharing its state with the parent, which |
| 394 | would be deeply confusing. |
| 395 | */ |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 396 | static void sched_fork_cleanup(ThreadId me) |
| 397 | { |
| 398 | ThreadId tid; |
njn | c7561b9 | 2005-06-19 01:24:32 +0000 | [diff] [blame] | 399 | vg_assert(VG_(running_tid) == me); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 400 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 401 | VG_(threads)[me].os_state.lwpid = VG_(gettid)(); |
| 402 | VG_(threads)[me].os_state.threadgroup = VG_(getpid)(); |
| 403 | |
| 404 | /* clear out all the unused thread slots */ |
| 405 | for (tid = 1; tid < VG_N_THREADS; tid++) { |
njn | 3f8c437 | 2005-03-13 04:43:10 +0000 | [diff] [blame] | 406 | if (tid != me) { |
| 407 | mostly_clear_thread_record(tid); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 408 | VG_(threads)[tid].status = VgTs_Empty; |
sewardj | a8d8e23 | 2005-06-07 20:04:56 +0000 | [diff] [blame] | 409 | VG_(clear_syscallInfo)(tid); |
njn | 3f8c437 | 2005-03-13 04:43:10 +0000 | [diff] [blame] | 410 | } |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 411 | } |
| 412 | |
| 413 | /* re-init and take the sema */ |
sewardj | 7eb7c58 | 2005-06-23 01:02:53 +0000 | [diff] [blame] | 414 | ML_(sema_deinit)(&run_sema); |
| 415 | ML_(sema_init)(&run_sema); |
| 416 | ML_(sema_down)(&run_sema); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 417 | } |
sewardj | 20917d8 | 2002-05-28 01:36:45 +0000 | [diff] [blame] | 418 | |
jsgf | 855d93d | 2003-10-13 22:26:55 +0000 | [diff] [blame] | 419 | |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 420 | /* Initialise the scheduler. Create a single "main" thread ready to |
sewardj | 2a99cf6 | 2004-11-24 10:44:19 +0000 | [diff] [blame] | 421 | run, with special ThreadId of one. This is called at startup. The |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 422 | caller subsequently initialises the guest state components of this |
| 423 | main thread, thread 1. |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 424 | */ |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 425 | void VG_(scheduler_init) ( Addr clstack_end, SizeT clstack_size ) |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 426 | { |
thughes | c37184f | 2004-09-11 14:16:57 +0000 | [diff] [blame] | 427 | Int i; |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 428 | ThreadId tid_main; |
| 429 | |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 430 | vg_assert(VG_IS_PAGE_ALIGNED(clstack_end+1)); |
| 431 | vg_assert(VG_IS_PAGE_ALIGNED(clstack_size)); |
| 432 | |
sewardj | 7eb7c58 | 2005-06-23 01:02:53 +0000 | [diff] [blame] | 433 | ML_(sema_init)(&run_sema); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 434 | |
sewardj | 6072c36 | 2002-04-19 14:40:57 +0000 | [diff] [blame] | 435 | for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) { |
sewardj | c793fd3 | 2005-05-31 17:24:49 +0000 | [diff] [blame] | 436 | |
| 437 | /* Paranoia .. completely zero it out. */ |
| 438 | VG_(memset)( & VG_(threads)[i], 0, sizeof( VG_(threads)[i] ) ); |
| 439 | |
| 440 | VG_(threads)[i].sig_queue = NULL; |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 441 | |
njn | 8aa3585 | 2005-06-10 22:59:56 +0000 | [diff] [blame] | 442 | os_state_init(&VG_(threads)[i]); |
sewardj | 20917d8 | 2002-05-28 01:36:45 +0000 | [diff] [blame] | 443 | mostly_clear_thread_record(i); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 444 | |
njn | 50ba34e | 2005-04-04 02:41:42 +0000 | [diff] [blame] | 445 | VG_(threads)[i].status = VgTs_Empty; |
| 446 | VG_(threads)[i].client_stack_szB = 0; |
| 447 | VG_(threads)[i].client_stack_highest_word = (Addr)NULL; |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 448 | } |
| 449 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 450 | tid_main = VG_(alloc_ThreadState)(); |
sewardj | a4068de | 2006-04-05 23:06:31 +0000 | [diff] [blame] | 451 | vg_assert(tid_main == 1); |
sewardj | 5f07b66 | 2002-04-23 16:52:51 +0000 | [diff] [blame] | 452 | |
njn | 50ba34e | 2005-04-04 02:41:42 +0000 | [diff] [blame] | 453 | VG_(threads)[tid_main].client_stack_highest_word |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 454 | = clstack_end + 1 - sizeof(UWord); |
| 455 | VG_(threads)[tid_main].client_stack_szB |
| 456 | = clstack_size; |
sewardj | bf290b9 | 2002-05-01 02:28:01 +0000 | [diff] [blame] | 457 | |
njn | 310ed28 | 2005-06-26 15:11:37 +0000 | [diff] [blame] | 458 | VG_(atfork_child)(sched_fork_cleanup); |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 459 | } |
| 460 | |
| 461 | |
/* ---------------------------------------------------------------------
   Helpers for running translations.
   ------------------------------------------------------------------ */

/* Use gcc's built-in setjmp/longjmp.  longjmp must not restore signal
   mask state, but does need to pass "val" through. */
/* Run 'stmt' for thread 'tid' with a setjmp context installed, so
   that code elsewhere can longjmp back out of it; 'jumped' records
   whether that happened.  sched_jmpbuf_valid brackets the window in
   which the jmpbuf may legitimately be used, and is asserted both
   before arming and after the statement/longjmp returns. */
#define SCHEDSETJMP(tid, jumped, stmt)					\
	do {								\
		ThreadState * volatile _qq_tst = VG_(get_ThreadState)(tid);	\
									\
		(jumped) = __builtin_setjmp(_qq_tst->sched_jmpbuf);     \
		if ((jumped) == 0) {					\
			vg_assert(!_qq_tst->sched_jmpbuf_valid);	\
			_qq_tst->sched_jmpbuf_valid = True;		\
			stmt;						\
		}	else if (VG_(clo_trace_sched))			\
			VG_(printf)("SCHEDSETJMP(line %d) tid %d, jumped=%d\n", \
                                    __LINE__, tid, jumped);             \
		vg_assert(_qq_tst->sched_jmpbuf_valid);			\
		_qq_tst->sched_jmpbuf_valid = False;			\
	} while(0)
| 484 | |
/* Do various guest state alignment checks prior to running a thread.
   Specifically, check that what we have matches Vex's guest state
   layout requirements. */
static void do_pre_run_checks ( volatile ThreadState* tst )
{
   /* Addresses and sizes of the three guest-state areas: the real
      state, its shadow, and the spill area. */
   Addr a_vex    = (Addr) & tst->arch.vex;
   Addr a_vexsh  = (Addr) & tst->arch.vex_shadow;
   Addr a_spill  = (Addr) & tst->arch.vex_spill;
   UInt sz_vex   = (UInt) sizeof tst->arch.vex;
   UInt sz_vexsh = (UInt) sizeof tst->arch.vex_shadow;
   UInt sz_spill = (UInt) sizeof tst->arch.vex_spill;

   if (0)
   VG_(printf)("%p %d %p %d %p %d\n",
               (void*)a_vex, sz_vex, (void*)a_vexsh, sz_vexsh,
               (void*)a_spill, sz_spill );

   /* Size alignment requirements. */
   vg_assert(VG_IS_8_ALIGNED(sz_vex));
   vg_assert(VG_IS_8_ALIGNED(sz_vexsh));
   vg_assert(VG_IS_16_ALIGNED(sz_spill));

   /* Address alignment requirements. */
   vg_assert(VG_IS_4_ALIGNED(a_vex));
   vg_assert(VG_IS_4_ALIGNED(a_vexsh));
   vg_assert(VG_IS_4_ALIGNED(a_spill));

   /* Shadow must be the same size as, and directly follow, the real
      state; spill follows the shadow. */
   vg_assert(sz_vex == sz_vexsh);
   vg_assert(a_vex + sz_vex == a_vexsh);

   vg_assert(sz_spill == LibVEX_N_SPILL_BYTES);
   vg_assert(a_vex + 2 * sz_vex == a_spill);

#  if defined(VGA_ppc32) || defined(VGA_ppc64)
   /* ppc guest_state vector regs must be 16 byte aligned for
      loads/stores */
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VR0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow.guest_VR0));
#  endif
}
| 524 | |
| 525 | /* Run the thread tid for a while, and return a VG_TRC_* value |
| 526 | indicating why VG_(run_innerloop) stopped. */ |
static UInt run_thread_for_a_while ( ThreadId tid )
{
   /* All locals are volatile: control can return here via a longjmp
      out of SCHEDSETJMP, and non-volatile locals would then have
      indeterminate values. */
   volatile Int          jumped;
   volatile ThreadState* tst = NULL; /* stop gcc complaining */
   volatile UInt         trc;
   volatile Int          dispatch_ctr_SAVED;
   volatile Int          done_this_time;

   /* Paranoia */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(!VG_(is_exiting)(tid));

   tst = VG_(get_ThreadState)(tid);
   do_pre_run_checks(tst);
   /* end Paranoia */

   /* Disabled ppc64 TOC-pointer consistency check, kept for debugging. */
   //if (0) {
   //  Char buf[100];
   //  Bool ok = VG_(get_fnname_if_entry) ( tst->arch.vex.guest_CIA,
   //                                       buf, 100 );
   //  if (ok) {
   //    Addr r2actual  = tst->arch.vex.guest_GPR2;
   //    Addr r2tocptr  = VG_(get_tocptr)( tst->arch.vex.guest_CIA );
   //    if (1) VG_(printf)("R2 act 0x%016llx toc 0x%016llx %s\n",
   //                       r2actual, r2tocptr, buf);
   //    if (r2tocptr != 0) vg_assert(r2actual == r2tocptr);
   //  }
   //}

   /* Snapshot the dispatch counter so we can compute, afterwards, how
      many basic blocks were actually run. */
   trc = 0;
   dispatch_ctr_SAVED = VG_(dispatch_ctr);

#  if defined(VGA_ppc32) || defined(VGA_ppc64)
   /* This is necessary due to the hacky way vex models reservations
      on ppc.  It's really quite incorrect for each thread to have its
      own reservation flag/address, since it's really something that
      all threads share (that's the whole point).  But having shared
      guest state is something we can't model with Vex.  However, as
      per PaulM's 2.4.0ppc, the reservation is modelled using a
      reservation flag which is cleared at each context switch.  So it
      is indeed possible to get away with a per thread-reservation if
      the thread's reservation is cleared before running it.
   */
   /* Clear any existing reservation that this thread might have made
      last time it was running. */
   VG_(threads)[tid].arch.vex.guest_RESVN = 0;
#  endif

   /* there should be no undealt-with signals */
   //vg_assert(VG_(threads)[tid].siginfo.si_signo == 0);

   /* Bracket the dispatcher call so the signal machinery knows whether
      a fault happened in generated code or in Valgrind itself. */
   vg_assert(VG_(in_generated_code) == False);
   VG_(in_generated_code) = True;

   SCHEDSETJMP(
      tid,
      jumped,
      trc = (UInt)VG_(run_innerloop)( (void*)&tst->arch.vex,
                                      VG_(clo_profile_flags) > 0 ? 1 : 0 )
   );

   VG_(in_generated_code) = False;

   if (jumped) {
      /* We get here if the client took a fault that caused our signal
         handler to longjmp. */
      vg_assert(trc == 0);  /* trc was never assigned by the dispatcher */
      trc = VG_TRC_FAULT_SIGNAL;
      block_signals(tid);
   }

   /* Number of bbs executed this run = counter drop.  The trailing
      "- 0" is presumably a leftover adjustment term — harmless. */
   done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 0;

   vg_assert(done_this_time >= 0);
   bbs_done += (ULong)done_this_time;

   return trc;
}
| 606 | |
| 607 | |
| 608 | /* Run a no-redir translation just once, and return the resulting |
| 609 | VG_TRC_* value. */ |
static UInt run_noredir_translation ( Addr hcode, ThreadId tid )
{
   /* volatile: a longjmp out of SCHEDSETJMP can return control here. */
   volatile Int          jumped;
   volatile ThreadState* tst;
   volatile UWord        argblock[4];

   /* Paranoia */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(!VG_(is_exiting)(tid));

   tst = VG_(get_ThreadState)(tid);
   do_pre_run_checks(tst);
   /* end Paranoia */

#  if defined(VGA_ppc32) || defined(VGA_ppc64)
   /* I don't think we need to clear this thread's guest_RESVN here,
      because we can only get here if run_thread_for_a_while() has
      been used immediately before, on this same thread. */
#  endif

   /* There can be 3 outcomes from VG_(run_a_noredir_translation):

      - a signal occurred and the sighandler longjmp'd.  Then both [2]
        and [3] are unchanged - hence zero.

      - translation ran normally, set [2] (next guest IP) and set [3]
        to whatever [1] was beforehand, indicating a normal (boring)
        jump to the next block.

      - translation ran normally, set [2] (next guest IP) and set [3]
        to something different from [1] beforehand, which indicates a
        TRC_ value.
   */
   /* [0] = host code address, [1] = guest state pointer (inputs);
      [2] and [3] are outputs, pre-zeroed so the longjmp case is
      distinguishable. */
   argblock[0] = (UWord)hcode;
   argblock[1] = (UWord)&VG_(threads)[tid].arch.vex;
   argblock[2] = 0; /* next guest IP is written here */
   argblock[3] = 0; /* guest state ptr afterwards is written here */

   vg_assert(VG_(in_generated_code) == False);
   VG_(in_generated_code) = True;

   SCHEDSETJMP(
      tid,
      jumped,
      VG_(run_a_noredir_translation)( &argblock[0] )
   );

   VG_(in_generated_code) = False;

   if (jumped) {
      /* We get here if the client took a fault that caused our signal
         handler to longjmp. */
      vg_assert(argblock[2] == 0); /* next guest IP was not written */
      vg_assert(argblock[3] == 0); /* trc was not written */
      block_signals(tid);
      return VG_TRC_FAULT_SIGNAL;
   } else {
      /* store away the guest program counter */
      VG_(set_IP)( tid, argblock[2] );
      if (argblock[3] == argblock[1])
         /* the guest state pointer afterwards was unchanged */
         return VG_TRC_BORING;
      else
         return (UInt)argblock[3];
   }
}
| 677 | |
sewardj | 0ec07f3 | 2006-01-12 12:32:32 +0000 | [diff] [blame] | 678 | |
| 679 | /* --------------------------------------------------------------------- |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 680 | The scheduler proper. |
| 681 | ------------------------------------------------------------------ */ |
| 682 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 683 | static void handle_tt_miss ( ThreadId tid ) |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 684 | { |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 685 | Bool found; |
njn | f536bbb | 2005-06-13 04:21:38 +0000 | [diff] [blame] | 686 | Addr ip = VG_(get_IP)(tid); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 687 | |
| 688 | /* Trivial event. Miss in the fast-cache. Do a full |
| 689 | lookup for it. */ |
njn | f536bbb | 2005-06-13 04:21:38 +0000 | [diff] [blame] | 690 | found = VG_(search_transtab)( NULL, ip, True/*upd_fast_cache*/ ); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 691 | if (!found) { |
| 692 | /* Not found; we need to request a translation. */ |
sewardj | 0ec07f3 | 2006-01-12 12:32:32 +0000 | [diff] [blame] | 693 | if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/, |
| 694 | bbs_done, True/*allow redirection*/ )) { |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 695 | found = VG_(search_transtab)( NULL, ip, True ); |
njn | 50ae1a7 | 2005-04-08 23:28:23 +0000 | [diff] [blame] | 696 | vg_assert2(found, "VG_TRC_INNER_FASTMISS: missing tt_fast entry"); |
| 697 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 698 | } else { |
| 699 | // If VG_(translate)() fails, it's because it had to throw a |
| 700 | // signal because the client jumped to a bad address. That |
| 701 | // means that either a signal has been set up for delivery, |
| 702 | // or the thread has been marked for termination. Either |
| 703 | // way, we just need to go back into the scheduler loop. |
| 704 | } |
| 705 | } |
| 706 | } |
| 707 | |
| 708 | static void handle_syscall(ThreadId tid) |
| 709 | { |
| 710 | ThreadState *tst = VG_(get_ThreadState)(tid); |
| 711 | Bool jumped; |
| 712 | |
| 713 | /* Syscall may or may not block; either way, it will be |
| 714 | complete by the time this call returns, and we'll be |
| 715 | runnable again. We could take a signal while the |
| 716 | syscall runs. */ |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 717 | |
| 718 | if (VG_(clo_sanity_level >= 3)) |
| 719 | VG_(am_do_sync_check)("(BEFORE SYSCALL)",__FILE__,__LINE__); |
| 720 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 721 | SCHEDSETJMP(tid, jumped, VG_(client_syscall)(tid)); |
| 722 | |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 723 | if (VG_(clo_sanity_level >= 3)) |
| 724 | VG_(am_do_sync_check)("(AFTER SYSCALL)",__FILE__,__LINE__); |
| 725 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 726 | if (!VG_(is_running_thread)(tid)) |
njn | c7561b9 | 2005-06-19 01:24:32 +0000 | [diff] [blame] | 727 | VG_(printf)("tid %d not running; VG_(running_tid)=%d, tid %d status %d\n", |
| 728 | tid, VG_(running_tid), tid, tst->status); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 729 | vg_assert(VG_(is_running_thread)(tid)); |
| 730 | |
| 731 | if (jumped) { |
njn | 9fc3112 | 2005-05-11 18:48:33 +0000 | [diff] [blame] | 732 | block_signals(tid); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 733 | VG_(poll_signals)(tid); |
| 734 | } |
| 735 | } |
| 736 | |
sewardj | a591a05 | 2006-01-12 14:04:46 +0000 | [diff] [blame] | 737 | /* tid just requested a jump to the noredir version of its current |
| 738 | program counter. So make up that translation if needed, run it, |
| 739 | and return the resulting thread return code. */ |
| 740 | static UInt/*trc*/ handle_noredir_jump ( ThreadId tid ) |
| 741 | { |
| 742 | AddrH hcode = 0; |
| 743 | Addr ip = VG_(get_IP)(tid); |
| 744 | |
| 745 | Bool found = VG_(search_unredir_transtab)( &hcode, ip ); |
| 746 | if (!found) { |
| 747 | /* Not found; we need to request a translation. */ |
| 748 | if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/, bbs_done, |
| 749 | False/*NO REDIRECTION*/ )) { |
| 750 | |
| 751 | found = VG_(search_unredir_transtab)( &hcode, ip ); |
| 752 | vg_assert2(found, "unredir translation missing after creation?!"); |
| 753 | |
| 754 | } else { |
| 755 | // If VG_(translate)() fails, it's because it had to throw a |
| 756 | // signal because the client jumped to a bad address. That |
| 757 | // means that either a signal has been set up for delivery, |
| 758 | // or the thread has been marked for termination. Either |
| 759 | // way, we just need to go back into the scheduler loop. |
| 760 | return VG_TRC_BORING; |
| 761 | } |
| 762 | |
| 763 | } |
| 764 | |
| 765 | vg_assert(found); |
| 766 | vg_assert(hcode != 0); |
| 767 | |
| 768 | /* Otherwise run it and return the resulting VG_TRC_* value. */ |
| 769 | return run_noredir_translation( hcode, tid ); |
| 770 | } |
| 771 | |
| 772 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 773 | /* |
| 774 | Run a thread until it wants to exit. |
| 775 | |
| 776 | We assume that the caller has already called VG_(set_running) for |
| 777 | us, so we own the VCPU. Also, all signals are blocked. |
| 778 | */ |
VgSchedReturnCode VG_(scheduler) ( ThreadId tid )
{
   UInt     trc;
   ThreadState *tst = VG_(get_ThreadState)(tid);

   if (VG_(clo_trace_sched))
      print_sched_event(tid, "entering VG_(scheduler)");

   /* set the proper running signal mask */
   block_signals(tid);

   vg_assert(VG_(is_running_thread)(tid));

   /* +1 because the dispatcher decrements before testing for zero;
      see the longer comment below where this is reloaded. */
   VG_(dispatch_ctr) = SCHEDULING_QUANTUM + 1;

   /* Main loop: run the thread for a quantum, then act on whatever
      trc (thread return code) the dispatcher came back with. */
   while(!VG_(is_exiting)(tid)) {

      if (VG_(dispatch_ctr) == 1) {
	 /* Our slice is done, so yield the CPU to another thread.  This
	    doesn't sleep between sleeping and running, since that would
	    take too much time. */
	 VG_(set_sleeping)(tid, VgTs_Yielding);
	 /* nothing */
	 VG_(set_running)(tid);

	 /* OK, do some relatively expensive housekeeping stuff */
	 scheduler_sanity(tid);
	 VG_(sanity_check_general)(False);

	 /* Look for any pending signals for this thread, and set them up
	    for delivery */
	 VG_(poll_signals)(tid);

	 if (VG_(is_exiting)(tid))
	    break;		/* poll_signals picked up a fatal signal */

	 /* For stats purposes only. */
	 n_scheduling_events_MAJOR++;

	 /* Figure out how many bbs to ask vg_run_innerloop to do.  Note
	    that it decrements the counter before testing it for zero, so
	    that if tst->dispatch_ctr is set to N you get at most N-1
	    iterations.  Also this means that tst->dispatch_ctr must
	    exceed zero before entering the innerloop.  Also also, the
	    decrement is done before the bb is actually run, so you
	    always get at least one decrement even if nothing happens. */
	 VG_(dispatch_ctr) = SCHEDULING_QUANTUM + 1;

	 /* paranoia ... */
	 vg_assert(tst->tid == tid);
	 vg_assert(tst->os_state.lwpid == VG_(gettid)());
      }

      /* For stats purposes only. */
      n_scheduling_events_MINOR++;

      if (0)
	 VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs",
		      tid, VG_(dispatch_ctr) - 1 );

      trc = run_thread_for_a_while ( tid );

      if (VG_(clo_trace_sched) && VG_(clo_verbosity) > 2) {
	 Char buf[50];
	 VG_(sprintf)(buf, "TRC: %s", name_of_sched_event(trc));
	 print_sched_event(tid, buf);
      }

      if (trc == VEX_TRC_JMP_NOREDIR) {
         /* If we got a request to run a no-redir version of
            something, do so now -- handle_noredir_jump just (creates
            and) runs that one translation.  The flip side is that the
            noredir translation can't itself return another noredir
            request -- that would be nonsensical.  It can, however,
            return VG_TRC_BORING, which just means keep going as
            normal. */
         trc = handle_noredir_jump(tid);
         vg_assert(trc != VEX_TRC_JMP_NOREDIR);
      }

      switch (trc) {
      case VG_TRC_BORING:
         /* no special event, just keep going. */
         break;

      case VG_TRC_INNER_FASTMISS:
	 vg_assert(VG_(dispatch_ctr) > 1);
	 handle_tt_miss(tid);
	 break;

      case VEX_TRC_JMP_CLIENTREQ:
	 do_client_request(tid);
	 break;

      case VEX_TRC_JMP_SYS_INT128:  /* x86-linux */
      case VEX_TRC_JMP_SYS_SYSCALL: /* amd64-linux, ppc32-linux */
	 handle_syscall(tid);
	 if (VG_(clo_sanity_level) > 2)
	    VG_(sanity_check_general)(True); /* sanity-check every syscall */
	 break;

      case VEX_TRC_JMP_YIELD:
	 /* Explicit yield, because this thread is in a spin-lock
	    or something.  Only let the thread run for a short while
            longer.  Because swapping to another thread is expensive,
            we're prepared to let this thread eat a little more CPU
            before swapping to another.  That means that short term
            spins waiting for hardware to poke memory won't cause a
            thread swap. */
	 if (VG_(dispatch_ctr) > 2000)
            VG_(dispatch_ctr) = 2000;
	 break;

      case VG_TRC_INNER_COUNTERZERO:
	 /* Timeslice is out.  Let a new thread be scheduled. */
	 vg_assert(VG_(dispatch_ctr) == 1);
	 break;

      case VG_TRC_FAULT_SIGNAL:
	 /* Everything should be set up (either we're exiting, or
	    about to start in a signal handler). */
	 break;

      case VEX_TRC_JMP_MAPFAIL:
         /* Failure of arch-specific address translation (x86/amd64
            segment override use) */
         /* jrs 2005 03 11: is this correct? */
         VG_(synth_fault)(tid);
         break;

      case VEX_TRC_JMP_EMWARN: {
         /* Guest code hit something vex can only approximate.  Report
            each distinct warning at most 3 times (unknown ones always). */
         static Int  counts[EmWarn_NUMBER];
         static Bool counts_initted = False;
         VexEmWarn ew;
         HChar*    what;
         Bool      show;
         Int       q;
         if (!counts_initted) {
            counts_initted = True;
            for (q = 0; q < EmWarn_NUMBER; q++)
               counts[q] = 0;
         }
         ew   = (VexEmWarn)VG_(threads)[tid].arch.vex.guest_EMWARN;
         what = (ew < 0 || ew >= EmWarn_NUMBER)
                   ? "unknown (?!)"
                   : LibVEX_EmWarn_string(ew);
         show = (ew < 0 || ew >= EmWarn_NUMBER)
                   ? True
                   : counts[ew]++ < 3;
         if (show && VG_(clo_show_emwarns) && !VG_(clo_xml)) {
            VG_(message)( Vg_UserMsg,
                          "Emulation warning: unsupported action:");
            VG_(message)( Vg_UserMsg, " %s", what);
            VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
         }
         break;
      }

      case VEX_TRC_JMP_EMFAIL: {
         /* As EMWARN, but the emulation cannot continue at all. */
         VexEmWarn ew;
         HChar*    what;
         ew   = (VexEmWarn)VG_(threads)[tid].arch.vex.guest_EMWARN;
         what = (ew < 0 || ew >= EmWarn_NUMBER)
                   ? "unknown (?!)"
                   : LibVEX_EmWarn_string(ew);
         VG_(message)( Vg_UserMsg,
                       "Emulation fatal error -- Valgrind cannot continue:");
         VG_(message)( Vg_UserMsg, " %s", what);
         VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
         VG_(message)(Vg_UserMsg, "");
         VG_(message)(Vg_UserMsg, "Valgrind has to exit now. Sorry.");
         VG_(message)(Vg_UserMsg, "");
         VG_(exit)(1);
         break;
      }

      case VEX_TRC_JMP_TRAP:
         VG_(synth_sigtrap)(tid);
         break;

      case VEX_TRC_JMP_NODECODE:
   VG_(message)(Vg_UserMsg,
      "valgrind: Unrecognised instruction at address %p.", VG_(get_IP)(tid));
#define M(a) VG_(message)(Vg_UserMsg, a);
   M("Your program just tried to execute an instruction that Valgrind" );
   M("did not recognise. There are two possible reasons for this." );
   M("1. Your program has a bug and erroneously jumped to a non-code" );
   M(" location. If you are running Memcheck and you just saw a" );
   M(" warning about a bad jump, it's probably your program's fault.");
   M("2. The instruction is legitimate but Valgrind doesn't handle it,");
   M(" i.e. it's Valgrind's fault. If you think this is the case or");
   M(" you are not sure, please let us know and we'll try to fix it.");
   M("Either way, Valgrind will now raise a SIGILL signal which will" );
   M("probably kill your program." );
#undef M
   VG_(synth_sigill)(tid, VG_(get_IP)(tid));
   break;

      case VEX_TRC_JMP_TINVAL:
         /* Guest code asked for translations covering [TISTART,
            TISTART+TILEN) to be discarded (self-modifying code). */
         VG_(discard_translations)(
            (Addr64)VG_(threads)[tid].arch.vex.guest_TISTART,
            VG_(threads)[tid].arch.vex.guest_TILEN,
            "scheduler(VEX_TRC_JMP_TINVAL)"
         );
         if (0)
            VG_(printf)("dump translations done.\n");
         break;

      case VG_TRC_INVARIANT_FAILED:
         /* This typically happens if, after running generated code,
            it is detected that host CPU settings (eg, FPU/Vector
            control words) are not as they should be.  Vex's code
            generation specifies the state such control words should
            be in on entry to Vex-generated code, and they should be
            unchanged on exit from it.  Failure of this assertion
            usually means a bug in Vex's code generation. */
         vg_assert2(0, "VG_(scheduler), phase 3: "
                       "run_innerloop detected host "
                       "state invariant failure", trc);

      case VEX_TRC_JMP_SYS_SYSENTER:
         /* Do whatever simulation is appropriate for an x86 sysenter
            instruction.  Note that it is critical to set this thread's
            guest_EIP to point at the code to execute after the
            sysenter, since Vex-generated code will not have set it --
            vex does not know what it should be.  Vex sets the next
            address to zero, so if you don't set guest_EIP, the thread
            will jump to zero afterwards and probably die as a result. */
#        if defined(VGA_x86)
         //FIXME: VG_(threads)[tid].arch.vex.guest_EIP = ....
         //handle_sysenter_x86(tid);
         vg_assert2(0, "VG_(scheduler), phase 3: "
                       "sysenter_x86 on not yet implemented");
#        else
         vg_assert2(0, "VG_(scheduler), phase 3: "
                       "sysenter_x86 on non-x86 platform?!?!");
#        endif

      default:
	 vg_assert2(0, "VG_(scheduler), phase 3: "
                       "unexpected thread return code (%u)", trc);
	 /* NOTREACHED */
	 break;

      } /* switch (trc) */
   }

   if (VG_(clo_trace_sched))
      print_sched_event(tid, "exiting VG_(scheduler)");

   vg_assert(VG_(is_exiting)(tid));

   //if (VG_(clo_model_pthreads))
   //   VG_(tm_thread_exit)(tid);

   return tst->exitreason;
}
| 1035 | |
| 1036 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1037 | /* |
| 1038 | This causes all threads to forceably exit. They aren't actually |
| 1039 | dead by the time this returns; you need to call |
njn | af839f5 | 2005-06-23 03:27:57 +0000 | [diff] [blame] | 1040 | VG_(reap_threads)() to wait for them. |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1041 | */ |
| 1042 | void VG_(nuke_all_threads_except) ( ThreadId me, VgSchedReturnCode src ) |
sewardj | ccef2e6 | 2002-05-29 19:26:32 +0000 | [diff] [blame] | 1043 | { |
| 1044 | ThreadId tid; |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1045 | |
| 1046 | vg_assert(VG_(is_running_thread)(me)); |
sewardj | 45f02c4 | 2005-02-05 18:27:14 +0000 | [diff] [blame] | 1047 | |
sewardj | ccef2e6 | 2002-05-29 19:26:32 +0000 | [diff] [blame] | 1048 | for (tid = 1; tid < VG_N_THREADS; tid++) { |
| 1049 | if (tid == me |
jsgf | 855d93d | 2003-10-13 22:26:55 +0000 | [diff] [blame] | 1050 | || VG_(threads)[tid].status == VgTs_Empty) |
sewardj | ccef2e6 | 2002-05-29 19:26:32 +0000 | [diff] [blame] | 1051 | continue; |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1052 | if (0) |
sewardj | ef037c7 | 2002-05-30 00:40:03 +0000 | [diff] [blame] | 1053 | VG_(printf)( |
| 1054 | "VG_(nuke_all_threads_except): nuking tid %d\n", tid); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1055 | |
| 1056 | VG_(threads)[tid].exitreason = src; |
sewardj | a8d8e23 | 2005-06-07 20:04:56 +0000 | [diff] [blame] | 1057 | if (src == VgSrc_FatalSig) |
| 1058 | VG_(threads)[tid].os_state.fatalsig = VKI_SIGKILL; |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1059 | VG_(kill_thread)(tid); |
sewardj | ccef2e6 | 2002-05-29 19:26:32 +0000 | [diff] [blame] | 1060 | } |
| 1061 | } |
| 1062 | |
| 1063 | |
njn | d304045 | 2003-05-19 15:04:06 +0000 | [diff] [blame] | 1064 | /* --------------------------------------------------------------------- |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1065 | Specifying shadow register values |
njn | d304045 | 2003-05-19 15:04:06 +0000 | [diff] [blame] | 1066 | ------------------------------------------------------------------ */ |
| 1067 | |
njn | f536bbb | 2005-06-13 04:21:38 +0000 | [diff] [blame] | 1068 | #if defined(VGA_x86) |
njn | af839f5 | 2005-06-23 03:27:57 +0000 | [diff] [blame] | 1069 | # define VG_CLREQ_ARGS guest_EAX |
| 1070 | # define VG_CLREQ_RET guest_EDX |
njn | f536bbb | 2005-06-13 04:21:38 +0000 | [diff] [blame] | 1071 | #elif defined(VGA_amd64) |
njn | af839f5 | 2005-06-23 03:27:57 +0000 | [diff] [blame] | 1072 | # define VG_CLREQ_ARGS guest_RAX |
| 1073 | # define VG_CLREQ_RET guest_RDX |
sewardj | 2c48c7b | 2005-11-29 13:05:56 +0000 | [diff] [blame] | 1074 | #elif defined(VGA_ppc32) || defined(VGA_ppc64) |
njn | af839f5 | 2005-06-23 03:27:57 +0000 | [diff] [blame] | 1075 | # define VG_CLREQ_ARGS guest_GPR4 |
| 1076 | # define VG_CLREQ_RET guest_GPR3 |
njn | f536bbb | 2005-06-13 04:21:38 +0000 | [diff] [blame] | 1077 | #else |
| 1078 | # error Unknown arch |
| 1079 | #endif |
| 1080 | |
njn | af839f5 | 2005-06-23 03:27:57 +0000 | [diff] [blame] | 1081 | #define CLREQ_ARGS(regs) ((regs).vex.VG_CLREQ_ARGS) |
| 1082 | #define CLREQ_RET(regs) ((regs).vex.VG_CLREQ_RET) |
| 1083 | #define O_CLREQ_RET (offsetof(VexGuestArchState, VG_CLREQ_RET)) |
njn | f536bbb | 2005-06-13 04:21:38 +0000 | [diff] [blame] | 1084 | |
njn | 502badb | 2005-05-08 02:04:49 +0000 | [diff] [blame] | 1085 | // These macros write a value to a client's thread register, and tell the |
| 1086 | // tool that it's happened (if necessary). |
| 1087 | |
| 1088 | #define SET_CLREQ_RETVAL(zztid, zzval) \ |
| 1089 | do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \ |
| 1090 | VG_TRACK( post_reg_write, \ |
| 1091 | Vg_CoreClientReq, zztid, O_CLREQ_RET, sizeof(UWord)); \ |
| 1092 | } while (0) |
| 1093 | |
| 1094 | #define SET_CLCALL_RETVAL(zztid, zzval, f) \ |
| 1095 | do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \ |
| 1096 | VG_TRACK( post_reg_write_clientcall_return, \ |
| 1097 | zztid, O_CLREQ_RET, sizeof(UWord), f); \ |
| 1098 | } while (0) |
| 1099 | |
sewardj | 0ec07f3 | 2006-01-12 12:32:32 +0000 | [diff] [blame] | 1100 | |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 1101 | /* --------------------------------------------------------------------- |
sewardj | 124ca2a | 2002-06-20 10:19:38 +0000 | [diff] [blame] | 1102 | Handle client requests. |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 1103 | ------------------------------------------------------------------ */ |
| 1104 | |
njn | 9cb54ac | 2005-06-12 04:19:17 +0000 | [diff] [blame] | 1105 | // OS-specific(?) client requests |
| 1106 | static Bool os_client_request(ThreadId tid, UWord *args) |
| 1107 | { |
| 1108 | Bool handled = True; |
| 1109 | |
| 1110 | vg_assert(VG_(is_running_thread)(tid)); |
| 1111 | |
| 1112 | switch(args[0]) { |
| 1113 | case VG_USERREQ__LIBC_FREERES_DONE: |
| 1114 | /* This is equivalent to an exit() syscall, but we don't set the |
| 1115 | exitcode (since it might already be set) */ |
| 1116 | if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) |
| 1117 | VG_(message)(Vg_DebugMsg, |
| 1118 | "__libc_freeres() done; really quitting!"); |
| 1119 | VG_(threads)[tid].exitreason = VgSrc_ExitSyscall; |
| 1120 | break; |
| 1121 | |
| 1122 | default: |
| 1123 | handled = False; |
| 1124 | break; |
| 1125 | } |
| 1126 | |
| 1127 | return handled; |
| 1128 | } |
| 1129 | |
| 1130 | |
/* Do a client request for the thread tid.  After the request, tid may
   or may not still be runnable; if not, the scheduler will have to
   choose a new thread to run.

   The request number is arg[0]; arg[1..] are request-specific
   operands.  Results are returned via SET_CLREQ_RETVAL /
   SET_CLCALL_RETVAL, which also inform the tool of the register
   write.
*/
static
void do_client_request ( ThreadId tid )
{
   /* The client left the address of its argument block in the
      VG_CLREQ_ARGS guest register. */
   UWord* arg = (UWord*)(CLREQ_ARGS(VG_(threads)[tid].arch));
   UWord req_no = arg[0];

   /* Flip to 1 for ad-hoc request tracing. */
   if (0)
      VG_(printf)("req no = 0x%llx, arg = %p\n", (ULong)req_no, arg);
   switch (req_no) {

   /* CLIENT_CALL0..3: call a client-supplied function f(tid, ...) on
      the client's behalf, passing 0..3 extra args, and return its
      result as the request's result. */
   case VG_USERREQ__CLIENT_CALL0: {
      UWord (*f)(ThreadId) = (void*)arg[1];
      if (f == NULL)
         VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL0: func=%p", f);
      else
         SET_CLCALL_RETVAL(tid, f ( tid ), (Addr)f);
      break;
   }
   case VG_USERREQ__CLIENT_CALL1: {
      UWord (*f)(ThreadId, UWord) = (void*)arg[1];
      if (f == NULL)
         VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL1: func=%p", f);
      else
         SET_CLCALL_RETVAL(tid, f ( tid, arg[2] ), (Addr)f );
      break;
   }
   case VG_USERREQ__CLIENT_CALL2: {
      UWord (*f)(ThreadId, UWord, UWord) = (void*)arg[1];
      if (f == NULL)
         VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL2: func=%p", f);
      else
         SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3] ), (Addr)f );
      break;
   }
   case VG_USERREQ__CLIENT_CALL3: {
      UWord (*f)(ThreadId, UWord, UWord, UWord) = (void*)arg[1];
      if (f == NULL)
         VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL3: func=%p", f);
      else
         SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3], arg[4] ), (Addr)f );
      break;
   }

   // Nb: this looks like a circular definition, because it kind of is.
   // See comment in valgrind.h to understand what's going on.
   case VG_USERREQ__RUNNING_ON_VALGRIND:
      SET_CLREQ_RETVAL(tid, RUNNING_ON_VALGRIND+1);
      break;

   /* printf-style messaging from the client: arg[1] is the format
      string, arg[2] the client's packaged va_list.  The number of
      characters printed is returned to the client. */
   case VG_USERREQ__PRINTF: {
      Int count = 
         VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] );
      SET_CLREQ_RETVAL( tid, count );
      break; }

   /* Same, but routed to the debug log rather than the client log. */
   case VG_USERREQ__INTERNAL_PRINTF: {
      Int count = 
         VG_(vmessage)( Vg_DebugMsg, (char *)arg[1], (void*)arg[2] );
      SET_CLREQ_RETVAL( tid, count );
      break; }

   /* As PRINTF, but additionally prints the client's current stack
      trace after the message. */
   case VG_USERREQ__PRINTF_BACKTRACE: {
      Int count =
         VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] );
      VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
      SET_CLREQ_RETVAL( tid, count );
      break; }

   /* Client tells us about a stack it manages itself (e.g. for
      user-level thread libraries): arg[1]/arg[2] are the stack
      bounds; the registered stack's id is returned. */
   case VG_USERREQ__STACK_REGISTER: {
      UWord sid = VG_(register_stack)((Addr)arg[1], (Addr)arg[2]);
      SET_CLREQ_RETVAL( tid, sid );
      break; }

   /* Undo a STACK_REGISTER; arg[1] is the id it returned. */
   case VG_USERREQ__STACK_DEREGISTER: {
      VG_(deregister_stack)(arg[1]);
      SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
      break; }

   /* Update the bounds of a previously registered stack. */
   case VG_USERREQ__STACK_CHANGE: {
      VG_(change_stack)(arg[1], (Addr)arg[2], (Addr)arg[3]);
      SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
      break; }

   /* Hand the tool's malloc-replacement entry points (and related
      info) to the client-side interceptors; arg[1] points at the
      struct to fill in. */
   case VG_USERREQ__GET_MALLOCFUNCS: {
      struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];

      info->tl_malloc               = VG_(tdict).tool_malloc;
      info->tl_calloc               = VG_(tdict).tool_calloc;
      info->tl_realloc              = VG_(tdict).tool_realloc;
      info->tl_memalign             = VG_(tdict).tool_memalign;
      info->tl___builtin_new        = VG_(tdict).tool___builtin_new;
      info->tl___builtin_vec_new    = VG_(tdict).tool___builtin_vec_new;
      info->tl_free                 = VG_(tdict).tool_free;
      info->tl___builtin_delete     = VG_(tdict).tool___builtin_delete;
      info->tl___builtin_vec_delete = VG_(tdict).tool___builtin_vec_delete;

      info->arena_payload_szB       = VG_(arena_payload_szB);
      info->mallinfo                = VG_(mallinfo);
      info->clo_trace_malloc        = VG_(clo_trace_malloc);

      SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */

      break;
   }

   /* Requests from the client program */

   /* Client has modified or unmapped code: drop any cached
      translations covering [arg[1], arg[1]+arg[2]). */
   case VG_USERREQ__DISCARD_TRANSLATIONS:
      if (VG_(clo_verbosity) > 2)
         VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
                      " addr %p,  len %d\n",
                      (void*)arg[1], arg[2] );

      VG_(discard_translations)( 
         arg[1], arg[2], "scheduler(VG_USERREQ__DISCARD_TRANSLATIONS)" 
      );

      SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
      break;

   /* Report the number of errors found so far (e.g. for test suites). */
   case VG_USERREQ__COUNT_ERRORS:  
      SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() );
      break;

   /* Not a core request: try the OS-specific handler first, then the
      tool's handler (if it registered one); otherwise whine once. */
   default:
      if (os_client_request(tid, arg)) {
	 // do nothing, os_client_request() handled it
      } else if (VG_(needs).client_requests) {
	 UWord ret;

	 if (VG_(clo_verbosity) > 2)
	    VG_(printf)("client request: code %x,  addr %p,  len %d\n",
			arg[0], (void*)arg[1], arg[2] );

	 if ( VG_TDICT_CALL(tool_handle_client_request, tid, arg, &ret) )
	    SET_CLREQ_RETVAL(tid, ret);
      } else {
	 static Bool whined = False;

	 if (!whined && VG_(clo_verbosity) > 2) {
            // Allow for requests in core, but defined by tools, which
            // have 0 and 0 in their two high bytes.
            Char c1 = (arg[0] >> 24) & 0xff;
            Char c2 = (arg[0] >> 16) & 0xff;
            if (c1 == 0) c1 = '_';
            if (c2 == 0) c2 = '_';
	    VG_(message)(Vg_UserMsg, "Warning:\n"
                "  unhandled client request: 0x%x (%c%c+0x%x).  Perhaps\n" 
		"  VG_(needs).client_requests should be set?",
			 arg[0], c1, c2, arg[0] & 0xffff);
	    whined = True;
	 }
      }
      break;
   }
}
| 1291 | |
| 1292 | |
sewardj | 6072c36 | 2002-04-19 14:40:57 +0000 | [diff] [blame] | 1293 | /* --------------------------------------------------------------------- |
njn | 6676d5b | 2005-06-19 18:49:19 +0000 | [diff] [blame] | 1294 | Sanity checking (permanently engaged) |
sewardj | 6072c36 | 2002-04-19 14:40:57 +0000 | [diff] [blame] | 1295 | ------------------------------------------------------------------ */ |
| 1296 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1297 | /* Internal consistency checks on the sched structures. */ |
sewardj | 6072c36 | 2002-04-19 14:40:57 +0000 | [diff] [blame] | 1298 | static |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1299 | void scheduler_sanity ( ThreadId tid ) |
sewardj | 6072c36 | 2002-04-19 14:40:57 +0000 | [diff] [blame] | 1300 | { |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1301 | Bool bad = False; |
jsgf | 855d93d | 2003-10-13 22:26:55 +0000 | [diff] [blame] | 1302 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1303 | if (!VG_(is_running_thread)(tid)) { |
| 1304 | VG_(message)(Vg_DebugMsg, |
| 1305 | "Thread %d is supposed to be running, but doesn't own run_sema (owned by %d)\n", |
njn | c7561b9 | 2005-06-19 01:24:32 +0000 | [diff] [blame] | 1306 | tid, VG_(running_tid)); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1307 | bad = True; |
jsgf | 855d93d | 2003-10-13 22:26:55 +0000 | [diff] [blame] | 1308 | } |
sewardj | 5f07b66 | 2002-04-23 16:52:51 +0000 | [diff] [blame] | 1309 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1310 | if (VG_(gettid)() != VG_(threads)[tid].os_state.lwpid) { |
| 1311 | VG_(message)(Vg_DebugMsg, |
njn | d06ed47 | 2005-03-13 05:12:31 +0000 | [diff] [blame] | 1312 | "Thread %d supposed to be in LWP %d, but we're actually %d\n", |
| 1313 | tid, VG_(threads)[tid].os_state.lwpid, VG_(gettid)()); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1314 | bad = True; |
sewardj | 5f07b66 | 2002-04-23 16:52:51 +0000 | [diff] [blame] | 1315 | } |
sewardj | 6072c36 | 2002-04-19 14:40:57 +0000 | [diff] [blame] | 1316 | } |
| 1317 | |
/* Top-level periodic sanity driver.  Cheap checks run on every call;
   expensive checks run when force_expensive is set, when
   --sanity-level > 1, or (at level 1) on every 25th call.
   --sanity-level < 1 disables everything. */
void VG_(sanity_check_general) ( Bool force_expensive )
{
   ThreadId tid;

   if (VG_(clo_sanity_level) < 1) return;

   /* --- First do all the tests that we can do quickly. ---*/

   sanity_fast_count++;

   /* Check stuff pertaining to the memory check system. */

   /* Check that nobody has spuriously claimed that the first or
      last 16 pages of memory have become accessible [...] */
   if (VG_(needs).sanity_checks) {
      /* The tool's cheap check must report success. */
      vg_assert(VG_TDICT_CALL(tool_cheap_sanity_check));
   }

   /* --- Now some more expensive checks. ---*/

   /* Once every 25 times, check some more expensive stuff. */
   if ( force_expensive
        || VG_(clo_sanity_level) > 1
        || (VG_(clo_sanity_level) == 1 && (sanity_fast_count % 25) == 0)) {

      sanity_slow_count++;

      if (VG_(needs).sanity_checks) {
          vg_assert(VG_TDICT_CALL(tool_expensive_sanity_check));
      }

      /* Look for stack overruns.  Visit all threads.  (Starts at 1:
         slot 0 is presumably the invalid-tid slot -- confirm against
         VG_INVALID_THREADID.) */
      for (tid = 1; tid < VG_N_THREADS; tid++) {
	 SizeT    remains;
         VgStack* stack;

         /* Skip slots with no live thread. */
	 if (VG_(threads)[tid].status == VgTs_Empty ||
	     VG_(threads)[tid].status == VgTs_Zombie)
	    continue;

         stack 
            = (VgStack*)
              VG_(get_ThreadState)(tid)->os_state.valgrind_stack_base;
         remains 
            = VG_(am_get_VgStack_unused_szB)(stack);
         /* Warn when less than a page of Valgrind-side stack is left. */
	 if (remains < VKI_PAGE_SIZE)
	    VG_(message)(Vg_DebugMsg, 
			 "WARNING: Thread %d is within %d bytes "
			 "of running out of stack!",
			 tid, remains);
      }
   }

   if (VG_(clo_sanity_level) > 1) {
      /* Check sanity of the low-level memory manager.  Note that bugs
	 in the client's code can cause this to fail, so we don't do
	 this check unless specially asked for.  And because it's
	 potentially very expensive. */
      VG_(sanity_check_malloc_all)();
   }
}
sewardj | 6072c36 | 2002-04-19 14:40:57 +0000 | [diff] [blame] | 1379 | |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 1380 | /*--------------------------------------------------------------------*/ |
njn | 278b3d6 | 2005-05-30 23:20:51 +0000 | [diff] [blame] | 1381 | /*--- end ---*/ |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 1382 | /*--------------------------------------------------------------------*/ |