sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 1 | |
| 2 | /*--------------------------------------------------------------------*/ |
njn | 278b3d6 | 2005-05-30 23:20:51 +0000 | [diff] [blame] | 3 | /*--- Thread scheduling. scheduler.c ---*/ |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 4 | /*--------------------------------------------------------------------*/ |
| 5 | |
| 6 | /* |
njn | c0ae705 | 2005-08-25 22:55:19 +0000 | [diff] [blame] | 7 | This file is part of Valgrind, a dynamic binary instrumentation |
| 8 | framework. |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 9 | |
njn | 5361242 | 2005-03-12 16:22:54 +0000 | [diff] [blame] | 10 | Copyright (C) 2000-2005 Julian Seward |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 11 | jseward@acm.org |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 12 | |
| 13 | This program is free software; you can redistribute it and/or |
| 14 | modify it under the terms of the GNU General Public License as |
| 15 | published by the Free Software Foundation; either version 2 of the |
| 16 | License, or (at your option) any later version. |
| 17 | |
| 18 | This program is distributed in the hope that it will be useful, but |
| 19 | WITHOUT ANY WARRANTY; without even the implied warranty of |
| 20 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 21 | General Public License for more details. |
| 22 | |
| 23 | You should have received a copy of the GNU General Public License |
| 24 | along with this program; if not, write to the Free Software |
| 25 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA |
| 26 | 02111-1307, USA. |
| 27 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 28 | The GNU General Public License is contained in the file COPYING. |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 29 | */ |
| 30 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 31 | /* |
| 32 | Overview |
| 33 | |
| 34 | Valgrind tries to emulate the kernel's threading as closely as |
| 35 | possible. The client does all threading via the normal syscalls |
| 36 | (on Linux: clone, etc). Valgrind emulates this by creating exactly |
| 37 | the same process structure as would be created without Valgrind. |
| 38 | There are no extra threads. |
| 39 | |
| 40 | The main difference is that Valgrind only allows one client thread |
| 41 | to run at once. This is controlled with the VCPU semaphore, |
| 42 | "run_sema". Any time a thread wants to run client code or |
| 43 | manipulate any shared state (which is anything other than its own |
| 44 | ThreadState entry), it must hold the run_sema. |
| 45 | |
| 46 | When a thread is about to block in a blocking syscall, it releases |
| 47 | run_sema, and re-takes it when it becomes runnable again (either |
| 48 | because the syscall finished, or we took a signal). |
| 49 | |
| 50 | VG_(scheduler) therefore runs in each thread. It returns only when |
| 51 | the thread is exiting, either because it exited itself, or it was |
| 52 | told to exit by another thread. |
| 53 | |
| 54 | This file is almost entirely OS-independent. The details of how |
| 55 | the OS handles threading and signalling are abstracted away and |
njn | 1277109 | 2005-06-18 02:18:04 +0000 | [diff] [blame] | 56 | implemented elsewhere. [Some of the functions have worked their |
| 57 | way back for the moment, until we do an OS port in earnest...] |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 58 | */ |
| 59 | |
njn | 1277109 | 2005-06-18 02:18:04 +0000 | [diff] [blame] | 60 | #include "valgrind.h" // for VG_USERREQ__* |
| 61 | #include "coregrind.h" // for VG_USERREQ__* |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 62 | |
njn | c7561b9 | 2005-06-19 01:24:32 +0000 | [diff] [blame] | 63 | #include "pub_core_basics.h" |
| 64 | #include "pub_core_threadstate.h" |
njn | 04e1698 | 2005-05-31 00:23:43 +0000 | [diff] [blame] | 65 | #include "pub_core_aspacemgr.h" |
njn | 36b66df | 2005-05-12 05:13:04 +0000 | [diff] [blame] | 66 | #include "pub_core_dispatch.h" |
njn | f4c5016 | 2005-06-20 14:18:12 +0000 | [diff] [blame] | 67 | #include "pub_core_errormgr.h" // For VG_(get_n_errs_found)() |
njn | 97405b2 | 2005-06-02 03:39:33 +0000 | [diff] [blame] | 68 | #include "pub_core_libcbase.h" |
njn | 132bfcc | 2005-06-04 19:16:06 +0000 | [diff] [blame] | 69 | #include "pub_core_libcassert.h" |
njn | 36a20fa | 2005-06-03 03:08:39 +0000 | [diff] [blame] | 70 | #include "pub_core_libcprint.h" |
njn | f39e9a3 | 2005-06-12 02:43:17 +0000 | [diff] [blame] | 71 | #include "pub_core_libcproc.h" |
njn | de62cbf | 2005-06-10 22:08:14 +0000 | [diff] [blame] | 72 | #include "pub_core_libcsignal.h" |
njn | f536bbb | 2005-06-13 04:21:38 +0000 | [diff] [blame] | 73 | #include "pub_core_machine.h" |
njn | af1d7df | 2005-06-11 01:31:52 +0000 | [diff] [blame] | 74 | #include "pub_core_mallocfree.h" |
njn | 2024234 | 2005-05-16 23:31:24 +0000 | [diff] [blame] | 75 | #include "pub_core_options.h" |
njn | 31513b4 | 2005-06-01 03:09:59 +0000 | [diff] [blame] | 76 | #include "pub_core_profile.h" |
njn | 717cde5 | 2005-05-10 02:47:21 +0000 | [diff] [blame] | 77 | #include "pub_core_replacemalloc.h" |
njn | 278b3d6 | 2005-05-30 23:20:51 +0000 | [diff] [blame] | 78 | #include "pub_core_scheduler.h" |
njn | 0c24647 | 2005-05-31 01:00:08 +0000 | [diff] [blame] | 79 | #include "pub_core_signals.h" |
njn | 945ed2e | 2005-06-24 03:28:30 +0000 | [diff] [blame] | 80 | #include "pub_core_stacks.h" |
njn | f4c5016 | 2005-06-20 14:18:12 +0000 | [diff] [blame] | 81 | #include "pub_core_stacktrace.h" // For VG_(get_and_pp_StackTrace)() |
njn | 9abd608 | 2005-06-17 21:31:45 +0000 | [diff] [blame] | 82 | #include "pub_core_syscall.h" |
njn | c1b0181 | 2005-06-17 22:19:06 +0000 | [diff] [blame] | 83 | #include "pub_core_syswrap.h" |
njn | 43b9a8a | 2005-05-10 04:37:01 +0000 | [diff] [blame] | 84 | #include "pub_core_tooliface.h" |
njn | f4c5016 | 2005-06-20 14:18:12 +0000 | [diff] [blame] | 85 | #include "pub_core_translate.h" // For VG_(translate)() |
njn | 8bddf58 | 2005-05-13 23:40:55 +0000 | [diff] [blame] | 86 | #include "pub_core_transtab.h" |
njn | 3c660b6 | 2005-05-13 22:18:47 +0000 | [diff] [blame] | 87 | #include "vki_unistd.h" |
njn | 278b3d6 | 2005-05-30 23:20:51 +0000 | [diff] [blame] | 88 | #include "priv_sema.h" |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 89 | |
| 90 | /* --------------------------------------------------------------------- |
| 91 | Types and globals for the scheduler. |
| 92 | ------------------------------------------------------------------ */ |
| 93 | |
/* ThreadId and ThreadState are defined elsewhere */
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 95 | |
/* Defines the thread-scheduling timeslice, in terms of the number of
   basic blocks we attempt to run each thread for.  Smaller values
   give finer interleaving but much increased scheduling overheads. */
#define SCHEDULING_QUANTUM 50000

/* If true, a fault is Valgrind-internal (ie, a bug) */
Bool VG_(my_fault) = True;

/* Counts downwards in VG_(run_innerloop).  Hitting zero forces a
   return to the scheduler (VG_TRC_INNER_COUNTERZERO). */
UInt VG_(dispatch_ctr);

/* 64-bit counter for the number of basic blocks done. */
static ULong bbs_done = 0;

/* Forwards */
static void do_client_request ( ThreadId tid );
static void scheduler_sanity ( ThreadId tid );
static void mostly_clear_thread_record ( ThreadId tid );

/* Stats: counts of scheduling events since startup, reported by
   VG_(print_scheduler_stats). */
static ULong n_scheduling_events_MINOR = 0;
static ULong n_scheduling_events_MAJOR = 0;

/* Sanity checking counts. */
static UInt sanity_fast_count = 0;
static UInt sanity_slow_count = 0;
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 122 | |
/* Print cumulative scheduler statistics: total basic-block entries
   executed, major/minor scheduling event counts, and the number of
   cheap/expensive sanity checks performed. */
void VG_(print_scheduler_stats)(void)
{
   VG_(message)(Vg_DebugMsg,
      "scheduler: %,llu jumps (bb entries).", bbs_done );
   VG_(message)(Vg_DebugMsg,
      "scheduler: %,llu/%,llu major/minor sched events.",
      n_scheduling_events_MAJOR, n_scheduling_events_MINOR);
   VG_(message)(Vg_DebugMsg,
                "   sanity: %d cheap, %d expensive checks.",
                sanity_fast_count, sanity_slow_count );
}
| 134 | |
/* CPU semaphore, so that threads can run exclusively.  Held by the
   single thread currently running client code or touching shared
   scheduler state; acquired/released in VG_(set_running) and
   VG_(set_sleeping). */
static vg_sema_t run_sema;
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 137 | |
| 138 | |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 139 | /* --------------------------------------------------------------------- |
| 140 | Helper functions for the scheduler. |
| 141 | ------------------------------------------------------------------ */ |
| 142 | |
/* Emit a one-line scheduler trace message for thread 'tid'
   (only called when --trace-sched is in effect). */
static
void print_sched_event ( ThreadId tid, Char* what )
{
   VG_(message)(Vg_DebugMsg, " SCHED[%d]: %s", tid, what );
}
| 148 | |
/* Map a VEX_TRC_* / VG_TRC_* scheduler event code to a short
   human-readable name, for trace output. */
static
HChar* name_of_sched_event ( UInt event )
{
   switch (event) {
      case VEX_TRC_JMP_SYS_SYSCALL:  return "SYSCALL";
      case VEX_TRC_JMP_SYS_INT32:    return "INT32";
      case VEX_TRC_JMP_SYS_INT128:   return "INT128";
      case VEX_TRC_JMP_SYS_SYSENTER: return "SYSENTER";
      case VEX_TRC_JMP_CLIENTREQ:    return "CLIENTREQ";
      case VEX_TRC_JMP_YIELD:        return "YIELD";
      case VEX_TRC_JMP_NODECODE:     return "NODECODE";
      case VEX_TRC_JMP_MAPFAIL:      return "MAPFAIL";
      case VEX_TRC_JMP_EMWARN:       return "EMWARN";
      case VEX_TRC_JMP_TINVAL:       return "TINVAL";
      case VG_TRC_INVARIANT_FAILED:  return "INVFAILED";
      case VG_TRC_INNER_COUNTERZERO: return "COUNTERZERO";
      case VG_TRC_INNER_FASTMISS:    return "FASTMISS";
      case VG_TRC_FAULT_SIGNAL:      return "FAULTSIGNAL";
      default:                       return "??UNKNOWN??";
   }
}
| 170 | |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 171 | /* Allocate a completely empty ThreadState record. */ |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 172 | ThreadId VG_(alloc_ThreadState) ( void ) |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 173 | { |
| 174 | Int i; |
sewardj | 6072c36 | 2002-04-19 14:40:57 +0000 | [diff] [blame] | 175 | for (i = 1; i < VG_N_THREADS; i++) { |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 176 | if (VG_(threads)[i].status == VgTs_Empty) { |
| 177 | VG_(threads)[i].status = VgTs_Init; |
| 178 | VG_(threads)[i].exitreason = VgSrc_None; |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 179 | return i; |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 180 | } |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 181 | } |
| 182 | VG_(printf)("vg_alloc_ThreadState: no free slots available\n"); |
| 183 | VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n"); |
njn | e427a66 | 2002-10-02 11:08:25 +0000 | [diff] [blame] | 184 | VG_(core_panic)("VG_N_THREADS is too low"); |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 185 | /*NOTREACHED*/ |
| 186 | } |
| 187 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 188 | /* |
| 189 | Mark a thread as Runnable. This will block until the run_sema is |
| 190 | available, so that we get exclusive access to all the shared |
| 191 | structures and the CPU. Up until we get the sema, we must not |
| 192 | touch any shared state. |
| 193 | |
| 194 | When this returns, we'll actually be running. |
| 195 | */ |
| 196 | void VG_(set_running)(ThreadId tid) |
| 197 | { |
| 198 | ThreadState *tst = VG_(get_ThreadState)(tid); |
| 199 | |
| 200 | vg_assert(tst->status != VgTs_Runnable); |
| 201 | |
| 202 | tst->status = VgTs_Runnable; |
| 203 | |
sewardj | 7eb7c58 | 2005-06-23 01:02:53 +0000 | [diff] [blame] | 204 | ML_(sema_down)(&run_sema); |
njn | c7561b9 | 2005-06-19 01:24:32 +0000 | [diff] [blame] | 205 | if (VG_(running_tid) != VG_INVALID_THREADID) |
| 206 | VG_(printf)("tid %d found %d running\n", tid, VG_(running_tid)); |
| 207 | vg_assert(VG_(running_tid) == VG_INVALID_THREADID); |
| 208 | VG_(running_tid) = tid; |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 209 | |
tom | e0008d6 | 2005-11-10 15:02:42 +0000 | [diff] [blame] | 210 | VG_(unknown_SP_update)(VG_(get_SP(tid)), VG_(get_SP(tid))); |
| 211 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 212 | if (VG_(clo_trace_sched)) |
| 213 | print_sched_event(tid, "now running"); |
tom | deca43f | 2005-07-27 23:04:28 +0000 | [diff] [blame] | 214 | |
| 215 | // While thre modeling is disable, issue thread_run events here |
| 216 | // VG_(tm_thread_switchto)(tid); |
| 217 | VG_TRACK( thread_run, tid ); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 218 | } |
| 219 | |
/*
   Set a thread into a sleeping state, and give up exclusive access to
   the CPU.  On return, the thread must be prepared to block until it
   is ready to run again (generally this means blocking in a syscall,
   but it may mean that we remain in a Runnable state and we're just
   yielding the CPU to another thread).
*/
void VG_(set_sleeping)(ThreadId tid, ThreadStatus sleepstate)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   /* Only the currently-running thread may put itself to sleep, and
      only into one of the two voluntary sleep states. */
   vg_assert(tst->status == VgTs_Runnable);

   vg_assert(sleepstate == VgTs_WaitSys ||
             sleepstate == VgTs_Yielding);

   tst->status = sleepstate;

   /* Clear the running-thread marker BEFORE releasing the sema, so
      the next thread to acquire it sees a consistent state. */
   vg_assert(VG_(running_tid) == tid);
   VG_(running_tid) = VG_INVALID_THREADID;

   /* Release the run_sema; this will reschedule any runnable
      thread. */
   ML_(sema_up)(&run_sema);

   if (VG_(clo_trace_sched)) {
      Char buf[50];
      VG_(sprintf)(buf, "now sleeping in state %s",
                   VG_(name_of_ThreadStatus)(sleepstate));
      print_sched_event(tid, buf);
   }
}
| 252 | |
/* Clear out the ThreadState and release the semaphore. Leaves the
   ThreadState in VgTs_Zombie state, so that it doesn't get
   reallocated until the caller is really ready. */
void VG_(exit_thread)(ThreadId tid)
{
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(VG_(is_exiting)(tid));

   /* Wipe the record (this leaves the slot in VgTs_Zombie) while we
      still hold the run_sema. */
   mostly_clear_thread_record(tid);
   VG_(running_tid) = VG_INVALID_THREADID;

   /* There should still be a valid exitreason for this thread */
   vg_assert(VG_(threads)[tid].exitreason != VgSrc_None);

   /* Hand the CPU over to some other runnable thread. */
   ML_(sema_up)(&run_sema);
}
| 270 | |
/* Kill a thread.  This interrupts whatever a thread is doing, and
   makes it exit ASAP.  This does not set the exitreason or
   exitcode. */
void VG_(kill_thread)(ThreadId tid)
{
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(!VG_(is_running_thread)(tid));
   vg_assert(VG_(is_exiting)(tid));

   /* If the target is blocked in a syscall, interrupt it by sending
      VG_SIGVGKILL straight to its kernel thread (lwp).  Otherwise it
      will notice the exit request itself at its next scheduling
      point. */
   if (VG_(threads)[tid].status == VgTs_WaitSys) {
      if (VG_(clo_trace_signals))
         VG_(message)(Vg_DebugMsg, "kill_thread zaps tid %d lwp %d",
                      tid, VG_(threads)[tid].os_state.lwpid);
      VG_(tkill)(VG_(threads)[tid].os_state.lwpid, VG_SIGVGKILL);
   }
}
| 287 | |
/*
   Yield the CPU for a short time to let some other thread run.
*/
void VG_(vg_yield)(void)
{
   struct vki_timespec ts = { 0, 1 };
   ThreadId tid = VG_(running_tid);

   vg_assert(tid != VG_INVALID_THREADID);
   vg_assert(VG_(threads)[tid].os_state.lwpid == VG_(gettid)());

   /* Drop the run_sema so another runnable thread can take the CPU. */
   VG_(set_sleeping)(tid, VgTs_Yielding);

   //VG_(printf)("tid %d yielding EIP=%p\n", tid, VG_(threads)[tid].arch.m_eip);

   /*
      Tell the kernel we're yielding.  The nanosleep branch is a
      deliberately-disabled alternative; 'ts' is only used by it.
   */
   if (1)
      VG_(do_syscall0)(__NR_sched_yield);
   else
      VG_(nanosleep)(&ts);

   /* Reacquire the CPU; blocks until the run_sema is free again. */
   VG_(set_running)(tid);
}
| 313 | |
| 314 | |
njn | 9fc3112 | 2005-05-11 18:48:33 +0000 | [diff] [blame] | 315 | /* Set the standard set of blocked signals, used wheneever we're not |
| 316 | running a client syscall. */ |
| 317 | static void block_signals(ThreadId tid) |
| 318 | { |
| 319 | vki_sigset_t mask; |
| 320 | |
| 321 | VG_(sigfillset)(&mask); |
| 322 | |
| 323 | /* Don't block these because they're synchronous */ |
| 324 | VG_(sigdelset)(&mask, VKI_SIGSEGV); |
| 325 | VG_(sigdelset)(&mask, VKI_SIGBUS); |
| 326 | VG_(sigdelset)(&mask, VKI_SIGFPE); |
| 327 | VG_(sigdelset)(&mask, VKI_SIGILL); |
| 328 | VG_(sigdelset)(&mask, VKI_SIGTRAP); |
| 329 | |
| 330 | /* Can't block these anyway */ |
| 331 | VG_(sigdelset)(&mask, VKI_SIGSTOP); |
| 332 | VG_(sigdelset)(&mask, VKI_SIGKILL); |
| 333 | |
njn | 9fc3112 | 2005-05-11 18:48:33 +0000 | [diff] [blame] | 334 | VG_(sigprocmask)(VKI_SIG_SETMASK, &mask, NULL); |
| 335 | } |
| 336 | |
/* Use libc setjmp/longjmp.  longjmp must not restore signal mask
   state, but does need to pass "val" through.

   Runs 'stmt' with the thread's sched_jmpbuf armed so a signal
   handler can longjmp back out of client code; 'jumped' is 0 on the
   normal path and nonzero after a longjmp.  The jmpbuf_valid flag is
   asserted clear on entry, set around 'stmt', and cleared again on
   exit. */
#define SCHEDSETJMP(tid, jumped, stmt)					\
   do {									\
      ThreadState * volatile _qq_tst = VG_(get_ThreadState)(tid);	\
									\
      (jumped) = __builtin_setjmp(_qq_tst->sched_jmpbuf);		\
      if ((jumped) == 0) {						\
	 vg_assert(!_qq_tst->sched_jmpbuf_valid);			\
	 _qq_tst->sched_jmpbuf_valid = True;				\
	 stmt;								\
      }	else if (VG_(clo_trace_sched))					\
	 VG_(printf)("SCHEDSETJMP(line %d) tid %d, jumped=%d\n", __LINE__, tid, jumped); \
      vg_assert(_qq_tst->sched_jmpbuf_valid);				\
      _qq_tst->sched_jmpbuf_valid = False;				\
   } while(0)
| 353 | |
/* Run the thread tid for a while, and return a VG_TRC_* value to the
   scheduler indicating what happened. */
static
UInt run_thread_for_a_while ( ThreadId tid )
{
   /* Everything live across the setjmp must be volatile, so the
      values survive a longjmp back from the signal handler. */
   volatile Bool jumped;
   volatile ThreadState *tst = VG_(get_ThreadState)(tid);

   volatile UInt trc = 0;
   volatile Int  dispatch_ctr_SAVED = VG_(dispatch_ctr);
   volatile Int  done_this_time;

   /* For paranoia purposes only */
   volatile Addr a_vex    = (Addr) & VG_(threads)[tid].arch.vex;
   volatile Addr a_vexsh  = (Addr) & VG_(threads)[tid].arch.vex_shadow;
   volatile Addr a_spill  = (Addr) & VG_(threads)[tid].arch.vex_spill;
   volatile UInt sz_vex   = (UInt) sizeof VG_(threads)[tid].arch.vex;
   volatile UInt sz_vexsh = (UInt) sizeof VG_(threads)[tid].arch.vex_shadow;
   volatile UInt sz_spill = (UInt) sizeof VG_(threads)[tid].arch.vex_spill;

   /* Paranoia */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(!VG_(is_exiting)(tid));

   /* Even more paranoia.  Check that what we have matches
      Vex's guest state layout requirements. */
   if (0)
   VG_(printf)("%p %d %p %d %p %d\n",
               (void*)a_vex, sz_vex, (void*)a_vexsh, sz_vexsh,
               (void*)a_spill, sz_spill );

   /* Alignment requirements on the guest state, shadow state and
      spill area, as assumed by the innerloop dispatcher. */
   vg_assert(VG_IS_8_ALIGNED(sz_vex));
   vg_assert(VG_IS_8_ALIGNED(sz_vexsh));
   vg_assert(VG_IS_16_ALIGNED(sz_spill));

   vg_assert(VG_IS_4_ALIGNED(a_vex));
   vg_assert(VG_IS_4_ALIGNED(a_vexsh));
   vg_assert(VG_IS_4_ALIGNED(a_spill));

   /* Layout: shadow state immediately follows the guest state, and
      the spill area follows both. */
   vg_assert(sz_vex == sz_vexsh);
   vg_assert(a_vex + sz_vex == a_vexsh);

   vg_assert(sz_spill == LibVEX_N_SPILL_BYTES);
   vg_assert(a_vex + 2 * sz_vex == a_spill);

   VGP_PUSHCC(VgpRun);

#  if defined(VGA_ppc32)
   /* This is necessary due to the hacky way vex models reservations
      on ppc.  It's really quite incorrect for each thread to have its
      own reservation flag/address, since it's really something that
      all threads share (that's the whole point).  But having shared
      guest state is something we can't model with Vex.  However, as
      per PaulM's 2.4.0ppc, the reservation is modelled using a
      reservation flag which is cleared at each context switch.  So it
      is indeed possible to get away with a per thread-reservation if
      the thread's reservation is cleared before running it.

      This should be abstractified and lifted out.
   */
   { Int i;
   /* Clear any existing reservation. Be paranoid and clear them all. */
   for (i = 0; i < VG_N_THREADS; i++)
      VG_(threads)[i].arch.vex.guest_RESVN = 0;
   }

   /* ppc guest_state vector regs must be 16byte aligned for loads/stores */
   vg_assert(VG_IS_16_ALIGNED(VG_(threads)[tid].arch.vex.guest_VR0));
   vg_assert(VG_IS_16_ALIGNED(VG_(threads)[tid].arch.vex_shadow.guest_VR0));
#  endif

   /* there should be no undealt-with signals */
   //vg_assert(VG_(threads)[tid].siginfo.si_signo == 0);

   //VG_(printf)("running EIP = %p ESP=%p\n", VG_(threads)[tid].arch.m_eip, VG_(threads)[tid].arch.m_esp);

   /* Faults taken from here until my_fault is restored belong to the
      client, not to Valgrind itself. */
   vg_assert(VG_(my_fault));
   VG_(my_fault) = False;

   /* Actually run the client, with the jmpbuf armed so a fault in
      client code longjmps back here with jumped != 0. */
   SCHEDSETJMP(
      tid,
      jumped,
      trc = (UInt)VG_(run_innerloop)( (void*)&tst->arch.vex,
                                      VG_(clo_profile_flags) > 0 ? 1 : 0 )
   );

   //nextEIP = tst->arch.m_eip;
   //if (nextEIP >= VG_(client_end))
   //   VG_(printf)("trc=%d jump to %p from %p\n",
   //               trc, nextEIP, EIP);

   VG_(my_fault) = True;

   if (jumped) {
      /* We get here if the client took a fault, which caused our
         signal handler to longjmp. */
      vg_assert(trc == 0);
      trc = VG_TRC_FAULT_SIGNAL;
      block_signals(tid);
   }

   /* dispatch_ctr counts down; the difference is how many basic
      blocks the client executed this timeslice. */
   done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 0;

   vg_assert(done_this_time >= 0);
   bbs_done += (ULong)done_this_time;

   VGP_POPCC(VgpRun);
   return trc;
}
| 465 | |
| 466 | |
njn | 8aa3585 | 2005-06-10 22:59:56 +0000 | [diff] [blame] | 467 | static void os_state_clear(ThreadState *tst) |
| 468 | { |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 469 | tst->os_state.lwpid = 0; |
njn | 8aa3585 | 2005-06-10 22:59:56 +0000 | [diff] [blame] | 470 | tst->os_state.threadgroup = 0; |
| 471 | } |
| 472 | |
| 473 | static void os_state_init(ThreadState *tst) |
| 474 | { |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 475 | tst->os_state.valgrind_stack_base = 0; |
| 476 | tst->os_state.valgrind_stack_init_SP = 0; |
njn | 8aa3585 | 2005-06-10 22:59:56 +0000 | [diff] [blame] | 477 | os_state_clear(tst); |
| 478 | } |
| 479 | |
/* Reset most of the ThreadState record for 'tid' back to a pristine
   state, leaving the slot in VgTs_Zombie so it is not reallocated
   until the caller is done with the thread's stack. */
static
void mostly_clear_thread_record ( ThreadId tid )
{
   vki_sigset_t savedmask;

   /* NOTE(review): if ThreadId is unsigned, 'tid >= 0' is vacuously
      true — confirm against pub_core_threadstate.h. */
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   VG_(cleanup_thread)(&VG_(threads)[tid].arch);
   VG_(threads)[tid].tid = tid;

   /* Leave the thread in Zombie, so that it doesn't get reallocated
      until the caller is finally done with the thread stack. */
   VG_(threads)[tid].status = VgTs_Zombie;

   /* Empty both the live and temporary signal masks. */
   VG_(sigemptyset)(&VG_(threads)[tid].sig_mask);
   VG_(sigemptyset)(&VG_(threads)[tid].tmp_sig_mask);

   os_state_clear(&VG_(threads)[tid]);

   /* start with no altstack */
   VG_(threads)[tid].altstack.ss_sp = (void *)0xdeadbeef;
   VG_(threads)[tid].altstack.ss_size = 0;
   VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;

   /* Drop any signals still queued for this thread. */
   VG_(clear_out_queued_signals)(tid, &savedmask);

   VG_(threads)[tid].sched_jmpbuf_valid = False;
}
| 507 | |
/*
   Called in the child after fork.  If the parent has multiple
   threads, then we've inhereted a VG_(threads) array describing them,
   but only the thread which called fork() is actually alive in the
   child.  This functions needs to clean up all those other thread
   structures.

   Whichever tid in the parent which called fork() becomes the
   master_tid in the child.  That's because the only living slot in
   VG_(threads) in the child after fork is VG_(threads)[tid], and it
   would be too hard to try to re-number the thread and relocate the
   thread state down to VG_(threads)[1].

   This function also needs to reinitialize the run_sema, since
   otherwise we may end up sharing its state with the parent, which
   would be deeply confusing.
*/
static void sched_fork_cleanup(ThreadId me)
{
   ThreadId tid;
   vg_assert(VG_(running_tid) == me);

   /* The child has fresh kernel thread/process ids. */
   VG_(threads)[me].os_state.lwpid = VG_(gettid)();
   VG_(threads)[me].os_state.threadgroup = VG_(getpid)();

   /* clear out all the unused thread slots */
   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (tid != me) {
         mostly_clear_thread_record(tid);
         VG_(threads)[tid].status = VgTs_Empty;
         VG_(clear_syscallInfo)(tid);
      }
   }

   /* re-init and take the sema: deinit+init discards any state
      inherited from the parent, then we immediately acquire it since
      we are the (only) running thread. */
   ML_(sema_deinit)(&run_sema);
   ML_(sema_init)(&run_sema);
   ML_(sema_down)(&run_sema);
}
sewardj | 20917d8 | 2002-05-28 01:36:45 +0000 | [diff] [blame] | 547 | |
jsgf | 855d93d | 2003-10-13 22:26:55 +0000 | [diff] [blame] | 548 | |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 549 | /* Initialise the scheduler. Create a single "main" thread ready to |
sewardj | 2a99cf6 | 2004-11-24 10:44:19 +0000 | [diff] [blame] | 550 | run, with special ThreadId of one. This is called at startup. The |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 551 | caller subsequently initialises the guest state components of this |
| 552 | main thread, thread 1. |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 553 | */ |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 554 | void VG_(scheduler_init) ( Addr clstack_end, SizeT clstack_size ) |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 555 | { |
thughes | c37184f | 2004-09-11 14:16:57 +0000 | [diff] [blame] | 556 | Int i; |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 557 | ThreadId tid_main; |
| 558 | |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 559 | vg_assert(VG_IS_PAGE_ALIGNED(clstack_end+1)); |
| 560 | vg_assert(VG_IS_PAGE_ALIGNED(clstack_size)); |
| 561 | |
sewardj | 7eb7c58 | 2005-06-23 01:02:53 +0000 | [diff] [blame] | 562 | ML_(sema_init)(&run_sema); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 563 | |
sewardj | 6072c36 | 2002-04-19 14:40:57 +0000 | [diff] [blame] | 564 | for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) { |
sewardj | c793fd3 | 2005-05-31 17:24:49 +0000 | [diff] [blame] | 565 | |
| 566 | /* Paranoia .. completely zero it out. */ |
| 567 | VG_(memset)( & VG_(threads)[i], 0, sizeof( VG_(threads)[i] ) ); |
| 568 | |
| 569 | VG_(threads)[i].sig_queue = NULL; |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 570 | |
njn | 8aa3585 | 2005-06-10 22:59:56 +0000 | [diff] [blame] | 571 | os_state_init(&VG_(threads)[i]); |
sewardj | 20917d8 | 2002-05-28 01:36:45 +0000 | [diff] [blame] | 572 | mostly_clear_thread_record(i); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 573 | |
njn | 50ba34e | 2005-04-04 02:41:42 +0000 | [diff] [blame] | 574 | VG_(threads)[i].status = VgTs_Empty; |
| 575 | VG_(threads)[i].client_stack_szB = 0; |
| 576 | VG_(threads)[i].client_stack_highest_word = (Addr)NULL; |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 577 | } |
| 578 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 579 | tid_main = VG_(alloc_ThreadState)(); |
sewardj | 5f07b66 | 2002-04-23 16:52:51 +0000 | [diff] [blame] | 580 | |
njn | 50ba34e | 2005-04-04 02:41:42 +0000 | [diff] [blame] | 581 | VG_(threads)[tid_main].client_stack_highest_word |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 582 | = clstack_end + 1 - sizeof(UWord); |
| 583 | VG_(threads)[tid_main].client_stack_szB |
| 584 | = clstack_size; |
sewardj | bf290b9 | 2002-05-01 02:28:01 +0000 | [diff] [blame] | 585 | |
njn | 310ed28 | 2005-06-26 15:11:37 +0000 | [diff] [blame] | 586 | VG_(atfork_child)(sched_fork_cleanup); |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 587 | } |
| 588 | |
| 589 | |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 590 | /* --------------------------------------------------------------------- |
| 591 | The scheduler proper. |
| 592 | ------------------------------------------------------------------ */ |
| 593 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 594 | static void handle_tt_miss ( ThreadId tid ) |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 595 | { |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 596 | Bool found; |
njn | f536bbb | 2005-06-13 04:21:38 +0000 | [diff] [blame] | 597 | Addr ip = VG_(get_IP)(tid); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 598 | |
| 599 | /* Trivial event. Miss in the fast-cache. Do a full |
| 600 | lookup for it. */ |
njn | f536bbb | 2005-06-13 04:21:38 +0000 | [diff] [blame] | 601 | found = VG_(search_transtab)( NULL, ip, True/*upd_fast_cache*/ ); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 602 | if (!found) { |
| 603 | /* Not found; we need to request a translation. */ |
njn | 394213a | 2005-06-19 18:38:24 +0000 | [diff] [blame] | 604 | if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/, bbs_done )) { |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 605 | found = VG_(search_transtab)( NULL, ip, True ); |
njn | 50ae1a7 | 2005-04-08 23:28:23 +0000 | [diff] [blame] | 606 | vg_assert2(found, "VG_TRC_INNER_FASTMISS: missing tt_fast entry"); |
| 607 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 608 | } else { |
| 609 | // If VG_(translate)() fails, it's because it had to throw a |
| 610 | // signal because the client jumped to a bad address. That |
| 611 | // means that either a signal has been set up for delivery, |
| 612 | // or the thread has been marked for termination. Either |
| 613 | // way, we just need to go back into the scheduler loop. |
| 614 | } |
| 615 | } |
| 616 | } |
| 617 | |
| 618 | static void handle_syscall(ThreadId tid) |
| 619 | { |
| 620 | ThreadState *tst = VG_(get_ThreadState)(tid); |
| 621 | Bool jumped; |
| 622 | |
| 623 | /* Syscall may or may not block; either way, it will be |
| 624 | complete by the time this call returns, and we'll be |
| 625 | runnable again. We could take a signal while the |
| 626 | syscall runs. */ |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 627 | |
| 628 | if (VG_(clo_sanity_level >= 3)) |
| 629 | VG_(am_do_sync_check)("(BEFORE SYSCALL)",__FILE__,__LINE__); |
| 630 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 631 | SCHEDSETJMP(tid, jumped, VG_(client_syscall)(tid)); |
| 632 | |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 633 | if (VG_(clo_sanity_level >= 3)) |
| 634 | VG_(am_do_sync_check)("(AFTER SYSCALL)",__FILE__,__LINE__); |
| 635 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 636 | if (!VG_(is_running_thread)(tid)) |
njn | c7561b9 | 2005-06-19 01:24:32 +0000 | [diff] [blame] | 637 | VG_(printf)("tid %d not running; VG_(running_tid)=%d, tid %d status %d\n", |
| 638 | tid, VG_(running_tid), tid, tst->status); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 639 | vg_assert(VG_(is_running_thread)(tid)); |
| 640 | |
| 641 | if (jumped) { |
njn | 9fc3112 | 2005-05-11 18:48:33 +0000 | [diff] [blame] | 642 | block_signals(tid); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 643 | VG_(poll_signals)(tid); |
| 644 | } |
| 645 | } |
| 646 | |
| 647 | /* |
| 648 | Run a thread until it wants to exit. |
| 649 | |
| 650 | We assume that the caller has already called VG_(set_running) for |
| 651 | us, so we own the VCPU. Also, all signals are blocked. |
| 652 | */ |
| 653 | VgSchedReturnCode VG_(scheduler) ( ThreadId tid ) |
| 654 | { |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 655 | UInt trc; |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 656 | ThreadState *tst = VG_(get_ThreadState)(tid); |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 657 | |
sewardj | c24be7a | 2005-03-15 01:40:12 +0000 | [diff] [blame] | 658 | if (VG_(clo_trace_sched)) |
| 659 | print_sched_event(tid, "entering VG_(scheduler)"); |
| 660 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 661 | VGP_PUSHCC(VgpSched); |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 662 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 663 | /* set the proper running signal mask */ |
njn | 9fc3112 | 2005-05-11 18:48:33 +0000 | [diff] [blame] | 664 | block_signals(tid); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 665 | |
| 666 | vg_assert(VG_(is_running_thread)(tid)); |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 667 | |
njn | 14319cc | 2005-03-13 06:26:22 +0000 | [diff] [blame] | 668 | VG_(dispatch_ctr) = SCHEDULING_QUANTUM + 1; |
sewardj | 6072c36 | 2002-04-19 14:40:57 +0000 | [diff] [blame] | 669 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 670 | while(!VG_(is_exiting)(tid)) { |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 671 | if (VG_(dispatch_ctr) == 1) { |
| 672 | /* Our slice is done, so yield the CPU to another thread. This |
| 673 | doesn't sleep between sleeping and running, since that would |
| 674 | take too much time. */ |
| 675 | VG_(set_sleeping)(tid, VgTs_Yielding); |
| 676 | /* nothing */ |
| 677 | VG_(set_running)(tid); |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 678 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 679 | /* OK, do some relatively expensive housekeeping stuff */ |
| 680 | scheduler_sanity(tid); |
| 681 | VG_(sanity_check_general)(False); |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 682 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 683 | /* Look for any pending signals for this thread, and set them up |
| 684 | for delivery */ |
| 685 | VG_(poll_signals)(tid); |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 686 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 687 | if (VG_(is_exiting)(tid)) |
| 688 | break; /* poll_signals picked up a fatal signal */ |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 689 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 690 | /* For stats purposes only. */ |
| 691 | n_scheduling_events_MAJOR++; |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 692 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 693 | /* Figure out how many bbs to ask vg_run_innerloop to do. Note |
| 694 | that it decrements the counter before testing it for zero, so |
| 695 | that if tst->dispatch_ctr is set to N you get at most N-1 |
| 696 | iterations. Also this means that tst->dispatch_ctr must |
| 697 | exceed zero before entering the innerloop. Also also, the |
| 698 | decrement is done before the bb is actually run, so you |
| 699 | always get at least one decrement even if nothing happens. */ |
njn | 14319cc | 2005-03-13 06:26:22 +0000 | [diff] [blame] | 700 | VG_(dispatch_ctr) = SCHEDULING_QUANTUM + 1; |
jsgf | 855d93d | 2003-10-13 22:26:55 +0000 | [diff] [blame] | 701 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 702 | /* paranoia ... */ |
| 703 | vg_assert(tst->tid == tid); |
| 704 | vg_assert(tst->os_state.lwpid == VG_(gettid)()); |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 705 | } |
| 706 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 707 | /* For stats purposes only. */ |
| 708 | n_scheduling_events_MINOR++; |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 709 | |
| 710 | if (0) |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 711 | VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs", |
| 712 | tid, VG_(dispatch_ctr) - 1 ); |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 713 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 714 | trc = run_thread_for_a_while ( tid ); |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 715 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 716 | if (VG_(clo_trace_sched) && VG_(clo_verbosity) > 2) { |
| 717 | Char buf[50]; |
| 718 | VG_(sprintf)(buf, "TRC: %s", name_of_sched_event(trc)); |
| 719 | print_sched_event(tid, buf); |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 720 | } |
| 721 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 722 | switch(trc) { |
| 723 | case VG_TRC_INNER_FASTMISS: |
| 724 | vg_assert(VG_(dispatch_ctr) > 1); |
| 725 | handle_tt_miss(tid); |
| 726 | break; |
| 727 | |
| 728 | case VEX_TRC_JMP_CLIENTREQ: |
| 729 | do_client_request(tid); |
| 730 | break; |
sewardj | a0fef1b | 2005-11-03 13:46:30 +0000 | [diff] [blame] | 731 | |
| 732 | case VEX_TRC_JMP_SYS_INT128: /* x86-linux */ |
| 733 | case VEX_TRC_JMP_SYS_SYSCALL: /* amd64-linux, ppc32-linux */ |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 734 | handle_syscall(tid); |
| 735 | if (VG_(clo_sanity_level) > 2) |
| 736 | VG_(sanity_check_general)(True); /* sanity-check every syscall */ |
| 737 | break; |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 738 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 739 | case VEX_TRC_JMP_YIELD: |
| 740 | /* Explicit yield, because this thread is in a spin-lock |
sewardj | 3fc7575 | 2005-03-12 15:16:31 +0000 | [diff] [blame] | 741 | or something. Only let the thread run for a short while |
| 742 | longer. Because swapping to another thread is expensive, |
| 743 | we're prepared to let this thread eat a little more CPU |
| 744 | before swapping to another. That means that short term |
| 745 | spins waiting for hardware to poke memory won't cause a |
| 746 | thread swap. */ |
| 747 | if (VG_(dispatch_ctr) > 100) |
| 748 | VG_(dispatch_ctr) = 100; |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 749 | break; |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 750 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 751 | case VG_TRC_INNER_COUNTERZERO: |
| 752 | /* Timeslice is out. Let a new thread be scheduled. */ |
| 753 | vg_assert(VG_(dispatch_ctr) == 1); |
| 754 | break; |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 755 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 756 | case VG_TRC_FAULT_SIGNAL: |
| 757 | /* Everything should be set up (either we're exiting, or |
| 758 | about to start in a signal handler). */ |
| 759 | break; |
sewardj | 9d1b5d3 | 2002-04-17 19:40:49 +0000 | [diff] [blame] | 760 | |
sewardj | 07bdc5e | 2005-03-11 13:19:47 +0000 | [diff] [blame] | 761 | case VEX_TRC_JMP_MAPFAIL: |
| 762 | /* Failure of arch-specific address translation (x86/amd64 |
| 763 | segment override use) */ |
| 764 | /* jrs 2005 03 11: is this correct? */ |
| 765 | VG_(synth_fault)(tid); |
| 766 | break; |
| 767 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 768 | case VEX_TRC_JMP_EMWARN: { |
| 769 | static Int counts[EmWarn_NUMBER]; |
| 770 | static Bool counts_initted = False; |
| 771 | VexEmWarn ew; |
| 772 | HChar* what; |
| 773 | Bool show; |
| 774 | Int q; |
| 775 | if (!counts_initted) { |
| 776 | counts_initted = True; |
| 777 | for (q = 0; q < EmWarn_NUMBER; q++) |
| 778 | counts[q] = 0; |
| 779 | } |
| 780 | ew = (VexEmWarn)VG_(threads)[tid].arch.vex.guest_EMWARN; |
| 781 | what = (ew < 0 || ew >= EmWarn_NUMBER) |
| 782 | ? "unknown (?!)" |
| 783 | : LibVEX_EmWarn_string(ew); |
| 784 | show = (ew < 0 || ew >= EmWarn_NUMBER) |
| 785 | ? True |
| 786 | : counts[ew]++ < 3; |
sewardj | b1131a8 | 2005-03-19 15:12:21 +0000 | [diff] [blame] | 787 | if (show && VG_(clo_show_emwarns)) { |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 788 | VG_(message)( Vg_UserMsg, |
| 789 | "Emulation warning: unsupported action:"); |
| 790 | VG_(message)( Vg_UserMsg, " %s", what); |
njn | d01fef7 | 2005-03-25 23:35:48 +0000 | [diff] [blame] | 791 | VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) ); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 792 | } |
| 793 | break; |
| 794 | } |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 795 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 796 | case VEX_TRC_JMP_NODECODE: |
njn | d502136 | 2005-09-29 00:35:18 +0000 | [diff] [blame] | 797 | #define M(a) VG_(message)(Vg_UserMsg, a); |
njn | 7cf6658 | 2005-10-15 17:18:08 +0000 | [diff] [blame] | 798 | M("Your program just tried to execute an instruction that Valgrind" ); |
| 799 | M("did not recognise. There are two possible reasons for this." ); |
| 800 | M("1. Your program has a bug and erroneously jumped to a non-code" ); |
| 801 | M(" location. If you are running Memcheck and you just saw a" ); |
| 802 | M(" warning about a bad jump, it's probably your program's fault."); |
| 803 | M("2. The instruction is legitimate but Valgrind doesn't handle it,"); |
| 804 | M(" i.e. it's Valgrind's fault. If you think this is the case or"); |
| 805 | M(" you are not sure, please let us know." ); |
| 806 | M("Either way, Valgrind will now raise a SIGILL signal which will" ); |
| 807 | M("probably kill your program." ); |
njn | d502136 | 2005-09-29 00:35:18 +0000 | [diff] [blame] | 808 | #undef M |
njn | f536bbb | 2005-06-13 04:21:38 +0000 | [diff] [blame] | 809 | VG_(synth_sigill)(tid, VG_(get_IP)(tid)); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 810 | break; |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 811 | |
cerion | 85665ca | 2005-06-20 15:51:07 +0000 | [diff] [blame] | 812 | case VEX_TRC_JMP_TINVAL: |
cerion | 85665ca | 2005-06-20 15:51:07 +0000 | [diff] [blame] | 813 | VG_(discard_translations)( |
| 814 | (Addr64)VG_(threads)[tid].arch.vex.guest_TISTART, |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 815 | VG_(threads)[tid].arch.vex.guest_TILEN, |
| 816 | "scheduler(VEX_TRC_JMP_TINVAL)" |
sewardj | 487ac70 | 2005-06-21 12:52:38 +0000 | [diff] [blame] | 817 | ); |
cerion | 85665ca | 2005-06-20 15:51:07 +0000 | [diff] [blame] | 818 | if (0) |
| 819 | VG_(printf)("dump translations done.\n"); |
cerion | 85665ca | 2005-06-20 15:51:07 +0000 | [diff] [blame] | 820 | break; |
| 821 | |
sewardj | e3a384b | 2005-07-29 08:51:34 +0000 | [diff] [blame] | 822 | case VG_TRC_INVARIANT_FAILED: |
| 823 | /* This typically happens if, after running generated code, |
| 824 | it is detected that host CPU settings (eg, FPU/Vector |
| 825 | control words) are not as they should be. Vex's code |
| 826 | generation specifies the state such control words should |
| 827 | be in on entry to Vex-generated code, and they should be |
| 828 | unchanged on exit from it. Failure of this assertion |
| 829 | usually means a bug in Vex's code generation. */ |
| 830 | vg_assert2(0, "VG_(scheduler), phase 3: " |
| 831 | "run_innerloop detected host " |
| 832 | "state invariant failure", trc); |
| 833 | |
sewardj | a0fef1b | 2005-11-03 13:46:30 +0000 | [diff] [blame] | 834 | case VEX_TRC_JMP_SYS_SYSENTER: |
sewardj | 5438a01 | 2005-08-07 14:49:27 +0000 | [diff] [blame] | 835 | /* Do whatever simulation is appropriate for an x86 sysenter |
| 836 | instruction. Note that it is critical to set this thread's |
| 837 | guest_EIP to point at the code to execute after the |
| 838 | sysenter, since Vex-generated code will not have set it -- |
| 839 | vex does not know what it should be. Vex sets the next |
| 840 | address to zero, so if you don't guest_EIP, the thread will |
| 841 | jump to zero afterwards and probably die as a result. */ |
| 842 | # if defined(VGA_x86) |
| 843 | //FIXME: VG_(threads)[tid].arch.vex.guest_EIP = .... |
| 844 | //handle_sysenter_x86(tid); |
| 845 | vg_assert2(0, "VG_(scheduler), phase 3: " |
| 846 | "sysenter_x86 on not yet implemented"); |
| 847 | # else |
| 848 | vg_assert2(0, "VG_(scheduler), phase 3: " |
| 849 | "sysenter_x86 on non-x86 platform?!?!"); |
| 850 | # endif |
| 851 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 852 | default: |
njn | 50ae1a7 | 2005-04-08 23:28:23 +0000 | [diff] [blame] | 853 | vg_assert2(0, "VG_(scheduler), phase 3: " |
| 854 | "unexpected thread return code (%u)", trc); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 855 | /* NOTREACHED */ |
| 856 | break; |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 857 | |
| 858 | } /* switch (trc) */ |
nethercote | 238a3c3 | 2004-08-09 13:13:31 +0000 | [diff] [blame] | 859 | } |
sewardj | c24be7a | 2005-03-15 01:40:12 +0000 | [diff] [blame] | 860 | |
| 861 | if (VG_(clo_trace_sched)) |
| 862 | print_sched_event(tid, "exiting VG_(scheduler)"); |
| 863 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 864 | vg_assert(VG_(is_exiting)(tid)); |
thughes | 513197c | 2004-06-13 12:07:53 +0000 | [diff] [blame] | 865 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 866 | VGP_POPCC(VgpSched); |
thughes | 513197c | 2004-06-13 12:07:53 +0000 | [diff] [blame] | 867 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 868 | //if (VG_(clo_model_pthreads)) |
| 869 | // VG_(tm_thread_exit)(tid); |
| 870 | |
| 871 | return tst->exitreason; |
sewardj | 20917d8 | 2002-05-28 01:36:45 +0000 | [diff] [blame] | 872 | } |
| 873 | |
| 874 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 875 | /* |
| 876 | This causes all threads to forceably exit. They aren't actually |
| 877 | dead by the time this returns; you need to call |
njn | af839f5 | 2005-06-23 03:27:57 +0000 | [diff] [blame] | 878 | VG_(reap_threads)() to wait for them. |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 879 | */ |
| 880 | void VG_(nuke_all_threads_except) ( ThreadId me, VgSchedReturnCode src ) |
sewardj | ccef2e6 | 2002-05-29 19:26:32 +0000 | [diff] [blame] | 881 | { |
| 882 | ThreadId tid; |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 883 | |
| 884 | vg_assert(VG_(is_running_thread)(me)); |
sewardj | 45f02c4 | 2005-02-05 18:27:14 +0000 | [diff] [blame] | 885 | |
sewardj | ccef2e6 | 2002-05-29 19:26:32 +0000 | [diff] [blame] | 886 | for (tid = 1; tid < VG_N_THREADS; tid++) { |
| 887 | if (tid == me |
jsgf | 855d93d | 2003-10-13 22:26:55 +0000 | [diff] [blame] | 888 | || VG_(threads)[tid].status == VgTs_Empty) |
sewardj | ccef2e6 | 2002-05-29 19:26:32 +0000 | [diff] [blame] | 889 | continue; |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 890 | if (0) |
sewardj | ef037c7 | 2002-05-30 00:40:03 +0000 | [diff] [blame] | 891 | VG_(printf)( |
| 892 | "VG_(nuke_all_threads_except): nuking tid %d\n", tid); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 893 | |
| 894 | VG_(threads)[tid].exitreason = src; |
sewardj | a8d8e23 | 2005-06-07 20:04:56 +0000 | [diff] [blame] | 895 | if (src == VgSrc_FatalSig) |
| 896 | VG_(threads)[tid].os_state.fatalsig = VKI_SIGKILL; |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 897 | VG_(kill_thread)(tid); |
sewardj | ccef2e6 | 2002-05-29 19:26:32 +0000 | [diff] [blame] | 898 | } |
| 899 | } |
| 900 | |
| 901 | |
njn | d304045 | 2003-05-19 15:04:06 +0000 | [diff] [blame] | 902 | /* --------------------------------------------------------------------- |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 903 | Specifying shadow register values |
njn | d304045 | 2003-05-19 15:04:06 +0000 | [diff] [blame] | 904 | ------------------------------------------------------------------ */ |
| 905 | |
/* Which guest registers carry the client-request argument block and
   the request's result, per architecture.  (Presumably chosen to
   match the client-side request macros -- confirm against
   valgrind.h if changing.) */
#if defined(VGA_x86)
#  define VG_CLREQ_ARGS       guest_EAX
#  define VG_CLREQ_RET        guest_EDX
#elif defined(VGA_amd64)
#  define VG_CLREQ_ARGS       guest_RAX
#  define VG_CLREQ_RET        guest_RDX
#elif defined(VGA_ppc32) || defined(VGA_ppc64)
#  define VG_CLREQ_ARGS       guest_GPR4
#  define VG_CLREQ_RET        guest_GPR3
#else
#  error Unknown arch
#endif

/* Accessors for the request-args / request-return registers in a
   thread's guest state, plus the byte offset of the return register
   within VexGuestArchState (used when notifying tools of writes). */
#define CLREQ_ARGS(regs)   ((regs).vex.VG_CLREQ_ARGS)
#define CLREQ_RET(regs)    ((regs).vex.VG_CLREQ_RET)
#define O_CLREQ_RET        (offsetof(VexGuestArchState, VG_CLREQ_RET))

// These macros write a value to a client's thread register, and tell the
// tool that it's happened (if necessary).

#define SET_CLREQ_RETVAL(zztid, zzval) \
   do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
        VG_TRACK( post_reg_write, \
                  Vg_CoreClientReq, zztid, O_CLREQ_RET, sizeof(UWord)); \
   } while (0)

#define SET_CLCALL_RETVAL(zztid, zzval, f) \
   do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
        VG_TRACK( post_reg_write_clientcall_return, \
                  zztid, O_CLREQ_RET, sizeof(UWord), f); \
   } while (0)
| 937 | |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 938 | /* --------------------------------------------------------------------- |
sewardj | 124ca2a | 2002-06-20 10:19:38 +0000 | [diff] [blame] | 939 | Handle client requests. |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 940 | ------------------------------------------------------------------ */ |
| 941 | |
njn | 9cb54ac | 2005-06-12 04:19:17 +0000 | [diff] [blame] | 942 | // OS-specific(?) client requests |
| 943 | static Bool os_client_request(ThreadId tid, UWord *args) |
| 944 | { |
| 945 | Bool handled = True; |
| 946 | |
| 947 | vg_assert(VG_(is_running_thread)(tid)); |
| 948 | |
| 949 | switch(args[0]) { |
| 950 | case VG_USERREQ__LIBC_FREERES_DONE: |
| 951 | /* This is equivalent to an exit() syscall, but we don't set the |
| 952 | exitcode (since it might already be set) */ |
| 953 | if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched)) |
| 954 | VG_(message)(Vg_DebugMsg, |
| 955 | "__libc_freeres() done; really quitting!"); |
| 956 | VG_(threads)[tid].exitreason = VgSrc_ExitSyscall; |
| 957 | break; |
| 958 | |
| 959 | default: |
| 960 | handled = False; |
| 961 | break; |
| 962 | } |
| 963 | |
| 964 | return handled; |
| 965 | } |
| 966 | |
| 967 | |
sewardj | 124ca2a | 2002-06-20 10:19:38 +0000 | [diff] [blame] | 968 | /* Do a client request for the thread tid. After the request, tid may |
| 969 | or may not still be runnable; if not, the scheduler will have to |
| 970 | choose a new thread to run. |
| 971 | */ |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 972 | static |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 973 | void do_client_request ( ThreadId tid ) |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 974 | { |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 975 | UWord* arg = (UWord*)(CLREQ_ARGS(VG_(threads)[tid].arch)); |
nethercote | d1b64b2 | 2004-11-04 18:22:28 +0000 | [diff] [blame] | 976 | UWord req_no = arg[0]; |
sewardj | 124ca2a | 2002-06-20 10:19:38 +0000 | [diff] [blame] | 977 | |
fitzhardinge | 98abfc7 | 2003-12-16 02:05:15 +0000 | [diff] [blame] | 978 | if (0) |
nethercote | d1b64b2 | 2004-11-04 18:22:28 +0000 | [diff] [blame] | 979 | VG_(printf)("req no = 0x%llx, arg = %p\n", (ULong)req_no, arg); |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 980 | switch (req_no) { |
| 981 | |
njn | 3e88418 | 2003-04-15 13:03:23 +0000 | [diff] [blame] | 982 | case VG_USERREQ__CLIENT_CALL0: { |
njn | 2ac9524 | 2005-03-13 23:07:30 +0000 | [diff] [blame] | 983 | UWord (*f)(ThreadId) = (void*)arg[1]; |
fitzhardinge | 98abfc7 | 2003-12-16 02:05:15 +0000 | [diff] [blame] | 984 | if (f == NULL) |
njn | 3d9edb2 | 2005-08-25 01:52:52 +0000 | [diff] [blame] | 985 | VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL0: func=%p", f); |
fitzhardinge | 98abfc7 | 2003-12-16 02:05:15 +0000 | [diff] [blame] | 986 | else |
njn | 2ac9524 | 2005-03-13 23:07:30 +0000 | [diff] [blame] | 987 | SET_CLCALL_RETVAL(tid, f ( tid ), (Addr)f); |
njn | 3e88418 | 2003-04-15 13:03:23 +0000 | [diff] [blame] | 988 | break; |
| 989 | } |
| 990 | case VG_USERREQ__CLIENT_CALL1: { |
njn | 2ac9524 | 2005-03-13 23:07:30 +0000 | [diff] [blame] | 991 | UWord (*f)(ThreadId, UWord) = (void*)arg[1]; |
fitzhardinge | 98abfc7 | 2003-12-16 02:05:15 +0000 | [diff] [blame] | 992 | if (f == NULL) |
njn | 3d9edb2 | 2005-08-25 01:52:52 +0000 | [diff] [blame] | 993 | VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL1: func=%p", f); |
fitzhardinge | 98abfc7 | 2003-12-16 02:05:15 +0000 | [diff] [blame] | 994 | else |
njn | 2ac9524 | 2005-03-13 23:07:30 +0000 | [diff] [blame] | 995 | SET_CLCALL_RETVAL(tid, f ( tid, arg[2] ), (Addr)f ); |
njn | 3e88418 | 2003-04-15 13:03:23 +0000 | [diff] [blame] | 996 | break; |
| 997 | } |
| 998 | case VG_USERREQ__CLIENT_CALL2: { |
njn | 2ac9524 | 2005-03-13 23:07:30 +0000 | [diff] [blame] | 999 | UWord (*f)(ThreadId, UWord, UWord) = (void*)arg[1]; |
fitzhardinge | 98abfc7 | 2003-12-16 02:05:15 +0000 | [diff] [blame] | 1000 | if (f == NULL) |
njn | 3d9edb2 | 2005-08-25 01:52:52 +0000 | [diff] [blame] | 1001 | VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL2: func=%p", f); |
fitzhardinge | 98abfc7 | 2003-12-16 02:05:15 +0000 | [diff] [blame] | 1002 | else |
njn | 2ac9524 | 2005-03-13 23:07:30 +0000 | [diff] [blame] | 1003 | SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3] ), (Addr)f ); |
njn | 3e88418 | 2003-04-15 13:03:23 +0000 | [diff] [blame] | 1004 | break; |
| 1005 | } |
| 1006 | case VG_USERREQ__CLIENT_CALL3: { |
njn | 2ac9524 | 2005-03-13 23:07:30 +0000 | [diff] [blame] | 1007 | UWord (*f)(ThreadId, UWord, UWord, UWord) = (void*)arg[1]; |
fitzhardinge | 98abfc7 | 2003-12-16 02:05:15 +0000 | [diff] [blame] | 1008 | if (f == NULL) |
njn | 3d9edb2 | 2005-08-25 01:52:52 +0000 | [diff] [blame] | 1009 | VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL3: func=%p", f); |
fitzhardinge | 98abfc7 | 2003-12-16 02:05:15 +0000 | [diff] [blame] | 1010 | else |
njn | 2ac9524 | 2005-03-13 23:07:30 +0000 | [diff] [blame] | 1011 | SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3], arg[4] ), (Addr)f ); |
njn | 3e88418 | 2003-04-15 13:03:23 +0000 | [diff] [blame] | 1012 | break; |
| 1013 | } |
| 1014 | |
njn | f09745a | 2005-05-10 03:01:23 +0000 | [diff] [blame] | 1015 | // Nb: this looks like a circular definition, because it kind of is. |
| 1016 | // See comment in valgrind.h to understand what's going on. |
sewardj | 124ca2a | 2002-06-20 10:19:38 +0000 | [diff] [blame] | 1017 | case VG_USERREQ__RUNNING_ON_VALGRIND: |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1018 | SET_CLREQ_RETVAL(tid, RUNNING_ON_VALGRIND+1); |
sewardj | 124ca2a | 2002-06-20 10:19:38 +0000 | [diff] [blame] | 1019 | break; |
| 1020 | |
fitzhardinge | 39de4b4 | 2003-10-31 07:12:21 +0000 | [diff] [blame] | 1021 | case VG_USERREQ__PRINTF: { |
| 1022 | int count = |
nethercote | 3e901a2 | 2004-09-11 13:17:02 +0000 | [diff] [blame] | 1023 | VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] ); |
fitzhardinge | 39de4b4 | 2003-10-31 07:12:21 +0000 | [diff] [blame] | 1024 | SET_CLREQ_RETVAL( tid, count ); |
| 1025 | break; } |
| 1026 | |
| 1027 | case VG_USERREQ__INTERNAL_PRINTF: { |
| 1028 | int count = |
njn | aa3c26b | 2005-03-12 05:32:28 +0000 | [diff] [blame] | 1029 | VG_(vmessage)( Vg_DebugMsg, (char *)arg[1], (void*)arg[2] ); |
fitzhardinge | 39de4b4 | 2003-10-31 07:12:21 +0000 | [diff] [blame] | 1030 | SET_CLREQ_RETVAL( tid, count ); |
| 1031 | break; } |
| 1032 | |
| 1033 | case VG_USERREQ__PRINTF_BACKTRACE: { |
fitzhardinge | 39de4b4 | 2003-10-31 07:12:21 +0000 | [diff] [blame] | 1034 | int count = |
nethercote | 3e901a2 | 2004-09-11 13:17:02 +0000 | [diff] [blame] | 1035 | VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], (void*)arg[2] ); |
njn | d01fef7 | 2005-03-25 23:35:48 +0000 | [diff] [blame] | 1036 | VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) ); |
fitzhardinge | 39de4b4 | 2003-10-31 07:12:21 +0000 | [diff] [blame] | 1037 | SET_CLREQ_RETVAL( tid, count ); |
| 1038 | break; } |
| 1039 | |
rjwalsh | 0140af5 | 2005-06-04 20:42:33 +0000 | [diff] [blame] | 1040 | case VG_USERREQ__STACK_REGISTER: { |
njn | 945ed2e | 2005-06-24 03:28:30 +0000 | [diff] [blame] | 1041 | UWord sid = VG_(register_stack)((Addr)arg[1], (Addr)arg[2]); |
rjwalsh | 0140af5 | 2005-06-04 20:42:33 +0000 | [diff] [blame] | 1042 | SET_CLREQ_RETVAL( tid, sid ); |
| 1043 | break; } |
| 1044 | |
| 1045 | case VG_USERREQ__STACK_DEREGISTER: { |
njn | 945ed2e | 2005-06-24 03:28:30 +0000 | [diff] [blame] | 1046 | VG_(deregister_stack)(arg[1]); |
rjwalsh | 0140af5 | 2005-06-04 20:42:33 +0000 | [diff] [blame] | 1047 | SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */ |
| 1048 | break; } |
| 1049 | |
| 1050 | case VG_USERREQ__STACK_CHANGE: { |
njn | 945ed2e | 2005-06-24 03:28:30 +0000 | [diff] [blame] | 1051 | VG_(change_stack)(arg[1], (Addr)arg[2], (Addr)arg[3]); |
rjwalsh | 0140af5 | 2005-06-04 20:42:33 +0000 | [diff] [blame] | 1052 | SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */ |
| 1053 | break; } |
| 1054 | |
fitzhardinge | 98abfc7 | 2003-12-16 02:05:15 +0000 | [diff] [blame] | 1055 | case VG_USERREQ__GET_MALLOCFUNCS: { |
| 1056 | struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1]; |
| 1057 | |
njn | fc51f8d | 2005-06-21 03:20:17 +0000 | [diff] [blame] | 1058 | info->tl_malloc = VG_(tdict).tool_malloc; |
| 1059 | info->tl_calloc = VG_(tdict).tool_calloc; |
| 1060 | info->tl_realloc = VG_(tdict).tool_realloc; |
| 1061 | info->tl_memalign = VG_(tdict).tool_memalign; |
| 1062 | info->tl___builtin_new = VG_(tdict).tool___builtin_new; |
| 1063 | info->tl___builtin_vec_new = VG_(tdict).tool___builtin_vec_new; |
| 1064 | info->tl_free = VG_(tdict).tool_free; |
| 1065 | info->tl___builtin_delete = VG_(tdict).tool___builtin_delete; |
| 1066 | info->tl___builtin_vec_delete = VG_(tdict).tool___builtin_vec_delete; |
fitzhardinge | 98abfc7 | 2003-12-16 02:05:15 +0000 | [diff] [blame] | 1067 | |
njn | cf81d55 | 2005-03-31 04:52:26 +0000 | [diff] [blame] | 1068 | info->arena_payload_szB = VG_(arena_payload_szB); |
njn | 088bfb4 | 2005-08-17 05:01:37 +0000 | [diff] [blame] | 1069 | info->mallinfo = VG_(mallinfo); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1070 | info->clo_trace_malloc = VG_(clo_trace_malloc); |
fitzhardinge | 98abfc7 | 2003-12-16 02:05:15 +0000 | [diff] [blame] | 1071 | |
| 1072 | SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */ |
| 1073 | |
| 1074 | break; |
| 1075 | } |
| 1076 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1077 | /* Requests from the client program */ |
| 1078 | |
| 1079 | case VG_USERREQ__DISCARD_TRANSLATIONS: |
| 1080 | if (VG_(clo_verbosity) > 2) |
| 1081 | VG_(printf)( "client request: DISCARD_TRANSLATIONS," |
| 1082 | " addr %p, len %d\n", |
| 1083 | (void*)arg[1], arg[2] ); |
| 1084 | |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 1085 | VG_(discard_translations)( |
| 1086 | arg[1], arg[2], "scheduler(VG_USERREQ__DISCARD_TRANSLATIONS)" |
| 1087 | ); |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1088 | |
njn | d304045 | 2003-05-19 15:04:06 +0000 | [diff] [blame] | 1089 | SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */ |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1090 | break; |
| 1091 | |
njn | 47363ab | 2003-04-21 13:24:40 +0000 | [diff] [blame] | 1092 | case VG_USERREQ__COUNT_ERRORS: |
nethercote | f2b1148 | 2004-08-02 12:36:01 +0000 | [diff] [blame] | 1093 | SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() ); |
njn | 47363ab | 2003-04-21 13:24:40 +0000 | [diff] [blame] | 1094 | break; |
| 1095 | |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 1096 | default: |
njn | 9cb54ac | 2005-06-12 04:19:17 +0000 | [diff] [blame] | 1097 | if (os_client_request(tid, arg)) { |
| 1098 | // do nothing, os_client_request() handled it |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1099 | } else if (VG_(needs).client_requests) { |
nethercote | d1b64b2 | 2004-11-04 18:22:28 +0000 | [diff] [blame] | 1100 | UWord ret; |
sewardj | 3404251 | 2002-10-22 04:14:35 +0000 | [diff] [blame] | 1101 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1102 | if (VG_(clo_verbosity) > 2) |
fitzhardinge | 98abfc7 | 2003-12-16 02:05:15 +0000 | [diff] [blame] | 1103 | VG_(printf)("client request: code %x, addr %p, len %d\n", |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1104 | arg[0], (void*)arg[1], arg[2] ); |
| 1105 | |
njn | 51d827b | 2005-05-09 01:02:08 +0000 | [diff] [blame] | 1106 | if ( VG_TDICT_CALL(tool_handle_client_request, tid, arg, &ret) ) |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1107 | SET_CLREQ_RETVAL(tid, ret); |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1108 | } else { |
sewardj | 3404251 | 2002-10-22 04:14:35 +0000 | [diff] [blame] | 1109 | static Bool whined = False; |
| 1110 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1111 | if (!whined && VG_(clo_verbosity) > 2) { |
nethercote | 7cc9c23 | 2004-01-21 15:08:04 +0000 | [diff] [blame] | 1112 | // Allow for requests in core, but defined by tools, which |
njn | d799418 | 2003-10-02 13:44:04 +0000 | [diff] [blame] | 1113 | // have 0 and 0 in their two high bytes. |
| 1114 | Char c1 = (arg[0] >> 24) & 0xff; |
| 1115 | Char c2 = (arg[0] >> 16) & 0xff; |
| 1116 | if (c1 == 0) c1 = '_'; |
| 1117 | if (c2 == 0) c2 = '_'; |
sewardj | 3404251 | 2002-10-22 04:14:35 +0000 | [diff] [blame] | 1118 | VG_(message)(Vg_UserMsg, "Warning:\n" |
njn | d799418 | 2003-10-02 13:44:04 +0000 | [diff] [blame] | 1119 | " unhandled client request: 0x%x (%c%c+0x%x). Perhaps\n" |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 1120 | " VG_(needs).client_requests should be set?", |
njn | d799418 | 2003-10-02 13:44:04 +0000 | [diff] [blame] | 1121 | arg[0], c1, c2, arg[0] & 0xffff); |
sewardj | 3404251 | 2002-10-22 04:14:35 +0000 | [diff] [blame] | 1122 | whined = True; |
| 1123 | } |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1124 | } |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 1125 | break; |
| 1126 | } |
| 1127 | } |
| 1128 | |
| 1129 | |
sewardj | 6072c36 | 2002-04-19 14:40:57 +0000 | [diff] [blame] | 1130 | /* --------------------------------------------------------------------- |
njn | 6676d5b | 2005-06-19 18:49:19 +0000 | [diff] [blame] | 1131 | Sanity checking (permanently engaged) |
sewardj | 6072c36 | 2002-04-19 14:40:57 +0000 | [diff] [blame] | 1132 | ------------------------------------------------------------------ */ |
| 1133 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1134 | /* Internal consistency checks on the sched structures. */ |
sewardj | 6072c36 | 2002-04-19 14:40:57 +0000 | [diff] [blame] | 1135 | static |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1136 | void scheduler_sanity ( ThreadId tid ) |
sewardj | 6072c36 | 2002-04-19 14:40:57 +0000 | [diff] [blame] | 1137 | { |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1138 | Bool bad = False; |
jsgf | 855d93d | 2003-10-13 22:26:55 +0000 | [diff] [blame] | 1139 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1140 | if (!VG_(is_running_thread)(tid)) { |
| 1141 | VG_(message)(Vg_DebugMsg, |
| 1142 | "Thread %d is supposed to be running, but doesn't own run_sema (owned by %d)\n", |
njn | c7561b9 | 2005-06-19 01:24:32 +0000 | [diff] [blame] | 1143 | tid, VG_(running_tid)); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1144 | bad = True; |
jsgf | 855d93d | 2003-10-13 22:26:55 +0000 | [diff] [blame] | 1145 | } |
sewardj | 5f07b66 | 2002-04-23 16:52:51 +0000 | [diff] [blame] | 1146 | |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1147 | if (VG_(gettid)() != VG_(threads)[tid].os_state.lwpid) { |
| 1148 | VG_(message)(Vg_DebugMsg, |
njn | d06ed47 | 2005-03-13 05:12:31 +0000 | [diff] [blame] | 1149 | "Thread %d supposed to be in LWP %d, but we're actually %d\n", |
| 1150 | tid, VG_(threads)[tid].os_state.lwpid, VG_(gettid)()); |
sewardj | b5f6f51 | 2005-03-10 23:59:00 +0000 | [diff] [blame] | 1151 | bad = True; |
sewardj | 5f07b66 | 2002-04-23 16:52:51 +0000 | [diff] [blame] | 1152 | } |
sewardj | 6072c36 | 2002-04-19 14:40:57 +0000 | [diff] [blame] | 1153 | } |
| 1154 | |
njn | 6676d5b | 2005-06-19 18:49:19 +0000 | [diff] [blame] | 1155 | void VG_(sanity_check_general) ( Bool force_expensive ) |
| 1156 | { |
| 1157 | ThreadId tid; |
| 1158 | |
| 1159 | VGP_PUSHCC(VgpCoreCheapSanity); |
| 1160 | |
| 1161 | if (VG_(clo_sanity_level) < 1) return; |
| 1162 | |
| 1163 | /* --- First do all the tests that we can do quickly. ---*/ |
| 1164 | |
| 1165 | sanity_fast_count++; |
| 1166 | |
| 1167 | /* Check stuff pertaining to the memory check system. */ |
| 1168 | |
| 1169 | /* Check that nobody has spuriously claimed that the first or |
| 1170 | last 16 pages of memory have become accessible [...] */ |
| 1171 | if (VG_(needs).sanity_checks) { |
| 1172 | VGP_PUSHCC(VgpToolCheapSanity); |
| 1173 | vg_assert(VG_TDICT_CALL(tool_cheap_sanity_check)); |
| 1174 | VGP_POPCC(VgpToolCheapSanity); |
| 1175 | } |
| 1176 | |
| 1177 | /* --- Now some more expensive checks. ---*/ |
| 1178 | |
| 1179 | /* Once every 25 times, check some more expensive stuff. */ |
| 1180 | if ( force_expensive |
| 1181 | || VG_(clo_sanity_level) > 1 |
| 1182 | || (VG_(clo_sanity_level) == 1 && (sanity_fast_count % 25) == 0)) { |
| 1183 | |
| 1184 | VGP_PUSHCC(VgpCoreExpensiveSanity); |
| 1185 | sanity_slow_count++; |
| 1186 | |
njn | 6676d5b | 2005-06-19 18:49:19 +0000 | [diff] [blame] | 1187 | if (VG_(needs).sanity_checks) { |
| 1188 | VGP_PUSHCC(VgpToolExpensiveSanity); |
| 1189 | vg_assert(VG_TDICT_CALL(tool_expensive_sanity_check)); |
| 1190 | VGP_POPCC(VgpToolExpensiveSanity); |
| 1191 | } |
| 1192 | |
njn | 6676d5b | 2005-06-19 18:49:19 +0000 | [diff] [blame] | 1193 | /* Look for stack overruns. Visit all threads. */ |
njn | d666ea7 | 2005-06-26 17:26:22 +0000 | [diff] [blame] | 1194 | for (tid = 1; tid < VG_N_THREADS; tid++) { |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 1195 | SizeT remains; |
| 1196 | VgStack* stack; |
njn | 6676d5b | 2005-06-19 18:49:19 +0000 | [diff] [blame] | 1197 | |
| 1198 | if (VG_(threads)[tid].status == VgTs_Empty || |
| 1199 | VG_(threads)[tid].status == VgTs_Zombie) |
| 1200 | continue; |
| 1201 | |
sewardj | 45f4e7c | 2005-09-27 19:20:21 +0000 | [diff] [blame] | 1202 | stack |
| 1203 | = (VgStack*) |
| 1204 | VG_(get_ThreadState)(tid)->os_state.valgrind_stack_base; |
| 1205 | remains |
| 1206 | = VG_(am_get_VgStack_unused_szB)(stack); |
njn | 6676d5b | 2005-06-19 18:49:19 +0000 | [diff] [blame] | 1207 | if (remains < VKI_PAGE_SIZE) |
| 1208 | VG_(message)(Vg_DebugMsg, |
| 1209 | "WARNING: Thread %d is within %d bytes " |
| 1210 | "of running out of stack!", |
| 1211 | tid, remains); |
| 1212 | } |
| 1213 | |
njn | 6676d5b | 2005-06-19 18:49:19 +0000 | [diff] [blame] | 1214 | VGP_POPCC(VgpCoreExpensiveSanity); |
| 1215 | } |
| 1216 | |
| 1217 | if (VG_(clo_sanity_level) > 1) { |
| 1218 | VGP_PUSHCC(VgpCoreExpensiveSanity); |
| 1219 | /* Check sanity of the low-level memory manager. Note that bugs |
| 1220 | in the client's code can cause this to fail, so we don't do |
| 1221 | this check unless specially asked for. And because it's |
| 1222 | potentially very expensive. */ |
| 1223 | VG_(sanity_check_malloc_all)(); |
| 1224 | VGP_POPCC(VgpCoreExpensiveSanity); |
| 1225 | } |
| 1226 | VGP_POPCC(VgpCoreCheapSanity); |
| 1227 | } |
sewardj | 6072c36 | 2002-04-19 14:40:57 +0000 | [diff] [blame] | 1228 | |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 1229 | /*--------------------------------------------------------------------*/ |
njn | 278b3d6 | 2005-05-30 23:20:51 +0000 | [diff] [blame] | 1230 | /*--- end ---*/ |
sewardj | e663cb9 | 2002-04-12 10:26:32 +0000 | [diff] [blame] | 1231 | /*--------------------------------------------------------------------*/ |