sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 1 | |
| 2 | /*--------------------------------------------------------------------*/ |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 3 | /*--- Memory-related stuff: segment initialisation and tracking, ---*/ |
| 4 | /*--- stack operations ---*/ |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 5 | /*--- vg_memory.c ---*/ |
| 6 | /*--------------------------------------------------------------------*/ |
| 7 | |
| 8 | /* |
| 9 | This file is part of Valgrind, an x86 protected-mode emulator |
| 10 | designed for debugging and profiling binaries on x86-Unixes. |
| 11 | |
| 12 | Copyright (C) 2000-2002 Julian Seward |
| 13 | jseward@acm.org |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 14 | |
| 15 | This program is free software; you can redistribute it and/or |
| 16 | modify it under the terms of the GNU General Public License as |
| 17 | published by the Free Software Foundation; either version 2 of the |
| 18 | License, or (at your option) any later version. |
| 19 | |
| 20 | This program is distributed in the hope that it will be useful, but |
| 21 | WITHOUT ANY WARRANTY; without even the implied warranty of |
| 22 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 23 | General Public License for more details. |
| 24 | |
| 25 | You should have received a copy of the GNU General Public License |
| 26 | along with this program; if not, write to the Free Software |
| 27 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA |
| 28 | 02111-1307, USA. |
| 29 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 30 | The GNU General Public License is contained in the file COPYING. |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 31 | */ |
| 32 | |
| 33 | #include "vg_include.h" |
| 34 | |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 35 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 36 | /*--------------------------------------------------------------*/ |
| 37 | /*--- Initialise program data/text etc on program startup. ---*/ |
| 38 | /*--------------------------------------------------------------*/ |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 39 | |
/* One executable segment: the half-open byte range starting at 'start',
   'size' bytes long, held on a singly-linked list. */
typedef
   struct _ExeSeg {
      Addr start;            /* lowest address of the segment */
      UInt size;             /* length in bytes */
      struct _ExeSeg* next;  /* next list entry; NULL terminates */
   }
   ExeSeg;

/* The list of current executable segments loaded.  Required so that when a
   segment is munmap'd, if it's executable we can recognise it as such and
   invalidate translations for it, and drop any basic-block specific
   information being stored.  If symbols are being used, this list will have
   the same segments recorded in it as the SegInfo symbols list (but much
   less information about each segment).
*/
static ExeSeg* exeSegsHead = NULL;
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 56 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 57 | /* Prepend it -- mmaps/munmaps likely to follow a stack pattern(?) so this |
| 58 | is good. |
| 59 | Also check no segments overlap, which would be very bad. Check is linear |
| 60 | for each seg added (quadratic overall) but the total number should be |
| 61 | small (konqueror has around 50 --njn). */ |
| 62 | static void add_exe_segment_to_list( a, len ) |
| 63 | { |
| 64 | Addr lo = a; |
| 65 | Addr hi = a + len - 1; |
| 66 | ExeSeg* es; |
| 67 | ExeSeg* es2; |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 68 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 69 | /* Prepend it */ |
| 70 | es = (ExeSeg*)VG_(arena_malloc)(VG_AR_CORE, sizeof(ExeSeg)); |
| 71 | es->start = a; |
| 72 | es->size = len; |
| 73 | es->next = exeSegsHead; |
| 74 | exeSegsHead = es; |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 75 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 76 | /* Check there's no overlap with the rest of the list */ |
| 77 | for (es2 = es->next; es2 != NULL; es2 = es2->next) { |
| 78 | Addr lo2 = es2->start; |
| 79 | Addr hi2 = es2->start + es2->size - 1; |
| 80 | Bool overlap; |
| 81 | vg_assert(lo < hi); |
| 82 | vg_assert(lo2 < hi2); |
| 83 | /* the main assertion */ |
| 84 | overlap = (lo <= lo2 && lo2 <= hi) |
| 85 | || (lo <= hi2 && hi2 <= hi); |
| 86 | if (overlap) { |
| 87 | VG_(printf)("\n\nOVERLAPPING EXE SEGMENTS\n" |
| 88 | " new: start %p, size %d\n" |
| 89 | " old: start %p, size %d\n\n", |
| 90 | es->start, es->size, es2->start, es2->size ); |
| 91 | vg_assert(! overlap); |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 92 | } |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 93 | } |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 94 | } |
| 95 | |
| 96 | static Bool remove_if_exe_segment_from_list( Addr a, UInt len ) |
| 97 | { |
| 98 | ExeSeg **prev_next_ptr = & exeSegsHead, |
| 99 | *curr = exeSegsHead; |
| 100 | |
| 101 | while (True) { |
| 102 | if (curr == NULL) break; |
| 103 | if (a == curr->start) break; |
| 104 | prev_next_ptr = &curr->next; |
| 105 | curr = curr->next; |
| 106 | } |
| 107 | if (curr == NULL) |
| 108 | return False; |
| 109 | |
| 110 | vg_assert(*prev_next_ptr == curr); |
| 111 | |
| 112 | *prev_next_ptr = curr->next; |
| 113 | |
| 114 | VG_(arena_free)(VG_AR_CORE, curr); |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 115 | return True; |
| 116 | } |
| 117 | |
/* Records the exe segment in the ExeSeg list (checking for overlaps), and
   reads debug info if required.  Note the entire /proc/pid/maps file is
   read for the debug info, but it just reads symbols for newly added exe
   segments.  This is required to find out their names if they have one.  So
   we don't use this at startup because it's overkill and can screw reading
   of /proc/pid/maps.
 */
void VG_(new_exe_segment) ( Addr a, UInt len )
{
   // SSS: only bother if size != 0?  Does that happen? (probably can)

   /* Record first (so overlap checking sees the new segment), then pick
      up any symbols the newly mapped object brings in. */
   add_exe_segment_to_list( a, len );
   VG_(maybe_read_symbols)();
}
| 132 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 133 | /* Invalidate translations as necessary (also discarding any basic |
| 134 | block-specific info retained by the skin) and unload any debug |
| 135 | symbols. */ |
| 136 | // Nb: remove_if_exe_segment_from_list() and VG_(maybe_unload_symbols)() |
| 137 | // both ignore 'len', but that seems that's ok for most programs... see |
| 138 | // comment above vg_syscalls.c:mmap_segment() et al for more details. |
| 139 | void VG_(remove_if_exe_segment) ( Addr a, UInt len ) |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 140 | { |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 141 | if (remove_if_exe_segment_from_list( a, len )) { |
| 142 | VG_(invalidate_translations) ( a, len ); |
| 143 | VG_(maybe_unload_symbols) ( a, len ); |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 144 | } |
| 145 | } |
| 146 | |
| 147 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 148 | static |
| 149 | void startup_segment_callback ( Addr start, UInt size, |
| 150 | Char rr, Char ww, Char xx, |
| 151 | UInt foffset, UChar* filename ) |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 152 | { |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 153 | UInt r_esp; |
| 154 | Bool is_stack_segment; |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 155 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 156 | /* Sanity check ... if this is the executable's text segment, |
| 157 | ensure it is loaded where we think it ought to be. Any file |
| 158 | name which doesn't contain ".so" is assumed to be the |
| 159 | executable. */ |
| 160 | if (filename != NULL |
| 161 | && xx == 'x' |
| 162 | && VG_(strstr(filename, ".so")) == NULL |
| 163 | ) { |
| 164 | /* We assume this is the executable. */ |
| 165 | if (start != VG_ASSUMED_EXE_BASE) { |
| 166 | VG_(message)(Vg_UserMsg, |
| 167 | "FATAL: executable base addr not as assumed."); |
| 168 | VG_(message)(Vg_UserMsg, "name %s, actual %p, assumed %p.", |
| 169 | filename, start, VG_ASSUMED_EXE_BASE); |
| 170 | VG_(message)(Vg_UserMsg, |
| 171 | "One reason this could happen is that you have a shared object"); |
| 172 | VG_(message)(Vg_UserMsg, |
| 173 | " whose name doesn't contain the characters \".so\", so Valgrind "); |
| 174 | VG_(message)(Vg_UserMsg, |
| 175 | "naively assumes it is the executable. "); |
| 176 | VG_(message)(Vg_UserMsg, |
| 177 | "In that case, rename it appropriately."); |
| 178 | VG_(panic)("VG_ASSUMED_EXE_BASE doesn't match reality"); |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 179 | } |
| 180 | } |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 181 | |
| 182 | if (0) |
| 183 | VG_(message)(Vg_DebugMsg, |
| 184 | "initial map %8x-%8x %c%c%c? %8x (%d) (%s)", |
| 185 | start,start+size,rr,ww,xx,foffset, |
| 186 | size, filename?filename:(UChar*)"NULL"); |
| 187 | |
| 188 | if (rr != 'r' && xx != 'x' && ww != 'w') { |
| 189 | VG_(printf)("No permissions on the segment named %s\n", filename); |
| 190 | VG_(panic)("Non-readable, writable, executable segment at startup"); |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 191 | } |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 192 | |
| 193 | /* This parallels what happens when we mmap some new memory */ |
| 194 | if (filename != NULL && xx == 'x') { |
| 195 | VG_(new_exe_segment)( start, size ); |
| 196 | } |
| 197 | VG_TRACK( new_mem_startup, start, size, rr=='r', ww=='w', xx=='x' ); |
| 198 | |
| 199 | /* If this is the stack segment mark all below %esp as noaccess. */ |
| 200 | r_esp = VG_(baseBlock)[VGOFF_(m_esp)]; |
| 201 | is_stack_segment = start <= r_esp && r_esp < start+size; |
| 202 | if (is_stack_segment) { |
| 203 | if (0) |
| 204 | VG_(message)(Vg_DebugMsg, "invalidating stack area: %x .. %x", |
| 205 | start,r_esp); |
| 206 | VG_TRACK( die_mem_stack, start, r_esp-start ); |
| 207 | } |
| 208 | } |
| 209 | |
| 210 | |
/* 1. Records exe segments from /proc/pid/maps -- always necessary, because
      if they're munmap()ed we need to know if they were executable in order
      to discard translations.  Also checks there's no exe segment overlaps.

   2. Marks global variables that might be accessed from generated code;

   3. Sets up the end of the data segment so that vg_syscalls.c can make
      sense of calls to brk().
*/
void VG_(init_memory) ( void )
{
   /* 1 */
   VG_(read_procselfmaps) ( startup_segment_callback );

   /* 2: these VG_ globals are read/written by generated code, so the
      skin must consider them initialised. */
   VG_TRACK( post_mem_write, (Addr) & VG_(running_on_simd_CPU), 1 );
   VG_TRACK( post_mem_write, (Addr) & VG_(clo_trace_malloc), 1 );
   VG_TRACK( post_mem_write, (Addr) & VG_(clo_sloppy_malloc), 1 );

   /* 3 */
   VG_(init_dataseg_end_for_brk)();
}
| 233 | |
| 234 | |
| 235 | /*------------------------------------------------------------*/ |
| 236 | /*--- Tracking permissions around %esp changes. ---*/ |
| 237 | /*------------------------------------------------------------*/ |
| 238 | |
| 239 | /* |
| 240 | The stack |
| 241 | ~~~~~~~~~ |
| 242 | The stack's segment seems to be dynamically extended downwards |
| 243 | by the kernel as the stack pointer moves down. Initially, a |
| 244 | 1-page (4k) stack is allocated. When %esp moves below that for |
| 245 | the first time, presumably a page fault occurs. The kernel |
| 246 | detects that the faulting address is in the range from %esp upwards |
| 247 | to the current valid stack. It then extends the stack segment |
| 248 | downwards for enough to cover the faulting address, and resumes |
| 249 | the process (invisibly). The process is unaware of any of this. |
| 250 | |
| 251 | That means that Valgrind can't spot when the stack segment is |
| 252 | being extended. Fortunately, we want to precisely and continuously |
| 253 | update stack permissions around %esp, so we need to spot all |
| 254 | writes to %esp anyway. |
| 255 | |
| 256 | The deal is: when %esp is assigned a lower value, the stack is |
| 257 | being extended. Create a secondary maps to fill in any holes |
| 258 | between the old stack ptr and this one, if necessary. Then |
| 259 | mark all bytes in the area just "uncovered" by this %esp change |
| 260 | as write-only. |
| 261 | |
| 262 | When %esp goes back up, mark the area receded over as unreadable |
| 263 | and unwritable. |
| 264 | |
| 265 | Just to record the %esp boundary conditions somewhere convenient: |
| 266 | %esp always points to the lowest live byte in the stack. All |
| 267 | addresses below %esp are not live; those at and above it are. |
| 268 | */ |
| 269 | |
sewardj | 1e8cdc9 | 2002-04-18 11:37:52 +0000 | [diff] [blame] | 270 | /* Does this address look like something in or vaguely near the |
| 271 | current thread's stack? */ |
| 272 | static |
| 273 | Bool is_plausible_stack_addr ( ThreadState* tst, Addr aa ) |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 274 | { |
| 275 | UInt a = (UInt)aa; |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 276 | //PROF_EVENT(100); PPP |
sewardj | 1e8cdc9 | 2002-04-18 11:37:52 +0000 | [diff] [blame] | 277 | if (a <= tst->stack_highest_word && |
| 278 | a > tst->stack_highest_word - VG_PLAUSIBLE_STACK_SIZE) |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 279 | return True; |
| 280 | else |
| 281 | return False; |
| 282 | } |
| 283 | |
| 284 | |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 285 | /* Kludgey ... how much does %esp have to change before we reckon that |
| 286 | the application is switching stacks ? */ |
| 287 | #define VG_HUGE_DELTA (VG_PLAUSIBLE_STACK_SIZE / 4) |
| 288 | |
| 289 | static Addr get_page_base ( Addr a ) |
| 290 | { |
| 291 | return a & ~(VKI_BYTES_PER_PAGE-1); |
| 292 | } |
| 293 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 294 | static void vg_handle_esp_assignment_SLOWLY ( Addr old_esp, Addr new_esp ); |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 295 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 296 | __attribute__ ((regparm (1))) |
| 297 | void VG_(handle_esp_assignment) ( Addr new_esp ) |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 298 | { |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 299 | UInt old_esp; |
| 300 | Int delta; |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 301 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 302 | VGP_MAYBE_PUSHCC(VgpStack); |
| 303 | |
| 304 | old_esp = VG_(baseBlock)[VGOFF_(m_esp)]; |
| 305 | delta = ((Int)new_esp) - ((Int)old_esp); |
| 306 | |
| 307 | /* Update R_ESP */ |
| 308 | VG_(baseBlock)[VGOFF_(m_esp)] = new_esp; |
| 309 | |
| 310 | //PROF_EVENT(101); PPP |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 311 | |
| 312 | # ifndef VG_DEBUG_MEMORY |
| 313 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 314 | if (IS_ALIGNED4_ADDR(old_esp) && IS_ALIGNED4_ADDR(new_esp)) { |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 315 | |
| 316 | /* Deal with the most common cases fast. These are ordered in |
| 317 | the sequence most common first. */ |
| 318 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 319 | # ifdef VG_PROFILE_MEMORY |
| 320 | // PPP |
| 321 | if (delta = - 4) PROF_EVENT(102); |
| 322 | else if (delta = 4) PROF_EVENT(103); |
| 323 | else if (delta = -12) PROF_EVENT(104); |
| 324 | else if (delta = - 8) PROF_EVENT(105); |
| 325 | else if (delta = 16) PROF_EVENT(106); |
| 326 | else if (delta = 12) PROF_EVENT(107); |
| 327 | else if (delta = 0) PROF_EVENT(108); |
| 328 | else if (delta = 8) PROF_EVENT(109); |
| 329 | else if (delta = -16) PROF_EVENT(110); |
| 330 | else if (delta = 20) PROF_EVENT(111); |
| 331 | else if (delta = -20) PROF_EVENT(112); |
| 332 | else if (delta = 24) PROF_EVENT(113); |
| 333 | else if (delta = -24) PROF_EVENT(114); |
| 334 | else if (delta > 0) PROF_EVENT(115); // PPP: new: aligned_big_pos |
| 335 | else PROF_EVENT(116); // PPP: new: aligned_big_neg |
| 336 | # endif |
| 337 | |
| 338 | if (delta < 0) { |
| 339 | VG_TRACK(new_mem_stack_aligned, new_esp, -delta); |
| 340 | } else if (delta > 0) { |
| 341 | VG_TRACK(die_mem_stack_aligned, old_esp, delta); |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 342 | } |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 343 | /* Do nothing if (delta==0) */ |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 344 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 345 | VGP_MAYBE_POPCC(VgpStack); |
| 346 | return; |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 347 | } |
| 348 | |
| 349 | # endif |
| 350 | |
| 351 | /* The above special cases handle 90% to 95% of all the stack |
| 352 | adjustments. The rest we give to the slow-but-general |
| 353 | mechanism. */ |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 354 | vg_handle_esp_assignment_SLOWLY ( old_esp, new_esp ); |
| 355 | VGP_MAYBE_POPCC(VgpStack); |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 356 | } |
| 357 | |
| 358 | |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 359 | static void vg_handle_esp_assignment_SLOWLY ( Addr old_esp, Addr new_esp ) |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 360 | { |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 361 | Int delta; |
| 362 | |
| 363 | delta = ((Int)new_esp) - ((Int)old_esp); |
| 364 | //VG_(printf)("delta %d (%x) %x --> %x\n", delta, delta, old_esp, new_esp); |
| 365 | //PROF_EVENT(120); PPP |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 366 | if (-(VG_HUGE_DELTA) < delta && delta < VG_HUGE_DELTA) { |
| 367 | /* "Ordinary" stack change. */ |
| 368 | if (new_esp < old_esp) { |
| 369 | /* Moving down; the stack is growing. */ |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 370 | //PROF_EVENT(121); PPP |
| 371 | VG_TRACK( new_mem_stack, new_esp, -delta ); |
| 372 | |
| 373 | } else if (new_esp > old_esp) { |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 374 | /* Moving up; the stack is shrinking. */ |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 375 | //PROF_EVENT(122); PPP |
| 376 | VG_TRACK( die_mem_stack, old_esp, delta ); |
| 377 | |
| 378 | } else { |
| 379 | /* when old_esp == new_esp */ |
| 380 | //PROF_EVENT(123); PPP |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 381 | } |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 382 | return; |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 383 | } |
| 384 | |
| 385 | /* %esp has changed by more than HUGE_DELTA. We take this to mean |
| 386 | that the application is switching to a new stack, for whatever |
| 387 | reason, and we attempt to initialise the permissions around the |
| 388 | new stack in some plausible way. All pretty kludgey; needed to |
| 389 | make netscape-4.07 run without generating thousands of error |
| 390 | contexts. |
| 391 | |
| 392 | If we appear to be switching back to the main stack, don't mess |
| 393 | with the permissions in the area at and above the stack ptr. |
| 394 | Otherwise, we're switching to an alternative stack; make the |
| 395 | area above %esp readable -- this doesn't seem right -- the right |
| 396 | thing to do would be to make it writable -- but is needed to |
| 397 | avoid huge numbers of errs in netscape. To be investigated. */ |
| 398 | |
| 399 | { Addr invalid_down_to = get_page_base(new_esp) |
| 400 | - 0 * VKI_BYTES_PER_PAGE; |
| 401 | Addr valid_up_to = get_page_base(new_esp) + VKI_BYTES_PER_PAGE |
| 402 | + 0 * VKI_BYTES_PER_PAGE; |
sewardj | 1e8cdc9 | 2002-04-18 11:37:52 +0000 | [diff] [blame] | 403 | ThreadState* tst = VG_(get_current_thread_state)(); |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 404 | //PROF_EVENT(124); PPP |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 405 | if (VG_(clo_verbosity) > 1) |
| 406 | VG_(message)(Vg_UserMsg, "Warning: client switching stacks? " |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 407 | "%%esp: %p --> %p", old_esp, new_esp); |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 408 | /* VG_(printf)("na %p, %%esp %p, wr %p\n", |
| 409 | invalid_down_to, new_esp, valid_up_to ); */ |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 410 | VG_TRACK( die_mem_stack, invalid_down_to, new_esp - invalid_down_to ); |
sewardj | 1e8cdc9 | 2002-04-18 11:37:52 +0000 | [diff] [blame] | 411 | if (!is_plausible_stack_addr(tst, new_esp)) { |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 412 | VG_TRACK( post_mem_write, new_esp, valid_up_to - new_esp ); |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 413 | } |
| 414 | } |
| 415 | } |
| 416 | |
| 417 | |
sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 418 | /*--------------------------------------------------------------------*/ |
| 419 | /*--- end vg_memory.c ---*/ |
| 420 | /*--------------------------------------------------------------------*/ |
njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame^] | 421 | |