blob: 21d83c1b42fdbb5872024b62d425316152dba5ce [file] [log] [blame]
sewardjde4a1d02002-03-22 01:27:54 +00001
2/*--------------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00003/*--- Memory-related stuff: segment initialisation and tracking, ---*/
4/*--- stack operations ---*/
sewardjde4a1d02002-03-22 01:27:54 +00005/*--- vg_memory.c ---*/
6/*--------------------------------------------------------------------*/
7
8/*
njnc9539842002-10-02 13:26:35 +00009 This file is part of Valgrind, an extensible x86 protected-mode
10 emulator for monitoring program execution on x86-Unixes.
sewardjde4a1d02002-03-22 01:27:54 +000011
nethercotebb1c9912004-01-04 16:43:23 +000012 Copyright (C) 2000-2004 Julian Seward
sewardjde4a1d02002-03-22 01:27:54 +000013 jseward@acm.org
sewardjde4a1d02002-03-22 01:27:54 +000014
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
njn25e49d8e72002-09-23 09:36:25 +000030 The GNU General Public License is contained in the file COPYING.
sewardjde4a1d02002-03-22 01:27:54 +000031*/
32
33#include "vg_include.h"
34
fitzhardinge98abfc72003-12-16 02:05:15 +000035#include <stddef.h>
36
sewardja4495682002-10-21 07:29:59 +000037/* Define to debug the memory-leak-detector. */
38/* #define VG_DEBUG_LEAKCHECK */
39
fitzhardinge98abfc72003-12-16 02:05:15 +000040static const Bool mem_debug = False;
41
42static Int addrcmp(const void *ap, const void *bp)
43{
44 Addr a = *(Addr *)ap;
45 Addr b = *(Addr *)bp;
46 Int ret;
47
48 if (a == b)
49 ret = 0;
50 else
51 ret = (a < b) ? -1 : 1;
52
53 return ret;
54}
55
56static Char *straddr(void *p)
57{
58 static Char buf[16];
59
60 VG_(sprintf)(buf, "%p", *(Addr *)p);
61
62 return buf;
63}
64
65static SkipList sk_segments = SKIPLIST_INIT(Segment, addr, addrcmp, straddr, VG_AR_CORE);
66
67/*--------------------------------------------------------------*/
68/*--- Maintain an ordered list of all the client's mappings ---*/
69/*--------------------------------------------------------------*/
70
71Bool VG_(seg_contains)(const Segment *s, Addr p, UInt len)
72{
73 Addr se = s->addr+s->len;
74 Addr pe = p+len;
75
76 vg_assert(pe >= p);
77
78 return (p >= s->addr && pe <= se);
79}
80
81Bool VG_(seg_overlaps)(const Segment *s, Addr p, UInt len)
82{
83 Addr se = s->addr+s->len;
84 Addr pe = p+len;
85
86 vg_assert(pe >= p);
87
88 return (p < se && pe > s->addr);
89}
90
91/* Prepare a Segment structure for recycling by freeing everything
92 hanging off it. */
93static void recycleseg(Segment *s)
94{
95 if (s->flags & SF_CODE)
96 VG_(invalidate_translations)(s->addr, s->len, False);
97
98 if (s->filename != NULL)
99 VG_(arena_free)(VG_AR_CORE, (Char *)s->filename);
100
101 /* keep the SegInfo, if any - it probably still applies */
102}
103
104/* When freeing a Segment, also clean up every one else's ideas of
105 what was going on in that range of memory */
106static void freeseg(Segment *s)
107{
108 recycleseg(s);
109 if (s->symtab != NULL) {
110 VG_(symtab_decref)(s->symtab, s->addr, s->len);
111 s->symtab = NULL;
112 }
113
114 VG_(SkipNode_Free)(&sk_segments, s);
115}
116
/* Split a segment at address a, returning the new (upper) segment.
   a must be page-aligned.  Returns NULL if a does not fall strictly
   inside an existing segment (i.e. there is nothing to split). */
Segment *VG_(split_segment)(Addr a)
{
   Segment *s = VG_(SkipList_Find)(&sk_segments, &a);
   Segment *ns;
   Int delta;

   vg_assert((a & (VKI_BYTES_PER_PAGE-1)) == 0);

   /* missed - no segment at or below a */
   if (s == NULL)
      return NULL;

   /* a at or beyond endpoint - split would produce an empty piece */
   if (s->addr == a || a >= (s->addr+s->len))
      return NULL;

   vg_assert(a > s->addr && a < (s->addr+s->len));

   ns = VG_(SkipNode_Alloc)(&sk_segments);

   /* start the new segment as a copy of the old, then adjust */
   *ns = *s;

   delta = a - s->addr;
   ns->addr += delta;           /* upper piece starts at a ... */
   ns->offset += delta;         /* ... with the file offset advanced to match */
   ns->len -= delta;
   s->len = delta;              /* lower piece is truncated at a */

   /* the struct copy aliased s->filename; give ns its own copy */
   if (s->filename != NULL)
      ns->filename = VG_(arena_strdup)(VG_AR_CORE, s->filename);

   /* the struct copy duplicated the symtab pointer; account for the
      extra reference */
   if (ns->symtab != NULL)
      VG_(symtab_incref)(ns->symtab);

   VG_(SkipList_Insert)(&sk_segments, ns);

   return ns;
}
156
/* This unmaps all the segments in the range [addr, addr+len); any
   partial mappings at the ends are truncated.  addr must already be
   page-aligned; len is rounded up to whole pages. */
void VG_(unmap_range)(Addr addr, UInt len)
{
   Segment *s;
   Segment *next;
   static const Bool debug = False || mem_debug;
   Addr end;

   if (len == 0)
      return;

   len = PGROUNDUP(len);
   vg_assert(addr == PGROUNDDN(addr));

   if (debug)
      VG_(printf)("unmap_range(%p, %d)\n", addr, len);

   end = addr+len;

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   vg_assert((len & (VKI_BYTES_PER_PAGE-1)) == 0);

   /* Walk every segment whose start lies below the end of the range.
      Note the Find may return a segment starting below addr, whose
      tail overlaps the range. */
   for(s = VG_(SkipList_Find)(&sk_segments, &addr);
       s != NULL && s->addr < (addr+len);
       s = next) {
      Addr seg_end = s->addr + s->len;

      /* fetch next now in case we end up deleting this segment */
      next = VG_(SkipNode_Next)(&sk_segments, s);

      if (debug)
         VG_(printf)("unmap: addr=%p-%p s=%p ->addr=%p-%p len=%d\n",
                     addr, end, s, s->addr, seg_end, s->len);

      if (!VG_(seg_overlaps)(s, addr, len)) {
         if (debug)
            VG_(printf)(" (no overlap)\n");
         continue;
      }

      /* 4 cases: */
      if (addr > s->addr &&
          addr < seg_end &&
          end >= seg_end) {
         /* this segment's tail is truncated by [addr, addr+len)
            -> truncate tail
         */
         s->len = addr - s->addr;

         if (debug)
            VG_(printf)(" case 1: s->len=%d\n", s->len);
      } else if (addr <= s->addr && end > s->addr && end < seg_end) {
         /* this segment's head is truncated by [addr, addr+len)
            -> truncate head
         */
         Int delta = end - s->addr;

         if (debug)
            VG_(printf)(" case 2: s->addr=%p s->len=%d delta=%d\n", s->addr, s->len, delta);

         /* keep addr, offset and len consistent as the head moves up */
         s->addr += delta;
         s->offset += delta;
         s->len -= delta;

         vg_assert(s->len != 0);
      } else if (addr <= s->addr && end >= seg_end) {
         /* this segment is completely contained within [addr, addr+len)
            -> delete segment
         */
         Segment *rs = VG_(SkipList_Remove)(&sk_segments, &s->addr);
         vg_assert(rs == s);
         freeseg(s);

         if (debug)
            VG_(printf)(" case 3: s==%p deleted\n", s);
      } else if (addr > s->addr && end < seg_end) {
         /* [addr, addr+len) is contained within a single segment
            -> split segment into 3, delete middle portion
         */
         Segment *middle, *rs;

         middle = VG_(split_segment)(addr);
         VG_(split_segment)(addr+len);

         vg_assert(middle->addr == addr);
         rs = VG_(SkipList_Remove)(&sk_segments, &addr);
         vg_assert(rs == middle);

         freeseg(rs);

         if (debug)
            VG_(printf)(" case 4: subrange %p-%p deleted\n",
                        addr, addr+len);
      }
   }
}
255
fitzhardinge1a4adf02003-12-22 10:42:59 +0000256/* Return true if two segments are adjacent and mergable (s1 is
257 assumed to have a lower ->addr than s2) */
fitzhardinge98abfc72003-12-16 02:05:15 +0000258static inline Bool neighbours(Segment *s1, Segment *s2)
259{
260 if (s1->addr+s1->len != s2->addr)
261 return False;
262
263 if (s1->flags != s2->flags)
264 return False;
265
266 if (s1->prot != s2->prot)
267 return False;
268
269 if (s1->symtab != s2->symtab)
270 return False;
271
272 if (s1->flags & SF_FILE){
273 if ((s1->offset + s1->len) != s2->offset)
274 return False;
275 if (s1->dev != s2->dev)
276 return False;
277 if (s1->ino != s2->ino)
278 return False;
279 }
280
281 return True;
282}
283
/* If possible, merge segments in [a, a+len) with their neighbours -
   some segments, including s, may be destroyed in the process.
   a and len must be page-aligned. */
static void merge_segments(Addr a, UInt len)
{
   Segment *s;
   Segment *next;

   vg_assert((a & (VKI_BYTES_PER_PAGE-1)) == 0);
   vg_assert((len & (VKI_BYTES_PER_PAGE-1)) == 0);

   /* start the scan one page earlier, so that a segment ending
      exactly at a can be merged with the first segment in the range */
   a -= VKI_BYTES_PER_PAGE;
   len += VKI_BYTES_PER_PAGE;

   for(s = VG_(SkipList_Find)(&sk_segments, &a);
       s != NULL && s->addr < (a+len);) {
      next = VG_(SkipNode_Next)(&sk_segments, s);

      if (next && neighbours(s, next)) {
         Segment *rs;

         if (0)
            VG_(printf)("merge %p-%p with %p-%p\n",
                        s->addr, s->addr+s->len,
                        next->addr, next->addr+next->len);
         /* absorb next into s, advance past it, then unlink and
            free the absorbed node */
         s->len += next->len;
         s = VG_(SkipNode_Next)(&sk_segments, next);

         rs = VG_(SkipList_Remove)(&sk_segments, &next->addr);
         vg_assert(next == rs);
         freeseg(next);
      } else
         s = next;
   }
}
318
/* Record a file-backed (or pseudo-file) mapping of [addr, addr+len)
   in the segment list.  If a segment with exactly this addr/len
   already exists it is recycled in place; otherwise any overlapping
   segments are unmapped first and a fresh node inserted.  May read
   symbols from the mapped file, or attach an existing SegInfo that
   covers the range.  addr must be page-aligned; len is rounded up. */
void VG_(map_file_segment)(Addr addr, UInt len, UInt prot, UInt flags,
                           UInt dev, UInt ino, ULong off, const Char *filename)
{
   Segment *s;
   static const Bool debug = False || mem_debug;
   Bool recycled;

   if (debug)
      VG_(printf)("map_file_segment(%p, %d, %x, %x, %4x, %d, %ld, %s)\n",
                  addr, len, prot, flags, dev, ino, off, filename);

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   len = PGROUNDUP(len);

   /* First look to see what already exists around here */
   s = VG_(SkipList_Find)(&sk_segments, &addr);

   if (s != NULL && s->addr == addr && s->len == len) {
      /* Exact match: reuse the node in place */
      recycled = True;
      recycleseg(s);

      /* If we had a symtab, but the new mapping is incompatible, then
         free up the old symtab in preparation for a new one. */
      if (s->symtab != NULL &&
          (!(s->flags & SF_FILE) ||
           !(flags & SF_FILE) ||
           s->dev != dev ||
           s->ino != ino ||
           s->offset != off)) {
         VG_(symtab_decref)(s->symtab, s->addr, s->len);
         s->symtab = NULL;
      }
   } else {
      /* No exact match: clear the range and make a fresh node */
      recycled = False;
      VG_(unmap_range)(addr, len);

      s = VG_(SkipNode_Alloc)(&sk_segments);

      s->addr = addr;
      s->len = len;
      s->symtab = NULL;
   }

   /* (re)initialize the segment's attributes */
   s->flags = flags;
   s->prot = prot;
   s->dev = dev;
   s->ino = ino;
   s->offset = off;

   if (filename != NULL)
      s->filename = VG_(arena_strdup)(VG_AR_CORE, filename);
   else
      s->filename = NULL;

   if (debug) {
      Segment *ts;
      for(ts = VG_(SkipNode_First)(&sk_segments);
          ts != NULL;
          ts = VG_(SkipNode_Next)(&sk_segments, ts))
         VG_(printf)("list: %8p->%8p ->%d (0x%x) prot=%x flags=%x\n",
                     ts, ts->addr, ts->len, ts->len, ts->prot, ts->flags);

      VG_(printf)("inserting s=%p addr=%p len=%d\n",
                  s, s->addr, s->len);
   }

   /* a recycled node is still linked into the skiplist */
   if (!recycled)
      VG_(SkipList_Insert)(&sk_segments, s);

   /* If this mapping is of the beginning of a file, isn't part of
      Valgrind, is at least readable and seems to contain an object
      file, then try reading symbols from it. */
   if ((flags & (SF_MMAP|SF_NOSYMS)) == SF_MMAP &&
       s->symtab == NULL) {
      if (off == 0 &&
          filename != NULL &&
          (prot & (VKI_PROT_READ|VKI_PROT_EXEC)) == (VKI_PROT_READ|VKI_PROT_EXEC) &&
          len >= VKI_BYTES_PER_PAGE &&
          s->symtab == NULL &&
          VG_(is_object_file)((void *)addr))
      {
         s->symtab = VG_(read_seg_symbols)(s);

         if (s->symtab != NULL) {
            s->flags |= SF_DYNLIB;
         }
      } else if (flags & SF_MMAP) {
         const SegInfo *info;

         /* Otherwise see if an existing symtab applies to this Segment */
         for(info = VG_(next_seginfo)(NULL);
             info != NULL;
             info = VG_(next_seginfo)(info)) {
            if (VG_(seg_overlaps)(s, VG_(seg_start)(info), VG_(seg_size)(info)))
            {
               s->symtab = (SegInfo *)info;
               VG_(symtab_incref)((SegInfo *)info);
            }
         }
      }
   }

   /* clean up: coalesce with mergable neighbours */
   merge_segments(addr, len);
}
425
426void VG_(map_fd_segment)(Addr addr, UInt len, UInt prot, UInt flags,
427 Int fd, ULong off, const Char *filename)
428{
429 struct vki_stat st;
430 Char *name = NULL;
431
432 st.st_dev = 0;
433 st.st_ino = 0;
434
435 if (fd != -1 && (flags & SF_FILE)) {
436 vg_assert((off & (VKI_BYTES_PER_PAGE-1)) == 0);
437
438 if (VG_(fstat)(fd, &st) < 0)
439 flags &= ~SF_FILE;
440 }
441
442 if ((flags & SF_FILE) && filename == NULL && fd != -1)
443 name = VG_(resolve_filename)(fd);
444
445 if (filename == NULL)
446 filename = name;
447
448 VG_(map_file_segment)(addr, len, prot, flags, st.st_dev, st.st_ino, off, filename);
449
450 if (name)
451 VG_(arena_free)(VG_AR_CORE, name);
452}
453
454void VG_(map_segment)(Addr addr, UInt len, UInt prot, UInt flags)
455{
456 flags &= ~SF_FILE;
457
458 VG_(map_file_segment)(addr, len, prot, flags, 0, 0, 0, 0);
459}
460
461/* set new protection flags on an address range */
462void VG_(mprotect_range)(Addr a, UInt len, UInt prot)
463{
464 Segment *s, *next;
465 static const Bool debug = False || mem_debug;
466
467 if (debug)
468 VG_(printf)("mprotect_range(%p, %d, %x)\n", a, len, prot);
469
470 /* Everything must be page-aligned */
471 vg_assert((a & (VKI_BYTES_PER_PAGE-1)) == 0);
fitzhardinge92360792003-12-24 10:11:11 +0000472 len = PGROUNDUP(len);
fitzhardinge98abfc72003-12-16 02:05:15 +0000473
fitzhardinge1a303042003-12-22 08:48:50 +0000474 VG_(split_segment)(a);
475 VG_(split_segment)(a+len);
fitzhardinge98abfc72003-12-16 02:05:15 +0000476
477 for(s = VG_(SkipList_Find)(&sk_segments, &a);
478 s != NULL && s->addr < a+len;
479 s = next)
480 {
481 next = VG_(SkipNode_Next)(&sk_segments, s);
482 if (s->addr < a)
483 continue;
484
485 s->prot = prot;
486 }
487
488 merge_segments(a, len);
489}
490
/* Find a free address range of at least len bytes, suitable for a new
   mapping.  If addr is non-zero it is used as a starting hint; the
   search proceeds upwards from there.  The chosen range is padded
   with a one-page redzone on each side.  Returns 0 if no space can
   be found below the relevant limit. */
Addr VG_(find_map_space)(Addr addr, UInt len, Bool for_client)
{
   static const Bool debug = False || mem_debug;
   Segment *s;
   Addr ret;
   /* don't allocate beyond the end of the client or valgrind area */
   Addr limit = (for_client ? VG_(client_end) : VG_(valgrind_mmap_end));

   if (addr == 0)
      addr = for_client ? VG_(client_mapbase) : VG_(valgrind_base);
   else {
      /* leave space for redzone and still try to get the exact
         address asked for */
      addr -= VKI_BYTES_PER_PAGE;
   }
   ret = addr;

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   len = PGROUNDUP(len);

   len += VKI_BYTES_PER_PAGE * 2; /* leave redzone gaps before and after mapping */

   if (debug)
      VG_(printf)("find_map_space: ret starts as %p-%p client=%d\n",
                  ret, ret+len, for_client);

   /* Walk the segments, bumping the candidate start past every
      segment that overlaps [ret, ret+len).  The loop terminates when
      the candidate range is clear of all existing segments. */
   for(s = VG_(SkipList_Find)(&sk_segments, &ret);
       s != NULL && s->addr < (ret+len);
       s = VG_(SkipNode_Next)(&sk_segments, s))
   {
      if (debug)
         VG_(printf)("s->addr=%p len=%d (%p) ret=%p\n",
                     s->addr, s->len, s->addr+s->len, ret);

      if (s->addr < (ret + len) && (s->addr + s->len) > ret)
         ret = s->addr+s->len;
   }

   if (debug) {
      if (s)
         VG_(printf)(" s->addr=%p ->len=%d\n", s->addr, s->len);
      else
         VG_(printf)(" s == NULL\n");
   }

   if ((limit - len) < ret)
      ret = 0; /* no space */
   else
      ret += VKI_BYTES_PER_PAGE; /* skip leading redzone */

   if (debug)
      VG_(printf)("find_map_space(%p, %d, %d) -> %p\n",
                  addr, len, for_client, ret);

   return ret;
}
547
548Segment *VG_(find_segment)(Addr a)
549{
550 return VG_(SkipList_Find)(&sk_segments, &a);
551}
552
/* Return the lowest-addressed segment in the list, or NULL if the
   list is empty.  Use with VG_(next_segment) to iterate. */
Segment *VG_(first_segment)(void)
{
   return VG_(SkipNode_First)(&sk_segments);
}
557
/* Return the segment following s in address order, or NULL if s is
   the last one. */
Segment *VG_(next_segment)(Segment *s)
{
   return VG_(SkipNode_Next)(&sk_segments, s);
}
sewardjde4a1d02002-03-22 01:27:54 +0000562
sewardjde4a1d02002-03-22 01:27:54 +0000563/*------------------------------------------------------------*/
564/*--- Tracking permissions around %esp changes. ---*/
565/*------------------------------------------------------------*/
566
567/*
568 The stack
569 ~~~~~~~~~
570 The stack's segment seems to be dynamically extended downwards
571 by the kernel as the stack pointer moves down. Initially, a
572 1-page (4k) stack is allocated. When %esp moves below that for
573 the first time, presumably a page fault occurs. The kernel
574 detects that the faulting address is in the range from %esp upwards
575 to the current valid stack. It then extends the stack segment
576 downwards for enough to cover the faulting address, and resumes
577 the process (invisibly). The process is unaware of any of this.
578
579 That means that Valgrind can't spot when the stack segment is
580 being extended. Fortunately, we want to precisely and continuously
581 update stack permissions around %esp, so we need to spot all
582 writes to %esp anyway.
583
584 The deal is: when %esp is assigned a lower value, the stack is
585 being extended. Create a secondary maps to fill in any holes
586 between the old stack ptr and this one, if necessary. Then
587 mark all bytes in the area just "uncovered" by this %esp change
588 as write-only.
589
590 When %esp goes back up, mark the area receded over as unreadable
591 and unwritable.
592
593 Just to record the %esp boundary conditions somewhere convenient:
594 %esp always points to the lowest live byte in the stack. All
595 addresses below %esp are not live; those at and above it are.
596*/
597
sewardjde4a1d02002-03-22 01:27:54 +0000598/* Kludgey ... how much does %esp have to change before we reckon that
599 the application is switching stacks ? */
njn9b007f62003-04-07 14:40:25 +0000600#define VG_PLAUSIBLE_STACK_SIZE 8000000
601#define VG_HUGE_DELTA (VG_PLAUSIBLE_STACK_SIZE / 4)
sewardjde4a1d02002-03-22 01:27:54 +0000602
/* This function gets called if new_mem_stack and/or die_mem_stack are
   tracked by the skin, and one of the specialised cases (eg. new_mem_stack_4)
   isn't used in preference */
__attribute__((regparm(1)))
void VG_(unknown_esp_update)(Addr new_ESP)
{
   Addr old_ESP = VG_(get_archreg)(R_ESP);
   Int delta = (Int)new_ESP - (Int)old_ESP;

   if (delta < -(VG_HUGE_DELTA) || VG_HUGE_DELTA < delta) {
      /* %esp has changed by more than HUGE_DELTA.  We take this to mean
         that the application is switching to a new stack, for whatever
         reason.

         JRS 20021001: following discussions with John Regehr, if a stack
         switch happens, it seems best not to mess at all with memory
         permissions.  Seems to work well with Netscape 4.X.  Really the
         only remaining difficulty is knowing exactly when a stack switch is
         happening. */
      if (VG_(clo_verbosity) > 1)
         VG_(message)(Vg_UserMsg, "Warning: client switching stacks? "
                                  "%%esp: %p --> %p", old_ESP, new_ESP);
   } else if (delta < 0) {
      /* %esp moved down: the newly exposed area becomes live stack */
      VG_TRACK( new_mem_stack, new_ESP, -delta );

   } else if (delta > 0) {
      /* %esp moved up: the area receded over is dead */
      VG_TRACK( die_mem_stack, old_ESP, delta );
   }
}
632
jsgf855d93d2003-10-13 22:26:55 +0000633static jmp_buf segv_jmpbuf;
634
/* SIGSEGV handler used by VG_(is_addressable): bail straight out of
   the faulting access via longjmp.  The longjmp never returns, so
   the panic below is only a backstop. */
static void segv_handler(Int seg)
{
   __builtin_longjmp(segv_jmpbuf, 1);
   VG_(core_panic)("longjmp failed");
}
640
/*
   Test if a piece of memory is addressable by setting up a temporary
   SIGSEGV handler, then try to touch the memory. No signal = good,
   signal = bad.
 */
Bool VG_(is_addressable)(Addr p, Int size)
{
   /* volatile: cp is live across the setjmp/longjmp, and the loop's
      reads must not be optimised away */
   volatile Char * volatile cp = (volatile Char *)p;
   volatile Bool ret;
   vki_ksigaction sa, origsa;
   vki_ksigset_t mask;

   vg_assert(size > 0);

   /* install our SIGSEGV handler, saving the original handler and
      the current signal mask so both can be restored afterwards */
   sa.ksa_handler = segv_handler;
   sa.ksa_flags = 0;
   VG_(ksigfillset)(&sa.ksa_mask);
   VG_(ksigaction)(VKI_SIGSEGV, &sa, &origsa);
   VG_(ksigprocmask)(VKI_SIG_SETMASK, NULL, &mask);

   if (__builtin_setjmp(&segv_jmpbuf) == 0) {
      /* touch each byte; a fault longjmps to the else branch */
      while(size--)
         *cp++;
      ret = True;
   } else
      ret = False;

   /* restore the original handler and signal mask */
   VG_(ksigaction)(VKI_SIGSEGV, &origsa, NULL);
   VG_(ksigprocmask)(VKI_SIG_SETMASK, &mask, NULL);

   return ret;
}
673
sewardjde4a1d02002-03-22 01:27:54 +0000674/*--------------------------------------------------------------------*/
nethercote88a90162004-07-10 16:59:25 +0000675/*--- Manage allocation of memory on behalf of the client ---*/
fitzhardinge98abfc72003-12-16 02:05:15 +0000676/*--------------------------------------------------------------------*/
677
nethercote57e36b32004-07-10 14:56:28 +0000678// Returns 0 on failure.
nethercoteb4250ae2004-07-10 16:50:09 +0000679Addr VG_(client_alloc)(Addr addr, UInt len, UInt prot, UInt sf_flags)
fitzhardinge98abfc72003-12-16 02:05:15 +0000680{
681 len = PGROUNDUP(len);
682
nethercoteb4250ae2004-07-10 16:50:09 +0000683 if (!(sf_flags & SF_FIXED))
fitzhardinge98abfc72003-12-16 02:05:15 +0000684 addr = VG_(find_map_space)(addr, len, True);
685
nethercote57e36b32004-07-10 14:56:28 +0000686 // Don't do the mapping if we couldn't find space!
687 if (0 == addr)
688 return 0;
689
fitzhardinge98abfc72003-12-16 02:05:15 +0000690 if (VG_(mmap)((void *)addr, len, prot,
691 VKI_MAP_FIXED | VKI_MAP_PRIVATE | VKI_MAP_ANONYMOUS | VKI_MAP_CLIENT,
nethercoteb4250ae2004-07-10 16:50:09 +0000692 sf_flags | SF_CORE, -1, 0) == (void *)addr)
693 {
fitzhardinge98abfc72003-12-16 02:05:15 +0000694 return addr;
695 }
696
697 return 0;
698}
699
700void VG_(client_free)(Addr addr)
701{
702 Segment *s = VG_(find_segment)(addr);
703
704 if (s == NULL || s->addr != addr || !(s->flags & SF_CORE)) {
705 VG_(message)(Vg_DebugMsg, "VG_(client_free)(%p) - no CORE memory found there", addr);
706 return;
707 }
708
709 VG_(munmap)((void *)s->addr, s->len);
710}
711
nethercote88a90162004-07-10 16:59:25 +0000712/*--------------------------------------------------------------------*/
713/*--- Querying memory layout ---*/
714/*--------------------------------------------------------------------*/
715
fitzhardinge98abfc72003-12-16 02:05:15 +0000716Bool VG_(is_client_addr)(Addr a)
717{
718 return a >= VG_(client_base) && a < VG_(client_end);
719}
720
721Bool VG_(is_shadow_addr)(Addr a)
722{
723 return a >= VG_(shadow_base) && a < VG_(shadow_end);
724}
725
726Bool VG_(is_valgrind_addr)(Addr a)
727{
728 return a >= VG_(valgrind_base) && a < VG_(valgrind_end);
729}
730
/* Lowest address of the client's address range. */
Addr VG_(get_client_base)(void)
{
   return VG_(client_base);
}
735
/* One past the highest address of the client's address range. */
Addr VG_(get_client_end)(void)
{
   return VG_(client_end);
}
740
/* Size of the client's address range in bytes (note: returned as an
   Addr, matching the sibling accessors). */
Addr VG_(get_client_size)(void)
{
   return VG_(client_end)-VG_(client_base);
}
745
/* Lowest address of the shadow-memory region. */
Addr VG_(get_shadow_base)(void)
{
   return VG_(shadow_base);
}
750
/* One past the highest address of the shadow-memory region. */
Addr VG_(get_shadow_end)(void)
{
   return VG_(shadow_end);
}
755
/* Size of the shadow-memory region in bytes (returned as an Addr,
   matching the sibling accessors). */
Addr VG_(get_shadow_size)(void)
{
   return VG_(shadow_end)-VG_(shadow_base);
}
760
/*--------------------------------------------------------------------*/
/*--- Manage shadow memory on behalf of the skin                   ---*/
/*--------------------------------------------------------------------*/
fitzhardinge98abfc72003-12-16 02:05:15 +0000764
/* Make the shadow pages covering [p, p+sz) accessible, and optionally
   have the skin initialise each page via its init_shadow_page
   callback.  Only valid for skins that need shadow memory and define
   init_shadow_page. */
void VG_(init_shadow_range)(Addr p, UInt sz, Bool call_init)
{
   if (0)
      VG_(printf)("init_shadow_range(%p, %d)\n", p, sz);

   vg_assert(VG_(needs).shadow_memory);
   vg_assert(VG_(defined_init_shadow_page)());

   /* round [p, p+sz) outwards to whole pages */
   sz = PGROUNDUP(p+sz) - PGROUNDDN(p);
   p = PGROUNDDN(p);

   VG_(mprotect)((void *)p, sz, VKI_PROT_READ|VKI_PROT_WRITE);

   if (call_init)
      while(sz) {
         /* ask the skin to initialize each page */
         VG_TRACK( init_shadow_page, PGROUNDDN(p) );

         p += VKI_BYTES_PER_PAGE;
         sz -= VKI_BYTES_PER_PAGE;
      }
}
787
788void *VG_(shadow_alloc)(UInt size)
789{
790 static Addr shadow_alloc = 0;
791 void *ret;
792
793 vg_assert(VG_(needs).shadow_memory);
794 vg_assert(!VG_(defined_init_shadow_page)());
795
796 size = PGROUNDUP(size);
797
798 if (shadow_alloc == 0)
799 shadow_alloc = VG_(shadow_base);
800
801 if (shadow_alloc >= VG_(shadow_end))
802 return 0;
803
804 ret = (void *)shadow_alloc;
805 VG_(mprotect)(ret, size, VKI_PROT_READ|VKI_PROT_WRITE);
806
807 shadow_alloc += size;
808
809 return ret;
810}
811
812/*--------------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000813/*--- end vg_memory.c ---*/
814/*--------------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000815