
/*--------------------------------------------------------------------*/
/*--- Memory-related stuff: segment initialisation and tracking,  ---*/
/*--- stack operations                                             ---*/
/*---                                                  vg_memory.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an extensible x86 protected-mode
   emulator for monitoring program execution on x86-Unixes.

   Copyright (C) 2000-2003 Julian Seward
   jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "vg_include.h"

#include <stddef.h>

/* Define to debug the memory-leak-detector. */
/* #define VG_DEBUG_LEAKCHECK */

static const Bool mem_debug = False;

static Int addrcmp(const void *ap, const void *bp)
{
   Addr a = *(Addr *)ap;
   Addr b = *(Addr *)bp;
   Int ret;

   if (a == b)
      ret = 0;
   else
      ret = (a < b) ? -1 : 1;

   return ret;
}

static Char *straddr(void *p)
{
   static Char buf[16];

   VG_(sprintf)(buf, "%p", *(Addr *)p);

   return buf;
}

static SkipList sk_segments = SKIPLIST_INIT(Segment, addr, addrcmp, straddr, VG_AR_CORE);
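
/* The client's segments are kept in a skiplist, keyed and ordered by
   start address (addrcmp above).  VG_(SkipList_Find) is used as an
   ordered lookup: callers treat its result as the segment nearest the
   key (or NULL), and always re-check for themselves whether the key
   really falls inside the returned segment -- see split_segment() and
   VG_(unmap_range) below for the idiom. */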

/*--------------------------------------------------------------*/
/*--- Maintain an ordered list of all the client's mappings  ---*/
/*--------------------------------------------------------------*/

Bool VG_(seg_contains)(const Segment *s, Addr p, UInt len)
{
   Addr se = s->addr+s->len;
   Addr pe = p+len;

   vg_assert(pe >= p);

   return (p >= s->addr && pe <= se);
}

Bool VG_(seg_overlaps)(const Segment *s, Addr p, UInt len)
{
   Addr se = s->addr+s->len;
   Addr pe = p+len;

   vg_assert(pe >= p);

   return (p < se && pe > s->addr);
}
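
/* A worked example of the two half-open-interval tests above, with
   made-up addresses: take a segment s with s->addr == 0x8000 and
   s->len == 0x2000, i.e. covering [0x8000, 0xA000).  Then
   seg_contains(s, 0x8000, 0x1000) is True, since [0x8000, 0x9000)
   lies wholly inside the segment; seg_overlaps(s, 0x9000, 0x2000) is
   True but seg_contains is False for that range, since
   [0x9000, 0xB000) only shares [0x9000, 0xA000) with the segment. */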

/* Prepare a Segment structure for recycling by freeing everything
   hanging off it. */
static void recycleseg(Segment *s)
{
   if (s->flags & SF_CODE)
      VG_(invalidate_translations)(s->addr, s->len, False);

   if (s->filename != NULL)
      VG_(arena_free)(VG_AR_CORE, (Char *)s->filename);

   /* keep the SegInfo, if any - it probably still applies */
}

/* When freeing a Segment, also clean up everyone else's idea of
   what was going on in that range of memory */
static void freeseg(Segment *s)
{
   recycleseg(s);
   if (s->symtab != NULL) {
      VG_(symtab_decref)(s->symtab, s->addr, s->len);
      s->symtab = NULL;
   }

   VG_(SkipNode_Free)(&sk_segments, s);
}

/* Split a segment at address a */
static Segment *split_segment(Addr a)
{
   Segment *s = VG_(SkipList_Find)(&sk_segments, &a);
   Segment *ns;
   Int delta;

   vg_assert((a & (VKI_BYTES_PER_PAGE-1)) == 0);

   /* missed */
   if (s == NULL)
      return NULL;

   /* a at or beyond endpoint */
   if (s->addr == a || a >= (s->addr+s->len))
      return NULL;

   vg_assert(a > s->addr && a < (s->addr+s->len));

   ns = VG_(SkipNode_Alloc)(&sk_segments);

   *ns = *s;

   delta = a - s->addr;
   ns->addr   += delta;
   ns->offset += delta;
   ns->len    -= delta;
   s->len      = delta;   /* truncate the original so the two halves abut */

   if (ns->symtab != NULL)
      VG_(symtab_incref)(ns->symtab);

   VG_(SkipList_Insert)(&sk_segments, ns);

   return ns;
}
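
/* For illustration (made-up numbers): splitting a file-backed segment
   covering [0x8000, 0xC000) at file offset 0 with split_segment(0xA000)
   leaves the original covering [0x8000, 0xA000) and inserts a new
   segment covering [0xA000, 0xC000) at file offset 0x2000; if the
   segment has a symtab, its reference count is bumped since both
   halves now point at it. */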

/* This unmaps all the segments in the range [addr, addr+len); any
   partial mappings at the ends are truncated. */
void VG_(unmap_range)(Addr addr, UInt len)
{
   Segment *s;
   Segment *next;
   static const Bool debug = False || mem_debug;

   if (len == 0)
      return;

   if (debug)
      VG_(printf)("unmap_range(%p, %d)\n", addr, len);

   len = PGROUNDUP(addr+len)-PGROUNDDN(addr);
   addr = PGROUNDDN(addr);

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   vg_assert((len  & (VKI_BYTES_PER_PAGE-1)) == 0);

   for(s = VG_(SkipList_Find)(&sk_segments, &addr);
       s != NULL && s->addr < (addr+len);
       s = next) {

      /* fetch next now in case we end up deleting this segment */
      next = VG_(SkipNode_Next)(&sk_segments, s);

      if (debug)
         VG_(printf)("unmap: addr=%p s=%p ->addr=%p len=%d end=%p\n",
                     addr, s, s->addr, s->len, s->addr+s->len);

      if (!VG_(seg_overlaps)(s, addr, len))
         continue;

      /* 4 cases: */
      if (addr > s->addr && addr < (s->addr + s->len) &&
          (addr+len) >= (s->addr + s->len)) {
         /* this segment's tail is truncated by [addr, addr+len)
            -> truncate tail
         */
         s->len = addr - s->addr;

         if (debug)
            VG_(printf)("  case 1: s->len=%d\n", s->len);
      } else if (addr <= s->addr && (addr+len) >= (s->addr + s->len)) {
         /* this segment is completely contained within [addr, addr+len)
            -> delete segment
         */
         Segment *rs = VG_(SkipList_Remove)(&sk_segments, &s->addr);
         vg_assert(rs == s);
         freeseg(s);

         if (debug)
            VG_(printf)("  case 2: s==%p deleted\n", s);
      } else if (addr <= s->addr && (addr+len) > s->addr &&
                 (addr+len) < (s->addr+s->len)) {
         /* this segment's head is truncated by [addr, addr+len)
            -> truncate head
         */
         Int delta = (addr+len) - s->addr;

         s->addr   += delta;
         s->offset += delta;
         s->len    -= delta;

         if (debug)
            VG_(printf)("  case 3: s->addr=%p s->len=%d delta=%d\n", s->addr, s->len, delta);
      } else if (addr > s->addr && (addr+len) < (s->addr + s->len)) {
         /* [addr, addr+len) is contained within a single segment
            -> split segment into 3, delete middle portion
         */
         Segment *middle, *rs;

         middle = split_segment(addr);
         split_segment(addr+len);

         vg_assert(middle->addr == addr);
         rs = VG_(SkipList_Remove)(&sk_segments, &addr);
         vg_assert(rs == middle);

         freeseg(rs);

         if (debug)
            VG_(printf)("  case 4: subrange %p-%p deleted\n",
                        addr, addr+len);
      }
   }
}

/* If possible, merge segment with its neighbours - some segments,
   including s, may be destroyed in the process */
static inline Bool neighbours(Segment *s1, Segment *s2)
{
   if (s1->addr+s1->len != s2->addr)
      return False;

   if (s1->flags != s2->flags)
      return False;

   if (s1->prot != s2->prot)
      return False;

   if (s1->symtab != s2->symtab)
      return False;

   if (s1->flags & SF_FILE) {
      if ((s1->offset + s1->len) != s2->offset)
         return False;
      if (s1->dev != s2->dev)
         return False;
      if (s1->ino != s2->ino)
         return False;
   }

   return True;
}
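
/* Illustrative example (made-up numbers): two file-backed segments
   [0x8000, 0xA000) at offset 0 and [0xA000, 0xC000) at offset 0x2000
   of the same dev/ino, with identical prot and flags, are neighbours
   and will be coalesced back into one segment by merge_segments()
   below; the offset check is what stops two adjacent mappings of
   unrelated parts of a file from being fused. */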

/* Merge segments in the address range if they're adjacent and
   compatible */
static void merge_segments(Addr a, UInt len)
{
   Segment *s;
   Segment *next;

   vg_assert((a   & (VKI_BYTES_PER_PAGE-1)) == 0);
   vg_assert((len & (VKI_BYTES_PER_PAGE-1)) == 0);

   a   -= VKI_BYTES_PER_PAGE;
   len += VKI_BYTES_PER_PAGE;

   for(s = VG_(SkipList_Find)(&sk_segments, &a);
       s != NULL && s->addr < (a+len);) {
      next = VG_(SkipNode_Next)(&sk_segments, s);

      if (next && neighbours(s, next)) {
         Segment *rs;

         if (0)
            VG_(printf)("merge %p-%p with %p-%p\n",
                        s->addr, s->addr+s->len,
                        next->addr, next->addr+next->len);
         s->len += next->len;
         s = VG_(SkipNode_Next)(&sk_segments, next);

         rs = VG_(SkipList_Remove)(&sk_segments, &next->addr);
         vg_assert(next == rs);
         freeseg(next);
      } else
         s = next;
   }
}

void VG_(map_file_segment)(Addr addr, UInt len, UInt prot, UInt flags,
                           UInt dev, UInt ino, ULong off, const Char *filename)
{
   Segment *s;
   static const Bool debug = False || mem_debug;
   Bool recycled;

   if (debug)
      VG_(printf)("map_file_segment(%p, %d, %x, %x, %4x, %d, %ld, %s)\n",
                  addr, len, prot, flags, dev, ino, off, filename);

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   len = PGROUNDUP(len);

   /* First look to see what already exists around here */
   s = VG_(SkipList_Find)(&sk_segments, &addr);

   if (s != NULL && s->addr == addr && s->len == len) {
      /* This probably means we're just updating the flags */
      recycled = True;
      recycleseg(s);

      /* If we had a symtab, but the new mapping is incompatible, then
         free up the old symtab in preparation for a new one. */
      if (s->symtab != NULL &&
          (!(s->flags & SF_FILE) ||
           !(flags & SF_FILE) ||
           s->dev != dev ||
           s->ino != ino ||
           s->offset != off)) {
         VG_(symtab_decref)(s->symtab, s->addr, s->len);
         s->symtab = NULL;
      }
   } else {
      recycled = False;
      VG_(unmap_range)(addr, len);

      s = VG_(SkipNode_Alloc)(&sk_segments);

      s->addr   = addr;
      s->len    = len;
      s->symtab = NULL;
   }

   s->flags  = flags;
   s->prot   = prot;
   s->dev    = dev;
   s->ino    = ino;
   s->offset = off;

   if (filename != NULL)
      s->filename = VG_(arena_strdup)(VG_AR_CORE, filename);
   else
      s->filename = NULL;

   if (debug) {
      Segment *ts;
      for(ts = VG_(SkipNode_First)(&sk_segments);
          ts != NULL;
          ts = VG_(SkipNode_Next)(&sk_segments, ts))
         VG_(printf)("list: %8p->%8p ->%d (0x%x) prot=%x flags=%x\n",
                     ts, ts->addr, ts->len, ts->len, ts->prot, ts->flags);

      VG_(printf)("inserting s=%p addr=%p len=%d\n",
                  s, s->addr, s->len);
   }

   if (!recycled)
      VG_(SkipList_Insert)(&sk_segments, s);

   /* If this mapping is of the beginning of a file, isn't part of
      Valgrind, is at least readable and seems to contain an object
      file, then try reading symbols from it. */
   if ((flags & (SF_MMAP|SF_NOSYMS)) == SF_MMAP &&
       s->symtab == NULL) {
      if (off == 0 &&
          filename != NULL &&
          (prot & (VKI_PROT_READ|VKI_PROT_EXEC)) == (VKI_PROT_READ|VKI_PROT_EXEC) &&
          len >= VKI_BYTES_PER_PAGE &&
          s->symtab == NULL &&
          VG_(is_object_file)((void *)addr)) {

         s->symtab = VG_(read_seg_symbols)(s);

         if (s->symtab != NULL)
            s->flags |= SF_DYNLIB;
      } else if (flags & SF_MMAP) {
         const SegInfo *info;

         /* Otherwise see if an existing symtab applies to this Segment */
         for(info = VG_(next_seginfo)(NULL);
             info != NULL;
             info = VG_(next_seginfo)(info)) {
            if (VG_(seg_overlaps)(s, VG_(seg_start)(info), VG_(seg_size)(info))) {
               s->symtab = (SegInfo *)info;
               VG_(symtab_incref)((SegInfo *)info);
            }
         }
      }
   }

   /* clean up */
   merge_segments(addr, len);
}

void VG_(map_fd_segment)(Addr addr, UInt len, UInt prot, UInt flags,
                         Int fd, ULong off, const Char *filename)
{
   struct vki_stat st;
   Char *name = NULL;

   st.st_dev = 0;
   st.st_ino = 0;

   if (fd != -1 && (flags & SF_FILE)) {
      vg_assert((off & (VKI_BYTES_PER_PAGE-1)) == 0);

      if (VG_(fstat)(fd, &st) < 0)
         flags &= ~SF_FILE;
   }

   if ((flags & SF_FILE) && filename == NULL && fd != -1)
      name = VG_(resolve_filename)(fd);

   if (filename == NULL)
      filename = name;

   VG_(map_file_segment)(addr, len, prot, flags, st.st_dev, st.st_ino, off, filename);

   if (name)
      VG_(arena_free)(VG_AR_CORE, name);
}

void VG_(map_segment)(Addr addr, UInt len, UInt prot, UInt flags)
{
   flags &= ~SF_FILE;

   VG_(map_file_segment)(addr, len, prot, flags, 0, 0, 0, NULL);
}

/* set new protection flags on an address range */
void VG_(mprotect_range)(Addr a, UInt len, UInt prot)
{
   Segment *s, *next;
   static const Bool debug = False || mem_debug;

   if (debug)
      VG_(printf)("mprotect_range(%p, %d, %x)\n", a, len, prot);

   /* Everything must be page-aligned */
   vg_assert((a   & (VKI_BYTES_PER_PAGE-1)) == 0);
   vg_assert((len & (VKI_BYTES_PER_PAGE-1)) == 0);

   split_segment(a);
   split_segment(a+len);

   for(s = VG_(SkipList_Find)(&sk_segments, &a);
       s != NULL && s->addr < a+len;
       s = next)
   {
      next = VG_(SkipNode_Next)(&sk_segments, s);
      if (s->addr < a)
         continue;

      s->prot = prot;
   }

   merge_segments(a, len);
}
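
/* Worked example (made-up numbers): mprotect_range(0x9000, 0x1000,
   VKI_PROT_READ) on a single segment covering [0x8000, 0xB000) first
   splits it into [0x8000, 0x9000), [0x9000, 0xA000) and [0xA000,
   0xB000), so only whole segments ever change protection; the middle
   one gets the new prot, and merge_segments() then re-coalesces
   whichever neighbours still agree on prot and flags. */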

Addr VG_(find_map_space)(Addr addr, UInt len, Bool for_client)
{
   Segment *s;
   Addr ret;
   static const Bool debug = False || mem_debug;
   Addr limit = (for_client ? VG_(client_end) : VG_(valgrind_mmap_end));

   if (addr == 0)
      addr = for_client ? VG_(client_mapbase) : VG_(valgrind_base);
   else {
      /* leave space for redzone and still try to get the exact
         address asked for */
      addr -= VKI_BYTES_PER_PAGE;
   }
   ret = addr;

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   len = PGROUNDUP(len);

   len += VKI_BYTES_PER_PAGE * 2; /* leave redzone gaps before and after mapping */

   if (debug)
      VG_(printf)("find_map_space: ret starts as %p-%p client=%d\n",
                  ret, ret+len, for_client);

   for(s = VG_(SkipList_Find)(&sk_segments, &ret);
       s != NULL && s->addr < (ret+len);
       s = VG_(SkipNode_Next)(&sk_segments, s))
   {
      if (debug)
         VG_(printf)("s->addr=%p len=%d (%p) ret=%p\n",
                     s->addr, s->len, s->addr+s->len, ret);

      if (s->addr < (ret + len) && (s->addr + s->len) > ret)
         ret = s->addr+s->len;
   }

   if (debug) {
      if (s)
         VG_(printf)("  s->addr=%p ->len=%d\n", s->addr, s->len);
      else
         VG_(printf)("  s == NULL\n");
   }

   if ((limit - len) < ret)
      ret = 0;                   /* no space */
   else
      ret += VKI_BYTES_PER_PAGE; /* skip leading redzone */

   if (debug)
      VG_(printf)("find_map_space(%p, %d, %d) -> %p\n",
                  addr, len, for_client, ret);

   return ret;
}
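
/* Redzone arithmetic, by example: a request for 0x2000 bytes is first
   page-rounded, then grown by two guard pages to 0x4000 bytes.  If the
   hole found starts at 0x9000, the caller is handed back 0xA000, so
   the eventual mapping [0xA000, 0xC000) is separated from its
   neighbours by one unmapped page on each side. */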

Segment *VG_(find_segment)(Addr a)
{
   return VG_(SkipList_Find)(&sk_segments, &a);
}

Segment *VG_(next_segment)(Segment *s)
{
   return VG_(SkipNode_Next)(&sk_segments, s);
}

/*--------------------------------------------------------------*/
/*--- Initialise program data/text etc on program startup.   ---*/
/*--------------------------------------------------------------*/

static
void build_valgrind_map_callback ( Addr start, UInt size,
                                   Char rr, Char ww, Char xx, UInt dev, UInt ino,
                                   ULong foffset, const UChar* filename )
{
   UInt prot = 0;
   UInt flags;
   Bool is_stack_segment;
   Bool verbose = False || mem_debug; /* set to True for debugging */

   is_stack_segment = (start == VG_(clstk_base) && (start+size) == VG_(clstk_end));

   flags = SF_MMAP|SF_NOSYMS;

   if (start >= VG_(valgrind_base) && (start+size) <= VG_(valgrind_end))
      flags |= SF_VALGRIND;

   /* Only record valgrind mappings for now, without loading any
      symbols.  This is so we know where the free space is before we
      start allocating more memory (note: heap is OK, it's just mmap
      which is the problem here). */
   if (flags & SF_VALGRIND) {
      if (verbose)
         VG_(printf)("adding segment %08p-%08p prot=%x flags=%4x filename=%s\n",
                     start, start+size, prot, flags, filename);

      VG_(map_file_segment)(start, size, prot, flags, dev, ino, foffset, filename);
   }
}

static
void build_segment_map_callback ( Addr start, UInt size,
                                  Char rr, Char ww, Char xx, UInt dev, UInt ino,
                                  ULong foffset, const UChar* filename )
{
   UInt prot = 0;
   UInt flags;
   Bool is_stack_segment;
   Bool verbose = False || mem_debug; /* set to True for debugging */
   Addr r_esp;

   is_stack_segment = (start == VG_(clstk_base) && (start+size) == VG_(clstk_end));

   if (rr == 'r')
      prot |= VKI_PROT_READ;
   if (ww == 'w')
      prot |= VKI_PROT_WRITE;
   if (xx == 'x')
      prot |= VKI_PROT_EXEC;

   if (is_stack_segment)
      flags = SF_STACK | SF_GROWDOWN;
   else
      flags = SF_EXEC|SF_MMAP;

   if (filename != NULL)
      flags |= SF_FILE;

   if (start >= VG_(valgrind_base) && (start+size) <= VG_(valgrind_end))
      flags |= SF_VALGRIND;

   if (verbose)
      VG_(printf)("adding segment %08p-%08p prot=%x flags=%4x filename=%s\n",
                  start, start+size, prot, flags, filename);

   VG_(map_file_segment)(start, size, prot, flags, dev, ino, foffset, filename);

   if (VG_(is_client_addr)(start) && VG_(is_client_addr)(start+size-1))
      VG_TRACK( new_mem_startup, start, size, rr=='r', ww=='w', xx=='x' );

   /* If this is the stack segment, mark all below %esp as noaccess. */
   r_esp = VG_(m_state_static)[40/4];
   if (is_stack_segment) {
      if (0)
         VG_(message)(Vg_DebugMsg, "invalidating stack area: %x .. %x",
                      start,r_esp);
      VG_TRACK( die_mem_stack, start, r_esp-start );
   }
}


/* 1. Records startup segments from /proc/pid/maps.  Takes special note
      of the executable ones, because if they're munmap()ed we need to
      discard translations.  Also checks that there are no exe segment
      overlaps.

   Note that `read_from_file' is false; we read /proc/self/maps into a
   buffer at the start of VG_(main) so that any superblocks mmap'd by
   calls to VG_(malloc)() by SK_({pre,post}_clo_init) aren't erroneously
   thought of as being owned by the client.
 */
void VG_(init_memory) ( void )
{
   /* 1 */
   /* reserve Valgrind's kickstart, heap and stack */
   VG_(map_segment)(VG_(valgrind_mmap_end), VG_(valgrind_end)-VG_(valgrind_mmap_end),
                    VKI_PROT_NONE, SF_VALGRIND|SF_FIXED);

   /* work out what's mapped where, and read interesting symtabs */
   VG_(parse_procselfmaps) ( build_valgrind_map_callback ); /* just Valgrind mappings */
   VG_(parse_procselfmaps) ( build_segment_map_callback );  /* everything */

   /* kludge: some newer kernels place a "sysinfo" page up high, with
      vsyscalls in it, and possibly some other stuff in the future. */
   if (VG_(sysinfo_page_exists)) {
      // 2003-Sep-25, njn: Jeremy thinks the sysinfo page probably doesn't
      // have any symbols that need to be loaded.  So just treat it like
      // a non-executable page.
      //VG_(new_exeseg_mmap)( VG_(sysinfo_page_addr), 4096 );
      VG_TRACK( new_mem_startup, VG_(sysinfo_page_addr), 4096,
                True, True, True );
   }
}
660
sewardjde4a1d02002-03-22 01:27:54 +0000661/*------------------------------------------------------------*/
662/*--- Tracking permissions around %esp changes. ---*/
663/*------------------------------------------------------------*/
664
/*
   The stack
   ~~~~~~~~~
   The stack's segment seems to be dynamically extended downwards
   by the kernel as the stack pointer moves down.  Initially, a
   1-page (4k) stack is allocated.  When %esp moves below that for
   the first time, presumably a page fault occurs.  The kernel
   detects that the faulting address is in the range from %esp upwards
   to the current valid stack.  It then extends the stack segment
   downwards far enough to cover the faulting address, and resumes
   the process (invisibly).  The process is unaware of any of this.

   That means that Valgrind can't spot when the stack segment is
   being extended.  Fortunately, we want to precisely and continuously
   update stack permissions around %esp, so we need to spot all
   writes to %esp anyway.

   The deal is: when %esp is assigned a lower value, the stack is
   being extended.  Create secondary maps to fill in any holes
   between the old stack ptr and this one, if necessary.  Then
   mark all bytes in the area just "uncovered" by this %esp change
   as write-only.

   When %esp goes back up, mark the area receded over as unreadable
   and unwritable.

   Just to record the %esp boundary conditions somewhere convenient:
   %esp always points to the lowest live byte in the stack.  All
   addresses below %esp are not live; those at and above it are.
*/

/* Kludgey ... how much does %esp have to change before we reckon that
   the application is switching stacks ? */
#define VG_PLAUSIBLE_STACK_SIZE  8000000
#define VG_HUGE_DELTA            (VG_PLAUSIBLE_STACK_SIZE / 4)
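
/* So VG_HUGE_DELTA is 8000000/4 = 2000000 bytes, i.e. roughly 2MB:
   any single %esp move larger than that, in either direction, is
   treated below as a stack switch rather than ordinary stack growth
   or shrinkage. */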

/* This function gets called if new_mem_stack and/or die_mem_stack are
   tracked by the skin, and one of the specialised cases
   (eg. new_mem_stack_4) isn't used in preference. */
__attribute__((regparm(1)))
void VG_(unknown_esp_update)(Addr new_ESP)
{
   Addr old_ESP = VG_(get_archreg)(R_ESP);
   Int  delta   = (Int)new_ESP - (Int)old_ESP;

   if (delta < -(VG_HUGE_DELTA) || VG_HUGE_DELTA < delta) {
      /* %esp has changed by more than HUGE_DELTA.  We take this to mean
         that the application is switching to a new stack, for whatever
         reason.

         JRS 20021001: following discussions with John Regehr, if a stack
         switch happens, it seems best not to mess at all with memory
         permissions.  Seems to work well with Netscape 4.X.  Really the
         only remaining difficulty is knowing exactly when a stack switch is
         happening. */
      if (VG_(clo_verbosity) > 1)
         VG_(message)(Vg_UserMsg, "Warning: client switching stacks?  "
                                  "%%esp: %p --> %p", old_ESP, new_ESP);
   } else if (delta < 0) {
      VG_TRACK( new_mem_stack, new_ESP, -delta );

   } else if (delta > 0) {
      VG_TRACK( die_mem_stack, old_ESP, delta );
   }
}
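
/* By way of example: if %esp moves down from 0xBFFFA000 to 0xBFFF9FF0
   (delta == -16), the 16 bytes at [0xBFFF9FF0, 0xBFFFA000) have just
   become live stack, and the skin's new_mem_stack event fires for
   them; when %esp moves back up, die_mem_stack fires for the bytes
   receded over.  Only a jump of more than VG_HUGE_DELTA in either
   direction is written off as a stack switch and left alone. */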

static jmp_buf segv_jmpbuf;

static void segv_handler(Int seg)
{
   __builtin_longjmp(segv_jmpbuf, 1);
   VG_(core_panic)("longjmp failed");
}

/*
   Test if a piece of memory is addressable by setting up a temporary
   SIGSEGV handler, then try to touch the memory.  No signal = good,
   signal = bad.
 */
Bool VG_(is_addressable)(Addr p, Int size)
{
   volatile Char * volatile cp = (volatile Char *)p;
   volatile Bool ret;
   vki_ksigaction sa, origsa;
   vki_ksigset_t mask;

   vg_assert(size > 0);

   sa.ksa_handler = segv_handler;
   sa.ksa_flags = 0;
   VG_(ksigfillset)(&sa.ksa_mask);
   VG_(ksigaction)(VKI_SIGSEGV, &sa, &origsa);
   VG_(ksigprocmask)(VKI_SIG_SETMASK, NULL, &mask);

   if (__builtin_setjmp(&segv_jmpbuf) == 0) {
      while(size--)
         *cp++;
      ret = True;
   } else
      ret = False;

   VG_(ksigaction)(VKI_SIGSEGV, &origsa, NULL);
   VG_(ksigprocmask)(VKI_SIG_SETMASK, &mask, NULL);

   return ret;
}
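
/* Sketch of a typical use (hypothetical caller, error code assumed):
   a syscall wrapper that must peek at a user-supplied structure
   before Valgrind itself dereferences it might guard the access
   like this:

      if (!VG_(is_addressable)(arg, sizeof(vki_ksigaction)))
         return -VKI_EINVAL;

   Note the probe only reads; it says nothing about writability. */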

/*--------------------------------------------------------------------*/
/*--- manage allocation of memory on behalf of the client          ---*/
/*--------------------------------------------------------------------*/

Addr VG_(client_alloc)(Addr addr, UInt len, UInt prot, UInt flags)
{
   len = PGROUNDUP(len);

   if (!(flags & SF_FIXED))
      addr = VG_(find_map_space)(addr, len, True);

   flags |= SF_CORE;

   if (VG_(mmap)((void *)addr, len, prot,
                 VKI_MAP_FIXED | VKI_MAP_PRIVATE | VKI_MAP_ANONYMOUS | VKI_MAP_CLIENT,
                 -1, 0) == (void *)addr) {
      VG_(map_segment)(addr, len, prot, flags);
      return addr;
   }

   return 0;
}
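
/* Illustrative call (parameters made up): to hand the client one
   anonymous read-write page somewhere in its address space, the core
   could do

      Addr a = VG_(client_alloc)(0, VKI_BYTES_PER_PAGE,
                                 VKI_PROT_READ|VKI_PROT_WRITE, 0);

   passing addr == 0 and no SF_FIXED so that find_map_space() picks
   the place; a result of 0 means no space was found. */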

void VG_(client_free)(Addr addr)
{
   Segment *s = VG_(find_segment)(addr);

   if (s == NULL || s->addr != addr || !(s->flags & SF_CORE)) {
      VG_(message)(Vg_DebugMsg, "VG_(client_free)(%p) - no CORE memory found there", addr);
      return;
   }

   VG_(munmap)((void *)s->addr, s->len);
}

Bool VG_(is_client_addr)(Addr a)
{
   return a >= VG_(client_base) && a < VG_(client_end);
}

Bool VG_(is_shadow_addr)(Addr a)
{
   return a >= VG_(shadow_base) && a < VG_(shadow_end);
}

Bool VG_(is_valgrind_addr)(Addr a)
{
   return a >= VG_(valgrind_base) && a < VG_(valgrind_end);
}

Addr VG_(get_client_base)(void)
{
   return VG_(client_base);
}

Addr VG_(get_client_end)(void)
{
   return VG_(client_end);
}

Addr VG_(get_client_size)(void)
{
   return VG_(client_end)-VG_(client_base);
}

Addr VG_(get_shadow_base)(void)
{
   return VG_(shadow_base);
}

Addr VG_(get_shadow_end)(void)
{
   return VG_(shadow_end);
}

Addr VG_(get_shadow_size)(void)
{
   return VG_(shadow_end)-VG_(shadow_base);
}


void VG_(init_shadow_range)(Addr p, UInt sz, Bool call_init)
{
   if (0)
      VG_(printf)("init_shadow_range(%p, %d)\n", p, sz);

   vg_assert(VG_(needs).shadow_memory);
   vg_assert(VG_(defined_init_shadow_page)());

   sz = PGROUNDUP(p+sz) - PGROUNDDN(p);
   p  = PGROUNDDN(p);

   VG_(mprotect)((void *)p, sz, VKI_PROT_READ|VKI_PROT_WRITE);

   if (call_init)
      while(sz) {
         /* ask the skin to initialize each page */
         VG_TRACK( init_shadow_page, PGROUNDDN(p) );

         p  += VKI_BYTES_PER_PAGE;
         sz -= VKI_BYTES_PER_PAGE;
      }
}

void *VG_(shadow_alloc)(UInt size)
{
   static Addr shadow_alloc = 0;
   void *ret;

   vg_assert(VG_(needs).shadow_memory);
   vg_assert(!VG_(defined_init_shadow_page)());

   size = PGROUNDUP(size);

   if (shadow_alloc == 0)
      shadow_alloc = VG_(shadow_base);

   if (shadow_alloc >= VG_(shadow_end))
      return NULL;

   ret = (void *)shadow_alloc;
   VG_(mprotect)(ret, size, VKI_PROT_READ|VKI_PROT_WRITE);

   shadow_alloc += size;

   return ret;
}
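
/* shadow_alloc is a simple bump allocator over [shadow_base,
   shadow_end).  Example arithmetic: a request for 5000 bytes is
   rounded up to 8192 (two 4k pages), those pages are made read-write,
   and the cursor advances by 8192; NULL comes back once the cursor
   has already reached shadow_end. */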

/*--------------------------------------------------------------------*/
/*--- end                                              vg_memory.c ---*/
/*--------------------------------------------------------------------*/