blob: c8b42baabefab1b0aa13c588d4328c5acbbb55cd [file] [log] [blame]
sewardjde4a1d02002-03-22 01:27:54 +00001
2/*--------------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00003/*--- Memory-related stuff: segment initialisation and tracking, ---*/
4/*--- stack operations ---*/
sewardjde4a1d02002-03-22 01:27:54 +00005/*--- vg_memory.c ---*/
6/*--------------------------------------------------------------------*/
7
8/*
njnc9539842002-10-02 13:26:35 +00009 This file is part of Valgrind, an extensible x86 protected-mode
10 emulator for monitoring program execution on x86-Unixes.
sewardjde4a1d02002-03-22 01:27:54 +000011
njn0e1b5142003-04-15 14:58:06 +000012 Copyright (C) 2000-2003 Julian Seward
sewardjde4a1d02002-03-22 01:27:54 +000013 jseward@acm.org
sewardjde4a1d02002-03-22 01:27:54 +000014
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
njn25e49d8e72002-09-23 09:36:25 +000030 The GNU General Public License is contained in the file COPYING.
sewardjde4a1d02002-03-22 01:27:54 +000031*/
32
33#include "vg_include.h"
34
fitzhardinge98abfc72003-12-16 02:05:15 +000035#include <stddef.h>
36
sewardja4495682002-10-21 07:29:59 +000037/* Define to debug the memory-leak-detector. */
38/* #define VG_DEBUG_LEAKCHECK */
39
fitzhardinge98abfc72003-12-16 02:05:15 +000040static const Bool mem_debug = False;
41
42static Int addrcmp(const void *ap, const void *bp)
43{
44 Addr a = *(Addr *)ap;
45 Addr b = *(Addr *)bp;
46 Int ret;
47
48 if (a == b)
49 ret = 0;
50 else
51 ret = (a < b) ? -1 : 1;
52
53 return ret;
54}
55
56static Char *straddr(void *p)
57{
58 static Char buf[16];
59
60 VG_(sprintf)(buf, "%p", *(Addr *)p);
61
62 return buf;
63}
64
/* Ordered (by start address) skiplist of every mapping we know about. */
static SkipList sk_segments = SKIPLIST_INIT(Segment, addr, addrcmp, straddr, VG_AR_CORE);
66
67/*--------------------------------------------------------------*/
68/*--- Maintain an ordered list of all the client's mappings ---*/
69/*--------------------------------------------------------------*/
70
71Bool VG_(seg_contains)(const Segment *s, Addr p, UInt len)
72{
73 Addr se = s->addr+s->len;
74 Addr pe = p+len;
75
76 vg_assert(pe >= p);
77
78 return (p >= s->addr && pe <= se);
79}
80
81Bool VG_(seg_overlaps)(const Segment *s, Addr p, UInt len)
82{
83 Addr se = s->addr+s->len;
84 Addr pe = p+len;
85
86 vg_assert(pe >= p);
87
88 return (p < se && pe > s->addr);
89}
90
/* Prepare a Segment structure for recycling by freeing everything
   hanging off it. */
static void recycleseg(Segment *s)
{
   /* any cached translations of code in this range are now stale */
   if (s->flags & SF_CODE)
      VG_(invalidate_translations)(s->addr, s->len, False);

   /* the filename string is owned by this Segment */
   if (s->filename != NULL)
      VG_(arena_free)(VG_AR_CORE, (Char *)s->filename);

   /* keep the SegInfo, if any - it probably still applies */
}
103
/* When freeing a Segment, also clean up every one else's ideas of
   what was going on in that range of memory */
static void freeseg(Segment *s)
{
   recycleseg(s);
   /* symbol tables are shared between segments (see the incref in
      VG_(split_segment)), so drop only our reference here */
   if (s->symtab != NULL) {
      VG_(symtab_decref)(s->symtab, s->addr, s->len);
      s->symtab = NULL;
   }

   VG_(SkipNode_Free)(&sk_segments, s);
}
116
fitzhardinge1a303042003-12-22 08:48:50 +0000117/* Split a segment at address a, returning the new segment */
118Segment *VG_(split_segment)(Addr a)
fitzhardinge98abfc72003-12-16 02:05:15 +0000119{
120 Segment *s = VG_(SkipList_Find)(&sk_segments, &a);
121 Segment *ns;
122 Int delta;
123
124 vg_assert((a & (VKI_BYTES_PER_PAGE-1)) == 0);
125
126 /* missed */
127 if (s == NULL)
128 return NULL;
129
130 /* a at or beyond endpoint */
131 if (s->addr == a || a >= (s->addr+s->len))
132 return NULL;
133
134 vg_assert(a > s->addr && a < (s->addr+s->len));
135
136 ns = VG_(SkipNode_Alloc)(&sk_segments);
137
138 *ns = *s;
139
140 delta = a - s->addr;
141 ns->addr += delta;
142 ns->offset += delta;
143 ns->len -= delta;
144
fitzhardinge1a4adf02003-12-22 10:42:59 +0000145 if (s->filename != NULL)
146 ns->filename = VG_(arena_strdup)(VG_AR_CORE, s->filename);
147
fitzhardinge98abfc72003-12-16 02:05:15 +0000148 if (ns->symtab != NULL)
149 VG_(symtab_incref)(ns->symtab);
150
151 VG_(SkipList_Insert)(&sk_segments, ns);
152
153 return ns;
154}
155
156/* This unmaps all the segments in the range [addr, addr+len); any
157 partial mappings at the ends are truncated. */
158void VG_(unmap_range)(Addr addr, UInt len)
159{
160 Segment *s;
161 Segment *next;
fitzhardinge1a4adf02003-12-22 10:42:59 +0000162 static const Bool debug = True || mem_debug;
163 Addr end;
fitzhardinge98abfc72003-12-16 02:05:15 +0000164
165 if (len == 0)
166 return;
167
fitzhardinge1a4adf02003-12-22 10:42:59 +0000168 len = PGROUNDUP(len);
169 vg_assert(addr == PGROUNDDN(addr));
170
fitzhardinge98abfc72003-12-16 02:05:15 +0000171 if (debug)
172 VG_(printf)("unmap_range(%p, %d)\n", addr, len);
173
fitzhardinge1a4adf02003-12-22 10:42:59 +0000174 end = addr+len;
fitzhardinge98abfc72003-12-16 02:05:15 +0000175
176 /* Everything must be page-aligned */
177 vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
178 vg_assert((len & (VKI_BYTES_PER_PAGE-1)) == 0);
179
180 for(s = VG_(SkipList_Find)(&sk_segments, &addr);
181 s != NULL && s->addr < (addr+len);
182 s = next) {
fitzhardinge1a303042003-12-22 08:48:50 +0000183 Addr seg_end = s->addr + s->len;
fitzhardinge98abfc72003-12-16 02:05:15 +0000184
185 /* fetch next now in case we end up deleting this segment */
186 next = VG_(SkipNode_Next)(&sk_segments, s);
187
188 if (debug)
fitzhardinge1a303042003-12-22 08:48:50 +0000189 VG_(printf)("unmap: addr=%p-%p s=%p ->addr=%p-%p len=%d\n",
fitzhardinge1a4adf02003-12-22 10:42:59 +0000190 addr, end, s, s->addr, seg_end, s->len);
fitzhardinge98abfc72003-12-16 02:05:15 +0000191
fitzhardinge1a303042003-12-22 08:48:50 +0000192 if (!VG_(seg_overlaps)(s, addr, len)) {
193 if (debug)
194 VG_(printf)(" (no overlap)\n");
fitzhardinge98abfc72003-12-16 02:05:15 +0000195 continue;
fitzhardinge1a303042003-12-22 08:48:50 +0000196 }
fitzhardinge98abfc72003-12-16 02:05:15 +0000197
198 /* 4 cases: */
fitzhardinge1a303042003-12-22 08:48:50 +0000199 if (addr > s->addr &&
200 addr < seg_end &&
201 end >= seg_end) {
fitzhardinge98abfc72003-12-16 02:05:15 +0000202 /* this segment's tail is truncated by [addr, addr+len)
203 -> truncate tail
204 */
205 s->len = addr - s->addr;
206
207 if (debug)
208 VG_(printf)(" case 1: s->len=%d\n", s->len);
fitzhardinge1a303042003-12-22 08:48:50 +0000209 } else if (addr <= s->addr && end >= seg_end) {
fitzhardinge98abfc72003-12-16 02:05:15 +0000210 /* this segment is completely contained within [addr, addr+len)
211 -> delete segment
212 */
213 Segment *rs = VG_(SkipList_Remove)(&sk_segments, &s->addr);
214 vg_assert(rs == s);
215 freeseg(s);
216
217 if (debug)
218 VG_(printf)(" case 2: s==%p deleted\n", s);
fitzhardinge1a303042003-12-22 08:48:50 +0000219 } else if (addr <= s->addr && end > s->addr && end < seg_end) {
fitzhardinge98abfc72003-12-16 02:05:15 +0000220 /* this segment's head is truncated by [addr, addr+len)
221 -> truncate head
222 */
223 Int delta = (addr+len) - s->addr;
224
fitzhardinge1a4adf02003-12-22 10:42:59 +0000225 if (debug)
226 VG_(printf)(" case 3: s->addr=%p s->len=%d delta=%d\n", s->addr, s->len, delta);
227
fitzhardinge98abfc72003-12-16 02:05:15 +0000228 s->addr += delta;
229 s->offset += delta;
230 s->len -= delta;
231
fitzhardinge1a4adf02003-12-22 10:42:59 +0000232 vg_assert(s->len != 0);
fitzhardinge1a303042003-12-22 08:48:50 +0000233 } else if (addr > s->addr && end < seg_end) {
fitzhardinge98abfc72003-12-16 02:05:15 +0000234 /* [addr, addr+len) is contained within a single segment
235 -> split segment into 3, delete middle portion
236 */
237 Segment *middle, *rs;
238
fitzhardinge1a303042003-12-22 08:48:50 +0000239 middle = VG_(split_segment)(addr);
240 VG_(split_segment)(addr+len);
fitzhardinge98abfc72003-12-16 02:05:15 +0000241
242 vg_assert(middle->addr == addr);
243 rs = VG_(SkipList_Remove)(&sk_segments, &addr);
244 vg_assert(rs == middle);
245
246 freeseg(rs);
247
248 if (debug)
249 VG_(printf)(" case 4: subrange %p-%p deleted\n",
250 addr, addr+len);
251 }
252 }
253}
254
fitzhardinge1a4adf02003-12-22 10:42:59 +0000255/* Return true if two segments are adjacent and mergable (s1 is
256 assumed to have a lower ->addr than s2) */
fitzhardinge98abfc72003-12-16 02:05:15 +0000257static inline Bool neighbours(Segment *s1, Segment *s2)
258{
259 if (s1->addr+s1->len != s2->addr)
260 return False;
261
262 if (s1->flags != s2->flags)
263 return False;
264
265 if (s1->prot != s2->prot)
266 return False;
267
268 if (s1->symtab != s2->symtab)
269 return False;
270
271 if (s1->flags & SF_FILE){
272 if ((s1->offset + s1->len) != s2->offset)
273 return False;
274 if (s1->dev != s2->dev)
275 return False;
276 if (s1->ino != s2->ino)
277 return False;
278 }
279
280 return True;
281}
282
/* If possible, merge segment with its neighbours - some segments,
   including s, may be destroyed in the process */
static void merge_segments(Addr a, UInt len)
{
   Segment *s;
   Segment *next;

   vg_assert((a & (VKI_BYTES_PER_PAGE-1)) == 0);
   vg_assert((len & (VKI_BYTES_PER_PAGE-1)) == 0);

   /* start the scan one page below a, so that a mapping immediately
      preceding [a, a+len) is also considered for merging */
   a -= VKI_BYTES_PER_PAGE;
   len += VKI_BYTES_PER_PAGE;

   for(s = VG_(SkipList_Find)(&sk_segments, &a);
       s != NULL && s->addr < (a+len);) {
      next = VG_(SkipNode_Next)(&sk_segments, s);

      if (next && neighbours(s, next)) {
	 Segment *rs;

	 if (0)
	    VG_(printf)("merge %p-%p with %p-%p\n",
			s->addr, s->addr+s->len,
			next->addr, next->addr+next->len);
	 /* absorb next into s; advance past next BEFORE it is freed */
	 s->len += next->len;
	 s = VG_(SkipNode_Next)(&sk_segments, next);

	 rs = VG_(SkipList_Remove)(&sk_segments, &next->addr);
	 vg_assert(next == rs);
	 freeseg(next);
      } else
	 s = next;
   }
}
317
/* Record a mapping of [addr, addr+len) with protection 'prot' and
   segment flags 'flags'.  (dev, ino, off, filename) identify the
   backing file, if any.  A pre-existing segment with identical
   addr/len is recycled in place; otherwise any overlapping mappings
   are removed first and a fresh Segment is inserted.  May also read
   a symbol table from the mapping; finally merges adjacent
   compatible segments.  addr must be page-aligned; len is rounded up. */
void VG_(map_file_segment)(Addr addr, UInt len, UInt prot, UInt flags,
			   UInt dev, UInt ino, ULong off, const Char *filename)
{
   Segment *s;
   static const Bool debug = False || mem_debug;
   Bool recycled;   /* True if we reused an existing Segment in place */

   if (debug)
      VG_(printf)("map_file_segment(%p, %d, %x, %x, %4x, %d, %ld, %s)\n",
		  addr, len, prot, flags, dev, ino, off, filename);

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   len = PGROUNDUP(len);

   /* First look to see what already exists around here */
   s = VG_(SkipList_Find)(&sk_segments, &addr);

   if (s != NULL && s->addr == addr && s->len == len) {
      /* This probably means we're just updating the flags */
      recycled = True;
      recycleseg(s);

      /* If we had a symtab, but the new mapping is incompatible, then
	 free up the old symtab in preparation for a new one. */
      if (s->symtab != NULL &&
	  (!(s->flags & SF_FILE) ||
	   !(flags & SF_FILE) ||
	   s->dev != dev ||
	   s->ino != ino ||
	   s->offset != off)) {
	 VG_(symtab_decref)(s->symtab, s->addr, s->len);
	 s->symtab = NULL;
      }
   } else {
      /* no exact match: clear out whatever overlaps and start fresh */
      recycled = False;
      VG_(unmap_range)(addr, len);

      s = VG_(SkipNode_Alloc)(&sk_segments);

      s->addr = addr;
      s->len = len;
      s->symtab = NULL;
   }

   s->flags = flags;
   s->prot = prot;
   s->dev = dev;
   s->ino = ino;
   s->offset = off;

   /* the Segment owns its own copy of the filename */
   if (filename != NULL)
      s->filename = VG_(arena_strdup)(VG_AR_CORE, filename);
   else
      s->filename = NULL;

   if (debug) {
      Segment *ts;
      for(ts = VG_(SkipNode_First)(&sk_segments);
	  ts != NULL;
	  ts = VG_(SkipNode_Next)(&sk_segments, ts))
	 VG_(printf)("list: %8p->%8p ->%d (0x%x) prot=%x flags=%x\n",
		     ts, ts->addr, ts->len, ts->len, ts->prot, ts->flags);

      VG_(printf)("inserting s=%p addr=%p len=%d\n",
		  s, s->addr, s->len);
   }

   if (!recycled)
      VG_(SkipList_Insert)(&sk_segments, s);

   /* If this mapping is of the beginning of a file, isn't part of
      Valgrind, is at least readable and seems to contain an object
      file, then try reading symbols from it. */
   if ((flags & (SF_MMAP|SF_NOSYMS)) == SF_MMAP &&
       s->symtab == NULL) {
      if (off == 0 &&
	  filename != NULL &&
	  (prot & (VKI_PROT_READ|VKI_PROT_EXEC)) == (VKI_PROT_READ|VKI_PROT_EXEC) &&
	  len >= VKI_BYTES_PER_PAGE &&
	  s->symtab == NULL &&
	  VG_(is_object_file)((void *)addr)) {

	 s->symtab = VG_(read_seg_symbols)(s);

	 if (s->symtab != NULL)
	    s->flags |= SF_DYNLIB;
      } else if (flags & SF_MMAP) {
	 const SegInfo *info;

	 /* Otherwise see if an existing symtab applies to this Segment */
	 for(info = VG_(next_seginfo)(NULL);
	     info != NULL;
	     info = VG_(next_seginfo)(info)) {
	    if (VG_(seg_overlaps)(s, VG_(seg_start)(info), VG_(seg_size)(info))) {
	       s->symtab = (SegInfo *)info;
	       VG_(symtab_incref)((SegInfo *)info);
	    }
	 }
      }
   }

   /* clean up */
   merge_segments(addr, len);
}
423
/* Record a mapping backed by file descriptor fd: stat the fd to get
   dev/ino, resolve a filename for it if the caller supplied none,
   then hand everything to VG_(map_file_segment).  If the fd cannot
   be stat'd, SF_FILE is dropped and the mapping is recorded as
   anonymous. */
void VG_(map_fd_segment)(Addr addr, UInt len, UInt prot, UInt flags,
			 Int fd, ULong off, const Char *filename)
{
   struct vki_stat st;
   Char *name = NULL;   /* filename we resolved ourselves (freed below) */

   st.st_dev = 0;
   st.st_ino = 0;

   if (fd != -1 && (flags & SF_FILE)) {
      vg_assert((off & (VKI_BYTES_PER_PAGE-1)) == 0);

      if (VG_(fstat)(fd, &st) < 0)
	 flags &= ~SF_FILE;
   }

   if ((flags & SF_FILE) && filename == NULL && fd != -1)
      name = VG_(resolve_filename)(fd);

   if (filename == NULL)
      filename = name;

   /* map_file_segment takes its own copy of the filename */
   VG_(map_file_segment)(addr, len, prot, flags, st.st_dev, st.st_ino, off, filename);

   if (name)
      VG_(arena_free)(VG_AR_CORE, name);
}
451
452void VG_(map_segment)(Addr addr, UInt len, UInt prot, UInt flags)
453{
454 flags &= ~SF_FILE;
455
456 VG_(map_file_segment)(addr, len, prot, flags, 0, 0, 0, 0);
457}
458
/* set new protection flags on an address range.  Both a and len must
   be page-aligned.  Segments straddling the range's edges are split
   first so only whole segments change protection. */
void VG_(mprotect_range)(Addr a, UInt len, UInt prot)
{
   Segment *s, *next;
   static const Bool debug = False || mem_debug;

   if (debug)
      VG_(printf)("mprotect_range(%p, %d, %x)\n", a, len, prot);

   /* Everything must be page-aligned */
   vg_assert((a & (VKI_BYTES_PER_PAGE-1)) == 0);
   vg_assert((len & (VKI_BYTES_PER_PAGE-1)) == 0);

   /* cut segments at the range boundaries so protection changes don't
      bleed outside [a, a+len) */
   VG_(split_segment)(a);
   VG_(split_segment)(a+len);

   for(s = VG_(SkipList_Find)(&sk_segments, &a);
       s != NULL && s->addr < a+len;
       s = next)
   {
      next = VG_(SkipNode_Next)(&sk_segments, s);
      /* Find may return the segment just below a; skip it */
      if (s->addr < a)
	 continue;

      s->prot = prot;
   }

   /* segments that now agree on prot/flags can be re-coalesced */
   merge_segments(a, len);
}
488
/* Find a gap of at least len bytes (plus a one-page redzone on each
   side) in which to place a new mapping, at or above addr, limited to
   the client or valgrind area according to for_client.  Returns the
   usable start address (past the leading redzone), or 0 if no space. */
Addr VG_(find_map_space)(Addr addr, UInt len, Bool for_client)
{
   Segment *s;
   Addr ret;
   static const Bool debug = False || mem_debug;
   Addr limit = (for_client ? VG_(client_end) : VG_(valgrind_mmap_end));

   if (addr == 0)
      addr = for_client ? VG_(client_mapbase) : VG_(valgrind_base);
   else {
      /* leave space for redzone and still try to get the exact
	 address asked for */
      addr -= VKI_BYTES_PER_PAGE;
   }
   ret = addr;

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   len = PGROUNDUP(len);

   len += VKI_BYTES_PER_PAGE * 2; /* leave redzone gaps before and after mapping */

   if (debug)
      VG_(printf)("find_map_space: ret starts as %p-%p client=%d\n",
		  ret, ret+len, for_client);

   /* first-fit scan: bump ret past every segment that overlaps the
      candidate window [ret, ret+len) */
   for(s = VG_(SkipList_Find)(&sk_segments, &ret);
       s != NULL && s->addr < (ret+len);
       s = VG_(SkipNode_Next)(&sk_segments, s))
   {
      if (debug)
	 VG_(printf)("s->addr=%p len=%d (%p) ret=%p\n",
		     s->addr, s->len, s->addr+s->len, ret);

      if (s->addr < (ret + len) && (s->addr + s->len) > ret)
	 ret = s->addr+s->len;
   }

   if (debug) {
      if (s)
	 VG_(printf)("  s->addr=%p ->len=%d\n", s->addr, s->len);
      else
	 VG_(printf)("  s == NULL\n");
   }

   if ((limit - len) < ret)
      ret = 0; /* no space */
   else
      ret += VKI_BYTES_PER_PAGE; /* skip leading redzone */

   if (debug)
      VG_(printf)("find_map_space(%p, %d, %d) -> %p\n",
		  addr, len, for_client, ret);

   return ret;
}
545
546Segment *VG_(find_segment)(Addr a)
547{
548 return VG_(SkipList_Find)(&sk_segments, &a);
549}
550
551Segment *VG_(next_segment)(Segment *s)
552{
553 return VG_(SkipNode_Next)(&sk_segments, s);
554}
sewardjde4a1d02002-03-22 01:27:54 +0000555
njn25e49d8e72002-09-23 09:36:25 +0000556/*--------------------------------------------------------------*/
557/*--- Initialise program data/text etc on program startup. ---*/
558/*--------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000559
/* Callback for VG_(parse_procselfmaps), pass 1: record ONLY
   Valgrind's own mappings (no symbol loading), so we know where the
   free address space is before anything else gets allocated.
   rr/ww/xx and is_stack_segment are ignored in this pass. */
static
void build_valgrind_map_callback ( Addr start, UInt size,
                                   Char rr, Char ww, Char xx, UInt dev, UInt ino,
                                   ULong foffset, const UChar* filename )
{
   UInt prot = 0;
   UInt flags;
   Bool is_stack_segment;   /* computed but unused in this pass */
   Bool verbose = False || mem_debug; /* set to True for debugging */

   is_stack_segment = (start == VG_(clstk_base) && (start+size) == VG_(clstk_end));

   prot = 0;
   flags = SF_MMAP|SF_NOSYMS;

   if (start >= VG_(valgrind_base) && (start+size) <= VG_(valgrind_end))
      flags |= SF_VALGRIND;

   /* Only record valgrind mappings for now, without loading any
      symbols.  This is so we know where the free space is before we
      start allocating more memory (note: heap is OK, it's just mmap
      which is the problem here). */
   if (flags & SF_VALGRIND) {
      if (verbose)
	 VG_(printf)("adding segment %08p-%08p prot=%x flags=%4x filename=%s\n",
		     start, start+size, prot, flags, filename);

      VG_(map_file_segment)(start, size, prot, flags, dev, ino, foffset, filename);
   }
}
590
/* Callback for VG_(parse_procselfmaps), pass 2: record every mapping
   found at startup, notify the skin of client-visible memory, and
   mark the area below %esp in the stack segment as dead. */
static
void build_segment_map_callback ( Addr start, UInt size,
                                  Char rr, Char ww, Char xx, UInt dev, UInt ino,
                                  ULong foffset, const UChar* filename )
{
   UInt prot = 0;
   UInt flags;
   Bool is_stack_segment;
   Bool verbose = False || mem_debug; /* set to True for debugging */
   Addr r_esp;

   is_stack_segment = (start == VG_(clstk_base) && (start+size) == VG_(clstk_end));

   /* translate the /proc/self/maps permission letters to VKI_PROT_* */
   if (rr == 'r')
      prot |= VKI_PROT_READ;
   if (ww == 'w')
      prot |= VKI_PROT_WRITE;
   if (xx == 'x')
      prot |= VKI_PROT_EXEC;


   if (is_stack_segment)
      flags = SF_STACK | SF_GROWDOWN;
   else
      flags = SF_EXEC|SF_MMAP;

   if (filename != NULL)
      flags |= SF_FILE;

   if (start >= VG_(valgrind_base) && (start+size) <= VG_(valgrind_end))
      flags |= SF_VALGRIND;

   if (verbose)
      VG_(printf)("adding segment %08p-%08p prot=%x flags=%4x filename=%s\n",
		  start, start+size, prot, flags, filename);

   VG_(map_file_segment)(start, size, prot, flags, dev, ino, foffset, filename);

   /* only tell the skin about memory the client can actually see */
   if (VG_(is_client_addr)(start) && VG_(is_client_addr)(start+size-1))
      VG_TRACK( new_mem_startup, start, size, rr=='r', ww=='w', xx=='x' );

   /* If this is the stack segment mark all below %esp as noaccess. */
   /* NOTE(review): 40/4 is presumably the %esp slot's byte offset in
      the saved register block - confirm against the baseBlock layout */
   r_esp = VG_(m_state_static)[40/4];
   if (is_stack_segment) {
      if (0)
	 VG_(message)(Vg_DebugMsg, "invalidating stack area: %x .. %x",
		      start,r_esp);
      VG_TRACK( die_mem_stack, start, r_esp-start );
   }
}
641
642
/* 1. Records startup segments from /proc/pid/maps.  Takes special note
   of the executable ones, because if they're munmap()ed we need to
   discard translations.  Also checks there's no exe segment overlaps.

   Note that `read_from_file' is false;  we read /proc/self/maps into a
   buffer at the start of VG_(main) so that any superblocks mmap'd by
   calls to VG_(malloc)() by SK_({pre,post}_clo_init) aren't erroneously
   thought of as being owned by the client.
 */
void VG_(init_memory) ( void )
{
   /* 1 */
   /* reserve Valgrind's kickstart, heap and stack */
   VG_(map_segment)(VG_(valgrind_mmap_end), VG_(valgrind_end)-VG_(valgrind_mmap_end),
		    VKI_PROT_NONE, SF_VALGRIND|SF_FIXED);

   /* work out what's mapped where, and read interesting symtabs */
   VG_(parse_procselfmaps) ( build_valgrind_map_callback );	/* just Valgrind mappings */
   VG_(parse_procselfmaps) ( build_segment_map_callback );	/* everything */

   /* kludge: some newer kernels place a "sysinfo" page up high, with
      vsyscalls in it, and possibly some other stuff in the future. */
   if (VG_(sysinfo_page_exists)) {
      // 2003-Sep-25, njn: Jeremy thinks the sysinfo page probably doesn't
      // have any symbols that need to be loaded.  So just treat it like
      // a non-executable page.
      //VG_(new_exeseg_mmap)( VG_(sysinfo_page_addr), 4096 );
      VG_TRACK( new_mem_startup, VG_(sysinfo_page_addr), 4096,
                True, True, True );
   }
}
674
sewardjde4a1d02002-03-22 01:27:54 +0000675/*------------------------------------------------------------*/
676/*--- Tracking permissions around %esp changes. ---*/
677/*------------------------------------------------------------*/
678
679/*
680 The stack
681 ~~~~~~~~~
682 The stack's segment seems to be dynamically extended downwards
683 by the kernel as the stack pointer moves down. Initially, a
684 1-page (4k) stack is allocated. When %esp moves below that for
685 the first time, presumably a page fault occurs. The kernel
686 detects that the faulting address is in the range from %esp upwards
687 to the current valid stack. It then extends the stack segment
688 downwards for enough to cover the faulting address, and resumes
689 the process (invisibly). The process is unaware of any of this.
690
691 That means that Valgrind can't spot when the stack segment is
692 being extended. Fortunately, we want to precisely and continuously
693 update stack permissions around %esp, so we need to spot all
694 writes to %esp anyway.
695
696 The deal is: when %esp is assigned a lower value, the stack is
697 being extended. Create a secondary maps to fill in any holes
698 between the old stack ptr and this one, if necessary. Then
699 mark all bytes in the area just "uncovered" by this %esp change
700 as write-only.
701
702 When %esp goes back up, mark the area receded over as unreadable
703 and unwritable.
704
705 Just to record the %esp boundary conditions somewhere convenient:
706 %esp always points to the lowest live byte in the stack. All
707 addresses below %esp are not live; those at and above it are.
708*/
709
/* Kludgey ... how much does %esp have to change before we reckon that
   the application is switching stacks ? */
#define VG_PLAUSIBLE_STACK_SIZE 8000000
#define VG_HUGE_DELTA (VG_PLAUSIBLE_STACK_SIZE / 4)

/* This function gets called if new_mem_stack and/or die_mem_stack are
   tracked by the skin, and one of the specialised cases (eg. new_mem_stack_4)
   isn't used in preference */
__attribute__((regparm(1)))
void VG_(unknown_esp_update)(Addr new_ESP)
{
   Addr old_ESP = VG_(get_archreg)(R_ESP);
   Int  delta = (Int)new_ESP - (Int)old_ESP;

   if (delta < -(VG_HUGE_DELTA) || VG_HUGE_DELTA < delta) {
      /* %esp has changed by more than HUGE_DELTA.  We take this to mean
	 that the application is switching to a new stack, for whatever
	 reason.

	 JRS 20021001: following discussions with John Regehr, if a stack
	 switch happens, it seems best not to mess at all with memory
	 permissions.  Seems to work well with Netscape 4.X.  Really the
	 only remaining difficulty is knowing exactly when a stack switch is
	 happening. */
      if (VG_(clo_verbosity) > 1)
	 VG_(message)(Vg_UserMsg, "Warning: client switching stacks? "
		                  "%%esp: %p --> %p", old_ESP, new_ESP);
   } else if (delta < 0) {
      /* %esp moved down: the newly-exposed bytes become live stack */
      VG_TRACK( new_mem_stack, new_ESP, -delta );

   } else if (delta > 0) {
      /* %esp moved up: the bytes below the old %esp are now dead */
      VG_TRACK( die_mem_stack, old_ESP, delta );
   }
}
744
/* jump target for escaping the SIGSEGV raised inside VG_(is_addressable) */
static jmp_buf segv_jmpbuf;

/* Temporary SIGSEGV handler: bail straight back out to the setjmp in
   VG_(is_addressable).  The 'seg' signal-number argument is unused. */
static void segv_handler(Int seg)
{
   __builtin_longjmp(segv_jmpbuf, 1);
   VG_(core_panic)("longjmp failed");   /* longjmp never returns */
}
752
/*
   Test if a piece of memory is addressable by setting up a temporary
   SIGSEGV handler, then try to touch the memory.  No signal = good,
   signal = bad.
 */
Bool VG_(is_addressable)(Addr p, Int size)
{
   /* both locals are volatile so their values survive the longjmp
      taken out of segv_handler */
   volatile Char * volatile cp = (volatile Char *)p;
   volatile Bool ret;
   vki_ksigaction sa, origsa;
   vki_ksigset_t mask;

   vg_assert(size > 0);

   /* install the temporary SIGSEGV handler, remembering the original
      handler and signal mask so they can be restored afterwards */
   sa.ksa_handler = segv_handler;
   sa.ksa_flags = 0;
   VG_(ksigfillset)(&sa.ksa_mask);
   VG_(ksigaction)(VKI_SIGSEGV, &sa, &origsa);
   VG_(ksigprocmask)(VKI_SIG_SETMASK, NULL, &mask);

   if (__builtin_setjmp(&segv_jmpbuf) == 0) {
      /* touch every byte; a fault longjmps to the else branch */
      while(size--)
	 *cp++;
      ret = True;
   } else
      ret = False;

   /* restore the original SIGSEGV disposition and signal mask */
   VG_(ksigaction)(VKI_SIGSEGV, &origsa, NULL);
   VG_(ksigprocmask)(VKI_SIG_SETMASK, &mask, NULL);

   return ret;
}
785
sewardjde4a1d02002-03-22 01:27:54 +0000786/*--------------------------------------------------------------------*/
fitzhardinge98abfc72003-12-16 02:05:15 +0000787/*--- manage allocation of memory on behalf of the client ---*/
788/*--------------------------------------------------------------------*/
789
/* Allocate memory on behalf of the client: pick an address (or honour
   the caller's with SF_FIXED), mmap anonymous pages there, and record
   the segment with SF_CORE set.  Returns the address, or 0 on failure. */
Addr VG_(client_alloc)(Addr addr, UInt len, UInt prot, UInt flags)
{
   len = PGROUNDUP(len);

   if (!(flags & SF_FIXED))
      addr = VG_(find_map_space)(addr, len, True);

   /* mark as core-allocated so VG_(client_free) will accept it */
   flags |= SF_CORE;

   if (VG_(mmap)((void *)addr, len, prot,
		 VKI_MAP_FIXED | VKI_MAP_PRIVATE | VKI_MAP_ANONYMOUS | VKI_MAP_CLIENT,
		 -1, 0) == (void *)addr) {
      VG_(map_segment)(addr, len, prot, flags);
      return addr;
   }

   return 0;
}
808
/* Free a block previously handed out by VG_(client_alloc); addr must
   be the exact start of an SF_CORE segment, else the call is ignored
   (with a debug message). */
void VG_(client_free)(Addr addr)
{
   Segment *s = VG_(find_segment)(addr);

   if (s == NULL || s->addr != addr || !(s->flags & SF_CORE)) {
      VG_(message)(Vg_DebugMsg, "VG_(client_free)(%p) - no CORE memory found there", addr);
      return;
   }

   /* NOTE(review): relies on VG_(munmap) updating the segment list
      (presumably via VG_(unmap_range)) - confirm */
   VG_(munmap)((void *)s->addr, s->len);
}
820
/* Does address a lie within the client's address range? */
Bool VG_(is_client_addr)(Addr a)
{
   return a >= VG_(client_base) && a < VG_(client_end);
}

/* Does address a lie within the shadow-memory range? */
Bool VG_(is_shadow_addr)(Addr a)
{
   return a >= VG_(shadow_base) && a < VG_(shadow_end);
}

/* Does address a lie within Valgrind's own range? */
Bool VG_(is_valgrind_addr)(Addr a)
{
   return a >= VG_(valgrind_base) && a < VG_(valgrind_end);
}
835
/* Trivial accessors for the client and shadow memory layout. */

Addr VG_(get_client_base)(void)
{
   return VG_(client_base);
}

Addr VG_(get_client_end)(void)
{
   return VG_(client_end);
}

/* NB: returns a size despite the Addr return type */
Addr VG_(get_client_size)(void)
{
   return VG_(client_end)-VG_(client_base);
}

Addr VG_(get_shadow_base)(void)
{
   return VG_(shadow_base);
}

Addr VG_(get_shadow_end)(void)
{
   return VG_(shadow_end);
}

/* NB: returns a size despite the Addr return type */
Addr VG_(get_shadow_size)(void)
{
   return VG_(shadow_end)-VG_(shadow_base);
}
865
866
/* Make the shadow-memory pages covering [p, p+sz) read/write
   accessible, and (if call_init) ask the skin to initialise each
   page via its init_shadow_page tracker. */
void VG_(init_shadow_range)(Addr p, UInt sz, Bool call_init)
{
   if (0)
      VG_(printf)("init_shadow_range(%p, %d)\n", p, sz);

   vg_assert(VG_(needs).shadow_memory);
   vg_assert(VG_(defined_init_shadow_page)());

   /* widen to whole-page boundaries */
   sz = PGROUNDUP(p+sz) - PGROUNDDN(p);
   p = PGROUNDDN(p);

   VG_(mprotect)((void *)p, sz, VKI_PROT_READ|VKI_PROT_WRITE);

   if (call_init)
      while(sz) {
	 /* ask the skin to initialize each page */
	 VG_TRACK( init_shadow_page, PGROUNDDN(p) );

	 p += VKI_BYTES_PER_PAGE;
	 sz -= VKI_BYTES_PER_PAGE;
      }
}
889
/* Simple bump-pointer allocator carving read/write pages out of the
   shadow-memory region; memory is never returned.  Returns NULL (0)
   when the region is exhausted. */
void *VG_(shadow_alloc)(UInt size)
{
   static Addr shadow_alloc = 0;   /* next unallocated shadow address */
   void *ret;

   vg_assert(VG_(needs).shadow_memory);
   vg_assert(!VG_(defined_init_shadow_page)());

   size = PGROUNDUP(size);

   /* lazy init: start at the bottom of the shadow region */
   if (shadow_alloc == 0)
      shadow_alloc = VG_(shadow_base);

   /* NOTE(review): only the current pointer is checked, not
      shadow_alloc+size, so the final allocation may extend past
      VG_(shadow_end) - confirm this is intended */
   if (shadow_alloc >= VG_(shadow_end))
      return 0;

   ret = (void *)shadow_alloc;
   VG_(mprotect)(ret, size, VKI_PROT_READ|VKI_PROT_WRITE);

   shadow_alloc += size;

   return ret;
}
913
914/*--------------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000915/*--- end vg_memory.c ---*/
916/*--------------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000917