
/*--------------------------------------------------------------------*/
/*--- Memory-related stuff: segment initialisation and tracking,  ---*/
/*--- stack operations                                             ---*/
/*---                                                  vg_memory.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an extensible x86 protected-mode
   emulator for monitoring program execution on x86-Unixes.

   Copyright (C) 2000-2003 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "vg_include.h"

#include <stddef.h>

/* Define to debug the memory-leak-detector. */
/* #define VG_DEBUG_LEAKCHECK */

static const Bool mem_debug = False;

static Int addrcmp(const void *ap, const void *bp)
{
   Addr a = *(Addr *)ap;
   Addr b = *(Addr *)bp;
   Int ret;

   if (a == b)
      ret = 0;
   else
      ret = (a < b) ? -1 : 1;

   return ret;
}

static Char *straddr(void *p)
{
   static Char buf[16];

   VG_(sprintf)(buf, "%p", *(Addr *)p);

   return buf;
}

static SkipList sk_segments = SKIPLIST_INIT(Segment, addr, addrcmp, straddr, VG_AR_CORE);

/*--------------------------------------------------------------*/
/*--- Maintain an ordered list of all the client's mappings  ---*/
/*--------------------------------------------------------------*/

Bool VG_(seg_contains)(const Segment *s, Addr p, UInt len)
{
   Addr se = s->addr+s->len;
   Addr pe = p+len;

   vg_assert(pe >= p);

   return (p >= s->addr && pe <= se);
}

Bool VG_(seg_overlaps)(const Segment *s, Addr p, UInt len)
{
   Addr se = s->addr+s->len;
   Addr pe = p+len;

   vg_assert(pe >= p);

   return (p < se && pe > s->addr);
}

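/* Worked example (hypothetical addresses, not from the original
   source): for a Segment s with s->addr == 0x8000 and s->len ==
   0x2000, i.e. covering the half-open range [0x8000, 0xA000):

      VG_(seg_contains)(s, 0x8000, 0x1000)   -> True  (wholly inside)
      VG_(seg_contains)(s, 0x9000, 0x2000)   -> False (runs past the end)
      VG_(seg_overlaps)(s, 0x9000, 0x2000)   -> True  (partial overlap)
      VG_(seg_overlaps)(s, 0xA000, 0x1000)   -> False (the half-open
                                                       ranges only touch)
*/
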
/* Prepare a Segment structure for recycling by freeing everything
   hanging off it. */
static void recycleseg(Segment *s)
{
   if (s->flags & SF_CODE)
      VG_(invalidate_translations)(s->addr, s->len, False);

   if (s->filename != NULL)
      VG_(arena_free)(VG_AR_CORE, (Char *)s->filename);

   /* keep the SegInfo, if any - it probably still applies */
}

/* When freeing a Segment, also clean up everyone else's ideas of
   what was going on in that range of memory */
static void freeseg(Segment *s)
{
   recycleseg(s);
   if (s->symtab != NULL) {
      VG_(symtab_decref)(s->symtab, s->addr, s->len);
      s->symtab = NULL;
   }

   VG_(SkipNode_Free)(&sk_segments, s);
}

/* Split a segment at address a, returning the new segment */
Segment *VG_(split_segment)(Addr a)
{
   Segment *s = VG_(SkipList_Find)(&sk_segments, &a);
   Segment *ns;
   Int delta;

   vg_assert((a & (VKI_BYTES_PER_PAGE-1)) == 0);

   /* missed */
   if (s == NULL)
      return NULL;

   /* a at or beyond endpoint */
   if (s->addr == a || a >= (s->addr+s->len))
      return NULL;

   vg_assert(a > s->addr && a < (s->addr+s->len));

   ns = VG_(SkipNode_Alloc)(&sk_segments);

   *ns = *s;

   delta = a - s->addr;
   ns->addr   += delta;
   ns->offset += delta;
   ns->len    -= delta;
   s->len      = delta;	/* truncate the original segment so the two
			   halves don't overlap */

   if (ns->symtab != NULL)
      VG_(symtab_incref)(ns->symtab);

   VG_(SkipList_Insert)(&sk_segments, ns);

   return ns;
}

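/* Illustrative sketch (hypothetical addresses): splitting a segment
   covering [0x8000, 0xC000) at a == 0x9000 truncates the original
   Segment to [0x8000, 0x9000) and inserts a new Segment covering
   [0x9000, 0xC000), with its file offset advanced by 0x1000 and the
   symtab refcount bumped, since both halves still refer to it:

      Segment *ns = VG_(split_segment)(0x9000);
      // now: s->addr  == 0x8000, s->len  == 0x1000
      //      ns->addr == 0x9000, ns->len == 0x3000
*/
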
/* This unmaps all the segments in the range [addr, addr+len); any
   partial mappings at the ends are truncated. */
void VG_(unmap_range)(Addr addr, UInt len)
{
   Segment *s;
   Segment *next;
   static const Bool debug = False || mem_debug;
   Addr end = addr+len;

   if (len == 0)
      return;

   if (debug)
      VG_(printf)("unmap_range(%p, %d)\n", addr, len);

   len = PGROUNDUP(addr+len)-PGROUNDDN(addr);
   addr = PGROUNDDN(addr);

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   vg_assert((len  & (VKI_BYTES_PER_PAGE-1)) == 0);

   for(s = VG_(SkipList_Find)(&sk_segments, &addr);
       s != NULL && s->addr < (addr+len);
       s = next) {
      Addr seg_end = s->addr + s->len;

      /* fetch next now in case we end up deleting this segment */
      next = VG_(SkipNode_Next)(&sk_segments, s);

      if (debug)
	 VG_(printf)("unmap: addr=%p-%p s=%p ->addr=%p-%p len=%d\n",
		     addr, addr+len, s, s->addr, s->addr+s->len, s->len);

      if (!VG_(seg_overlaps)(s, addr, len)) {
	 if (debug)
	    VG_(printf)("  (no overlap)\n");
	 continue;
      }

      /* 4 cases: */
      if (addr > s->addr &&
	  addr < seg_end &&
	  end >= seg_end) {
	 /* this segment's tail is truncated by [addr, addr+len)
	    -> truncate tail
	 */
	 s->len = addr - s->addr;

	 if (debug)
	    VG_(printf)("  case 1: s->len=%d\n", s->len);
      } else if (addr <= s->addr && end >= seg_end) {
	 /* this segment is completely contained within [addr, addr+len)
	    -> delete segment
	 */
	 Segment *rs = VG_(SkipList_Remove)(&sk_segments, &s->addr);
	 vg_assert(rs == s);
	 freeseg(s);

	 if (debug)
	    VG_(printf)("  case 2: s==%p deleted\n", s);
      } else if (addr <= s->addr && end > s->addr && end < seg_end) {
	 /* this segment's head is truncated by [addr, addr+len)
	    -> truncate head
	 */
	 Int delta = (addr+len) - s->addr;

	 s->addr   += delta;
	 s->offset += delta;
	 s->len    -= delta;

	 if (debug)
	    VG_(printf)("  case 3: s->addr=%p s->len=%d delta=%d\n",
			s->addr, s->len, delta);
      } else if (addr > s->addr && end < seg_end) {
	 /* [addr, addr+len) is contained within a single segment
	    -> split segment into 3, delete middle portion
	 */
	 Segment *middle, *rs;

	 middle = VG_(split_segment)(addr);
	 VG_(split_segment)(addr+len);

	 vg_assert(middle->addr == addr);
	 rs = VG_(SkipList_Remove)(&sk_segments, &addr);
	 vg_assert(rs == middle);

	 freeseg(rs);

	 if (debug)
	    VG_(printf)("  case 4: subrange %p-%p deleted\n",
			addr, addr+len);
      }
   }
}

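/* Worked example (hypothetical layout): suppose a single segment
   covers [0x10000, 0x20000) and we call
   VG_(unmap_range)(0x14000, 0x4000).  Neither end of the segment is
   touched, so case 4 applies: the segment is split at 0x14000 and at
   0x18000, and the middle piece is removed, leaving two segments
   [0x10000, 0x14000) and [0x18000, 0x20000).  Unmapping
   [0x18000, 0x8000 bytes) afterwards would hit case 2 and delete the
   second piece outright. */
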
/* If possible, merge segment with its neighbours - some segments,
   including s, may be destroyed in the process */
static inline Bool neighbours(Segment *s1, Segment *s2)
{
   if (s1->addr+s1->len != s2->addr)
      return False;

   if (s1->flags != s2->flags)
      return False;

   if (s1->prot != s2->prot)
      return False;

   if (s1->symtab != s2->symtab)
      return False;

   if (s1->flags & SF_FILE){
      if ((s1->offset + s1->len) != s2->offset)
	 return False;
      if (s1->dev != s2->dev)
	 return False;
      if (s1->ino != s2->ino)
	 return False;
   }

   return True;
}

/* Merge segments in the address range if they're adjacent and
   compatible */
static void merge_segments(Addr a, UInt len)
{
   Segment *s;
   Segment *next;

   vg_assert((a   & (VKI_BYTES_PER_PAGE-1)) == 0);
   vg_assert((len & (VKI_BYTES_PER_PAGE-1)) == 0);

   a   -= VKI_BYTES_PER_PAGE;
   len += VKI_BYTES_PER_PAGE;

   for(s = VG_(SkipList_Find)(&sk_segments, &a);
       s != NULL && s->addr < (a+len);) {
      next = VG_(SkipNode_Next)(&sk_segments, s);

      if (next && neighbours(s, next)) {
	 Segment *rs;

	 if (0)
	    VG_(printf)("merge %p-%p with %p-%p\n",
			s->addr, s->addr+s->len,
			next->addr, next->addr+next->len);
	 s->len += next->len;
	 s = VG_(SkipNode_Next)(&sk_segments, next);

	 rs = VG_(SkipList_Remove)(&sk_segments, &next->addr);
	 vg_assert(next == rs);
	 freeseg(next);
      } else
	 s = next;
   }
}

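/* Worked example (hypothetical): an anonymous mapping covering
   [0x30000, 0x31000) directly followed by another anonymous mapping
   [0x31000, 0x32000) with identical prot, flags and symtab passes
   neighbours(), so merge_segments() folds them into a single Segment
   covering [0x30000, 0x32000).  File-backed mappings must in addition
   be contiguous on disk (offset, dev and inode all line up) before
   they are merged. */
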
void VG_(map_file_segment)(Addr addr, UInt len, UInt prot, UInt flags,
			   UInt dev, UInt ino, ULong off, const Char *filename)
{
   Segment *s;
   static const Bool debug = False || mem_debug;
   Bool recycled;

   if (debug)
      VG_(printf)("map_file_segment(%p, %d, %x, %x, %4x, %d, %ld, %s)\n",
		  addr, len, prot, flags, dev, ino, off, filename);

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   len = PGROUNDUP(len);

   /* First look to see what already exists around here */
   s = VG_(SkipList_Find)(&sk_segments, &addr);

   if (s != NULL && s->addr == addr && s->len == len) {
      /* This probably means we're just updating the flags */
      recycled = True;
      recycleseg(s);

      /* If we had a symtab, but the new mapping is incompatible, then
	 free up the old symtab in preparation for a new one. */
      if (s->symtab != NULL &&
	  (!(s->flags & SF_FILE) ||
	   !(flags & SF_FILE) ||
	   s->dev != dev ||
	   s->ino != ino ||
	   s->offset != off)) {
	 VG_(symtab_decref)(s->symtab, s->addr, s->len);
	 s->symtab = NULL;
      }
   } else {
      recycled = False;
      VG_(unmap_range)(addr, len);

      s = VG_(SkipNode_Alloc)(&sk_segments);

      s->addr   = addr;
      s->len    = len;
      s->symtab = NULL;
   }

   s->flags  = flags;
   s->prot   = prot;
   s->dev    = dev;
   s->ino    = ino;
   s->offset = off;

   if (filename != NULL)
      s->filename = VG_(arena_strdup)(VG_AR_CORE, filename);
   else
      s->filename = NULL;

   if (debug) {
      Segment *ts;
      for(ts = VG_(SkipNode_First)(&sk_segments);
	  ts != NULL;
	  ts = VG_(SkipNode_Next)(&sk_segments, ts))
	 VG_(printf)("list: %8p->%8p ->%d (0x%x) prot=%x flags=%x\n",
		     ts, ts->addr, ts->len, ts->len, ts->prot, ts->flags);

      VG_(printf)("inserting s=%p addr=%p len=%d\n",
		  s, s->addr, s->len);
   }

   if (!recycled)
      VG_(SkipList_Insert)(&sk_segments, s);

   /* If this mapping is of the beginning of a file, isn't part of
      Valgrind, is at least readable and seems to contain an object
      file, then try reading symbols from it. */
   if ((flags & (SF_MMAP|SF_NOSYMS)) == SF_MMAP &&
       s->symtab == NULL) {
      if (off == 0 &&
	  filename != NULL &&
	  (prot & (VKI_PROT_READ|VKI_PROT_EXEC)) == (VKI_PROT_READ|VKI_PROT_EXEC) &&
	  len >= VKI_BYTES_PER_PAGE &&
	  s->symtab == NULL &&
	  VG_(is_object_file)((void *)addr)) {

	 s->symtab = VG_(read_seg_symbols)(s);

	 if (s->symtab != NULL)
	    s->flags |= SF_DYNLIB;
      } else if (flags & SF_MMAP) {
	 const SegInfo *info;

	 /* Otherwise see if an existing symtab applies to this Segment */
	 for(info = VG_(next_seginfo)(NULL);
	     info != NULL;
	     info = VG_(next_seginfo)(info)) {
	    if (VG_(seg_overlaps)(s, VG_(seg_start)(info), VG_(seg_size)(info))) {
	       s->symtab = (SegInfo *)info;
	       VG_(symtab_incref)((SegInfo *)info);
	    }
	 }
      }
   }

   /* clean up */
   merge_segments(addr, len);
}

void VG_(map_fd_segment)(Addr addr, UInt len, UInt prot, UInt flags,
			 Int fd, ULong off, const Char *filename)
{
   struct vki_stat st;
   Char *name = NULL;

   st.st_dev = 0;
   st.st_ino = 0;

   if (fd != -1 && (flags & SF_FILE)) {
      vg_assert((off & (VKI_BYTES_PER_PAGE-1)) == 0);

      if (VG_(fstat)(fd, &st) < 0)
	 flags &= ~SF_FILE;
   }

   if ((flags & SF_FILE) && filename == NULL && fd != -1)
      name = VG_(resolve_filename)(fd);

   if (filename == NULL)
      filename = name;

   VG_(map_file_segment)(addr, len, prot, flags, st.st_dev, st.st_ino, off, filename);

   if (name)
      VG_(arena_free)(VG_AR_CORE, name);
}

void VG_(map_segment)(Addr addr, UInt len, UInt prot, UInt flags)
{
   flags &= ~SF_FILE;

   VG_(map_file_segment)(addr, len, prot, flags, 0, 0, 0, 0);
}

/* set new protection flags on an address range */
void VG_(mprotect_range)(Addr a, UInt len, UInt prot)
{
   Segment *s, *next;
   static const Bool debug = False || mem_debug;

   if (debug)
      VG_(printf)("mprotect_range(%p, %d, %x)\n", a, len, prot);

   /* Everything must be page-aligned */
   vg_assert((a   & (VKI_BYTES_PER_PAGE-1)) == 0);
   vg_assert((len & (VKI_BYTES_PER_PAGE-1)) == 0);

   VG_(split_segment)(a);
   VG_(split_segment)(a+len);

   for(s = VG_(SkipList_Find)(&sk_segments, &a);
       s != NULL && s->addr < a+len;
       s = next)
   {
      next = VG_(SkipNode_Next)(&sk_segments, s);
      if (s->addr < a)
	 continue;

      s->prot = prot;
   }

   merge_segments(a, len);
}

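/* Illustrative sketch (hypothetical addresses): to make one page of a
   three-page segment [0x40000, 0x43000) read-only,

      VG_(mprotect_range)(0x41000, 0x1000, VKI_PROT_READ);

   first splits the segment at 0x41000 and 0x42000, flips prot on the
   middle piece only, then lets merge_segments() re-coalesce whatever
   neighbouring pieces ended up compatible again. */
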
Addr VG_(find_map_space)(Addr addr, UInt len, Bool for_client)
{
   Segment *s;
   Addr ret;
   static const Bool debug = False || mem_debug;
   Addr limit = (for_client ? VG_(client_end) : VG_(valgrind_mmap_end));

   if (addr == 0)
      addr = for_client ? VG_(client_mapbase) : VG_(valgrind_base);
   else {
      /* leave space for redzone and still try to get the exact
	 address asked for */
      addr -= VKI_BYTES_PER_PAGE;
   }
   ret = addr;

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   len = PGROUNDUP(len);

   len += VKI_BYTES_PER_PAGE * 2; /* leave redzone gaps before and after mapping */

   if (debug)
      VG_(printf)("find_map_space: ret starts as %p-%p client=%d\n",
		  ret, ret+len, for_client);

   for(s = VG_(SkipList_Find)(&sk_segments, &ret);
       s != NULL && s->addr < (ret+len);
       s = VG_(SkipNode_Next)(&sk_segments, s))
   {
      if (debug)
	 VG_(printf)("s->addr=%p len=%d (%p) ret=%p\n",
		     s->addr, s->len, s->addr+s->len, ret);

      if (s->addr < (ret + len) && (s->addr + s->len) > ret)
	 ret = s->addr+s->len;
   }

   if (debug) {
      if (s)
	 VG_(printf)("  s->addr=%p ->len=%d\n", s->addr, s->len);
      else
	 VG_(printf)("  s == NULL\n");
   }

   if ((limit - len) < ret)
      ret = 0;			/* no space */
   else
      ret += VKI_BYTES_PER_PAGE; /* skip leading redzone */

   if (debug)
      VG_(printf)("find_map_space(%p, %d, %d) -> %p\n",
		  addr, len, for_client, ret);

   return ret;
}

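/* Worked example (hypothetical numbers): a request for len == 0x1800
   is first rounded up to 0x2000, then grows to 0x4000 to account for
   one redzone page on each side.  If the first hole big enough starts
   at 0x50000, the function returns 0x51000: the page at 0x50000 and
   the page just past the mapping stay unused as guard gaps. */
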
Segment *VG_(find_segment)(Addr a)
{
   return VG_(SkipList_Find)(&sk_segments, &a);
}

Segment *VG_(next_segment)(Segment *s)
{
   return VG_(SkipNode_Next)(&sk_segments, s);
}

/*--------------------------------------------------------------*/
/*--- Initialise program data/text etc on program startup.   ---*/
/*--------------------------------------------------------------*/

static
void build_valgrind_map_callback ( Addr start, UInt size,
				   Char rr, Char ww, Char xx, UInt dev, UInt ino,
				   ULong foffset, const UChar* filename )
{
   UInt prot = 0;
   UInt flags;
   Bool is_stack_segment;
   Bool verbose = False || mem_debug; /* set to True for debugging */

   is_stack_segment = (start == VG_(clstk_base) && (start+size) == VG_(clstk_end));

   prot = 0;
   flags = SF_MMAP|SF_NOSYMS;

   if (start >= VG_(valgrind_base) && (start+size) <= VG_(valgrind_end))
      flags |= SF_VALGRIND;

   /* Only record valgrind mappings for now, without loading any
      symbols.  This is so we know where the free space is before we
      start allocating more memory (note: heap is OK, it's just mmap
      which is the problem here). */
   if (flags & SF_VALGRIND) {
      if (verbose)
	 VG_(printf)("adding segment %08p-%08p prot=%x flags=%4x filename=%s\n",
		     start, start+size, prot, flags, filename);

      VG_(map_file_segment)(start, size, prot, flags, dev, ino, foffset, filename);
   }
}

static
void build_segment_map_callback ( Addr start, UInt size,
				  Char rr, Char ww, Char xx, UInt dev, UInt ino,
				  ULong foffset, const UChar* filename )
{
   UInt prot = 0;
   UInt flags;
   Bool is_stack_segment;
   Bool verbose = False || mem_debug; /* set to True for debugging */
   Addr r_esp;

   is_stack_segment = (start == VG_(clstk_base) && (start+size) == VG_(clstk_end));

   if (rr == 'r')
      prot |= VKI_PROT_READ;
   if (ww == 'w')
      prot |= VKI_PROT_WRITE;
   if (xx == 'x')
      prot |= VKI_PROT_EXEC;


   if (is_stack_segment)
      flags = SF_STACK | SF_GROWDOWN;
   else
      flags = SF_EXEC|SF_MMAP;

   if (filename != NULL)
      flags |= SF_FILE;

   if (start >= VG_(valgrind_base) && (start+size) <= VG_(valgrind_end))
      flags |= SF_VALGRIND;

   if (verbose)
      VG_(printf)("adding segment %08p-%08p prot=%x flags=%4x filename=%s\n",
		  start, start+size, prot, flags, filename);

   VG_(map_file_segment)(start, size, prot, flags, dev, ino, foffset, filename);

   if (VG_(is_client_addr)(start) && VG_(is_client_addr)(start+size-1))
      VG_TRACK( new_mem_startup, start, size, rr=='r', ww=='w', xx=='x' );

   /* If this is the stack segment mark all below %esp as noaccess. */
   r_esp = VG_(m_state_static)[40/4];
   if (is_stack_segment) {
      if (0)
	 VG_(message)(Vg_DebugMsg, "invalidating stack area: %x .. %x",
		      start,r_esp);
      VG_TRACK( die_mem_stack, start, r_esp-start );
   }
}


/* 1. Records startup segments from /proc/pid/maps.  Takes special note
      of the executable ones, because if they're munmap()ed we need to
      discard translations.  Also checks that no executable segments
      overlap.

      Note that `read_from_file' is false; we read /proc/self/maps into a
      buffer at the start of VG_(main) so that any superblocks mmap'd by
      calls to VG_(malloc)() by SK_({pre,post}_clo_init) aren't erroneously
      thought of as being owned by the client.
 */
void VG_(init_memory) ( void )
{
   /* 1 */
   /* reserve Valgrind's kickstart, heap and stack */
   VG_(map_segment)(VG_(valgrind_mmap_end), VG_(valgrind_end)-VG_(valgrind_mmap_end),
		    VKI_PROT_NONE, SF_VALGRIND|SF_FIXED);

   /* work out what's mapped where, and read interesting symtabs */
   VG_(parse_procselfmaps) ( build_valgrind_map_callback );	/* just Valgrind mappings */
   VG_(parse_procselfmaps) ( build_segment_map_callback );	/* everything */

   /* kludge: some newer kernels place a "sysinfo" page up high, with
      vsyscalls in it, and possibly some other stuff in the future. */
   if (VG_(sysinfo_page_exists)) {
      // 2003-Sep-25, njn: Jeremy thinks the sysinfo page probably doesn't
      // have any symbols that need to be loaded.  So just treat it like
      // a non-executable page.
      //VG_(new_exeseg_mmap)( VG_(sysinfo_page_addr), 4096 );
      VG_TRACK( new_mem_startup, VG_(sysinfo_page_addr), 4096,
                True, True, True );
   }
}

/*------------------------------------------------------------*/
/*--- Tracking permissions around %esp changes.            ---*/
/*------------------------------------------------------------*/

/*
   The stack
   ~~~~~~~~~
   The stack's segment seems to be dynamically extended downwards
   by the kernel as the stack pointer moves down.  Initially, a
   1-page (4k) stack is allocated.  When %esp moves below that for
   the first time, presumably a page fault occurs.  The kernel
   detects that the faulting address is in the range from %esp upwards
   to the current valid stack.  It then extends the stack segment
   downwards for enough to cover the faulting address, and resumes
   the process (invisibly).  The process is unaware of any of this.

   That means that Valgrind can't spot when the stack segment is
   being extended.  Fortunately, we want to precisely and continuously
   update stack permissions around %esp, so we need to spot all
   writes to %esp anyway.

   The deal is: when %esp is assigned a lower value, the stack is
   being extended.  Create secondary maps to fill in any holes
   between the old stack ptr and this one, if necessary.  Then
   mark all bytes in the area just "uncovered" by this %esp change
   as write-only.

   When %esp goes back up, mark the area receded over as unreadable
   and unwritable.

   Just to record the %esp boundary conditions somewhere convenient:
   %esp always points to the lowest live byte in the stack.  All
   addresses below %esp are not live; those at and above it are.
*/

/* Kludgey ... how much does %esp have to change before we reckon that
   the application is switching stacks ? */
#define VG_PLAUSIBLE_STACK_SIZE  8000000
#define VG_HUGE_DELTA            (VG_PLAUSIBLE_STACK_SIZE / 4)

/* This function gets called if new_mem_stack and/or die_mem_stack are
   tracked by the skin, and one of the specialised cases
   (eg. new_mem_stack_4) isn't used in preference */
__attribute__((regparm(1)))
void VG_(unknown_esp_update)(Addr new_ESP)
{
   Addr old_ESP = VG_(get_archreg)(R_ESP);
   Int  delta   = (Int)new_ESP - (Int)old_ESP;

   if (delta < -(VG_HUGE_DELTA) || VG_HUGE_DELTA < delta) {
      /* %esp has changed by more than HUGE_DELTA.  We take this to mean
	 that the application is switching to a new stack, for whatever
	 reason.

	 JRS 20021001: following discussions with John Regehr, if a stack
	 switch happens, it seems best not to mess at all with memory
	 permissions.  Seems to work well with Netscape 4.X.  Really the
	 only remaining difficulty is knowing exactly when a stack switch is
	 happening. */
      if (VG_(clo_verbosity) > 1)
	 VG_(message)(Vg_UserMsg, "Warning: client switching stacks?  "
				  "%%esp: %p --> %p", old_ESP, new_ESP);
   } else if (delta < 0) {
      VG_TRACK( new_mem_stack, new_ESP, -delta );

   } else if (delta > 0) {
      VG_TRACK( die_mem_stack, old_ESP, delta );
   }
}

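/* Worked example (hypothetical values): if %esp moves from 0xbfff0000
   down to 0xbffefff0 (say, a 16-byte alloca), delta == -16 and the
   skin sees new_mem_stack(0xbffefff0, 16).  When %esp later returns
   to 0xbfff0000, delta == +16 and the skin sees
   die_mem_stack(0xbffefff0, 16).  A jump of more than VG_HUGE_DELTA
   bytes in either direction is treated as a stack switch and leaves
   permissions untouched. */
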
static jmp_buf segv_jmpbuf;

static void segv_handler(Int seg)
{
   __builtin_longjmp(segv_jmpbuf, 1);
   VG_(core_panic)("longjmp failed");
}

/*
   Test if a piece of memory is addressable by setting up a temporary
   SIGSEGV handler, then try to touch the memory.  No signal = good,
   signal = bad.
 */
Bool VG_(is_addressable)(Addr p, Int size)
{
   volatile Char * volatile cp = (volatile Char *)p;
   volatile Bool ret;
   vki_ksigaction sa, origsa;
   vki_ksigset_t mask;

   vg_assert(size > 0);

   sa.ksa_handler = segv_handler;
   sa.ksa_flags = 0;
   VG_(ksigfillset)(&sa.ksa_mask);
   VG_(ksigaction)(VKI_SIGSEGV, &sa, &origsa);
   VG_(ksigprocmask)(VKI_SIG_SETMASK, NULL, &mask);

   if (__builtin_setjmp(&segv_jmpbuf) == 0) {
      while(size--)
	 *cp++;
      ret = True;
   } else
      ret = False;

   VG_(ksigaction)(VKI_SIGSEGV, &origsa, NULL);
   VG_(ksigprocmask)(VKI_SIG_SETMASK, &mask, NULL);

   return ret;
}

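/* Usage sketch (illustrative, not from the original source): a caller
   probing whether a suspect pointer can be dereferenced before
   reading through it might do

      if (VG_(is_addressable)(p, sizeof(UInt)))
         val = *(UInt *)p;

   Note the probe performs read accesses only; it says nothing about
   whether the memory is writable. */
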
/*--------------------------------------------------------------------*/
/*--- manage allocation of memory on behalf of the client          ---*/
/*--------------------------------------------------------------------*/

Addr VG_(client_alloc)(Addr addr, UInt len, UInt prot, UInt flags)
{
   len = PGROUNDUP(len);

   if (!(flags & SF_FIXED))
      addr = VG_(find_map_space)(addr, len, True);

   flags |= SF_CORE;

   if (VG_(mmap)((void *)addr, len, prot,
		 VKI_MAP_FIXED | VKI_MAP_PRIVATE | VKI_MAP_ANONYMOUS | VKI_MAP_CLIENT,
		 -1, 0) == (void *)addr) {
      VG_(map_segment)(addr, len, prot, flags);
      return addr;
   }

   return 0;
}

void VG_(client_free)(Addr addr)
{
   Segment *s = VG_(find_segment)(addr);

   if (s == NULL || s->addr != addr || !(s->flags & SF_CORE)) {
      VG_(message)(Vg_DebugMsg, "VG_(client_free)(%p) - no CORE memory found there", addr);
      return;
   }

   VG_(munmap)((void *)s->addr, s->len);
}

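/* Usage sketch (illustrative): core code wanting a page of anonymous,
   writable memory in the client's address space, at no particular
   address, might call

      Addr a = VG_(client_alloc)(0, VKI_BYTES_PER_PAGE,
                                 VKI_PROT_READ|VKI_PROT_WRITE, 0);

   and later release it with VG_(client_free)(a).  Passing SF_FIXED in
   flags skips the find_map_space() search and maps at addr exactly. */
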
Bool VG_(is_client_addr)(Addr a)
{
   return a >= VG_(client_base) && a < VG_(client_end);
}

Bool VG_(is_shadow_addr)(Addr a)
{
   return a >= VG_(shadow_base) && a < VG_(shadow_end);
}

Bool VG_(is_valgrind_addr)(Addr a)
{
   return a >= VG_(valgrind_base) && a < VG_(valgrind_end);
}

Addr VG_(get_client_base)(void)
{
   return VG_(client_base);
}

Addr VG_(get_client_end)(void)
{
   return VG_(client_end);
}

Addr VG_(get_client_size)(void)
{
   return VG_(client_end)-VG_(client_base);
}

Addr VG_(get_shadow_base)(void)
{
   return VG_(shadow_base);
}

Addr VG_(get_shadow_end)(void)
{
   return VG_(shadow_end);
}

Addr VG_(get_shadow_size)(void)
{
   return VG_(shadow_end)-VG_(shadow_base);
}


void VG_(init_shadow_range)(Addr p, UInt sz, Bool call_init)
{
   if (0)
      VG_(printf)("init_shadow_range(%p, %d)\n", p, sz);

   vg_assert(VG_(needs).shadow_memory);
   vg_assert(VG_(defined_init_shadow_page)());

   sz = PGROUNDUP(p+sz) - PGROUNDDN(p);
   p  = PGROUNDDN(p);

   VG_(mprotect)((void *)p, sz, VKI_PROT_READ|VKI_PROT_WRITE);

   if (call_init)
      while(sz) {
	 /* ask the skin to initialize each page */
	 VG_TRACK( init_shadow_page, PGROUNDDN(p) );

	 p  += VKI_BYTES_PER_PAGE;
	 sz -= VKI_BYTES_PER_PAGE;
      }
}

void *VG_(shadow_alloc)(UInt size)
{
   static Addr shadow_alloc = 0;
   void *ret;

   vg_assert(VG_(needs).shadow_memory);
   vg_assert(!VG_(defined_init_shadow_page)());

   size = PGROUNDUP(size);

   if (shadow_alloc == 0)
      shadow_alloc = VG_(shadow_base);

   if (shadow_alloc >= VG_(shadow_end))
      return 0;

   ret = (void *)shadow_alloc;
   VG_(mprotect)(ret, size, VKI_PROT_READ|VKI_PROT_WRITE);

   shadow_alloc += size;

   return ret;
}

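/* Usage sketch (illustrative): a skin needing 64KB of shadow storage
   would call

      void *shadow = VG_(shadow_alloc)(64 * 1024);

   Allocations are page-rounded and carved linearly out of
   [shadow_base, shadow_end) by a simple bump allocator; there is
   deliberately no corresponding free operation. */
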
/*--------------------------------------------------------------------*/
/*--- end                                              vg_memory.c ---*/
/*--------------------------------------------------------------------*/