blob: 34299d8dabe80c54cfc694ca8f206f01897a59b8 [file] [log] [blame]
sewardjde4a1d02002-03-22 01:27:54 +00001
2/*--------------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00003/*--- Memory-related stuff: segment initialisation and tracking, ---*/
4/*--- stack operations ---*/
sewardjde4a1d02002-03-22 01:27:54 +00005/*--- vg_memory.c ---*/
6/*--------------------------------------------------------------------*/
7
8/*
njnc9539842002-10-02 13:26:35 +00009 This file is part of Valgrind, an extensible x86 protected-mode
10 emulator for monitoring program execution on x86-Unixes.
sewardjde4a1d02002-03-22 01:27:54 +000011
nethercotebb1c9912004-01-04 16:43:23 +000012 Copyright (C) 2000-2004 Julian Seward
sewardjde4a1d02002-03-22 01:27:54 +000013 jseward@acm.org
sewardjde4a1d02002-03-22 01:27:54 +000014
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
njn25e49d8e72002-09-23 09:36:25 +000030 The GNU General Public License is contained in the file COPYING.
sewardjde4a1d02002-03-22 01:27:54 +000031*/
32
33#include "vg_include.h"
34
fitzhardinge98abfc72003-12-16 02:05:15 +000035#include <stddef.h>
36
sewardja4495682002-10-21 07:29:59 +000037/* Define to debug the memory-leak-detector. */
38/* #define VG_DEBUG_LEAKCHECK */
39
/* Master debug switch for this file: the function-local `debug' flags
   below are all ORed with this, so setting it to True enables tracing
   everywhere at once. */
static const Bool mem_debug = False;
41
42static Int addrcmp(const void *ap, const void *bp)
43{
44 Addr a = *(Addr *)ap;
45 Addr b = *(Addr *)bp;
46 Int ret;
47
48 if (a == b)
49 ret = 0;
50 else
51 ret = (a < b) ? -1 : 1;
52
53 return ret;
54}
55
/* Skiplist debug-printing callback: render an Addr key as text.
   NB: returns a pointer to a static buffer, so it is not reentrant
   and the result is only valid until the next call. */
static Char *straddr(void *p)
{
   static Char buf[16];

   VG_(sprintf)(buf, "%p", *(Addr *)p);

   return buf;
}
64
/* The ordered list of all client mappings: a skiplist of Segment
   structures keyed on Segment.addr, allocated from the core arena. */
static SkipList sk_segments = SKIPLIST_INIT(Segment, addr, addrcmp, straddr, VG_AR_CORE);
66
67/*--------------------------------------------------------------*/
68/*--- Maintain an ordered list of all the client's mappings ---*/
69/*--------------------------------------------------------------*/
70
71Bool VG_(seg_contains)(const Segment *s, Addr p, UInt len)
72{
73 Addr se = s->addr+s->len;
74 Addr pe = p+len;
75
76 vg_assert(pe >= p);
77
78 return (p >= s->addr && pe <= se);
79}
80
81Bool VG_(seg_overlaps)(const Segment *s, Addr p, UInt len)
82{
83 Addr se = s->addr+s->len;
84 Addr pe = p+len;
85
86 vg_assert(pe >= p);
87
88 return (p < se && pe > s->addr);
89}
90
/* Prepare a Segment structure for recycling by freeing everything
   hanging off it. */
static void recycleseg(Segment *s)
{
   /* Translations of code from this range are now stale. */
   if (s->flags & SF_CODE)
      VG_(invalidate_translations)(s->addr, s->len, False);

   /* The filename was arena_strdup'd when the segment was created. */
   if (s->filename != NULL)
      VG_(arena_free)(VG_AR_CORE, (Char *)s->filename);

   /* keep the SegInfo, if any - it probably still applies */
}
103
/* When freeing a Segment, also clean up every one else's ideas of
   what was going on in that range of memory.  Releases the symtab
   reference (unlike recycleseg) and returns the node to the skiplist
   allocator; caller must already have removed s from sk_segments. */
static void freeseg(Segment *s)
{
   recycleseg(s);
   if (s->symtab != NULL) {
      VG_(symtab_decref)(s->symtab, s->addr, s->len);
      s->symtab = NULL;
   }

   VG_(SkipNode_Free)(&sk_segments, s);
}
116
/* Split a segment at address a, returning the new segment.
   a must be page-aligned.  On success the existing segment is
   truncated to [old addr, a) and the returned new segment covers
   [a, old end).  Returns NULL (and changes nothing) if a does not
   fall strictly inside an existing segment. */
Segment *VG_(split_segment)(Addr a)
{
   Segment *s = VG_(SkipList_Find)(&sk_segments, &a);
   Segment *ns;
   Int delta;

   vg_assert((a & (VKI_BYTES_PER_PAGE-1)) == 0);

   /* missed */
   if (s == NULL)
      return NULL;

   /* a at or beyond endpoint */
   if (s->addr == a || a >= (s->addr+s->len))
      return NULL;

   vg_assert(a > s->addr && a < (s->addr+s->len));

   ns = VG_(SkipNode_Alloc)(&sk_segments);

   /* Start the new segment as a field-for-field copy of the old. */
   *ns = *s;

   delta = a - s->addr;
   ns->addr   += delta;
   ns->offset += delta;   /* file offset advances with the address */
   ns->len    -= delta;
   s->len      = delta;

   /* Both halves need their own heap copy of the filename. */
   if (s->filename != NULL)
      ns->filename = VG_(arena_strdup)(VG_AR_CORE, s->filename);

   /* Both halves now reference the symtab, so bump its refcount. */
   if (ns->symtab != NULL)
      VG_(symtab_incref)(ns->symtab);

   VG_(SkipList_Insert)(&sk_segments, ns);

   return ns;
}
156
/* This unmaps all the segments in the range [addr, addr+len); any
   partial mappings at the ends are truncated.  addr must be
   page-aligned; len is rounded up to a whole number of pages. */
void VG_(unmap_range)(Addr addr, UInt len)
{
   Segment *s;
   Segment *next;
   static const Bool debug = False || mem_debug;
   Addr end;

   if (len == 0)
      return;

   len = PGROUNDUP(len);
   vg_assert(addr == PGROUNDDN(addr));

   if (debug)
      VG_(printf)("unmap_range(%p, %d)\n", addr, len);

   end = addr+len;

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   vg_assert((len & (VKI_BYTES_PER_PAGE-1)) == 0);

   for(s = VG_(SkipList_Find)(&sk_segments, &addr);
       s != NULL && s->addr < (addr+len);
       s = next) {
      Addr seg_end = s->addr + s->len;

      /* fetch next now in case we end up deleting this segment */
      next = VG_(SkipNode_Next)(&sk_segments, s);

      if (debug)
	 VG_(printf)("unmap: addr=%p-%p s=%p ->addr=%p-%p len=%d\n",
		     addr, end, s, s->addr, seg_end, s->len);

      /* The Find above can return the segment just below addr; skip
	 anything that doesn't actually intersect the range. */
      if (!VG_(seg_overlaps)(s, addr, len)) {
	 if (debug)
	    VG_(printf)(" (no overlap)\n");
	 continue;
      }

      /* 4 cases: */
      if (addr > s->addr &&
	  addr < seg_end &&
	  end >= seg_end) {
	 /* this segment's tail is truncated by [addr, addr+len)
	    -> truncate tail
	 */
	 s->len = addr - s->addr;

	 if (debug)
	    VG_(printf)(" case 1: s->len=%d\n", s->len);
      } else if (addr <= s->addr && end > s->addr && end < seg_end) {
	 /* this segment's head is truncated by [addr, addr+len)
	    -> truncate head
	 */
	 Int delta = end - s->addr;

	 if (debug)
	    VG_(printf)(" case 2: s->addr=%p s->len=%d delta=%d\n", s->addr, s->len, delta);

	 /* keep addr, file offset and length mutually consistent */
	 s->addr += delta;
	 s->offset += delta;
	 s->len -= delta;

	 vg_assert(s->len != 0);
      } else if (addr <= s->addr && end >= seg_end) {
	 /* this segment is completely contained within [addr, addr+len)
	    -> delete segment
	 */
	 Segment *rs = VG_(SkipList_Remove)(&sk_segments, &s->addr);
	 vg_assert(rs == s);
	 freeseg(s);

	 if (debug)
	    VG_(printf)(" case 3: s==%p deleted\n", s);
      } else if (addr > s->addr && end < seg_end) {
	 /* [addr, addr+len) is contained within a single segment
	    -> split segment into 3, delete middle portion
	  */
	 Segment *middle, *rs;

	 /* middle = [addr, old end); second split trims it to
	    [addr, addr+len), leaving the tail as its own segment */
	 middle = VG_(split_segment)(addr);
	 VG_(split_segment)(addr+len);

	 vg_assert(middle->addr == addr);
	 rs = VG_(SkipList_Remove)(&sk_segments, &addr);
	 vg_assert(rs == middle);

	 freeseg(rs);

	 if (debug)
	    VG_(printf)(" case 4: subrange %p-%p deleted\n",
			addr, addr+len);
      }
   }
}
255
fitzhardinge1a4adf02003-12-22 10:42:59 +0000256/* Return true if two segments are adjacent and mergable (s1 is
257 assumed to have a lower ->addr than s2) */
fitzhardinge98abfc72003-12-16 02:05:15 +0000258static inline Bool neighbours(Segment *s1, Segment *s2)
259{
260 if (s1->addr+s1->len != s2->addr)
261 return False;
262
263 if (s1->flags != s2->flags)
264 return False;
265
266 if (s1->prot != s2->prot)
267 return False;
268
269 if (s1->symtab != s2->symtab)
270 return False;
271
272 if (s1->flags & SF_FILE){
273 if ((s1->offset + s1->len) != s2->offset)
274 return False;
275 if (s1->dev != s2->dev)
276 return False;
277 if (s1->ino != s2->ino)
278 return False;
279 }
280
281 return True;
282}
283
/* If possible, merge segment with its neighbours - some segments,
   including s, may be destroyed in the process.  a and len must be
   page-aligned. */
static void merge_segments(Addr a, UInt len)
{
   Segment *s;
   Segment *next;

   vg_assert((a & (VKI_BYTES_PER_PAGE-1)) == 0);
   vg_assert((len & (VKI_BYTES_PER_PAGE-1)) == 0);

   /* Widen the window by one page downwards so the segment that ends
      exactly at a is also considered for merging.
      NOTE(review): this underflows if a == 0 - presumably callers
      never pass a first-page address; confirm. */
   a -= VKI_BYTES_PER_PAGE;
   len += VKI_BYTES_PER_PAGE;

   for(s = VG_(SkipList_Find)(&sk_segments, &a);
       s != NULL && s->addr < (a+len);) {
      next = VG_(SkipNode_Next)(&sk_segments, s);

      if (next && neighbours(s, next)) {
	 Segment *rs;

	 if (0)
	    VG_(printf)("merge %p-%p with %p-%p\n",
			s->addr, s->addr+s->len,
			next->addr, next->addr+next->len);
	 /* absorb next into s, then unlink and free next */
	 s->len += next->len;
	 /* NOTE(review): s advances past next here, so a run of three
	    mergable segments is only coalesced pairwise in one call -
	    verify whether chained merges matter to callers. */
	 s = VG_(SkipNode_Next)(&sk_segments, next);

	 rs = VG_(SkipList_Remove)(&sk_segments, &next->addr);
	 vg_assert(next == rs);
	 freeseg(next);
      } else
	 s = next;
   }
}
318
/* Record a (possibly file-backed) mapping of [addr, addr+len) with the
   given protection, flags and file identity (dev/ino/off/filename).
   If a segment with exactly this addr and len already exists it is
   recycled in place (just its fields updated); otherwise the range is
   first unmapped and a fresh segment inserted.  May read symbols from
   the mapping, and finishes by merging with mergable neighbours. */
void VG_(map_file_segment)(Addr addr, UInt len, UInt prot, UInt flags,
			   UInt dev, UInt ino, ULong off, const Char *filename)
{
   Segment *s;
   static const Bool debug = False || mem_debug;
   Bool recycled;

   if (debug)
      VG_(printf)("map_file_segment(%p, %d, %x, %x, %4x, %d, %ld, %s)\n",
		  addr, len, prot, flags, dev, ino, off, filename);

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   len = PGROUNDUP(len);

   /* First look to see what already exists around here */
   s = VG_(SkipList_Find)(&sk_segments, &addr);

   if (s != NULL && s->addr == addr && s->len == len) {
      /* This probably means we're just updating the flags */
      recycled = True;
      recycleseg(s);

      /* If we had a symtab, but the new mapping is incompatible, then
	 free up the old symtab in preparation for a new one. */
      if (s->symtab != NULL &&
	  (!(s->flags & SF_FILE) ||
	   !(flags & SF_FILE) ||
	   s->dev != dev ||
	   s->ino != ino ||
	   s->offset != off)) {
	 VG_(symtab_decref)(s->symtab, s->addr, s->len);
	 s->symtab = NULL;
      }
   } else {
      /* No exact match: clear out anything overlapping, then build a
	 brand-new segment node. */
      recycled = False;
      VG_(unmap_range)(addr, len);

      s = VG_(SkipNode_Alloc)(&sk_segments);

      s->addr = addr;
      s->len = len;
      s->symtab = NULL;
   }

   s->flags = flags;
   s->prot = prot;
   s->dev = dev;
   s->ino = ino;
   s->offset = off;

   /* Segment owns its own copy of the filename. */
   if (filename != NULL)
      s->filename = VG_(arena_strdup)(VG_AR_CORE, filename);
   else
      s->filename = NULL;

   if (debug) {
      Segment *ts;
      for(ts = VG_(SkipNode_First)(&sk_segments);
	  ts != NULL;
	  ts = VG_(SkipNode_Next)(&sk_segments, ts))
	 VG_(printf)("list: %8p->%8p ->%d (0x%x) prot=%x flags=%x\n",
		     ts, ts->addr, ts->len, ts->len, ts->prot, ts->flags);

      VG_(printf)("inserting s=%p addr=%p len=%d\n",
		  s, s->addr, s->len);
   }

   /* A recycled segment is already linked into the skiplist. */
   if (!recycled)
      VG_(SkipList_Insert)(&sk_segments, s);

   /* If this mapping is of the beginning of a file, isn't part of
      Valgrind, is at least readable and seems to contain an object
      file, then try reading symbols from it. */
   if ((flags & (SF_MMAP|SF_NOSYMS)) == SF_MMAP &&
       s->symtab == NULL) {
      if (off == 0 &&
	  filename != NULL &&
	  (prot & (VKI_PROT_READ|VKI_PROT_EXEC)) == (VKI_PROT_READ|VKI_PROT_EXEC) &&
	  len >= VKI_BYTES_PER_PAGE &&
	  s->symtab == NULL &&
	  VG_(is_object_file)((void *)addr)) {

	 s->symtab = VG_(read_seg_symbols)(s);

	 if (s->symtab != NULL)
	    s->flags |= SF_DYNLIB;
      } else if (flags & SF_MMAP) {
	 const SegInfo *info;

	 /* Otherwise see if an existing symtab applies to this Segment */
	 for(info = VG_(next_seginfo)(NULL);
	     info != NULL;
	     info = VG_(next_seginfo)(info)) {
	    if (VG_(seg_overlaps)(s, VG_(seg_start)(info), VG_(seg_size)(info))) {
	       s->symtab = (SegInfo *)info;
	       VG_(symtab_incref)((SegInfo *)info);
	    }
	 }
      }
   }

   /* clean up */
   merge_segments(addr, len);
}
424
/* Record a mapping made from file descriptor fd: resolve the fd to
   dev/ino (and, if the caller gave no filename, a pathname), then hand
   off to VG_(map_file_segment).  If the fd can't be fstat'd the
   mapping is downgraded to non-file-backed. */
void VG_(map_fd_segment)(Addr addr, UInt len, UInt prot, UInt flags,
			 Int fd, ULong off, const Char *filename)
{
   struct vki_stat st;
   Char *name = NULL;

   /* Defaults used for the non-file case. */
   st.st_dev = 0;
   st.st_ino = 0;

   if (fd != -1 && (flags & SF_FILE)) {
      vg_assert((off & (VKI_BYTES_PER_PAGE-1)) == 0);

      if (VG_(fstat)(fd, &st) < 0)
	 flags &= ~SF_FILE;
   }

   /* Derive a filename from the fd if the caller didn't supply one. */
   if ((flags & SF_FILE) && filename == NULL && fd != -1)
      name = VG_(resolve_filename)(fd);

   if (filename == NULL)
      filename = name;

   VG_(map_file_segment)(addr, len, prot, flags, st.st_dev, st.st_ino, off, filename);

   /* map_file_segment took its own copy, so release ours. */
   if (name)
      VG_(arena_free)(VG_AR_CORE, name);
}
452
453void VG_(map_segment)(Addr addr, UInt len, UInt prot, UInt flags)
454{
455 flags &= ~SF_FILE;
456
457 VG_(map_file_segment)(addr, len, prot, flags, 0, 0, 0, 0);
458}
459
/* set new protection flags on an address range.  a must be
   page-aligned; len is rounded up to whole pages.  Segments straddling
   either end of the range are split first so only the exact range is
   affected, and mergable neighbours are coalesced afterwards. */
void VG_(mprotect_range)(Addr a, UInt len, UInt prot)
{
   Segment *s, *next;
   static const Bool debug = False || mem_debug;

   if (debug)
      VG_(printf)("mprotect_range(%p, %d, %x)\n", a, len, prot);

   /* Everything must be page-aligned */
   vg_assert((a & (VKI_BYTES_PER_PAGE-1)) == 0);
   len = PGROUNDUP(len);

   /* Make the range boundaries coincide with segment boundaries. */
   VG_(split_segment)(a);
   VG_(split_segment)(a+len);

   for(s = VG_(SkipList_Find)(&sk_segments, &a);
       s != NULL && s->addr < a+len;
       s = next)
   {
      next = VG_(SkipNode_Next)(&sk_segments, s);
      /* Find may return the segment just below a; leave it alone. */
      if (s->addr < a)
	 continue;

      s->prot = prot;
   }

   merge_segments(a, len);
}
489
/* Find a free gap of at least len bytes (plus a one-page redzone on
   each side) at or above addr, in either the client's or Valgrind's
   part of the address space.  addr == 0 means "anywhere suitable".
   Returns the usable address (past the leading redzone), or 0 if no
   space is available below the relevant limit. */
Addr VG_(find_map_space)(Addr addr, UInt len, Bool for_client)
{
   static const Bool debug = False || mem_debug;
   Segment *s;
   Addr ret;
   Addr limit = (for_client ? VG_(client_end) : VG_(valgrind_mmap_end));

   if (addr == 0)
      addr = for_client ? VG_(client_mapbase) : VG_(valgrind_base);
   else {
      /* leave space for redzone and still try to get the exact
	 address asked for */
      addr -= VKI_BYTES_PER_PAGE;
   }
   ret = addr;

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   len = PGROUNDUP(len);

   len += VKI_BYTES_PER_PAGE * 2; /* leave redzone gaps before and after mapping */

   if (debug)
      VG_(printf)("find_map_space: ret starts as %p-%p client=%d\n",
		  ret, ret+len, for_client);

   /* Walk the segment list, bumping ret past every segment that
      intersects the candidate window [ret, ret+len). */
   for(s = VG_(SkipList_Find)(&sk_segments, &ret);
       s != NULL && s->addr < (ret+len);
       s = VG_(SkipNode_Next)(&sk_segments, s))
   {
      if (debug)
	 VG_(printf)("s->addr=%p len=%d (%p) ret=%p\n",
		     s->addr, s->len, s->addr+s->len, ret);

      if (s->addr < (ret + len) && (s->addr + s->len) > ret)
	 ret = s->addr+s->len;
   }

   if (debug) {
      if (s)
	 VG_(printf)(" s->addr=%p ->len=%d\n", s->addr, s->len);
      else
	 VG_(printf)(" s == NULL\n");
   }

   if ((limit - len) < ret)
      ret = 0; /* no space */
   else
      ret += VKI_BYTES_PER_PAGE; /* skip leading redzone */

   if (debug)
      VG_(printf)("find_map_space(%p, %d, %d) -> %p\n",
		  addr, len, for_client, ret);

   return ret;
}
546
547Segment *VG_(find_segment)(Addr a)
548{
549 return VG_(SkipList_Find)(&sk_segments, &a);
550}
551
552Segment *VG_(next_segment)(Segment *s)
553{
554 return VG_(SkipNode_Next)(&sk_segments, s);
555}
sewardjde4a1d02002-03-22 01:27:54 +0000556
njn25e49d8e72002-09-23 09:36:25 +0000557/*--------------------------------------------------------------*/
558/*--- Initialise program data/text etc on program startup. ---*/
559/*--------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000560
fitzhardinge98abfc72003-12-16 02:05:15 +0000561static
562void build_valgrind_map_callback ( Addr start, UInt size,
563 Char rr, Char ww, Char xx, UInt dev, UInt ino,
564 ULong foffset, const UChar* filename )
njn25e49d8e72002-09-23 09:36:25 +0000565{
fitzhardinge98abfc72003-12-16 02:05:15 +0000566 UInt prot = 0;
567 UInt flags;
568 Bool is_stack_segment;
569 Bool verbose = False || mem_debug; /* set to True for debugging */
sewardjde4a1d02002-03-22 01:27:54 +0000570
fitzhardinge98abfc72003-12-16 02:05:15 +0000571 is_stack_segment = (start == VG_(clstk_base) && (start+size) == VG_(clstk_end));
572
573 prot = 0;
574 flags = SF_MMAP|SF_NOSYMS;
575
576 if (start >= VG_(valgrind_base) && (start+size) <= VG_(valgrind_end))
577 flags |= SF_VALGRIND;
578
579 /* Only record valgrind mappings for now, without loading any
580 symbols. This is so we know where the free space is before we
581 start allocating more memory (note: heap is OK, it's just mmap
582 which is the problem here). */
583 if (flags & SF_VALGRIND) {
584 if (verbose)
585 VG_(printf)("adding segment %08p-%08p prot=%x flags=%4x filename=%s\n",
586 start, start+size, prot, flags, filename);
587
588 VG_(map_file_segment)(start, size, prot, flags, dev, ino, foffset, filename);
sewardjde4a1d02002-03-22 01:27:54 +0000589 }
njn25e49d8e72002-09-23 09:36:25 +0000590}
591
/* Callback for VG_(parse_procselfmaps): record every mapping found at
   startup, translate its r/w/x characters into VKI_PROT_* bits, tell
   the skin about client memory, and mark the area below %esp in the
   stack segment as dead. */
static
void build_segment_map_callback ( Addr start, UInt size,
				  Char rr, Char ww, Char xx, UInt dev, UInt ino,
				  ULong foffset, const UChar* filename )
{
   UInt prot = 0;
   UInt flags;
   Bool is_stack_segment;
   Bool verbose = False || mem_debug; /* set to True for debugging */
   Addr r_esp;

   is_stack_segment = (start == VG_(clstk_base) && (start+size) == VG_(clstk_end));

   if (rr == 'r')
      prot |= VKI_PROT_READ;
   if (ww == 'w')
      prot |= VKI_PROT_WRITE;
   if (xx == 'x')
      prot |= VKI_PROT_EXEC;


   if (is_stack_segment)
      flags = SF_STACK | SF_GROWDOWN;
   else
      flags = SF_EXEC|SF_MMAP;

   if (filename != NULL)
      flags |= SF_FILE;

   if (start >= VG_(valgrind_base) && (start+size) <= VG_(valgrind_end))
      flags |= SF_VALGRIND;

   if (verbose)
      VG_(printf)("adding segment %08p-%08p prot=%x flags=%4x filename=%s\n",
		  start, start+size, prot, flags, filename);

   VG_(map_file_segment)(start, size, prot, flags, dev, ino, foffset, filename);

   /* Tell the skin about memory the client can actually touch. */
   if (VG_(is_client_addr)(start) && VG_(is_client_addr)(start+size-1))
      VG_TRACK( new_mem_startup, start, size, rr=='r', ww=='w', xx=='x' );

   /* If this is the stack segment mark all below %esp as noaccess. */
   /* NOTE(review): 40/4 is presumably the word index of the saved
      %esp in the baseblock - confirm against the register layout. */
   r_esp = VG_(m_state_static)[40/4];
   if (is_stack_segment) {
      if (0)
         VG_(message)(Vg_DebugMsg, "invalidating stack area: %x .. %x",
		      start,r_esp);
      VG_TRACK( die_mem_stack, start, r_esp-start );
   }
}
642
643
njn3e884182003-04-15 13:03:23 +0000644/* 1. Records startup segments from /proc/pid/maps. Takes special note
645 of the executable ones, because if they're munmap()ed we need to
646 discard translations. Also checks there's no exe segment overlaps.
njn25e49d8e72002-09-23 09:36:25 +0000647
njn3e884182003-04-15 13:03:23 +0000648 Note that `read_from_file' is false; we read /proc/self/maps into a
649 buffer at the start of VG_(main) so that any superblocks mmap'd by
650 calls to VG_(malloc)() by SK_({pre,post}_clo_init) aren't erroneously
651 thought of as being owned by the client.
njn25e49d8e72002-09-23 09:36:25 +0000652 */
/* Start-of-day memory setup: reserve Valgrind's own region, record
   every existing mapping from /proc/self/maps (two passes: Valgrind's
   mappings first, then everything), and install the client trampoline
   page. */
void VG_(init_memory) ( void )
{
   /* 1 */
   /* reserve Valgrind's kickstart, heap and stack */
   VG_(map_segment)(VG_(valgrind_mmap_end), VG_(valgrind_end)-VG_(valgrind_mmap_end),
		    VKI_PROT_NONE, SF_VALGRIND|SF_FIXED);

   /* work out what's mapped where, and read interesting symtabs */
   VG_(parse_procselfmaps) ( build_valgrind_map_callback );  /* just Valgrind mappings */
   VG_(parse_procselfmaps) ( build_segment_map_callback );   /* everything */

   /* initialize our trampoline page (which is also sysinfo stuff) */
   VG_(memcpy)((void *)VG_(client_trampoline_code),
	       &VG_(trampoline_code_start),
	       VG_(trampoline_code_length));
   VG_(mprotect)((void *)VG_(client_trampoline_code), VG_(trampoline_code_length),
		 VKI_PROT_READ|VKI_PROT_EXEC);
}
671
sewardjde4a1d02002-03-22 01:27:54 +0000672/*------------------------------------------------------------*/
673/*--- Tracking permissions around %esp changes. ---*/
674/*------------------------------------------------------------*/
675
676/*
677 The stack
678 ~~~~~~~~~
679 The stack's segment seems to be dynamically extended downwards
680 by the kernel as the stack pointer moves down. Initially, a
681 1-page (4k) stack is allocated. When %esp moves below that for
682 the first time, presumably a page fault occurs. The kernel
683 detects that the faulting address is in the range from %esp upwards
684 to the current valid stack. It then extends the stack segment
685 downwards for enough to cover the faulting address, and resumes
686 the process (invisibly). The process is unaware of any of this.
687
688 That means that Valgrind can't spot when the stack segment is
689 being extended. Fortunately, we want to precisely and continuously
690 update stack permissions around %esp, so we need to spot all
691 writes to %esp anyway.
692
693 The deal is: when %esp is assigned a lower value, the stack is
694 being extended. Create a secondary maps to fill in any holes
695 between the old stack ptr and this one, if necessary. Then
696 mark all bytes in the area just "uncovered" by this %esp change
697 as write-only.
698
699 When %esp goes back up, mark the area receded over as unreadable
700 and unwritable.
701
702 Just to record the %esp boundary conditions somewhere convenient:
703 %esp always points to the lowest live byte in the stack. All
704 addresses below %esp are not live; those at and above it are.
705*/
706
/* Kludgey ... how much does %esp have to change before we reckon that
   the application is switching stacks ?  A %esp jump bigger than
   VG_HUGE_DELTA (a quarter of the plausible stack size) is treated as
   a stack switch rather than growth/shrinkage. */
#define VG_PLAUSIBLE_STACK_SIZE 8000000
#define VG_HUGE_DELTA (VG_PLAUSIBLE_STACK_SIZE / 4)
sewardjde4a1d02002-03-22 01:27:54 +0000711
/* This function gets called if new_mem_stack and/or die_mem_stack are
   tracked by the skin, and one of the specialised cases (eg. new_mem_stack_4)
   isn't used in preference.  Compares the new %esp against the current
   one and reports the uncovered/receded stack area to the skin, unless
   the jump is so large it looks like a stack switch. */
__attribute__((regparm(1)))
void VG_(unknown_esp_update)(Addr new_ESP)
{
   Addr old_ESP = VG_(get_archreg)(R_ESP);
   Int  delta   = (Int)new_ESP - (Int)old_ESP;

   if (delta < -(VG_HUGE_DELTA) || VG_HUGE_DELTA < delta) {
      /* %esp has changed by more than HUGE_DELTA.  We take this to mean
	 that the application is switching to a new stack, for whatever
	 reason.

	 JRS 20021001: following discussions with John Regehr, if a stack
	 switch happens, it seems best not to mess at all with memory
	 permissions.  Seems to work well with Netscape 4.X.  Really the
	 only remaining difficulty is knowing exactly when a stack switch is
	 happening. */
      if (VG_(clo_verbosity) > 1)
	 VG_(message)(Vg_UserMsg, "Warning: client switching stacks? "
		      "%%esp: %p --> %p", old_ESP, new_ESP);
   } else if (delta < 0) {
      /* %esp moved down: stack grew, new memory is now live. */
      VG_TRACK( new_mem_stack, new_ESP, -delta );

   } else if (delta > 0) {
      /* %esp moved up: stack shrank, the receded area is dead. */
      VG_TRACK( die_mem_stack, old_ESP, delta );
   }
}
741
/* Jump target shared with segv_handler; lets VG_(is_addressable)
   recover from a fault on its probe access. */
static jmp_buf segv_jmpbuf;

/* Minimal SIGSEGV handler: bail straight back to the setjmp point.
   The panic is unreachable unless the longjmp itself fails. */
static void segv_handler(Int seg)
{
   __builtin_longjmp(segv_jmpbuf, 1);
   VG_(core_panic)("longjmp failed");
}
749
/*
   Test if a piece of memory is addressable by setting up a temporary
   SIGSEGV handler, then try to touch the memory.  No signal = good,
   signal = bad.  The previous SIGSEGV disposition and signal mask are
   restored before returning.  Not reentrant / not thread-safe: uses
   the file-static segv_jmpbuf.
 */
Bool VG_(is_addressable)(Addr p, Int size)
{
   /* volatile probe pointer so the reads below aren't optimised away,
      and so its value survives the longjmp */
   volatile Char * volatile cp = (volatile Char *)p;
   volatile Bool ret;
   vki_ksigaction sa, origsa;
   vki_ksigset_t mask;

   vg_assert(size > 0);

   /* Install our handler with all signals blocked during delivery,
      remembering the old handler and the current mask. */
   sa.ksa_handler = segv_handler;
   sa.ksa_flags = 0;
   VG_(ksigfillset)(&sa.ksa_mask);
   VG_(ksigaction)(VKI_SIGSEGV, &sa, &origsa);
   VG_(ksigprocmask)(VKI_SIG_SETMASK, NULL, &mask);

   if (__builtin_setjmp(&segv_jmpbuf) == 0) {
      /* touch each byte; a fault longjmps to the else branch */
      while(size--)
	 *cp++;
      ret = True;
   } else
      ret = False;

   /* Restore the original handler and signal mask. */
   VG_(ksigaction)(VKI_SIGSEGV, &origsa, NULL);
   VG_(ksigprocmask)(VKI_SIG_SETMASK, &mask, NULL);

   return ret;
}
782
sewardjde4a1d02002-03-22 01:27:54 +0000783/*--------------------------------------------------------------------*/
fitzhardinge98abfc72003-12-16 02:05:15 +0000784/*--- manage allocation of memory on behalf of the client ---*/
785/*--------------------------------------------------------------------*/
786
/* Allocate anonymous memory on behalf of the client and record it as
   an SF_CORE segment.  If SF_FIXED is not set, a suitable free address
   is chosen in the client area.  Returns the address, or 0 if the
   mmap failed (or didn't land where requested). */
Addr VG_(client_alloc)(Addr addr, UInt len, UInt prot, UInt flags)
{
   len = PGROUNDUP(len);

   if (!(flags & SF_FIXED))
      addr = VG_(find_map_space)(addr, len, True);

   /* Mark this as core-owned so VG_(client_free) will accept it. */
   flags |= SF_CORE;

   if (VG_(mmap)((void *)addr, len, prot,
		 VKI_MAP_FIXED | VKI_MAP_PRIVATE | VKI_MAP_ANONYMOUS | VKI_MAP_CLIENT,
		 -1, 0) == (void *)addr) {
      VG_(map_segment)(addr, len, prot, flags);
      return addr;
   }

   return 0;
}
805
/* Free a block previously handed out by VG_(client_alloc).  Only
   accepts an address that exactly starts a core-owned (SF_CORE)
   segment; otherwise logs and does nothing.
   NOTE(review): relies on VG_(munmap) to also drop the segment from
   the skiplist - confirm in its implementation. */
void VG_(client_free)(Addr addr)
{
   Segment *s = VG_(find_segment)(addr);

   if (s == NULL || s->addr != addr || !(s->flags & SF_CORE)) {
      VG_(message)(Vg_DebugMsg, "VG_(client_free)(%p) - no CORE memory found there", addr);
      return;
   }

   VG_(munmap)((void *)s->addr, s->len);
}
817
/* True if a lies in the address range reserved for the client. */
Bool VG_(is_client_addr)(Addr a)
{
   return a >= VG_(client_base) && a < VG_(client_end);
}

/* True if a lies in the skin's shadow-memory range. */
Bool VG_(is_shadow_addr)(Addr a)
{
   return a >= VG_(shadow_base) && a < VG_(shadow_end);
}

/* True if a lies in the range Valgrind reserves for itself. */
Bool VG_(is_valgrind_addr)(Addr a)
{
   return a >= VG_(valgrind_base) && a < VG_(valgrind_end);
}

/* Simple accessors for the client / shadow address-space layout. */
Addr VG_(get_client_base)(void)
{
   return VG_(client_base);
}

Addr VG_(get_client_end)(void)
{
   return VG_(client_end);
}

/* Size of the client range (note: returned as an Addr). */
Addr VG_(get_client_size)(void)
{
   return VG_(client_end)-VG_(client_base);
}

Addr VG_(get_shadow_base)(void)
{
   return VG_(shadow_base);
}

Addr VG_(get_shadow_end)(void)
{
   return VG_(shadow_end);
}

/* Size of the shadow range (note: returned as an Addr). */
Addr VG_(get_shadow_size)(void)
{
   return VG_(shadow_end)-VG_(shadow_base);
}
862
863
/* Make the shadow memory covering [p, p+sz) readable/writable,
   page-rounding the range outwards, and optionally ask the skin to
   initialize each page.  Only valid for skins that manage shadow
   memory via init_shadow_page. */
void VG_(init_shadow_range)(Addr p, UInt sz, Bool call_init)
{
   if (0)
      VG_(printf)("init_shadow_range(%p, %d)\n", p, sz);

   vg_assert(VG_(needs).shadow_memory);
   vg_assert(VG_(defined_init_shadow_page)());

   /* round the range out to whole pages */
   sz = PGROUNDUP(p+sz) - PGROUNDDN(p);
   p = PGROUNDDN(p);

   VG_(mprotect)((void *)p, sz, VKI_PROT_READ|VKI_PROT_WRITE);

   if (call_init)
      while(sz) {
	 /* ask the skin to initialize each page */
	 VG_TRACK( init_shadow_page, PGROUNDDN(p) );

	 p += VKI_BYTES_PER_PAGE;
	 sz -= VKI_BYTES_PER_PAGE;
      }
}
886
887void *VG_(shadow_alloc)(UInt size)
888{
889 static Addr shadow_alloc = 0;
890 void *ret;
891
892 vg_assert(VG_(needs).shadow_memory);
893 vg_assert(!VG_(defined_init_shadow_page)());
894
895 size = PGROUNDUP(size);
896
897 if (shadow_alloc == 0)
898 shadow_alloc = VG_(shadow_base);
899
900 if (shadow_alloc >= VG_(shadow_end))
901 return 0;
902
903 ret = (void *)shadow_alloc;
904 VG_(mprotect)(ret, size, VKI_PROT_READ|VKI_PROT_WRITE);
905
906 shadow_alloc += size;
907
908 return ret;
909}
910
911/*--------------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000912/*--- end vg_memory.c ---*/
913/*--------------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000914