blob: 7f5121baebecbd548f44c29b7d1ce14a9fbbed43 [file] [log] [blame]
sewardjde4a1d02002-03-22 01:27:54 +00001
2/*--------------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00003/*--- Memory-related stuff: segment initialisation and tracking, ---*/
4/*--- stack operations ---*/
sewardjde4a1d02002-03-22 01:27:54 +00005/*--- vg_memory.c ---*/
6/*--------------------------------------------------------------------*/
7
8/*
njnc9539842002-10-02 13:26:35 +00009 This file is part of Valgrind, an extensible x86 protected-mode
10 emulator for monitoring program execution on x86-Unixes.
sewardjde4a1d02002-03-22 01:27:54 +000011
nethercotebb1c9912004-01-04 16:43:23 +000012 Copyright (C) 2000-2004 Julian Seward
sewardjde4a1d02002-03-22 01:27:54 +000013 jseward@acm.org
sewardjde4a1d02002-03-22 01:27:54 +000014
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
njn25e49d8e72002-09-23 09:36:25 +000030 The GNU General Public License is contained in the file COPYING.
sewardjde4a1d02002-03-22 01:27:54 +000031*/
32
33#include "vg_include.h"
34
fitzhardinge98abfc72003-12-16 02:05:15 +000035#include <stddef.h>
36
sewardja4495682002-10-21 07:29:59 +000037/* Define to debug the memory-leak-detector. */
38/* #define VG_DEBUG_LEAKCHECK */
39
fitzhardinge98abfc72003-12-16 02:05:15 +000040static const Bool mem_debug = False;
41
42static Int addrcmp(const void *ap, const void *bp)
43{
44 Addr a = *(Addr *)ap;
45 Addr b = *(Addr *)bp;
46 Int ret;
47
48 if (a == b)
49 ret = 0;
50 else
51 ret = (a < b) ? -1 : 1;
52
53 return ret;
54}
55
/* Render the Addr pointed to by p as a string, for skiplist debug
   output.  Returns a pointer to a static buffer, so the result is
   only valid until the next call and this is not thread-safe. */
static Char *straddr(void *p)
{
   static Char buf[16];

   VG_(sprintf)(buf, "%p", *(Addr *)p);

   return buf;
}
64
/* Ordered collection of all the client's memory segments, keyed by
   start address; all segment bookkeeping in this file goes through
   this list. */
static SkipList sk_segments = SKIPLIST_INIT(Segment, addr, addrcmp, straddr, VG_AR_CORE);
66
67/*--------------------------------------------------------------*/
68/*--- Maintain an ordered list of all the client's mappings ---*/
69/*--------------------------------------------------------------*/
70
71Bool VG_(seg_contains)(const Segment *s, Addr p, UInt len)
72{
73 Addr se = s->addr+s->len;
74 Addr pe = p+len;
75
76 vg_assert(pe >= p);
77
78 return (p >= s->addr && pe <= se);
79}
80
81Bool VG_(seg_overlaps)(const Segment *s, Addr p, UInt len)
82{
83 Addr se = s->addr+s->len;
84 Addr pe = p+len;
85
86 vg_assert(pe >= p);
87
88 return (p < se && pe > s->addr);
89}
90
/* Prepare a Segment structure for recycling by freeing everything
   hanging off it. */
static void recycleseg(Segment *s)
{
   /* any translations made from this range are now stale */
   if (s->flags & SF_CODE)
      VG_(invalidate_translations)(s->addr, s->len, False);

   /* the filename was strdup'd into the CORE arena */
   if (s->filename != NULL)
      VG_(arena_free)(VG_AR_CORE, (Char *)s->filename);

   /* keep the SegInfo, if any - it probably still applies */
}
103
/* When freeing a Segment, also clean up every one else's ideas of
   what was going on in that range of memory */
static void freeseg(Segment *s)
{
   recycleseg(s);
   /* unlike recycleseg, here we do drop our symtab reference */
   if (s->symtab != NULL) {
      VG_(symtab_decref)(s->symtab, s->addr, s->len);
      s->symtab = NULL;
   }

   VG_(SkipNode_Free)(&sk_segments, s);
}
116
fitzhardinge1a303042003-12-22 08:48:50 +0000117/* Split a segment at address a, returning the new segment */
118Segment *VG_(split_segment)(Addr a)
fitzhardinge98abfc72003-12-16 02:05:15 +0000119{
120 Segment *s = VG_(SkipList_Find)(&sk_segments, &a);
121 Segment *ns;
122 Int delta;
123
124 vg_assert((a & (VKI_BYTES_PER_PAGE-1)) == 0);
125
126 /* missed */
127 if (s == NULL)
128 return NULL;
129
130 /* a at or beyond endpoint */
131 if (s->addr == a || a >= (s->addr+s->len))
132 return NULL;
133
134 vg_assert(a > s->addr && a < (s->addr+s->len));
135
136 ns = VG_(SkipNode_Alloc)(&sk_segments);
137
138 *ns = *s;
139
140 delta = a - s->addr;
141 ns->addr += delta;
142 ns->offset += delta;
143 ns->len -= delta;
fitzhardingee3632c62003-12-22 10:58:06 +0000144 s->len = delta;
fitzhardinge98abfc72003-12-16 02:05:15 +0000145
fitzhardinge1a4adf02003-12-22 10:42:59 +0000146 if (s->filename != NULL)
147 ns->filename = VG_(arena_strdup)(VG_AR_CORE, s->filename);
148
fitzhardinge98abfc72003-12-16 02:05:15 +0000149 if (ns->symtab != NULL)
150 VG_(symtab_incref)(ns->symtab);
151
152 VG_(SkipList_Insert)(&sk_segments, ns);
153
154 return ns;
155}
156
/* This unmaps all the segments in the range [addr, addr+len); any
   partial mappings at the ends are truncated. */
void VG_(unmap_range)(Addr addr, UInt len)
{
   Segment *s;
   Segment *next;
   static const Bool debug = False || mem_debug;
   Addr end;

   if (len == 0)
      return;

   len = PGROUNDUP(len);
   vg_assert(addr == PGROUNDDN(addr));

   if (debug)
      VG_(printf)("unmap_range(%p, %d)\n", addr, len);

   end = addr+len;

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   vg_assert((len & (VKI_BYTES_PER_PAGE-1)) == 0);

   for(s = VG_(SkipList_Find)(&sk_segments, &addr);
       s != NULL && s->addr < (addr+len);
       s = next) {
      Addr seg_end = s->addr + s->len;

      /* fetch next now in case we end up deleting this segment */
      next = VG_(SkipNode_Next)(&sk_segments, s);

      if (debug)
         VG_(printf)("unmap: addr=%p-%p s=%p ->addr=%p-%p len=%d\n",
                     addr, end, s, s->addr, seg_end, s->len);

      /* Find may return a segment starting below addr which the
         range doesn't actually touch */
      if (!VG_(seg_overlaps)(s, addr, len)) {
         if (debug)
            VG_(printf)(" (no overlap)\n");
         continue;
      }

      /* 4 cases: */
      if (addr > s->addr &&
          addr < seg_end &&
          end >= seg_end) {
         /* this segment's tail is truncated by [addr, addr+len)
            -> truncate tail
          */
         s->len = addr - s->addr;

         if (debug)
            VG_(printf)(" case 1: s->len=%d\n", s->len);
      } else if (addr <= s->addr && end > s->addr && end < seg_end) {
         /* this segment's head is truncated by [addr, addr+len)
            -> truncate head
          */
         Int delta = end - s->addr;

         if (debug)
            VG_(printf)(" case 2: s->addr=%p s->len=%d delta=%d\n", s->addr, s->len, delta);

         /* advance start and file offset in step so the mapping
            still refers to the same part of the file */
         s->addr += delta;
         s->offset += delta;
         s->len -= delta;

         vg_assert(s->len != 0);
      } else if (addr <= s->addr && end >= seg_end) {
         /* this segment is completely contained within [addr, addr+len)
            -> delete segment
          */
         Segment *rs = VG_(SkipList_Remove)(&sk_segments, &s->addr);
         vg_assert(rs == s);
         freeseg(s);

         if (debug)
            VG_(printf)(" case 3: s==%p deleted\n", s);
      } else if (addr > s->addr && end < seg_end) {
         /* [addr, addr+len) is contained within a single segment
            -> split segment into 3, delete middle portion
          */
         Segment *middle, *rs;

         middle = VG_(split_segment)(addr);
         VG_(split_segment)(addr+len);

         vg_assert(middle->addr == addr);
         rs = VG_(SkipList_Remove)(&sk_segments, &addr);
         vg_assert(rs == middle);

         freeseg(rs);

         if (debug)
            VG_(printf)(" case 4: subrange %p-%p deleted\n",
                        addr, addr+len);
      }
   }
}
255
fitzhardinge1a4adf02003-12-22 10:42:59 +0000256/* Return true if two segments are adjacent and mergable (s1 is
257 assumed to have a lower ->addr than s2) */
fitzhardinge98abfc72003-12-16 02:05:15 +0000258static inline Bool neighbours(Segment *s1, Segment *s2)
259{
260 if (s1->addr+s1->len != s2->addr)
261 return False;
262
263 if (s1->flags != s2->flags)
264 return False;
265
266 if (s1->prot != s2->prot)
267 return False;
268
269 if (s1->symtab != s2->symtab)
270 return False;
271
272 if (s1->flags & SF_FILE){
273 if ((s1->offset + s1->len) != s2->offset)
274 return False;
275 if (s1->dev != s2->dev)
276 return False;
277 if (s1->ino != s2->ino)
278 return False;
279 }
280
281 return True;
282}
283
/* If possible, merge segment with its neighbours - some segments,
   including s, may be destroyed in the process */
static void merge_segments(Addr a, UInt len)
{
   Segment *s;
   Segment *next;

   vg_assert((a & (VKI_BYTES_PER_PAGE-1)) == 0);
   vg_assert((len & (VKI_BYTES_PER_PAGE-1)) == 0);

   /* back the scan up one page so the segment immediately below a is
      also considered (it may merge with the first segment in the
      range); the top of the scanned range stays at a+len */
   a -= VKI_BYTES_PER_PAGE;
   len += VKI_BYTES_PER_PAGE;

   for(s = VG_(SkipList_Find)(&sk_segments, &a);
       s != NULL && s->addr < (a+len);) {
      next = VG_(SkipNode_Next)(&sk_segments, s);

      if (next && neighbours(s, next)) {
         Segment *rs;

         if (0)
            VG_(printf)("merge %p-%p with %p-%p\n",
                        s->addr, s->addr+s->len,
                        next->addr, next->addr+next->len);
         /* absorb next into s, then remove and free next; resume at
            the node after next (note: next's successor is fetched
            before next is removed) */
         s->len += next->len;
         s = VG_(SkipNode_Next)(&sk_segments, next);

         rs = VG_(SkipList_Remove)(&sk_segments, &next->addr);
         vg_assert(next == rs);
         freeseg(next);
      } else
         s = next;
   }
}
318
/* Record a mapping of the file region starting at 'off' in the file
   identified by (dev, ino), placed at [addr, addr+len) with
   protections 'prot' and segment flags 'flags'.  An existing segment
   with exactly matching address and length is recycled in place;
   otherwise any overlapping segments are unmapped first and a fresh
   Segment is inserted.  Afterwards, tries to associate a symbol
   table with the mapping, and finally merges adjacent compatible
   segments. */
void VG_(map_file_segment)(Addr addr, UInt len, UInt prot, UInt flags,
                           UInt dev, UInt ino, ULong off, const Char *filename)
{
   Segment *s;
   static const Bool debug = False || mem_debug;
   Bool recycled;

   if (debug)
      VG_(printf)("map_file_segment(%p, %d, %x, %x, %4x, %d, %ld, %s)\n",
                  addr, len, prot, flags, dev, ino, off, filename);

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   len = PGROUNDUP(len);

   /* First look to see what already exists around here */
   s = VG_(SkipList_Find)(&sk_segments, &addr);

   if (s != NULL && s->addr == addr && s->len == len) {
      /* This probably means we're just updating the flags */
      recycled = True;
      recycleseg(s);

      /* If we had a symtab, but the new mapping is incompatible, then
         free up the old symtab in preparation for a new one. */
      if (s->symtab != NULL &&
          (!(s->flags & SF_FILE) ||
           !(flags & SF_FILE) ||
           s->dev != dev ||
           s->ino != ino ||
           s->offset != off)) {
         VG_(symtab_decref)(s->symtab, s->addr, s->len);
         s->symtab = NULL;
      }
   } else {
      /* no exact match: clear out anything overlapping and build a
         new Segment */
      recycled = False;
      VG_(unmap_range)(addr, len);

      s = VG_(SkipNode_Alloc)(&sk_segments);

      s->addr   = addr;
      s->len    = len;
      s->symtab = NULL;
   }

   s->flags  = flags;
   s->prot   = prot;
   s->dev    = dev;
   s->ino    = ino;
   s->offset = off;

   /* the Segment owns its own copy of the filename */
   if (filename != NULL)
      s->filename = VG_(arena_strdup)(VG_AR_CORE, filename);
   else
      s->filename = NULL;

   if (debug) {
      Segment *ts;
      for(ts = VG_(SkipNode_First)(&sk_segments);
          ts != NULL;
          ts = VG_(SkipNode_Next)(&sk_segments, ts))
         VG_(printf)("list: %8p->%8p ->%d (0x%x) prot=%x flags=%x\n",
                     ts, ts->addr, ts->len, ts->len, ts->prot, ts->flags);

      VG_(printf)("inserting s=%p addr=%p len=%d\n",
                  s, s->addr, s->len);
   }

   if (!recycled)
      VG_(SkipList_Insert)(&sk_segments, s);

   /* If this mapping is of the beginning of a file, isn't part of
      Valgrind, is at least readable and seems to contain an object
      file, then try reading symbols from it. */
   if ((flags & (SF_MMAP|SF_NOSYMS)) == SF_MMAP &&
       s->symtab == NULL) {
      if (off == 0 &&
          filename != NULL &&
          (prot & (VKI_PROT_READ|VKI_PROT_EXEC)) == (VKI_PROT_READ|VKI_PROT_EXEC) &&
          len >= VKI_BYTES_PER_PAGE &&
          s->symtab == NULL &&
          VG_(is_object_file)((void *)addr))
      {
         s->symtab = VG_(read_seg_symbols)(s);

         if (s->symtab != NULL) {
            s->flags |= SF_DYNLIB;
         }
      } else if (flags & SF_MMAP) {
         const SegInfo *info;

         /* Otherwise see if an existing symtab applies to this Segment */
         for(info = VG_(next_seginfo)(NULL);
             info != NULL;
             info = VG_(next_seginfo)(info)) {
            if (VG_(seg_overlaps)(s, VG_(seg_start)(info), VG_(seg_size)(info)))
            {
               s->symtab = (SegInfo *)info;
               VG_(symtab_incref)((SegInfo *)info);
            }
         }
      }
   }

   /* clean up */
   merge_segments(addr, len);
}
426
427void VG_(map_fd_segment)(Addr addr, UInt len, UInt prot, UInt flags,
428 Int fd, ULong off, const Char *filename)
429{
430 struct vki_stat st;
431 Char *name = NULL;
432
433 st.st_dev = 0;
434 st.st_ino = 0;
435
436 if (fd != -1 && (flags & SF_FILE)) {
437 vg_assert((off & (VKI_BYTES_PER_PAGE-1)) == 0);
438
439 if (VG_(fstat)(fd, &st) < 0)
440 flags &= ~SF_FILE;
441 }
442
443 if ((flags & SF_FILE) && filename == NULL && fd != -1)
444 name = VG_(resolve_filename)(fd);
445
446 if (filename == NULL)
447 filename = name;
448
449 VG_(map_file_segment)(addr, len, prot, flags, st.st_dev, st.st_ino, off, filename);
450
451 if (name)
452 VG_(arena_free)(VG_AR_CORE, name);
453}
454
455void VG_(map_segment)(Addr addr, UInt len, UInt prot, UInt flags)
456{
457 flags &= ~SF_FILE;
458
459 VG_(map_file_segment)(addr, len, prot, flags, 0, 0, 0, 0);
460}
461
462/* set new protection flags on an address range */
463void VG_(mprotect_range)(Addr a, UInt len, UInt prot)
464{
465 Segment *s, *next;
466 static const Bool debug = False || mem_debug;
467
468 if (debug)
469 VG_(printf)("mprotect_range(%p, %d, %x)\n", a, len, prot);
470
471 /* Everything must be page-aligned */
472 vg_assert((a & (VKI_BYTES_PER_PAGE-1)) == 0);
fitzhardinge92360792003-12-24 10:11:11 +0000473 len = PGROUNDUP(len);
fitzhardinge98abfc72003-12-16 02:05:15 +0000474
fitzhardinge1a303042003-12-22 08:48:50 +0000475 VG_(split_segment)(a);
476 VG_(split_segment)(a+len);
fitzhardinge98abfc72003-12-16 02:05:15 +0000477
478 for(s = VG_(SkipList_Find)(&sk_segments, &a);
479 s != NULL && s->addr < a+len;
480 s = next)
481 {
482 next = VG_(SkipNode_Next)(&sk_segments, s);
483 if (s->addr < a)
484 continue;
485
486 s->prot = prot;
487 }
488
489 merge_segments(a, len);
490}
491
/* Find an unused gap in the address space big enough for 'len' bytes
   plus a one-page redzone on each side.  'addr' is a hint (0 means
   "anywhere in the appropriate area"); 'for_client' selects between
   the client's area and Valgrind's own mmap area.  Returns the usable
   (post-redzone) address, or 0 if no space is available. */
Addr VG_(find_map_space)(Addr addr, UInt len, Bool for_client)
{
   static const Bool debug = False || mem_debug;
   Segment *s;
   Addr ret;
   Addr limit = (for_client ? VG_(client_end) : VG_(valgrind_mmap_end));

   if (addr == 0)
      addr = for_client ? VG_(client_mapbase) : VG_(valgrind_base);
   else {
      /* leave space for redzone and still try to get the exact
         address asked for */
      addr -= VKI_BYTES_PER_PAGE;
   }
   ret = addr;

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   len = PGROUNDUP(len);

   len += VKI_BYTES_PER_PAGE * 2; /* leave redzone gaps before and after mapping */

   if (debug)
      VG_(printf)("find_map_space: ret starts as %p-%p client=%d\n",
                  ret, ret+len, for_client);

   /* walk the segment list, bumping the candidate range
      [ret, ret+len) past every segment that overlaps it */
   for(s = VG_(SkipList_Find)(&sk_segments, &ret);
       s != NULL && s->addr < (ret+len);
       s = VG_(SkipNode_Next)(&sk_segments, s))
   {
      if (debug)
         VG_(printf)("s->addr=%p len=%d (%p) ret=%p\n",
                     s->addr, s->len, s->addr+s->len, ret);

      if (s->addr < (ret + len) && (s->addr + s->len) > ret)
         ret = s->addr+s->len;
   }

   if (debug) {
      if (s)
         VG_(printf)(" s->addr=%p ->len=%d\n", s->addr, s->len);
      else
         VG_(printf)(" s == NULL\n");
   }

   /* fail if the (redzone-padded) range would run past the limit,
      otherwise step over the leading redzone page */
   if ((limit - len) < ret)
      ret = 0; /* no space */
   else
      ret += VKI_BYTES_PER_PAGE; /* skip leading redzone */

   if (debug)
      VG_(printf)("find_map_space(%p, %d, %d) -> %p\n",
                  addr, len, for_client, ret);

   return ret;
}
548
/* Look up the segment for address a in the global segment list.
   NOTE(review): appears to return the segment at or nearest below a,
   per SkipList_Find semantics — confirm against the skiplist code. */
Segment *VG_(find_segment)(Addr a)
{
   return VG_(SkipList_Find)(&sk_segments, &a);
}
553
/* Return the segment following s in address order, or NULL at the
   end of the list. */
Segment *VG_(next_segment)(Segment *s)
{
   return VG_(SkipNode_Next)(&sk_segments, s);
}
sewardjde4a1d02002-03-22 01:27:54 +0000558
sewardjde4a1d02002-03-22 01:27:54 +0000559/*------------------------------------------------------------*/
560/*--- Tracking permissions around %esp changes. ---*/
561/*------------------------------------------------------------*/
562
563/*
564 The stack
565 ~~~~~~~~~
566 The stack's segment seems to be dynamically extended downwards
567 by the kernel as the stack pointer moves down. Initially, a
568 1-page (4k) stack is allocated. When %esp moves below that for
569 the first time, presumably a page fault occurs. The kernel
570 detects that the faulting address is in the range from %esp upwards
571 to the current valid stack. It then extends the stack segment
572 downwards for enough to cover the faulting address, and resumes
573 the process (invisibly). The process is unaware of any of this.
574
575 That means that Valgrind can't spot when the stack segment is
576 being extended. Fortunately, we want to precisely and continuously
577 update stack permissions around %esp, so we need to spot all
578 writes to %esp anyway.
579
580 The deal is: when %esp is assigned a lower value, the stack is
581 being extended. Create a secondary maps to fill in any holes
582 between the old stack ptr and this one, if necessary. Then
583 mark all bytes in the area just "uncovered" by this %esp change
584 as write-only.
585
586 When %esp goes back up, mark the area receded over as unreadable
587 and unwritable.
588
589 Just to record the %esp boundary conditions somewhere convenient:
590 %esp always points to the lowest live byte in the stack. All
591 addresses below %esp are not live; those at and above it are.
592*/
593
sewardjde4a1d02002-03-22 01:27:54 +0000594/* Kludgey ... how much does %esp have to change before we reckon that
595 the application is switching stacks ? */
njn9b007f62003-04-07 14:40:25 +0000596#define VG_PLAUSIBLE_STACK_SIZE 8000000
597#define VG_HUGE_DELTA (VG_PLAUSIBLE_STACK_SIZE / 4)
sewardjde4a1d02002-03-22 01:27:54 +0000598
/* This function gets called if new_mem_stack and/or die_mem_stack are
   tracked by the skin, and one of the specialised cases (eg. new_mem_stack_4)
   isn't used in preference */
__attribute__((regparm(1)))
void VG_(unknown_esp_update)(Addr new_ESP)
{
   Addr old_ESP = VG_(get_archreg)(R_ESP);
   Int  delta = (Int)new_ESP - (Int)old_ESP;

   if (delta < -(VG_HUGE_DELTA) || VG_HUGE_DELTA < delta) {
      /* %esp has changed by more than HUGE_DELTA.  We take this to mean
         that the application is switching to a new stack, for whatever
         reason.

         JRS 20021001: following discussions with John Regehr, if a stack
         switch happens, it seems best not to mess at all with memory
         permissions.  Seems to work well with Netscape 4.X.  Really the
         only remaining difficulty is knowing exactly when a stack switch is
         happening. */
      if (VG_(clo_verbosity) > 1)
         VG_(message)(Vg_UserMsg, "Warning: client switching stacks?  "
                                  "%%esp: %p --> %p", old_ESP, new_ESP);
   } else if (delta < 0) {
      /* %esp moved down: stack grew; the newly exposed area is live */
      VG_TRACK( new_mem_stack, new_ESP, -delta );

   } else if (delta > 0) {
      /* %esp moved up: stack shrank; the receded-over area is dead */
      VG_TRACK( die_mem_stack, old_ESP, delta );
   }
}
628
/* Jump target for the temporary SIGSEGV handler installed by
   VG_(is_addressable). */
static jmp_buf segv_jmpbuf;

/* Temporary SIGSEGV handler: abandon the probe loop via longjmp.
   The panic line is unreachable unless the longjmp itself fails. */
static void segv_handler(Int seg)
{
   __builtin_longjmp(segv_jmpbuf, 1);
   VG_(core_panic)("longjmp failed");
}
636
/*
   Test if a piece of memory is addressable by setting up a temporary
   SIGSEGV handler, then try to touch the memory. No signal = good,
   signal = bad.
 */
Bool VG_(is_addressable)(Addr p, Int size)
{
   /* volatile so the probing reads aren't optimised away and the
      locals survive the longjmp */
   volatile Char * volatile cp = (volatile Char *)p;
   volatile Bool ret;
   vki_ksigaction sa, origsa;
   vki_ksigset_t mask;

   vg_assert(size > 0);

   /* install the temporary handler with all signals blocked during
      its execution; remember the original handler and signal mask so
      they can be restored afterwards */
   sa.ksa_handler = segv_handler;
   sa.ksa_flags = 0;
   VG_(ksigfillset)(&sa.ksa_mask);
   VG_(ksigaction)(VKI_SIGSEGV, &sa, &origsa);
   VG_(ksigprocmask)(VKI_SIG_SETMASK, NULL, &mask);

   if (__builtin_setjmp(&segv_jmpbuf) == 0) {
      /* touch every byte; a fault longjmps back with non-zero */
      while(size--)
         *cp++;
      ret = True;
   } else
      ret = False;

   /* restore the original signal disposition and mask */
   VG_(ksigaction)(VKI_SIGSEGV, &origsa, NULL);
   VG_(ksigprocmask)(VKI_SIG_SETMASK, &mask, NULL);

   return ret;
}
669
sewardjde4a1d02002-03-22 01:27:54 +0000670/*--------------------------------------------------------------------*/
fitzhardinge98abfc72003-12-16 02:05:15 +0000671/*--- manage allocation of memory on behalf of the client ---*/
672/*--------------------------------------------------------------------*/
673
674Addr VG_(client_alloc)(Addr addr, UInt len, UInt prot, UInt flags)
675{
676 len = PGROUNDUP(len);
677
678 if (!(flags & SF_FIXED))
679 addr = VG_(find_map_space)(addr, len, True);
680
681 flags |= SF_CORE;
682
683 if (VG_(mmap)((void *)addr, len, prot,
684 VKI_MAP_FIXED | VKI_MAP_PRIVATE | VKI_MAP_ANONYMOUS | VKI_MAP_CLIENT,
685 -1, 0) == (void *)addr) {
686 VG_(map_segment)(addr, len, prot, flags);
687 return addr;
688 }
689
690 return 0;
691}
692
693void VG_(client_free)(Addr addr)
694{
695 Segment *s = VG_(find_segment)(addr);
696
697 if (s == NULL || s->addr != addr || !(s->flags & SF_CORE)) {
698 VG_(message)(Vg_DebugMsg, "VG_(client_free)(%p) - no CORE memory found there", addr);
699 return;
700 }
701
702 VG_(munmap)((void *)s->addr, s->len);
703}
704
/* True if a lies within the client's address range. */
Bool VG_(is_client_addr)(Addr a)
{
   return a >= VG_(client_base) && a < VG_(client_end);
}

/* True if a lies within the shadow-memory range. */
Bool VG_(is_shadow_addr)(Addr a)
{
   return a >= VG_(shadow_base) && a < VG_(shadow_end);
}

/* True if a lies within Valgrind's own address range. */
Bool VG_(is_valgrind_addr)(Addr a)
{
   return a >= VG_(valgrind_base) && a < VG_(valgrind_end);
}
719
/* Accessors for the client and shadow memory layout constants. */

Addr VG_(get_client_base)(void)
{
   return VG_(client_base);
}

Addr VG_(get_client_end)(void)
{
   return VG_(client_end);
}

/* Size of the client area (returned as Addr, matching the other
   accessors). */
Addr VG_(get_client_size)(void)
{
   return VG_(client_end)-VG_(client_base);
}

Addr VG_(get_shadow_base)(void)
{
   return VG_(shadow_base);
}

Addr VG_(get_shadow_end)(void)
{
   return VG_(shadow_end);
}

/* Size of the shadow area (returned as Addr). */
Addr VG_(get_shadow_size)(void)
{
   return VG_(shadow_end)-VG_(shadow_base);
}
749
750
751void VG_(init_shadow_range)(Addr p, UInt sz, Bool call_init)
752{
753 if (0)
754 VG_(printf)("init_shadow_range(%p, %d)\n", p, sz);
755
756 vg_assert(VG_(needs).shadow_memory);
757 vg_assert(VG_(defined_init_shadow_page)());
758
759 sz = PGROUNDUP(p+sz) - PGROUNDDN(p);
760 p = PGROUNDDN(p);
761
762 VG_(mprotect)((void *)p, sz, VKI_PROT_READ|VKI_PROT_WRITE);
763
764 if (call_init)
765 while(sz) {
766 /* ask the skin to initialize each page */
767 VG_TRACK( init_shadow_page, PGROUNDDN(p) );
768
769 p += VKI_BYTES_PER_PAGE;
770 sz -= VKI_BYTES_PER_PAGE;
771 }
772}
773
774void *VG_(shadow_alloc)(UInt size)
775{
776 static Addr shadow_alloc = 0;
777 void *ret;
778
779 vg_assert(VG_(needs).shadow_memory);
780 vg_assert(!VG_(defined_init_shadow_page)());
781
782 size = PGROUNDUP(size);
783
784 if (shadow_alloc == 0)
785 shadow_alloc = VG_(shadow_base);
786
787 if (shadow_alloc >= VG_(shadow_end))
788 return 0;
789
790 ret = (void *)shadow_alloc;
791 VG_(mprotect)(ret, size, VKI_PROT_READ|VKI_PROT_WRITE);
792
793 shadow_alloc += size;
794
795 return ret;
796}
797
798/*--------------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000799/*--- end vg_memory.c ---*/
800/*--------------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000801