njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
njnc9539842002-10-02 13:26:35 +00009 This file is part of MemCheck, a heavyweight Valgrind skin for
10 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
12 Copyright (C) 2000-2002 Julian Seward
13 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
njn25cac76cb2002-09-23 11:21:57 +000033#include "mc_include.h"
34#include "memcheck.h" /* for client requests */
njn25e49d8e72002-09-23 09:36:25 +000035//#include "vg_profile.c"
36
37/* Define to debug the mem audit system. */
38/* #define VG_DEBUG_MEMORY */
39
40/* Define to debug the memory-leak-detector. */
41/* #define VG_DEBUG_LEAKCHECK */
42
43/* Define to collect detailed performance info. */
44/* #define VG_PROFILE_MEMORY */
45
46#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
47
48/*------------------------------------------------------------*/
49/*--- Command line options ---*/
50/*------------------------------------------------------------*/
51
52Bool SK_(clo_partial_loads_ok) = True;
53Int SK_(clo_freelist_vol) = 1000000;
54Bool SK_(clo_leak_check) = False;
55VgRes SK_(clo_leak_resolution) = Vg_LowRes;
56Bool SK_(clo_show_reachable) = False;
57Bool SK_(clo_workaround_gcc296_bugs) = False;
58Bool SK_(clo_check_addrVs) = True;
59Bool SK_(clo_cleanup) = True;
sewardj8ec2cfc2002-10-13 00:57:26 +000060Bool SK_(clo_avoid_strlen_errors) = True;
61
njn25e49d8e72002-09-23 09:36:25 +000062
63/*------------------------------------------------------------*/
64/*--- Profiling events ---*/
65/*------------------------------------------------------------*/
66
67typedef
68 enum {
69 VgpCheckMem = VgpFini+1,
70 VgpSetMem
71 }
72 VgpSkinCC;
73
74/*------------------------------------------------------------*/
75/*--- Low-level support for memory checking. ---*/
76/*------------------------------------------------------------*/
77
78/* All reads and writes are checked against a memory map, which
79 records the state of all memory in the process. The memory map is
80 organised like this:
81
82 The top 16 bits of an address are used to index into a top-level
83 map table, containing 65536 entries. Each entry is a pointer to a
84 second-level map, which records the accessibility and validity
85 permissions for the 65536 bytes indexed by the lower 16 bits of the
86 address. Each byte is represented by nine bits, one indicating
87 accessibility, the other eight validity. So each second-level map
88 contains 73728 bytes. This two-level arrangement conveniently
89 divides the 4G address space into 64k lumps, each size 64k bytes.
90
91 All entries in the primary (top-level) map must point to a valid
92 secondary (second-level) map. Since most of the 4G of address
93 space will not be in use -- ie, not mapped at all -- there is a
94 distinguished secondary map, which indicates `not addressible and
95 not valid' for all bytes. Entries in the primary map for
96 which the entire 64k is not in use at all point at this
97 distinguished map.
98
99 [...] lots of stuff deleted due to out of date-ness
100
101 As a final optimisation, the alignment and address checks for
102 4-byte loads and stores are combined in a neat way. The primary
103 map is extended to have 262144 entries (2^18), rather than 2^16.
104 The top 3/4 of these entries are permanently set to the
105 distinguished secondary map. For a 4-byte load/store, the
106 top-level map is indexed not with (addr >> 16) but instead f(addr),
107 where
108
109 f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
110 = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
111 = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
112
113 ie the lowest two bits are placed above the 16 high address bits.
114 If either of these two bits are nonzero, the address is misaligned;
115 this will select a secondary map from the upper 3/4 of the primary
116 map. Because this is always the distinguished secondary map, a
117 (bogus) address check failure will result. The failure handling
118 code can then figure out whether this is a genuine addr check
119 failure or whether it is a possibly-legitimate access at a
120 misaligned address.
121*/
122
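/* A worked example of the mapping described above (the same arithmetic
   appears in the fast-path helpers below, e.g. SK_(helperc_LOADV4),
   which computes rotateRight16(a) & 0x3FFFF; the address 0xBFFE1234 is
   just an arbitrary illustration):

      a = 0xBFFE1234 (4-aligned, low two bits 00)
      f(a) = 0x1234BFFE & 0x3FFFF = 0x0BFFE = a >> 16

   so the lookup lands in the normal, lower quarter of the 262144-entry
   primary map.

      a = 0xBFFE1235 (misaligned, low two bits 01)
      f(a) = 0x1235BFFE & 0x3FFFF = 0x1BFFE

   which is >= 65536, ie in the upper 3/4 of the primary map, so it hits
   the distinguished secondary map and the access falls back to the
   slow, general-case handler as explained above. */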
123
124/*------------------------------------------------------------*/
125/*--- Crude profiling machinery. ---*/
126/*------------------------------------------------------------*/
127
128#ifdef VG_PROFILE_MEMORY
129
130#define N_PROF_EVENTS 150
131
132static UInt event_ctr[N_PROF_EVENTS];
133
134static void init_prof_mem ( void )
135{
136 Int i;
137 for (i = 0; i < N_PROF_EVENTS; i++)
138 event_ctr[i] = 0;
139}
140
141static void done_prof_mem ( void )
142{
143 Int i;
144 for (i = 0; i < N_PROF_EVENTS; i++) {
145 if ((i % 10) == 0)
146 VG_(printf)("\n");
147 if (event_ctr[i] > 0)
148 VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
149 }
150 VG_(printf)("\n");
151}
152
153#define PROF_EVENT(ev) \
njne427a662002-10-02 11:08:25 +0000154 do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS); \
njn25e49d8e72002-09-23 09:36:25 +0000155 event_ctr[ev]++; \
156 } while (False);
157
158#else
159
160static void init_prof_mem ( void ) { }
161static void done_prof_mem ( void ) { }
162
163#define PROF_EVENT(ev) /* */
164
165#endif
166
167/* Event index. If just the name of the fn is given, this means the
168 number of calls to the fn. Otherwise it is the specified event.
169
170 10 alloc_secondary_map
171
172 20 get_abit
173 21 get_vbyte
174 22 set_abit
175 23 set_vbyte
176 24 get_abits4_ALIGNED
177 25 get_vbytes4_ALIGNED
178
179 30 set_address_range_perms
180 31 set_address_range_perms(lower byte loop)
181 32 set_address_range_perms(quadword loop)
182 33 set_address_range_perms(upper byte loop)
183
184 35 make_noaccess
185 36 make_writable
186 37 make_readable
187
188 40 copy_address_range_state
189 41 copy_address_range_state(byte loop)
190 42 check_writable
191 43 check_writable(byte loop)
192 44 check_readable
193 45 check_readable(byte loop)
194 46 check_readable_asciiz
195 47 check_readable_asciiz(byte loop)
196
197 50 make_aligned_word_NOACCESS
198 51 make_aligned_word_WRITABLE
199
200 60 helperc_LOADV4
201 61 helperc_STOREV4
202 62 helperc_LOADV2
203 63 helperc_STOREV2
204 64 helperc_LOADV1
205 65 helperc_STOREV1
206
207 70 rim_rd_V4_SLOWLY
208 71 rim_wr_V4_SLOWLY
209 72 rim_rd_V2_SLOWLY
210 73 rim_wr_V2_SLOWLY
211 74 rim_rd_V1_SLOWLY
212 75 rim_wr_V1_SLOWLY
213
214 80 fpu_read
215 81 fpu_read aligned 4
216 82 fpu_read aligned 8
217 83 fpu_read 2
218 84 fpu_read 10
219
220 85 fpu_write
221 86 fpu_write aligned 4
222 87 fpu_write aligned 8
223 88 fpu_write 2
224 89 fpu_write 10
225
226 90 fpu_read_check_SLOWLY
227 91 fpu_read_check_SLOWLY(byte loop)
228 92 fpu_write_check_SLOWLY
229 93 fpu_write_check_SLOWLY(byte loop)
230
231 100 is_plausible_stack_addr
232 101 handle_esp_assignment
233 102 handle_esp_assignment(-4)
234 103 handle_esp_assignment(+4)
235 104 handle_esp_assignment(-12)
236 105 handle_esp_assignment(-8)
237 106 handle_esp_assignment(+16)
238 107 handle_esp_assignment(+12)
239 108 handle_esp_assignment(0)
240 109 handle_esp_assignment(+8)
241 110 handle_esp_assignment(-16)
242 111 handle_esp_assignment(+20)
243 112 handle_esp_assignment(-20)
244 113 handle_esp_assignment(+24)
245 114 handle_esp_assignment(-24)
246
247 120 vg_handle_esp_assignment_SLOWLY
248 121 vg_handle_esp_assignment_SLOWLY(normal; move down)
249 122 vg_handle_esp_assignment_SLOWLY(normal; move up)
250 123 vg_handle_esp_assignment_SLOWLY(normal)
251 124 vg_handle_esp_assignment_SLOWLY(>= HUGE_DELTA)
252*/
253
254/*------------------------------------------------------------*/
255/*--- Function declarations. ---*/
256/*------------------------------------------------------------*/
257
258static UInt vgmext_rd_V4_SLOWLY ( Addr a );
259static UInt vgmext_rd_V2_SLOWLY ( Addr a );
260static UInt vgmext_rd_V1_SLOWLY ( Addr a );
261static void vgmext_wr_V4_SLOWLY ( Addr a, UInt vbytes );
262static void vgmext_wr_V2_SLOWLY ( Addr a, UInt vbytes );
263static void vgmext_wr_V1_SLOWLY ( Addr a, UInt vbytes );
264static void fpu_read_check_SLOWLY ( Addr addr, Int size );
265static void fpu_write_check_SLOWLY ( Addr addr, Int size );
266
267/*------------------------------------------------------------*/
268/*--- Data defns. ---*/
269/*------------------------------------------------------------*/
270
271typedef
272 struct {
273 UChar abits[8192];
274 UChar vbyte[65536];
275 }
276 SecMap;
277
278static SecMap* primary_map[ /*65536*/ 262144 ];
279static SecMap distinguished_secondary_map;
280
281#define IS_DISTINGUISHED_SM(smap) \
282 ((smap) == &distinguished_secondary_map)
283
284#define ENSURE_MAPPABLE(addr,caller) \
285 do { \
286 if (IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) { \
287 primary_map[(addr) >> 16] = alloc_secondary_map(caller); \
288 /* VG_(printf)("new 2map because of %p\n", addr); */ \
289 } \
290 } while(0)
291
292#define BITARR_SET(aaa_p,iii_p) \
293 do { \
294 UInt iii = (UInt)iii_p; \
295 UChar* aaa = (UChar*)aaa_p; \
296 aaa[iii >> 3] |= (1 << (iii & 7)); \
297 } while (0)
298
299#define BITARR_CLEAR(aaa_p,iii_p) \
300 do { \
301 UInt iii = (UInt)iii_p; \
302 UChar* aaa = (UChar*)aaa_p; \
303 aaa[iii >> 3] &= ~(1 << (iii & 7)); \
304 } while (0)
305
306#define BITARR_TEST(aaa_p,iii_p) \
307 (0 != (((UChar*)aaa_p)[ ((UInt)iii_p) >> 3 ] \
308 & (1 << (((UInt)iii_p) & 7)))) \
309
310
311#define VGM_BIT_VALID 0
312#define VGM_BIT_INVALID 1
313
314#define VGM_NIBBLE_VALID 0
315#define VGM_NIBBLE_INVALID 0xF
316
317#define VGM_BYTE_VALID 0
318#define VGM_BYTE_INVALID 0xFF
319
320#define VGM_WORD_VALID 0
321#define VGM_WORD_INVALID 0xFFFFFFFF
322
323#define VGM_EFLAGS_VALID 0xFFFFFFFE
324#define VGM_EFLAGS_INVALID 0xFFFFFFFF /* not used */
325
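/* How these encodings are used together (a summary of the make_*
   functions further down): make_writable marks bytes A=VGM_BIT_VALID
   but V=VGM_BYTE_INVALID -- addressible yet undefined, the state of
   freshly malloc'd memory -- while make_readable marks both valid and
   make_noaccess marks both invalid. */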
326
327static void init_shadow_memory ( void )
328{
329 Int i;
330
331 for (i = 0; i < 8192; i++) /* Invalid address */
332 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
333 for (i = 0; i < 65536; i++) /* Invalid Value */
334 distinguished_secondary_map.vbyte[i] = VGM_BYTE_INVALID;
335
336 /* These entries gradually get overwritten as the used address
337 space expands. */
338 for (i = 0; i < 65536; i++)
339 primary_map[i] = &distinguished_secondary_map;
340
341 /* These ones should never change; it's a bug in Valgrind if they do. */
342 for (i = 65536; i < 262144; i++)
343 primary_map[i] = &distinguished_secondary_map;
344}
345
346void SK_(post_clo_init) ( void )
347{
348}
349
350void SK_(fini) ( void )
351{
352 VG_(print_malloc_stats)();
353
354 if (VG_(clo_verbosity) == 1) {
355 if (!SK_(clo_leak_check))
356 VG_(message)(Vg_UserMsg,
357 "For a detailed leak analysis, rerun with: --leak-check=yes");
358
359 VG_(message)(Vg_UserMsg,
360 "For counts of detected errors, rerun with: -v");
361 }
362 if (SK_(clo_leak_check)) SK_(detect_memory_leaks)();
363
364 done_prof_mem();
365
366 if (0) {
367 VG_(message)(Vg_DebugMsg,
368 "------ Valgrind's client block stats follow ---------------" );
369 SK_(show_client_block_stats)();
370 }
371}
372
373/*------------------------------------------------------------*/
374/*--- Basic bitmap management, reading and writing. ---*/
375/*------------------------------------------------------------*/
376
377/* Allocate and initialise a secondary map. */
378
379static SecMap* alloc_secondary_map ( __attribute__ ((unused))
380 Char* caller )
381{
382 SecMap* map;
383 UInt i;
384 PROF_EVENT(10);
385
386 /* Mark all bytes as invalid access and invalid value. */
387
388 /* It just happens that a SecMap occupies exactly 18 pages --
389 although this isn't important, so the following assert is
390 spurious. */
njne427a662002-10-02 11:08:25 +0000391 sk_assert(0 == (sizeof(SecMap) % VKI_BYTES_PER_PAGE));
njn25e49d8e72002-09-23 09:36:25 +0000392 map = VG_(get_memory_from_mmap)( sizeof(SecMap), caller );
393
394 for (i = 0; i < 8192; i++)
395 map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */
396 for (i = 0; i < 65536; i++)
397 map->vbyte[i] = VGM_BYTE_INVALID; /* Invalid Value */
398
399 /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
400 return map;
401}
402
403
404/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
405
406static __inline__ UChar get_abit ( Addr a )
407{
408 SecMap* sm = primary_map[a >> 16];
409 UInt sm_off = a & 0xFFFF;
410 PROF_EVENT(20);
411# if 0
412 if (IS_DISTINGUISHED_SM(sm))
413 VG_(message)(Vg_DebugMsg,
414 "accessed distinguished 2ndary (A)map! 0x%x\n", a);
415# endif
416 return BITARR_TEST(sm->abits, sm_off)
417 ? VGM_BIT_INVALID : VGM_BIT_VALID;
418}
419
420static __inline__ UChar get_vbyte ( Addr a )
421{
422 SecMap* sm = primary_map[a >> 16];
423 UInt sm_off = a & 0xFFFF;
424 PROF_EVENT(21);
425# if 0
426 if (IS_DISTINGUISHED_SM(sm))
427 VG_(message)(Vg_DebugMsg,
428 "accessed distinguished 2ndary (V)map! 0x%x\n", a);
429# endif
430 return sm->vbyte[sm_off];
431}
432
433static __inline__ void set_abit ( Addr a, UChar abit )
434{
435 SecMap* sm;
436 UInt sm_off;
437 PROF_EVENT(22);
438 ENSURE_MAPPABLE(a, "set_abit");
439 sm = primary_map[a >> 16];
440 sm_off = a & 0xFFFF;
441 if (abit)
442 BITARR_SET(sm->abits, sm_off);
443 else
444 BITARR_CLEAR(sm->abits, sm_off);
445}
446
447static __inline__ void set_vbyte ( Addr a, UChar vbyte )
448{
449 SecMap* sm;
450 UInt sm_off;
451 PROF_EVENT(23);
452 ENSURE_MAPPABLE(a, "set_vbyte");
453 sm = primary_map[a >> 16];
454 sm_off = a & 0xFFFF;
455 sm->vbyte[sm_off] = vbyte;
456}
457
458
459/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
460
461static __inline__ UChar get_abits4_ALIGNED ( Addr a )
462{
463 SecMap* sm;
464 UInt sm_off;
465 UChar abits8;
466 PROF_EVENT(24);
467# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000468 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000469# endif
470 sm = primary_map[a >> 16];
471 sm_off = a & 0xFFFF;
472 abits8 = sm->abits[sm_off >> 3];
473 abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
474 abits8 &= 0x0F;
475 return abits8;
476}
477
478static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
479{
480 SecMap* sm = primary_map[a >> 16];
481 UInt sm_off = a & 0xFFFF;
482 PROF_EVENT(25);
483# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000484 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000485# endif
486 return ((UInt*)(sm->vbyte))[sm_off >> 2];
487}
488
489
490/*------------------------------------------------------------*/
491/*--- Setting permissions over address ranges. ---*/
492/*------------------------------------------------------------*/
493
494static void set_address_range_perms ( Addr a, UInt len,
495 UInt example_a_bit,
496 UInt example_v_bit )
497{
498 UChar vbyte, abyte8;
499 UInt vword4, sm_off;
500 SecMap* sm;
501
502 PROF_EVENT(30);
503
504 if (len == 0)
505 return;
506
507 if (len > 100 * 1000 * 1000) {
508 VG_(message)(Vg_UserMsg,
509 "Warning: set address range perms: "
510 "large range %u, a %d, v %d",
511 len, example_a_bit, example_v_bit );
512 }
513
514 VGP_PUSHCC(VgpSetMem);
515
516 /* Requests to change permissions of huge address ranges may
517 indicate bugs in our machinery. 30,000,000 is arbitrary, but so
518 far all legitimate requests have fallen beneath that size. */
519 /* 4 Mar 02: this is just stupid; get rid of it. */
njne427a662002-10-02 11:08:25 +0000520 /* sk_assert(len < 30000000); */
njn25e49d8e72002-09-23 09:36:25 +0000521
522 /* Check the permissions make sense. */
njne427a662002-10-02 11:08:25 +0000523 sk_assert(example_a_bit == VGM_BIT_VALID
njn25e49d8e72002-09-23 09:36:25 +0000524 || example_a_bit == VGM_BIT_INVALID);
njne427a662002-10-02 11:08:25 +0000525 sk_assert(example_v_bit == VGM_BIT_VALID
njn25e49d8e72002-09-23 09:36:25 +0000526 || example_v_bit == VGM_BIT_INVALID);
527 if (example_a_bit == VGM_BIT_INVALID)
njne427a662002-10-02 11:08:25 +0000528 sk_assert(example_v_bit == VGM_BIT_INVALID);
njn25e49d8e72002-09-23 09:36:25 +0000529
530 /* The validity bits to write. */
531 vbyte = example_v_bit==VGM_BIT_VALID
532 ? VGM_BYTE_VALID : VGM_BYTE_INVALID;
533
534 /* In order that we can charge through the address space at 8
535 bytes/main-loop iteration, make up some perms. */
536 abyte8 = (example_a_bit << 7)
537 | (example_a_bit << 6)
538 | (example_a_bit << 5)
539 | (example_a_bit << 4)
540 | (example_a_bit << 3)
541 | (example_a_bit << 2)
542 | (example_a_bit << 1)
543 | (example_a_bit << 0);
544 vword4 = (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;
545
546# ifdef VG_DEBUG_MEMORY
547 /* Do it ... */
548 while (True) {
549 PROF_EVENT(31);
550 if (len == 0) break;
551 set_abit ( a, example_a_bit );
552 set_vbyte ( a, vbyte );
553 a++;
554 len--;
555 }
556
557# else
558 /* Slowly do parts preceding 8-byte alignment. */
559 while (True) {
560 PROF_EVENT(31);
561 if (len == 0) break;
562 if ((a % 8) == 0) break;
563 set_abit ( a, example_a_bit );
564 set_vbyte ( a, vbyte );
565 a++;
566 len--;
567 }
568
569 if (len == 0) {
570 VGP_POPCC(VgpSetMem);
571 return;
572 }
njne427a662002-10-02 11:08:25 +0000573 sk_assert((a % 8) == 0 && len > 0);
njn25e49d8e72002-09-23 09:36:25 +0000574
575 /* Once aligned, go fast. */
576 while (True) {
577 PROF_EVENT(32);
578 if (len < 8) break;
579 ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
580 sm = primary_map[a >> 16];
581 sm_off = a & 0xFFFF;
582 sm->abits[sm_off >> 3] = abyte8;
583 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
584 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
585 a += 8;
586 len -= 8;
587 }
588
589 if (len == 0) {
590 VGP_POPCC(VgpSetMem);
591 return;
592 }
njne427a662002-10-02 11:08:25 +0000593 sk_assert((a % 8) == 0 && len > 0 && len < 8);
njn25e49d8e72002-09-23 09:36:25 +0000594
595 /* Finish the upper fragment. */
596 while (True) {
597 PROF_EVENT(33);
598 if (len == 0) break;
599 set_abit ( a, example_a_bit );
600 set_vbyte ( a, vbyte );
601 a++;
602 len--;
603 }
604# endif
605
606 /* Check that zero page and highest page have not been written to
607 -- this could happen with buggy syscall wrappers. Today
608 (2001-04-26) had precisely such a problem with __NR_setitimer. */
njne427a662002-10-02 11:08:25 +0000609 sk_assert(SK_(cheap_sanity_check)());
njn25e49d8e72002-09-23 09:36:25 +0000610 VGP_POPCC(VgpSetMem);
611}
612
613/* Set permissions for address ranges ... */
614
615void SK_(make_noaccess) ( Addr a, UInt len )
616{
617 PROF_EVENT(35);
618 DEBUG("SK_(make_noaccess)(%p, %x)\n", a, len);
619 set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
620}
621
622void SK_(make_writable) ( Addr a, UInt len )
623{
624 PROF_EVENT(36);
625 DEBUG("SK_(make_writable)(%p, %x)\n", a, len);
626 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
627}
628
629void SK_(make_readable) ( Addr a, UInt len )
630{
631 PROF_EVENT(37);
632 DEBUG("SK_(make_readable)(%p, 0x%x)\n", a, len);
633 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
634}
635
636/* Block-copy permissions (needed for implementing realloc()). */
637
638static void copy_address_range_state ( Addr src, Addr dst, UInt len )
639{
640 UInt i;
641
642 DEBUG("copy_address_range_state\n");
643
644 PROF_EVENT(40);
645 for (i = 0; i < len; i++) {
646 UChar abit = get_abit ( src+i );
647 UChar vbyte = get_vbyte ( src+i );
648 PROF_EVENT(41);
649 set_abit ( dst+i, abit );
650 set_vbyte ( dst+i, vbyte );
651 }
652}
653
654
655/* Check permissions for address range. If inadequate permissions
656 exist, *bad_addr is set to the offending address, so the caller can
657 know what it is. */
658
659Bool SK_(check_writable) ( Addr a, UInt len, Addr* bad_addr )
660{
661 UInt i;
662 UChar abit;
663 PROF_EVENT(42);
664 for (i = 0; i < len; i++) {
665 PROF_EVENT(43);
666 abit = get_abit(a);
667 if (abit == VGM_BIT_INVALID) {
668 if (bad_addr != NULL) *bad_addr = a;
669 return False;
670 }
671 a++;
672 }
673 return True;
674}
675
676Bool SK_(check_readable) ( Addr a, UInt len, Addr* bad_addr )
677{
678 UInt i;
679 UChar abit;
680 UChar vbyte;
681
682 PROF_EVENT(44);
683 DEBUG("SK_(check_readable)\n");
684 for (i = 0; i < len; i++) {
685 abit = get_abit(a);
686 vbyte = get_vbyte(a);
687 PROF_EVENT(45);
688 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
689 if (bad_addr != NULL) *bad_addr = a;
690 return False;
691 }
692 a++;
693 }
694 return True;
695}
696
697
698/* Check a zero-terminated ascii string. Tricky -- don't want to
699 examine the actual bytes, to find the end, until we're sure it is
700 safe to do so. */
701
702Bool SK_(check_readable_asciiz) ( Addr a, Addr* bad_addr )
703{
704 UChar abit;
705 UChar vbyte;
706 PROF_EVENT(46);
707 DEBUG("SK_(check_readable_asciiz)\n");
708 while (True) {
709 PROF_EVENT(47);
710 abit = get_abit(a);
711 vbyte = get_vbyte(a);
712 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
713 if (bad_addr != NULL) *bad_addr = a;
714 return False;
715 }
716 /* Ok, a is safe to read. */
717 if (* ((UChar*)a) == 0) return True;
718 a++;
719 }
720}
721
722
723/*------------------------------------------------------------*/
724/*--- Memory event handlers ---*/
725/*------------------------------------------------------------*/
726
727/* Setting permissions for aligned words. This supports fast stack
728 operations. */
729
730static void make_noaccess_aligned ( Addr a, UInt len )
731{
732 SecMap* sm;
733 UInt sm_off;
734 UChar mask;
735 Addr a_past_end = a + len;
736
737 VGP_PUSHCC(VgpSetMem);
738
739 PROF_EVENT(50);
740# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000741 sk_assert(IS_ALIGNED4_ADDR(a));
742 sk_assert(IS_ALIGNED4_ADDR(len));
njn25e49d8e72002-09-23 09:36:25 +0000743# endif
744
745 for ( ; a < a_past_end; a += 4) {
746 ENSURE_MAPPABLE(a, "make_noaccess_aligned");
747 sm = primary_map[a >> 16];
748 sm_off = a & 0xFFFF;
749 ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
750 mask = 0x0F;
751 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
752 /* mask now contains 1s where we wish to make address bits
753 invalid (1s). */
754 sm->abits[sm_off >> 3] |= mask;
755 }
756 VGP_POPCC(VgpSetMem);
757}
758
759static void make_writable_aligned ( Addr a, UInt len )
760{
761 SecMap* sm;
762 UInt sm_off;
763 UChar mask;
764 Addr a_past_end = a + len;
765
766 VGP_PUSHCC(VgpSetMem);
767
768 PROF_EVENT(51);
769# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000770 sk_assert(IS_ALIGNED4_ADDR(a));
771 sk_assert(IS_ALIGNED4_ADDR(len));
njn25e49d8e72002-09-23 09:36:25 +0000772# endif
773
774 for ( ; a < a_past_end; a += 4) {
775 ENSURE_MAPPABLE(a, "make_writable_aligned");
776 sm = primary_map[a >> 16];
777 sm_off = a & 0xFFFF;
778 ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
779 mask = 0x0F;
780 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
781 /* mask now contains 1s where we wish to make address bits
782 valid (0s). */
783 sm->abits[sm_off >> 3] &= ~mask;
784 }
785 VGP_POPCC(VgpSetMem);
786}
787
788
789static
790void check_is_writable ( CorePart part, ThreadState* tst,
791 Char* s, UInt base, UInt size )
792{
793 Bool ok;
794 Addr bad_addr;
795
796 VGP_PUSHCC(VgpCheckMem);
797
798 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
799 base,base+size-1); */
800 ok = SK_(check_writable) ( base, size, &bad_addr );
801 if (!ok) {
802 switch (part) {
803 case Vg_CoreSysCall:
804 SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/True, s );
805 break;
806
807 case Vg_CorePThread:
808 case Vg_CoreSignal:
809 SK_(record_core_mem_error)( tst, /*isWrite=*/True, s );
810 break;
811
812 default:
njne427a662002-10-02 11:08:25 +0000813 VG_(skin_panic)("check_is_writable: Unknown or unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000814 }
815 }
816
817 VGP_POPCC(VgpCheckMem);
818}
819
820static
821void check_is_readable ( CorePart part, ThreadState* tst,
822 Char* s, UInt base, UInt size )
823{
824 Bool ok;
825 Addr bad_addr;
826
827 VGP_PUSHCC(VgpCheckMem);
828
829 /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
830 base,base+size-1); */
831 ok = SK_(check_readable) ( base, size, &bad_addr );
832 if (!ok) {
833 switch (part) {
834 case Vg_CoreSysCall:
835 SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/False, s );
836 break;
837
838 case Vg_CorePThread:
839 SK_(record_core_mem_error)( tst, /*isWrite=*/False, s );
840 break;
841
842 /* If we're being asked to jump to a silly address, record an error
843 message before potentially crashing the entire system. */
844 case Vg_CoreTranslate:
845 SK_(record_jump_error)( tst, bad_addr );
846 break;
847
848 default:
njne427a662002-10-02 11:08:25 +0000849 VG_(skin_panic)("check_is_readable: Unknown or unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000850 }
851 }
852 VGP_POPCC(VgpCheckMem);
853}
854
855static
856void check_is_readable_asciiz ( CorePart part, ThreadState* tst,
857 Char* s, UInt str )
858{
859 Bool ok = True;
860 Addr bad_addr;
861 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
862
863 VGP_PUSHCC(VgpCheckMem);
864
njne427a662002-10-02 11:08:25 +0000865 sk_assert(part == Vg_CoreSysCall);
njn25e49d8e72002-09-23 09:36:25 +0000866 ok = SK_(check_readable_asciiz) ( (Addr)str, &bad_addr );
867 if (!ok) {
868 SK_(record_param_error) ( tst, bad_addr, /*is_writable =*/False, s );
869 }
870
871 VGP_POPCC(VgpCheckMem);
872}
873
874
875static
876void memcheck_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
877{
njn1f3a9092002-10-04 09:22:30 +0000878 /* Ignore the permissions, just make it readable. Seems to work... */
njn25e49d8e72002-09-23 09:36:25 +0000879 DEBUG("new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
880 SK_(make_readable)(a, len);
881}
882
883static
884void memcheck_new_mem_heap ( Addr a, UInt len, Bool is_inited )
885{
886 if (is_inited) {
887 SK_(make_readable)(a, len);
888 } else {
889 SK_(make_writable)(a, len);
890 }
891}
892
893static
894void memcheck_set_perms (Addr a, UInt len,
895 Bool nn, Bool rr, Bool ww, Bool xx)
896{
897 DEBUG("memcheck_set_perms(%p, %u, nn=%u, rr=%u ww=%u, xx=%u)\n",
898 a, len, nn, rr, ww, xx);
899 if (rr) SK_(make_readable)(a, len);
900 else if (ww) SK_(make_writable)(a, len);
901 else SK_(make_noaccess)(a, len);
902}
903
904
905/*------------------------------------------------------------*/
906/*--- Functions called directly from generated code. ---*/
907/*------------------------------------------------------------*/
908
909static __inline__ UInt rotateRight16 ( UInt x )
910{
911 /* Amazingly, gcc turns this into a single rotate insn. */
912 return (x >> 16) | (x << 16);
913}
914
915
916static __inline__ UInt shiftRight16 ( UInt x )
917{
918 return x >> 16;
919}
920
921
922/* Read/write 1/2/4 sized V bytes, and emit an address error if
923 needed. */
924
925/* VG_(helperc_{LD,ST}V{1,2,4}) handle the common case fast.
926 Under all other circumstances, they defer to the relevant _SLOWLY
927 function, which can handle all situations.
928*/
929__attribute__ ((regparm(1)))
930UInt SK_(helperc_LOADV4) ( Addr a )
931{
932# ifdef VG_DEBUG_MEMORY
933 return vgmext_rd_V4_SLOWLY(a);
934# else
935 UInt sec_no = rotateRight16(a) & 0x3FFFF;
936 SecMap* sm = primary_map[sec_no];
937 UInt a_off = (a & 0xFFFF) >> 3;
938 UChar abits = sm->abits[a_off];
939 abits >>= (a & 4);
940 abits &= 15;
941 PROF_EVENT(60);
942 if (abits == VGM_NIBBLE_VALID) {
943 /* Handle common case quickly: a is suitably aligned, is mapped,
944 and is addressible. */
945 UInt v_off = a & 0xFFFF;
946 return ((UInt*)(sm->vbyte))[ v_off >> 2 ];
947 } else {
948 /* Slow but general case. */
949 return vgmext_rd_V4_SLOWLY(a);
950 }
951# endif
952}
953
954__attribute__ ((regparm(2)))
955void SK_(helperc_STOREV4) ( Addr a, UInt vbytes )
956{
957# ifdef VG_DEBUG_MEMORY
958 vgmext_wr_V4_SLOWLY(a, vbytes);
959# else
960 UInt sec_no = rotateRight16(a) & 0x3FFFF;
961 SecMap* sm = primary_map[sec_no];
962 UInt a_off = (a & 0xFFFF) >> 3;
963 UChar abits = sm->abits[a_off];
964 abits >>= (a & 4);
965 abits &= 15;
966 PROF_EVENT(61);
967 if (abits == VGM_NIBBLE_VALID) {
968 /* Handle common case quickly: a is suitably aligned, is mapped,
969 and is addressible. */
970 UInt v_off = a & 0xFFFF;
971 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = vbytes;
972 } else {
973 /* Slow but general case. */
974 vgmext_wr_V4_SLOWLY(a, vbytes);
975 }
976# endif
977}
978
979__attribute__ ((regparm(1)))
980UInt SK_(helperc_LOADV2) ( Addr a )
981{
982# ifdef VG_DEBUG_MEMORY
983 return vgmext_rd_V2_SLOWLY(a);
984# else
985 UInt sec_no = rotateRight16(a) & 0x1FFFF;
986 SecMap* sm = primary_map[sec_no];
987 UInt a_off = (a & 0xFFFF) >> 3;
988 PROF_EVENT(62);
989 if (sm->abits[a_off] == VGM_BYTE_VALID) {
990 /* Handle common case quickly. */
991 UInt v_off = a & 0xFFFF;
992 return 0xFFFF0000
993 |
994 (UInt)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
995 } else {
996 /* Slow but general case. */
997 return vgmext_rd_V2_SLOWLY(a);
998 }
999# endif
1000}
1001
1002__attribute__ ((regparm(2)))
1003void SK_(helperc_STOREV2) ( Addr a, UInt vbytes )
1004{
1005# ifdef VG_DEBUG_MEMORY
1006 vgmext_wr_V2_SLOWLY(a, vbytes);
1007# else
1008 UInt sec_no = rotateRight16(a) & 0x1FFFF;
1009 SecMap* sm = primary_map[sec_no];
1010 UInt a_off = (a & 0xFFFF) >> 3;
1011 PROF_EVENT(63);
1012 if (sm->abits[a_off] == VGM_BYTE_VALID) {
1013 /* Handle common case quickly. */
1014 UInt v_off = a & 0xFFFF;
1015 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = vbytes & 0x0000FFFF;
1016 } else {
1017 /* Slow but general case. */
1018 vgmext_wr_V2_SLOWLY(a, vbytes);
1019 }
1020# endif
1021}
1022
1023__attribute__ ((regparm(1)))
1024UInt SK_(helperc_LOADV1) ( Addr a )
1025{
1026# ifdef VG_DEBUG_MEMORY
1027 return vgmext_rd_V1_SLOWLY(a);
1028# else
1029 UInt sec_no = shiftRight16(a);
1030 SecMap* sm = primary_map[sec_no];
1031 UInt a_off = (a & 0xFFFF) >> 3;
1032 PROF_EVENT(64);
1033 if (sm->abits[a_off] == VGM_BYTE_VALID) {
1034 /* Handle common case quickly. */
1035 UInt v_off = a & 0xFFFF;
1036 return 0xFFFFFF00
1037 |
1038 (UInt)( ((UChar*)(sm->vbyte))[ v_off ] );
1039 } else {
1040 /* Slow but general case. */
1041 return vgmext_rd_V1_SLOWLY(a);
1042 }
1043# endif
1044}
1045
1046__attribute__ ((regparm(2)))
1047void SK_(helperc_STOREV1) ( Addr a, UInt vbytes )
1048{
1049# ifdef VG_DEBUG_MEMORY
1050 vgmext_wr_V1_SLOWLY(a, vbytes);
1051# else
1052 UInt sec_no = shiftRight16(a);
1053 SecMap* sm = primary_map[sec_no];
1054 UInt a_off = (a & 0xFFFF) >> 3;
1055 PROF_EVENT(65);
1056 if (sm->abits[a_off] == VGM_BYTE_VALID) {
1057 /* Handle common case quickly. */
1058 UInt v_off = a & 0xFFFF;
1059 ((UChar*)(sm->vbyte))[ v_off ] = vbytes & 0x000000FF;
1060 } else {
1061 /* Slow but general case. */
1062 vgmext_wr_V1_SLOWLY(a, vbytes);
1063 }
1064# endif
1065}
1066
1067
1068/*------------------------------------------------------------*/
1069/*--- Fallback functions to handle cases that the above ---*/
1070/*--- VG_(helperc_{LD,ST}V{1,2,4}) can't manage. ---*/
1071/*------------------------------------------------------------*/
1072
1073static UInt vgmext_rd_V4_SLOWLY ( Addr a )
1074{
1075 Bool a0ok, a1ok, a2ok, a3ok;
1076 UInt vb0, vb1, vb2, vb3;
1077
1078 PROF_EVENT(70);
1079
1080 /* First establish independently the addressibility of the 4 bytes
1081 involved. */
1082 a0ok = get_abit(a+0) == VGM_BIT_VALID;
1083 a1ok = get_abit(a+1) == VGM_BIT_VALID;
1084 a2ok = get_abit(a+2) == VGM_BIT_VALID;
1085 a3ok = get_abit(a+3) == VGM_BIT_VALID;
1086
1087 /* Also get the validity bytes for the address. */
1088 vb0 = (UInt)get_vbyte(a+0);
1089 vb1 = (UInt)get_vbyte(a+1);
1090 vb2 = (UInt)get_vbyte(a+2);
1091 vb3 = (UInt)get_vbyte(a+3);
1092
1093 /* Now distinguish 3 cases */
1094
1095 /* Case 1: the address is completely valid, so:
1096 - no addressing error
1097 - return V bytes as read from memory
1098 */
1099 if (a0ok && a1ok && a2ok && a3ok) {
1100 UInt vw = VGM_WORD_INVALID;
1101 vw <<= 8; vw |= vb3;
1102 vw <<= 8; vw |= vb2;
1103 vw <<= 8; vw |= vb1;
1104 vw <<= 8; vw |= vb0;
1105 return vw;
1106 }
1107
1108 /* Case 2: the address is completely invalid.
1109 - emit addressing error
1110 - return V word indicating validity.
1111 This sounds strange, but if we make loads from invalid addresses
1112 give invalid data, we also risk producing a number of confusing
1113 undefined-value errors later, which obscures the fact that the
1114 error arose in the first place from an invalid address.
1115 */
1116 /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
1117 if (!SK_(clo_partial_loads_ok)
1118 || ((a & 3) != 0)
1119 || (!a0ok && !a1ok && !a2ok && !a3ok)) {
1120 SK_(record_address_error)( a, 4, False );
1121 return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
1122 | (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
1123 }
1124
1125 /* Case 3: the address is partially valid.
1126 - no addressing error
1127 - returned V word is invalid where the address is invalid,
1128 and contains V bytes from memory otherwise.
1129 Case 3 is only allowed if SK_(clo_partial_loads_ok) is True
1130 (which is the default), and the address is 4-aligned.
1131 If not, Case 2 will have applied.
1132 */
njne427a662002-10-02 11:08:25 +00001133 sk_assert(SK_(clo_partial_loads_ok));
njn25e49d8e72002-09-23 09:36:25 +00001134 {
1135 UInt vw = VGM_WORD_INVALID;
1136 vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
1137 vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
1138 vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
1139 vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
1140 return vw;
1141 }
1142}
1143
1144static void vgmext_wr_V4_SLOWLY ( Addr a, UInt vbytes )
1145{
1146 /* Check the address for validity. */
1147 Bool aerr = False;
1148 PROF_EVENT(71);
1149
1150 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1151 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1152 if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
1153 if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
1154
1155 /* Store the V bytes, remembering to do it little-endian-ly. */
1156 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1157 set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
1158 set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
1159 set_vbyte( a+3, vbytes & 0x000000FF );
1160
1161 /* If an address error has happened, report it. */
1162 if (aerr)
1163 SK_(record_address_error)( a, 4, True );
1164}
1165
1166static UInt vgmext_rd_V2_SLOWLY ( Addr a )
1167{
1168 /* Check the address for validity. */
1169 UInt vw = VGM_WORD_INVALID;
1170 Bool aerr = False;
1171 PROF_EVENT(72);
1172
1173 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1174 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1175
1176 /* Fetch the V bytes, remembering to do it little-endian-ly. */
1177 vw <<= 8; vw |= (UInt)get_vbyte(a+1);
1178 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1179
1180 /* If an address error has happened, report it. */
1181 if (aerr) {
1182 SK_(record_address_error)( a, 2, False );
1183 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1184 | (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
1185 }
1186 return vw;
1187}
1188
1189static void vgmext_wr_V2_SLOWLY ( Addr a, UInt vbytes )
1190{
1191 /* Check the address for validity. */
1192 Bool aerr = False;
1193 PROF_EVENT(73);
1194
1195 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1196 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1197
1198 /* Store the V bytes, remembering to do it little-endian-ly. */
1199 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1200 set_vbyte( a+1, vbytes & 0x000000FF );
1201
1202 /* If an address error has happened, report it. */
1203 if (aerr)
1204 SK_(record_address_error)( a, 2, True );
1205}
1206
1207static UInt vgmext_rd_V1_SLOWLY ( Addr a )
1208{
1209 /* Check the address for validity. */
1210 UInt vw = VGM_WORD_INVALID;
1211 Bool aerr = False;
1212 PROF_EVENT(74);
1213
1214 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1215
1216 /* Fetch the V byte. */
1217 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1218
1219 /* If an address error has happened, report it. */
1220 if (aerr) {
1221 SK_(record_address_error)( a, 1, False );
1222 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1223 | (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
1224 }
1225 return vw;
1226}
1227
1228static void vgmext_wr_V1_SLOWLY ( Addr a, UInt vbytes )
1229{
1230 /* Check the address for validity. */
1231 Bool aerr = False;
1232 PROF_EVENT(75);
1233 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1234
1235 /* Store the V bytes, remembering to do it little-endian-ly. */
1236 set_vbyte( a+0, vbytes & 0x000000FF );
1237
1238 /* If an address error has happened, report it. */
1239 if (aerr)
1240 SK_(record_address_error)( a, 1, True );
1241}
1242
1243
1244/* ---------------------------------------------------------------------
1245 Called from generated code, or from the assembly helpers.
1246 Handlers for value check failures.
1247 ------------------------------------------------------------------ */
1248
1249void SK_(helperc_value_check0_fail) ( void )
1250{
1251 SK_(record_value_error) ( 0 );
1252}
1253
1254void SK_(helperc_value_check1_fail) ( void )
1255{
1256 SK_(record_value_error) ( 1 );
1257}
1258
1259void SK_(helperc_value_check2_fail) ( void )
1260{
1261 SK_(record_value_error) ( 2 );
1262}
1263
1264void SK_(helperc_value_check4_fail) ( void )
1265{
1266 SK_(record_value_error) ( 4 );
1267}
1268
1269
1270/* ---------------------------------------------------------------------
1271 FPU load and store checks, called from generated code.
1272 ------------------------------------------------------------------ */
1273
1274__attribute__ ((regparm(2)))
1275void SK_(fpu_read_check) ( Addr addr, Int size )
1276{
1277 /* Ensure the read area is both addressible and valid (ie,
1278 readable). If there's an address error, don't report a value
1279 error too; but if there isn't an address error, check for a
1280 value error.
1281
1282 Try to be reasonably fast on the common case; wimp out and defer
1283 to fpu_read_check_SLOWLY for everything else. */
1284
1285 SecMap* sm;
1286 UInt sm_off, v_off, a_off;
1287 Addr addr4;
1288
1289 PROF_EVENT(80);
1290
1291# ifdef VG_DEBUG_MEMORY
1292 fpu_read_check_SLOWLY ( addr, size );
1293# else
1294
1295 if (size == 4) {
1296 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
1297 PROF_EVENT(81);
1298 /* Properly aligned. */
1299 sm = primary_map[addr >> 16];
1300 sm_off = addr & 0xFFFF;
1301 a_off = sm_off >> 3;
1302 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
1303 /* Properly aligned and addressible. */
1304 v_off = addr & 0xFFFF;
1305 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1306 goto slow4;
1307 /* Properly aligned, addressible and with valid data. */
1308 return;
1309 slow4:
1310 fpu_read_check_SLOWLY ( addr, 4 );
1311 return;
1312 }
1313
1314 if (size == 8) {
1315 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
1316 PROF_EVENT(82);
1317 /* Properly aligned. Do it in two halves. */
1318 addr4 = addr + 4;
1319 /* First half. */
1320 sm = primary_map[addr >> 16];
1321 sm_off = addr & 0xFFFF;
1322 a_off = sm_off >> 3;
1323 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1324 /* First half properly aligned and addressible. */
1325 v_off = addr & 0xFFFF;
1326 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1327 goto slow8;
1328 /* Second half. */
1329 sm = primary_map[addr4 >> 16];
1330 sm_off = addr4 & 0xFFFF;
1331 a_off = sm_off >> 3;
1332 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1333 /* Second half properly aligned and addressible. */
1334 v_off = addr4 & 0xFFFF;
1335 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1336 goto slow8;
1337 /* Both halves properly aligned, addressible and with valid
1338 data. */
1339 return;
1340 slow8:
1341 fpu_read_check_SLOWLY ( addr, 8 );
1342 return;
1343 }
1344
1345 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
1346 cases go quickly. */
1347 if (size == 2) {
1348 PROF_EVENT(83);
1349 fpu_read_check_SLOWLY ( addr, 2 );
1350 return;
1351 }
1352
1353 if (size == 10) {
1354 PROF_EVENT(84);
1355 fpu_read_check_SLOWLY ( addr, 10 );
1356 return;
1357 }
1358
1359 if (size == 28 || size == 108) {
1360 PROF_EVENT(84); /* XXX assign correct event number */
sewardjb3243352002-09-27 01:11:36 +00001361 fpu_read_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001362 return;
1363 }
1364
1365 VG_(printf)("size is %d\n", size);
njne427a662002-10-02 11:08:25 +00001366 VG_(skin_panic)("vgmext_fpu_read_check: unhandled size");
njn25e49d8e72002-09-23 09:36:25 +00001367# endif
1368}
1369
1370
1371__attribute__ ((regparm(2)))
1372void SK_(fpu_write_check) ( Addr addr, Int size )
1373{
1374 /* Ensure the written area is addressible, and moan if otherwise.
1375 If it is addressible, make it valid, otherwise invalid.
1376 */
1377
1378 SecMap* sm;
1379 UInt sm_off, v_off, a_off;
1380 Addr addr4;
1381
1382 PROF_EVENT(85);
1383
1384# ifdef VG_DEBUG_MEMORY
1385 fpu_write_check_SLOWLY ( addr, size );
1386# else
1387
1388 if (size == 4) {
1389 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
1390 PROF_EVENT(86);
1391 /* Properly aligned. */
1392 sm = primary_map[addr >> 16];
1393 sm_off = addr & 0xFFFF;
1394 a_off = sm_off >> 3;
1395 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
1396 /* Properly aligned and addressible. Make valid. */
1397 v_off = addr & 0xFFFF;
1398 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1399 return;
1400 slow4:
1401 fpu_write_check_SLOWLY ( addr, 4 );
1402 return;
1403 }
1404
1405 if (size == 8) {
1406 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
1407 PROF_EVENT(87);
1408 /* Properly aligned. Do it in two halves. */
1409 addr4 = addr + 4;
1410 /* First half. */
1411 sm = primary_map[addr >> 16];
1412 sm_off = addr & 0xFFFF;
1413 a_off = sm_off >> 3;
1414 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1415 /* First half properly aligned and addressible. Make valid. */
1416 v_off = addr & 0xFFFF;
1417 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1418 /* Second half. */
1419 sm = primary_map[addr4 >> 16];
1420 sm_off = addr4 & 0xFFFF;
1421 a_off = sm_off >> 3;
1422 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1423 /* Second half properly aligned and addressible. */
1424 v_off = addr4 & 0xFFFF;
1425 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1426 /* Properly aligned, addressible and with valid data. */
1427 return;
1428 slow8:
1429 fpu_write_check_SLOWLY ( addr, 8 );
1430 return;
1431 }
1432
1433 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
1434 cases go quickly. */
1435 if (size == 2) {
1436 PROF_EVENT(88);
1437 fpu_write_check_SLOWLY ( addr, 2 );
1438 return;
1439 }
1440
1441 if (size == 10) {
1442 PROF_EVENT(89);
1443 fpu_write_check_SLOWLY ( addr, 10 );
1444 return;
1445 }
1446
1447 if (size == 28 || size == 108) {
1448 PROF_EVENT(89); /* XXX assign correct event number */
sewardjb3243352002-09-27 01:11:36 +00001449 fpu_write_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001450 return;
1451 }
1452
1453 VG_(printf)("size is %d\n", size);
njne427a662002-10-02 11:08:25 +00001454 VG_(skin_panic)("vgmext_fpu_write_check: unhandled size");
njn25e49d8e72002-09-23 09:36:25 +00001455# endif
1456}
1457
1458
1459/* ---------------------------------------------------------------------
1460 Slow, general cases for FPU load and store checks.
1461 ------------------------------------------------------------------ */
1462
1463/* Generic version. Test for both addr and value errors, but if
1464 there's an addr error, don't report a value error even if it
1465 exists. */
1466
1467void fpu_read_check_SLOWLY ( Addr addr, Int size )
1468{
1469 Int i;
1470 Bool aerr = False;
1471 Bool verr = False;
1472 PROF_EVENT(90);
1473 for (i = 0; i < size; i++) {
1474 PROF_EVENT(91);
1475 if (get_abit(addr+i) != VGM_BIT_VALID)
1476 aerr = True;
1477 if (get_vbyte(addr+i) != VGM_BYTE_VALID)
1478 verr = True;
1479 }
1480
1481 if (aerr) {
1482 SK_(record_address_error)( addr, size, False );
1483 } else {
1484 if (verr)
1485 SK_(record_value_error)( size );
1486 }
1487}
1488
1489
1490/* Generic version. Test for addr errors. Valid addresses are
1491 given valid values, and invalid addresses invalid values. */
1492
1493void fpu_write_check_SLOWLY ( Addr addr, Int size )
1494{
1495 Int i;
1496 Addr a_here;
1497 Bool a_ok;
1498 Bool aerr = False;
1499 PROF_EVENT(92);
1500 for (i = 0; i < size; i++) {
1501 PROF_EVENT(93);
1502 a_here = addr+i;
1503 a_ok = get_abit(a_here) == VGM_BIT_VALID;
1504 if (a_ok) {
1505 set_vbyte(a_here, VGM_BYTE_VALID);
1506 } else {
1507 set_vbyte(a_here, VGM_BYTE_INVALID);
1508 aerr = True;
1509 }
1510 }
1511 if (aerr) {
1512 SK_(record_address_error)( addr, size, True );
1513 }
1514}
1515
1516/*------------------------------------------------------------*/
1517/*--- Shadow chunks info ---*/
1518/*------------------------------------------------------------*/
1519
1520static __inline__
1521void set_where( ShadowChunk* sc, ExeContext* ec )
1522{
1523 sc->skin_extra[0] = (UInt)ec;
1524}
1525
1526static __inline__
1527ExeContext *get_where( ShadowChunk* sc )
1528{
1529 return (ExeContext*)sc->skin_extra[0];
1530}
1531
1532void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst )
1533{
1534 set_where( sc, VG_(get_ExeContext) ( tst ) );
1535}
1536
1537/*------------------------------------------------------------*/
1538/*--- Postponing free()ing ---*/
1539/*------------------------------------------------------------*/
1540
1541/* Holds blocks after freeing. */
1542static ShadowChunk* vg_freed_list_start = NULL;
1543static ShadowChunk* vg_freed_list_end = NULL;
1544static Int vg_freed_list_volume = 0;
1545
1546static __attribute__ ((unused))
1547 Int count_freelist ( void )
1548{
1549 ShadowChunk* sc;
1550 Int n = 0;
1551 for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
1552 n++;
1553 return n;
1554}
1555
1556static __attribute__ ((unused))
1557 void freelist_sanity ( void )
1558{
1559 ShadowChunk* sc;
1560 Int n = 0;
1561 /* VG_(printf)("freelist sanity\n"); */
1562 for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
1563 n += sc->size;
njne427a662002-10-02 11:08:25 +00001564 sk_assert(n == vg_freed_list_volume);
njn25e49d8e72002-09-23 09:36:25 +00001565}
1566
1567/* Put a shadow chunk on the freed blocks queue, possibly freeing up
1568 some of the oldest blocks in the queue at the same time. */
1569static void add_to_freed_queue ( ShadowChunk* sc )
1570{
1571 ShadowChunk* sc1;
1572
1573 /* Put it at the end of the freed list */
1574 if (vg_freed_list_end == NULL) {
njne427a662002-10-02 11:08:25 +00001575 sk_assert(vg_freed_list_start == NULL);
njn25e49d8e72002-09-23 09:36:25 +00001576 vg_freed_list_end = vg_freed_list_start = sc;
1577 vg_freed_list_volume = sc->size;
1578 } else {
njne427a662002-10-02 11:08:25 +00001579 sk_assert(vg_freed_list_end->next == NULL);
njn25e49d8e72002-09-23 09:36:25 +00001580 vg_freed_list_end->next = sc;
1581 vg_freed_list_end = sc;
1582 vg_freed_list_volume += sc->size;
1583 }
1584 sc->next = NULL;
1585
1586 /* Release enough of the oldest blocks to bring the free queue
1587 volume below vg_clo_freelist_vol. */
1588
1589 while (vg_freed_list_volume > SK_(clo_freelist_vol)) {
1590 /* freelist_sanity(); */
njne427a662002-10-02 11:08:25 +00001591 sk_assert(vg_freed_list_start != NULL);
1592 sk_assert(vg_freed_list_end != NULL);
njn25e49d8e72002-09-23 09:36:25 +00001593
1594 sc1 = vg_freed_list_start;
1595 vg_freed_list_volume -= sc1->size;
1596 /* VG_(printf)("volume now %d\n", vg_freed_list_volume); */
njne427a662002-10-02 11:08:25 +00001597 sk_assert(vg_freed_list_volume >= 0);
njn25e49d8e72002-09-23 09:36:25 +00001598
1599 if (vg_freed_list_start == vg_freed_list_end) {
1600 vg_freed_list_start = vg_freed_list_end = NULL;
1601 } else {
1602 vg_freed_list_start = sc1->next;
1603 }
1604 sc1->next = NULL; /* just paranoia */
njn4ba5a792002-09-30 10:23:54 +00001605 VG_(free_ShadowChunk) ( sc1 );
njn25e49d8e72002-09-23 09:36:25 +00001606 }
1607}
1608
1609/* Return the first shadow chunk satisfying the predicate p. */
1610ShadowChunk* SK_(any_matching_freed_ShadowChunks)
1611 ( Bool (*p) ( ShadowChunk* ))
1612{
1613 ShadowChunk* sc;
1614
1615 /* No point looking through freed blocks if we're not keeping
1616 them around for a while... */
1617 for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
1618 if (p(sc))
1619 return sc;
1620
1621 return NULL;
1622}
1623
1624void SK_(alt_free) ( ShadowChunk* sc, ThreadState* tst )
1625{
1626 /* Record where freed */
1627 set_where( sc, VG_(get_ExeContext) ( tst ) );
1628
1629 /* Put it out of harm's way for a while. */
1630 add_to_freed_queue ( sc );
1631}
1632
1633/*------------------------------------------------------------*/
1634/*--- Low-level address-space scanning, for the leak ---*/
1635/*--- detector. ---*/
1636/*------------------------------------------------------------*/
1637
1638static
1639jmp_buf memscan_jmpbuf;
1640
1641static
1642void vg_scan_all_valid_memory_sighandler ( Int sigNo )
1643{
1644 __builtin_longjmp(memscan_jmpbuf, 1);
1645}
1646
1647/* Safely (avoiding SIGSEGV / SIGBUS) scan the entire valid address
1648 space and pass the addresses and values of all addressible,
1649 defined, aligned words to notify_word. This is the basis for the
1650 leak detector. Returns the number of calls made to notify_word. */
1651UInt VG_(scan_all_valid_memory) ( void (*notify_word)( Addr, UInt ) )
1652{
1653 /* All volatile, because some gccs seem paranoid about longjmp(). */
1654 volatile UInt res, numPages, page, vbytes, primaryMapNo, nWordsNotified;
1655 volatile Addr pageBase, addr;
1656 volatile SecMap* sm;
1657 volatile UChar abits;
1658 volatile UInt page_first_word;
1659
1660 vki_ksigaction sigbus_saved;
1661 vki_ksigaction sigbus_new;
1662 vki_ksigaction sigsegv_saved;
1663 vki_ksigaction sigsegv_new;
1664 vki_ksigset_t blockmask_saved;
1665 vki_ksigset_t unblockmask_new;
1666
1667 /* Temporarily install a new sigsegv and sigbus handler, and make
1668 sure SIGBUS, SIGSEGV and SIGTERM are unblocked. (Perhaps the
1669 first two can never be blocked anyway?) */
1670
1671 sigbus_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
1672 sigbus_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
1673 sigbus_new.ksa_restorer = NULL;
1674 res = VG_(ksigemptyset)( &sigbus_new.ksa_mask );
njne427a662002-10-02 11:08:25 +00001675 sk_assert(res == 0);
njn25e49d8e72002-09-23 09:36:25 +00001676
1677 sigsegv_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
1678 sigsegv_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
1679 sigsegv_new.ksa_restorer = NULL;
1680 res = VG_(ksigemptyset)( &sigsegv_new.ksa_mask );
njne427a662002-10-02 11:08:25 +00001681 sk_assert(res == 0+0);
njn25e49d8e72002-09-23 09:36:25 +00001682
1683 res = VG_(ksigemptyset)( &unblockmask_new );
1684 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGBUS );
1685 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGSEGV );
1686 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGTERM );
njne427a662002-10-02 11:08:25 +00001687 sk_assert(res == 0+0+0);
njn25e49d8e72002-09-23 09:36:25 +00001688
1689 res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_new, &sigbus_saved );
njne427a662002-10-02 11:08:25 +00001690 sk_assert(res == 0+0+0+0);
njn25e49d8e72002-09-23 09:36:25 +00001691
1692 res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_new, &sigsegv_saved );
njne427a662002-10-02 11:08:25 +00001693 sk_assert(res == 0+0+0+0+0);
njn25e49d8e72002-09-23 09:36:25 +00001694
1695 res = VG_(ksigprocmask)( VKI_SIG_UNBLOCK, &unblockmask_new, &blockmask_saved );
njne427a662002-10-02 11:08:25 +00001696 sk_assert(res == 0+0+0+0+0+0);
njn25e49d8e72002-09-23 09:36:25 +00001697
1698 /* The signal handlers are installed. Actually do the memory scan. */
1699 numPages = 1 << (32-VKI_BYTES_PER_PAGE_BITS);
njne427a662002-10-02 11:08:25 +00001700 sk_assert(numPages == 1048576);
1701 sk_assert(4096 == (1 << VKI_BYTES_PER_PAGE_BITS));
njn25e49d8e72002-09-23 09:36:25 +00001702
1703 nWordsNotified = 0;
1704
1705 for (page = 0; page < numPages; page++) {
1706 pageBase = page << VKI_BYTES_PER_PAGE_BITS;
1707 primaryMapNo = pageBase >> 16;
1708 sm = primary_map[primaryMapNo];
1709 if (IS_DISTINGUISHED_SM(sm)) continue;
1710 if (__builtin_setjmp(memscan_jmpbuf) == 0) {
1711 /* try this ... */
1712 page_first_word = * (volatile UInt*)pageBase;
1713 /* we get here if we didn't get a fault */
1714 /* Scan the page */
1715 for (addr = pageBase; addr < pageBase+VKI_BYTES_PER_PAGE; addr += 4) {
1716 abits = get_abits4_ALIGNED(addr);
1717 vbytes = get_vbytes4_ALIGNED(addr);
1718 if (abits == VGM_NIBBLE_VALID
1719 && vbytes == VGM_WORD_VALID) {
1720 nWordsNotified++;
1721 notify_word ( addr, *(UInt*)addr );
1722 }
1723 }
1724 } else {
1725 /* We get here if reading the first word of the page caused a
1726 fault, which in turn caused the signal handler to longjmp.
1727 Ignore this page. */
1728 if (0)
1729 VG_(printf)(
1730 "vg_scan_all_valid_memory_sighandler: ignoring page at %p\n",
1731 (void*)pageBase
1732 );
1733 }
1734 }
1735
1736 /* Restore signal state to whatever it was before. */
1737 res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_saved, NULL );
njne427a662002-10-02 11:08:25 +00001738 sk_assert(res == 0 +0);
njn25e49d8e72002-09-23 09:36:25 +00001739
1740 res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_saved, NULL );
njne427a662002-10-02 11:08:25 +00001741 sk_assert(res == 0 +0 +0);
njn25e49d8e72002-09-23 09:36:25 +00001742
1743 res = VG_(ksigprocmask)( VKI_SIG_SETMASK, &blockmask_saved, NULL );
njne427a662002-10-02 11:08:25 +00001744 sk_assert(res == 0 +0 +0 +0);
njn25e49d8e72002-09-23 09:36:25 +00001745
1746 return nWordsNotified;
1747}
1748
1749
1750/*------------------------------------------------------------*/
1751/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1752/*------------------------------------------------------------*/
1753
1754/* A block is either
1755 -- Proper-ly reached; a pointer to its start has been found
1756 -- Interior-ly reached; only an interior pointer to it has been found
1757 -- Unreached; so far, no pointers to any part of it have been found.
1758*/
1759typedef
1760 enum { Unreached, Interior, Proper }
1761 Reachedness;
1762
1763/* A block record, used for generating err msgs. */
1764typedef
1765 struct _LossRecord {
1766 struct _LossRecord* next;
1767 /* Where these lost blocks were allocated. */
1768 ExeContext* allocated_at;
1769 /* Their reachability. */
1770 Reachedness loss_mode;
1771 /* Number of blocks and total # bytes involved. */
1772 UInt total_bytes;
1773 UInt num_blocks;
1774 }
1775 LossRecord;
1776
1777
1778/* Find the i such that ptr points at or inside the block described by
1779 shadows[i]. Return -1 if none found. This assumes that shadows[]
1780 has been sorted on the ->data field. */
1781
1782#ifdef VG_DEBUG_LEAKCHECK
1783/* Used to sanity-check the fast binary-search mechanism. */
1784static Int find_shadow_for_OLD ( Addr ptr,
1785 ShadowChunk** shadows,
1786 Int n_shadows )
1787
1788{
1789 Int i;
1790 Addr a_lo, a_hi;
1791 PROF_EVENT(70);
1792 for (i = 0; i < n_shadows; i++) {
1793 PROF_EVENT(71);
1794 a_lo = shadows[i]->data;
1795 a_hi = ((Addr)shadows[i]->data) + shadows[i]->size - 1;
1796 if (a_lo <= ptr && ptr <= a_hi)
1797 return i;
1798 }
1799 return -1;
1800}
1801#endif
1802
1803
1804static Int find_shadow_for ( Addr ptr,
1805 ShadowChunk** shadows,
1806 Int n_shadows )
1807{
1808 Addr a_mid_lo, a_mid_hi;
1809 Int lo, mid, hi, retVal;
1810 PROF_EVENT(70);
1811 /* VG_(printf)("find shadow for %p = ", ptr); */
1812 retVal = -1;
1813 lo = 0;
1814 hi = n_shadows-1;
1815 while (True) {
1816 PROF_EVENT(71);
1817
1818 /* invariant: current unsearched space is from lo to hi,
1819 inclusive. */
1820 if (lo > hi) break; /* not found */
1821
1822 mid = (lo + hi) / 2;
1823 a_mid_lo = shadows[mid]->data;
1824 a_mid_hi = ((Addr)shadows[mid]->data) + shadows[mid]->size - 1;
1825
1826 if (ptr < a_mid_lo) {
1827 hi = mid-1;
1828 continue;
1829 }
1830 if (ptr > a_mid_hi) {
1831 lo = mid+1;
1832 continue;
1833 }
njne427a662002-10-02 11:08:25 +00001834 sk_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
njn25e49d8e72002-09-23 09:36:25 +00001835 retVal = mid;
1836 break;
1837 }
1838
1839# ifdef VG_DEBUG_LEAKCHECK
njne427a662002-10-02 11:08:25 +00001840 sk_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
njn25e49d8e72002-09-23 09:36:25 +00001841# endif
1842 /* VG_(printf)("%d\n", retVal); */
1843 return retVal;
1844}
1845
1846
1847
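/* Sort the shadow-chunk pointers into ascending order of their ->data
   (start) addresses, using a shellsort with the h = 3*h + 1 increment
   sequence.  find_shadow_for's binary search relies on this ordering. */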
1848static void sort_malloc_shadows ( ShadowChunk** shadows, UInt n_shadows )
1849{
1850 Int incs[14] = { 1, 4, 13, 40, 121, 364, 1093, 3280,
1851 9841, 29524, 88573, 265720,
1852 797161, 2391484 };
1853 Int lo = 0;
1854 Int hi = n_shadows-1;
1855 Int i, j, h, bigN, hp;
1856 ShadowChunk* v;
1857
1858 PROF_EVENT(72);
1859 bigN = hi - lo + 1; if (bigN < 2) return;
1860 hp = 0; while (incs[hp] < bigN) hp++; hp--;
1861
1862 for (; hp >= 0; hp--) {
1863 PROF_EVENT(73);
1864 h = incs[hp];
1865 i = lo + h;
1866 while (1) {
1867 PROF_EVENT(74);
1868 if (i > hi) break;
1869 v = shadows[i];
1870 j = i;
1871 while (shadows[j-h]->data > v->data) {
1872 PROF_EVENT(75);
1873 shadows[j] = shadows[j-h];
1874 j = j - h;
1875 if (j <= (lo + h - 1)) break;
1876 }
1877 shadows[j] = v;
1878 i++;
1879 }
1880 }
1881}
1882
1883/* Globals, for the callback used by SK_(detect_memory_leaks). */
1884
1885static ShadowChunk** vglc_shadows;
1886static Int vglc_n_shadows;
1887static Reachedness* vglc_reachedness;
1888static Addr vglc_min_mallocd_addr;
1889static Addr vglc_max_mallocd_addr;
1890
1891static
1892void vg_detect_memory_leaks_notify_addr ( Addr a, UInt word_at_a )
1893{
1894 Int sh_no;
1895 Addr ptr;
1896
1897 /* Rule out some known causes of bogus pointers. Mostly these do
1898 not cause much trouble because only a few false pointers can
1899 ever lurk in these places. This mainly stops it reporting that
1900 blocks are still reachable in stupid test programs like this
1901
1902 int main (void) { char* a = malloc(100); return 0; }
1903
1904 which people seem inordinately fond of writing, for some reason.
1905
1906 Note that this is a complete kludge. It would be better to
1907 ignore any addresses corresponding to valgrind.so's .bss and
1908 .data segments, but I cannot think of a reliable way to identify
1909 where the .bss segment has been put. If you can, drop me a
1910 line.
1911 */
1912 if (VG_(within_stack)(a)) return;
1913 if (VG_(within_m_state_static)(a)) return;
1914 if (a == (Addr)(&vglc_min_mallocd_addr)) return;
1915 if (a == (Addr)(&vglc_max_mallocd_addr)) return;
1916
1917 /* OK, let's get on and do something Useful for a change. */
1918
1919 ptr = (Addr)word_at_a;
1920 if (ptr >= vglc_min_mallocd_addr && ptr <= vglc_max_mallocd_addr) {
1921 /* Might be legitimate; we'll have to investigate further. */
1922 sh_no = find_shadow_for ( ptr, vglc_shadows, vglc_n_shadows );
1923 if (sh_no != -1) {
1924 /* Found a block at/into which ptr points. */
njne427a662002-10-02 11:08:25 +00001925 sk_assert(sh_no >= 0 && sh_no < vglc_n_shadows);
1926 sk_assert(ptr < vglc_shadows[sh_no]->data
njn25e49d8e72002-09-23 09:36:25 +00001927 + vglc_shadows[sh_no]->size);
1928 /* Decide whether Proper-ly or Interior-ly reached. */
1929 if (ptr == vglc_shadows[sh_no]->data) {
1930 if (0) VG_(printf)("pointer at %p to %p\n", a, word_at_a );
1931 vglc_reachedness[sh_no] = Proper;
1932 } else {
1933 if (vglc_reachedness[sh_no] == Unreached)
1934 vglc_reachedness[sh_no] = Interior;
1935 }
1936 }
1937 }
1938}
1939
1940
1941void SK_(detect_memory_leaks) ( void )
1942{
1943 Int i;
1944 Int blocks_leaked, bytes_leaked;
1945 Int blocks_dubious, bytes_dubious;
1946 Int blocks_reachable, bytes_reachable;
1947 Int n_lossrecords;
1948 UInt bytes_notified;
1949
1950 LossRecord* errlist;
1951 LossRecord* p;
1952
1953 PROF_EVENT(76);
1954
1955 /* VG_(get_malloc_shadows) allocates storage for shadows */
1956 vglc_shadows = VG_(get_malloc_shadows)( &vglc_n_shadows );
1957 if (vglc_n_shadows == 0) {
njne427a662002-10-02 11:08:25 +00001958 sk_assert(vglc_shadows == NULL);
njn25e49d8e72002-09-23 09:36:25 +00001959 VG_(message)(Vg_UserMsg,
1960 "No malloc'd blocks -- no leaks are possible.\n");
1961 return;
1962 }
1963
1964 VG_(message)(Vg_UserMsg,
1965 "searching for pointers to %d not-freed blocks.",
1966 vglc_n_shadows );
1967 sort_malloc_shadows ( vglc_shadows, vglc_n_shadows );
1968
1969 /* Sanity check; assert that the blocks are now in order and that
1970 they don't overlap. */
1971 for (i = 0; i < vglc_n_shadows-1; i++) {
njne427a662002-10-02 11:08:25 +00001972 sk_assert( ((Addr)vglc_shadows[i]->data)
njn25e49d8e72002-09-23 09:36:25 +00001973 < ((Addr)vglc_shadows[i+1]->data) );
njne427a662002-10-02 11:08:25 +00001974 sk_assert( ((Addr)vglc_shadows[i]->data) + vglc_shadows[i]->size
njn25e49d8e72002-09-23 09:36:25 +00001975 < ((Addr)vglc_shadows[i+1]->data) );
1976 }
1977
1978 vglc_min_mallocd_addr = ((Addr)vglc_shadows[0]->data);
1979 vglc_max_mallocd_addr = ((Addr)vglc_shadows[vglc_n_shadows-1]->data)
1980 + vglc_shadows[vglc_n_shadows-1]->size - 1;
1981
1982 vglc_reachedness
1983 = VG_(malloc)( vglc_n_shadows * sizeof(Reachedness) );
1984 for (i = 0; i < vglc_n_shadows; i++)
1985 vglc_reachedness[i] = Unreached;
1986
1987 /* Do the scan of memory. */
1988 bytes_notified
1989 = VG_(scan_all_valid_memory)( &vg_detect_memory_leaks_notify_addr )
1990 * VKI_BYTES_PER_WORD;
1991
1992 VG_(message)(Vg_UserMsg, "checked %d bytes.", bytes_notified);
1993
1994 blocks_leaked = bytes_leaked = 0;
1995 blocks_dubious = bytes_dubious = 0;
1996 blocks_reachable = bytes_reachable = 0;
1997
1998 for (i = 0; i < vglc_n_shadows; i++) {
1999 if (vglc_reachedness[i] == Unreached) {
2000 blocks_leaked++;
2001 bytes_leaked += vglc_shadows[i]->size;
2002 }
2003 else if (vglc_reachedness[i] == Interior) {
2004 blocks_dubious++;
2005 bytes_dubious += vglc_shadows[i]->size;
2006 }
2007 else if (vglc_reachedness[i] == Proper) {
2008 blocks_reachable++;
2009 bytes_reachable += vglc_shadows[i]->size;
2010 }
2011 }
2012
2013 VG_(message)(Vg_UserMsg, "");
2014 VG_(message)(Vg_UserMsg, "definitely lost: %d bytes in %d blocks.",
2015 bytes_leaked, blocks_leaked );
2016 VG_(message)(Vg_UserMsg, "possibly lost: %d bytes in %d blocks.",
2017 bytes_dubious, blocks_dubious );
2018 VG_(message)(Vg_UserMsg, "still reachable: %d bytes in %d blocks.",
2019 bytes_reachable, blocks_reachable );
2020
2021
2022 /* Common up the lost blocks so we can print sensible error
2023 messages. */
2024
2025 n_lossrecords = 0;
2026 errlist = NULL;
2027 for (i = 0; i < vglc_n_shadows; i++) {
2028
2029 /* 'where' stored in 'skin_extra' field */
2030 ExeContext* where = get_where ( vglc_shadows[i] );
2031
2032 for (p = errlist; p != NULL; p = p->next) {
2033 if (p->loss_mode == vglc_reachedness[i]
2034 && VG_(eq_ExeContext) ( SK_(clo_leak_resolution),
2035 p->allocated_at,
2036 where) ) {
2037 break;
2038 }
2039 }
2040 if (p != NULL) {
2041 p->num_blocks ++;
2042 p->total_bytes += vglc_shadows[i]->size;
2043 } else {
2044 n_lossrecords ++;
2045 p = VG_(malloc)(sizeof(LossRecord));
2046 p->loss_mode = vglc_reachedness[i];
2047 p->allocated_at = where;
2048 p->total_bytes = vglc_shadows[i]->size;
2049 p->num_blocks = 1;
2050 p->next = errlist;
2051 errlist = p;
2052 }
2053 }
2054
2055 for (i = 0; i < n_lossrecords; i++) {
2056 LossRecord* p_min = NULL;
2057 UInt n_min = 0xFFFFFFFF;
2058 for (p = errlist; p != NULL; p = p->next) {
2059 if (p->num_blocks > 0 && p->total_bytes < n_min) {
2060 n_min = p->total_bytes;
2061 p_min = p;
2062 }
2063 }
njne427a662002-10-02 11:08:25 +00002064 sk_assert(p_min != NULL);
njn25e49d8e72002-09-23 09:36:25 +00002065
2066 if ( (!SK_(clo_show_reachable)) && p_min->loss_mode == Proper) {
2067 p_min->num_blocks = 0;
2068 continue;
2069 }
2070
2071 VG_(message)(Vg_UserMsg, "");
2072 VG_(message)(
2073 Vg_UserMsg,
2074 "%d bytes in %d blocks are %s in loss record %d of %d",
2075 p_min->total_bytes, p_min->num_blocks,
2076 p_min->loss_mode==Unreached ? "definitely lost" :
2077 (p_min->loss_mode==Interior ? "possibly lost"
2078 : "still reachable"),
2079 i+1, n_lossrecords
2080 );
2081 VG_(pp_ExeContext)(p_min->allocated_at);
2082 p_min->num_blocks = 0;
2083 }
2084
2085 VG_(message)(Vg_UserMsg, "");
2086 VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
2087 VG_(message)(Vg_UserMsg, " definitely lost: %d bytes in %d blocks.",
2088 bytes_leaked, blocks_leaked );
2089 VG_(message)(Vg_UserMsg, " possibly lost: %d bytes in %d blocks.",
2090 bytes_dubious, blocks_dubious );
2091 VG_(message)(Vg_UserMsg, " still reachable: %d bytes in %d blocks.",
2092 bytes_reachable, blocks_reachable );
2093 if (!SK_(clo_show_reachable)) {
2094 VG_(message)(Vg_UserMsg,
2095 "Reachable blocks (those to which a pointer was found) are not shown.");
2096 VG_(message)(Vg_UserMsg,
2097 "To see them, rerun with: --show-reachable=yes");
2098 }
2099 VG_(message)(Vg_UserMsg, "");
2100
2101 VG_(free) ( vglc_shadows );
2102 VG_(free) ( vglc_reachedness );
2103}
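
/* Minimal sketch (assumed wiring, not code from this file): the leak
   detector is only meaningful once the client has finished running, so a
   skin would typically invoke it from its finalisation hook when the user
   asked for it on the command line:

      if (SK_(clo_leak_check))
         SK_(detect_memory_leaks)();
*/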
2104
2105
2106/* ---------------------------------------------------------------------
2107 Sanity check machinery (permanently engaged).
2108 ------------------------------------------------------------------ */
2109
2110/* Check that nobody has spuriously claimed that the first or last 16
2111 pages (64 KB) of address space have become accessible. Failure of
2112 the following checks does not per se indicate an internal consistency
2113 problem, but it is so likely to that we really want to know
2114 about it if so. */
2115
2116Bool SK_(cheap_sanity_check) ( void )
2117{
2118 if (IS_DISTINGUISHED_SM(primary_map[0]) &&
2119 IS_DISTINGUISHED_SM(primary_map[65535]))
2120 return True;
2121 else
2122 return False;
2123}
2124
2125Bool SK_(expensive_sanity_check) ( void )
2126{
2127 Int i;
2128
2129 /* Make sure nobody changed the distinguished secondary. */
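   /* Geometry assumed by the loops below: each secondary map covers 64KB of
      address space, so it carries 65536/8 == 8192 A-bit bytes and 65536
      V-bytes; the primary map has 4*65536 == 262144 entries, and only its
      bottom quarter should ever point at anything other than the
      distinguished secondary. */
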
2130 for (i = 0; i < 8192; i++)
2131 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
2132 return False;
2133
2134 for (i = 0; i < 65536; i++)
2135 if (distinguished_secondary_map.vbyte[i] != VGM_BYTE_INVALID)
2136 return False;
2137
2138 /* Make sure that the upper 3/4 of the primary map hasn't
2139 been messed with. */
2140 for (i = 65536; i < 262144; i++)
2141 if (primary_map[i] != & distinguished_secondary_map)
2142 return False;
2143
2144 return True;
2145}
2146
2147/* ---------------------------------------------------------------------
2148 Debugging machinery (turn on to debug). Something of a mess.
2149 ------------------------------------------------------------------ */
2150
2151#if 0
2152/* Print the value tags on the 8 integer registers & flag reg. */
2153
2154static void uint_to_bits ( UInt x, Char* str )
2155{
2156 Int i;
2157 Int w = 0;
2158 /* str must point to a space of at least 36 bytes. */
2159 for (i = 31; i >= 0; i--) {
2160 str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
2161 if (i == 24 || i == 16 || i == 8)
2162 str[w++] = ' ';
2163 }
2164 str[w++] = 0;
njne427a662002-10-02 11:08:25 +00002165 sk_assert(w == 36);
njn25e49d8e72002-09-23 09:36:25 +00002166}
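
/* Worked example: uint_to_bits(0x80000001, buf) leaves buf holding
   "10000000 00000000 00000000 00000001" (35 characters plus the
   terminating zero, hence the assertion that w == 36). */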
2167
2168/* Caution! Not vthread-safe; looks in VG_(baseBlock), not the thread
2169 state table. */
2170
2171static void vg_show_reg_tags ( void )
2172{
2173 Char buf1[36];
2174 Char buf2[36];
2175 UInt z_eax, z_ebx, z_ecx, z_edx,
2176 z_esi, z_edi, z_ebp, z_esp, z_eflags;
2177
2178 z_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
2179 z_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
2180 z_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
2181 z_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
2182 z_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
2183 z_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
2184 z_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
2185 z_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
2186 z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
2187
2188 uint_to_bits(z_eflags, buf1);
njn9b6d34e2002-10-15 08:48:08 +00002189 VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);
njn25e49d8e72002-09-23 09:36:25 +00002190
2191 uint_to_bits(z_eax, buf1);
2192 uint_to_bits(z_ebx, buf2);
2193 VG_(message)(Vg_DebugMsg, "eax %s ebx %s\n", buf1, buf2);
2194
2195 uint_to_bits(z_ecx, buf1);
2196 uint_to_bits(z_edx, buf2);
2197 VG_(message)(Vg_DebugMsg, "ecx %s edx %s\n", buf1, buf2);
2198
2199 uint_to_bits(z_esi, buf1);
2200 uint_to_bits(z_edi, buf2);
2201 VG_(message)(Vg_DebugMsg, "esi %s edi %s\n", buf1, buf2);
2202
2203 uint_to_bits(z_ebp, buf1);
2204 uint_to_bits(z_esp, buf2);
2205 VG_(message)(Vg_DebugMsg, "ebp %s esp %s\n", buf1, buf2);
2206}
2207
2208
2209/* For debugging only. Scan the address space and touch all allegedly
2210 addressible words. Useful for establishing where Valgrind's idea of
2211 addressable words. Useful for establishing where Valgrind's idea of
2212 addressability has diverged from what the kernel believes.
2213static
2214void zzzmemscan_notify_word ( Addr a, UInt w )
2215{
2216}
2217
2218void zzzmemscan ( void )
2219{
2220 Int n_notifies
2221 = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
2222 VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
2223}
2224#endif
2225
2226
2227
2228
2229#if 0
2230static Int zzz = 0;
2231
2232void show_bb ( Addr eip_next )
2233{
2234 VG_(printf)("[%4d] ", zzz);
2235 vg_show_reg_tags();   /* takes no arguments */
2236 VG_(translate) ( eip_next, NULL, NULL, NULL );
2237}
2238#endif /* 0 */
2239
2240/*------------------------------------------------------------*/
2241/*--- Syscall wrappers ---*/
2242/*------------------------------------------------------------*/
2243
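/* The pre-syscall hook stashes the result of the cheap sanity check in the
   opaque pointer the core hands back to the post-syscall hook; if the check
   passed before the syscall but fails afterwards, the syscall has most
   likely trashed the shadow maps, so we panic rather than stumble on. */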
2244void* SK_(pre_syscall) ( ThreadId tid, UInt syscallno, Bool isBlocking )
2245{
2246 Int sane = SK_(cheap_sanity_check)();
2247 return (void*)sane;
2248}
2249
2250void SK_(post_syscall) ( ThreadId tid, UInt syscallno,
2251 void* pre_result, Int res, Bool isBlocking )
2252{
2253 Int sane_before_call = (Int)pre_result;
2254 Bool sane_after_call = SK_(cheap_sanity_check)();
2255
2256 if ((Int)sane_before_call && (!sane_after_call)) {
2257 VG_(message)(Vg_DebugMsg, "post-syscall: ");
2258 VG_(message)(Vg_DebugMsg,
2259 "probable sanity check failure for syscall number %d\n",
2260 syscallno );
njne427a662002-10-02 11:08:25 +00002261 VG_(skin_panic)("aborting due to the above ... bye!");
njn25e49d8e72002-09-23 09:36:25 +00002262 }
2263}
2264
2265
2266/*------------------------------------------------------------*/
2267/*--- Setup ---*/
2268/*------------------------------------------------------------*/
2269
2270void SK_(written_shadow_regs_values)( UInt* gen_reg_value, UInt* eflags_value )
2271{
2272 *gen_reg_value = VGM_WORD_VALID;
2273 *eflags_value = VGM_EFLAGS_VALID;
2274}
2275
2276Bool SK_(process_cmd_line_option)(Char* arg)
2277{
2278# define STREQ(s1,s2) (0==VG_(strcmp_ws)((s1),(s2)))
2279# define STREQN(nn,s1,s2) (0==VG_(strncmp_ws)((s1),(s2),(nn)))
2280
2281 if (STREQ(arg, "--partial-loads-ok=yes"))
2282 SK_(clo_partial_loads_ok) = True;
2283 else if (STREQ(arg, "--partial-loads-ok=no"))
2284 SK_(clo_partial_loads_ok) = False;
2285
2286 else if (STREQN(15, arg, "--freelist-vol=")) {
2287 SK_(clo_freelist_vol) = (Int)VG_(atoll)(&arg[15]);
2288 if (SK_(clo_freelist_vol) < 0) SK_(clo_freelist_vol) = 0;
2289 }
2290
2291 else if (STREQ(arg, "--leak-check=yes"))
2292 SK_(clo_leak_check) = True;
2293 else if (STREQ(arg, "--leak-check=no"))
2294 SK_(clo_leak_check) = False;
2295
2296 else if (STREQ(arg, "--leak-resolution=low"))
2297 SK_(clo_leak_resolution) = Vg_LowRes;
2298 else if (STREQ(arg, "--leak-resolution=med"))
2299 SK_(clo_leak_resolution) = Vg_MedRes;
2300 else if (STREQ(arg, "--leak-resolution=high"))
2301 SK_(clo_leak_resolution) = Vg_HighRes;
2302
2303 else if (STREQ(arg, "--show-reachable=yes"))
2304 SK_(clo_show_reachable) = True;
2305 else if (STREQ(arg, "--show-reachable=no"))
2306 SK_(clo_show_reachable) = False;
2307
2308 else if (STREQ(arg, "--workaround-gcc296-bugs=yes"))
2309 SK_(clo_workaround_gcc296_bugs) = True;
2310 else if (STREQ(arg, "--workaround-gcc296-bugs=no"))
2311 SK_(clo_workaround_gcc296_bugs) = False;
2312
2313 else if (STREQ(arg, "--check-addrVs=yes"))
2314 SK_(clo_check_addrVs) = True;
2315 else if (STREQ(arg, "--check-addrVs=no"))
2316 SK_(clo_check_addrVs) = False;
2317
2318 else if (STREQ(arg, "--cleanup=yes"))
2319 SK_(clo_cleanup) = True;
2320 else if (STREQ(arg, "--cleanup=no"))
2321 SK_(clo_cleanup) = False;
2322
sewardj8ec2cfc2002-10-13 00:57:26 +00002323 else if (STREQ(arg, "--avoid-strlen-errors=yes"))
2324 SK_(clo_avoid_strlen_errors) = True;
2325 else if (STREQ(arg, "--avoid-strlen-errors=no"))
2326 SK_(clo_avoid_strlen_errors) = False;
2327
njn25e49d8e72002-09-23 09:36:25 +00002328 else
2329 return False;
2330
2331 return True;
2332
2333#undef STREQ
2334#undef STREQN
2335}
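
/* Illustrative only: a command line carrying, say,
      --leak-check=yes --leak-resolution=med --show-reachable=yes
   reaches this function one option string at a time; each recognised
   string sets the corresponding SK_(clo_*) variable above, and returning
   False for anything else lets the core treat it as an unknown option. */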
2336
2337Char* SK_(usage)(void)
2338{
2339 return
2340" --partial-loads-ok=no|yes too hard to explain here; see manual [yes]\n"
2341" --freelist-vol=<number> volume of freed blocks queue [1000000]\n"
2342" --leak-check=no|yes search for memory leaks at exit? [no]\n"
2343" --leak-resolution=low|med|high\n"
2344" amount of bt merging in leak check [low]\n"
2345" --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
2346" --workaround-gcc296-bugs=no|yes self explanatory [no]\n"
2347" --check-addrVs=no|yes experimental lighterweight checking? [yes]\n"
2348" yes == Valgrind's original behaviour\n"
2349"\n"
sewardj8ec2cfc2002-10-13 00:57:26 +00002350" --cleanup=no|yes improve after instrumentation? [yes]\n"
2351" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n";
njn25e49d8e72002-09-23 09:36:25 +00002352}
2353
2354
2355/*------------------------------------------------------------*/
2356/*--- Setup ---*/
2357/*------------------------------------------------------------*/
2358
njnd04b7c62002-10-03 14:05:52 +00002359void SK_(pre_clo_init)(VgDetails* details, VgNeeds* needs, VgTrackEvents* track)
njn25e49d8e72002-09-23 09:36:25 +00002360{
sewardj34eccb12002-10-05 16:49:09 +00002361 details->name = "Memcheck";
njnd04b7c62002-10-03 14:05:52 +00002362 details->version = NULL;
sewardj34eccb12002-10-05 16:49:09 +00002363 details->description = "a.k.a. Valgrind, a memory error detector";
njnd04b7c62002-10-03 14:05:52 +00002364 details->copyright_author =
2365 "Copyright (C) 2000-2002, and GNU GPL'd, by Julian Seward.";
2366 details->bug_reports_to = "jseward@acm.org";
njn25e49d8e72002-09-23 09:36:25 +00002367
njnd04b7c62002-10-03 14:05:52 +00002368 needs->core_errors = True;
2369 needs->skin_errors = True;
2370 needs->libc_freeres = True;
2371 needs->sizeof_shadow_block = 1;
2372 needs->basic_block_discards = False;
2373 needs->shadow_regs = True;
2374 needs->command_line_options = True;
2375 needs->client_requests = True;
2376 needs->extended_UCode = True;
2377 needs->syscall_wrapper = True;
2378 needs->alternative_free = True;
2379 needs->sanity_checks = True;
njn25e49d8e72002-09-23 09:36:25 +00002380
njn25e49d8e72002-09-23 09:36:25 +00002381 track->new_mem_startup = & memcheck_new_mem_startup;
2382 track->new_mem_heap = & memcheck_new_mem_heap;
2383 track->new_mem_stack = & SK_(make_writable);
2384 track->new_mem_stack_aligned = & make_writable_aligned;
2385 track->new_mem_stack_signal = & SK_(make_writable);
2386 track->new_mem_brk = & SK_(make_writable);
2387 track->new_mem_mmap = & memcheck_set_perms;
2388
2389 track->copy_mem_heap = & copy_address_range_state;
2390 track->copy_mem_remap = & copy_address_range_state;
2391 track->change_mem_mprotect = & memcheck_set_perms;
2392
2393 track->ban_mem_heap = & SK_(make_noaccess);
2394 track->ban_mem_stack = & SK_(make_noaccess);
2395
2396 track->die_mem_heap = & SK_(make_noaccess);
2397 track->die_mem_stack = & SK_(make_noaccess);
2398 track->die_mem_stack_aligned = & make_noaccess_aligned;
2399 track->die_mem_stack_signal = & SK_(make_noaccess);
2400 track->die_mem_brk = & SK_(make_noaccess);
2401 track->die_mem_munmap = & SK_(make_noaccess);
2402
2403 track->bad_free = & SK_(record_free_error);
2404 track->mismatched_free = & SK_(record_freemismatch_error);
2405
2406 track->pre_mem_read = & check_is_readable;
2407 track->pre_mem_read_asciiz = & check_is_readable_asciiz;
2408 track->pre_mem_write = & check_is_writable;
2409 track->post_mem_write = & SK_(make_readable);
2410
njnd04b7c62002-10-03 14:05:52 +00002411 VG_(register_compact_helper)((Addr) & SK_(helper_value_check4_fail));
2412 VG_(register_compact_helper)((Addr) & SK_(helper_value_check0_fail));
2413 VG_(register_compact_helper)((Addr) & SK_(helper_value_check2_fail));
2414 VG_(register_compact_helper)((Addr) & SK_(helperc_STOREV4));
2415 VG_(register_compact_helper)((Addr) & SK_(helperc_STOREV1));
2416 VG_(register_compact_helper)((Addr) & SK_(helperc_LOADV4));
2417 VG_(register_compact_helper)((Addr) & SK_(helperc_LOADV1));
njn25e49d8e72002-09-23 09:36:25 +00002418
njnd04b7c62002-10-03 14:05:52 +00002419 /* These two made non-compact because 2-byte transactions are rare. */
2420 VG_(register_noncompact_helper)((Addr) & SK_(helperc_STOREV2));
2421 VG_(register_noncompact_helper)((Addr) & SK_(helperc_LOADV2));
2422 VG_(register_noncompact_helper)((Addr) & SK_(fpu_write_check));
2423 VG_(register_noncompact_helper)((Addr) & SK_(fpu_read_check));
2424 VG_(register_noncompact_helper)((Addr) & SK_(helper_value_check1_fail));
njn25e49d8e72002-09-23 09:36:25 +00002425
2426 VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
2427 VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
njnd04b7c62002-10-03 14:05:52 +00002428
2429 init_shadow_memory();
2430 init_prof_mem();
njn25e49d8e72002-09-23 09:36:25 +00002431}
2432
2433/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002434/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002435/*--------------------------------------------------------------------*/