/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the          ---*/
/*--- accessibility (A) and validity (V) status of each byte.     ---*/
/*---                                                  mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind skin for
   detecting memory errors.

   Copyright (C) 2000-2002 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "mc_include.h"
#include "memcheck.h"   /* for client requests */
//#include "vg_profile.c"

/* Define to debug the mem audit system. */
/* #define VG_DEBUG_MEMORY */

/* Define to debug the memory-leak-detector. */
/* #define VG_DEBUG_LEAKCHECK */

/* Define to collect detailed performance info. */
/* #define VG_PROFILE_MEMORY */

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
48/*------------------------------------------------------------*/
49/*--- Command line options ---*/
50/*------------------------------------------------------------*/
51
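/* Added orientation note: these are only the defaults for Memcheck's
   option-controlled behaviour; SK_(clo_leak_check) corresponds to the
   --leak-check=yes flag mentioned in SK_(fini) below, and the actual
   option parsing lives elsewhere in the skin, outside this file. */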
52Bool SK_(clo_partial_loads_ok) = True;
53Int SK_(clo_freelist_vol) = 1000000;
54Bool SK_(clo_leak_check) = False;
55VgRes SK_(clo_leak_resolution) = Vg_LowRes;
56Bool SK_(clo_show_reachable) = False;
57Bool SK_(clo_workaround_gcc296_bugs) = False;
58Bool SK_(clo_check_addrVs) = True;
59Bool SK_(clo_cleanup) = True;
60
61/*------------------------------------------------------------*/
62/*--- Profiling events ---*/
63/*------------------------------------------------------------*/
64
65typedef
66 enum {
67 VgpCheckMem = VgpFini+1,
68 VgpSetMem
69 }
70 VgpSkinCC;
71
/*------------------------------------------------------------*/
/*--- Low-level support for memory checking.               ---*/
/*------------------------------------------------------------*/

/* All reads and writes are checked against a memory map, which
   records the state of all memory in the process.  The memory map is
   organised like this:

   The top 16 bits of an address are used to index into a top-level
   map table, containing 65536 entries.  Each entry is a pointer to a
   second-level map, which records the accessibility and validity
   permissions for the 65536 bytes indexed by the lower 16 bits of the
   address.  Each byte is represented by nine bits, one indicating
   accessibility, the other eight validity.  So each second-level map
   contains 73728 bytes.  This two-level arrangement conveniently
   divides the 4G address space into 64k lumps, each of size 64k bytes.

   All entries in the primary (top-level) map must point to a valid
   secondary (second-level) map.  Since most of the 4G of address
   space will not be in use -- i.e. not mapped at all -- there is a
   distinguished secondary map, which indicates `not addressible and
   not valid' for all bytes.  Entries in the primary map for which
   the entire 64k is not in use at all point at this distinguished
   map.

   [...] lots of stuff deleted due to out of date-ness

   As a final optimisation, the alignment and address checks for
   4-byte loads and stores are combined in a neat way.  The primary
   map is extended to have 262144 entries (2^18), rather than 2^16.
   The top 3/4 of these entries are permanently set to the
   distinguished secondary map.  For a 4-byte load/store, the
   top-level map is indexed not with (addr >> 16) but instead with
   f(addr), where

    f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
       = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX  or
       = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX

   i.e. the lowest two bits are placed above the 16 high address bits.
   If either of these two bits is nonzero, the address is misaligned;
   this will select a secondary map from the upper 3/4 of the primary
   map.  Because this is always the distinguished secondary map, a
   (bogus) address check failure will result.  The failure handling
   code can then figure out whether this is a genuine addr check
   failure or whether it is a possibly-legitimate access at a
   misaligned address.
*/
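/* Worked example (added note, not part of the original comment): the
   fast-path helpers below implement f() as
   (rotateRight16(addr) & 0x3FFFF).  For an aligned address such as
   0xBFFF1234 the two low bits are zero, so the index is simply
   addr >> 16 = 0xBFFF and the normal secondary map is used.  For a
   misaligned address such as 0xBFFF1235 the rotated low bit lands at
   bit 16 of the index, giving 0x1BFFF >= 0x10000, which always selects
   the distinguished secondary map and so forces the slow path. */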
120
121
122/*------------------------------------------------------------*/
123/*--- Crude profiling machinery. ---*/
124/*------------------------------------------------------------*/
125
126#ifdef VG_PROFILE_MEMORY
127
128#define N_PROF_EVENTS 150
129
130static UInt event_ctr[N_PROF_EVENTS];
131
132static void init_prof_mem ( void )
133{
134 Int i;
135 for (i = 0; i < N_PROF_EVENTS; i++)
136 event_ctr[i] = 0;
137}
138
139static void done_prof_mem ( void )
140{
141 Int i;
142 for (i = 0; i < N_PROF_EVENTS; i++) {
143 if ((i % 10) == 0)
144 VG_(printf)("\n");
145 if (event_ctr[i] > 0)
146 VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
147 }
148 VG_(printf)("\n");
149}
150
#define PROF_EVENT(ev)                                  \
   do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS);   \
        event_ctr[ev]++;                                \
   } while (False);
155
156#else
157
158static void init_prof_mem ( void ) { }
159static void done_prof_mem ( void ) { }
160
161#define PROF_EVENT(ev) /* */
162
163#endif
164
165/* Event index. If just the name of the fn is given, this means the
166 number of calls to the fn. Otherwise it is the specified event.
167
168 10 alloc_secondary_map
169
170 20 get_abit
171 21 get_vbyte
172 22 set_abit
173 23 set_vbyte
174 24 get_abits4_ALIGNED
175 25 get_vbytes4_ALIGNED
176
177 30 set_address_range_perms
178 31 set_address_range_perms(lower byte loop)
179 32 set_address_range_perms(quadword loop)
180 33 set_address_range_perms(upper byte loop)
181
182 35 make_noaccess
183 36 make_writable
184 37 make_readable
185
186 40 copy_address_range_state
187 41 copy_address_range_state(byte loop)
188 42 check_writable
189 43 check_writable(byte loop)
190 44 check_readable
191 45 check_readable(byte loop)
192 46 check_readable_asciiz
193 47 check_readable_asciiz(byte loop)
194
195 50 make_aligned_word_NOACCESS
196 51 make_aligned_word_WRITABLE
197
198 60 helperc_LOADV4
199 61 helperc_STOREV4
200 62 helperc_LOADV2
201 63 helperc_STOREV2
202 64 helperc_LOADV1
203 65 helperc_STOREV1
204
205 70 rim_rd_V4_SLOWLY
206 71 rim_wr_V4_SLOWLY
207 72 rim_rd_V2_SLOWLY
208 73 rim_wr_V2_SLOWLY
209 74 rim_rd_V1_SLOWLY
210 75 rim_wr_V1_SLOWLY
211
212 80 fpu_read
213 81 fpu_read aligned 4
214 82 fpu_read aligned 8
215 83 fpu_read 2
216 84 fpu_read 10
217
218 85 fpu_write
219 86 fpu_write aligned 4
220 87 fpu_write aligned 8
221 88 fpu_write 2
222 89 fpu_write 10
223
224 90 fpu_read_check_SLOWLY
225 91 fpu_read_check_SLOWLY(byte loop)
226 92 fpu_write_check_SLOWLY
227 93 fpu_write_check_SLOWLY(byte loop)
228
229 100 is_plausible_stack_addr
230 101 handle_esp_assignment
231 102 handle_esp_assignment(-4)
232 103 handle_esp_assignment(+4)
233 104 handle_esp_assignment(-12)
234 105 handle_esp_assignment(-8)
235 106 handle_esp_assignment(+16)
236 107 handle_esp_assignment(+12)
237 108 handle_esp_assignment(0)
238 109 handle_esp_assignment(+8)
239 110 handle_esp_assignment(-16)
240 111 handle_esp_assignment(+20)
241 112 handle_esp_assignment(-20)
242 113 handle_esp_assignment(+24)
243 114 handle_esp_assignment(-24)
244
245 120 vg_handle_esp_assignment_SLOWLY
246 121 vg_handle_esp_assignment_SLOWLY(normal; move down)
247 122 vg_handle_esp_assignment_SLOWLY(normal; move up)
248 123 vg_handle_esp_assignment_SLOWLY(normal)
249 124 vg_handle_esp_assignment_SLOWLY(>= HUGE_DELTA)
250*/
251
252/*------------------------------------------------------------*/
253/*--- Function declarations. ---*/
254/*------------------------------------------------------------*/
255
256static UInt vgmext_rd_V4_SLOWLY ( Addr a );
257static UInt vgmext_rd_V2_SLOWLY ( Addr a );
258static UInt vgmext_rd_V1_SLOWLY ( Addr a );
259static void vgmext_wr_V4_SLOWLY ( Addr a, UInt vbytes );
260static void vgmext_wr_V2_SLOWLY ( Addr a, UInt vbytes );
261static void vgmext_wr_V1_SLOWLY ( Addr a, UInt vbytes );
262static void fpu_read_check_SLOWLY ( Addr addr, Int size );
263static void fpu_write_check_SLOWLY ( Addr addr, Int size );
264
265/*------------------------------------------------------------*/
266/*--- Data defns. ---*/
267/*------------------------------------------------------------*/
268
269typedef
270 struct {
271 UChar abits[8192];
272 UChar vbyte[65536];
273 }
274 SecMap;
275
276static SecMap* primary_map[ /*65536*/ 262144 ];
277static SecMap distinguished_secondary_map;
278
279#define IS_DISTINGUISHED_SM(smap) \
280 ((smap) == &distinguished_secondary_map)
281
282#define ENSURE_MAPPABLE(addr,caller) \
283 do { \
284 if (IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) { \
285 primary_map[(addr) >> 16] = alloc_secondary_map(caller); \
286 /* VG_(printf)("new 2map because of %p\n", addr); */ \
287 } \
288 } while(0)
289
290#define BITARR_SET(aaa_p,iii_p) \
291 do { \
292 UInt iii = (UInt)iii_p; \
293 UChar* aaa = (UChar*)aaa_p; \
294 aaa[iii >> 3] |= (1 << (iii & 7)); \
295 } while (0)
296
297#define BITARR_CLEAR(aaa_p,iii_p) \
298 do { \
299 UInt iii = (UInt)iii_p; \
300 UChar* aaa = (UChar*)aaa_p; \
301 aaa[iii >> 3] &= ~(1 << (iii & 7)); \
302 } while (0)
303
304#define BITARR_TEST(aaa_p,iii_p) \
305 (0 != (((UChar*)aaa_p)[ ((UInt)iii_p) >> 3 ] \
306 & (1 << (((UInt)iii_p) & 7)))) \
307
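/* Added note: the BITARR_* macros above treat their first argument as
   a packed bit array -- bit number iii lives in byte (iii >> 3) at
   bit position (iii & 7).  For example, BITARR_TEST(sm->abits, 19)
   inspects bit 3 of sm->abits[2], i.e. the A bit of the byte at
   offset 19 within that secondary map. */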
308
309#define VGM_BIT_VALID 0
310#define VGM_BIT_INVALID 1
311
312#define VGM_NIBBLE_VALID 0
313#define VGM_NIBBLE_INVALID 0xF
314
315#define VGM_BYTE_VALID 0
316#define VGM_BYTE_INVALID 0xFF
317
318#define VGM_WORD_VALID 0
319#define VGM_WORD_INVALID 0xFFFFFFFF
320
321#define VGM_EFLAGS_VALID 0xFFFFFFFE
322#define VGM_EFLAGS_INVALID 0xFFFFFFFF /* not used */
323
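/* Added summary of the encoding above: an A bit of 0 means the byte
   is addressible and 1 means it is not; a V byte of 0x00 means all
   eight bits of the corresponding data byte are defined and 0xFF
   means none are.  Hence VGM_WORD_VALID (0) describes a fully-defined
   aligned word and VGM_WORD_INVALID (0xFFFFFFFF) a fully-undefined
   one. */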
324
325static void init_shadow_memory ( void )
326{
327 Int i;
328
329 for (i = 0; i < 8192; i++) /* Invalid address */
330 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
331 for (i = 0; i < 65536; i++) /* Invalid Value */
332 distinguished_secondary_map.vbyte[i] = VGM_BYTE_INVALID;
333
334 /* These entries gradually get overwritten as the used address
335 space expands. */
336 for (i = 0; i < 65536; i++)
337 primary_map[i] = &distinguished_secondary_map;
338
339 /* These ones should never change; it's a bug in Valgrind if they do. */
340 for (i = 65536; i < 262144; i++)
341 primary_map[i] = &distinguished_secondary_map;
342}
343
344void SK_(post_clo_init) ( void )
345{
346}
347
348void SK_(fini) ( void )
349{
350 VG_(print_malloc_stats)();
351
352 if (VG_(clo_verbosity) == 1) {
353 if (!SK_(clo_leak_check))
354 VG_(message)(Vg_UserMsg,
355 "For a detailed leak analysis, rerun with: --leak-check=yes");
356
357 VG_(message)(Vg_UserMsg,
358 "For counts of detected errors, rerun with: -v");
359 }
360 if (SK_(clo_leak_check)) SK_(detect_memory_leaks)();
361
362 done_prof_mem();
363
364 if (0) {
365 VG_(message)(Vg_DebugMsg,
366 "------ Valgrind's client block stats follow ---------------" );
367 SK_(show_client_block_stats)();
368 }
369}
370
371/*------------------------------------------------------------*/
372/*--- Basic bitmap management, reading and writing. ---*/
373/*------------------------------------------------------------*/
374
375/* Allocate and initialise a secondary map. */
376
377static SecMap* alloc_secondary_map ( __attribute__ ((unused))
378 Char* caller )
379{
380 SecMap* map;
381 UInt i;
382 PROF_EVENT(10);
383
384 /* Mark all bytes as invalid access and invalid value. */
385
386 /* It just happens that a SecMap occupies exactly 18 pages --
387 although this isn't important, so the following assert is
388 spurious. */
   sk_assert(0 == (sizeof(SecMap) % VKI_BYTES_PER_PAGE));
   map = VG_(get_memory_from_mmap)( sizeof(SecMap), caller );
391
392 for (i = 0; i < 8192; i++)
393 map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */
394 for (i = 0; i < 65536; i++)
395 map->vbyte[i] = VGM_BYTE_INVALID; /* Invalid Value */
396
397 /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
398 return map;
399}
400
401
402/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
403
404static __inline__ UChar get_abit ( Addr a )
405{
406 SecMap* sm = primary_map[a >> 16];
407 UInt sm_off = a & 0xFFFF;
408 PROF_EVENT(20);
409# if 0
410 if (IS_DISTINGUISHED_SM(sm))
411 VG_(message)(Vg_DebugMsg,
412 "accessed distinguished 2ndary (A)map! 0x%x\n", a);
413# endif
414 return BITARR_TEST(sm->abits, sm_off)
415 ? VGM_BIT_INVALID : VGM_BIT_VALID;
416}
417
418static __inline__ UChar get_vbyte ( Addr a )
419{
420 SecMap* sm = primary_map[a >> 16];
421 UInt sm_off = a & 0xFFFF;
422 PROF_EVENT(21);
423# if 0
424 if (IS_DISTINGUISHED_SM(sm))
425 VG_(message)(Vg_DebugMsg,
426 "accessed distinguished 2ndary (V)map! 0x%x\n", a);
427# endif
428 return sm->vbyte[sm_off];
429}
430
431static __inline__ void set_abit ( Addr a, UChar abit )
432{
433 SecMap* sm;
434 UInt sm_off;
435 PROF_EVENT(22);
436 ENSURE_MAPPABLE(a, "set_abit");
437 sm = primary_map[a >> 16];
438 sm_off = a & 0xFFFF;
439 if (abit)
440 BITARR_SET(sm->abits, sm_off);
441 else
442 BITARR_CLEAR(sm->abits, sm_off);
443}
444
445static __inline__ void set_vbyte ( Addr a, UChar vbyte )
446{
447 SecMap* sm;
448 UInt sm_off;
449 PROF_EVENT(23);
450 ENSURE_MAPPABLE(a, "set_vbyte");
451 sm = primary_map[a >> 16];
452 sm_off = a & 0xFFFF;
453 sm->vbyte[sm_off] = vbyte;
454}
455
456
457/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
458
459static __inline__ UChar get_abits4_ALIGNED ( Addr a )
460{
461 SecMap* sm;
462 UInt sm_off;
463 UChar abits8;
464 PROF_EVENT(24);
465# ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
#  endif
468 sm = primary_map[a >> 16];
469 sm_off = a & 0xFFFF;
470 abits8 = sm->abits[sm_off >> 3];
471 abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
472 abits8 &= 0x0F;
473 return abits8;
474}
475
476static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
477{
478 SecMap* sm = primary_map[a >> 16];
479 UInt sm_off = a & 0xFFFF;
480 PROF_EVENT(25);
481# ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
#  endif
484 return ((UInt*)(sm->vbyte))[sm_off >> 2];
485}
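/* Added note: the UInt* cast in get_vbytes4_ALIGNED is safe because
   sm_off is 4-aligned whenever IS_ALIGNED4_ADDR(a) holds, so the four
   V bytes of an aligned word are fetched from vbyte[] with a single
   32-bit load. */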
486
487
488/*------------------------------------------------------------*/
489/*--- Setting permissions over address ranges. ---*/
490/*------------------------------------------------------------*/
491
492static void set_address_range_perms ( Addr a, UInt len,
493 UInt example_a_bit,
494 UInt example_v_bit )
495{
496 UChar vbyte, abyte8;
497 UInt vword4, sm_off;
498 SecMap* sm;
499
500 PROF_EVENT(30);
501
502 if (len == 0)
503 return;
504
505 if (len > 100 * 1000 * 1000) {
506 VG_(message)(Vg_UserMsg,
507 "Warning: set address range perms: "
508 "large range %u, a %d, v %d",
509 len, example_a_bit, example_v_bit );
510 }
511
512 VGP_PUSHCC(VgpSetMem);
513
514 /* Requests to change permissions of huge address ranges may
515 indicate bugs in our machinery. 30,000,000 is arbitrary, but so
516 far all legitimate requests have fallen beneath that size. */
517 /* 4 Mar 02: this is just stupid; get rid of it. */
   /* sk_assert(len < 30000000); */

   /* Check the permissions make sense. */
   sk_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);
   sk_assert(example_v_bit == VGM_BIT_VALID
             || example_v_bit == VGM_BIT_INVALID);
   if (example_a_bit == VGM_BIT_INVALID)
      sk_assert(example_v_bit == VGM_BIT_INVALID);

528 /* The validity bits to write. */
529 vbyte = example_v_bit==VGM_BIT_VALID
530 ? VGM_BYTE_VALID : VGM_BYTE_INVALID;
531
532 /* In order that we can charge through the address space at 8
533 bytes/main-loop iteration, make up some perms. */
534 abyte8 = (example_a_bit << 7)
535 | (example_a_bit << 6)
536 | (example_a_bit << 5)
537 | (example_a_bit << 4)
538 | (example_a_bit << 3)
539 | (example_a_bit << 2)
540 | (example_a_bit << 1)
541 | (example_a_bit << 0);
542 vword4 = (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;
543
544# ifdef VG_DEBUG_MEMORY
545 /* Do it ... */
546 while (True) {
547 PROF_EVENT(31);
548 if (len == 0) break;
549 set_abit ( a, example_a_bit );
550 set_vbyte ( a, vbyte );
551 a++;
552 len--;
553 }
554
555# else
556 /* Slowly do parts preceding 8-byte alignment. */
557 while (True) {
558 PROF_EVENT(31);
559 if (len == 0) break;
560 if ((a % 8) == 0) break;
561 set_abit ( a, example_a_bit );
562 set_vbyte ( a, vbyte );
563 a++;
564 len--;
565 }
566
567 if (len == 0) {
568 VGP_POPCC(VgpSetMem);
569 return;
570 }
   sk_assert((a % 8) == 0 && len > 0);

573 /* Once aligned, go fast. */
574 while (True) {
575 PROF_EVENT(32);
576 if (len < 8) break;
577 ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
578 sm = primary_map[a >> 16];
579 sm_off = a & 0xFFFF;
580 sm->abits[sm_off >> 3] = abyte8;
581 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
582 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
583 a += 8;
584 len -= 8;
585 }
586
587 if (len == 0) {
588 VGP_POPCC(VgpSetMem);
589 return;
590 }
   sk_assert((a % 8) == 0 && len > 0 && len < 8);

593 /* Finish the upper fragment. */
594 while (True) {
595 PROF_EVENT(33);
596 if (len == 0) break;
597 set_abit ( a, example_a_bit );
598 set_vbyte ( a, vbyte );
599 a++;
600 len--;
601 }
602# endif
603
604 /* Check that zero page and highest page have not been written to
605 -- this could happen with buggy syscall wrappers. Today
606 (2001-04-26) had precisely such a problem with __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSetMem);
609}
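/* Added note: one iteration of the aligned main loop above sets the
   permissions for 8 client bytes at a time -- a single A byte (eight
   A bits) plus two 32-bit words of V bytes. */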
610
611/* Set permissions for address ranges ... */
612
613void SK_(make_noaccess) ( Addr a, UInt len )
614{
615 PROF_EVENT(35);
616 DEBUG("SK_(make_noaccess)(%p, %x)\n", a, len);
617 set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
618}
619
620void SK_(make_writable) ( Addr a, UInt len )
621{
622 PROF_EVENT(36);
623 DEBUG("SK_(make_writable)(%p, %x)\n", a, len);
624 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
625}
626
627void SK_(make_readable) ( Addr a, UInt len )
628{
629 PROF_EVENT(37);
630 DEBUG("SK_(make_readable)(%p, 0x%x)\n", a, len);
631 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
632}
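/* Added note on typical use of the three wrappers above: freshly
   allocated but uninitialised memory is made writable (addressible,
   contents undefined), memory with known contents is made readable,
   and unmapped memory is made noaccess; memcheck_new_mem_heap below
   shows the allocation case. */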
633
634/* Block-copy permissions (needed for implementing realloc()). */
635
636static void copy_address_range_state ( Addr src, Addr dst, UInt len )
637{
638 UInt i;
639
640 DEBUG("copy_address_range_state\n");
641
642 PROF_EVENT(40);
643 for (i = 0; i < len; i++) {
644 UChar abit = get_abit ( src+i );
645 UChar vbyte = get_vbyte ( src+i );
646 PROF_EVENT(41);
647 set_abit ( dst+i, abit );
648 set_vbyte ( dst+i, vbyte );
649 }
650}
651
652
653/* Check permissions for address range. If inadequate permissions
654 exist, *bad_addr is set to the offending address, so the caller can
655 know what it is. */
656
657Bool SK_(check_writable) ( Addr a, UInt len, Addr* bad_addr )
658{
659 UInt i;
660 UChar abit;
661 PROF_EVENT(42);
662 for (i = 0; i < len; i++) {
663 PROF_EVENT(43);
664 abit = get_abit(a);
665 if (abit == VGM_BIT_INVALID) {
666 if (bad_addr != NULL) *bad_addr = a;
667 return False;
668 }
669 a++;
670 }
671 return True;
672}
673
674Bool SK_(check_readable) ( Addr a, UInt len, Addr* bad_addr )
675{
676 UInt i;
677 UChar abit;
678 UChar vbyte;
679
680 PROF_EVENT(44);
681 DEBUG("SK_(check_readable)\n");
682 for (i = 0; i < len; i++) {
683 abit = get_abit(a);
684 vbyte = get_vbyte(a);
685 PROF_EVENT(45);
686 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
687 if (bad_addr != NULL) *bad_addr = a;
688 return False;
689 }
690 a++;
691 }
692 return True;
693}
694
695
696/* Check a zero-terminated ascii string. Tricky -- don't want to
697 examine the actual bytes, to find the end, until we're sure it is
698 safe to do so. */
699
700Bool SK_(check_readable_asciiz) ( Addr a, Addr* bad_addr )
701{
702 UChar abit;
703 UChar vbyte;
704 PROF_EVENT(46);
705 DEBUG("SK_(check_readable_asciiz)\n");
706 while (True) {
707 PROF_EVENT(47);
708 abit = get_abit(a);
709 vbyte = get_vbyte(a);
710 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
711 if (bad_addr != NULL) *bad_addr = a;
712 return False;
713 }
714 /* Ok, a is safe to read. */
715 if (* ((UChar*)a) == 0) return True;
716 a++;
717 }
718}
719
720
721/*------------------------------------------------------------*/
722/*--- Memory event handlers ---*/
723/*------------------------------------------------------------*/
724
725/* Setting permissions for aligned words. This supports fast stack
726 operations. */
727
728static void make_noaccess_aligned ( Addr a, UInt len )
729{
730 SecMap* sm;
731 UInt sm_off;
732 UChar mask;
733 Addr a_past_end = a + len;
734
735 VGP_PUSHCC(VgpSetMem);
736
737 PROF_EVENT(50);
738# ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
   sk_assert(IS_ALIGNED4_ADDR(len));
#  endif
742
743 for ( ; a < a_past_end; a += 4) {
744 ENSURE_MAPPABLE(a, "make_noaccess_aligned");
745 sm = primary_map[a >> 16];
746 sm_off = a & 0xFFFF;
747 ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
748 mask = 0x0F;
749 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
750 /* mask now contains 1s where we wish to make address bits
751 invalid (1s). */
752 sm->abits[sm_off >> 3] |= mask;
753 }
754 VGP_POPCC(VgpSetMem);
755}
756
757static void make_writable_aligned ( Addr a, UInt len )
758{
759 SecMap* sm;
760 UInt sm_off;
761 UChar mask;
762 Addr a_past_end = a + len;
763
764 VGP_PUSHCC(VgpSetMem);
765
766 PROF_EVENT(51);
767# ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
   sk_assert(IS_ALIGNED4_ADDR(len));
#  endif
771
772 for ( ; a < a_past_end; a += 4) {
773 ENSURE_MAPPABLE(a, "make_writable_aligned");
774 sm = primary_map[a >> 16];
775 sm_off = a & 0xFFFF;
776 ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
777 mask = 0x0F;
778 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
      /* mask now contains 1s where we wish to make address bits
         valid (0s). */
781 sm->abits[sm_off >> 3] &= ~mask;
782 }
783 VGP_POPCC(VgpSetMem);
784}
785
786
787static
788void check_is_writable ( CorePart part, ThreadState* tst,
789 Char* s, UInt base, UInt size )
790{
791 Bool ok;
792 Addr bad_addr;
793
794 VGP_PUSHCC(VgpCheckMem);
795
796 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
797 base,base+size-1); */
798 ok = SK_(check_writable) ( base, size, &bad_addr );
799 if (!ok) {
800 switch (part) {
801 case Vg_CoreSysCall:
802 SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/True, s );
803 break;
804
805 case Vg_CorePThread:
806 case Vg_CoreSignal:
807 SK_(record_core_mem_error)( tst, /*isWrite=*/True, s );
808 break;
809
810 default:
         VG_(skin_panic)("check_is_writable: Unknown or unexpected CorePart");
      }
813 }
814
815 VGP_POPCC(VgpCheckMem);
816}
817
818static
819void check_is_readable ( CorePart part, ThreadState* tst,
820 Char* s, UInt base, UInt size )
821{
822 Bool ok;
823 Addr bad_addr;
824
825 VGP_PUSHCC(VgpCheckMem);
826
827 /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
828 base,base+size-1); */
829 ok = SK_(check_readable) ( base, size, &bad_addr );
830 if (!ok) {
831 switch (part) {
832 case Vg_CoreSysCall:
833 SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/False, s );
834 break;
835
836 case Vg_CorePThread:
837 SK_(record_core_mem_error)( tst, /*isWrite=*/False, s );
838 break;
839
840 /* If we're being asked to jump to a silly address, record an error
841 message before potentially crashing the entire system. */
842 case Vg_CoreTranslate:
843 SK_(record_jump_error)( tst, bad_addr );
844 break;
845
846 default:
         VG_(skin_panic)("check_is_readable: Unknown or unexpected CorePart");
      }
849 }
850 VGP_POPCC(VgpCheckMem);
851}
852
853static
854void check_is_readable_asciiz ( CorePart part, ThreadState* tst,
855 Char* s, UInt str )
856{
857 Bool ok = True;
858 Addr bad_addr;
859 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
860
861 VGP_PUSHCC(VgpCheckMem);
862
   sk_assert(part == Vg_CoreSysCall);
   ok = SK_(check_readable_asciiz) ( (Addr)str, &bad_addr );
865 if (!ok) {
866 SK_(record_param_error) ( tst, bad_addr, /*is_writable =*/False, s );
867 }
868
869 VGP_POPCC(VgpCheckMem);
870}
871
872
873static
874void memcheck_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
875{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
878 SK_(make_readable)(a, len);
879}
880
881static
882void memcheck_new_mem_heap ( Addr a, UInt len, Bool is_inited )
883{
884 if (is_inited) {
885 SK_(make_readable)(a, len);
886 } else {
887 SK_(make_writable)(a, len);
888 }
889}
890
891static
892void memcheck_set_perms (Addr a, UInt len,
893 Bool nn, Bool rr, Bool ww, Bool xx)
894{
895 DEBUG("memcheck_set_perms(%p, %u, nn=%u, rr=%u ww=%u, xx=%u)\n",
896 a, len, nn, rr, ww, xx);
897 if (rr) SK_(make_readable)(a, len);
898 else if (ww) SK_(make_writable)(a, len);
899 else SK_(make_noaccess)(a, len);
900}
901
902
903/*------------------------------------------------------------*/
904/*--- Functions called directly from generated code. ---*/
905/*------------------------------------------------------------*/
906
907static __inline__ UInt rotateRight16 ( UInt x )
908{
909 /* Amazingly, gcc turns this into a single rotate insn. */
910 return (x >> 16) | (x << 16);
911}
912
913
914static __inline__ UInt shiftRight16 ( UInt x )
915{
916 return x >> 16;
917}
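/* Added note: shiftRight16 gives the plain primary-map index
   (addr >> 16) and is used by the 1-byte helpers, where alignment
   cannot be an issue; rotateRight16 implements the f() described near
   the top of this file and is used by the 4- and 2-byte helpers, so
   that misaligned accesses land in the distinguished upper 3/4 of the
   primary map. */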
918
919
920/* Read/write 1/2/4 sized V bytes, and emit an address error if
921 needed. */
922
923/* VG_(helperc_{LD,ST}V{1,2,4}) handle the common case fast.
924 Under all other circumstances, it defers to the relevant _SLOWLY
925 function, which can handle all situations.
926*/
927__attribute__ ((regparm(1)))
928UInt SK_(helperc_LOADV4) ( Addr a )
929{
930# ifdef VG_DEBUG_MEMORY
931 return vgmext_rd_V4_SLOWLY(a);
932# else
933 UInt sec_no = rotateRight16(a) & 0x3FFFF;
934 SecMap* sm = primary_map[sec_no];
935 UInt a_off = (a & 0xFFFF) >> 3;
936 UChar abits = sm->abits[a_off];
937 abits >>= (a & 4);
938 abits &= 15;
939 PROF_EVENT(60);
940 if (abits == VGM_NIBBLE_VALID) {
941 /* Handle common case quickly: a is suitably aligned, is mapped,
942 and is addressible. */
943 UInt v_off = a & 0xFFFF;
944 return ((UInt*)(sm->vbyte))[ v_off >> 2 ];
945 } else {
946 /* Slow but general case. */
947 return vgmext_rd_V4_SLOWLY(a);
948 }
949# endif
950}
951
952__attribute__ ((regparm(2)))
953void SK_(helperc_STOREV4) ( Addr a, UInt vbytes )
954{
955# ifdef VG_DEBUG_MEMORY
956 vgmext_wr_V4_SLOWLY(a, vbytes);
957# else
958 UInt sec_no = rotateRight16(a) & 0x3FFFF;
959 SecMap* sm = primary_map[sec_no];
960 UInt a_off = (a & 0xFFFF) >> 3;
961 UChar abits = sm->abits[a_off];
962 abits >>= (a & 4);
963 abits &= 15;
964 PROF_EVENT(61);
965 if (abits == VGM_NIBBLE_VALID) {
966 /* Handle common case quickly: a is suitably aligned, is mapped,
967 and is addressible. */
968 UInt v_off = a & 0xFFFF;
969 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = vbytes;
970 } else {
971 /* Slow but general case. */
972 vgmext_wr_V4_SLOWLY(a, vbytes);
973 }
974# endif
975}
976
977__attribute__ ((regparm(1)))
978UInt SK_(helperc_LOADV2) ( Addr a )
979{
980# ifdef VG_DEBUG_MEMORY
981 return vgmext_rd_V2_SLOWLY(a);
982# else
983 UInt sec_no = rotateRight16(a) & 0x1FFFF;
984 SecMap* sm = primary_map[sec_no];
985 UInt a_off = (a & 0xFFFF) >> 3;
986 PROF_EVENT(62);
987 if (sm->abits[a_off] == VGM_BYTE_VALID) {
988 /* Handle common case quickly. */
989 UInt v_off = a & 0xFFFF;
990 return 0xFFFF0000
991 |
992 (UInt)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
993 } else {
994 /* Slow but general case. */
995 return vgmext_rd_V2_SLOWLY(a);
996 }
997# endif
998}
999
1000__attribute__ ((regparm(2)))
1001void SK_(helperc_STOREV2) ( Addr a, UInt vbytes )
1002{
1003# ifdef VG_DEBUG_MEMORY
1004 vgmext_wr_V2_SLOWLY(a, vbytes);
1005# else
1006 UInt sec_no = rotateRight16(a) & 0x1FFFF;
1007 SecMap* sm = primary_map[sec_no];
1008 UInt a_off = (a & 0xFFFF) >> 3;
1009 PROF_EVENT(63);
1010 if (sm->abits[a_off] == VGM_BYTE_VALID) {
1011 /* Handle common case quickly. */
1012 UInt v_off = a & 0xFFFF;
1013 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = vbytes & 0x0000FFFF;
1014 } else {
1015 /* Slow but general case. */
1016 vgmext_wr_V2_SLOWLY(a, vbytes);
1017 }
1018# endif
1019}
1020
1021__attribute__ ((regparm(1)))
1022UInt SK_(helperc_LOADV1) ( Addr a )
1023{
1024# ifdef VG_DEBUG_MEMORY
1025 return vgmext_rd_V1_SLOWLY(a);
1026# else
1027 UInt sec_no = shiftRight16(a);
1028 SecMap* sm = primary_map[sec_no];
1029 UInt a_off = (a & 0xFFFF) >> 3;
1030 PROF_EVENT(64);
1031 if (sm->abits[a_off] == VGM_BYTE_VALID) {
1032 /* Handle common case quickly. */
1033 UInt v_off = a & 0xFFFF;
1034 return 0xFFFFFF00
1035 |
1036 (UInt)( ((UChar*)(sm->vbyte))[ v_off ] );
1037 } else {
1038 /* Slow but general case. */
1039 return vgmext_rd_V1_SLOWLY(a);
1040 }
1041# endif
1042}
1043
1044__attribute__ ((regparm(2)))
1045void SK_(helperc_STOREV1) ( Addr a, UInt vbytes )
1046{
1047# ifdef VG_DEBUG_MEMORY
1048 vgmext_wr_V1_SLOWLY(a, vbytes);
1049# else
1050 UInt sec_no = shiftRight16(a);
1051 SecMap* sm = primary_map[sec_no];
1052 UInt a_off = (a & 0xFFFF) >> 3;
1053 PROF_EVENT(65);
1054 if (sm->abits[a_off] == VGM_BYTE_VALID) {
1055 /* Handle common case quickly. */
1056 UInt v_off = a & 0xFFFF;
1057 ((UChar*)(sm->vbyte))[ v_off ] = vbytes & 0x000000FF;
1058 } else {
1059 /* Slow but general case. */
1060 vgmext_wr_V1_SLOWLY(a, vbytes);
1061 }
1062# endif
1063}
1064
1065
1066/*------------------------------------------------------------*/
1067/*--- Fallback functions to handle cases that the above ---*/
1068/*--- VG_(helperc_{LD,ST}V{1,2,4}) can't manage. ---*/
1069/*------------------------------------------------------------*/
1070
1071static UInt vgmext_rd_V4_SLOWLY ( Addr a )
1072{
1073 Bool a0ok, a1ok, a2ok, a3ok;
1074 UInt vb0, vb1, vb2, vb3;
1075
1076 PROF_EVENT(70);
1077
1078 /* First establish independently the addressibility of the 4 bytes
1079 involved. */
1080 a0ok = get_abit(a+0) == VGM_BIT_VALID;
1081 a1ok = get_abit(a+1) == VGM_BIT_VALID;
1082 a2ok = get_abit(a+2) == VGM_BIT_VALID;
1083 a3ok = get_abit(a+3) == VGM_BIT_VALID;
1084
1085 /* Also get the validity bytes for the address. */
1086 vb0 = (UInt)get_vbyte(a+0);
1087 vb1 = (UInt)get_vbyte(a+1);
1088 vb2 = (UInt)get_vbyte(a+2);
1089 vb3 = (UInt)get_vbyte(a+3);
1090
1091 /* Now distinguish 3 cases */
1092
1093 /* Case 1: the address is completely valid, so:
1094 - no addressing error
1095 - return V bytes as read from memory
1096 */
1097 if (a0ok && a1ok && a2ok && a3ok) {
1098 UInt vw = VGM_WORD_INVALID;
1099 vw <<= 8; vw |= vb3;
1100 vw <<= 8; vw |= vb2;
1101 vw <<= 8; vw |= vb1;
1102 vw <<= 8; vw |= vb0;
1103 return vw;
1104 }
1105
1106 /* Case 2: the address is completely invalid.
1107 - emit addressing error
1108 - return V word indicating validity.
      This sounds strange, but if we make loads from invalid addresses
      give invalid data, we also risk producing a number of confusing
      undefined-value errors later, which obscures the fact that the
      error arose in the first place from an invalid address.
1113 */
1114 /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
1115 if (!SK_(clo_partial_loads_ok)
1116 || ((a & 3) != 0)
1117 || (!a0ok && !a1ok && !a2ok && !a3ok)) {
1118 SK_(record_address_error)( a, 4, False );
1119 return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
1120 | (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
1121 }
1122
1123 /* Case 3: the address is partially valid.
1124 - no addressing error
1125 - returned V word is invalid where the address is invalid,
1126 and contains V bytes from memory otherwise.
1127 Case 3 is only allowed if SK_(clo_partial_loads_ok) is True
1128 (which is the default), and the address is 4-aligned.
1129 If not, Case 2 will have applied.
1130 */
   sk_assert(SK_(clo_partial_loads_ok));
   {
1133 UInt vw = VGM_WORD_INVALID;
1134 vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
1135 vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
1136 vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
1137 vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
1138 return vw;
1139 }
1140}
1141
1142static void vgmext_wr_V4_SLOWLY ( Addr a, UInt vbytes )
1143{
1144 /* Check the address for validity. */
1145 Bool aerr = False;
1146 PROF_EVENT(71);
1147
1148 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1149 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1150 if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
1151 if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
1152
1153 /* Store the V bytes, remembering to do it little-endian-ly. */
1154 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1155 set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
1156 set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
1157 set_vbyte( a+3, vbytes & 0x000000FF );
1158
1159 /* If an address error has happened, report it. */
1160 if (aerr)
1161 SK_(record_address_error)( a, 4, True );
1162}
1163
1164static UInt vgmext_rd_V2_SLOWLY ( Addr a )
1165{
1166 /* Check the address for validity. */
1167 UInt vw = VGM_WORD_INVALID;
1168 Bool aerr = False;
1169 PROF_EVENT(72);
1170
1171 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1172 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1173
1174 /* Fetch the V bytes, remembering to do it little-endian-ly. */
1175 vw <<= 8; vw |= (UInt)get_vbyte(a+1);
1176 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1177
1178 /* If an address error has happened, report it. */
1179 if (aerr) {
1180 SK_(record_address_error)( a, 2, False );
1181 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1182 | (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
1183 }
1184 return vw;
1185}
1186
1187static void vgmext_wr_V2_SLOWLY ( Addr a, UInt vbytes )
1188{
1189 /* Check the address for validity. */
1190 Bool aerr = False;
1191 PROF_EVENT(73);
1192
1193 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1194 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1195
1196 /* Store the V bytes, remembering to do it little-endian-ly. */
1197 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1198 set_vbyte( a+1, vbytes & 0x000000FF );
1199
1200 /* If an address error has happened, report it. */
1201 if (aerr)
1202 SK_(record_address_error)( a, 2, True );
1203}
1204
1205static UInt vgmext_rd_V1_SLOWLY ( Addr a )
1206{
1207 /* Check the address for validity. */
1208 UInt vw = VGM_WORD_INVALID;
1209 Bool aerr = False;
1210 PROF_EVENT(74);
1211
1212 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1213
1214 /* Fetch the V byte. */
1215 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1216
1217 /* If an address error has happened, report it. */
1218 if (aerr) {
1219 SK_(record_address_error)( a, 1, False );
1220 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1221 | (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
1222 }
1223 return vw;
1224}
1225
1226static void vgmext_wr_V1_SLOWLY ( Addr a, UInt vbytes )
1227{
1228 /* Check the address for validity. */
1229 Bool aerr = False;
1230 PROF_EVENT(75);
1231 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1232
1233 /* Store the V bytes, remembering to do it little-endian-ly. */
1234 set_vbyte( a+0, vbytes & 0x000000FF );
1235
1236 /* If an address error has happened, report it. */
1237 if (aerr)
1238 SK_(record_address_error)( a, 1, True );
1239}
1240
1241
1242/* ---------------------------------------------------------------------
1243 Called from generated code, or from the assembly helpers.
1244 Handlers for value check failures.
1245 ------------------------------------------------------------------ */
1246
1247void SK_(helperc_value_check0_fail) ( void )
1248{
1249 SK_(record_value_error) ( 0 );
1250}
1251
1252void SK_(helperc_value_check1_fail) ( void )
1253{
1254 SK_(record_value_error) ( 1 );
1255}
1256
1257void SK_(helperc_value_check2_fail) ( void )
1258{
1259 SK_(record_value_error) ( 2 );
1260}
1261
1262void SK_(helperc_value_check4_fail) ( void )
1263{
1264 SK_(record_value_error) ( 4 );
1265}
1266
1267
1268/* ---------------------------------------------------------------------
1269 FPU load and store checks, called from generated code.
1270 ------------------------------------------------------------------ */
1271
1272__attribute__ ((regparm(2)))
1273void SK_(fpu_read_check) ( Addr addr, Int size )
1274{
1275 /* Ensure the read area is both addressible and valid (ie,
1276 readable). If there's an address error, don't report a value
1277 error too; but if there isn't an address error, check for a
1278 value error.
1279
1280 Try to be reasonably fast on the common case; wimp out and defer
1281 to fpu_read_check_SLOWLY for everything else. */
1282
1283 SecMap* sm;
1284 UInt sm_off, v_off, a_off;
1285 Addr addr4;
1286
1287 PROF_EVENT(80);
1288
1289# ifdef VG_DEBUG_MEMORY
1290 fpu_read_check_SLOWLY ( addr, size );
1291# else
1292
1293 if (size == 4) {
1294 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
1295 PROF_EVENT(81);
1296 /* Properly aligned. */
1297 sm = primary_map[addr >> 16];
1298 sm_off = addr & 0xFFFF;
1299 a_off = sm_off >> 3;
1300 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
1301 /* Properly aligned and addressible. */
1302 v_off = addr & 0xFFFF;
1303 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1304 goto slow4;
1305 /* Properly aligned, addressible and with valid data. */
1306 return;
1307 slow4:
1308 fpu_read_check_SLOWLY ( addr, 4 );
1309 return;
1310 }
1311
1312 if (size == 8) {
1313 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
1314 PROF_EVENT(82);
1315 /* Properly aligned. Do it in two halves. */
1316 addr4 = addr + 4;
1317 /* First half. */
1318 sm = primary_map[addr >> 16];
1319 sm_off = addr & 0xFFFF;
1320 a_off = sm_off >> 3;
1321 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1322 /* First half properly aligned and addressible. */
1323 v_off = addr & 0xFFFF;
1324 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1325 goto slow8;
1326 /* Second half. */
1327 sm = primary_map[addr4 >> 16];
1328 sm_off = addr4 & 0xFFFF;
1329 a_off = sm_off >> 3;
1330 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1331 /* Second half properly aligned and addressible. */
1332 v_off = addr4 & 0xFFFF;
1333 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1334 goto slow8;
1335 /* Both halves properly aligned, addressible and with valid
1336 data. */
1337 return;
1338 slow8:
1339 fpu_read_check_SLOWLY ( addr, 8 );
1340 return;
1341 }
1342
1343 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
1344 cases go quickly. */
1345 if (size == 2) {
1346 PROF_EVENT(83);
1347 fpu_read_check_SLOWLY ( addr, 2 );
1348 return;
1349 }
1350
1351 if (size == 10) {
1352 PROF_EVENT(84);
1353 fpu_read_check_SLOWLY ( addr, 10 );
1354 return;
1355 }
1356
1357 if (size == 28 || size == 108) {
1358 PROF_EVENT(84); /* XXX assign correct event number */
      fpu_read_check_SLOWLY ( addr, size );
      return;
1361 }
1362
1363 VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("vgmext_fpu_read_check: unhandled size");
#  endif
1366}
1367
1368
1369__attribute__ ((regparm(2)))
1370void SK_(fpu_write_check) ( Addr addr, Int size )
1371{
1372 /* Ensure the written area is addressible, and moan if otherwise.
1373 If it is addressible, make it valid, otherwise invalid.
1374 */
1375
1376 SecMap* sm;
1377 UInt sm_off, v_off, a_off;
1378 Addr addr4;
1379
1380 PROF_EVENT(85);
1381
1382# ifdef VG_DEBUG_MEMORY
1383 fpu_write_check_SLOWLY ( addr, size );
1384# else
1385
1386 if (size == 4) {
1387 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
1388 PROF_EVENT(86);
1389 /* Properly aligned. */
1390 sm = primary_map[addr >> 16];
1391 sm_off = addr & 0xFFFF;
1392 a_off = sm_off >> 3;
1393 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
1394 /* Properly aligned and addressible. Make valid. */
1395 v_off = addr & 0xFFFF;
1396 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1397 return;
1398 slow4:
1399 fpu_write_check_SLOWLY ( addr, 4 );
1400 return;
1401 }
1402
1403 if (size == 8) {
1404 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
1405 PROF_EVENT(87);
1406 /* Properly aligned. Do it in two halves. */
1407 addr4 = addr + 4;
1408 /* First half. */
1409 sm = primary_map[addr >> 16];
1410 sm_off = addr & 0xFFFF;
1411 a_off = sm_off >> 3;
1412 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1413 /* First half properly aligned and addressible. Make valid. */
1414 v_off = addr & 0xFFFF;
1415 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1416 /* Second half. */
1417 sm = primary_map[addr4 >> 16];
1418 sm_off = addr4 & 0xFFFF;
1419 a_off = sm_off >> 3;
1420 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1421 /* Second half properly aligned and addressible. */
1422 v_off = addr4 & 0xFFFF;
1423 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1424 /* Properly aligned, addressible and with valid data. */
1425 return;
1426 slow8:
1427 fpu_write_check_SLOWLY ( addr, 8 );
1428 return;
1429 }
1430
1431 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
1432 cases go quickly. */
1433 if (size == 2) {
1434 PROF_EVENT(88);
1435 fpu_write_check_SLOWLY ( addr, 2 );
1436 return;
1437 }
1438
1439 if (size == 10) {
1440 PROF_EVENT(89);
1441 fpu_write_check_SLOWLY ( addr, 10 );
1442 return;
1443 }
1444
1445 if (size == 28 || size == 108) {
1446 PROF_EVENT(89); /* XXX assign correct event number */
      fpu_write_check_SLOWLY ( addr, size );
      return;
1449 }
1450
1451 VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("vgmext_fpu_write_check: unhandled size");
#  endif
1454}
1455
1456
1457/* ---------------------------------------------------------------------
1458 Slow, general cases for FPU load and store checks.
1459 ------------------------------------------------------------------ */
1460
1461/* Generic version. Test for both addr and value errors, but if
1462 there's an addr error, don't report a value error even if it
1463 exists. */
1464
1465void fpu_read_check_SLOWLY ( Addr addr, Int size )
1466{
1467 Int i;
1468 Bool aerr = False;
1469 Bool verr = False;
1470 PROF_EVENT(90);
1471 for (i = 0; i < size; i++) {
1472 PROF_EVENT(91);
1473 if (get_abit(addr+i) != VGM_BIT_VALID)
1474 aerr = True;
1475 if (get_vbyte(addr+i) != VGM_BYTE_VALID)
1476 verr = True;
1477 }
1478
1479 if (aerr) {
1480 SK_(record_address_error)( addr, size, False );
1481 } else {
1482 if (verr)
1483 SK_(record_value_error)( size );
1484 }
1485}
1486
1487
1488/* Generic version. Test for addr errors. Valid addresses are
1489 given valid values, and invalid addresses invalid values. */
1490
1491void fpu_write_check_SLOWLY ( Addr addr, Int size )
1492{
1493 Int i;
1494 Addr a_here;
1495 Bool a_ok;
1496 Bool aerr = False;
1497 PROF_EVENT(92);
1498 for (i = 0; i < size; i++) {
1499 PROF_EVENT(93);
1500 a_here = addr+i;
1501 a_ok = get_abit(a_here) == VGM_BIT_VALID;
1502 if (a_ok) {
1503 set_vbyte(a_here, VGM_BYTE_VALID);
1504 } else {
1505 set_vbyte(a_here, VGM_BYTE_INVALID);
1506 aerr = True;
1507 }
1508 }
1509 if (aerr) {
1510 SK_(record_address_error)( addr, size, True );
1511 }
1512}
1513
1514/*------------------------------------------------------------*/
1515/*--- Shadow chunks info ---*/
1516/*------------------------------------------------------------*/
1517
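/* Added note: skin_extra[0] of each ShadowChunk holds an ExeContext
   pointer -- SK_(complete_shadow_chunk) records where the block was
   allocated, and SK_(alt_free) later overwrites it with where the
   block was freed. */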
1518static __inline__
1519void set_where( ShadowChunk* sc, ExeContext* ec )
1520{
1521 sc->skin_extra[0] = (UInt)ec;
1522}
1523
1524static __inline__
1525ExeContext *get_where( ShadowChunk* sc )
1526{
1527 return (ExeContext*)sc->skin_extra[0];
1528}
1529
1530void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst )
1531{
1532 set_where( sc, VG_(get_ExeContext) ( tst ) );
1533}
1534
1535/*------------------------------------------------------------*/
1536/*--- Postponing free()ing ---*/
1537/*------------------------------------------------------------*/
1538
1539/* Holds blocks after freeing. */
1540static ShadowChunk* vg_freed_list_start = NULL;
1541static ShadowChunk* vg_freed_list_end = NULL;
1542static Int vg_freed_list_volume = 0;
1543
1544static __attribute__ ((unused))
1545 Int count_freelist ( void )
1546{
1547 ShadowChunk* sc;
1548 Int n = 0;
1549 for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
1550 n++;
1551 return n;
1552}
1553
1554static __attribute__ ((unused))
1555 void freelist_sanity ( void )
1556{
1557 ShadowChunk* sc;
1558 Int n = 0;
1559 /* VG_(printf)("freelist sanity\n"); */
1560 for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
1561 n += sc->size;
   sk_assert(n == vg_freed_list_volume);
}
1564
1565/* Put a shadow chunk on the freed blocks queue, possibly freeing up
1566 some of the oldest blocks in the queue at the same time. */
1567static void add_to_freed_queue ( ShadowChunk* sc )
1568{
1569 ShadowChunk* sc1;
1570
1571 /* Put it at the end of the freed list */
1572 if (vg_freed_list_end == NULL) {
      sk_assert(vg_freed_list_start == NULL);
      vg_freed_list_end = vg_freed_list_start = sc;
1575 vg_freed_list_volume = sc->size;
1576 } else {
      sk_assert(vg_freed_list_end->next == NULL);
      vg_freed_list_end->next = sc;
1579 vg_freed_list_end = sc;
1580 vg_freed_list_volume += sc->size;
1581 }
1582 sc->next = NULL;
1583
1584 /* Release enough of the oldest blocks to bring the free queue
1585 volume below vg_clo_freelist_vol. */
1586
1587 while (vg_freed_list_volume > SK_(clo_freelist_vol)) {
1588 /* freelist_sanity(); */
      sk_assert(vg_freed_list_start != NULL);
      sk_assert(vg_freed_list_end != NULL);

1592 sc1 = vg_freed_list_start;
1593 vg_freed_list_volume -= sc1->size;
1594 /* VG_(printf)("volume now %d\n", vg_freed_list_volume); */
      sk_assert(vg_freed_list_volume >= 0);

1597 if (vg_freed_list_start == vg_freed_list_end) {
1598 vg_freed_list_start = vg_freed_list_end = NULL;
1599 } else {
1600 vg_freed_list_start = sc1->next;
1601 }
1602 sc1->next = NULL; /* just paranoia */
      VG_(free_ShadowChunk) ( sc1 );
   }
1605}
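/* Added note: the queue above acts as a simple quarantine.  Freed
   blocks are retained until their total size exceeds
   SK_(clo_freelist_vol) bytes (1000000 by default, see the options at
   the top of this file), presumably so that errors involving
   recently-freed memory can still be matched to their block via
   SK_(any_matching_freed_ShadowChunks) below. */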
1606
1607/* Return the first shadow chunk satisfying the predicate p. */
1608ShadowChunk* SK_(any_matching_freed_ShadowChunks)
1609 ( Bool (*p) ( ShadowChunk* ))
1610{
1611 ShadowChunk* sc;
1612
1613 /* No point looking through freed blocks if we're not keeping
1614 them around for a while... */
1615 for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
1616 if (p(sc))
1617 return sc;
1618
1619 return NULL;
1620}
1621
1622void SK_(alt_free) ( ShadowChunk* sc, ThreadState* tst )
1623{
1624 /* Record where freed */
1625 set_where( sc, VG_(get_ExeContext) ( tst ) );
1626
1627 /* Put it out of harm's way for a while. */
1628 add_to_freed_queue ( sc );
1629}
1630
1631/*------------------------------------------------------------*/
1632/*--- Low-level address-space scanning, for the leak ---*/
1633/*--- detector. ---*/
1634/*------------------------------------------------------------*/
1635
1636static
1637jmp_buf memscan_jmpbuf;
1638
1639static
1640void vg_scan_all_valid_memory_sighandler ( Int sigNo )
1641{
1642 __builtin_longjmp(memscan_jmpbuf, 1);
1643}
1644
1645/* Safely (avoiding SIGSEGV / SIGBUS) scan the entire valid address
1646 space and pass the addresses and values of all addressible,
1647 defined, aligned words to notify_word. This is the basis for the
1648 leak detector. Returns the number of calls made to notify_word. */
1649UInt VG_(scan_all_valid_memory) ( void (*notify_word)( Addr, UInt ) )
1650{
1651 /* All volatile, because some gccs seem paranoid about longjmp(). */
1652 volatile UInt res, numPages, page, vbytes, primaryMapNo, nWordsNotified;
1653 volatile Addr pageBase, addr;
1654 volatile SecMap* sm;
1655 volatile UChar abits;
1656 volatile UInt page_first_word;
1657
1658 vki_ksigaction sigbus_saved;
1659 vki_ksigaction sigbus_new;
1660 vki_ksigaction sigsegv_saved;
1661 vki_ksigaction sigsegv_new;
1662 vki_ksigset_t blockmask_saved;
1663 vki_ksigset_t unblockmask_new;
1664
1665 /* Temporarily install a new sigsegv and sigbus handler, and make
1666 sure SIGBUS, SIGSEGV and SIGTERM are unblocked. (Perhaps the
1667 first two can never be blocked anyway?) */
1668
1669 sigbus_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
1670 sigbus_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
1671 sigbus_new.ksa_restorer = NULL;
1672 res = VG_(ksigemptyset)( &sigbus_new.ksa_mask );
   sk_assert(res == 0);

1675 sigsegv_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
1676 sigsegv_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
1677 sigsegv_new.ksa_restorer = NULL;
1678 res = VG_(ksigemptyset)( &sigsegv_new.ksa_mask );
   sk_assert(res == 0+0);

1681 res = VG_(ksigemptyset)( &unblockmask_new );
1682 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGBUS );
1683 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGSEGV );
1684 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGTERM );
   sk_assert(res == 0+0+0);

1687 res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_new, &sigbus_saved );
   sk_assert(res == 0+0+0+0);

1690 res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_new, &sigsegv_saved );
   sk_assert(res == 0+0+0+0+0);

1693 res = VG_(ksigprocmask)( VKI_SIG_UNBLOCK, &unblockmask_new, &blockmask_saved );
   sk_assert(res == 0+0+0+0+0+0);

1696 /* The signal handlers are installed. Actually do the memory scan. */
1697 numPages = 1 << (32-VKI_BYTES_PER_PAGE_BITS);
   sk_assert(numPages == 1048576);
   sk_assert(4096 == (1 << VKI_BYTES_PER_PAGE_BITS));

1701 nWordsNotified = 0;
1702
1703 for (page = 0; page < numPages; page++) {
1704 pageBase = page << VKI_BYTES_PER_PAGE_BITS;
1705 primaryMapNo = pageBase >> 16;
1706 sm = primary_map[primaryMapNo];
1707 if (IS_DISTINGUISHED_SM(sm)) continue;
1708 if (__builtin_setjmp(memscan_jmpbuf) == 0) {
1709 /* try this ... */
1710 page_first_word = * (volatile UInt*)pageBase;
1711 /* we get here if we didn't get a fault */
1712 /* Scan the page */
1713 for (addr = pageBase; addr < pageBase+VKI_BYTES_PER_PAGE; addr += 4) {
1714 abits = get_abits4_ALIGNED(addr);
1715 vbytes = get_vbytes4_ALIGNED(addr);
1716 if (abits == VGM_NIBBLE_VALID
1717 && vbytes == VGM_WORD_VALID) {
1718 nWordsNotified++;
1719 notify_word ( addr, *(UInt*)addr );
1720 }
1721 }
1722 } else {
1723 /* We get here if reading the first word of the page caused a
1724 fault, which in turn caused the signal handler to longjmp.
1725 Ignore this page. */
1726 if (0)
1727 VG_(printf)(
1728 "vg_scan_all_valid_memory_sighandler: ignoring page at %p\n",
1729 (void*)pageBase
1730 );
1731 }
1732 }
1733
1734 /* Restore signal state to whatever it was before. */
1735 res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_saved, NULL );
   sk_assert(res == 0 +0);

1738 res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_saved, NULL );
   sk_assert(res == 0 +0 +0);

1741 res = VG_(ksigprocmask)( VKI_SIG_SETMASK, &blockmask_saved, NULL );
   sk_assert(res == 0 +0 +0 +0);

1744 return nWordsNotified;
1745}
1746
1747
1748/*------------------------------------------------------------*/
1749/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1750/*------------------------------------------------------------*/
1751
1752/* A block is either
1753 -- Proper-ly reached; a pointer to its start has been found
1754 -- Interior-ly reached; only an interior pointer to it has been found
1755 -- Unreached; so far, no pointers to any part of it have been found.
1756*/
1757typedef
1758 enum { Unreached, Interior, Proper }
1759 Reachedness;
1760
1761/* A block record, used for generating err msgs. */
1762typedef
1763 struct _LossRecord {
1764 struct _LossRecord* next;
1765 /* Where these lost blocks were allocated. */
1766 ExeContext* allocated_at;
1767 /* Their reachability. */
1768 Reachedness loss_mode;
1769 /* Number of blocks and total # bytes involved. */
1770 UInt total_bytes;
1771 UInt num_blocks;
1772 }
1773 LossRecord;
1774
1775
1776/* Find the i such that ptr points at or inside the block described by
1777 shadows[i]. Return -1 if none found. This assumes that shadows[]
1778 has been sorted on the ->data field. */
1779
1780#ifdef VG_DEBUG_LEAKCHECK
1781/* Used to sanity-check the fast binary-search mechanism. */
1782static Int find_shadow_for_OLD ( Addr ptr,
1783 ShadowChunk** shadows,
1784 Int n_shadows )
1785
1786{
1787 Int i;
1788 Addr a_lo, a_hi;
1789 PROF_EVENT(70);
1790 for (i = 0; i < n_shadows; i++) {
1791 PROF_EVENT(71);
1792 a_lo = shadows[i]->data;
1793 a_hi = ((Addr)shadows[i]->data) + shadows[i]->size - 1;
1794 if (a_lo <= ptr && ptr <= a_hi)
1795 return i;
1796 }
1797 return -1;
1798}
1799#endif
1800
1801
1802static Int find_shadow_for ( Addr ptr,
1803 ShadowChunk** shadows,
1804 Int n_shadows )
1805{
1806 Addr a_mid_lo, a_mid_hi;
1807 Int lo, mid, hi, retVal;
1808 PROF_EVENT(70);
1809 /* VG_(printf)("find shadow for %p = ", ptr); */
1810 retVal = -1;
1811 lo = 0;
1812 hi = n_shadows-1;
1813 while (True) {
1814 PROF_EVENT(71);
1815
1816 /* invariant: current unsearched space is from lo to hi,
1817 inclusive. */
1818 if (lo > hi) break; /* not found */
1819
1820 mid = (lo + hi) / 2;
1821 a_mid_lo = shadows[mid]->data;
1822 a_mid_hi = ((Addr)shadows[mid]->data) + shadows[mid]->size - 1;
1823
1824 if (ptr < a_mid_lo) {
1825 hi = mid-1;
1826 continue;
1827 }
1828 if (ptr > a_mid_hi) {
1829 lo = mid+1;
1830 continue;
1831 }
      sk_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
      retVal = mid;
1834 break;
1835 }
1836
1837# ifdef VG_DEBUG_LEAKCHECK
njne427a662002-10-02 11:08:25 +00001838 sk_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
njn25e49d8e72002-09-23 09:36:25 +00001839# endif
1840 /* VG_(printf)("%d\n", retVal); */
1841 return retVal;
1842}
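/* Worked example (a sketch, with made-up addresses): suppose two sorted,
   non-overlapping chunks
      shadows[0]->data == 0x1000, size == 0x100   covers [0x1000, 0x10FF]
      shadows[1]->data == 0x2000, size == 0x040   covers [0x2000, 0x203F]
   Then find_shadow_for(0x2010, shadows, 2) probes mid==0 (0x2010 > 0x10FF,
   so lo becomes 1), then mid==1 and returns 1; find_shadow_for(0x1800, ...)
   narrows the range to empty and returns -1.  Cost is O(log n_shadows) per
   scanned word, versus the linear find_shadow_for_OLD above. */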
1843
1844
1845
1846static void sort_malloc_shadows ( ShadowChunk** shadows, UInt n_shadows )
1847{
1848 Int incs[14] = { 1, 4, 13, 40, 121, 364, 1093, 3280,
1849 9841, 29524, 88573, 265720,
1850 797161, 2391484 };
1851 Int lo = 0;
1852 Int hi = n_shadows-1;
1853 Int i, j, h, bigN, hp;
1854 ShadowChunk* v;
1855
1856 PROF_EVENT(72);
1857 bigN = hi - lo + 1; if (bigN < 2) return;
1858 hp = 0; while (incs[hp] < bigN) hp++; hp--;
1859
1860 for (; hp >= 0; hp--) {
1861 PROF_EVENT(73);
1862 h = incs[hp];
1863 i = lo + h;
1864 while (1) {
1865 PROF_EVENT(74);
1866 if (i > hi) break;
1867 v = shadows[i];
1868 j = i;
1869 while (shadows[j-h]->data > v->data) {
1870 PROF_EVENT(75);
1871 shadows[j] = shadows[j-h];
1872 j = j - h;
1873 if (j <= (lo + h - 1)) break;
1874 }
1875 shadows[j] = v;
1876 i++;
1877 }
1878 }
1879}
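/* The gaps above (1, 4, 13, 40, ...) follow h' = 3h + 1, i.e. this is a
   Shell sort on the chunk start addresses.  It leaves shadows[] ordered
   by ->data ascending, which is exactly the precondition that
   find_shadow_for's binary search relies on. */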
1880
1881/* Globals, for the callback used by SK_(detect_memory_leaks). */
1882
1883static ShadowChunk** vglc_shadows;
1884static Int vglc_n_shadows;
1885static Reachedness* vglc_reachedness;
1886static Addr vglc_min_mallocd_addr;
1887static Addr vglc_max_mallocd_addr;
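/* vglc_min/max_mallocd_addr bracket every not-freed block, so the callback
   below can reject most scanned words with two comparisons before it ever
   reaches the binary search. */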
1888
1889static
1890void vg_detect_memory_leaks_notify_addr ( Addr a, UInt word_at_a )
1891{
1892 Int sh_no;
1893 Addr ptr;
1894
1895 /* Rule out some known causes of bogus pointers. Mostly these do
1896 not cause much trouble because only a few false pointers can
1897 ever lurk in these places. This mainly stops it reporting that
1898 blocks are still reachable in stupid test programs like this
1899
1900 int main (void) { char* a = malloc(100); return 0; }
1901
1902 which people seem inordinately fond of writing, for some reason.
1903
1904 Note that this is a complete kludge. It would be better to
1905 ignore any addresses corresponding to valgrind.so's .bss and
1906 .data segments, but I cannot think of a reliable way to identify
1907 where the .bss segment has been put. If you can, drop me a
1908 line.
1909 */
1910 if (VG_(within_stack)(a)) return;
1911 if (VG_(within_m_state_static)(a)) return;
1912 if (a == (Addr)(&vglc_min_mallocd_addr)) return;
1913 if (a == (Addr)(&vglc_max_mallocd_addr)) return;
1914
1915 /* OK, let's get on and do something Useful for a change. */
1916
1917 ptr = (Addr)word_at_a;
1918 if (ptr >= vglc_min_mallocd_addr && ptr <= vglc_max_mallocd_addr) {
1919 /* Might be legitimate; we'll have to investigate further. */
1920 sh_no = find_shadow_for ( ptr, vglc_shadows, vglc_n_shadows );
1921 if (sh_no != -1) {
1922 /* Found a block at/into which ptr points. */
njne427a662002-10-02 11:08:25 +00001923 sk_assert(sh_no >= 0 && sh_no < vglc_n_shadows);
1924 sk_assert(ptr < vglc_shadows[sh_no]->data
njn25e49d8e72002-09-23 09:36:25 +00001925 + vglc_shadows[sh_no]->size);
1926 /* Decide whether Proper-ly or Interior-ly reached. */
1927 if (ptr == vglc_shadows[sh_no]->data) {
1928 if (0) VG_(printf)("pointer at %p to %p\n", a, word_at_a );
1929 vglc_reachedness[sh_no] = Proper;
1930 } else {
1931 if (vglc_reachedness[sh_no] == Unreached)
1932 vglc_reachedness[sh_no] = Interior;
1933 }
1934 }
1935 }
1936}
1937
1938
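/* Overall shape of the leak check: (1) fetch all not-freed shadow chunks
   and Shell-sort them by start address; (2) scan every valid word of
   memory, classifying each chunk as Proper, Interior or Unreached via the
   callback above; (3) total up the three categories; (4) merge blocks with
   the same reachedness and allocation context into loss records; (5) print
   the records, smallest byte total first. */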
1939void SK_(detect_memory_leaks) ( void )
1940{
1941 Int i;
1942 Int blocks_leaked, bytes_leaked;
1943 Int blocks_dubious, bytes_dubious;
1944 Int blocks_reachable, bytes_reachable;
1945 Int n_lossrecords;
1946 UInt bytes_notified;
1947
1948 LossRecord* errlist;
1949 LossRecord* p;
1950
1951 PROF_EVENT(76);
1952
1953 /* VG_(get_malloc_shadows) allocates storage for shadows */
1954 vglc_shadows = VG_(get_malloc_shadows)( &vglc_n_shadows );
1955 if (vglc_n_shadows == 0) {
njne427a662002-10-02 11:08:25 +00001956 sk_assert(vglc_shadows == NULL);
njn25e49d8e72002-09-23 09:36:25 +00001957 VG_(message)(Vg_UserMsg,
1958 "No malloc'd blocks -- no leaks are possible.\n");
1959 return;
1960 }
1961
1962 VG_(message)(Vg_UserMsg,
1963 "searching for pointers to %d not-freed blocks.",
1964 vglc_n_shadows );
1965 sort_malloc_shadows ( vglc_shadows, vglc_n_shadows );
1966
1967 /* Sanity check; assert that the blocks are now in order and that
1968 they don't overlap. */
1969 for (i = 0; i < vglc_n_shadows-1; i++) {
njne427a662002-10-02 11:08:25 +00001970 sk_assert( ((Addr)vglc_shadows[i]->data)
njn25e49d8e72002-09-23 09:36:25 +00001971 < ((Addr)vglc_shadows[i+1]->data) );
njne427a662002-10-02 11:08:25 +00001972 sk_assert( ((Addr)vglc_shadows[i]->data) + vglc_shadows[i]->size
njn25e49d8e72002-09-23 09:36:25 +00001973 < ((Addr)vglc_shadows[i+1]->data) );
1974 }
1975
1976 vglc_min_mallocd_addr = ((Addr)vglc_shadows[0]->data);
1977 vglc_max_mallocd_addr = ((Addr)vglc_shadows[vglc_n_shadows-1]->data)
1978 + vglc_shadows[vglc_n_shadows-1]->size - 1;
1979
1980 vglc_reachedness
1981 = VG_(malloc)( vglc_n_shadows * sizeof(Reachedness) );
1982 for (i = 0; i < vglc_n_shadows; i++)
1983 vglc_reachedness[i] = Unreached;
1984
1985 /* Do the scan of memory. */
1986 bytes_notified
1987 = VG_(scan_all_valid_memory)( &vg_detect_memory_leaks_notify_addr )
1988 * VKI_BYTES_PER_WORD;
1989
1990 VG_(message)(Vg_UserMsg, "checked %d bytes.", bytes_notified);
1991
1992 blocks_leaked = bytes_leaked = 0;
1993 blocks_dubious = bytes_dubious = 0;
1994 blocks_reachable = bytes_reachable = 0;
1995
1996 for (i = 0; i < vglc_n_shadows; i++) {
1997 if (vglc_reachedness[i] == Unreached) {
1998 blocks_leaked++;
1999 bytes_leaked += vglc_shadows[i]->size;
2000 }
2001 else if (vglc_reachedness[i] == Interior) {
2002 blocks_dubious++;
2003 bytes_dubious += vglc_shadows[i]->size;
2004 }
2005 else if (vglc_reachedness[i] == Proper) {
2006 blocks_reachable++;
2007 bytes_reachable += vglc_shadows[i]->size;
2008 }
2009 }
2010
2011 VG_(message)(Vg_UserMsg, "");
2012 VG_(message)(Vg_UserMsg, "definitely lost: %d bytes in %d blocks.",
2013 bytes_leaked, blocks_leaked );
2014 VG_(message)(Vg_UserMsg, "possibly lost: %d bytes in %d blocks.",
2015 bytes_dubious, blocks_dubious );
2016 VG_(message)(Vg_UserMsg, "still reachable: %d bytes in %d blocks.",
2017 bytes_reachable, blocks_reachable );
2018
2019
2020 /* Common up the lost blocks so we can print sensible error
2021 messages. */
2022
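   /* Two blocks share a loss record when they have the same Reachedness
      and their allocation points compare equal under VG_(eq_ExeContext)
      at the chosen --leak-resolution; the record simply accumulates their
      block and byte counts. */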
2023 n_lossrecords = 0;
2024 errlist = NULL;
2025 for (i = 0; i < vglc_n_shadows; i++) {
2026
2027 /* 'where' stored in 'skin_extra' field */
2028 ExeContext* where = get_where ( vglc_shadows[i] );
2029
2030 for (p = errlist; p != NULL; p = p->next) {
2031 if (p->loss_mode == vglc_reachedness[i]
2032 && VG_(eq_ExeContext) ( SK_(clo_leak_resolution),
2033 p->allocated_at,
2034 where) ) {
2035 break;
2036 }
2037 }
2038 if (p != NULL) {
2039 p->num_blocks ++;
2040 p->total_bytes += vglc_shadows[i]->size;
2041 } else {
2042 n_lossrecords ++;
2043 p = VG_(malloc)(sizeof(LossRecord));
2044 p->loss_mode = vglc_reachedness[i];
2045 p->allocated_at = where;
2046 p->total_bytes = vglc_shadows[i]->size;
2047 p->num_blocks = 1;
2048 p->next = errlist;
2049 errlist = p;
2050 }
2051 }
2052
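   /* Emit the loss records in increasing order of byte total: each pass
      selects the cheapest record not yet printed, and num_blocks is zeroed
      to mark a record as done.  Proper-ly reachable records are skipped
      unless --show-reachable=yes was given. */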
2053 for (i = 0; i < n_lossrecords; i++) {
2054 LossRecord* p_min = NULL;
2055 UInt n_min = 0xFFFFFFFF;
2056 for (p = errlist; p != NULL; p = p->next) {
2057 if (p->num_blocks > 0 && p->total_bytes < n_min) {
2058 n_min = p->total_bytes;
2059 p_min = p;
2060 }
2061 }
njne427a662002-10-02 11:08:25 +00002062 sk_assert(p_min != NULL);
njn25e49d8e72002-09-23 09:36:25 +00002063
2064 if ( (!SK_(clo_show_reachable)) && p_min->loss_mode == Proper) {
2065 p_min->num_blocks = 0;
2066 continue;
2067 }
2068
2069 VG_(message)(Vg_UserMsg, "");
2070 VG_(message)(
2071 Vg_UserMsg,
2072 "%d bytes in %d blocks are %s in loss record %d of %d",
2073 p_min->total_bytes, p_min->num_blocks,
2074 p_min->loss_mode==Unreached ? "definitely lost" :
2075 (p_min->loss_mode==Interior ? "possibly lost"
2076 : "still reachable"),
2077 i+1, n_lossrecords
2078 );
2079 VG_(pp_ExeContext)(p_min->allocated_at);
2080 p_min->num_blocks = 0;
2081 }
2082
2083 VG_(message)(Vg_UserMsg, "");
2084 VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
2085 VG_(message)(Vg_UserMsg, " definitely lost: %d bytes in %d blocks.",
2086 bytes_leaked, blocks_leaked );
2087 VG_(message)(Vg_UserMsg, " possibly lost: %d bytes in %d blocks.",
2088 bytes_dubious, blocks_dubious );
2089 VG_(message)(Vg_UserMsg, " still reachable: %d bytes in %d blocks.",
2090 bytes_reachable, blocks_reachable );
2091 if (!SK_(clo_show_reachable)) {
2092 VG_(message)(Vg_UserMsg,
2093 "Reachable blocks (those to which a pointer was found) are not shown.");
2094 VG_(message)(Vg_UserMsg,
2095 "To see them, rerun with: --show-reachable=yes");
2096 }
2097 VG_(message)(Vg_UserMsg, "");
2098
2099 VG_(free) ( vglc_shadows );
2100 VG_(free) ( vglc_reachedness );
2101}
2102
2103
2104/* ---------------------------------------------------------------------
2105 Sanity check machinery (permanently engaged).
2106 ------------------------------------------------------------------ */
2107
2108/* Check that nobody has spuriously claimed that the first or last 16
2109 pages (64 KB) of address space have become accessible. Failure of
2110   the following does not per se indicate an internal consistency
2111   problem, but it is so likely to indicate one that we really want
2112   to know about it if so. */
2113
2114Bool SK_(cheap_sanity_check) ( void )
2115{
2116 if (IS_DISTINGUISHED_SM(primary_map[0]) &&
2117 IS_DISTINGUISHED_SM(primary_map[65535]))
2118 return True;
2119 else
2120 return False;
2121}
2122
2123Bool SK_(expensive_sanity_check) ( void )
2124{
2125 Int i;
2126
2127 /* Make sure nobody changed the distinguished secondary. */
2128 for (i = 0; i < 8192; i++)
2129 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
2130 return False;
2131
2132 for (i = 0; i < 65536; i++)
2133 if (distinguished_secondary_map.vbyte[i] != VGM_BYTE_INVALID)
2134 return False;
2135
2136 /* Make sure that the upper 3/4 of the primary map hasn't
2137 been messed with. */
2138 for (i = 65536; i < 262144; i++)
2139 if (primary_map[i] != & distinguished_secondary_map)
2140 return False;
2141
2142 return True;
2143}
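/* Arithmetic behind the loop bounds above: a secondary map covers 64KB, so
   it holds 65536 V bytes and 65536/8 == 8192 bytes of A bits.  And since a
   32-bit address's top 16 bits can only index entries 0..65535 of the
   primary map, entries 65536..262143 should never be replaced and must
   still point at the distinguished (all-invalid) secondary. */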
2144
2145/* ---------------------------------------------------------------------
2146 Debugging machinery (turn on to debug). Something of a mess.
2147 ------------------------------------------------------------------ */
2148
2149#if 0
2150/* Print the value tags on the 8 integer registers & flag reg. */
2151
2152static void uint_to_bits ( UInt x, Char* str )
2153{
2154 Int i;
2155 Int w = 0;
2156 /* str must point to a space of at least 36 bytes. */
2157 for (i = 31; i >= 0; i--) {
2158 str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
2159 if (i == 24 || i == 16 || i == 8)
2160 str[w++] = ' ';
2161 }
2162 str[w++] = 0;
njne427a662002-10-02 11:08:25 +00002163 sk_assert(w == 36);
njn25e49d8e72002-09-23 09:36:25 +00002164}
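/* For example, uint_to_bits(0xFF00AA55, buf) would leave
   "11111111 00000000 10101010 01010101" in buf: 32 digit characters,
   3 separating spaces and the trailing NUL account for the 36 bytes
   demanded of the caller. */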
2165
2166/* Caution! Not vthread-safe; looks in VG_(baseBlock), not the thread
2167 state table. */
2168
2169static void vg_show_reg_tags ( void )
2170{
2171 Char buf1[36];
2172 Char buf2[36];
2173 UInt z_eax, z_ebx, z_ecx, z_edx,
2174 z_esi, z_edi, z_ebp, z_esp, z_eflags;
2175
2176 z_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
2177 z_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
2178 z_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
2179 z_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
2180 z_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
2181 z_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
2182 z_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
2183 z_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
2184 z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
2185
2186 uint_to_bits(z_eflags, buf1);
2187   VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);
2188
2189 uint_to_bits(z_eax, buf1);
2190 uint_to_bits(z_ebx, buf2);
2191 VG_(message)(Vg_DebugMsg, "eax %s ebx %s\n", buf1, buf2);
2192
2193 uint_to_bits(z_ecx, buf1);
2194 uint_to_bits(z_edx, buf2);
2195 VG_(message)(Vg_DebugMsg, "ecx %s edx %s\n", buf1, buf2);
2196
2197 uint_to_bits(z_esi, buf1);
2198 uint_to_bits(z_edi, buf2);
2199 VG_(message)(Vg_DebugMsg, "esi %s edi %s\n", buf1, buf2);
2200
2201 uint_to_bits(z_ebp, buf1);
2202 uint_to_bits(z_esp, buf2);
2203 VG_(message)(Vg_DebugMsg, "ebp %s esp %s\n", buf1, buf2);
2204}
2205
2206
2207/* For debugging only. Scan the address space and touch all allegedly
2208   addressable words.  Useful for establishing where Valgrind's idea of
2209   addressability has diverged from what the kernel believes. */
2210
2211static
2212void zzzmemscan_notify_word ( Addr a, UInt w )
2213{
2214}
2215
2216void zzzmemscan ( void )
2217{
2218 Int n_notifies
2219 = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
2220 VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
2221}
2222#endif
2223
2224
2225
2226
2227#if 0
2228static Int zzz = 0;
2229
2230void show_bb ( Addr eip_next )
2231{
2232 VG_(printf)("[%4d] ", zzz);
2233   vg_show_reg_tags();
2234 VG_(translate) ( eip_next, NULL, NULL, NULL );
2235}
2236#endif /* 0 */
2237
2238/*------------------------------------------------------------*/
2239/*--- Syscall wrappers ---*/
2240/*------------------------------------------------------------*/
2241
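/* The wrapper protocol here: pre_syscall runs the cheap sanity check and
   smuggles the result through the core as the opaque pre_result pointer;
   post_syscall re-runs the check and panics if sanity held before the
   syscall but not after, naming the syscall number involved. */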
2242void* SK_(pre_syscall) ( ThreadId tid, UInt syscallno, Bool isBlocking )
2243{
2244 Int sane = SK_(cheap_sanity_check)();
2245 return (void*)sane;
2246}
2247
2248void SK_(post_syscall) ( ThreadId tid, UInt syscallno,
2249 void* pre_result, Int res, Bool isBlocking )
2250{
2251 Int sane_before_call = (Int)pre_result;
2252 Bool sane_after_call = SK_(cheap_sanity_check)();
2253
2254 if ((Int)sane_before_call && (!sane_after_call)) {
2255 VG_(message)(Vg_DebugMsg, "post-syscall: ");
2256 VG_(message)(Vg_DebugMsg,
2257 "probable sanity check failure for syscall number %d\n",
2258 syscallno );
njne427a662002-10-02 11:08:25 +00002259 VG_(skin_panic)("aborting due to the above ... bye!");
njn25e49d8e72002-09-23 09:36:25 +00002260 }
2261}
2262
2263
2264/*------------------------------------------------------------*/
2265/*--- Setup ---*/
2266/*------------------------------------------------------------*/
2267
2268void SK_(written_shadow_regs_values)( UInt* gen_reg_value, UInt* eflags_value )
2269{
2270 *gen_reg_value = VGM_WORD_VALID;
2271 *eflags_value = VGM_EFLAGS_VALID;
2272}
2273
2274Bool SK_(process_cmd_line_option)(Char* arg)
2275{
2276# define STREQ(s1,s2) (0==VG_(strcmp_ws)((s1),(s2)))
2277# define STREQN(nn,s1,s2) (0==VG_(strncmp_ws)((s1),(s2),(nn)))
2278
2279 if (STREQ(arg, "--partial-loads-ok=yes"))
2280 SK_(clo_partial_loads_ok) = True;
2281 else if (STREQ(arg, "--partial-loads-ok=no"))
2282 SK_(clo_partial_loads_ok) = False;
2283
2284 else if (STREQN(15, arg, "--freelist-vol=")) {
2285 SK_(clo_freelist_vol) = (Int)VG_(atoll)(&arg[15]);
2286 if (SK_(clo_freelist_vol) < 0) SK_(clo_freelist_vol) = 0;
2287 }
2288
2289 else if (STREQ(arg, "--leak-check=yes"))
2290 SK_(clo_leak_check) = True;
2291 else if (STREQ(arg, "--leak-check=no"))
2292 SK_(clo_leak_check) = False;
2293
2294 else if (STREQ(arg, "--leak-resolution=low"))
2295 SK_(clo_leak_resolution) = Vg_LowRes;
2296 else if (STREQ(arg, "--leak-resolution=med"))
2297 SK_(clo_leak_resolution) = Vg_MedRes;
2298 else if (STREQ(arg, "--leak-resolution=high"))
2299 SK_(clo_leak_resolution) = Vg_HighRes;
2300
2301 else if (STREQ(arg, "--show-reachable=yes"))
2302 SK_(clo_show_reachable) = True;
2303 else if (STREQ(arg, "--show-reachable=no"))
2304 SK_(clo_show_reachable) = False;
2305
2306 else if (STREQ(arg, "--workaround-gcc296-bugs=yes"))
2307 SK_(clo_workaround_gcc296_bugs) = True;
2308 else if (STREQ(arg, "--workaround-gcc296-bugs=no"))
2309 SK_(clo_workaround_gcc296_bugs) = False;
2310
2311 else if (STREQ(arg, "--check-addrVs=yes"))
2312 SK_(clo_check_addrVs) = True;
2313 else if (STREQ(arg, "--check-addrVs=no"))
2314 SK_(clo_check_addrVs) = False;
2315
2316 else if (STREQ(arg, "--cleanup=yes"))
2317 SK_(clo_cleanup) = True;
2318 else if (STREQ(arg, "--cleanup=no"))
2319 SK_(clo_cleanup) = False;
2320
2321 else
2322 return False;
2323
2324 return True;
2325
2326#undef STREQ
2327#undef STREQN
2328}
2329
2330Char* SK_(usage)(void)
2331{
2332 return
2333" --partial-loads-ok=no|yes too hard to explain here; see manual [yes]\n"
2334" --freelist-vol=<number> volume of freed blocks queue [1000000]\n"
2335" --leak-check=no|yes search for memory leaks at exit? [no]\n"
2336" --leak-resolution=low|med|high\n"
2337" amount of bt merging in leak check [low]\n"
2338" --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
2339" --workaround-gcc296-bugs=no|yes self explanatory [no]\n"
2340"    --check-addrVs=no|yes     experimental lighter-weight checking? [yes]\n"
2341" yes == Valgrind's original behaviour\n"
2342"\n"
2343" --cleanup=no|yes improve after instrumentation? [yes]\n";
2344}
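/* Example invocation exercising the options above (a sketch; it assumes
   the core's --skin= selector of this era is used to pick memcheck):

      valgrind --skin=memcheck --leak-check=yes --leak-resolution=high \
               --show-reachable=yes --freelist-vol=2000000 ./a.out
*/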
2345
2346
2347/*------------------------------------------------------------*/
2348/*--- Setup ---*/
2349/*------------------------------------------------------------*/
2350
njnd04b7c62002-10-03 14:05:52 +00002351void SK_(pre_clo_init)(VgDetails* details, VgNeeds* needs, VgTrackEvents* track)
njn25e49d8e72002-09-23 09:36:25 +00002352{
sewardj34eccb12002-10-05 16:49:09 +00002353 details->name = "Memcheck";
njnd04b7c62002-10-03 14:05:52 +00002354 details->version = NULL;
sewardj34eccb12002-10-05 16:49:09 +00002355 details->description = "a.k.a. Valgrind, a memory error detector";
njnd04b7c62002-10-03 14:05:52 +00002356 details->copyright_author =
2357 "Copyright (C) 2000-2002, and GNU GPL'd, by Julian Seward.";
2358 details->bug_reports_to = "jseward@acm.org";
njn25e49d8e72002-09-23 09:36:25 +00002359
njnd04b7c62002-10-03 14:05:52 +00002360 needs->core_errors = True;
2361 needs->skin_errors = True;
2362 needs->libc_freeres = True;
2363 needs->sizeof_shadow_block = 1;
2364 needs->basic_block_discards = False;
2365 needs->shadow_regs = True;
2366 needs->command_line_options = True;
2367 needs->client_requests = True;
2368 needs->extended_UCode = True;
2369 needs->syscall_wrapper = True;
2370 needs->alternative_free = True;
2371 needs->sanity_checks = True;
njn25e49d8e72002-09-23 09:36:25 +00002372
njn25e49d8e72002-09-23 09:36:25 +00002373 track->new_mem_startup = & memcheck_new_mem_startup;
2374 track->new_mem_heap = & memcheck_new_mem_heap;
2375 track->new_mem_stack = & SK_(make_writable);
2376 track->new_mem_stack_aligned = & make_writable_aligned;
2377 track->new_mem_stack_signal = & SK_(make_writable);
2378 track->new_mem_brk = & SK_(make_writable);
2379 track->new_mem_mmap = & memcheck_set_perms;
2380
2381 track->copy_mem_heap = & copy_address_range_state;
2382 track->copy_mem_remap = & copy_address_range_state;
2383 track->change_mem_mprotect = & memcheck_set_perms;
2384
2385 track->ban_mem_heap = & SK_(make_noaccess);
2386 track->ban_mem_stack = & SK_(make_noaccess);
2387
2388 track->die_mem_heap = & SK_(make_noaccess);
2389 track->die_mem_stack = & SK_(make_noaccess);
2390 track->die_mem_stack_aligned = & make_noaccess_aligned;
2391 track->die_mem_stack_signal = & SK_(make_noaccess);
2392 track->die_mem_brk = & SK_(make_noaccess);
2393 track->die_mem_munmap = & SK_(make_noaccess);
2394
2395 track->bad_free = & SK_(record_free_error);
2396 track->mismatched_free = & SK_(record_freemismatch_error);
2397
2398 track->pre_mem_read = & check_is_readable;
2399 track->pre_mem_read_asciiz = & check_is_readable_asciiz;
2400 track->pre_mem_write = & check_is_writable;
2401 track->post_mem_write = & SK_(make_readable);
2402
njnd04b7c62002-10-03 14:05:52 +00002403 VG_(register_compact_helper)((Addr) & SK_(helper_value_check4_fail));
2404 VG_(register_compact_helper)((Addr) & SK_(helper_value_check0_fail));
2405 VG_(register_compact_helper)((Addr) & SK_(helper_value_check2_fail));
2406 VG_(register_compact_helper)((Addr) & SK_(helperc_STOREV4));
2407 VG_(register_compact_helper)((Addr) & SK_(helperc_STOREV1));
2408 VG_(register_compact_helper)((Addr) & SK_(helperc_LOADV4));
2409 VG_(register_compact_helper)((Addr) & SK_(helperc_LOADV1));
njn25e49d8e72002-09-23 09:36:25 +00002410
njnd04b7c62002-10-03 14:05:52 +00002411 /* These two made non-compact because 2-byte transactions are rare. */
2412 VG_(register_noncompact_helper)((Addr) & SK_(helperc_STOREV2));
2413 VG_(register_noncompact_helper)((Addr) & SK_(helperc_LOADV2));
2414 VG_(register_noncompact_helper)((Addr) & SK_(fpu_write_check));
2415 VG_(register_noncompact_helper)((Addr) & SK_(fpu_read_check));
2416 VG_(register_noncompact_helper)((Addr) & SK_(helper_value_check1_fail));
njn25e49d8e72002-09-23 09:36:25 +00002417
2418 VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
2419 VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
njnd04b7c62002-10-03 14:05:52 +00002420
2421 init_shadow_memory();
2422 init_prof_mem();
njn25e49d8e72002-09-23 09:36:25 +00002423}
2424
2425/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002426/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002427/*--------------------------------------------------------------------*/