1/*--------------------------------------------------------------------*/
2/*--- Part of the MemCheck skin: Maintain bitmaps of memory, ---*/
3/*--- tracking the accessibility (A) and validity (V) status of ---*/
4/*--- each byte. ---*/
5/*--- vg_memcheck.c ---*/
6/*--------------------------------------------------------------------*/
7
8/*
9 This file is part of Valgrind, an x86 protected-mode emulator
10 designed for debugging and profiling binaries on x86-Unixes.
11
12 Copyright (C) 2000-2002 Julian Seward
13 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
33#include "vg_memcheck_include.h"
34#include "vg_memcheck.h" /* for client requests */
35//#include "vg_profile.c"
36
37/* Define to debug the mem audit system. */
38/* #define VG_DEBUG_MEMORY */
39
40/* Define to debug the memory-leak-detector. */
41/* #define VG_DEBUG_LEAKCHECK */
42
43/* Define to collect detailed performance info. */
44/* #define VG_PROFILE_MEMORY */
45
46#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
47
48/*------------------------------------------------------------*/
49/*--- Command line options ---*/
50/*------------------------------------------------------------*/
51
52Bool SK_(clo_partial_loads_ok) = True;
53Int SK_(clo_freelist_vol) = 1000000;
54Bool SK_(clo_leak_check) = False;
55VgRes SK_(clo_leak_resolution) = Vg_LowRes;
56Bool SK_(clo_show_reachable) = False;
57Bool SK_(clo_workaround_gcc296_bugs) = False;
58Bool SK_(clo_check_addrVs) = True;
59Bool SK_(clo_cleanup) = True;
60
61/*------------------------------------------------------------*/
62/*--- Profiling events ---*/
63/*------------------------------------------------------------*/
64
65typedef
66 enum {
67 VgpCheckMem = VgpFini+1,
68 VgpSetMem
69 }
70 VgpSkinCC;
71
72/*------------------------------------------------------------*/
73/*--- Low-level support for memory checking. ---*/
74/*------------------------------------------------------------*/
75
76/* All reads and writes are checked against a memory map, which
77 records the state of all memory in the process. The memory map is
78 organised like this:
79
80 The top 16 bits of an address are used to index into a top-level
81 map table, containing 65536 entries. Each entry is a pointer to a
82 second-level map, which records the accessibility and validity
83 permissions for the 65536 bytes indexed by the lower 16 bits of the
84 address. Each byte is represented by nine bits, one indicating
85 accessibility, the other eight validity. So each second-level map
86 contains 73728 bytes. This two-level arrangement conveniently
87 divides the 4G address space into 64k lumps, each of size 64k bytes.
88
89 All entries in the primary (top-level) map must point to a valid
90 secondary (second-level) map. Since most of the 4G of address
91 space will not be in use -- ie, not mapped at all -- there is a
92 distinguished secondary map, which indicates `not addressible and
93 not valid' for all bytes. Entries in the primary map for
94 which the entire 64k is not in use at all point at this
95 distinguished map.
96
97 [...] lots of stuff deleted due to out of date-ness
98
99 As a final optimisation, the alignment and address checks for
100 4-byte loads and stores are combined in a neat way. The primary
101 map is extended to have 262144 entries (2^18), rather than 2^16.
102 The top 3/4 of these entries are permanently set to the
103 distinguished secondary map. For a 4-byte load/store, the
104 top-level map is indexed not with (addr >> 16) but instead f(addr),
105 where
106
107 f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
108 = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
109 = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
110
111 ie the lowest two bits are placed above the 16 high address bits.
112 If either of these two bits is nonzero, the address is misaligned;
113 this will select a secondary map from the upper 3/4 of the primary
114 map. Because this is always the distinguished secondary map, a
115 (bogus) address check failure will result. The failure handling
116 code can then figure out whether this is a genuine addr check
117 failure or whether it is a possibly-legitimate access at a
118 misaligned address.
119*/
120
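/* A small worked example of the combined alignment/address trick
   described above.  This is purely illustrative (compiled out with
   #if 0, never called) and uses the same rotate-and-mask computation
   as SK_(helperc_LOADV4) below. */
#if 0
static void show_primary_index_example ( void )
{
   /* Illustrative only.  For a 4-aligned address the two lowest bits
      are zero, so the rotated index stays below 0x10000 and selects a
      normal secondary map.  For a misaligned address, bit 1 of the
      address lands at bit 17 of the index, selecting an entry in the
      upper 3/4 of the primary map -- always the distinguished map. */
   Addr aligned    = 0x08049F40;
   Addr misaligned = 0x08049F42;
   UInt idx_a = ((aligned    >> 16) | (aligned    << 16)) & 0x3FFFF;
   UInt idx_m = ((misaligned >> 16) | (misaligned << 16)) & 0x3FFFF;
   VG_(printf)("aligned:    index = 0x%x\n", idx_a);   /* 0x00804 */
   VG_(printf)("misaligned: index = 0x%x\n", idx_m);   /* 0x20804 */
}
#endif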
121
122/*------------------------------------------------------------*/
123/*--- Crude profiling machinery. ---*/
124/*------------------------------------------------------------*/
125
126#ifdef VG_PROFILE_MEMORY
127
128#define N_PROF_EVENTS 150
129
130static UInt event_ctr[N_PROF_EVENTS];
131
132static void init_prof_mem ( void )
133{
134 Int i;
135 for (i = 0; i < N_PROF_EVENTS; i++)
136 event_ctr[i] = 0;
137}
138
139static void done_prof_mem ( void )
140{
141 Int i;
142 for (i = 0; i < N_PROF_EVENTS; i++) {
143 if ((i % 10) == 0)
144 VG_(printf)("\n");
145 if (event_ctr[i] > 0)
146 VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
147 }
148 VG_(printf)("\n");
149}
150
151#define PROF_EVENT(ev) \
152 do { vg_assert((ev) >= 0 && (ev) < N_PROF_EVENTS); \
153 event_ctr[ev]++; \
154 } while (False);
155
156#else
157
158static void init_prof_mem ( void ) { }
159static void done_prof_mem ( void ) { }
160
161#define PROF_EVENT(ev) /* */
162
163#endif
164
165/* Event index. If just the name of the fn is given, this means the
166 number of calls to the fn. Otherwise it is the specified event.
167
168 10 alloc_secondary_map
169
170 20 get_abit
171 21 get_vbyte
172 22 set_abit
173 23 set_vbyte
174 24 get_abits4_ALIGNED
175 25 get_vbytes4_ALIGNED
176
177 30 set_address_range_perms
178 31 set_address_range_perms(lower byte loop)
179 32 set_address_range_perms(quadword loop)
180 33 set_address_range_perms(upper byte loop)
181
182 35 make_noaccess
183 36 make_writable
184 37 make_readable
185
186 40 copy_address_range_state
187 41 copy_address_range_state(byte loop)
188 42 check_writable
189 43 check_writable(byte loop)
190 44 check_readable
191 45 check_readable(byte loop)
192 46 check_readable_asciiz
193 47 check_readable_asciiz(byte loop)
194
195 50 make_aligned_word_NOACCESS
196 51 make_aligned_word_WRITABLE
197
198 60 helperc_LOADV4
199 61 helperc_STOREV4
200 62 helperc_LOADV2
201 63 helperc_STOREV2
202 64 helperc_LOADV1
203 65 helperc_STOREV1
204
205 70 rim_rd_V4_SLOWLY
206 71 rim_wr_V4_SLOWLY
207 72 rim_rd_V2_SLOWLY
208 73 rim_wr_V2_SLOWLY
209 74 rim_rd_V1_SLOWLY
210 75 rim_wr_V1_SLOWLY
211
212 80 fpu_read
213 81 fpu_read aligned 4
214 82 fpu_read aligned 8
215 83 fpu_read 2
216 84 fpu_read 10
217
218 85 fpu_write
219 86 fpu_write aligned 4
220 87 fpu_write aligned 8
221 88 fpu_write 2
222 89 fpu_write 10
223
224 90 fpu_read_check_SLOWLY
225 91 fpu_read_check_SLOWLY(byte loop)
226 92 fpu_write_check_SLOWLY
227 93 fpu_write_check_SLOWLY(byte loop)
228
229 100 is_plausible_stack_addr
230 101 handle_esp_assignment
231 102 handle_esp_assignment(-4)
232 103 handle_esp_assignment(+4)
233 104 handle_esp_assignment(-12)
234 105 handle_esp_assignment(-8)
235 106 handle_esp_assignment(+16)
236 107 handle_esp_assignment(+12)
237 108 handle_esp_assignment(0)
238 109 handle_esp_assignment(+8)
239 110 handle_esp_assignment(-16)
240 111 handle_esp_assignment(+20)
241 112 handle_esp_assignment(-20)
242 113 handle_esp_assignment(+24)
243 114 handle_esp_assignment(-24)
244
245 120 vg_handle_esp_assignment_SLOWLY
246 121 vg_handle_esp_assignment_SLOWLY(normal; move down)
247 122 vg_handle_esp_assignment_SLOWLY(normal; move up)
248 123 vg_handle_esp_assignment_SLOWLY(normal)
249 124 vg_handle_esp_assignment_SLOWLY(>= HUGE_DELTA)
250*/
251
252/*------------------------------------------------------------*/
253/*--- Function declarations. ---*/
254/*------------------------------------------------------------*/
255
256static UInt vgmext_rd_V4_SLOWLY ( Addr a );
257static UInt vgmext_rd_V2_SLOWLY ( Addr a );
258static UInt vgmext_rd_V1_SLOWLY ( Addr a );
259static void vgmext_wr_V4_SLOWLY ( Addr a, UInt vbytes );
260static void vgmext_wr_V2_SLOWLY ( Addr a, UInt vbytes );
261static void vgmext_wr_V1_SLOWLY ( Addr a, UInt vbytes );
262static void fpu_read_check_SLOWLY ( Addr addr, Int size );
263static void fpu_write_check_SLOWLY ( Addr addr, Int size );
264
265/*------------------------------------------------------------*/
266/*--- Data defns. ---*/
267/*------------------------------------------------------------*/
268
269typedef
270 struct {
271 UChar abits[8192];
272 UChar vbyte[65536];
273 }
274 SecMap;
275
276static SecMap* primary_map[ /*65536*/ 262144 ];
277static SecMap distinguished_secondary_map;
278
279#define IS_DISTINGUISHED_SM(smap) \
280 ((smap) == &distinguished_secondary_map)
281
282#define ENSURE_MAPPABLE(addr,caller) \
283 do { \
284 if (IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) { \
285 primary_map[(addr) >> 16] = alloc_secondary_map(caller); \
286 /* VG_(printf)("new 2map because of %p\n", addr); */ \
287 } \
288 } while(0)
289
290#define BITARR_SET(aaa_p,iii_p) \
291 do { \
292 UInt iii = (UInt)iii_p; \
293 UChar* aaa = (UChar*)aaa_p; \
294 aaa[iii >> 3] |= (1 << (iii & 7)); \
295 } while (0)
296
297#define BITARR_CLEAR(aaa_p,iii_p) \
298 do { \
299 UInt iii = (UInt)iii_p; \
300 UChar* aaa = (UChar*)aaa_p; \
301 aaa[iii >> 3] &= ~(1 << (iii & 7)); \
302 } while (0)
303
304#define BITARR_TEST(aaa_p,iii_p) \
305 (0 != (((UChar*)aaa_p)[ ((UInt)iii_p) >> 3 ] \
306 & (1 << (((UInt)iii_p) & 7)))) \
307
308
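/* Example: for an offset of 0x1234, BITARR_TEST(sm->abits, 0x1234)
   examines abits[0x1234 >> 3] == abits[0x246] and tests bit
   (0x1234 & 7) == 4 within it; each abits[] byte therefore holds the
   A bits of eight consecutive addresses. */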
309#define VGM_BIT_VALID 0
310#define VGM_BIT_INVALID 1
311
312#define VGM_NIBBLE_VALID 0
313#define VGM_NIBBLE_INVALID 0xF
314
315#define VGM_BYTE_VALID 0
316#define VGM_BYTE_INVALID 0xFF
317
318#define VGM_WORD_VALID 0
319#define VGM_WORD_INVALID 0xFFFFFFFF
320
321#define VGM_EFLAGS_VALID 0xFFFFFFFE
322#define VGM_EFLAGS_INVALID 0xFFFFFFFF /* not used */
323
324
325static void init_shadow_memory ( void )
326{
327 Int i;
328
329 for (i = 0; i < 8192; i++) /* Invalid address */
330 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
331 for (i = 0; i < 65536; i++) /* Invalid Value */
332 distinguished_secondary_map.vbyte[i] = VGM_BYTE_INVALID;
333
334 /* These entries gradually get overwritten as the used address
335 space expands. */
336 for (i = 0; i < 65536; i++)
337 primary_map[i] = &distinguished_secondary_map;
338
339 /* These ones should never change; it's a bug in Valgrind if they do. */
340 for (i = 65536; i < 262144; i++)
341 primary_map[i] = &distinguished_secondary_map;
342}
343
344void SK_(post_clo_init) ( void )
345{
346}
347
348void SK_(fini) ( void )
349{
350 VG_(print_malloc_stats)();
351
352 if (VG_(clo_verbosity) == 1) {
353 if (!SK_(clo_leak_check))
354 VG_(message)(Vg_UserMsg,
355 "For a detailed leak analysis, rerun with: --leak-check=yes");
356
357 VG_(message)(Vg_UserMsg,
358 "For counts of detected errors, rerun with: -v");
359 }
360 if (SK_(clo_leak_check)) SK_(detect_memory_leaks)();
361
362 done_prof_mem();
363
364 if (0) {
365 VG_(message)(Vg_DebugMsg,
366 "------ Valgrind's client block stats follow ---------------" );
367 SK_(show_client_block_stats)();
368 }
369}
370
371/*------------------------------------------------------------*/
372/*--- Basic bitmap management, reading and writing. ---*/
373/*------------------------------------------------------------*/
374
375/* Allocate and initialise a secondary map. */
376
377static SecMap* alloc_secondary_map ( __attribute__ ((unused))
378 Char* caller )
379{
380 SecMap* map;
381 UInt i;
382 PROF_EVENT(10);
383
384 /* Mark all bytes as invalid access and invalid value. */
385
386 /* It just happens that a SecMap occupies exactly 18 pages --
387 although this isn't important, so the following assert is
388 spurious. */
389 vg_assert(0 == (sizeof(SecMap) % VKI_BYTES_PER_PAGE));
390 map = VG_(get_memory_from_mmap)( sizeof(SecMap), caller );
391
392 for (i = 0; i < 8192; i++)
393 map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */
394 for (i = 0; i < 65536; i++)
395 map->vbyte[i] = VGM_BYTE_INVALID; /* Invalid Value */
396
397 /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
398 return map;
399}
400
401
402/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
403
404static __inline__ UChar get_abit ( Addr a )
405{
406 SecMap* sm = primary_map[a >> 16];
407 UInt sm_off = a & 0xFFFF;
408 PROF_EVENT(20);
409# if 0
410 if (IS_DISTINGUISHED_SM(sm))
411 VG_(message)(Vg_DebugMsg,
412 "accessed distinguished 2ndary (A)map! 0x%x\n", a);
413# endif
414 return BITARR_TEST(sm->abits, sm_off)
415 ? VGM_BIT_INVALID : VGM_BIT_VALID;
416}
417
418static __inline__ UChar get_vbyte ( Addr a )
419{
420 SecMap* sm = primary_map[a >> 16];
421 UInt sm_off = a & 0xFFFF;
422 PROF_EVENT(21);
423# if 0
424 if (IS_DISTINGUISHED_SM(sm))
425 VG_(message)(Vg_DebugMsg,
426 "accessed distinguished 2ndary (V)map! 0x%x\n", a);
427# endif
428 return sm->vbyte[sm_off];
429}
430
431static __inline__ void set_abit ( Addr a, UChar abit )
432{
433 SecMap* sm;
434 UInt sm_off;
435 PROF_EVENT(22);
436 ENSURE_MAPPABLE(a, "set_abit");
437 sm = primary_map[a >> 16];
438 sm_off = a & 0xFFFF;
439 if (abit)
440 BITARR_SET(sm->abits, sm_off);
441 else
442 BITARR_CLEAR(sm->abits, sm_off);
443}
444
445static __inline__ void set_vbyte ( Addr a, UChar vbyte )
446{
447 SecMap* sm;
448 UInt sm_off;
449 PROF_EVENT(23);
450 ENSURE_MAPPABLE(a, "set_vbyte");
451 sm = primary_map[a >> 16];
452 sm_off = a & 0xFFFF;
453 sm->vbyte[sm_off] = vbyte;
454}
455
456
457/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
458
459static __inline__ UChar get_abits4_ALIGNED ( Addr a )
460{
461 SecMap* sm;
462 UInt sm_off;
463 UChar abits8;
464 PROF_EVENT(24);
465# ifdef VG_DEBUG_MEMORY
466 vg_assert(IS_ALIGNED4_ADDR(a));
467# endif
468 sm = primary_map[a >> 16];
469 sm_off = a & 0xFFFF;
470 abits8 = sm->abits[sm_off >> 3];
471 abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
472 abits8 &= 0x0F;
473 return abits8;
474}
475
476static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
477{
478 SecMap* sm = primary_map[a >> 16];
479 UInt sm_off = a & 0xFFFF;
480 PROF_EVENT(25);
481# ifdef VG_DEBUG_MEMORY
482 vg_assert(IS_ALIGNED4_ADDR(a));
483# endif
484 return ((UInt*)(sm->vbyte))[sm_off >> 2];
485}
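/* Note: each abits[] byte covers eight consecutive addresses, so for
   a 4-aligned address the (a & 4) shift above selects the nibble
   holding the A bits of a, a+1, a+2 and a+3; a returned value of
   VGM_NIBBLE_VALID (zero) means all four bytes are addressible. */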
486
487
488/*------------------------------------------------------------*/
489/*--- Setting permissions over address ranges. ---*/
490/*------------------------------------------------------------*/
491
492static void set_address_range_perms ( Addr a, UInt len,
493 UInt example_a_bit,
494 UInt example_v_bit )
495{
496 UChar vbyte, abyte8;
497 UInt vword4, sm_off;
498 SecMap* sm;
499
500 PROF_EVENT(30);
501
502 if (len == 0)
503 return;
504
505 if (len > 100 * 1000 * 1000) {
506 VG_(message)(Vg_UserMsg,
507 "Warning: set address range perms: "
508 "large range %u, a %d, v %d",
509 len, example_a_bit, example_v_bit );
510 }
511
512 VGP_PUSHCC(VgpSetMem);
513
514 /* Requests to change permissions of huge address ranges may
515 indicate bugs in our machinery. 30,000,000 is arbitrary, but so
516 far all legitimate requests have fallen beneath that size. */
517 /* 4 Mar 02: this is just stupid; get rid of it. */
518 /* vg_assert(len < 30000000); */
519
520 /* Check the permissions make sense. */
521 vg_assert(example_a_bit == VGM_BIT_VALID
522 || example_a_bit == VGM_BIT_INVALID);
523 vg_assert(example_v_bit == VGM_BIT_VALID
524 || example_v_bit == VGM_BIT_INVALID);
525 if (example_a_bit == VGM_BIT_INVALID)
526 vg_assert(example_v_bit == VGM_BIT_INVALID);
527
528 /* The validity bits to write. */
529 vbyte = example_v_bit==VGM_BIT_VALID
530 ? VGM_BYTE_VALID : VGM_BYTE_INVALID;
531
532 /* In order that we can charge through the address space at 8
533 bytes/main-loop iteration, make up some perms. */
534 abyte8 = (example_a_bit << 7)
535 | (example_a_bit << 6)
536 | (example_a_bit << 5)
537 | (example_a_bit << 4)
538 | (example_a_bit << 3)
539 | (example_a_bit << 2)
540 | (example_a_bit << 1)
541 | (example_a_bit << 0);
542 vword4 = (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;
543
544# ifdef VG_DEBUG_MEMORY
545 /* Do it ... */
546 while (True) {
547 PROF_EVENT(31);
548 if (len == 0) break;
549 set_abit ( a, example_a_bit );
550 set_vbyte ( a, vbyte );
551 a++;
552 len--;
553 }
554
555# else
556 /* Slowly do parts preceding 8-byte alignment. */
557 while (True) {
558 PROF_EVENT(31);
559 if (len == 0) break;
560 if ((a % 8) == 0) break;
561 set_abit ( a, example_a_bit );
562 set_vbyte ( a, vbyte );
563 a++;
564 len--;
565 }
566
567 if (len == 0) {
568 VGP_POPCC(VgpSetMem);
569 return;
570 }
571 vg_assert((a % 8) == 0 && len > 0);
572
573 /* Once aligned, go fast. */
574 while (True) {
575 PROF_EVENT(32);
576 if (len < 8) break;
577 ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
578 sm = primary_map[a >> 16];
579 sm_off = a & 0xFFFF;
580 sm->abits[sm_off >> 3] = abyte8;
581 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
582 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
583 a += 8;
584 len -= 8;
585 }
586
587 if (len == 0) {
588 VGP_POPCC(VgpSetMem);
589 return;
590 }
591 vg_assert((a % 8) == 0 && len > 0 && len < 8);
592
593 /* Finish the upper fragment. */
594 while (True) {
595 PROF_EVENT(33);
596 if (len == 0) break;
597 set_abit ( a, example_a_bit );
598 set_vbyte ( a, vbyte );
599 a++;
600 len--;
601 }
602# endif
603
604 /* Check that zero page and highest page have not been written to
605 -- this could happen with buggy syscall wrappers. Today
606 (2001-04-26) had precisely such a problem with __NR_setitimer. */
607 vg_assert(SK_(cheap_sanity_check)());
608 VGP_POPCC(VgpSetMem);
609}
610
611/* Set permissions for address ranges ... */
612
613void SK_(make_noaccess) ( Addr a, UInt len )
614{
615 PROF_EVENT(35);
616 DEBUG("SK_(make_noaccess)(%p, %x)\n", a, len);
617 set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
618}
619
620void SK_(make_writable) ( Addr a, UInt len )
621{
622 PROF_EVENT(36);
623 DEBUG("SK_(make_writable)(%p, %x)\n", a, len);
624 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
625}
626
627void SK_(make_readable) ( Addr a, UInt len )
628{
629 PROF_EVENT(37);
630 DEBUG("SK_(make_readable)(%p, 0x%x)\n", a, len);
631 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
632}
633
634/* Block-copy permissions (needed for implementing realloc()). */
635
636static void copy_address_range_state ( Addr src, Addr dst, UInt len )
637{
638 UInt i;
639
640 DEBUG("copy_address_range_state\n");
641
642 PROF_EVENT(40);
643 for (i = 0; i < len; i++) {
644 UChar abit = get_abit ( src+i );
645 UChar vbyte = get_vbyte ( src+i );
646 PROF_EVENT(41);
647 set_abit ( dst+i, abit );
648 set_vbyte ( dst+i, vbyte );
649 }
650}
651
652
653/* Check permissions for address range. If inadequate permissions
654 exist, *bad_addr is set to the offending address, so the caller can
655 know what it is. */
656
657Bool SK_(check_writable) ( Addr a, UInt len, Addr* bad_addr )
658{
659 UInt i;
660 UChar abit;
661 PROF_EVENT(42);
662 for (i = 0; i < len; i++) {
663 PROF_EVENT(43);
664 abit = get_abit(a);
665 if (abit == VGM_BIT_INVALID) {
666 if (bad_addr != NULL) *bad_addr = a;
667 return False;
668 }
669 a++;
670 }
671 return True;
672}
673
674Bool SK_(check_readable) ( Addr a, UInt len, Addr* bad_addr )
675{
676 UInt i;
677 UChar abit;
678 UChar vbyte;
679
680 PROF_EVENT(44);
681 DEBUG("SK_(check_readable)\n");
682 for (i = 0; i < len; i++) {
683 abit = get_abit(a);
684 vbyte = get_vbyte(a);
685 PROF_EVENT(45);
686 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
687 if (bad_addr != NULL) *bad_addr = a;
688 return False;
689 }
690 a++;
691 }
692 return True;
693}
694
695
696/* Check a zero-terminated ascii string. Tricky -- don't want to
697 examine the actual bytes, to find the end, until we're sure it is
698 safe to do so. */
699
700Bool SK_(check_readable_asciiz) ( Addr a, Addr* bad_addr )
701{
702 UChar abit;
703 UChar vbyte;
704 PROF_EVENT(46);
705 DEBUG("SK_(check_readable_asciiz)\n");
706 while (True) {
707 PROF_EVENT(47);
708 abit = get_abit(a);
709 vbyte = get_vbyte(a);
710 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
711 if (bad_addr != NULL) *bad_addr = a;
712 return False;
713 }
714 /* Ok, a is safe to read. */
715 if (* ((UChar*)a) == 0) return True;
716 a++;
717 }
718}
719
720
721/*------------------------------------------------------------*/
722/*--- Memory event handlers ---*/
723/*------------------------------------------------------------*/
724
725/* Setting permissions for aligned words. This supports fast stack
726 operations. */
727
728static void make_noaccess_aligned ( Addr a, UInt len )
729{
730 SecMap* sm;
731 UInt sm_off;
732 UChar mask;
733 Addr a_past_end = a + len;
734
735 VGP_PUSHCC(VgpSetMem);
736
737 PROF_EVENT(50);
738# ifdef VG_DEBUG_MEMORY
739 vg_assert(IS_ALIGNED4_ADDR(a));
740 vg_assert(IS_ALIGNED4_ADDR(len));
741# endif
742
743 for ( ; a < a_past_end; a += 4) {
744 ENSURE_MAPPABLE(a, "make_noaccess_aligned");
745 sm = primary_map[a >> 16];
746 sm_off = a & 0xFFFF;
747 ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
748 mask = 0x0F;
749 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
750 /* mask now contains 1s where we wish to make address bits
751 invalid (1s). */
752 sm->abits[sm_off >> 3] |= mask;
753 }
754 VGP_POPCC(VgpSetMem);
755}
756
757static void make_writable_aligned ( Addr a, UInt len )
758{
759 SecMap* sm;
760 UInt sm_off;
761 UChar mask;
762 Addr a_past_end = a + len;
763
764 VGP_PUSHCC(VgpSetMem);
765
766 PROF_EVENT(51);
767# ifdef VG_DEBUG_MEMORY
768 vg_assert(IS_ALIGNED4_ADDR(a));
769 vg_assert(IS_ALIGNED4_ADDR(len));
770# endif
771
772 for ( ; a < a_past_end; a += 4) {
773 ENSURE_MAPPABLE(a, "make_writable_aligned");
774 sm = primary_map[a >> 16];
775 sm_off = a & 0xFFFF;
776 ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
777 mask = 0x0F;
778 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
779 /* mask now contains 1s where we wish to make address bits
780 valid (0s). */
781 sm->abits[sm_off >> 3] &= ~mask;
782 }
783 VGP_POPCC(VgpSetMem);
784}
785
786
787static
788void check_is_writable ( CorePart part, ThreadState* tst,
789 Char* s, UInt base, UInt size )
790{
791 Bool ok;
792 Addr bad_addr;
793
794 VGP_PUSHCC(VgpCheckMem);
795
796 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
797 base,base+size-1); */
798 ok = SK_(check_writable) ( base, size, &bad_addr );
799 if (!ok) {
800 switch (part) {
801 case Vg_CoreSysCall:
802 SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/True, s );
803 break;
804
805 case Vg_CorePThread:
806 case Vg_CoreSignal:
807 SK_(record_core_mem_error)( tst, /*isWrite=*/True, s );
808 break;
809
810 default:
811 VG_(panic)("check_is_readable: Unknown or unexpected CorePart");
812 }
813 }
814
815 VGP_POPCC(VgpCheckMem);
816}
817
818static
819void check_is_readable ( CorePart part, ThreadState* tst,
820 Char* s, UInt base, UInt size )
821{
822 Bool ok;
823 Addr bad_addr;
824
825 VGP_PUSHCC(VgpCheckMem);
826
827 /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
828 base,base+size-1); */
829 ok = SK_(check_readable) ( base, size, &bad_addr );
830 if (!ok) {
831 switch (part) {
832 case Vg_CoreSysCall:
833 SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/False, s );
834 break;
835
836 case Vg_CorePThread:
837 SK_(record_core_mem_error)( tst, /*isWrite=*/False, s );
838 break;
839
840 /* If we're being asked to jump to a silly address, record an error
841 message before potentially crashing the entire system. */
842 case Vg_CoreTranslate:
843 SK_(record_jump_error)( tst, bad_addr );
844 break;
845
846 default:
847 VG_(panic)("check_is_readable: Unknown or unexpected CorePart");
848 }
849 }
850 VGP_POPCC(VgpCheckMem);
851}
852
853static
854void check_is_readable_asciiz ( CorePart part, ThreadState* tst,
855 Char* s, UInt str )
856{
857 Bool ok = True;
858 Addr bad_addr;
859 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
860
861 VGP_PUSHCC(VgpCheckMem);
862
863 vg_assert(part == Vg_CoreSysCall);
864 ok = SK_(check_readable_asciiz) ( (Addr)str, &bad_addr );
865 if (!ok) {
866 SK_(record_param_error) ( tst, bad_addr, /*is_writable =*/False, s );
867 }
868
869 VGP_POPCC(VgpCheckMem);
870}
871
872
873static
874void memcheck_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
875{
876 // JJJ: this ignores the permissions and just makes it readable, like the
877 // old code did, AFAICT
878 DEBUG("new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
879 SK_(make_readable)(a, len);
880}
881
882static
883void memcheck_new_mem_heap ( Addr a, UInt len, Bool is_inited )
884{
885 if (is_inited) {
886 SK_(make_readable)(a, len);
887 } else {
888 SK_(make_writable)(a, len);
889 }
890}
891
892static
893void memcheck_set_perms (Addr a, UInt len,
894 Bool nn, Bool rr, Bool ww, Bool xx)
895{
896 DEBUG("memcheck_set_perms(%p, %u, nn=%u, rr=%u ww=%u, xx=%u)\n",
897 a, len, nn, rr, ww, xx);
898 if (rr) SK_(make_readable)(a, len);
899 else if (ww) SK_(make_writable)(a, len);
900 else SK_(make_noaccess)(a, len);
901}
902
903
904/*------------------------------------------------------------*/
905/*--- Functions called directly from generated code. ---*/
906/*------------------------------------------------------------*/
907
908static __inline__ UInt rotateRight16 ( UInt x )
909{
910 /* Amazingly, gcc turns this into a single rotate insn. */
911 return (x >> 16) | (x << 16);
912}
913
914
915static __inline__ UInt shiftRight16 ( UInt x )
916{
917 return x >> 16;
918}
919
920
921/* Read/write 1/2/4 sized V bytes, and emit an address error if
922 needed. */
923
924/* VG_(helperc_{LD,ST}V{1,2,4}) handle the common case fast.
925 Under all other circumstances, it defers to the relevant _SLOWLY
926 function, which can handle all situations.
927*/
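/* In the 4-byte helpers the fast path requires the abits nibble
   covering a..a+3 to equal VGM_NIBBLE_VALID; a misaligned access is
   routed, via the rotate trick described at the top of this file, to
   the distinguished secondary map (all A bits invalid) and so always
   falls into the slow case.  The 1- and 2-byte helpers take the fast
   path only when the whole abits byte covering the address is
   VGM_BYTE_VALID. */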
928__attribute__ ((regparm(1)))
929UInt SK_(helperc_LOADV4) ( Addr a )
930{
931# ifdef VG_DEBUG_MEMORY
932 return vgmext_rd_V4_SLOWLY(a);
933# else
934 UInt sec_no = rotateRight16(a) & 0x3FFFF;
935 SecMap* sm = primary_map[sec_no];
936 UInt a_off = (a & 0xFFFF) >> 3;
937 UChar abits = sm->abits[a_off];
938 abits >>= (a & 4);
939 abits &= 15;
940 PROF_EVENT(60);
941 if (abits == VGM_NIBBLE_VALID) {
942 /* Handle common case quickly: a is suitably aligned, is mapped,
943 and is addressible. */
944 UInt v_off = a & 0xFFFF;
945 return ((UInt*)(sm->vbyte))[ v_off >> 2 ];
946 } else {
947 /* Slow but general case. */
948 return vgmext_rd_V4_SLOWLY(a);
949 }
950# endif
951}
952
953__attribute__ ((regparm(2)))
954void SK_(helperc_STOREV4) ( Addr a, UInt vbytes )
955{
956# ifdef VG_DEBUG_MEMORY
957 vgmext_wr_V4_SLOWLY(a, vbytes);
958# else
959 UInt sec_no = rotateRight16(a) & 0x3FFFF;
960 SecMap* sm = primary_map[sec_no];
961 UInt a_off = (a & 0xFFFF) >> 3;
962 UChar abits = sm->abits[a_off];
963 abits >>= (a & 4);
964 abits &= 15;
965 PROF_EVENT(61);
966 if (abits == VGM_NIBBLE_VALID) {
967 /* Handle common case quickly: a is suitably aligned, is mapped,
968 and is addressible. */
969 UInt v_off = a & 0xFFFF;
970 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = vbytes;
971 } else {
972 /* Slow but general case. */
973 vgmext_wr_V4_SLOWLY(a, vbytes);
974 }
975# endif
976}
977
978__attribute__ ((regparm(1)))
979UInt SK_(helperc_LOADV2) ( Addr a )
980{
981# ifdef VG_DEBUG_MEMORY
982 return vgmext_rd_V2_SLOWLY(a);
983# else
984 UInt sec_no = rotateRight16(a) & 0x1FFFF;
985 SecMap* sm = primary_map[sec_no];
986 UInt a_off = (a & 0xFFFF) >> 3;
987 PROF_EVENT(62);
988 if (sm->abits[a_off] == VGM_BYTE_VALID) {
989 /* Handle common case quickly. */
990 UInt v_off = a & 0xFFFF;
991 return 0xFFFF0000
992 |
993 (UInt)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
994 } else {
995 /* Slow but general case. */
996 return vgmext_rd_V2_SLOWLY(a);
997 }
998# endif
999}
1000
1001__attribute__ ((regparm(2)))
1002void SK_(helperc_STOREV2) ( Addr a, UInt vbytes )
1003{
1004# ifdef VG_DEBUG_MEMORY
1005 vgmext_wr_V2_SLOWLY(a, vbytes);
1006# else
1007 UInt sec_no = rotateRight16(a) & 0x1FFFF;
1008 SecMap* sm = primary_map[sec_no];
1009 UInt a_off = (a & 0xFFFF) >> 3;
1010 PROF_EVENT(63);
1011 if (sm->abits[a_off] == VGM_BYTE_VALID) {
1012 /* Handle common case quickly. */
1013 UInt v_off = a & 0xFFFF;
1014 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = vbytes & 0x0000FFFF;
1015 } else {
1016 /* Slow but general case. */
1017 vgmext_wr_V2_SLOWLY(a, vbytes);
1018 }
1019# endif
1020}
1021
1022__attribute__ ((regparm(1)))
1023UInt SK_(helperc_LOADV1) ( Addr a )
1024{
1025# ifdef VG_DEBUG_MEMORY
1026 return vgmext_rd_V1_SLOWLY(a);
1027# else
1028 UInt sec_no = shiftRight16(a);
1029 SecMap* sm = primary_map[sec_no];
1030 UInt a_off = (a & 0xFFFF) >> 3;
1031 PROF_EVENT(64);
1032 if (sm->abits[a_off] == VGM_BYTE_VALID) {
1033 /* Handle common case quickly. */
1034 UInt v_off = a & 0xFFFF;
1035 return 0xFFFFFF00
1036 |
1037 (UInt)( ((UChar*)(sm->vbyte))[ v_off ] );
1038 } else {
1039 /* Slow but general case. */
1040 return vgmext_rd_V1_SLOWLY(a);
1041 }
1042# endif
1043}
1044
1045__attribute__ ((regparm(2)))
1046void SK_(helperc_STOREV1) ( Addr a, UInt vbytes )
1047{
1048# ifdef VG_DEBUG_MEMORY
1049 vgmext_wr_V1_SLOWLY(a, vbytes);
1050# else
1051 UInt sec_no = shiftRight16(a);
1052 SecMap* sm = primary_map[sec_no];
1053 UInt a_off = (a & 0xFFFF) >> 3;
1054 PROF_EVENT(65);
1055 if (sm->abits[a_off] == VGM_BYTE_VALID) {
1056 /* Handle common case quickly. */
1057 UInt v_off = a & 0xFFFF;
1058 ((UChar*)(sm->vbyte))[ v_off ] = vbytes & 0x000000FF;
1059 } else {
1060 /* Slow but general case. */
1061 vgmext_wr_V1_SLOWLY(a, vbytes);
1062 }
1063# endif
1064}
1065
1066
1067/*------------------------------------------------------------*/
1068/*--- Fallback functions to handle cases that the above ---*/
1069/*--- VG_(helperc_{LD,ST}V{1,2,4}) can't manage. ---*/
1070/*------------------------------------------------------------*/
1071
1072static UInt vgmext_rd_V4_SLOWLY ( Addr a )
1073{
1074 Bool a0ok, a1ok, a2ok, a3ok;
1075 UInt vb0, vb1, vb2, vb3;
1076
1077 PROF_EVENT(70);
1078
1079 /* First establish independently the addressibility of the 4 bytes
1080 involved. */
1081 a0ok = get_abit(a+0) == VGM_BIT_VALID;
1082 a1ok = get_abit(a+1) == VGM_BIT_VALID;
1083 a2ok = get_abit(a+2) == VGM_BIT_VALID;
1084 a3ok = get_abit(a+3) == VGM_BIT_VALID;
1085
1086 /* Also get the validity bytes for the address. */
1087 vb0 = (UInt)get_vbyte(a+0);
1088 vb1 = (UInt)get_vbyte(a+1);
1089 vb2 = (UInt)get_vbyte(a+2);
1090 vb3 = (UInt)get_vbyte(a+3);
1091
1092 /* Now distinguish 3 cases */
1093
1094 /* Case 1: the address is completely valid, so:
1095 - no addressing error
1096 - return V bytes as read from memory
1097 */
1098 if (a0ok && a1ok && a2ok && a3ok) {
1099 UInt vw = VGM_WORD_INVALID;
1100 vw <<= 8; vw |= vb3;
1101 vw <<= 8; vw |= vb2;
1102 vw <<= 8; vw |= vb1;
1103 vw <<= 8; vw |= vb0;
1104 return vw;
1105 }
1106
1107 /* Case 2: the address is completely invalid.
1108 - emit addressing error
1109 - return V word indicating validity.
1110 This sounds strange, but if we make loads from invalid addresses
1111 give invalid data, we also risk producing a number of confusing
1112 undefined-value errors later, which obscures the fact that the
1113 error arose in the first place from an invalid address.
1114 */
1115 /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
1116 if (!SK_(clo_partial_loads_ok)
1117 || ((a & 3) != 0)
1118 || (!a0ok && !a1ok && !a2ok && !a3ok)) {
1119 SK_(record_address_error)( a, 4, False );
1120 return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
1121 | (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
1122 }
1123
1124 /* Case 3: the address is partially valid.
1125 - no addressing error
1126 - returned V word is invalid where the address is invalid,
1127 and contains V bytes from memory otherwise.
1128 Case 3 is only allowed if SK_(clo_partial_loads_ok) is True
1129 (which is the default), and the address is 4-aligned.
1130 If not, Case 2 will have applied.
1131 */
1132 vg_assert(SK_(clo_partial_loads_ok));
1133 {
1134 UInt vw = VGM_WORD_INVALID;
1135 vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
1136 vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
1137 vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
1138 vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
1139 return vw;
1140 }
1141}
1142
1143static void vgmext_wr_V4_SLOWLY ( Addr a, UInt vbytes )
1144{
1145 /* Check the address for validity. */
1146 Bool aerr = False;
1147 PROF_EVENT(71);
1148
1149 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1150 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1151 if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
1152 if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
1153
1154 /* Store the V bytes, remembering to do it little-endian-ly. */
1155 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1156 set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
1157 set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
1158 set_vbyte( a+3, vbytes & 0x000000FF );
1159
1160 /* If an address error has happened, report it. */
1161 if (aerr)
1162 SK_(record_address_error)( a, 4, True );
1163}
1164
1165static UInt vgmext_rd_V2_SLOWLY ( Addr a )
1166{
1167 /* Check the address for validity. */
1168 UInt vw = VGM_WORD_INVALID;
1169 Bool aerr = False;
1170 PROF_EVENT(72);
1171
1172 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1173 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1174
1175 /* Fetch the V bytes, remembering to do it little-endian-ly. */
1176 vw <<= 8; vw |= (UInt)get_vbyte(a+1);
1177 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1178
1179 /* If an address error has happened, report it. */
1180 if (aerr) {
1181 SK_(record_address_error)( a, 2, False );
1182 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1183 | (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
1184 }
1185 return vw;
1186}
1187
1188static void vgmext_wr_V2_SLOWLY ( Addr a, UInt vbytes )
1189{
1190 /* Check the address for validity. */
1191 Bool aerr = False;
1192 PROF_EVENT(73);
1193
1194 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1195 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1196
1197 /* Store the V bytes, remembering to do it little-endian-ly. */
1198 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1199 set_vbyte( a+1, vbytes & 0x000000FF );
1200
1201 /* If an address error has happened, report it. */
1202 if (aerr)
1203 SK_(record_address_error)( a, 2, True );
1204}
1205
1206static UInt vgmext_rd_V1_SLOWLY ( Addr a )
1207{
1208 /* Check the address for validity. */
1209 UInt vw = VGM_WORD_INVALID;
1210 Bool aerr = False;
1211 PROF_EVENT(74);
1212
1213 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1214
1215 /* Fetch the V byte. */
1216 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1217
1218 /* If an address error has happened, report it. */
1219 if (aerr) {
1220 SK_(record_address_error)( a, 1, False );
1221 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1222 | (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
1223 }
1224 return vw;
1225}
1226
1227static void vgmext_wr_V1_SLOWLY ( Addr a, UInt vbytes )
1228{
1229 /* Check the address for validity. */
1230 Bool aerr = False;
1231 PROF_EVENT(75);
1232 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1233
1234 /* Store the V byte. */
1235 set_vbyte( a+0, vbytes & 0x000000FF );
1236
1237 /* If an address error has happened, report it. */
1238 if (aerr)
1239 SK_(record_address_error)( a, 1, True );
1240}
1241
1242
1243/* ---------------------------------------------------------------------
1244 Called from generated code, or from the assembly helpers.
1245 Handlers for value check failures.
1246 ------------------------------------------------------------------ */
1247
1248void SK_(helperc_value_check0_fail) ( void )
1249{
1250 SK_(record_value_error) ( 0 );
1251}
1252
1253void SK_(helperc_value_check1_fail) ( void )
1254{
1255 SK_(record_value_error) ( 1 );
1256}
1257
1258void SK_(helperc_value_check2_fail) ( void )
1259{
1260 SK_(record_value_error) ( 2 );
1261}
1262
1263void SK_(helperc_value_check4_fail) ( void )
1264{
1265 SK_(record_value_error) ( 4 );
1266}
1267
1268
1269/* ---------------------------------------------------------------------
1270 FPU load and store checks, called from generated code.
1271 ------------------------------------------------------------------ */
1272
1273__attribute__ ((regparm(2)))
1274void SK_(fpu_read_check) ( Addr addr, Int size )
1275{
1276 /* Ensure the read area is both addressible and valid (ie,
1277 readable). If there's an address error, don't report a value
1278 error too; but if there isn't an address error, check for a
1279 value error.
1280
1281 Try to be reasonably fast on the common case; wimp out and defer
1282 to fpu_read_check_SLOWLY for everything else. */
1283
1284 SecMap* sm;
1285 UInt sm_off, v_off, a_off;
1286 Addr addr4;
1287
1288 PROF_EVENT(80);
1289
1290# ifdef VG_DEBUG_MEMORY
1291 fpu_read_check_SLOWLY ( addr, size );
1292# else
1293
1294 if (size == 4) {
1295 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
1296 PROF_EVENT(81);
1297 /* Properly aligned. */
1298 sm = primary_map[addr >> 16];
1299 sm_off = addr & 0xFFFF;
1300 a_off = sm_off >> 3;
1301 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
1302 /* Properly aligned and addressible. */
1303 v_off = addr & 0xFFFF;
1304 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1305 goto slow4;
1306 /* Properly aligned, addressible and with valid data. */
1307 return;
1308 slow4:
1309 fpu_read_check_SLOWLY ( addr, 4 );
1310 return;
1311 }
1312
1313 if (size == 8) {
1314 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
1315 PROF_EVENT(82);
1316 /* Properly aligned. Do it in two halves. */
1317 addr4 = addr + 4;
1318 /* First half. */
1319 sm = primary_map[addr >> 16];
1320 sm_off = addr & 0xFFFF;
1321 a_off = sm_off >> 3;
1322 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1323 /* First half properly aligned and addressible. */
1324 v_off = addr & 0xFFFF;
1325 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1326 goto slow8;
1327 /* Second half. */
1328 sm = primary_map[addr4 >> 16];
1329 sm_off = addr4 & 0xFFFF;
1330 a_off = sm_off >> 3;
1331 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1332 /* Second half properly aligned and addressible. */
1333 v_off = addr4 & 0xFFFF;
1334 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1335 goto slow8;
1336 /* Both halves properly aligned, addressible and with valid
1337 data. */
1338 return;
1339 slow8:
1340 fpu_read_check_SLOWLY ( addr, 8 );
1341 return;
1342 }
1343
1344 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
1345 cases go quickly. */
1346 if (size == 2) {
1347 PROF_EVENT(83);
1348 fpu_read_check_SLOWLY ( addr, 2 );
1349 return;
1350 }
1351
1352 if (size == 10) {
1353 PROF_EVENT(84);
1354 fpu_read_check_SLOWLY ( addr, 10 );
1355 return;
1356 }
1357
1358 if (size == 28 || size == 108) {
1359 PROF_EVENT(84); /* XXX assign correct event number */
1360 fpu_read_check_SLOWLY ( addr, size );
1361 return;
1362 }
1363
1364 VG_(printf)("size is %d\n", size);
1365 VG_(panic)("vgmext_fpu_read_check: unhandled size");
1366# endif
1367}
1368
1369
1370__attribute__ ((regparm(2)))
1371void SK_(fpu_write_check) ( Addr addr, Int size )
1372{
1373 /* Ensure the written area is addressible, and moan if otherwise.
1374 If it is addressible, make it valid, otherwise invalid.
1375 */
1376
1377 SecMap* sm;
1378 UInt sm_off, v_off, a_off;
1379 Addr addr4;
1380
1381 PROF_EVENT(85);
1382
1383# ifdef VG_DEBUG_MEMORY
1384 fpu_write_check_SLOWLY ( addr, size );
1385# else
1386
1387 if (size == 4) {
1388 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
1389 PROF_EVENT(86);
1390 /* Properly aligned. */
1391 sm = primary_map[addr >> 16];
1392 sm_off = addr & 0xFFFF;
1393 a_off = sm_off >> 3;
1394 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
1395 /* Properly aligned and addressible. Make valid. */
1396 v_off = addr & 0xFFFF;
1397 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1398 return;
1399 slow4:
1400 fpu_write_check_SLOWLY ( addr, 4 );
1401 return;
1402 }
1403
1404 if (size == 8) {
1405 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
1406 PROF_EVENT(87);
1407 /* Properly aligned. Do it in two halves. */
1408 addr4 = addr + 4;
1409 /* First half. */
1410 sm = primary_map[addr >> 16];
1411 sm_off = addr & 0xFFFF;
1412 a_off = sm_off >> 3;
1413 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1414 /* First half properly aligned and addressible. Make valid. */
1415 v_off = addr & 0xFFFF;
1416 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1417 /* Second half. */
1418 sm = primary_map[addr4 >> 16];
1419 sm_off = addr4 & 0xFFFF;
1420 a_off = sm_off >> 3;
1421 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1422 /* Second half properly aligned and addressible. */
1423 v_off = addr4 & 0xFFFF;
1424 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1425 /* Properly aligned, addressible and with valid data. */
1426 return;
1427 slow8:
1428 fpu_write_check_SLOWLY ( addr, 8 );
1429 return;
1430 }
1431
1432 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
1433 cases go quickly. */
1434 if (size == 2) {
1435 PROF_EVENT(88);
1436 fpu_write_check_SLOWLY ( addr, 2 );
1437 return;
1438 }
1439
1440 if (size == 10) {
1441 PROF_EVENT(89);
1442 fpu_write_check_SLOWLY ( addr, 10 );
1443 return;
1444 }
1445
1446 if (size == 28 || size == 108) {
1447 PROF_EVENT(89); /* XXX assign correct event number */
1448 fpu_write_check_SLOWLY ( addr, size );
1449 return;
1450 }
1451
1452 VG_(printf)("size is %d\n", size);
1453 VG_(panic)("vgmext_fpu_write_check: unhandled size");
1454# endif
1455}
1456
1457
1458/* ---------------------------------------------------------------------
1459 Slow, general cases for FPU load and store checks.
1460 ------------------------------------------------------------------ */
1461
1462/* Generic version. Test for both addr and value errors, but if
1463 there's an addr error, don't report a value error even if it
1464 exists. */
1465
1466void fpu_read_check_SLOWLY ( Addr addr, Int size )
1467{
1468 Int i;
1469 Bool aerr = False;
1470 Bool verr = False;
1471 PROF_EVENT(90);
1472 for (i = 0; i < size; i++) {
1473 PROF_EVENT(91);
1474 if (get_abit(addr+i) != VGM_BIT_VALID)
1475 aerr = True;
1476 if (get_vbyte(addr+i) != VGM_BYTE_VALID)
1477 verr = True;
1478 }
1479
1480 if (aerr) {
1481 SK_(record_address_error)( addr, size, False );
1482 } else {
1483 if (verr)
1484 SK_(record_value_error)( size );
1485 }
1486}
1487
1488
1489/* Generic version. Test for addr errors. Valid addresses are
1490 given valid values, and invalid addresses invalid values. */
1491
1492void fpu_write_check_SLOWLY ( Addr addr, Int size )
1493{
1494 Int i;
1495 Addr a_here;
1496 Bool a_ok;
1497 Bool aerr = False;
1498 PROF_EVENT(92);
1499 for (i = 0; i < size; i++) {
1500 PROF_EVENT(93);
1501 a_here = addr+i;
1502 a_ok = get_abit(a_here) == VGM_BIT_VALID;
1503 if (a_ok) {
1504 set_vbyte(a_here, VGM_BYTE_VALID);
1505 } else {
1506 set_vbyte(a_here, VGM_BYTE_INVALID);
1507 aerr = True;
1508 }
1509 }
1510 if (aerr) {
1511 SK_(record_address_error)( addr, size, True );
1512 }
1513}
1514
1515/*------------------------------------------------------------*/
1516/*--- Shadow chunks info ---*/
1517/*------------------------------------------------------------*/
1518
1519static __inline__
1520void set_where( ShadowChunk* sc, ExeContext* ec )
1521{
1522 sc->skin_extra[0] = (UInt)ec;
1523}
1524
1525static __inline__
1526ExeContext *get_where( ShadowChunk* sc )
1527{
1528 return (ExeContext*)sc->skin_extra[0];
1529}
1530
1531void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst )
1532{
1533 set_where( sc, VG_(get_ExeContext) ( tst ) );
1534}
1535
1536/*------------------------------------------------------------*/
1537/*--- Postponing free()ing ---*/
1538/*------------------------------------------------------------*/
1539
1540/* Holds blocks after freeing. */
1541static ShadowChunk* vg_freed_list_start = NULL;
1542static ShadowChunk* vg_freed_list_end = NULL;
1543static Int vg_freed_list_volume = 0;
1544
1545static __attribute__ ((unused))
1546 Int count_freelist ( void )
1547{
1548 ShadowChunk* sc;
1549 Int n = 0;
1550 for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
1551 n++;
1552 return n;
1553}
1554
1555static __attribute__ ((unused))
1556 void freelist_sanity ( void )
1557{
1558 ShadowChunk* sc;
1559 Int n = 0;
1560 /* VG_(printf)("freelist sanity\n"); */
1561 for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
1562 n += sc->size;
1563 vg_assert(n == vg_freed_list_volume);
1564}
1565
1566/* Put a shadow chunk on the freed blocks queue, possibly freeing up
1567 some of the oldest blocks in the queue at the same time. */
1568static void add_to_freed_queue ( ShadowChunk* sc )
1569{
1570 ShadowChunk* sc1;
1571
1572 /* Put it at the end of the freed list */
1573 if (vg_freed_list_end == NULL) {
1574 vg_assert(vg_freed_list_start == NULL);
1575 vg_freed_list_end = vg_freed_list_start = sc;
1576 vg_freed_list_volume = sc->size;
1577 } else {
1578 vg_assert(vg_freed_list_end->next == NULL);
1579 vg_freed_list_end->next = sc;
1580 vg_freed_list_end = sc;
1581 vg_freed_list_volume += sc->size;
1582 }
1583 sc->next = NULL;
1584
1585 /* Release enough of the oldest blocks to bring the free queue
1586 volume below vg_clo_freelist_vol. */
1587
1588 while (vg_freed_list_volume > SK_(clo_freelist_vol)) {
1589 /* freelist_sanity(); */
1590 vg_assert(vg_freed_list_start != NULL);
1591 vg_assert(vg_freed_list_end != NULL);
1592
1593 sc1 = vg_freed_list_start;
1594 vg_freed_list_volume -= sc1->size;
1595 /* VG_(printf)("volume now %d\n", vg_freed_list_volume); */
1596 vg_assert(vg_freed_list_volume >= 0);
1597
1598 if (vg_freed_list_start == vg_freed_list_end) {
1599 vg_freed_list_start = vg_freed_list_end = NULL;
1600 } else {
1601 vg_freed_list_start = sc1->next;
1602 }
1603 sc1->next = NULL; /* just paranoia */
1604 VG_(freeShadowChunk) ( sc1 );
1605 }
1606}
1607
1608/* Return the first shadow chunk satisfying the predicate p. */
1609ShadowChunk* SK_(any_matching_freed_ShadowChunks)
1610 ( Bool (*p) ( ShadowChunk* ))
1611{
1612 ShadowChunk* sc;
1613
1614 /* No point looking through freed blocks if we're not keeping
1615 them around for a while... */
1616 for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
1617 if (p(sc))
1618 return sc;
1619
1620 return NULL;
1621}
1622
1623void SK_(alt_free) ( ShadowChunk* sc, ThreadState* tst )
1624{
1625 /* Record where freed */
1626 set_where( sc, VG_(get_ExeContext) ( tst ) );
1627
1628 /* Put it out of harm's way for a while. */
1629 add_to_freed_queue ( sc );
1630}
1631
1632/*------------------------------------------------------------*/
1633/*--- Low-level address-space scanning, for the leak ---*/
1634/*--- detector. ---*/
1635/*------------------------------------------------------------*/
1636
1637static
1638jmp_buf memscan_jmpbuf;
1639
1640static
1641void vg_scan_all_valid_memory_sighandler ( Int sigNo )
1642{
1643 __builtin_longjmp(memscan_jmpbuf, 1);
1644}
1645
1646/* Safely (avoiding SIGSEGV / SIGBUS) scan the entire valid address
1647 space and pass the addresses and values of all addressible,
1648 defined, aligned words to notify_word. This is the basis for the
1649 leak detector. Returns the number of calls made to notify_word. */
1650UInt VG_(scan_all_valid_memory) ( void (*notify_word)( Addr, UInt ) )
1651{
1652 /* All volatile, because some gccs seem paranoid about longjmp(). */
1653 volatile UInt res, numPages, page, vbytes, primaryMapNo, nWordsNotified;
1654 volatile Addr pageBase, addr;
1655 volatile SecMap* sm;
1656 volatile UChar abits;
1657 volatile UInt page_first_word;
1658
1659 vki_ksigaction sigbus_saved;
1660 vki_ksigaction sigbus_new;
1661 vki_ksigaction sigsegv_saved;
1662 vki_ksigaction sigsegv_new;
1663 vki_ksigset_t blockmask_saved;
1664 vki_ksigset_t unblockmask_new;
1665
1666 /* Temporarily install a new sigsegv and sigbus handler, and make
1667 sure SIGBUS, SIGSEGV and SIGTERM are unblocked. (Perhaps the
1668 first two can never be blocked anyway?) */
1669
1670 sigbus_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
1671 sigbus_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
1672 sigbus_new.ksa_restorer = NULL;
1673 res = VG_(ksigemptyset)( &sigbus_new.ksa_mask );
1674 vg_assert(res == 0);
1675
1676 sigsegv_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
1677 sigsegv_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
1678 sigsegv_new.ksa_restorer = NULL;
1679 res = VG_(ksigemptyset)( &sigsegv_new.ksa_mask );
1680 vg_assert(res == 0+0);
1681
1682 res = VG_(ksigemptyset)( &unblockmask_new );
1683 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGBUS );
1684 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGSEGV );
1685 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGTERM );
1686 vg_assert(res == 0+0+0);
1687
1688 res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_new, &sigbus_saved );
1689 vg_assert(res == 0+0+0+0);
1690
1691 res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_new, &sigsegv_saved );
1692 vg_assert(res == 0+0+0+0+0);
1693
1694 res = VG_(ksigprocmask)( VKI_SIG_UNBLOCK, &unblockmask_new, &blockmask_saved );
1695 vg_assert(res == 0+0+0+0+0+0);
1696
1697 /* The signal handlers are installed. Actually do the memory scan. */
1698 numPages = 1 << (32-VKI_BYTES_PER_PAGE_BITS);
1699 vg_assert(numPages == 1048576);
1700 vg_assert(4096 == (1 << VKI_BYTES_PER_PAGE_BITS));
1701
1702 nWordsNotified = 0;
1703
1704 for (page = 0; page < numPages; page++) {
1705 pageBase = page << VKI_BYTES_PER_PAGE_BITS;
1706 primaryMapNo = pageBase >> 16;
1707 sm = primary_map[primaryMapNo];
1708 if (IS_DISTINGUISHED_SM(sm)) continue;
1709 if (__builtin_setjmp(memscan_jmpbuf) == 0) {
1710 /* try this ... */
1711 page_first_word = * (volatile UInt*)pageBase;
1712 /* we get here if we didn't get a fault */
1713 /* Scan the page */
1714 for (addr = pageBase; addr < pageBase+VKI_BYTES_PER_PAGE; addr += 4) {
1715 abits = get_abits4_ALIGNED(addr);
1716 vbytes = get_vbytes4_ALIGNED(addr);
1717 if (abits == VGM_NIBBLE_VALID
1718 && vbytes == VGM_WORD_VALID) {
1719 nWordsNotified++;
1720 notify_word ( addr, *(UInt*)addr );
1721 }
1722 }
1723 } else {
1724 /* We get here if reading the first word of the page caused a
1725 fault, which in turn caused the signal handler to longjmp.
1726 Ignore this page. */
1727 if (0)
1728 VG_(printf)(
1729 "vg_scan_all_valid_memory_sighandler: ignoring page at %p\n",
1730 (void*)pageBase
1731 );
1732 }
1733 }
1734
1735 /* Restore signal state to whatever it was before. */
1736 res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_saved, NULL );
1737 vg_assert(res == 0 +0);
1738
1739 res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_saved, NULL );
1740 vg_assert(res == 0 +0 +0);
1741
1742 res = VG_(ksigprocmask)( VKI_SIG_SETMASK, &blockmask_saved, NULL );
1743 vg_assert(res == 0 +0 +0 +0);
1744
1745 return nWordsNotified;
1746}
1747
1748
1749/*------------------------------------------------------------*/
1750/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1751/*------------------------------------------------------------*/
1752
1753/* A block is either
1754 -- Proper-ly reached; a pointer to its start has been found
1755 -- Interior-ly reached; only an interior pointer to it has been found
1756 -- Unreached; so far, no pointers to any part of it have been found.
1757*/
1758typedef
1759 enum { Unreached, Interior, Proper }
1760 Reachedness;
1761
1762/* A block record, used for generating err msgs. */
1763typedef
1764 struct _LossRecord {
1765 struct _LossRecord* next;
1766 /* Where these lost blocks were allocated. */
1767 ExeContext* allocated_at;
1768 /* Their reachability. */
1769 Reachedness loss_mode;
1770 /* Number of blocks and total # bytes involved. */
1771 UInt total_bytes;
1772 UInt num_blocks;
1773 }
1774 LossRecord;
1775
1776
1777/* Find the i such that ptr points at or inside the block described by
1778 shadows[i]. Return -1 if none found. This assumes that shadows[]
1779 has been sorted on the ->data field. */
1780
1781#ifdef VG_DEBUG_LEAKCHECK
1782/* Used to sanity-check the fast binary-search mechanism. */
1783static Int find_shadow_for_OLD ( Addr ptr,
1784 ShadowChunk** shadows,
1785 Int n_shadows )
1786
1787{
1788 Int i;
1789 Addr a_lo, a_hi;
1790 PROF_EVENT(70);
1791 for (i = 0; i < n_shadows; i++) {
1792 PROF_EVENT(71);
1793 a_lo = shadows[i]->data;
1794 a_hi = ((Addr)shadows[i]->data) + shadows[i]->size - 1;
1795 if (a_lo <= ptr && ptr <= a_hi)
1796 return i;
1797 }
1798 return -1;
1799}
1800#endif
1801
1802
1803static Int find_shadow_for ( Addr ptr,
1804 ShadowChunk** shadows,
1805 Int n_shadows )
1806{
1807 Addr a_mid_lo, a_mid_hi;
1808 Int lo, mid, hi, retVal;
1809 PROF_EVENT(70);
1810 /* VG_(printf)("find shadow for %p = ", ptr); */
1811 retVal = -1;
1812 lo = 0;
1813 hi = n_shadows-1;
1814 while (True) {
1815 PROF_EVENT(71);
1816
1817 /* invariant: current unsearched space is from lo to hi,
1818 inclusive. */
1819 if (lo > hi) break; /* not found */
1820
1821 mid = (lo + hi) / 2;
1822 a_mid_lo = shadows[mid]->data;
1823 a_mid_hi = ((Addr)shadows[mid]->data) + shadows[mid]->size - 1;
1824
1825 if (ptr < a_mid_lo) {
1826 hi = mid-1;
1827 continue;
1828 }
1829 if (ptr > a_mid_hi) {
1830 lo = mid+1;
1831 continue;
1832 }
1833 vg_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
1834 retVal = mid;
1835 break;
1836 }
1837
1838# ifdef VG_DEBUG_LEAKCHECK
1839 vg_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
1840# endif
1841 /* VG_(printf)("%d\n", retVal); */
1842 return retVal;
1843}
1844
1845
1846
1847static void sort_malloc_shadows ( ShadowChunk** shadows, UInt n_shadows )
1848{
1849 Int incs[14] = { 1, 4, 13, 40, 121, 364, 1093, 3280,
1850 9841, 29524, 88573, 265720,
1851 797161, 2391484 };
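   /* Gap sequence h := 3h + 1 (1, 4, 13, 40, ...), as commonly used
      for Shell sort. */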
1852 Int lo = 0;
1853 Int hi = n_shadows-1;
1854 Int i, j, h, bigN, hp;
1855 ShadowChunk* v;
1856
1857 PROF_EVENT(72);
1858 bigN = hi - lo + 1; if (bigN < 2) return;
1859 hp = 0; while (incs[hp] < bigN) hp++; hp--;
1860
1861 for (; hp >= 0; hp--) {
1862 PROF_EVENT(73);
1863 h = incs[hp];
1864 i = lo + h;
1865 while (1) {
1866 PROF_EVENT(74);
1867 if (i > hi) break;
1868 v = shadows[i];
1869 j = i;
1870 while (shadows[j-h]->data > v->data) {
1871 PROF_EVENT(75);
1872 shadows[j] = shadows[j-h];
1873 j = j - h;
1874 if (j <= (lo + h - 1)) break;
1875 }
1876 shadows[j] = v;
1877 i++;
1878 }
1879 }
1880}
1881
1882/* Globals, for the callback used by SK_(detect_memory_leaks). */
1883
1884static ShadowChunk** vglc_shadows;
1885static Int vglc_n_shadows;
1886static Reachedness* vglc_reachedness;
1887static Addr vglc_min_mallocd_addr;
1888static Addr vglc_max_mallocd_addr;
1889
1890static
1891void vg_detect_memory_leaks_notify_addr ( Addr a, UInt word_at_a )
1892{
1893 Int sh_no;
1894 Addr ptr;
1895
1896 /* Rule out some known causes of bogus pointers. Mostly these do
1897 not cause much trouble because only a few false pointers can
1898 ever lurk in these places. This mainly stops it reporting that
1899 blocks are still reachable in stupid test programs like this
1900
1901 int main (void) { char* a = malloc(100); return 0; }
1902
1903 which people seem inordinately fond of writing, for some reason.
1904
1905 Note that this is a complete kludge. It would be better to
1906 ignore any addresses corresponding to valgrind.so's .bss and
1907 .data segments, but I cannot think of a reliable way to identify
1908 where the .bss segment has been put. If you can, drop me a
1909 line.
1910 */
1911 if (VG_(within_stack)(a)) return;
1912 if (VG_(within_m_state_static)(a)) return;
1913 if (a == (Addr)(&vglc_min_mallocd_addr)) return;
1914 if (a == (Addr)(&vglc_max_mallocd_addr)) return;
1915
1916 /* OK, let's get on and do something Useful for a change. */
1917
1918 ptr = (Addr)word_at_a;
1919 if (ptr >= vglc_min_mallocd_addr && ptr <= vglc_max_mallocd_addr) {
1920 /* Might be legitimate; we'll have to investigate further. */
1921 sh_no = find_shadow_for ( ptr, vglc_shadows, vglc_n_shadows );
1922 if (sh_no != -1) {
1923 /* Found a block at/into which ptr points. */
1924 vg_assert(sh_no >= 0 && sh_no < vglc_n_shadows);
1925 vg_assert(ptr < vglc_shadows[sh_no]->data
1926 + vglc_shadows[sh_no]->size);
1927 /* Decide whether Proper-ly or Interior-ly reached. */
1928 if (ptr == vglc_shadows[sh_no]->data) {
1929 if (0) VG_(printf)("pointer at %p to %p\n", a, word_at_a );
1930 vglc_reachedness[sh_no] = Proper;
1931 } else {
1932 if (vglc_reachedness[sh_no] == Unreached)
1933 vglc_reachedness[sh_no] = Interior;
1934 }
1935 }
1936 }
1937}
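/* An illustrative client program (not from the original sources)
   showing how the three Reachedness outcomes arise.  At exit, block p1
   is reached via a pointer to its start (Proper -> "still reachable",
   listed only with --show-reachable=yes), the second block only via an
   interior pointer (Interior -> "possibly lost"), and the third block
   not at all (Unreached -> "definitely lost"). */
#if 0
#include <stdlib.h>

static char* p1;       /* pointer to the start of a block is kept   */
static char* p2_mid;   /* only a pointer into the middle is kept    */

int main ( void )
{
   p1     = malloc(100);
   p2_mid = (char*)malloc(100) + 50;
   (void)malloc(100);            /* third block: pointer discarded   */
   return 0;                     /* exit without freeing anything    */
}
#endif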
1938
1939
1940void SK_(detect_memory_leaks) ( void )
1941{
1942 Int i;
1943 Int blocks_leaked, bytes_leaked;
1944 Int blocks_dubious, bytes_dubious;
1945 Int blocks_reachable, bytes_reachable;
1946 Int n_lossrecords;
1947 UInt bytes_notified;
1948
1949 LossRecord* errlist;
1950 LossRecord* p;
1951
1952 PROF_EVENT(76);
1953
1954 /* VG_(get_malloc_shadows) allocates storage for shadows */
1955 vglc_shadows = VG_(get_malloc_shadows)( &vglc_n_shadows );
1956 if (vglc_n_shadows == 0) {
1957 vg_assert(vglc_shadows == NULL);
1958 VG_(message)(Vg_UserMsg,
1959          "No malloc'd blocks -- no leaks are possible.");
1960 return;
1961 }
1962
1963 VG_(message)(Vg_UserMsg,
1964 "searching for pointers to %d not-freed blocks.",
1965 vglc_n_shadows );
1966 sort_malloc_shadows ( vglc_shadows, vglc_n_shadows );
1967
1968 /* Sanity check; assert that the blocks are now in order and that
1969 they don't overlap. */
1970 for (i = 0; i < vglc_n_shadows-1; i++) {
1971 vg_assert( ((Addr)vglc_shadows[i]->data)
1972 < ((Addr)vglc_shadows[i+1]->data) );
1973 vg_assert( ((Addr)vglc_shadows[i]->data) + vglc_shadows[i]->size
1974 < ((Addr)vglc_shadows[i+1]->data) );
1975 }
1976
1977 vglc_min_mallocd_addr = ((Addr)vglc_shadows[0]->data);
1978 vglc_max_mallocd_addr = ((Addr)vglc_shadows[vglc_n_shadows-1]->data)
1979 + vglc_shadows[vglc_n_shadows-1]->size - 1;
1980
1981 vglc_reachedness
1982 = VG_(malloc)( vglc_n_shadows * sizeof(Reachedness) );
1983 for (i = 0; i < vglc_n_shadows; i++)
1984 vglc_reachedness[i] = Unreached;
1985
1986 /* Do the scan of memory. */
1987 bytes_notified
1988 = VG_(scan_all_valid_memory)( &vg_detect_memory_leaks_notify_addr )
1989 * VKI_BYTES_PER_WORD;
1990
1991 VG_(message)(Vg_UserMsg, "checked %d bytes.", bytes_notified);
1992
1993 blocks_leaked = bytes_leaked = 0;
1994 blocks_dubious = bytes_dubious = 0;
1995 blocks_reachable = bytes_reachable = 0;
1996
1997 for (i = 0; i < vglc_n_shadows; i++) {
1998 if (vglc_reachedness[i] == Unreached) {
1999 blocks_leaked++;
2000 bytes_leaked += vglc_shadows[i]->size;
2001 }
2002 else if (vglc_reachedness[i] == Interior) {
2003 blocks_dubious++;
2004 bytes_dubious += vglc_shadows[i]->size;
2005 }
2006 else if (vglc_reachedness[i] == Proper) {
2007 blocks_reachable++;
2008 bytes_reachable += vglc_shadows[i]->size;
2009 }
2010 }
2011
2012 VG_(message)(Vg_UserMsg, "");
2013 VG_(message)(Vg_UserMsg, "definitely lost: %d bytes in %d blocks.",
2014 bytes_leaked, blocks_leaked );
2015 VG_(message)(Vg_UserMsg, "possibly lost: %d bytes in %d blocks.",
2016 bytes_dubious, blocks_dubious );
2017 VG_(message)(Vg_UserMsg, "still reachable: %d bytes in %d blocks.",
2018 bytes_reachable, blocks_reachable );
2019
2020
2021 /* Common up the lost blocks so we can print sensible error
2022 messages. */
2023
2024 n_lossrecords = 0;
2025 errlist = NULL;
2026 for (i = 0; i < vglc_n_shadows; i++) {
2027
2028 /* 'where' stored in 'skin_extra' field */
2029 ExeContext* where = get_where ( vglc_shadows[i] );
2030
2031 for (p = errlist; p != NULL; p = p->next) {
2032 if (p->loss_mode == vglc_reachedness[i]
2033 && VG_(eq_ExeContext) ( SK_(clo_leak_resolution),
2034 p->allocated_at,
2035 where) ) {
2036 break;
2037 }
2038 }
2039 if (p != NULL) {
2040 p->num_blocks ++;
2041 p->total_bytes += vglc_shadows[i]->size;
2042 } else {
2043 n_lossrecords ++;
2044 p = VG_(malloc)(sizeof(LossRecord));
2045 p->loss_mode = vglc_reachedness[i];
2046 p->allocated_at = where;
2047 p->total_bytes = vglc_shadows[i]->size;
2048 p->num_blocks = 1;
2049 p->next = errlist;
2050 errlist = p;
2051 }
2052 }
2053
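   /* Print the loss records, smallest total_bytes first: each pass
      picks the not-yet-printed record (num_blocks > 0) with the least
      total_bytes, prints it, and then marks it as printed by zeroing
      num_blocks.  Records for Proper (reachable) blocks are skipped
      unless --show-reachable=yes was given. */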
2054 for (i = 0; i < n_lossrecords; i++) {
2055 LossRecord* p_min = NULL;
2056 UInt n_min = 0xFFFFFFFF;
2057 for (p = errlist; p != NULL; p = p->next) {
2058 if (p->num_blocks > 0 && p->total_bytes < n_min) {
2059 n_min = p->total_bytes;
2060 p_min = p;
2061 }
2062 }
2063 vg_assert(p_min != NULL);
2064
2065 if ( (!SK_(clo_show_reachable)) && p_min->loss_mode == Proper) {
2066 p_min->num_blocks = 0;
2067 continue;
2068 }
2069
2070 VG_(message)(Vg_UserMsg, "");
2071 VG_(message)(
2072 Vg_UserMsg,
2073 "%d bytes in %d blocks are %s in loss record %d of %d",
2074 p_min->total_bytes, p_min->num_blocks,
2075 p_min->loss_mode==Unreached ? "definitely lost" :
2076 (p_min->loss_mode==Interior ? "possibly lost"
2077 : "still reachable"),
2078 i+1, n_lossrecords
2079 );
2080 VG_(pp_ExeContext)(p_min->allocated_at);
2081 p_min->num_blocks = 0;
2082 }
2083
2084 VG_(message)(Vg_UserMsg, "");
2085 VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
2086 VG_(message)(Vg_UserMsg, " definitely lost: %d bytes in %d blocks.",
2087 bytes_leaked, blocks_leaked );
2088 VG_(message)(Vg_UserMsg, " possibly lost: %d bytes in %d blocks.",
2089 bytes_dubious, blocks_dubious );
2090 VG_(message)(Vg_UserMsg, " still reachable: %d bytes in %d blocks.",
2091 bytes_reachable, blocks_reachable );
2092 if (!SK_(clo_show_reachable)) {
2093 VG_(message)(Vg_UserMsg,
2094 "Reachable blocks (those to which a pointer was found) are not shown.");
2095 VG_(message)(Vg_UserMsg,
2096 "To see them, rerun with: --show-reachable=yes");
2097 }
2098 VG_(message)(Vg_UserMsg, "");
2099
2100 VG_(free) ( vglc_shadows );
2101 VG_(free) ( vglc_reachedness );
2102}
2103
2104
2105/* ---------------------------------------------------------------------
2106 Sanity check machinery (permanently engaged).
2107 ------------------------------------------------------------------ */
2108
2109/* Check that nobody has spuriously claimed that the first or last 16
2110 pages (64 KB) of address space have become accessible. Failure of
2111 the following do not per se indicate an internal consistency
2112    the following checks does not per se indicate an internal
2113    consistency problem, but it is so likely to that we really want
2114    to know about it if so. */
2115Bool SK_(cheap_sanity_check) ( void )
2116{
2117 if (IS_DISTINGUISHED_SM(primary_map[0]) &&
2118 IS_DISTINGUISHED_SM(primary_map[65535]))
2119 return True;
2120 else
2121 return False;
2122}
2123
2124Bool SK_(expensive_sanity_check) ( void )
2125{
2126 Int i;
2127
2128 /* Make sure nobody changed the distinguished secondary. */
2129 for (i = 0; i < 8192; i++)
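   /* (Each secondary map covers 64k bytes, so it holds 64k/8 == 8192
      A-bit bytes and 64k V-bytes; hence the loop bounds below.) */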
2130 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
2131 return False;
2132
2133 for (i = 0; i < 65536; i++)
2134 if (distinguished_secondary_map.vbyte[i] != VGM_BYTE_INVALID)
2135 return False;
2136
2137 /* Make sure that the upper 3/4 of the primary map hasn't
2138 been messed with. */
2139 for (i = 65536; i < 262144; i++)
2140 if (primary_map[i] != & distinguished_secondary_map)
2141 return False;
2142
2143 return True;
2144}
2145
2146/* ---------------------------------------------------------------------
2147 Debugging machinery (turn on to debug). Something of a mess.
2148 ------------------------------------------------------------------ */
2149
2150#if 0
2151/* Print the value tags on the 8 integer registers & flag reg. */
2152
2153static void uint_to_bits ( UInt x, Char* str )
2154{
2155 Int i;
2156 Int w = 0;
2157 /* str must point to a space of at least 36 bytes. */
2158 for (i = 31; i >= 0; i--) {
2159 str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
2160 if (i == 24 || i == 16 || i == 8)
2161 str[w++] = ' ';
2162 }
2163 str[w++] = 0;
2164 vg_assert(w == 36);
2165}
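/* Illustrative example (not from the original sources): uint_to_bits
   writes 32 '0'/'1' characters with a space after each of the top three
   bytes, so uint_to_bits(0xDEADBEEF, buf) leaves buf containing
   "11011110 10101101 10111110 11101111" -- 35 characters plus the
   trailing NUL, matching the w == 36 assertion above. */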
2166
2167/* Caution! Not vthread-safe; looks in VG_(baseBlock), not the thread
2168 state table. */
2169
2170static void vg_show_reg_tags ( void )
2171{
2172 Char buf1[36];
2173 Char buf2[36];
2174 UInt z_eax, z_ebx, z_ecx, z_edx,
2175 z_esi, z_edi, z_ebp, z_esp, z_eflags;
2176
2177 z_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
2178 z_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
2179 z_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
2180 z_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
2181 z_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
2182 z_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
2183 z_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
2184 z_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
2185 z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
2186
2187 uint_to_bits(z_eflags, buf1);
2188    VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);
2189
2190 uint_to_bits(z_eax, buf1);
2191 uint_to_bits(z_ebx, buf2);
2192 VG_(message)(Vg_DebugMsg, "eax %s ebx %s\n", buf1, buf2);
2193
2194 uint_to_bits(z_ecx, buf1);
2195 uint_to_bits(z_edx, buf2);
2196 VG_(message)(Vg_DebugMsg, "ecx %s edx %s\n", buf1, buf2);
2197
2198 uint_to_bits(z_esi, buf1);
2199 uint_to_bits(z_edi, buf2);
2200 VG_(message)(Vg_DebugMsg, "esi %s edi %s\n", buf1, buf2);
2201
2202 uint_to_bits(z_ebp, buf1);
2203 uint_to_bits(z_esp, buf2);
2204 VG_(message)(Vg_DebugMsg, "ebp %s esp %s\n", buf1, buf2);
2205}
2206
2207
2208/* For debugging only. Scan the address space and touch all allegedly
2209    addressable words. Useful for establishing where Valgrind's idea of
2210    addressability has diverged from what the kernel believes. */
2211
2212static
2213void zzzmemscan_notify_word ( Addr a, UInt w )
2214{
2215}
2216
2217void zzzmemscan ( void )
2218{
2219 Int n_notifies
2220 = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
2221 VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
2222}
2223#endif
2224
2225
2226
2227
2228#if 0
2229static Int zzz = 0;
2230
2231void show_bb ( Addr eip_next )
2232{
2233 VG_(printf)("[%4d] ", zzz);
2234    vg_show_reg_tags();
2235 VG_(translate) ( eip_next, NULL, NULL, NULL );
2236}
2237#endif /* 0 */
2238
2239/*------------------------------------------------------------*/
2240/*--- Syscall wrappers ---*/
2241/*------------------------------------------------------------*/
2242
2243void* SK_(pre_syscall) ( ThreadId tid, UInt syscallno, Bool isBlocking )
2244{
2245 Int sane = SK_(cheap_sanity_check)();
2246 return (void*)sane;
2247}
2248
2249void SK_(post_syscall) ( ThreadId tid, UInt syscallno,
2250 void* pre_result, Int res, Bool isBlocking )
2251{
2252 Int sane_before_call = (Int)pre_result;
2253 Bool sane_after_call = SK_(cheap_sanity_check)();
2254
2255    if (sane_before_call && !sane_after_call) {
2256       VG_(message)(Vg_DebugMsg,
2257          "post-syscall: probable sanity check failure "
2258          "for syscall number %d",
2259          syscallno );
2260 VG_(panic)("aborting due to the above ... bye!");
2261 }
2262}
2263
2264
2265/*------------------------------------------------------------*/
2266/*--- Setup ---*/
2267/*------------------------------------------------------------*/
2268
2269void SK_(written_shadow_regs_values)( UInt* gen_reg_value, UInt* eflags_value )
2270{
2271 *gen_reg_value = VGM_WORD_VALID;
2272 *eflags_value = VGM_EFLAGS_VALID;
2273}
2274
2275Bool SK_(process_cmd_line_option)(Char* arg)
2276{
2277# define STREQ(s1,s2) (0==VG_(strcmp_ws)((s1),(s2)))
2278# define STREQN(nn,s1,s2) (0==VG_(strncmp_ws)((s1),(s2),(nn)))
2279
2280 if (STREQ(arg, "--partial-loads-ok=yes"))
2281 SK_(clo_partial_loads_ok) = True;
2282 else if (STREQ(arg, "--partial-loads-ok=no"))
2283 SK_(clo_partial_loads_ok) = False;
2284
2285 else if (STREQN(15, arg, "--freelist-vol=")) {
2286 SK_(clo_freelist_vol) = (Int)VG_(atoll)(&arg[15]);
2287 if (SK_(clo_freelist_vol) < 0) SK_(clo_freelist_vol) = 0;
2288 }
2289
2290 else if (STREQ(arg, "--leak-check=yes"))
2291 SK_(clo_leak_check) = True;
2292 else if (STREQ(arg, "--leak-check=no"))
2293 SK_(clo_leak_check) = False;
2294
2295 else if (STREQ(arg, "--leak-resolution=low"))
2296 SK_(clo_leak_resolution) = Vg_LowRes;
2297 else if (STREQ(arg, "--leak-resolution=med"))
2298 SK_(clo_leak_resolution) = Vg_MedRes;
2299 else if (STREQ(arg, "--leak-resolution=high"))
2300 SK_(clo_leak_resolution) = Vg_HighRes;
2301
2302 else if (STREQ(arg, "--show-reachable=yes"))
2303 SK_(clo_show_reachable) = True;
2304 else if (STREQ(arg, "--show-reachable=no"))
2305 SK_(clo_show_reachable) = False;
2306
2307 else if (STREQ(arg, "--workaround-gcc296-bugs=yes"))
2308 SK_(clo_workaround_gcc296_bugs) = True;
2309 else if (STREQ(arg, "--workaround-gcc296-bugs=no"))
2310 SK_(clo_workaround_gcc296_bugs) = False;
2311
2312 else if (STREQ(arg, "--check-addrVs=yes"))
2313 SK_(clo_check_addrVs) = True;
2314 else if (STREQ(arg, "--check-addrVs=no"))
2315 SK_(clo_check_addrVs) = False;
2316
2317 else if (STREQ(arg, "--cleanup=yes"))
2318 SK_(clo_cleanup) = True;
2319 else if (STREQ(arg, "--cleanup=no"))
2320 SK_(clo_cleanup) = False;
2321
2322 else
2323 return False;
2324
2325 return True;
2326
2327#undef STREQ
2328#undef STREQN
2329}
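/* For example (illustrative values): given arg == "--freelist-vol=500000",
   STREQN(15, ...) above matches the 15-character prefix "--freelist-vol="
   and &arg[15] points at "500000", which VG_(atoll) converts; negative
   values are clamped to zero. */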
2330
2331Char* SK_(usage)(void)
2332{
2333 return
2334" --partial-loads-ok=no|yes too hard to explain here; see manual [yes]\n"
2335" --freelist-vol=<number> volume of freed blocks queue [1000000]\n"
2336" --leak-check=no|yes search for memory leaks at exit? [no]\n"
2337" --leak-resolution=low|med|high\n"
2338"      amount of backtrace merging in leak check [low]\n"
2339" --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
2340" --workaround-gcc296-bugs=no|yes  self-explanatory [no]\n"
2341" --check-addrVs=no|yes     experimental lighter-weight checking? [yes]\n"
2342" yes == Valgrind's original behaviour\n"
2343"\n"
2344" --cleanup=no|yes improve after instrumentation? [yes]\n";
2345}
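/* An illustrative invocation exercising the options above (assuming this
   skin is the one in use; ./myprog is hypothetical):

      valgrind --leak-check=yes --leak-resolution=high \
               --show-reachable=yes ./myprog
*/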
2346
2347
2348/*------------------------------------------------------------*/
2349/*--- Setup ---*/
2350/*------------------------------------------------------------*/
2351
2352void SK_(pre_clo_init)(VgNeeds* needs, VgTrackEvents* track)
2353{
2354 needs->name = "valgrind";
2355 needs->description = "a memory error detector";
2356
2357 needs->core_errors = True;
2358 needs->skin_errors = True;
2359 needs->run_libc_freeres = True;
2360
2361 needs->sizeof_shadow_block = 1;
2362
2363 needs->basic_block_discards = False;
2364 needs->shadow_regs = True;
2365 needs->command_line_options = True;
2366 needs->client_requests = True;
2367 needs->extended_UCode = True;
2368 needs->syscall_wrapper = True;
2369 needs->alternative_free = True;
2370 needs->sanity_checks = True;
2371
2372 VG_(register_compact_helper)((Addr) & SK_(helper_value_check4_fail));
2373 VG_(register_compact_helper)((Addr) & SK_(helper_value_check0_fail));
2374 VG_(register_compact_helper)((Addr) & SK_(helperc_STOREV4));
2375 VG_(register_compact_helper)((Addr) & SK_(helperc_STOREV1));
2376 VG_(register_compact_helper)((Addr) & SK_(helperc_LOADV4));
2377 VG_(register_compact_helper)((Addr) & SK_(helperc_LOADV1));
2378
2379 /* These two made non-compact because 2-byte transactions are rare. */
2380 VG_(register_noncompact_helper)((Addr) & SK_(helperc_STOREV2));
2381 VG_(register_noncompact_helper)((Addr) & SK_(helperc_LOADV2));
2382 VG_(register_noncompact_helper)((Addr) & SK_(fpu_write_check));
2383 VG_(register_noncompact_helper)((Addr) & SK_(fpu_read_check));
2384 VG_(register_noncompact_helper)((Addr) & SK_(helper_value_check2_fail));
2385 VG_(register_noncompact_helper)((Addr) & SK_(helper_value_check1_fail));
2386
2387 /* Events to track */
2388 track->new_mem_startup = & memcheck_new_mem_startup;
2389 track->new_mem_heap = & memcheck_new_mem_heap;
2390 track->new_mem_stack = & SK_(make_writable);
2391 track->new_mem_stack_aligned = & make_writable_aligned;
2392 track->new_mem_stack_signal = & SK_(make_writable);
2393 track->new_mem_brk = & SK_(make_writable);
2394 track->new_mem_mmap = & memcheck_set_perms;
2395
2396 track->copy_mem_heap = & copy_address_range_state;
2397 track->copy_mem_remap = & copy_address_range_state;
2398 track->change_mem_mprotect = & memcheck_set_perms;
2399
2400 track->ban_mem_heap = & SK_(make_noaccess);
2401 track->ban_mem_stack = & SK_(make_noaccess);
2402
2403 track->die_mem_heap = & SK_(make_noaccess);
2404 track->die_mem_stack = & SK_(make_noaccess);
2405 track->die_mem_stack_aligned = & make_noaccess_aligned;
2406 track->die_mem_stack_signal = & SK_(make_noaccess);
2407 track->die_mem_brk = & SK_(make_noaccess);
2408 track->die_mem_munmap = & SK_(make_noaccess);
2409
2410 track->bad_free = & SK_(record_free_error);
2411 track->mismatched_free = & SK_(record_freemismatch_error);
2412
2413 track->pre_mem_read = & check_is_readable;
2414 track->pre_mem_read_asciiz = & check_is_readable_asciiz;
2415 track->pre_mem_write = & check_is_writable;
2416 track->post_mem_write = & SK_(make_readable);
2417
2418 init_shadow_memory();
2419
2420 init_prof_mem();
2421
2422 VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
2423 VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
2424}
2425
2426/*--------------------------------------------------------------------*/
2427/*--- end vg_memcheck.c ---*/
2428/*--------------------------------------------------------------------*/